/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */
52 #include "pthread_internals.h"
53 #include "pthread_workqueue.h"
56 #include <stdio.h> /* For printf(). */
58 #include <errno.h> /* For __mach_errno_addr() prototype. */
61 #include <sys/resource.h>
62 #include <sys/sysctl.h>
63 #include <sys/queue.h>
65 #include <machine/vmparam.h>
66 #include <mach/vm_statistics.h>
67 #include <mach/mach_init.h>
68 #define __APPLE_API_PRIVATE
69 #include <machine/cpu_capabilities.h>
70 #include <libkern/OSAtomic.h>
72 #include <libkern/OSCrossEndian.h>
74 #include <dispatch/private.h> /* for at_fork handlers */
extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);
#ifndef BUILDING_VARIANT /* [ */

__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);

int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];
/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void *stack, size_t stacksize, int kernalloc, int nozero);
static int _new_pthread_create_suspended(pthread_t *thread,
					 const pthread_attr_t *attr,
					 void *(*start_routine)(void *),
					 void *arg,
					 int create_susp);
/* the registered libdispatch worker function */
static void (*__libdispatch_workerfunction)(int, int, void *) = NULL;
/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;
int _pthread_find_thread(pthread_t thread);
/* Mach message used to notify that a thread needs to be reaped */

typedef struct _pthread_reap_msg_t {
	mach_msg_header_t header;
	pthread_t thread;
	mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
__private_extern__ uintptr_t commpage_pfz_base = 0;

void __pthread_pfz_setup(const char *apple[]) __attribute__ ((visibility ("hidden")));

static uintptr_t __pfz_from_kernel(const char *str)
{
	unsigned long tmpval;
	/* Skip over key to the first value */
	str = strchr(str, '=');
	if (str == NULL)
		return 0;
	str++;
	tmpval = strtoul(str, NULL, 0); /* may err by 0 or ULONG_MAX */
	if (tmpval == ULONG_MAX)
		tmpval = 0;

	return (uintptr_t)tmpval;
}
void
__pthread_pfz_setup(const char *apple[])
{
	const char **p;

	for (p = apple; p && *p; p++) {
		/* check whether the matching apple variable is at the beginning */
		if (strstr(*p, "pfz=") == *p) {
			commpage_pfz_base = __pfz_from_kernel(*p);
			bzero((void *)*p, strlen(*p));
			break;
		}
	}

	if (commpage_pfz_base == 0)
		commpage_pfz_base = _COMM_PAGE_TEXT_START;
}
/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
 * pthread has been created.
 */
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;
int __unix_conforming = 0;
static int __workqueue_newspis = 0;
static int __workqueue_oldspis = 0;
__private_extern__ size_t pthreadsize = 0;

/* under rosetta we will use old style creation of threads */
static int __oldstyle = 0;

__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
/* Same implementation as LOCK, but without the __is_threaded check */
extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
__private_extern__ void
_spin_lock_retry(pthread_lock_t *lock)
{
	int tries = _spin_tries;
	do {
		if (tries-- > 0)
			continue;
		syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
		tries = _spin_tries;
	} while (!_spin_lock_try(lock));
}
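/*
 * Note (added): on contention the retry loop periodically donates the CPU via
 * syscall_thread_switch(..., SWITCH_OPTION_DEPRESS, 1) so a spinning thread
 * cannot starve the lock holder; _spin_tries is tuned to the CPU count in
 * pthread_init() below.
 */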
static mach_port_t thread_recycle_port = MACH_PORT_NULL;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
 * variables.
 */
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static pthread_attr_t _pthread_attr_default = {0};
static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t *attr);
static int kernel_workq_setup = 0;
static volatile int32_t kernel_workq_count = 0;
static volatile unsigned int user_workq_count = 0;	/* number of outstanding workqueues */
static volatile unsigned int user_workitem_count = 0;	/* number of outstanding workitems */
#define KERNEL_WORKQ_ELEM_MAX	64	/* Max number of elements in the kernel */
static int wqreadyprio = 0;	/* current highest prio queue ready with items */

__private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
__private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);

static struct _pthread_workitem *__workqueue_pool_ptr;
static size_t __workqueue_pool_size = 0;
static int __workqueue_nitems = 0;

struct _pthread_workqueue_head __pthread_workq0_head;
struct _pthread_workqueue_head __pthread_workq1_head;
struct _pthread_workqueue_head __pthread_workq2_head;
struct _pthread_workqueue_head __pthread_workq3_head;
pthread_workqueue_head_t __pthread_wq_head_tbl[WORKQ_NUM_PRIOQUEUE] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head};
static void workqueue_list_lock(void);
static void workqueue_list_unlock(void);
static int valid_workq(pthread_workqueue_t);
static void pick_nextworkqueue_droplock(void);
static int post_nextworkitem(pthread_workqueue_t workq);
static void _pthread_workq_return(pthread_t self);
static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, pthread_workitem_t item, int reuse);
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags);
static pthread_workitem_t alloc_workitem(void);
static void free_workitem(pthread_workitem_t);
static void grow_workitem(void);
static pthread_workqueue_t alloc_workqueue(void);
static void free_workqueue(pthread_workqueue_t);
static int _pthread_work_internal_init(void);
static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);
void _pthread_fork_child_postinit();

void pthread_workqueue_atfork_prepare(void);
void pthread_workqueue_atfork_parent(void);
void pthread_workqueue_atfork_child(void);

extern void dispatch_atfork_prepare(void);
extern void dispatch_atfork_parent(void);
extern void dispatch_atfork_child(void);
/* workq_kernreturn commands */
#define WQOPS_QUEUE_ADD 1
#define WQOPS_QUEUE_REMOVE 2
#define WQOPS_THREAD_RETURN 4
#define WQOPS_THREAD_SETCONC 8
#define WQOPS_QUEUE_NEWSPISUPP 0x10	/* this is to check for newer SPI support */
#define WQOPS_QUEUE_REQTHREADS 0x20	/* request number of threads of a prio */

/* flag values for reuse field in the libc side _pthread_wqthread */
#define WQ_FLAG_THREAD_PRIOMASK		0x0000ffff
#define WQ_FLAG_THREAD_OVERCOMMIT	0x00010000	/* thread is with overcommit prio */
#define WQ_FLAG_THREAD_REUSE		0x00020000	/* thread is being reused */
#define WQ_FLAG_THREAD_NEWSPI		0x00040000	/* the call is with new SPIs */

#define WORKQUEUE_OVERCOMMIT 0x10000	/* the work_kernreturn() for overcommit in prio field */
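/*
 * Illustrative example (not in the original source): a reused, overcommit
 * workqueue thread running at priority 1 would carry a reuse word of
 *   (WQ_FLAG_THREAD_REUSE | WQ_FLAG_THREAD_OVERCOMMIT | 1)
 * with the queue priority recovered by masking with WQ_FLAG_THREAD_PRIOMASK.
 */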
/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31 <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
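/*
 * Illustrative example (not in the original source): for an explicitly
 * scheduled detached thread the word is built as
 *   flags = PTHREAD_START_DETACHED | PTHREAD_START_SETSCHED
 *         | ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT)
 *         | (param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
 * which matches how _new_pthread_create_suspended() fills it in below.
 */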
void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags);

void _pthread_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, pthread_workitem_t item, int reuse);

#define PTHREAD_START_CUSTOM	0x01000000
#define PTHREAD_START_SETSCHED	0x02000000
#define PTHREAD_START_DETACHED	0x04000000
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
extern pthread_t __bsdthread_create(void *(*func)(void *), void *func_arg, void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int),
				void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int),
				int,
				void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int),
				int32_t *,
				__uint64_t);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid(void);
extern int __pthread_canceled(int);
extern void _pthread_keys_init(void);
extern int __pthread_kill(mach_port_t, int);
extern int __pthread_markcancel(int);
extern int __workq_open(void);

extern int __workq_kernreturn(int, pthread_workitem_t, int, int);
#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__) || defined(__x86_64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#elif defined(__arm__)
static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
#else
#error Need to define a stack address hint for this architecture
#endif
/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
 * The guardpages for stackoverflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr), then no guardpages
 * are set up for the thread.
 */
static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
	kern_return_t kr;
	vm_address_t stackaddr;
	size_t guardsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);
	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		return 0;
	}

	guardsize = attrs->guardsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
		    attrs->stacksize + guardsize,
		    vm_page_size - 1,
		    VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
		    0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
		    VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
				 &stackaddr, attrs->stacksize + guardsize,
				 VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}

	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	if (guardsize)
		kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);
	return 0;
}
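/*
 * Note (added): the resulting layout is, from low to high address,
 *   [ guard page(s) ][ stack pages ... ]
 * and *stack receives the high end of the region, since the stack grows
 * downward on all supported architectures.
 */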
static int
_pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
{
	kern_return_t kr;
	pthread_t t;
	vm_address_t stackaddr;
	size_t guardsize, allocsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);

	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		t = (pthread_t)malloc(pthreadsize);
		_pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
		t->freeStackOnExit = 0;
		*thread = t;
		return 0;
	}

	guardsize = attrs->guardsize;
	allocsize = attrs->stacksize + guardsize + pthreadsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
		    allocsize,
		    vm_page_size - 1,
		    VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
		    0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
		    VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
				 &stackaddr, allocsize,
				 VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}

	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	if (guardsize)
		kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);

	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);

	t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
	_pthread_struct_init(t, attrs, *stack, 0, 0, 1);
	t->freesize = allocsize;
	t->freeaddr = (void *)stackaddr;
	t->freeStackOnExit = 1;
	*thread = t;
	return 0;
}
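/*
 * Note (added): here a single vm allocation carries, from low to high address,
 *   [ guard ][ stack ][ struct _pthread ]
 * so the pthread_t sits just above the stack top and the whole region can be
 * released in one vm_deallocate()/__bsdthread_terminate() of t->freeaddr.
 */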
static kern_return_t
_pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
{
	kern_return_t res = 0;
	vm_address_t freeaddr;
	size_t freesize;
	int thread_count;
	mach_port_t kport;
	semaphore_t joinsem = SEMAPHORE_NULL;

	__kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
	kport = t->kernel_thread;
	joinsem = t->joiner_notify;

	if (t->freeStackOnExit) {
		freeaddr = (vm_address_t)t->freeaddr;
		if (freestruct)
			freesize = t->stacksize + t->guardsize + pthreadsize;
		else
			freesize = t->stacksize + t->guardsize;
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				/* if parent has not returned from create yet keep pthread_t */
				__kdebug_trace(0x9000010, t, 0, 0, 1, 0);
				if (t->parentcheck == 0)
					freesize -= pthreadsize;
			}
			t->childexit = 1;
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);

			__kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
			if (thread_count <= 0)
				exit(0);
			else
				__bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
			LIBC_ABORT("thread %p didn't terminate", t);
		} else {
			__kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
			res = vm_deallocate(mach_task_self(), freeaddr, freesize);
		}
	} else {
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				__kdebug_trace(0x9000010, t, 0, 0, 2, 0);
			}
			thread_count = --_pthread_count;
			t->childexit = 1;
			UNLOCK(_pthread_list_lock);

			if (freestruct) {
				__kdebug_trace(0x9000008, t, 0, 0, 2, 0);
				free(t);
			}

			__kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
			if (thread_count <= 0)
				exit(0);
			else
				__bsdthread_terminate(NULL, 0, kport, joinsem);
			LIBC_ABORT("thread %p didn't terminate", t);
		} else if (freestruct) {
			t->sig = _PTHREAD_NO_SIG;
			__kdebug_trace(0x9000024, t, 0, 0, 2, 0);
			free(t);
		}
	}
	return (res);
}
/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		attr->sig = 0;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*detachstate = attr->detached;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*inheritsched = attr->inherit;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*param = attr->param;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*policy = attr->policy;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/* Retain the existing stack size of 512K and not depend on Main thread default stack size */
static const size_t DEFAULT_STACK_SIZE = (512 * 1024);

/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
	attr->stacksize = DEFAULT_STACK_SIZE;
	attr->stackaddr = NULL;
	attr->sig = _PTHREAD_ATTR_SIG;
	attr->param.sched_priority = default_priority;
	attr->param.quantum = 10; /* quantum isn't public yet */
	attr->detached = PTHREAD_CREATE_JOINABLE;
	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->freeStackOnExit = 1;
	attr->fastpath = 1;
	attr->schedset = 0;
	attr->guardsize = vm_page_size;
	return (0);
}
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
		    (detachstate == PTHREAD_CREATE_DETACHED))
		{
			attr->detached = detachstate;
			return (0);
		}
		return (EINVAL);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
		    (inheritsched == PTHREAD_EXPLICIT_SCHED))
		{
			attr->inherit = inheritsched;
			return (0);
		}
		return (EINVAL);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((policy == SCHED_OTHER) ||
		    (policy == SCHED_RR) ||
		    (policy == SCHED_FIFO))
		{
			attr->policy = policy;
			attr->schedset = 1;
			return (0);
		}
		return (EINVAL);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			/* No attribute yet for the scope */
			return (0);
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			return (ENOTSUP);
		}
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
		attr->stackaddr = stackaddr;
		attr->freeStackOnExit = 0;
		attr->fastpath = 0;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = attr->stacksize;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stacksize = stacksize;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = attr->stacksize;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/* By SUSV spec, the stackaddr is the base address, the lowest addressable
 * byte address. This is not the same as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) &&
	    (((uintptr_t)stackaddr % vm_page_size) == 0) &&
	    ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		attr->freeStackOnExit = 0;
		attr->fastpath = 0;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
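/*
 * Note (added): internally attr->stackaddr always holds the high end (top) of
 * the stack; pthread_attr_setstack() therefore stores stackaddr + stacksize,
 * and pthread_attr_getstack() subtracts stacksize back out, while
 * pthread_attr_setstackaddr()/getstackaddr() deal in the top address directly.
 */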
/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* Guardsize of 0 is valid, it means no guard */
		if ((guardsize % vm_page_size) == 0) {
			attr->guardsize = guardsize;
			attr->fastpath = 0;
			return (0);
		}
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = attr->guardsize;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Create and start execution of a new thread.
 */
static void
_pthread_body(pthread_t self)
{
	_pthread_set_self(self);
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in _pthread_body\n");
#endif
	_pthread_exit(self, (self->fun)(self->arg));
}
void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int pflags)
{
	pthread_t pself;
	pthread_attr_t *attrs = &_pthread_attr_default;
	char *stackaddr;

	if ((pflags & PTHREAD_START_CUSTOM) == 0) {
		stackaddr = (char *)self;
		_pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
		LOCK(_pthread_list_lock);
		if (pflags & PTHREAD_START_SETSCHED) {
			self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
			self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
		}
		/* These are not joinable threads */
		if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
			self->detached &= ~PTHREAD_CREATE_JOINABLE;
			self->detached |= PTHREAD_CREATE_DETACHED;
		}
	} else {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
		LOCK(_pthread_list_lock);
	}
	self->kernel_thread = kport;
	self->fun = fun;
	self->arg = funarg;

	/* Add to the pthread list */
	if (self->parentcheck == 0) {
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
		__kdebug_trace(0x900000c, self, 0, 0, 3, 0);
		_pthread_count++;
	}
	self->childrun = 1;
	UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_start\n");
#endif

	pself = pthread_self();
	if (self != pself)
		LIBC_ABORT("self %p != pself %p", self, pself);

	__kdebug_trace(0x9000030, self, pflags, 0, 0, 0);

	_pthread_exit(self, (self->fun)(self->arg));
}
static int
_pthread_create(pthread_t t,
		const pthread_attr_t *attrs,
		void *stack,
		const mach_port_t kernel_thread)
{
	memset(t, 0, sizeof(*t));

	t->stacksize = attrs->stacksize;
	t->stackaddr = (void *)stack;
	t->guardsize = attrs->guardsize;
	t->kernel_thread = kernel_thread;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->param = attrs->param;
	t->freeStackOnExit = attrs->freeStackOnExit;

	t->sig = _PTHREAD_SIG;
	t->reply_port = MACH_PORT_NULL;
	t->cthread_self = NULL;
	LOCK_INIT(t->lock);
	t->plist.tqe_next = (struct _pthread *)0;
	t->plist.tqe_prev = (struct _pthread **)0;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
	t->death = SEMAPHORE_NULL;

	if (kernel_thread != MACH_PORT_NULL)
		(void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
	return 0;
}
static void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void *stack, size_t stacksize, int kernalloc, int nozero)
{
	mach_vm_offset_t stackaddr = (mach_vm_offset_t)(uintptr_t)stack;

	if (nozero == 0) {
		memset(t, 0, sizeof(*t));
		t->plist.tqe_next = (struct _pthread *)0;
		t->plist.tqe_prev = (struct _pthread **)0;
	}
	t->schedset = attrs->schedset;
	LOCK_INIT(t->lock);
	if (kernalloc != 0) {
		stackaddr = (mach_vm_offset_t)(uintptr_t)t;

		/* if allocated from kernel set values appropriately */
		t->stacksize = stacksize;
		t->stackaddr = (void *)(uintptr_t)stackaddr;
		t->freeStackOnExit = 1;
		t->freeaddr = (void *)(uintptr_t)(stackaddr - stacksize - vm_page_size);
		t->freesize = pthreadsize + stacksize + vm_page_size;
	} else {
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
	}
	t->guardsize = attrs->guardsize;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->param = attrs->param;
	t->cancel_error = 0;
	t->sig = _PTHREAD_SIG;
	t->reply_port = MACH_PORT_NULL;
	t->cthread_self = NULL;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
	t->death = SEMAPHORE_NULL;
	t->newstyle = 1;
	t->kernalloc = kernalloc;
	t->cur_workitem = 0;
}
/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

/* Non portable public api to know whether this process has (or had) at least one thread
 * apart from the main thread. There could be a race if a thread is in the process of
 * being created at the time of the call; it does not tell whether there is more than one
 * thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
	return (__is_threaded);
}
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;

	if (t == NULL)
		goto out;

	/*
	 * If the call is on self, return the kernel port. We cannot
	 * add this bypass for main thread as it might have exited,
	 * and we should not return stale port info.
	 */
	if (t == pthread_self())
	{
		kport = t->kernel_thread;
		goto out;
	}

	if (_pthread_lookup_thread(t, &kport, 0) != 0)
		return ((mach_port_t)0);

out:
	return (kport);
}
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread *p = NULL;

	/* No need to wait as mach port is already known */
	LOCK(_pthread_list_lock);
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p->kernel_thread == kernel_thread)
			break;
	}
	UNLOCK(_pthread_list_lock);
	return p;
}
size_t
pthread_get_stacksize_np(pthread_t t)
{
	int ret;
	size_t size = 0;

	if (t == NULL)
		return (ESRCH);

	if (t == pthread_self() || t == &_thread) //since the main thread will not get de-allocated from underneath us
		return t->stacksize;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	size = t->stacksize;
	UNLOCK(_pthread_list_lock);
	return (size);
}
void *
pthread_get_stackaddr_np(pthread_t t)
{
	int ret;
	void *addr = NULL;

	if (t == NULL)
		return ((void *)(uintptr_t)ESRCH);

	if (t == pthread_self() || t == &_thread) //since the main thread will not get deallocated from underneath us
		return t->stackaddr;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return ((void *)(uintptr_t)ret);
	}
	addr = t->stackaddr;
	UNLOCK(_pthread_list_lock);
	return (addr);
}
mach_port_t
_pthread_reply_port(pthread_t t)
{
	return t->reply_port;
}
/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	pthread_t self = pthread_self();

	return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
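/*
 * Note (added): _PTHREAD_CREATE_PARENT is set on the main thread's structure
 * in pthread_init() below, which is why testing that bit identifies the main
 * thread here.
 */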
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
/* if we are passed in a pthread_t that is NULL, then we return
   the current thread's thread_id. So folks don't have to call
   pthread_self, in addition to us doing it, if they just want
   their thread_id.
*/
int
pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
{
	int rval = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return (EINVAL);
	} else if (thread == NULL || thread == self) {
		*thread_id = self->thread_id;
		return rval;
	}

	LOCK(_pthread_list_lock);
	if ((rval = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (rval);
	}
	*thread_id = thread->thread_id;
	UNLOCK(_pthread_list_lock);
	return rval;
}
#endif
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	int rval = 0;

	if (thread == NULL)
		return (ESRCH);

	LOCK(_pthread_list_lock);
	if ((rval = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (rval);
	}
	strlcpy(threadname, thread->pthread_name, len);
	UNLOCK(_pthread_list_lock);
	return rval;
}
int
pthread_setname_np(const char *threadname)
{
	int rval = 0;
	int len = 0;
	pthread_t current_thread = pthread_self();

	if (threadname != NULL)
		len = strlen(threadname);

	/* prototype is in pthread_internals.h */
	rval = proc_setthreadname((void *)threadname, len);
	if (rval == 0) {
		if (threadname != NULL) {
			strlcpy(current_thread->pthread_name, threadname, MAXTHREADNAMESIZE);
		} else {
			memset(current_thread->pthread_name, 0, MAXTHREADNAMESIZE);
		}
	}
	return rval;
}
static int
_new_pthread_create_suspended(pthread_t *thread,
			      const pthread_attr_t *attr,
			      void *(*start_routine)(void *),
			      void *arg,
			      int create_susp)
{
	pthread_attr_t *attrs;
	void *stack;
	int error;
	unsigned int flags;
	pthread_t t, t2;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;
	task_t self = mach_task_self();
	int kernalloc = 0;
	int susp = create_susp;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	error = 0;

	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
	     (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
		needresume = 1;
		create_susp = 1;
	} else
		needresume = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
	 * any change in priority or policy is needed here.
	 */
	if ((__oldstyle == 1) || (create_susp != 0)) {
		/* Rosetta or pthread_create_suspended() */
		/* running under rosetta */
		/* Allocate a stack for the thread */
		__kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
		if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
			return (error);
		}
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;

		/* Create the Mach thread for this thread */
		PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
		if (kern_res != KERN_SUCCESS)
		{
			printf("Can't create thread: %d\n", kern_res);
			return (EINVAL);
		}
		if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			return (error);
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on its way */
		t->arg = arg;
		t->fun = start_routine;
		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
		__kdebug_trace(0x900000c, t, 0, 0, 4, 0);
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, susp, needresume);
		return (0);
	} else {
		flags = 0;
		if (attrs->fastpath == 1)
			kernalloc = 1;

		if (attrs->detached == PTHREAD_CREATE_DETACHED)
			flags |= PTHREAD_START_DETACHED;
		if (attrs->schedset != 0) {
			flags |= PTHREAD_START_SETSCHED;
			flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
			flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
		}

		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		if (kernalloc == 0) {
			/* Allocate a stack for the thread */
			flags |= PTHREAD_START_CUSTOM;
			if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
				return (error);
			}
			/* Send it on its way */
			t->arg = arg;
			t->fun = start_routine;

			__kdebug_trace(0x9000004, t, flags, 0, 0, 0);

			if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
				_pthread_free_pthread_onstack(t, 1, 0);
				return (EAGAIN);
			} else
				t = t2;
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
				__kdebug_trace(0x9000008, t, 0, 0, 1, 0);
				if (t->freeStackOnExit)
					vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
				else
					free(t);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
				__kdebug_trace(0x900000c, t, 0, 0, 1, 0);
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;
			__kdebug_trace(0x9000014, t, 0, 0, 1, 0);
			return (0);
		} else {
			/* kernel allocation */
			__kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
			if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
				return (EAGAIN);
			/* Now set it up to execute */
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
				__kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
				vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
				__kdebug_trace(0x900000c, t, 0, 0, 2, 0);
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;
			__kdebug_trace(0x9000014, t, 0, 0, 2, 0);
			return (0);
		}
	}
}
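/*
 * Note (added): three creation paths coexist above. The old style
 * (__oldstyle/create_susp) builds the Mach thread manually via thread_create()
 * and _pthread_setup(); the new style either allocates the stack and pthread_t
 * in user space (PTHREAD_START_CUSTOM) or lets the kernel allocate both, in
 * each case entering the thread through __bsdthread_create().
 */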
static int
_pthread_create_suspended(pthread_t *thread,
			  const pthread_attr_t *attr,
			  void *(*start_routine)(void *),
			  void *arg,
			  int suspended)
{
	pthread_attr_t *attrs;
	void *stack;
	int res;
	pthread_t t;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	res = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
	 * any change in priority or policy is needed here.
	 */
	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
	     (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
		needresume = 1;
		suspended = 1;
	} else
		needresume = 0;

	/* Allocate a stack for the thread */
	if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
		return (res);
	}
	t = (pthread_t)malloc(sizeof(struct _pthread));
	*thread = t;

	/* Create the Mach thread for this thread */
	PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
	if (kern_res != KERN_SUCCESS)
	{
		printf("Can't create thread: %d\n", kern_res);
		res = EINVAL; /* Need better error here? */
		return (res);
	}
	if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
	{
		return (res);
	}
	set_malloc_singlethreaded(0);
	__is_threaded = 1;

	/* Send it on its way */
	t->arg = arg;
	t->fun = start_routine;
	/* Now set it up to execute */
	LOCK(_pthread_list_lock);
	TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
	__kdebug_trace(0x900000c, t, 0, 0, 5, 0);
	_pthread_count++;
	UNLOCK(_pthread_list_lock);
	_pthread_setup(t, _pthread_body, stack, suspended, needresume);
	return (res);
}
int
pthread_create(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
			    const pthread_attr_t *attr,
			    void *(*start_routine)(void *),
			    void *arg)
{
	return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
/*
 * Make a thread 'undetached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
	int newstyle = 0;
	int ret;

	if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
		return (ret); /* Not a valid thread */

	LOCK(thread->lock);
	newstyle = thread->newstyle;
	if (thread->detached & PTHREAD_CREATE_JOINABLE)
	{
		if (thread->detached & _PTHREAD_EXITED) {
			UNLOCK(thread->lock);
			pthread_join(thread, NULL);
			return (0);
		}
		if (newstyle == 0) {
			semaphore_t death = thread->death;

			thread->detached &= ~PTHREAD_CREATE_JOINABLE;
			thread->detached |= PTHREAD_CREATE_DETACHED;
			UNLOCK(thread->lock);
			if (death)
				(void)semaphore_signal(death);
		} else {
			mach_port_t joinport = thread->joiner_notify;

			thread->detached &= ~PTHREAD_CREATE_JOINABLE;
			thread->detached |= PTHREAD_CREATE_DETACHED;

			UNLOCK(thread->lock);
			if (joinport)
				semaphore_signal(joinport);
		}
		return (0);
	} else {
		UNLOCK(thread->lock);
		return (EINVAL);
	}
}
/*
 * pthread_kill call to system call
 */
int
pthread_kill(pthread_t th, int sig)
{
	int error = 0;
	mach_port_t kport = MACH_PORT_NULL;

	if ((sig < 0) || (sig > NSIG))
		return (EINVAL);

	if (_pthread_lookup_thread(th, &kport, 0) != 0)
		return (ESRCH); /* Not a valid thread */

	/* if the thread is a workqueue thread, just return error */
	if ((th->wqthread != 0) && (th->wqkillset == 0)) {
		return (ENOTSUP);
	}

	error = __pthread_kill(kport, sig);

	if (error == -1)
		error = errno;
	return (error);
}
int
__pthread_workqueue_setkill(int enable)
{
	pthread_t self = pthread_self();

	LOCK(self->lock);
	if (enable == 0)
		self->wqkillset = 0;
	else
		self->wqkillset = 1;
	UNLOCK(self->lock);
	return (0);
}
/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
	pthread_reap_msg_t msg;
	kern_return_t ret;

	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
					      MACH_MSG_TYPE_MOVE_SEND);
	msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
	msg.header.msgh_remote_port = thread_recycle_port;
	msg.header.msgh_local_port = kernel_thread;
	msg.header.msgh_id = 0x44454144; /* 'DEAD' */
	msg.thread = thread;
	ret = mach_msg_send(&msg.header);
	assert(ret == MACH_MSG_SUCCESS);
}
/* Reap the resources for available threads */
static int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
	mach_port_type_t ptype;
	kern_return_t ret;
	task_t self;

	self = mach_task_self();
	if (kernel_thread != MACH_PORT_DEAD) {
		ret = mach_port_type(self, kernel_thread, &ptype);
		if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
			/* not quite dead yet... */
			return EAGAIN;
		}
		ret = mach_port_deallocate(self, kernel_thread);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_deallocate(kernel_thread) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->reply_port != MACH_PORT_NULL) {
		ret = mach_port_mod_refs(self, th->reply_port,
					 MACH_PORT_RIGHT_RECEIVE, -1);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_mod_refs(reply_port) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->freeStackOnExit) {
		vm_address_t addr = (vm_address_t)th->stackaddr;
		vm_size_t size;

		size = (vm_size_t)th->stacksize + th->guardsize;

		addr -= size;
		ret = vm_deallocate(self, addr, size);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"vm_deallocate(stack) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (value_ptr)
		*value_ptr = th->exit_value;
	if (conforming) {
		if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
			*value_ptr = PTHREAD_CANCELED;
	}
	th->sig = _PTHREAD_NO_SIG;

	if (th != &_thread)
		free(th);

	return 0;
}
void _pthread_reap_threads(void)
{
	pthread_reap_msg_t msg;
	kern_return_t ret;

	ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
		       sizeof msg, thread_recycle_port,
		       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	while (ret == MACH_MSG_SUCCESS) {
		mach_port_t kernel_thread = msg.header.msgh_remote_port;
		pthread_t thread = msg.thread;

		/* deal with race with thread_create_running() */
		if (kernel_thread == MACH_PORT_NULL &&
		    kernel_thread != thread->kernel_thread) {
			kernel_thread = thread->kernel_thread;
		}

		if (kernel_thread == MACH_PORT_NULL ||
		    _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
		{
			/* not dead yet, put it back for someone else to reap, stop here */
			_pthread_become_available(thread, kernel_thread);
			return;
		}

		ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
			       sizeof msg, thread_recycle_port,
			       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	}
}
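/*
 * Note (added): dead threads are queued as 'DEAD' Mach messages on
 * thread_recycle_port; _pthread_reap_threads() drains that queue, and a
 * thread whose kernel port is not yet a dead name is re-posted with
 * _pthread_become_available() so a later caller can finish reaping it.
 */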
/* For compatibility... */

pthread_t
_pthread_self(void)
{
	return pthread_self();
}
/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

static void
_pthread_exit(pthread_t self, void *value_ptr)
{
	struct __darwin_pthread_handler_rec *handler;
	kern_return_t kern_res;
	int thread_count;
	int newstyle = self->newstyle;

	/* Make this thread not to receive any signals */
	__disable_threadsignal(1);

	__kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);

	/* set cancel state to disable and type to deferred */
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	while ((handler = self->__cleanup_stack) != 0)
	{
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	if (newstyle == 0) {
		_pthread_reap_threads();

		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			mach_port_t death = self->death;
			self->exit_value = value_ptr;
			UNLOCK(self->lock);
			/* the joiner will need a kernel thread reference, leave ours for it */
			if (death) {
				PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
				if (kern_res != KERN_SUCCESS)
					fprintf(stderr,
						"semaphore_signal(death) failed: %s\n",
						mach_error_string(kern_res));
			}
			LOCK(_pthread_list_lock);
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
		} else {
			UNLOCK(self->lock);
			LOCK(_pthread_list_lock);
			TAILQ_REMOVE(&__pthread_head, self, plist);
			__kdebug_trace(0x9000010, self, 0, 0, 5, 0);
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
			/* with no joiner, we let become available consume our cached ref */
			_pthread_become_available(self, self->kernel_thread);
		}

		if (thread_count <= 0)
			exit(0);

		/* Use a new reference to terminate ourselves. Should never return. */
		PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
		fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
			mach_error_string(kern_res));
	} else {
		semaphore_t joinsem = SEMAPHORE_NULL;

		if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
			joinsem = new_sem_from_pool();
		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		self->exit_value = value_ptr;
		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			if (self->joiner_notify == (mach_port_t)0) {
				self->joiner_notify = joinsem;
				joinsem = SEMAPHORE_NULL;
			}
			UNLOCK(self->lock);
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 0, 1);
		} else {
			UNLOCK(self->lock);
			/* with no joiner, we let become available consume our cached ref */
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 1, 1);
		}
	}
	LIBC_ABORT("thread %p didn't exit", self);
}
void
pthread_exit(void *value_ptr)
{
	pthread_t self = pthread_self();
	/* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */
	if (self->wqthread == 0) {
		_pthread_exit(self, value_ptr);
	} else {
		LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
	}
}
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
		      int *policy,
		      struct sched_param *param)
{
	int ret;

	if (thread == NULL)
		return (ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	if (policy != 0)
		*policy = thread->policy;
	if (param != 0)
		*param = thread->param;
	UNLOCK(_pthread_list_lock);

	return (0);
}
/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
static int
pthread_setschedparam_internal(pthread_t thread,
			       mach_port_t kport,
			       int policy,
			       const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy)
	{
		case SCHED_OTHER:
			bases.ts.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.ts;
			count = POLICY_TIMESHARE_BASE_COUNT;
			break;
		case SCHED_FIFO:
			bases.fifo.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.fifo;
			count = POLICY_FIFO_BASE_COUNT;
			break;
		case SCHED_RR:
			bases.rr.base_priority = param->sched_priority;
			/* quantum isn't public yet */
			bases.rr.quantum = param->quantum;
			base = (policy_base_t)&bases.rr;
			count = POLICY_RR_BASE_COUNT;
			break;
		default:
			return (EINVAL);
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	if (ret != KERN_SUCCESS)
		return (EINVAL);
	return (0);
}
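/*
 * Note (added): the POSIX policy is translated to the matching Mach policy
 * base (POLICY_TIMESHARE for SCHED_OTHER, POLICY_FIFO, POLICY_RR) and applied
 * with thread_policy(); only sched_priority (plus the not-yet-public RR
 * quantum) is carried across.
 */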
int
pthread_setschedparam(pthread_t t,
		      int policy,
		      const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int error = 0;
	int bypass = 1;

	if (t != pthread_self() && t != &_thread) { //since the main thread will not get de-allocated from underneath us
		bypass = 0;
		if (_pthread_lookup_thread(t, &kport, 0) != 0)
			return (ESRCH);
	} else
		kport = t->kernel_thread;

	error = pthread_setschedparam_internal(t, kport, policy, param);
	if (error == 0) {
		if (bypass == 0) {
			/* ensure the thread is still valid */
			LOCK(_pthread_list_lock);
			if ((error = _pthread_find_thread(t)) != 0) {
				UNLOCK(_pthread_list_lock);
				return (error);
			}
			t->policy = policy;
			t->param = *param;
			UNLOCK(_pthread_list_lock);
		} else {
			t->policy = policy;
			t->param = *param;
		}
	}
	return (error);
}
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
	      pthread_t t2)
{
	return (t1 == t2);
}
// Force LLVM not to optimise this to a call to __pthread_set_self, if it does
// then _pthread_set_self won't be bound when secondary threads try and start up.
void __attribute__((noinline))
_pthread_set_self(pthread_t p)
{
	extern void __pthread_set_self(void *);

	if (p == 0) {
		if (_thread.tsd[0] != 0) {
			bzero(&_thread, sizeof(struct _pthread));
		}
		p = &_thread;
	}
	p->tsd[0] = p;
	__pthread_set_self(&p->tsd[0]);
}
void
cthread_set_self(void *cself)
{
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		_pthread_set_self(cself);
		return;
	}
	self->cthread_self = cself;
}
void *
ur_cthread_self(void) {
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		return (void *)self;
	}
	return self->cthread_self;
}
/*
 * cancellation handler for pthread once as the init routine can have a
 * cancellation point. In that case we need to restore the spin unlock.
 */
void
__pthread_once_cancel_handler(pthread_once_t *once_control)
{
	_spin_unlock(&once_control->lock);
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
	     void (*init_routine)(void))
{
	_spin_lock(&once_control->lock);
	if (once_control->sig == _PTHREAD_ONCE_SIG_init)
	{
		pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
		(*init_routine)();
		pthread_cleanup_pop(0);
		once_control->sig = _PTHREAD_ONCE_SIG;
	}
	_spin_unlock(&once_control->lock);
	return (0); /* Spec defines no possible errors! */
}
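/*
 * Illustrative usage (not in the original source):
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *	static void init(void) { ... one-time setup ... }
 *	...
 *	pthread_once(&once, init);	// init() runs exactly once
 */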
/*
 * Insert a cancellation point in a thread.
 */
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
	LOCK(thread->lock);
	if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
	{
		UNLOCK(thread->lock);
		if (isconforming)
			pthread_exit(PTHREAD_CANCELED);
		else
			pthread_exit(0);
	}
	UNLOCK(thread->lock);
}
int
pthread_getconcurrency(void)
{
	return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return EINVAL;
	pthread_concurrency = new_level;
	return (0);
}
/*
 * Perform package initialization - called automatically when application starts
 */
__private_extern__ int
pthread_init(void)
{
	pthread_attr_t *attrs;
	pthread_t thread;
	kern_return_t kr;
	host_priority_info_data_t priority_info;
	host_info_t info;
	host_flavor_t flavor;
	host_t host;
	mach_msg_type_number_t count;
	int mib[2];
	int ncpus = 0;
	size_t len;
	void *stackaddr;

	pthreadsize = round_page(sizeof(struct _pthread));
	count = HOST_PRIORITY_INFO_COUNT;
	info = (host_info_t)&priority_info;
	flavor = HOST_PRIORITY_INFO;
	host = mach_host_self();
	kr = host_info(host, flavor, info, &count);
	if (kr != KERN_SUCCESS)
		printf("host_info failed (%d); probably need privilege.\n", kr);
	else {
		default_priority = priority_info.user_priority;
		min_priority = priority_info.minimum_priority;
		max_priority = priority_info.maximum_priority;
	}
	attrs = &_pthread_attr_default;
	pthread_attr_init(attrs);

	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	thread = &_thread;
	TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
	_pthread_set_self(thread);
	__kdebug_trace(0x900000c, thread, 0, 0, 10, 0);

	/* In case of dyld reset the tsd keys from 1 - 10 */
	_pthread_keys_init();

	mib[0] = CTL_KERN;
	mib[1] = KERN_USRSTACK;
	len = sizeof(stackaddr);
	if (sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0)
		stackaddr = (void *)USRSTACK;
	_pthread_create(thread, attrs, stackaddr, mach_thread_self());
	thread->stacksize = DFLSSIZ; //initialize main thread's stacksize based on vmparam.h
	thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

	_init_cpu_capabilities();
	if ((ncpus = _NumCPUs()) > 1)
		_spin_tries = MP_SPIN_TRIES;

	workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_BG_PRIOQUEUE] = ncpus;

	mach_port_deallocate(mach_task_self(), host);

#if defined(__ppc__)
#if defined(_OBJC_PAGE_BASE_ADDRESS)
	{
		vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
		kr = vm_map(mach_task_self(),
			    &objcRTPage, vm_page_size * 4, vm_page_size - 1,
			    VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
			    MACH_PORT_NULL,
			    (vm_address_t)0, FALSE,
			    (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
			    VM_INHERIT_DEFAULT);
		/* We ignore the return result here. The ObjC runtime will just have to deal. */
	}
#endif
#endif

	//added so that thread_recycle_port is initialized on new launch.
	_pthread_fork_child_postinit();
	mig_init(1);	/* enable multi-threaded mig interfaces */
	if (__oldstyle == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&thread->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (uintptr_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (uintptr_t)thread);
#endif
	}

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_init\n");
#endif
	return 0;
}
int
sched_yield(void)
{
	swtch_pri(0);
	return 0;
}
/* This used to be the "magic" that gets the initialization routine called when the application starts */
/*
 * (These have been moved to setenv.c, so we can use it to fix a less than 10.5
 * crt1.o issue)
 * static int _do_nothing(void) { return 0; }
 * int (*_cthread_init_routine)(void) = _do_nothing;
 */
/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
	kern_return_t res;
	semaphore_t sem;
	int i;

	LOCK(sem_pool_lock);
	if (sem_pool_current == sem_pool_count) {
		sem_pool_count += 16;
		sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
		for (i = sem_pool_current; i < sem_pool_count; i++) {
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
		}
	}
	sem = sem_pool[sem_pool_current++];
	UNLOCK(sem_pool_lock);
	return sem;
}
/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
	LOCK(sem_pool_lock);
	sem_pool[--sem_pool_current] = sem;
	UNLOCK(sem_pool_lock);
}
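/*
 * Note (added): the pool grows in batches of 16 semaphores and is used as a
 * stack: new_sem_from_pool() hands out sem_pool[sem_pool_current++] and
 * restore_sem_to_pool() pushes a semaphore back at --sem_pool_current, all
 * under sem_pool_lock.
 */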
static void sem_pool_reset(void) {
	LOCK(sem_pool_lock);
	sem_pool_count = 0;
	sem_pool_current = 0;
	sem_pool = NULL;
	UNLOCK(sem_pool_lock);
}
__private_extern__ void _pthread_fork_child(pthread_t p) {
	/* Just in case somebody had it locked... */
	UNLOCK(sem_pool_lock);
	sem_pool_reset();
	/* No need to hold the pthread_list_lock as no one other than this
	 * thread is present at this time
	 */
	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
	__kdebug_trace(0x900000c, p, 0, 0, 10, 0);
	_pthread_count = 1;

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((p->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_fork_child\n");
#endif
}
void _pthread_fork_child_postinit() {
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &thread_recycle_port);
	if (kr != KERN_SUCCESS) {
		abort();
	}
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
	pthread_t self = pthread_self();

	switch (state) {
		case PTHREAD_CANCEL_ENABLE:
			if (conforming)
				__pthread_canceled(1);
			break;
		case PTHREAD_CANCEL_DISABLE:
			if (conforming)
				__pthread_canceled(2);
			break;
		default:
			return EINVAL;
	}

	self = pthread_self();
	LOCK(self->lock);
	if (oldstate)
		*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;
	UNLOCK(self->lock);
	if (!conforming)
		_pthread_testcancel(self, 0); /* See if we need to 'die' now... */
	return (0);
}
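/*
 * Note (added): the public pthread_setcancelstate() entry point funnels into
 * this routine; in the non-conforming case a state change immediately
 * re-checks for a pending cancel via _pthread_testcancel(self, 0).
 */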
/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming)
{
	LOCK(self->lock);
	self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
	self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
	if (value_ptr == PTHREAD_CANCELED) {
		self->detached |= _PTHREAD_WASCANCEL;
	}
	UNLOCK(self->lock);
}
int
_pthread_join_cleanup(pthread_t thread, void **value_ptr, int conforming)
{
	kern_return_t res;
	int ret;

	__kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
	/* The scenario where the joiner was waiting for the thread and
	 * the pthread detach happened on that thread. Then the semaphore
	 * will trigger but by the time joiner runs, the target thread could be
	 * freed. So we need to make sure that the thread is still in the list
	 * and is joinable before we continue with the join.
	 */
	LOCK(_pthread_list_lock);
	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
		/* the thread might be a detached thread */
		UNLOCK(_pthread_list_lock);
		return (ESRCH);
	}
	/* It is still a joinable thread and needs to be reaped */
	TAILQ_REMOVE(&__pthread_head, thread, plist);
	__kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
	UNLOCK(_pthread_list_lock);

	if (value_ptr)
		*value_ptr = thread->exit_value;
	if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
		*value_ptr = PTHREAD_CANCELED;
	}
	if (thread->reply_port != MACH_PORT_NULL) {
		res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
		if (res != KERN_SUCCESS)
			fprintf(stderr, "mach_port_mod_refs(reply_port) failed: %s\n", mach_error_string(res));
		thread->reply_port = MACH_PORT_NULL;
	}
	if (thread->freeStackOnExit) {
		thread->sig = _PTHREAD_NO_SIG;
		__kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
		vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)thread, pthreadsize);
	} else {
		thread->sig = _PTHREAD_NO_SIG;
		__kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
		free(thread);
	}
	return (0);
}
/* ALWAYS called with list lock and returns with list lock */
int
_pthread_find_thread(pthread_t thread)
{
	pthread_t p;

loop:
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p == thread) {
			if (thread->kernel_thread == MACH_PORT_NULL) {
				UNLOCK(_pthread_list_lock);
				sched_yield();
				LOCK(_pthread_list_lock);
				goto loop;
			}
			return(0);
		}
	}
	return(ESRCH);
}
int
_pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
{
	mach_port_t kport;
	int ret = 0;

	if (thread == NULL)
		return(ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
		UNLOCK(_pthread_list_lock);
		return(EINVAL);
	}
	kport = thread->kernel_thread;
	UNLOCK(_pthread_list_lock);
	if (portp != NULL)
		*portp = kport;
	return(0);
}
/* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */

int
pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
{
	attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
	attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG;
	attrp->overcommit = 0;
	return(0);
}

int
pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*qpriop = attr->queueprio;
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
{
	int error = 0;

	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		switch (qprio) {
		case WORKQ_HIGH_PRIOQUEUE:
		case WORKQ_DEFAULT_PRIOQUEUE:
		case WORKQ_LOW_PRIOQUEUE:
		case WORKQ_BG_PRIOQUEUE:
			attr->queueprio = qprio;
			break;
		default:
			error = EINVAL;
		}
	} else {
		error = EINVAL;
	}
	return(error);
}

int
pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*ocommp = attr->overcommit;
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
{
	int error = 0;

	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		attr->overcommit = ocomm;
	} else {
		error = EINVAL;
	}
	return(error);
}
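
/*
 * Usage sketch (added commentary, not original source): the attribute calls
 * above follow the usual pthread attr pattern; each validates the signature
 * field and fails with EINVAL on an uninitialized structure:
 *
 *	pthread_workqueue_attr_t attr;
 *	pthread_workqueue_attr_init_np(&attr);
 *	pthread_workqueue_attr_setqueuepriority_np(&attr, WORKQ_HIGH_PRIOQUEUE);
 *	pthread_workqueue_attr_setovercommit_np(&attr, 1);
 *	// ... pass &attr to pthread_workqueue_create_np(), then ...
 *	pthread_workqueue_attr_destroy_np(&attr);
 */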
/* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */

static void
workqueue_list_lock()
{
	OSSpinLockLock(&__workqueue_list_lock);
}

static void
workqueue_list_unlock()
{
	OSSpinLockUnlock(&__workqueue_list_lock);
}

int
pthread_workqueue_init_np()
{
	int ret;

	if (__workqueue_newspis != 0)
		return(EPERM);
	__workqueue_oldspis = 1;

	workqueue_list_lock();
	ret = _pthread_work_internal_init();
	workqueue_list_unlock();

	return(ret);
}
int
pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
{
	int error = 0;

	if (__workqueue_newspis != 0)
		return(EPERM);

	if (queue < 0 || queue > WORKQ_NUM_PRIOQUEUE)
		return(EINVAL);

	error = __workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);

	if (error == -1)
		return(errno);
	return(0);
}
void
pthread_workqueue_atfork_prepare(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_prepare();
}

void
pthread_workqueue_atfork_parent(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_parent();
}
void
pthread_workqueue_atfork_child(void)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	pthread_t self = pthread_self();

	__workqueue_list_lock = OS_SPINLOCK_INIT;

	/* already using new spis? */
	if (__workqueue_newspis != 0) {
		/* prepare the kernel for workq action */
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&self->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&self->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, NULL, 0);
#endif
		(void)__workq_open();
		kernel_workq_setup = 1;
		return;
	}

	/* not using old spis either? */
	if (__workqueue_oldspis == 0)
		return;

	/*
	 * NOTE: workq additions here
	 * are for i386,x86_64 only as
	 * ppc and arm do not support it
	 */
	if (kernel_workq_setup != 0) {
		kernel_workq_setup = 0;
		_pthread_work_internal_init();
	}
#endif
	dispatch_atfork_child();
}
static int
_pthread_work_internal_init(void)
{
	int i, error;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t wq;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	pthread_t self = pthread_self();
#endif

	if (kernel_workq_setup == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&self->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&self->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, NULL, 0);
#endif

		_pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		_pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;

		for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++) {
			headp = __pthread_wq_head_tbl[i];
			TAILQ_INIT(&headp->wqhead);
			headp->next_workq = 0;
		}

		__workqueue_pool_ptr = NULL;
		__workqueue_pool_size = round_page(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);

		__workqueue_pool_ptr = (struct _pthread_workitem *)mmap(NULL, __workqueue_pool_size,
				PROT_READ|PROT_WRITE,
				MAP_ANON | MAP_PRIVATE,
				0,
				0);

		if (__workqueue_pool_ptr == MAP_FAILED) {
			/* Not expected to fail; if it does, always malloc the work items */
			__workqueue_nitems = WORKITEM_POOL_SIZE;
			__workqueue_pool_ptr = NULL;
		} else
			__workqueue_nitems = 0;

		/* sets up the workitem pool */
		grow_workitem();

		/* since the size is less than a page, leaving this in malloc pool */
		wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
		bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
		for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
		}

		if ((error = __workq_open()) != 0) {
			TAILQ_INIT(&__pthread_workitem_pool_head);
			TAILQ_INIT(&__pthread_workqueue_pool_head);
			if (__workqueue_pool_ptr != NULL) {
				munmap((void *)__workqueue_pool_ptr, __workqueue_pool_size);
			}
			free(wq);
			return(ENOMEM);
		}

		kernel_workq_setup = 1;
	}
	return(0);
}
/* This routine is called with list lock held */
static pthread_workitem_t
alloc_workitem(void)
{
	pthread_workitem_t witem;

	if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
		/* the chunk size is set so some multiple of it is pool size */
		if (__workqueue_nitems < WORKITEM_POOL_SIZE) {
			grow_workitem();
		} else {
			workqueue_list_unlock();
			witem = malloc(sizeof(struct _pthread_workitem));
			workqueue_list_lock();
			witem->fromcache = 0;
			goto out;
		}
	}
	witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
	TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
	witem->fromcache = 1;
out:
	witem->flags = 0;
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;
	user_workitem_count++;
	return(witem);
}
/* This routine is called with list lock held */
static void
free_workitem(pthread_workitem_t witem)
{
	user_workitem_count--;
	witem->flags = 0;
	if (witem->fromcache != 0)
		TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
	else
		free(witem);
}
static void
grow_workitem(void)
{
	pthread_workitem_t witemp;
	int i;

	witemp = &__workqueue_pool_ptr[__workqueue_nitems];
	bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_CHUNK_SIZE));
	for (i = 0; i < WORKITEM_CHUNK_SIZE; i++) {
		witemp[i].fromcache = 1;
		TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
	}
	__workqueue_nitems += WORKITEM_CHUNK_SIZE;
}
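
/*
 * Pool design note (added commentary): work items normally come from one
 * mmap'd region of WORKITEM_POOL_SIZE entries that grow_workitem() carves up
 * WORKITEM_CHUNK_SIZE at a time; the pool size is a multiple of the chunk
 * size, so the final chunk exactly exhausts the region. Items handed out
 * past the pool limit (or after an mmap failure) come from malloc() and
 * carry fromcache = 0 so free_workitem() knows to free() them instead of
 * re-queueing them on __pthread_workitem_pool_head.
 */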
/* This routine is called with list lock held */
static pthread_workqueue_t
alloc_workqueue(void)
{
	pthread_workqueue_t wq;

	if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
		workqueue_list_unlock();
		wq = malloc(sizeof(struct _pthread_workqueue));
		workqueue_list_lock();
	} else {
		wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
		TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
	}
	user_workq_count++;
	return(wq);
}
/* This routine is called with list lock held */
static void
free_workqueue(pthread_workqueue_t wq)
{
	user_workq_count--;
	TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
}
static void
_pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
{
	bzero(wq, sizeof(struct _pthread_workqueue));
	if (attr != NULL) {
		wq->queueprio = attr->queueprio;
		wq->overcommit = attr->overcommit;
	} else {
		wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		wq->overcommit = 0;
	}
	LOCK_INIT(wq->lock);
	wq->flags = 0;
	TAILQ_INIT(&wq->item_listhead);
	TAILQ_INIT(&wq->item_kernhead);
#if WQ_LISTTRACE
	__kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
#endif
	wq->wq_list.tqe_next = 0;
	wq->wq_list.tqe_prev = 0;
	wq->sig = PTHREAD_WORKQUEUE_SIG;
	wq->headp = __pthread_wq_head_tbl[wq->queueprio];
}
static int
valid_workq(pthread_workqueue_t workq)
{
	if (workq->sig == PTHREAD_WORKQUEUE_SIG)
		return(1);
	else
		return(0);
}
/* called with list lock */
static void
pick_nextworkqueue_droplock()
{
	int i, curwqprio, val, found;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t workq;
	pthread_workqueue_t nworkq = NULL;

#if WQ_TRACE
	__kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
#endif
loop:
	while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
		found = 0;
		for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++) {
			wqreadyprio = i;	/* because there is nothing else higher to run */
			headp = __pthread_wq_head_tbl[i];

			if (TAILQ_EMPTY(&headp->wqhead))
				continue;
			workq = headp->next_workq;
			if (workq == NULL)
				workq = TAILQ_FIRST(&headp->wqhead);
			curwqprio = workq->queueprio;
			nworkq = workq;	/* starting pt */
			while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL)
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
#if WQ_TRACE
				__kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
#endif
				val = post_nextworkitem(workq);

				if (val != 0) {
					/* things could have changed, so reassess */
					/* If the kernel queue is full, skip */
					if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
						break;
					/* If anything with higher prio arrived, then reevaluate */
					if (wqreadyprio < curwqprio)
						goto loop;	/* we need to re-evaluate again */
					/* we can post some more work items */
					found = 1;
				}

				/* cannot use workq here as it could be freed */
				if (TAILQ_EMPTY(&headp->wqhead))
					break;
				/* if we found nothing to run and only one workqueue in the list, skip */
				if ((val == 0) && (workq == headp->next_workq))
					break;
				workq = headp->next_workq;
				if (workq == NULL)
					workq = TAILQ_FIRST(&headp->wqhead);
				curwqprio = workq->queueprio;
				/* if we found nothing to run and are back at the workq where we started */
				if ((val == 0) && (workq == nworkq))
					break;
			}
			if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
				break;
		}
		/* nothing found to run? */
		if (found == 0)
			break;
	}
	workqueue_list_unlock();
}
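
/*
 * Scheduling note (added commentary): the picker above scans the priority
 * buckets from highest to lowest and round-robins inside a bucket through
 * headp->next_workq, posting items until the kernel holds
 * KERNEL_WORKQ_ELEM_MAX entries. wqreadyprio is lowered as the scan
 * descends, so when a higher-priority item arrives concurrently (observed
 * as wqreadyprio < curwqprio) the scan restarts from the top via 'goto loop'.
 */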
static int
post_nextworkitem(pthread_workqueue_t workq)
{
	int error, prio;
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
		return(0);
	}
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
#endif
	if (TAILQ_EMPTY(&workq->item_listhead)) {
		return(0);
	}
	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
		return(0);

	witem = TAILQ_FIRST(&workq->item_listhead);
	headp = workq->headp;
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
#endif
	if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
#if WQ_TRACE
		__kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		/* A barrier posted when nothing is queued also needs to be handled */
		/* Nothing to wait for */
		if (workq->kq_count != 0) {
			witem->flags |= PTH_WQITEM_APPLIED;
			workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
			workq->barrier_count = workq->kq_count;
#if WQ_TRACE
			__kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		} else {
#if WQ_TRACE
			__kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
#endif
			if (witem->func != NULL) {
				/* since we are going to drop list lock */
				witem->flags |= PTH_WQITEM_APPLIED;
				workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))witem->func;
				(*func)(workq, witem->func_arg);
#if WQ_TRACE
				__kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
#endif
				workqueue_list_lock();
				workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
			}
			TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(witem);
#if WQ_TRACE
			__kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		}
	} else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
#if WQ_TRACE
		__kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		witem->flags |= PTH_WQITEM_APPLIED;
		workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
		workq->barrier_count = workq->kq_count;
		workq->term_callback = (void (*)(struct _pthread_workqueue *,void *))witem->func;
		workq->term_callarg = witem->func_arg;
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
			if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
#endif
			}
			free_workitem(witem);
			workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
#endif
			headp = __pthread_wq_head_tbl[workq->queueprio];
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			workq->sig = 0;
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
			free_workqueue(workq);
			return(1);
		} else {
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		}
#if WQ_TRACE
		__kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
#endif
		return(1);
	} else {
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
#endif
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
		if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
			workq->kq_count++;
			witem->flags |= PTH_WQITEM_KERN_COUNT;
		}
		OSAtomicIncrement32Barrier(&kernel_workq_count);
		workqueue_list_unlock();

		prio = workq->queueprio;
		if (workq->overcommit != 0) {
			prio |= WORKQUEUE_OVERCOMMIT;
		}

		if ((error = __workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
			OSAtomicDecrement32Barrier(&kernel_workq_count);
			workqueue_list_lock();
#if WQ_TRACE
			__kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
			TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
				workq->flags |= PTHREAD_WORKQ_REQUEUED;
		} else
			workqueue_list_lock();
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
		return(1);
	}
	/* no one should come here */
	printf("error in logic for next workitem\n");
	LIBC_ABORT("error in logic for next workitem");
	return(0);
}
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
{
	int ret;
#if WQ_DEBUG
	pthread_t pself;
#endif
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workqueue_t workq;
	int thread_reuse = 0;
	int thread_priority = 0;
	int thread_newspi = 0;
	int thread_options = 0;

	if (reuse & WQ_FLAG_THREAD_NEWSPI) {
		thread_reuse = reuse & WQ_FLAG_THREAD_REUSE;
		if ((reuse & WQ_FLAG_THREAD_OVERCOMMIT) != 0)
			thread_options = WORKQ_ADDTHREADS_OPTION_OVERCOMMIT;
		thread_priority = reuse & WQ_FLAG_THREAD_PRIOMASK;
		thread_newspi = 1;
		workq = NULL;
	} else {
		thread_reuse = (reuse == 0)? 0: WQ_FLAG_THREAD_REUSE;
		workq = item->workq;
	}

	if (thread_reuse == 0) {
		/* reuse is set to 0 when a thread is newly created to run a workitem */
		_pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
		self->wqthread = 1;
		self->wqkillset = 0;
		self->parentcheck = 1;

		/* These are not joinable threads */
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
#if WQ_TRACE
		__kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
#endif
		self->kernel_thread = kport;
		if (thread_newspi != 0) {
			self->fun = (void *(*)(void *))__libdispatch_workerfunction;
			self->arg = thread_priority;
		} else {
			self->fun = (void *(*)(void *))item->func;
			self->arg = item->func_arg;
		}
		/* Add to the pthread list */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if WQ_TRACE
		__kdebug_trace(0x900000c, self, 0, 0, 10, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
			printf("Failed to set thread_id in pthread_wqthread\n");
#endif
	} else {
		/* reuse is set to 1 when a thread is reused to run another work item */
#if WQ_TRACE
		__kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
#endif
		/* reset all tsd from 1 to KEYS_MAX */
		if (self == NULL)
			LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);

		if (thread_newspi != 0) {
			self->fun = (void *(*)(void *))__libdispatch_workerfunction;
			self->arg = NULL;
		} else {
			self->fun = (void *(*)(void *))item->func;
			self->arg = item->func_arg;
		}
	}

#if WQ_DEBUG
	pself = pthread_self();
	if (self != pself) {
#if WQ_TRACE
		__kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
#endif
		printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
		_pthread_set_self(self);
		pself = pthread_self();
		if (self != pself)
			printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
		pself = self;
	}

	pself = pthread_self();
	if (self != pself) {
		printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
		LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
	}
#endif /* WQ_DEBUG */

	if (thread_newspi != 0) {
		(*__libdispatch_workerfunction)(thread_priority, thread_options, NULL);
		_pthread_workq_return(self);
	} else {
		self->cur_workq = workq;
		self->cur_workitem = item;
		OSAtomicDecrement32Barrier(&kernel_workq_count);

		ret = (int)(intptr_t)(*self->fun)(self->arg);

		/* If we reach here without going through the above initialization path then don't go through
		 * with the teardown code path (e.g. setjmp/longjmp). Instead just exit this thread.
		 */
		if (self != pthread_self()) {
			pthread_exit(PTHREAD_CANCELED);
		}
		workqueue_exit(self, workq, item);
	}
}
static void
workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	workqueue_list_lock();

	TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
	workq->kq_count--;
#if WQ_TRACE
	__kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
#endif
	free_workitem(item);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
#if WQ_TRACE
		__kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
#endif
		if (workq->barrier_count <= 0) {
			/* Need to remove the barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY | PTH_WQITEM_APPLIED)) == 0)
				printf("Incorrect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and a callback is registered, run it */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(baritem);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
#if WQ_TRACE
			__kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
#endif
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
#endif
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				workq->sig = 0;
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
			} else {
				/* if there are higher prio schedulable items, reset wqreadyprio */
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
					wqreadyprio = workq->queueprio;
			}
		}
	}
#if WQ_TRACE
	__kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
	__kdebug_trace(0x900005c, self, item, 0, 0, 0);
#endif

	pick_nextworkqueue_droplock();
	_pthread_workq_return(self);
}
static void
_pthread_workq_return(pthread_t self)
{
	__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);

	/* This is the way to terminate the thread */
	_pthread_exit(self, NULL);
}
/* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */

int
pthread_workqueue_setdispatch_np(void (*worker_func)(int, int, void *))
{
	int error = 0;

	if (__workqueue_oldspis != 0)
		return(EPERM);

	__workqueue_newspis = 1;

	if (__libdispatch_workerfunction == NULL) {
		__libdispatch_workerfunction = worker_func;
		/* check whether the kernel supports new SPIs */
		error = __workq_kernreturn(WQOPS_QUEUE_NEWSPISUPP, NULL, 0, 0);
		if (error == -1) {
			__libdispatch_workerfunction = NULL;
			error = ENOTSUP;
			__workqueue_newspis = 0;
		} else {
			/* prepare the kernel for workq action */
			(void)__workq_open();
			kernel_workq_setup = 1;
			if (__is_threaded == 0)
				__is_threaded = 1;
			__workqueue_newspis = 1;
		}
	} else {
		error = EBUSY;
	}

	return(error);
}
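
/*
 * Usage sketch (added commentary, not original source): libdispatch
 * registers its worker entry point once, before any old-SPI workqueue call
 * is made. The 'worker' function below is hypothetical:
 *
 *	static void worker(int priority, int options, void *unused) {
 *		// drain the root queue that corresponds to 'priority'
 *	}
 *	...
 *	int err = pthread_workqueue_setdispatch_np(worker);
 *	// EPERM if the old SPIs are already in use, EBUSY if a worker
 *	// function is already registered, ENOTSUP if the kernel is too old.
 */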
int
pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
{
	int priority = queue_priority & WQ_FLAG_THREAD_PRIOMASK;
	int error = 0;

	/* new spi not inited yet?? */
	if (__workqueue_newspis == 0)
		return(EPERM);

	if ((options & WORKQ_ADDTHREADS_OPTION_OVERCOMMIT) != 0)
		priority |= WORKQUEUE_OVERCOMMIT;

	error = __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, numthreads, priority);

	if (error == -1)
		return(errno);
	return(0);
}
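
/*
 * Usage sketch (added commentary, not original source): once the dispatch
 * function is registered, thread requests are forwarded straight to the
 * kernel, which calls back into _pthread_wqthread() for each thread:
 *
 *	pthread_workqueue_addthreads_np(WORKQ_DEFAULT_PRIOQUEUE,
 *	    WORKQ_ADDTHREADS_OPTION_OVERCOMMIT, 2);
 */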
int
pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
{
	pthread_workqueue_t wq;
	pthread_workqueue_head_t headp;

#if defined(__ppc__)
	IF_ROSETTA() {
		return(ENOTSUP);
	}
#endif
	if (__workqueue_newspis != 0)
		return(EPERM);

	if (__workqueue_oldspis == 0)
		__workqueue_oldspis = 1;

	if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
		return(EINVAL);
	}

	if (__is_threaded == 0)
		__is_threaded = 1;

	workqueue_list_lock();
	if (kernel_workq_setup == 0) {
		int ret = _pthread_work_internal_init();
		if (ret != 0) {
			workqueue_list_unlock();
			return(ret);
		}
	}

	wq = alloc_workqueue();

	_pthread_workq_init(wq, attr);

	headp = __pthread_wq_head_tbl[wq->queueprio];
	TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
	if (headp->next_workq == NULL) {
		headp->next_workq = TAILQ_FIRST(&headp->wqhead);
	}

	workqueue_list_unlock();

	*workqp = wq;

	return(0);
}
int
pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
{
	pthread_workitem_t witem;

	if (__workqueue_newspis != 0)
		return(EPERM);

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->func = workitem_func;
	witem->func_arg = workitem_arg;
	witem->workq = workq;

	/* alloc_workitem() can drop the lock, check the state */
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		free_workitem(witem);
		workqueue_list_unlock();
		return(ESRCH);
	}

	if (itemhandlep != NULL)
		*itemhandlep = (pthread_workitem_handle_t *)witem;
	if (gencountp != NULL)
		*gencountp = 0;
#if WQ_TRACE
	__kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
#endif
	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
	__kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif

	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
		wqreadyprio = workq->queueprio;

	pick_nextworkqueue_droplock();

	return(0);
}
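
/*
 * Usage sketch (added commentary, not original source; old SPI): create a
 * queue and enqueue one item. 'do_work' is a hypothetical callback:
 *
 *	static void do_work(void *arg) { ... }
 *
 *	pthread_workqueue_t q;
 *	pthread_workitem_handle_t h;
 *	unsigned int gen;
 *	pthread_workqueue_create_np(&q, NULL);
 *	pthread_workqueue_additem_np(q, do_work, NULL, &h, &gen);
 */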
int
pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
{
	if (__workqueue_newspis != 0)
		return(EPERM);

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	if (ocommp != NULL)
		*ocommp = workq->overcommit;

	return(0);
}
#else /* !BUILDING_VARIANT ] [ */
extern int __unix_conforming;
extern int _pthread_count;
extern int __workqueue_newspis;
extern int __workqueue_oldspis;

extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);

#endif /* !BUILDING_VARIANT ] */
#if __DARWIN_UNIX03
__private_extern__ void
__posix_join_cleanup(void *arg)
{
	pthread_t thread = (pthread_t)arg;
	int already_exited, res;
	void * dummy;
	semaphore_t death;
	int newstyle;

	LOCK(thread->lock);
	already_exited = (thread->detached & _PTHREAD_EXITED);

	newstyle = thread->newstyle;
#if WQ_TRACE
	__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
#endif
	if (newstyle == 0) {
		death = thread->death;
		if (!already_exited) {
			thread->joiner = (struct _pthread *)NULL;
			UNLOCK(thread->lock);
			restore_sem_to_pool(death);
		} else {
			UNLOCK(thread->lock);
			while ((res = _pthread_reap_thread(thread,
					thread->kernel_thread,
					&dummy, 1)) == EAGAIN) {
				sched_yield();
			}
			restore_sem_to_pool(death);
		}
	} else {
		/* leave another thread to join */
		thread->joiner = (struct _pthread *)NULL;
		UNLOCK(thread->lock);
	}
}
#endif /* __DARWIN_UNIX03 */
/*
 * Wait for a thread to terminate and obtain its exit value.
 */
/*
int
pthread_join(pthread_t thread,
	     void **value_ptr)

moved to pthread_cancelable.c */
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if (_pthread_lookup_thread(thread, NULL, 0) != 0)
		return(ESRCH);

	/* if the thread is a workqueue thread, then return an error */
	if (thread->wqthread != 0) {
		return(ENOTSUP);
	}
#if __DARWIN_UNIX03
	int state;

	LOCK(thread->lock);
	state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
	UNLOCK(thread->lock);
	if (state & PTHREAD_CANCEL_ENABLE)
		__pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
	thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
	return (0);
}
void
pthread_testcancel(void)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
	_pthread_testcancel(self, 1);
#else /* __DARWIN_UNIX03 */
	_pthread_testcancel(self, 0);
#endif /* __DARWIN_UNIX03 */
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	return (_pthread_setcancelstate_internal(state, oldstate, 1));
#else /* __DARWIN_UNIX03 */
	return (_pthread_setcancelstate_internal(state, oldstate, 0));
#endif /* __DARWIN_UNIX03 */
}
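
/*
 * Usage sketch (added commentary, not original source): shield a
 * non-reentrant section from cancellation, then restore the caller's state:
 *
 *	int old;
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
 *	// ... critical section ...
 *	pthread_setcancelstate(old, NULL);
 */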
/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if ((type != PTHREAD_CANCEL_DEFERRED) &&
	    (type != PTHREAD_CANCEL_ASYNCHRONOUS))
		return EINVAL;
	self = pthread_self();
	LOCK(self->lock);
	if (oldtype)
		*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state |= type;
	UNLOCK(self->lock);
#if !__DARWIN_UNIX03
	_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
#endif /* __DARWIN_UNIX03 */
	return (0);
}
int
pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
{
#if __DARWIN_UNIX03
	int err = 0;

	if (__pthread_sigmask(how, set, oset) == -1) {
		err = errno;
	}
	return(err);
#else /* __DARWIN_UNIX03 */
	return(__pthread_sigmask(how, set, oset));
#endif /* __DARWIN_UNIX03 */
}
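
/*
 * Usage sketch (added commentary, not original source): block SIGUSR1 in the
 * calling thread, keeping the previous mask for later restoration. Note that
 * on conforming (UNIX03) builds the error is returned, not left in errno:
 *
 *	sigset_t set, oset;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	int err = pthread_sigmask(SIG_BLOCK, &set, &oset);
 */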
/*
int
sigwait(const sigset_t * set, int * sig)

moved to pthread_cancelable.c */