/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */
#include "pthread_internals.h"
#include "pthread_workqueue.h"

#include <stdio.h>      /* For printf(). */
#include <errno.h>      /* For __mach_errno_addr() prototype. */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#include <libkern/OSAtomic.h>
#include <libkern/OSCrossEndian.h>

extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);
#ifndef BUILDING_VARIANT /* [ */

__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);

int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];
/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void *stack, size_t stacksize, int kernalloc, int nozero);
static void _pthread_tsd_reinit(pthread_t t);
static int _new_pthread_create_suspended(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg,
               int create_susp);
/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;
int _pthread_find_thread(pthread_t thread);

/* Mach message used to notify that a thread needs to be reaped */

typedef struct _pthread_reap_msg_t {
    mach_msg_header_t header;
    pthread_t thread;
    mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;
int __unix_conforming = 0;
__private_extern__ size_t pthreadsize = 0;

/* under rosetta we will use old style creation of threads */
static int __oldstyle = 0;

__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
/* Same implementation as LOCK, but without the __is_threaded check */
extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
__private_extern__ void
_spin_lock_retry(pthread_lock_t *lock)
{
    int tries = _spin_tries;
    do {
        if (tries-- > 0)
            continue;
        syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
        tries = _spin_tries;
    } while (!_spin_lock_try(lock));
}

extern mach_port_t thread_recycle_port;
/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
static void _pthread_exit(pthread_t self, void *value_ptr);
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static pthread_attr_t _pthread_attr_default = {0};
static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t *attr);
static int kernel_workq_setup = 0;
static volatile int32_t kernel_workq_count = 0;
static volatile unsigned int user_workq_count = 0;
#define KERNEL_WORKQ_ELEM_MAX 64    /* Max number of elements in the kernel workq */
static int wqreadyprio = 0;         /* current highest prio queue ready with items */

static int __pthread_workqueue_affinity = 1;    /* 0 means no affinity */
__private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
__private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);

struct _pthread_workqueue_head __pthread_workq0_head;
struct _pthread_workqueue_head __pthread_workq1_head;
struct _pthread_workqueue_head __pthread_workq2_head;
pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head};
static void workqueue_list_lock(void);
static void workqueue_list_unlock(void);
static int valid_workq(pthread_workqueue_t);
static void pick_nextworkqueue_droplock(void);
static int post_nextworkitem(pthread_workqueue_t workq);
static void _pthread_workq_return(pthread_t self);
static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, pthread_workitem_t item, int reuse);
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags);
static pthread_workitem_t alloc_workitem(void);
static void free_workitem(pthread_workitem_t);
static pthread_workqueue_t alloc_workqueue(void);
static void free_workqueue(pthread_workqueue_t);
static int _pthread_work_internal_init(void);
static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);

void pthread_workqueue_atfork_prepare(void);
void pthread_workqueue_atfork_parent(void);
void pthread_workqueue_atfork_child(void);

extern void dispatch_atfork_prepare(void);
extern void dispatch_atfork_parent(void);
extern void dispatch_atfork_child(void);

/* workq_kernreturn commands */
#define WQOPS_QUEUE_ADD 1
#define WQOPS_QUEUE_REMOVE 2
#define WQOPS_THREAD_RETURN 4
#define WQOPS_THREAD_SETCONC 8
/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags);

void _pthread_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, pthread_workitem_t item, int reuse);

#define PTHREAD_START_CUSTOM            0x01000000
#define PTHREAD_START_SETSCHED          0x02000000
#define PTHREAD_START_DETACHED          0x04000000
#define PTHREAD_START_POLICY_BITSHIFT   16
#define PTHREAD_START_POLICY_MASK       0xff
#define PTHREAD_START_IMPORTANCE_MASK   0xffff
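/*
 * Illustrative sketch (not part of the original file, excluded from the
 * build): how a flags word for __bsdthread_create() is packed from the
 * macros above, mirroring the packing done in
 * _new_pthread_create_suspended() below. The helper name is hypothetical.
 */
#if 0
static unsigned int
pack_bsdthread_create_flags(int detached, int schedset, int policy, int importance)
{
    unsigned int flags = 0;

    if (detached == PTHREAD_CREATE_DETACHED)
        flags |= PTHREAD_START_DETACHED;
    if (schedset != 0) {
        flags |= PTHREAD_START_SETSCHED;
        /* policy occupies bits 16..23, importance bits 0..15 */
        flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
        flags |= (importance & PTHREAD_START_IMPORTANCE_MASK);
    }
    return flags;
}
#endif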
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
extern pthread_t __bsdthread_create(void *(*func)(void *), void *func_arg, void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), int, void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), __uint64_t);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid(void);
extern int __pthread_canceled(int);
extern void _pthread_keys_init(void);
extern int __pthread_kill(mach_port_t, int);
extern int __pthread_markcancel(int);
extern int __workq_open(void);

#define WORKQUEUE_OVERCOMMIT 0x10000

extern int __workq_kernreturn(int, pthread_workitem_t, int, int);
#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__) || defined(__x86_64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#elif defined(__arm__)
static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
#else
#error Need to define a stack address hint for this architecture
#endif
/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
 * The guardpages for stackoverflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr), then no guardpages
 * are set up for the thread.
 */
static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
    kern_return_t kr;
    vm_address_t stackaddr;
    size_t guardsize;

    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        /* No guard pages setup in this case */
        assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
        *stack = attrs->stackaddr;
        return 0;
    }

    guardsize = attrs->guardsize;
    stackaddr = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), &stackaddr,
                attrs->stacksize + guardsize,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         &stackaddr, attrs->stacksize + guardsize,
                         VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    if (guardsize)
        kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
    *stack = (void *)(stackaddr + attrs->stacksize + guardsize);
    return 0;
}
static int
_pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
{
    kern_return_t kr;
    pthread_t t;
    vm_address_t stackaddr;
    size_t guardsize, allocsize;

    assert(attrs->stacksize >= PTHREAD_STACK_MIN);

    if (attrs->stackaddr != NULL) {
        /* No guard pages setup in this case */
        assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
        *stack = attrs->stackaddr;
        t = (pthread_t)malloc(pthreadsize);
        _pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
        t->freeStackOnExit = 0;
        t->freeaddr = 0;
        t->freesize = 0;
        *thread = t;
        return 0;
    }

    guardsize = attrs->guardsize;
    allocsize = attrs->stacksize + guardsize + pthreadsize;
    stackaddr = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), &stackaddr,
                allocsize,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         &stackaddr, allocsize,
                         VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    if (guardsize)
        kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);

    *stack = (void *)(stackaddr + attrs->stacksize + guardsize);

    t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
    _pthread_struct_init(t, attrs, *stack, 0, 0, 1);
    t->freesize = allocsize;
    t->freeaddr = (void *)stackaddr;
    t->freeStackOnExit = 1;
    *thread = t;

    return 0;
}
static kern_return_t
_pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
{
    kern_return_t res = 0;
    vm_address_t freeaddr;
    size_t freesize;
    task_t self = mach_task_self();
    int thread_count;
    mach_port_t kport;
    semaphore_t joinsem = SEMAPHORE_NULL;

#if PTH_TRACE
    __kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
#endif
    kport = t->kernel_thread;
    joinsem = t->joiner_notify;

    if (t->freeStackOnExit) {
        freeaddr = (vm_address_t)t->freeaddr;
        if (freestruct)
            freesize = t->stacksize + t->guardsize + pthreadsize;
        else
            freesize = t->stacksize + t->guardsize;
        if (termthread) {
            mig_dealloc_reply_port(MACH_PORT_NULL);
            LOCK(_pthread_list_lock);
            if (freestruct != 0) {
                TAILQ_REMOVE(&__pthread_head, t, plist);
                /* if parent has not returned from create yet keep pthread_t */
#if PTH_LISTTRACE
                __kdebug_trace(0x9000010, t, 0, 0, 1, 0);
#endif
                if (t->parentcheck == 0)
                    freesize -= pthreadsize;
            }
            t->childexit = 1;
            thread_count = --_pthread_count;
            UNLOCK(_pthread_list_lock);

#if PTH_TRACE
            __kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
#endif
            if (thread_count <= 0)
                exit(0);
            else
                __bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
            LIBC_ABORT("thread %p didn't terminate", t);
        } else {
#if PTH_TRACE
            __kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
#endif
            res = vm_deallocate(mach_task_self(), freeaddr, freesize);
        }
    } else {
        if (termthread) {
            mig_dealloc_reply_port(MACH_PORT_NULL);
            LOCK(_pthread_list_lock);
            if (freestruct != 0) {
                TAILQ_REMOVE(&__pthread_head, t, plist);
#if PTH_LISTTRACE
                __kdebug_trace(0x9000010, t, 0, 0, 2, 0);
#endif
            }
            thread_count = --_pthread_count;
            t->childexit = 1;
            UNLOCK(_pthread_list_lock);

            if (freestruct) {
#if PTH_TRACE
                __kdebug_trace(0x9000008, t, 0, 0, 2, 0);
#endif
                free(t);
            }

#if PTH_TRACE
            __kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
#endif
            if (thread_count <= 0)
                exit(0);
            else
                __bsdthread_terminate(NULL, 0, kport, joinsem);
            LIBC_ABORT("thread %p didn't terminate", t);
        } else if (freestruct) {
            t->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
            __kdebug_trace(0x9000024, t, 0, 0, 2, 0);
#endif
            free(t);
        }
    }
    return (res);
}
/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        attr->sig = 0;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/* Retain the existing stack size of 512K and not depend on Main thread default stack size */
static const size_t DEFAULT_STACK_SIZE = (512 * 1024);
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->freeStackOnExit = 1;
    attr->fastpath = 1;
    attr->schedset = 0;
    attr->guardsize = vm_page_size;
    return (0);
}
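/*
 * Usage sketch (illustrative only, excluded from the build): the attribute
 * accessors below all follow one pattern - validate attr->sig, then read or
 * write a single field. A caller would typically do something like this:
 */
#if 0
static int example_attr_usage(void)
{
    pthread_attr_t attr;
    pthread_attr_init(&attr);                   /* defaults: 512K stack, joinable */
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    pthread_attr_setstacksize(&attr, 16 * vm_page_size); /* must be a multiple of vm_page_size */
    return pthread_attr_destroy(&attr);
}
#endif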
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (0);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (0);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        attr->schedset = 1;
        return (0);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            attr->schedset = 1;
            return (0);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (0);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(const pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (0);
    }
    return (EINVAL); /* Not an attribute structure! */
}
/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}
int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = 0;
        attr->fastpath = 0;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}
int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
        *stacksize = attr->stacksize;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/* By SUSV spec, the stackaddr is the base address, the lowest addressable
 * byte address. This is not the same as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        (((uintptr_t)stackaddr % vm_page_size) == 0) &&
        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
        attr->stacksize = stacksize;
        attr->freeStackOnExit = 0;
        attr->fastpath = 0;
        return (0);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}
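/*
 * Illustrative sketch (not compiled): pthread_attr_setstackaddr() takes the
 * high end (base) of the stack, while pthread_attr_setstack() takes the
 * lowest addressable byte, so the two store different values for the same
 * region. Assuming a page-aligned buffer 'buf' of page-multiple size 'sz':
 */
#if 0
static void example_setstack(pthread_attr_t *attr, void *buf, size_t sz)
{
    /* SUSv3 style: pass the lowest address; stored internally as buf + sz */
    pthread_attr_setstack(attr, buf, sz);

    /* legacy style: pass the base (highest) address directly */
    pthread_attr_setstackaddr(attr, (char *)buf + sz);
    pthread_attr_setstacksize(attr, sz);
}
#endif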
/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr,
                          size_t guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* Guardsize of 0 is valid, it means no guard */
        if ((guardsize % vm_page_size) == 0) {
            attr->guardsize = guardsize;
            attr->fastpath = 0;
            return (0);
        } else
            return (EINVAL);
    }
    return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr,
                          size_t *guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *guardsize = attr->guardsize;
        return (0);
    }
    return (EINVAL); /* Not an attribute structure! */
}
/*
 * Create and start execution of a new thread.
 */
static void
_pthread_body(pthread_t self)
{
    _pthread_set_self(self);
    _pthread_exit(self, (self->fun)(self->arg));
}
void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int pflags)
{
    pthread_t pself;
    pthread_attr_t *attrs = &_pthread_attr_default;
    char *stackaddr;

    if ((pflags & PTHREAD_START_CUSTOM) == 0) {
        stackaddr = (char *)self;
        _pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
#if defined(__i386__) || defined(__x86_64__)
        _pthread_set_self(self);
#endif
        LOCK(_pthread_list_lock);
        if (pflags & PTHREAD_START_SETSCHED) {
            self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
            self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
        }
        /* These are not joinable threads */
        if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
            self->detached &= ~PTHREAD_CREATE_JOINABLE;
            self->detached |= PTHREAD_CREATE_DETACHED;
        }
    } else {
#if defined(__i386__) || defined(__x86_64__)
        _pthread_set_self(self);
#endif
        LOCK(_pthread_list_lock);
    }
    self->kernel_thread = kport;
    self->fun = fun;
    self->arg = funarg;

    /* Add to the pthread list */
    if (self->parentcheck == 0) {
        TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if PTH_LISTTRACE
        __kdebug_trace(0x900000c, self, 0, 0, 3, 0);
#endif
        _pthread_count++;
    }
    self->childrun = 1;
    UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
    if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
        printf("Failed to set thread_id in pthread_start\n");
#endif

    pself = pthread_self();
    if (self != pself)
        LIBC_ABORT("self %p != pself %p", self, pself);
#if PTH_TRACE
    __kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
#endif

    _pthread_exit(self, (self->fun)(self->arg));
}
static int
_pthread_create(pthread_t t,
                const pthread_attr_t *attrs,
                void *stack,
                const mach_port_t kernel_thread)
{
    int res;
    res = 0;

    do
    {
        memset(t, 0, sizeof(*t));
        t->tsd[0] = t;
        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
        t->guardsize = attrs->guardsize;
        t->kernel_thread = kernel_thread;
        t->detached = attrs->detached;
        t->inherit = attrs->inherit;
        t->policy = attrs->policy;
        t->param = attrs->param;
        t->freeStackOnExit = attrs->freeStackOnExit;
        t->mutexes = (struct _pthread_mutex *)NULL;
        t->sig = _PTHREAD_SIG;
        t->reply_port = MACH_PORT_NULL;
        t->cthread_self = NULL;
        LOCK_INIT(t->lock);
        t->plist.tqe_next = (struct _pthread *)0;
        t->plist.tqe_prev = (struct _pthread **)0;
        t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
        t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
        t->death = SEMAPHORE_NULL;

        if (kernel_thread != MACH_PORT_NULL)
            (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
    } while (0);
    return (res);
}
static void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void *stack, size_t stacksize, int kernalloc, int nozero)
{
    mach_vm_offset_t stackaddr = (mach_vm_offset_t)(long)stack;

    if (nozero == 0) {
        memset(t, 0, sizeof(*t));
        t->plist.tqe_next = (struct _pthread *)0;
        t->plist.tqe_prev = (struct _pthread **)0;
    }
    t->schedset = attrs->schedset;
    LOCK_INIT(t->lock);
    if (kernalloc != 0) {
        stackaddr = (mach_vm_offset_t)(long)t;

        /* if allocated from kernel set values appropriately */
        t->stacksize = stacksize;
        t->stackaddr = (void *)(long)stackaddr;
        t->freeStackOnExit = 1;
        t->freeaddr = (void *)(long)(stackaddr - stacksize - vm_page_size);
        t->freesize = pthreadsize + stacksize + vm_page_size;
    } else {
        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
    }
    t->guardsize = attrs->guardsize;
    t->detached = attrs->detached;
    t->inherit = attrs->inherit;
    t->policy = attrs->policy;
    t->param = attrs->param;
    t->mutexes = (struct _pthread_mutex *)NULL;
    t->sig = _PTHREAD_SIG;
    t->reply_port = MACH_PORT_NULL;
    t->cthread_self = NULL;
    t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
    t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
    t->death = SEMAPHORE_NULL;
    t->newstyle = 1;
    t->kernalloc = kernalloc;
}
static void
_pthread_tsd_reinit(pthread_t t)
{
    bzero(&t->tsd[1], (_INTERNAL_POSIX_THREAD_KEYS_END - 1) * sizeof(void *));
}
/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to know whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread
 * is being created at the time of the call; it does not tell whether there
 * is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return (__is_threaded);
}
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    mach_port_t kport = MACH_PORT_NULL;

    if (t == NULL)
        goto out;

    /*
     * If the call is on self, return the kernel port. We cannot
     * add this bypass for main thread as it might have exited,
     * and we should not return stale port info.
     */
    if (t == pthread_self())
    {
        kport = t->kernel_thread;
        goto out;
    }

    if (_pthread_lookup_thread(t, &kport, 0) != 0)
        return ((mach_port_t)0);
out:
    return (kport);
}
pthread_from_mach_thread_np(mach_port_t kernel_thread
)
1035 struct _pthread
* p
= NULL
;
1037 /* No need to wait as mach port is already known */
1038 LOCK(_pthread_list_lock
);
1039 TAILQ_FOREACH(p
, &__pthread_head
, plist
) {
1040 if (p
->kernel_thread
== kernel_thread
)
1043 UNLOCK(_pthread_list_lock
);
size_t
pthread_get_stacksize_np(pthread_t t)
{
    int ret, nestingDepth = 0;
    size_t size = 0;
    vm_address_t address = 0;
    vm_size_t region_size = 0;
    struct vm_region_submap_info_64 info;
    mach_msg_type_number_t count;

    if (t == NULL)
        return (ESRCH);

    if (t == pthread_self() || t == &_thread)   // since the main thread will not get de-allocated from underneath us
    {
        return t->stacksize;
    }

    LOCK(_pthread_list_lock);

    if ((ret = _pthread_find_thread(t)) != 0) {
        UNLOCK(_pthread_list_lock);
        return (ret);
    }
    size = t->stacksize;
    UNLOCK(_pthread_list_lock);

    return (size);
}
void *
pthread_get_stackaddr_np(pthread_t t)
{
    int ret;
    void *addr = NULL;

    if (t == NULL)
        return ((void *)(long)ESRCH);

    if (t == pthread_self() || t == &_thread)   // since the main thread will not get deallocated from underneath us
        return t->stackaddr;

    LOCK(_pthread_list_lock);

    if ((ret = _pthread_find_thread(t)) != 0) {
        UNLOCK(_pthread_list_lock);
        return ((void *)(long)ret);
    }
    addr = t->stackaddr;
    UNLOCK(_pthread_list_lock);

    return (addr);
}
mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
/* if we are passed in a pthread_t that is NULL, then we return
   the current thread's thread_id. So folks don't have to call
   pthread_self, in addition to us doing it, if they just want
   their thread_id.
*/
int
pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
{
    int rval = 0;
    pthread_t self = pthread_self();

    if (thread_id == NULL) {
        return (EINVAL);
    } else if (thread == NULL || thread == self) {
        *thread_id = self->thread_id;
    } else {
        LOCK(_pthread_list_lock);
        if ((rval = _pthread_find_thread(thread)) != 0) {
            UNLOCK(_pthread_list_lock);
            return (rval);
        }
        *thread_id = thread->thread_id;
        UNLOCK(_pthread_list_lock);
    }
    return rval;
}
#endif
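/*
 * Usage sketch (illustrative only, excluded from the build): passing NULL
 * asks for the calling thread's id, avoiding an extra pthread_self() in
 * the caller.
 */
#if 0
static void example_threadid(void)
{
    __uint64_t tid;
    if (pthread_threadid_np(NULL, &tid) == 0)   /* NULL means "the calling thread" */
        printf("current thread id: %llu\n", (unsigned long long)tid);
}
#endif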
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
    int rval = 0;

    if (thread == NULL)
        return (ESRCH);

    LOCK(_pthread_list_lock);
    if ((rval = _pthread_find_thread(thread)) != 0) {
        UNLOCK(_pthread_list_lock);
        return (rval);
    }
    strlcpy(threadname, thread->pthread_name, len);
    UNLOCK(_pthread_list_lock);
    return rval;
}

int
pthread_setname_np(const char *threadname)
{
    int rval = 0;
    size_t len;

    len = strlen(threadname);
    rval = sysctlbyname("kern.threadname", NULL, 0, threadname, len);
    if (rval == 0) {
        strlcpy((pthread_self())->pthread_name, threadname, len + 1);
    }
    return rval;
}
static int
_new_pthread_create_suspended(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg,
               int create_susp)
{
    pthread_attr_t *attrs;
    void *stack;
    int error;
    unsigned int flags;
    pthread_t t, t2;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;
    int needresume;
    task_t self = mach_task_self();
    int kernalloc = 0;
    int susp = create_susp;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    error = 0;

    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
         (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
        needresume = 1;
        susp = 1;
    } else
        needresume = 0;

    /* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
     * any change in priority or policy is needed here.
     */
    if ((__oldstyle == 1) || (create_susp != 0)) {
        /* Rosetta or pthread_create_suspended() */
        /* running under rosetta */
        /* Allocate a stack for the thread */
#if PTH_TRACE
        __kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
#endif
        if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
            return (error);
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;
        if (susp) {
            /* Create the Mach thread for this thread */
            PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create thread: %d\n", kern_res);
                return (EINVAL);
            }
        }
        if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            return (error);
        }
        set_malloc_singlethreaded(0);
        __is_threaded = 1;

        /* Send it on its way */
        t->arg = arg;
        t->fun = start_routine;
        t->newstyle = 0;
        /* Now set it up to execute */
        LOCK(_pthread_list_lock);
        TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
#if PTH_LISTTRACE
        __kdebug_trace(0x900000c, t, 0, 0, 4, 0);
#endif
        _pthread_count++;
        UNLOCK(_pthread_list_lock);
        _pthread_setup(t, _pthread_body, stack, susp, needresume);
        return (0);
    } else {
        flags = 0;
        if (attrs->fastpath == 1)
            kernalloc = 1;

        if (attrs->detached == PTHREAD_CREATE_DETACHED)
            flags |= PTHREAD_START_DETACHED;
        if (attrs->schedset != 0) {
            flags |= PTHREAD_START_SETSCHED;
            flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
            flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
        }

        set_malloc_singlethreaded(0);
        __is_threaded = 1;

        if (kernalloc == 0) {
            /* Allocate a stack for the thread */
            flags |= PTHREAD_START_CUSTOM;
            if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
                return (error);
            }
            /* Send it on its way */
            t->arg = arg;
            t->fun = start_routine;
            t->newstyle = 1;

#if PTH_TRACE
            __kdebug_trace(0x9000004, t, flags, 0, 0, 0);
#endif

            if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
                _pthread_free_pthread_onstack(t, 1, 0);
                return (EAGAIN);
            }
            else
                t2 = t;

            LOCK(_pthread_list_lock);
            t->parentcheck = 1;
            if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
                /* detached child exited, mop up */
                UNLOCK(_pthread_list_lock);
#if PTH_TRACE
                __kdebug_trace(0x9000008, t, 0, 0, 1, 0);
#endif
                if (t->freeStackOnExit)
                    vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
                else
                    free(t);
            } else if (t->childrun == 0) {
                TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
                _pthread_count++;
#if PTH_LISTTRACE
                __kdebug_trace(0x900000c, t, 0, 0, 1, 0);
#endif
                UNLOCK(_pthread_list_lock);
            } else
                UNLOCK(_pthread_list_lock);

            *thread = t2;
#if PTH_TRACE
            __kdebug_trace(0x9000014, t, 0, 0, 1, 0);
#endif
            return (0);
        } else {
            /* kernel allocation */
#if PTH_TRACE
            __kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
#endif
            if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
                return (EAGAIN);
            /* Now set it up to execute */
            LOCK(_pthread_list_lock);
            t->parentcheck = 1;
            if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
                /* detached child exited, mop up */
                UNLOCK(_pthread_list_lock);
#if PTH_TRACE
                __kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
#endif
                vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
            } else if (t->childrun == 0) {
                TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
                _pthread_count++;
#if PTH_LISTTRACE
                __kdebug_trace(0x900000c, t, 0, 0, 2, 0);
#endif
                UNLOCK(_pthread_list_lock);
            } else
                UNLOCK(_pthread_list_lock);

            *thread = t;
#if PTH_TRACE
            __kdebug_trace(0x9000014, t, 0, 0, 2, 0);
#endif
            return (0);
        }
    }
}
static int
_pthread_create_suspended(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg,
               int suspended)
{
    pthread_attr_t *attrs;
    void *stack;
    int res;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;
    int needresume;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = 0;

    /* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
     * any change in priority or policy is needed here.
     */
    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
         (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
        needresume = 1;
        suspended = 1;
    } else
        needresume = 0;

    /* Allocate a stack for the thread */
    if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
        return (res);
    }
    t = (pthread_t)malloc(sizeof(struct _pthread));
    *thread = t;
    if (suspended) {
        /* Create the Mach thread for this thread */
        PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
        if (kern_res != KERN_SUCCESS)
        {
            printf("Can't create thread: %d\n", kern_res);
            res = EINVAL; /* Need better error here? */
            return (res);
        }
    }
    if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
    {
        return (res);
    }
    set_malloc_singlethreaded(0);
    __is_threaded = 1;

    /* Send it on its way */
    t->arg = arg;
    t->fun = start_routine;
    /* Now set it up to execute */
    LOCK(_pthread_list_lock);
    TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
#if PTH_LISTTRACE
    __kdebug_trace(0x900000c, t, 0, 0, 5, 0);
#endif
    _pthread_count++;
    UNLOCK(_pthread_list_lock);
    _pthread_setup(t, _pthread_body, stack, suspended, needresume);
    return (res);
}
int
pthread_create(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
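/*
 * Usage sketch (illustrative only, excluded from the build): both entry
 * points take the same four arguments; the trailing flag of the internal
 * helpers selects whether the new thread starts suspended or running.
 * 'worker' here is a hypothetical start routine.
 */
#if 0
static void *worker(void *arg) { return arg; }

static int example_create_join(void)
{
    pthread_t t;
    int err = pthread_create(&t, NULL, worker, NULL);
    if (err == 0)
        err = pthread_join(t, NULL);
    return err;
}
#endif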
/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
    int newstyle = 0;
    int ret;

    if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
        return (ret); /* Not a valid thread */

    LOCK(thread->lock);
    newstyle = thread->newstyle;
    if (thread->detached & PTHREAD_CREATE_JOINABLE)
    {
        if (thread->detached & _PTHREAD_EXITED) {
            UNLOCK(thread->lock);
            pthread_join(thread, NULL);
            return 0;
        } else {
            if (newstyle == 0) {
                semaphore_t death = thread->death;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;
                UNLOCK(thread->lock);
                if (death)
                    (void)semaphore_signal(death);
            } else {
                mach_port_t joinport = thread->joiner_notify;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;

                UNLOCK(thread->lock);
                if (joinport) {
                    semaphore_signal(joinport);
                }
            }
            return (0);
        }
    } else {
        UNLOCK(thread->lock);
        return (EINVAL);
    }
}
/*
 * pthread_kill call to system call
 */
int
pthread_kill(pthread_t th, int sig)
{
    int error = 0;
    mach_port_t kport = MACH_PORT_NULL;

    if ((sig < 0) || (sig > NSIG))
        return (EINVAL);

    if (_pthread_lookup_thread(th, &kport, 0) != 0)
        return (ESRCH); /* Not a valid thread */

    /* if the thread is a workqueue thread, just return error */
    if ((th->wqthread != 0) && (th->wqkillset == 0)) {
        return (ENOTSUP);
    }

    error = __pthread_kill(kport, sig);

    if (error == -1)
        error = errno;
    return (error);
}

int
__pthread_workqueue_setkill(int enable)
{
    pthread_t self = pthread_self();

    LOCK(self->lock);
    if (enable == 0)
        self->wqkillset = 0;
    else
        self->wqkillset = 1;
    UNLOCK(self->lock);

    return (0);
}
/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
    pthread_reap_msg_t msg;
    kern_return_t ret;

    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
                                          MACH_MSG_TYPE_MOVE_SEND);
    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = kernel_thread;
    msg.header.msgh_id = 0x44454144; /* 'DEAD' */
    msg.thread = thread;
    ret = mach_msg_send(&msg.header);
    assert(ret == MACH_MSG_SUCCESS);
}
/* Reap the resources for available threads */
__private_extern__
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
    mach_port_type_t ptype;
    kern_return_t ret;
    task_t self;

    self = mach_task_self();
    if (kernel_thread != MACH_PORT_DEAD) {
        ret = mach_port_type(self, kernel_thread, &ptype);
        if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
            /* not quite dead yet... */
            return EAGAIN;
        }
        ret = mach_port_deallocate(self, kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_deallocate(kernel_thread) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->reply_port != MACH_PORT_NULL) {
        ret = mach_port_mod_refs(self, th->reply_port,
                                 MACH_PORT_RIGHT_RECEIVE, -1);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_mod_refs(reply_port) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->freeStackOnExit) {
        vm_address_t addr = (vm_address_t)th->stackaddr;
        vm_size_t size;

        size = (vm_size_t)th->stacksize + th->guardsize;

        addr -= size;
        ret = vm_deallocate(self, addr, size);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "vm_deallocate(stack) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (value_ptr)
        *value_ptr = th->exit_value;
    if (conforming) {
        if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
            (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
            *value_ptr = PTHREAD_CANCELED;
    }
    th->sig = _PTHREAD_NO_SIG;

    if (th != &_thread)
        free(th);

    return 0;
}
void _pthread_reap_threads(void)
{
    pthread_reap_msg_t msg;
    kern_return_t ret;

    ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                   sizeof msg, thread_recycle_port,
                   MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    while (ret == MACH_MSG_SUCCESS) {
        mach_port_t kernel_thread = msg.header.msgh_remote_port;
        pthread_t thread = msg.thread;

        /* deal with race with thread_create_running() */
        if (kernel_thread == MACH_PORT_NULL &&
            kernel_thread != thread->kernel_thread) {
            kernel_thread = thread->kernel_thread;
        }

        if ( kernel_thread == MACH_PORT_NULL ||
             _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
        {
            /* not dead yet, put it back for someone else to reap, stop here */
            _pthread_become_available(thread, kernel_thread);
            return;
        }

        ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                       sizeof msg, thread_recycle_port,
                       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }
}
/* For compatibility... */

pthread_t
_pthread_self(void) {
    return pthread_self();
}
/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

static void
_pthread_exit(pthread_t self, void *value_ptr)
{
    struct __darwin_pthread_handler_rec *handler;
    kern_return_t kern_res;
    int thread_count;
    int newstyle = self->newstyle;

    /* Make this thread not to receive any signals */
    __disable_threadsignal(1);

#if PTH_TRACE
    __kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
#endif

    /* set cancel state to disable and type to deferred */
    _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

    while ((handler = self->__cleanup_stack) != 0)
    {
        (handler->__routine)(handler->__arg);
        self->__cleanup_stack = handler->__next;
    }
    _pthread_tsd_cleanup(self);

    if (newstyle == 0) {
        _pthread_reap_threads();

        LOCK(self->lock);
        self->detached |= _PTHREAD_EXITED;

        if (self->detached & PTHREAD_CREATE_JOINABLE) {
            mach_port_t death = self->death;
            self->exit_value = value_ptr;
            UNLOCK(self->lock);
            /* the joiner will need a kernel thread reference, leave ours for it */
            if (death) {
                PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
                if (kern_res != KERN_SUCCESS)
                    fprintf(stderr,
                            "semaphore_signal(death) failed: %s\n",
                            mach_error_string(kern_res));
            }
            LOCK(_pthread_list_lock);
            thread_count = --_pthread_count;
            UNLOCK(_pthread_list_lock);
        } else {
            UNLOCK(self->lock);
            LOCK(_pthread_list_lock);
            TAILQ_REMOVE(&__pthread_head, self, plist);
#if PTH_LISTTRACE
            __kdebug_trace(0x9000010, self, 0, 0, 5, 0);
#endif
            thread_count = --_pthread_count;
            UNLOCK(_pthread_list_lock);
            /* with no joiner, we let become available consume our cached ref */
            _pthread_become_available(self, self->kernel_thread);
        }

        if (thread_count <= 0)
            exit(0);

        /* Use a new reference to terminate ourselves. Should never return. */
        PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
        fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
                mach_error_string(kern_res));
    } else {
        semaphore_t joinsem = SEMAPHORE_NULL;

        if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
            joinsem = new_sem_from_pool();
        LOCK(self->lock);
        self->detached |= _PTHREAD_EXITED;

        self->exit_value = value_ptr;
        if (self->detached & PTHREAD_CREATE_JOINABLE) {
            if (self->joiner_notify == (mach_port_t)0) {
                self->joiner_notify = joinsem;
                joinsem = SEMAPHORE_NULL;
            }
            UNLOCK(self->lock);
            if (joinsem != SEMAPHORE_NULL)
                restore_sem_to_pool(joinsem);
            _pthread_free_pthread_onstack(self, 0, 1);
        } else {
            UNLOCK(self->lock);
            /* with no joiner, we let become available consume our cached ref */
            if (joinsem != SEMAPHORE_NULL)
                restore_sem_to_pool(joinsem);
            _pthread_free_pthread_onstack(self, 1, 1);
        }
    }
    LIBC_ABORT("thread %p didn't exit", self);
}

void
pthread_exit(void *value_ptr)
{
    pthread_t self = pthread_self();
    /* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */
    if (self->wqthread == 0) {
        _pthread_exit(self, value_ptr);
    } else {
        LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
    }
}
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    int ret;

    if (thread == NULL)
        return (ESRCH);

    LOCK(_pthread_list_lock);

    if ((ret = _pthread_find_thread(thread)) != 0) {
        UNLOCK(_pthread_list_lock);
        return (ret);
    }
    if (policy != 0)
        *policy = thread->policy;
    if (param != 0)
        *param = thread->param;
    UNLOCK(_pthread_list_lock);

    return (0);
}
/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
static int
pthread_setschedparam_internal(pthread_t thread,
                               mach_port_t kport,
                               int policy,
                               const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    switch (policy)
    {
    case SCHED_OTHER:
        bases.ts.base_priority = param->sched_priority;
        base = (policy_base_t)&bases.ts;
        count = POLICY_TIMESHARE_BASE_COUNT;
        break;
    case SCHED_FIFO:
        bases.fifo.base_priority = param->sched_priority;
        base = (policy_base_t)&bases.fifo;
        count = POLICY_FIFO_BASE_COUNT;
        break;
    case SCHED_RR:
        bases.rr.base_priority = param->sched_priority;
        /* quantum isn't public yet */
        bases.rr.quantum = param->quantum;
        base = (policy_base_t)&bases.rr;
        count = POLICY_RR_BASE_COUNT;
        break;
    default:
        return (EINVAL);
    }
    ret = thread_policy(kport, policy, base, count, TRUE);
    if (ret != KERN_SUCCESS)
        return (EINVAL);
    return (0);
}
int
pthread_setschedparam(pthread_t t,
                      int policy,
                      const struct sched_param *param)
{
    mach_port_t kport = MACH_PORT_NULL;
    int error = 0;
    int bypass = 1;

    if (t != pthread_self() && t != &_thread) { //since the main thread will not get de-allocated from underneath us
        bypass = 0;
        if (_pthread_lookup_thread(t, &kport, 0) != 0)
            return (ESRCH);
    } else
        kport = t->kernel_thread;

    error = pthread_setschedparam_internal(t, kport, policy, param);
    if (error == 0) {
        if (bypass == 0) {
            /* ensure the thread is still valid */
            LOCK(_pthread_list_lock);
            if ((error = _pthread_find_thread(t)) != 0) {
                UNLOCK(_pthread_list_lock);
                return (error);
            }
            t->policy = policy;
            t->param = *param;
            UNLOCK(_pthread_list_lock);
        } else {
            t->policy = policy;
            t->param = *param;
        }
    }
    return (error);
}
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}
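/*
 * Illustrative note (not compiled): both bounds are derived from the host's
 * default priority rather than from the policy argument, so the usable
 * sched_priority range is always default_priority +/- 16.
 */
#if 0
static void example_priority_range(void)
{
    int lo = sched_get_priority_min(SCHED_OTHER);   /* default_priority - 16 */
    int hi = sched_get_priority_max(SCHED_OTHER);   /* default_priority + 16 */
    (void)lo; (void)hi;
}
#endif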
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
              pthread_t t2)
{
    return (t1 == t2);
}
__private_extern__ void
_pthread_set_self(pthread_t p)
{
    extern void __pthread_set_self(pthread_t);

    if (p == 0) {
        if (_thread.tsd[0] != 0) {
            bzero(&_thread, sizeof(struct _pthread));
        }
        p = &_thread;
    }
    p->tsd[0] = p;
    __pthread_set_self(p);
}
void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        _pthread_set_self(cself);
        return;
    }
    self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        return (void *)self;
    }
    return self->cthread_self;
}
/*
 * cancellation handler for pthread once as the init routine can have a
 * cancellation point. In that case we need to restore the spin unlock
 */
static void
__pthread_once_cancel_handler(pthread_once_t *once_control)
{
    _spin_unlock(&once_control->lock);
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
             void (*init_routine)(void))
{
    _spin_lock(&once_control->lock);
    if (once_control->sig == _PTHREAD_ONCE_SIG_init)
    {
        pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
        (*init_routine)();
        pthread_cleanup_pop(0);
        once_control->sig = _PTHREAD_ONCE_SIG;
    }
    _spin_unlock(&once_control->lock);
    return (0); /* Spec defines no possible errors! */
}
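/*
 * Usage sketch (illustrative only, excluded from the build): the spin lock
 * in pthread_once_t serializes racing callers, and the cleanup handler
 * above keeps that lock from being leaked if init_routine() hits a
 * cancellation point.
 */
#if 0
static pthread_once_t example_once = PTHREAD_ONCE_INIT;
static void example_init(void) { /* runs exactly once */ }

static void example_call_once(void)
{
    pthread_once(&example_once, example_init);
}
#endif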
/*
 * Insert a cancellation point in a thread.
 */
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
    LOCK(thread->lock);
    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
        (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
    {
        UNLOCK(thread->lock);
        if (isconforming)
            pthread_exit(PTHREAD_CANCELED);
        else
            pthread_exit(0);
    }
    UNLOCK(thread->lock);
}
int
pthread_getconcurrency(void)
{
    return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
    if (new_level < 0)
        return EINVAL;
    pthread_concurrency = new_level;
    return (0);
}
/*
 * Perform package initialization - called automatically when application starts
 */
__private_extern__ int
pthread_init(void)
{
    pthread_attr_t *attrs;
    pthread_t thread;
    kern_return_t kr;
    host_priority_info_data_t priority_info;
    host_info_t info;
    host_flavor_t flavor;
    host_t host;
    mach_msg_type_number_t count;
    int mib[2];
    int ncpus = 0;
    size_t len;
    void *stackaddr;

    pthreadsize = round_page(sizeof(struct _pthread));
    count = HOST_PRIORITY_INFO_COUNT;
    info = (host_info_t)&priority_info;
    flavor = HOST_PRIORITY_INFO;
    host = mach_host_self();
    kr = host_info(host, flavor, info, &count);
    if (kr != KERN_SUCCESS)
        printf("host_info failed (%d); probably need privilege.\n", kr);
    else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    attrs = &_pthread_attr_default;
    pthread_attr_init(attrs);

    TAILQ_INIT(&__pthread_head);
    LOCK_INIT(_pthread_list_lock);
    thread = &_thread;
    TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
    _pthread_set_self(thread);
#if PTH_LISTTRACE
    __kdebug_trace(0x900000c, thread, 0, 0, 10, 0);
#endif

    /* In case of dyld reset the tsd keys from 1 - 10 */
    _pthread_keys_init();

    mib[0] = CTL_KERN;
    mib[1] = KERN_USRSTACK;
    len = sizeof(stackaddr);
    if (sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0)
        stackaddr = (void *)USRSTACK;
    _pthread_create(thread, attrs, stackaddr, mach_thread_self());
    thread->stacksize = DFLSSIZ; //initialize main thread's stacksize based on vmparam.h
    thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

    _init_cpu_capabilities();
    if ((ncpus = _NumCPUs()) > 1)
        _spin_tries = MP_SPIN_TRIES;

    workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
    workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
    workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;

    mach_port_deallocate(mach_task_self(), host);

#if defined(__ppc__)
    IF_ROSETTA() {
        __oldstyle = 1;
    }
#endif
#if defined(__arm__)
    __oldstyle = 1;
#endif

#if defined(_OBJC_PAGE_BASE_ADDRESS)
    {
        vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
        kr = vm_map(mach_task_self(),
                    &objcRTPage, vm_page_size * 4, vm_page_size - 1,
                    VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
                    MACH_PORT_NULL,
                    (vm_address_t)0, FALSE,
                    (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
                    VM_INHERIT_DEFAULT);
        /* We ignore the return result here. The ObjC runtime will just have to deal. */
    }
#endif

    mig_init(1);    /* enable multi-threaded mig interfaces */
    if (__oldstyle == 0) {
#if defined(__i386__) || defined(__x86_64__)
        __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (__uint64_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (__uint64_t)thread);
#else
        __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (__uint64_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (__uint64_t)thread);
#endif
    }

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
    if ((thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
        printf("Failed to set thread_id in pthread_init\n");
#endif
    return 0;
}
int sched_yield(void)
{
    swtch_pri(0);
    return 0;
}
/* This used to be the "magic" that gets the initialization routine called when the application starts */
static int _do_nothing(void) { return 0; }
int (*_cthread_init_routine)(void) = _do_nothing;
/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
    kern_return_t res;
    semaphore_t sem;
    int i;

    LOCK(sem_pool_lock);
    if (sem_pool_current == sem_pool_count) {
        sem_pool_count += 16;
        sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
        for (i = sem_pool_current; i < sem_pool_count; i++) {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
        }
    }
    sem = sem_pool[sem_pool_current++];
    UNLOCK(sem_pool_lock);
    return sem;
}
/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    sem_pool[--sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}

static void sem_pool_reset(void) {
    LOCK(sem_pool_lock);
    sem_pool_count = 0;
    sem_pool_current = 0;
    sem_pool = NULL;
    UNLOCK(sem_pool_lock);
}
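/*
 * Usage sketch (illustrative only, excluded from the build): callers treat
 * the pool as a LIFO; every new_sem_from_pool() is paired with a
 * restore_sem_to_pool() once the waiter is done, as in the joiner_notify
 * path of _pthread_exit() above.
 */
#if 0
static void example_sem_pool(void)
{
    semaphore_t sem = new_sem_from_pool();
    /* ... semaphore_signal()/semaphore_wait() handshake ... */
    restore_sem_to_pool(sem);
}
#endif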
__private_extern__ void _pthread_fork_child(pthread_t p) {
    /* Just in case somebody had it locked... */
    UNLOCK(sem_pool_lock);
    sem_pool_reset();
    /* No need to hold the pthread_list_lock as no one other than this
     * thread is present at this time
     */
    TAILQ_INIT(&__pthread_head);
    LOCK_INIT(_pthread_list_lock);
    TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
#if PTH_LISTTRACE
    __kdebug_trace(0x900000c, p, 0, 0, 10, 0);
#endif
    _pthread_count = 1;

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
    if ((p->thread_id = __thread_selfid()) == (__uint64_t)-1)
        printf("Failed to set thread_id in pthread_fork_child\n");
#endif
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
    pthread_t self = pthread_self();

    switch (state) {
    case PTHREAD_CANCEL_ENABLE:
        if (conforming)
            __pthread_canceled(1);
        break;
    case PTHREAD_CANCEL_DISABLE:
        if (conforming)
            __pthread_canceled(2);
        break;
    default:
        return EINVAL;
    }

    self = pthread_self();
    LOCK(self->lock);
    if (oldstate)
        *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
    self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
    self->cancel_state |= state;
    UNLOCK(self->lock);
    if (!conforming)
        _pthread_testcancel(self, 0);  /* See if we need to 'die' now... */
    return (0);
}
/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming)
{
    LOCK(self->lock);
    self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
    self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
    if (value_ptr == PTHREAD_CANCELED) {
        self->detached |= _PTHREAD_WASCANCEL;
    }
    UNLOCK(self->lock);
}
int
_pthread_join_cleanup(pthread_t thread, void **value_ptr, int conforming)
{
    kern_return_t res;
    int detached = 0, ret;

#if PTH_TRACE
    __kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
#endif
    /* The scenario where the joiner was waiting for the thread and
     * the pthread detach happened on that thread. Then the semaphore
     * will trigger but by the time joiner runs, the target thread could be
     * freed. So we need to make sure that the thread is still in the list
     * and is joinable before we continue with the join.
     */
    LOCK(_pthread_list_lock);
    if ((ret = _pthread_find_thread(thread)) != 0) {
        UNLOCK(_pthread_list_lock);
        return (ret);
    }
    if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
        /* the thread might be a detached thread */
        UNLOCK(_pthread_list_lock);
        return (ESRCH);
    }
    /* It is still a joinable thread and needs to be reaped */
    TAILQ_REMOVE(&__pthread_head, thread, plist);
#if PTH_LISTTRACE
    __kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
#endif
    UNLOCK(_pthread_list_lock);

    if (value_ptr)
        *value_ptr = thread->exit_value;
    if (conforming) {
        if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
            (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
            *value_ptr = PTHREAD_CANCELED;
        }
    }
    if (thread->reply_port != MACH_PORT_NULL) {
        res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
        if (res != KERN_SUCCESS)
            fprintf(stderr, "mach_port_mod_refs(reply_port) failed: %s\n", mach_error_string(res));
        thread->reply_port = MACH_PORT_NULL;
    }
    if (thread->freeStackOnExit) {
        thread->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
        __kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
#endif
        vm_deallocate(mach_task_self(), (mach_vm_address_t)(long)thread, pthreadsize);
    } else {
        thread->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
        __kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
#endif
        free(thread);
    }
    return (0);
}
/* ALWAYS called with list lock and return with list lock */
int
_pthread_find_thread(pthread_t thread)
{
    pthread_t p;

loop:
    TAILQ_FOREACH(p, &__pthread_head, plist) {
        if (p == thread) {
            if (thread->kernel_thread == MACH_PORT_NULL) {
                UNLOCK(_pthread_list_lock);
                sched_yield();
                LOCK(_pthread_list_lock);
                goto loop;
            }
            return (0);
        }
    }
    return (ESRCH);
}
int
_pthread_lookup_thread(pthread_t thread, mach_port_t *portp, int only_joinable)
{
    mach_port_t kport;
    int ret = 0;

    if (thread == NULL)
        return (ESRCH);

    LOCK(_pthread_list_lock);

    if ((ret = _pthread_find_thread(thread)) != 0) {
        UNLOCK(_pthread_list_lock);
        return (ret);
    }
    if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
        UNLOCK(_pthread_list_lock);
        return (EINVAL);
    }
    kport = thread->kernel_thread;
    UNLOCK(_pthread_list_lock);
    if (portp != NULL)
        *portp = kport;
    return (0);
}
/* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
{
	attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
	attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG;
	attrp->overcommit = 0;
	return(0);
}

int
pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*qpriop = attr->queueprio;
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
{
	int error = 0;

	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		switch (qprio) {
			case WORKQ_HIGH_PRIOQUEUE:
			case WORKQ_DEFAULT_PRIOQUEUE:
			case WORKQ_LOW_PRIOQUEUE:
				attr->queueprio = qprio;
				break;
			default:
				error = EINVAL;
		}
	} else {
		error = EINVAL;
	}
	return(error);
}
int
pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*ocommp = attr->overcommit;
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		attr->overcommit = ocomm;
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}
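/*
 * Typical use of the attribute calls above (an illustrative sketch only,
 * not part of this file; error handling elided):
 *
 *	pthread_workqueue_attr_t wqattr;
 *
 *	pthread_workqueue_attr_init_np(&wqattr);
 *	pthread_workqueue_attr_setqueuepriority_np(&wqattr, WORKQ_HIGH_PRIOQUEUE);
 *	pthread_workqueue_attr_setovercommit_np(&wqattr, 1);
 *	... pass &wqattr to pthread_workqueue_create_np() ...
 *	pthread_workqueue_attr_destroy_np(&wqattr);
 */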
/* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */
static void
workqueue_list_lock(void)
{
	OSSpinLockLock(&__workqueue_list_lock);
}

static void
workqueue_list_unlock(void)
{
	OSSpinLockUnlock(&__workqueue_list_lock);
}

int
pthread_workqueue_init_np(void)
{
	int ret;

	workqueue_list_lock();
	ret = _pthread_work_internal_init();
	workqueue_list_unlock();

	return(ret);
}
int
pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
{
	int error = 0;

	if (queue < 0 || queue > WORKQ_NUM_PRIOQUEUE)
		return(EINVAL);

	error = __workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);

	if (error == -1)
		return(errno);
	return(0);
}
void
pthread_workqueue_atfork_prepare(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_prepare();
}

void
pthread_workqueue_atfork_parent(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_parent();
}

void
pthread_workqueue_atfork_child(void)
{
#if defined(__i386__) || defined(__x86_64__)
	/*
	 * NOTE: workq additions here
	 * are for i386,x86_64 only as
	 * ppc and arm do not support it
	 */
	__workqueue_list_lock = OS_SPINLOCK_INIT;
	if (kernel_workq_setup != 0) {
		kernel_workq_setup = 0;
		_pthread_work_internal_init();
	}
#endif
	dispatch_atfork_child();
}
static int
_pthread_work_internal_init(void)
{
	int i, error;
	pthread_workqueue_head_t headp;
	pthread_workitem_t witemp;
	pthread_workqueue_t wq;

	if (kernel_workq_setup == 0) {
#if defined(__i386__) || defined(__x86_64__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), NULL, NULL, NULL);
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, NULL, NULL);
#endif

		_pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		_pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;

		for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
			headp = __pthread_wq_head_tbl[i];
			TAILQ_INIT(&headp->wqhead);
			headp->next_workq = 0;
		}

		/* create work item and workqueue pools */
		witemp = (struct _pthread_workitem *)malloc(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
		bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE));
		for (i = 0; i < WORKITEM_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
		}
		wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
		bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
		for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
		}

		if ((error = __workq_open()) != 0) {
			TAILQ_INIT(&__pthread_workitem_pool_head);
			TAILQ_INIT(&__pthread_workqueue_pool_head);
			free(witemp);
			free(wq);
			return(ENOMEM);
		}
		kernel_workq_setup = 1;
	}
	return(0);
}
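/*
 * _pthread_work_internal_init() runs once under the list lock: it
 * registers the thread entry points with the kernel via
 * __bsdthread_register(), seeds the default workqueue attribute, and
 * pre-populates the work item and workqueue free pools so the common
 * enqueue path does not have to call malloc() while scheduling.
 */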
/* This routine is called with list lock held */
static pthread_workitem_t
alloc_workitem(void)
{
	pthread_workitem_t witem;

	if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
		/* the pool is empty: drop the list lock around malloc() */
		workqueue_list_unlock();
		witem = malloc(sizeof(struct _pthread_workitem));
		witem->gencount = 0;
		workqueue_list_lock();
	} else {
		witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
		TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
	}
	return(witem);
}
/* This routine is called with list lock held */
static void
free_workitem(pthread_workitem_t witem)
{
	witem->gencount++;
	TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
}
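/*
 * Pool protocol: alloc_workitem()/free_workitem() are called with the
 * workqueue list lock held. When the pool is empty, alloc_workitem()
 * drops the lock around malloc() and retakes it, so callers must
 * re-validate any queue state they examined before the allocation (see
 * the state re-check in pthread_workqueue_additem_np()).
 */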
/* This routine is called with list lock held */
static pthread_workqueue_t
alloc_workqueue(void)
{
	pthread_workqueue_t wq;

	if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
		workqueue_list_unlock();
		wq = malloc(sizeof(struct _pthread_workqueue));
		workqueue_list_lock();
	} else {
		wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
		TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
	}
	return(wq);
}
/* This routine is called with list lock held */
static void
free_workqueue(pthread_workqueue_t wq)
{
	TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
}
static void
_pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
{
	bzero(wq, sizeof(struct _pthread_workqueue));
	if (attr != NULL) {
		wq->queueprio = attr->queueprio;
		wq->overcommit = attr->overcommit;
	} else {
		wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		wq->overcommit = 0;
	}
	LOCK_INIT(wq->lock);
	TAILQ_INIT(&wq->item_listhead);
	TAILQ_INIT(&wq->item_kernhead);
#if WQ_LISTTRACE
	__kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
#endif
	wq->wq_list.tqe_next = 0;
	wq->wq_list.tqe_prev = 0;
	wq->sig = PTHREAD_WORKQUEUE_SIG;
	wq->headp = __pthread_wq_head_tbl[wq->queueprio];
}
static int
valid_workq(pthread_workqueue_t workq)
{
	if (workq->sig == PTHREAD_WORKQUEUE_SIG)
		return(1);
	else
		return(0);
}
/* called with list lock */
static void
pick_nextworkqueue_droplock(void)
{
	int i, curwqprio, val, found;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t workq;
	pthread_workqueue_t nworkq = NULL;

#if WQ_TRACE
	__kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
#endif
loop:
	while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
		found = 0;
		for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
			wqreadyprio = i;	/* because there is nothing else higher to run */
			headp = __pthread_wq_head_tbl[i];

			if (TAILQ_EMPTY(&headp->wqhead))
				continue;
			workq = headp->next_workq;
			if (workq == NULL)
				workq = TAILQ_FIRST(&headp->wqhead);
			curwqprio = workq->queueprio;
			nworkq = workq;	/* starting pt */
			while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL)
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
#if WQ_TRACE
				__kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
#endif
				val = post_nextworkitem(workq);

				if (val != 0) {
					/* things could have changed, so reassess */
					/* If the kernel queue is full, skip */
					if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
						break;
					/* If anything with higher prio arrived, then re-evaluate */
					if (wqreadyprio < curwqprio)
						goto loop;	/* we need to re-evaluate again */
					/* we can post some more work items */
					found = 1;
				}

				/* cannot use workq here as it could be freed */
				if (TAILQ_EMPTY(&headp->wqhead))
					break;
				/* if we found nothing to run and only one workqueue in the list, skip */
				if ((val == 0) && (workq == headp->next_workq))
					break;
				workq = headp->next_workq;
				if (workq == NULL)
					workq = TAILQ_FIRST(&headp->wqhead);
				curwqprio = workq->queueprio;
				/* if we found nothing to run and are back to the workq where we started */
				if ((val == 0) && (workq == nworkq))
					break;
			}
			if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
				break;
		}
		/* nothing found to run? */
		if (found == 0)
			break;
	}
	workqueue_list_unlock();
}
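/*
 * Scheduling sketch for pick_nextworkqueue_droplock(): starting at the
 * highest-priority bucket, it round-robins through the workqueues in
 * each bucket (headp->next_workq is the rotor), posting items until the
 * kernel queue fills (KERNEL_WORKQ_ELEM_MAX) or nothing further is
 * runnable; a higher-priority arrival (wqreadyprio dropping below
 * curwqprio) restarts the scan. It always returns with the list lock
 * dropped.
 */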
static int
post_nextworkitem(pthread_workqueue_t workq)
{
	int error, prio;
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
		return(0);
	}
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
#endif
	if (TAILQ_EMPTY(&workq->item_listhead)) {
		return(0);
	}
	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
		return(0);

	witem = TAILQ_FIRST(&workq->item_listhead);
	headp = workq->headp;
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
#endif
	if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
#if WQ_TRACE
		__kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		/* Also barrier when nothing is there needs to be handled */
		/* Nothing to wait for */
		if (workq->kq_count != 0) {
			witem->flags |= PTH_WQITEM_APPLIED;
			workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
			workq->barrier_count = workq->kq_count;
#if WQ_TRACE
			__kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		} else {
#if WQ_TRACE
			__kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
#endif
			if (witem->func != NULL) {
				/* since we are going to drop list lock */
				witem->flags |= PTH_WQITEM_APPLIED;
				workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))witem->func;
				(*func)(workq, witem->func_arg);
#if WQ_TRACE
				__kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
#endif
				workqueue_list_lock();
				workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
			}
			TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(witem);
#if WQ_TRACE
			__kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		}
	} else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
#if WQ_TRACE
		__kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		witem->flags |= PTH_WQITEM_APPLIED;
		workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
		workq->barrier_count = workq->kq_count;
		workq->term_callback = (void (*)(struct _pthread_workqueue *,void *))witem->func;
		workq->term_callarg = witem->func_arg;
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
			if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
#endif
			}
			free_workitem(witem);
			workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
#endif
			headp = __pthread_wq_head_tbl[workq->queueprio];
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			workq->sig = 0;
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
			free_workqueue(workq);
			return(1);
		} else {
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		}
#if WQ_TRACE
		__kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
#endif
		return(1);
	} else {
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
#endif
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
		if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
			workq->kq_count++;
			witem->flags |= PTH_WQITEM_KERN_COUNT;
		}
		OSAtomicIncrement32(&kernel_workq_count);
		workqueue_list_unlock();

		prio = workq->queueprio;
		if (workq->overcommit != 0) {
			prio |= WORKQUEUE_OVERCOMMIT;
		}

		if ((error = __workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
			OSAtomicDecrement32(&kernel_workq_count);
			workqueue_list_lock();
#if WQ_TRACE
			__kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
			TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
				workq->flags |= PTHREAD_WORKQ_REQUEUED;
		} else
			workqueue_list_lock();
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
		return(1);
	}
	/* no one should come here */
	printf("error in logic for next workitem\n");
	LIBC_ABORT("error in logic for next workitem");
	return(0);
}
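/*
 * post_nextworkitem() distinguishes three kinds of items at the head of
 * the queue: a barrier (block further posting until the kq_count
 * in-kernel items drain, then run the optional callback), a destroy
 * request (a terminal barrier that tears the queue down and fires
 * term_callback), and a regular item (handed to the kernel via
 * WQOPS_QUEUE_ADD, and requeued at the head if that call fails). A
 * return value of 1 means "made progress"; 0 means the queue is blocked
 * or empty.
 */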
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
{
	int ret;
#if WQ_DEBUG
	pthread_t pself;
#endif
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workqueue_t workq;

	workq = item->workq;
	if (reuse == 0) {
		/* reuse is set to 0, when a thread is newly created to run a workitem */
		_pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
		self->wqthread = 1;
		self->wqkillset = 0;
		self->parentcheck = 1;

		/* These are not joinable threads */
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;
#if defined(__i386__) || defined(__x86_64__)
		_pthread_set_self(self);
#endif
#if WQ_TRACE
		__kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
#endif
		self->kernel_thread = kport;
		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
		/* Add to the pthread list */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if WQ_TRACE
		__kdebug_trace(0x900000c, self, 0, 0, 10, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
			printf("Failed to set thread_id in pthread_wqthread\n");
#endif
	} else {
		/* reuse is set to 1, when a thread is reused to run another work item */
#if WQ_TRACE
		__kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
#endif
		/* reset all tsd from 1 to KEYS_MAX */
		if (self == NULL)
			LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);

		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
	}

#if WQ_DEBUG
	if (reuse == 0) {
		pself = pthread_self();
		if (self != pself) {
#if WQ_TRACE
			__kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
#endif
			printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
			_pthread_set_self(self);
			pself = pthread_self();
			if (self != pself)
				printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
			pself = self;
		}
	} else {
		pself = pthread_self();
		if (self != pself) {
			printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
			LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
		}
	}
#endif /* WQ_DEBUG */

	self->cur_workq = workq;
	self->cur_workitem = item;
	OSAtomicDecrement32(&kernel_workq_count);

	ret = (int)(intptr_t)(*self->fun)(self->arg);

	/* If we reach here without going through the above initialization path then don't go through
	 * with the teardown code path ( e.g. setjmp/longjmp ). Instead just exit this thread.
	 */
	if (self != pthread_self()) {
		pthread_exit(PTHREAD_CANCELED);
	}

	workqueue_exit(self, workq, item);
}
static void
workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	workqueue_list_lock();

	TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
	workq->kq_count--;
#if WQ_TRACE
	__kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
#endif
	free_workitem(item);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
#if WQ_TRACE
		__kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
#endif
		if (workq->barrier_count <= 0) {
			/* Need to remove barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY | PTH_WQITEM_APPLIED)) == 0)
				printf("Incorrect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and a callback is registered, run that */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(baritem);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
#if WQ_TRACE
			__kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
#endif
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
#endif
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				workq->sig = 0;
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
			} else {
				/* if there is a higher prio schedulable item, reset wqreadyprio */
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
					wqreadyprio = workq->queueprio;
			}
		}
	}
#if WQ_TRACE
	else {
		__kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
	}

	__kdebug_trace(0x900005c, self, item, 0, 0, 0);
#endif
	pick_nextworkqueue_droplock();
	_pthread_workq_return(self);
}
static void
_pthread_workq_return(pthread_t self)
{
	__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);

	/* This is the way to terminate the thread */
	_pthread_exit(self, NULL);
}
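/*
 * Return protocol for workqueue threads: WQOPS_THREAD_RETURN tells the
 * kernel this thread is idle and may be parked or reused for the next
 * item; if the call instead returns to user space, the thread is
 * genuinely done and terminates through _pthread_exit().
 */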
/* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
{
	pthread_workqueue_t wq;
	pthread_workqueue_head_t headp;

#if defined(__arm__)
	/* not supported under arm */
	return(ENOTSUP);
#endif
#if defined(__ppc__)
	IF_ROSETTA() {
		return(ENOTSUP);
	}
#endif
	if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
		return(EINVAL);
	}

	if (__is_threaded == 0)
		__is_threaded = 1;

	workqueue_list_lock();
	if (kernel_workq_setup == 0) {
		int ret = _pthread_work_internal_init();
		if (ret != 0) {
			workqueue_list_unlock();
			return(ret);
		}
	}

	wq = alloc_workqueue();

	_pthread_workq_init(wq, attr);

	headp = __pthread_wq_head_tbl[wq->queueprio];
	TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
	if (headp->next_workq == NULL) {
		headp->next_workq = TAILQ_FIRST(&headp->wqhead);
	}

	workqueue_list_unlock();

	*workqp = wq;

	return(0);
}
int
pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
{
	pthread_workitem_t witem;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->func = workitem_func;
	witem->func_arg = workitem_arg;
	witem->flags = 0;
	witem->workq = workq;
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;

	/* alloc workitem can drop the lock, check the state */
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		free_workitem(witem);
		workqueue_list_unlock();
		return(ESRCH);
	}

	if (itemhandlep != NULL)
		*itemhandlep = (pthread_workitem_handle_t *)witem;
	if (gencountp != NULL)
		*gencountp = witem->gencount;
#if WQ_TRACE
	__kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
#endif
	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
	__kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
		wqreadyprio = workq->queueprio;

	pick_nextworkqueue_droplock();

	return(0);
}
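/*
 * Minimal usage sketch for the two entry points above (illustrative
 * only, not part of this file; my_work_fn and my_arg are hypothetical
 * caller-supplied names, and error handling is elided):
 *
 *	pthread_workqueue_t wq;
 *	pthread_workitem_handle_t handle;
 *	unsigned int gen;
 *
 *	pthread_workqueue_create_np(&wq, NULL);		// NULL attr: default priority
 *	pthread_workqueue_additem_np(wq, my_work_fn, my_arg, &handle, &gen);
 *
 * The (handle, gencount) pair identifies the queued item.
 */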
int
pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
{
	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	if (ocommp != NULL)
		*ocommp = workq->overcommit;
	return(0);
}
/* Remaining workqueue entry points; their definitions are elided here. */
int pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle, unsigned int gencount);
int pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, pthread_workitem_handle_t *itemhandlep, unsigned int *gencountp);
int pthread_workqueue_suspend_np(pthread_workqueue_t workq);
int pthread_workqueue_resume_np(pthread_workqueue_t workq);
#else /* !BUILDING_VARIANT ] [ */
extern int __unix_conforming;
extern int _pthread_count;
extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);

#endif /* !BUILDING_VARIANT ] */
#if __DARWIN_UNIX03

__private_extern__ void
__posix_join_cleanup(void *arg)
{
	pthread_t thread = (pthread_t)arg;
	int already_exited, res;
	void * dummy;
	semaphore_t death;
	mach_port_t joinport;
	int newstyle = 0;

	LOCK(thread->lock);
	already_exited = (thread->detached & _PTHREAD_EXITED);

	newstyle = thread->newstyle;
#if WQ_TRACE
	__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
#endif
	if (newstyle == 0) {
		death = thread->death;
		if (!already_exited) {
			thread->joiner = (struct _pthread *)NULL;
			UNLOCK(thread->lock);
			restore_sem_to_pool(death);
		} else {
			UNLOCK(thread->lock);
			while ((res = _pthread_reap_thread(thread,
					thread->kernel_thread,
					&dummy, 1)) == EAGAIN) {
				sched_yield();
			}
			restore_sem_to_pool(death);
		}
	} else {
		/* leave another thread to join */
		thread->joiner = (struct _pthread *)NULL;
		UNLOCK(thread->lock);
	}
}

#endif /* __DARWIN_UNIX03 */
/*
 * Wait for a thread to terminate and obtain its exit value.
 */
/*
int
pthread_join(pthread_t thread,
	     void **value_ptr)

moved to pthread_cancelable.c */
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if (_pthread_lookup_thread(thread, NULL, 0) != 0)
		return(ESRCH);

	/* if the thread is a workqueue thread, then return error */
	if (thread->wqthread != 0) {
		return(ENOTSUP);
	}
#if __DARWIN_UNIX03
	int state;

	LOCK(thread->lock);
	state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
	UNLOCK(thread->lock);
	if (state & PTHREAD_CANCEL_ENABLE)
		__pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
	thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
	return(0);
}
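/*
 * Cancellation note: workqueue threads are never cancelable (ENOTSUP
 * above). In the conforming (__DARWIN_UNIX03) path the pending bit is
 * set under the thread lock and the kernel is notified via
 * __pthread_markcancel() only when cancellation is currently enabled; a
 * later pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, ...) picks up the
 * pending cancel through _pthread_testcancel().
 */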
void
pthread_testcancel(void)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
	_pthread_testcancel(self, 1);
#else /* __DARWIN_UNIX03 */
	_pthread_testcancel(self, 0);
#endif /* __DARWIN_UNIX03 */
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	return (_pthread_setcancelstate_internal(state, oldstate, 1));
#else /* __DARWIN_UNIX03 */
	return (_pthread_setcancelstate_internal(state, oldstate, 0));
#endif /* __DARWIN_UNIX03 */
}
/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if ((type != PTHREAD_CANCEL_DEFERRED) &&
	    (type != PTHREAD_CANCEL_ASYNCHRONOUS))
		return(EINVAL);
	self = pthread_self();
	LOCK(self->lock);
	if (oldtype)
		*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state |= type;
	UNLOCK(self->lock);
#if !__DARWIN_UNIX03
	_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
#endif /* __DARWIN_UNIX03 */
	return(0);
}
int
pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
{
#if __DARWIN_UNIX03
	int err = 0;

	if (__pthread_sigmask(how, set, oset) == -1) {
		err = errno;
	}
	return(err);
#else /* __DARWIN_UNIX03 */
	return(__pthread_sigmask(how, set, oset));
#endif /* __DARWIN_UNIX03 */
}
/*
int
sigwait(const sigset_t * set, int * sig)

moved to pthread_cancelable.c */