/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */

#include "pthread_internals.h"
#include "pthread_workqueue.h"

#include <assert.h>
#include <stdio.h>	/* For printf(). */
#include <stdlib.h>	/* For malloc()/free(). */
#include <string.h>	/* For memset()/bzero(). */
#include <errno.h>	/* For __mach_errno_addr() prototype. */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#define	__APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#include <libkern/OSAtomic.h>
#if defined(__ppc__)
#include <libkern/OSCrossEndian.h>
#endif
#ifndef BUILDING_VARIANT /* [ */

__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
static void _pthread_tsd_reinit(pthread_t t);
static int _new_pthread_create_suspended(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg,
		int create_susp);
/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;
int _pthread_find_thread(pthread_t thread);

/* Mach message used to notify that a thread needs to be reaped */

typedef struct _pthread_reap_msg_t {
	mach_msg_header_t header;
	pthread_t thread;
	mach_msg_trailer_t trailer;
} pthread_reap_msg_t;

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
 * pthread has been created.
 */
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;
int __unix_conforming = 0;
__private_extern__ size_t pthreadsize = 0;

/* under rosetta we will use old style creation of threads */
static int __oldstyle = 0;

__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
/* Same implementation as LOCK, but without the __is_threaded check */
__private_extern__ void
_spin_lock_retry(pthread_lock_t *lock)
{
	int tries = _spin_tries;

	do {
		if (tries-- > 0)
			continue;
		syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
		tries = _spin_tries;
	} while(!_spin_lock_try(lock));
}
extern mach_port_t thread_recycle_port;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
 * variables.
 */
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
static void _pthread_exit(pthread_t self, void *value_ptr);
int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static pthread_attr_t _pthread_attr_default = {0};
static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr);
static int handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item);
static int kernel_workq_setup = 0;
static volatile int32_t kernel_workq_count = 0;
static volatile unsigned int user_workq_count = 0;
#define KERNEL_WORKQ_ELEM_MAX	64	/* Max number of elements in the kernel */
static int wqreadyprio = 0;	/* current highest prio queue ready with items */

__private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
__private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);

struct _pthread_workqueue_head __pthread_workq0_head;
struct _pthread_workqueue_head __pthread_workq1_head;
struct _pthread_workqueue_head __pthread_workq2_head;
struct _pthread_workqueue_head __pthread_workq3_head;
struct _pthread_workqueue_head __pthread_workq4_head;
pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head, &__pthread_workq4_head};

static void workqueue_list_lock(void);
static void workqueue_list_unlock(void);
static int valid_workq(pthread_workqueue_t);
static void pick_nextworkqueue_droplock(void);
static int post_nextworkitem(pthread_workqueue_t workq);
static void _pthread_workq_return(pthread_t self);
static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, int flags);
static pthread_workitem_t alloc_workitem(void);
static void free_workitem(pthread_workitem_t);
static pthread_workqueue_t alloc_workqueue(void);
static void free_workqueue(pthread_workqueue_t);
static int _pthread_work_internal_init(void);
static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);

/* workq_ops commands */
#define WQOPS_QUEUE_ADD 1
#define WQOPS_QUEUE_REMOVE 2
#define WQOPS_THREAD_RETURN 4
/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);

#define PTHREAD_START_CUSTOM	0x01000000
#define PTHREAD_START_SETSCHED	0x02000000
#define PTHREAD_START_DETACHED	0x04000000
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
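/*
 * Illustrative sketch, not part of the original source: packing the
 * flags word according to the layout comment above. The helper name
 * _pthread_pack_start_flags is hypothetical; the library builds the
 * same word inline in _new_pthread_create_suspended() below.
 */
static inline unsigned int
_pthread_pack_start_flags(unsigned int flags, int policy, int importance)
{
	/* policy lands in bits 16..23, importance in bits 0..15 */
	flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
	flags |= (importance & PTHREAD_START_IMPORTANCE_MASK);
	return (flags);
}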
extern pthread_t __bsdthread_create(void (*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__) || defined(__x86_64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error Need to define a stack address hint for this architecture
#endif

/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
 * The guard pages for stack-overflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr), then no guard pages
 * are set up for the thread.
 */
static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
	kern_return_t kr;
	vm_address_t stackaddr;
	size_t guardsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);
	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		return 0;
	}

	guardsize = attrs->guardsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
		attrs->stacksize + guardsize,
		vm_page_size - 1,
		VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
		0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
		VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
			&stackaddr, attrs->stacksize + guardsize,
			VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}
	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	if (guardsize)
		kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);
	return 0;
}
static int
_pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
{
	kern_return_t kr;
	pthread_t t;
	vm_address_t stackaddr;
	size_t guardsize, allocsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);

	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		t = (pthread_t)malloc(pthreadsize);
		_pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
		t->freeStackOnExit = 0;
		t->freeaddr = 0;
		t->freesize = 0;
		*thread = t;
		return 0;
	}

	guardsize = attrs->guardsize;
	allocsize = attrs->stacksize + guardsize + pthreadsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
		allocsize,
		vm_page_size - 1,
		VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
		0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
		VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
			&stackaddr, allocsize,
			VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}
	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	if (guardsize)
		kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);

	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);

	t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
	_pthread_struct_init(t, attrs, *stack, 0, 0, 1);
	t->freesize = allocsize;
	t->freeaddr = (void *)stackaddr;
	t->freeStackOnExit = 1;
	*thread = t;
	return 0;
}
static kern_return_t
_pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
{
	kern_return_t res = 0;
	vm_address_t freeaddr;
	size_t freesize;
	task_t self = mach_task_self();
	int thread_count;
	mach_port_t kport;
	semaphore_t joinsem = SEMAPHORE_NULL;

	__kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
	kport = t->kernel_thread;
	joinsem = t->joiner_notify;

	if (t->freeStackOnExit) {
		freeaddr = (vm_address_t)t->freeaddr;
		if (freestruct)
			freesize = t->stacksize + t->guardsize + pthreadsize;
		else
			freesize = t->stacksize + t->guardsize;
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				/* if parent has not returned from create yet keep pthread_t */
				__kdebug_trace(0x9000010, t, 0, 0, 1, 0);
				if (t->parentcheck == 0)
					freesize -= pthreadsize;
			}
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);

			__kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
			if (thread_count <= 0)
				exit(0);
			else
				__bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
		} else {
			__kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
			res = vm_deallocate(mach_task_self(), freeaddr, freesize);
		}
	} else {
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				__kdebug_trace(0x9000010, t, 0, 0, 2, 0);
			}
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);

			if (freestruct) {
				__kdebug_trace(0x9000008, t, 0, 0, 2, 0);
				free(t);
			}

			freeaddr = 0;
			freesize = 0;
			__kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
			if (thread_count <= 0)
				exit(0);
			else
				__bsdthread_terminate(NULL, 0, kport, joinsem);
		} else if (freestruct) {
			t->sig = _PTHREAD_NO_SIG;
			__kdebug_trace(0x9000024, t, 0, 0, 2, 0);
			free(t);
		}
	}
	return (res);
}
/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		attr->sig = 0;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
			    int *detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*detachstate = attr->detached;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
			     int *inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*inheritsched = attr->inherit;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
			   struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*param = attr->param;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
			    int *policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*policy = attr->policy;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/* Retain the existing stack size of 512K and do not depend on the main thread's default stack size */
static const size_t DEFAULT_STACK_SIZE = (512 * 1024);

/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
	attr->stacksize = DEFAULT_STACK_SIZE;
	attr->stackaddr = NULL;
	attr->sig = _PTHREAD_ATTR_SIG;
	attr->param.sched_priority = default_priority;
	attr->param.quantum = 10; /* quantum isn't public yet */
	attr->detached = PTHREAD_CREATE_JOINABLE;
	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->freeStackOnExit = 1;
	attr->fastpath = 1;
	attr->schedset = 0;
	attr->guardsize = vm_page_size;
	return (0);
}
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
			    int detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
		    (detachstate == PTHREAD_CREATE_DETACHED))
		{
			attr->detached = detachstate;
			return (0);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
			     int inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
		    (inheritsched == PTHREAD_EXPLICIT_SCHED))
		{
			attr->inherit = inheritsched;
			return (0);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
			   const struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
			    int policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((policy == SCHED_OTHER) ||
		    (policy == SCHED_RR) ||
		    (policy == SCHED_FIFO))
		{
			attr->policy = policy;
			attr->schedset = 1;
			return (0);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
		      int scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			/* No attribute yet for the scope */
			return (0);
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			return (ENOTSUP);
		}
	}
	return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(const pthread_attr_t *attr,
		      int *scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
		attr->stackaddr = stackaddr;
		attr->freeStackOnExit = 0;
		attr->fastpath = 0;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = attr->stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stacksize = stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = attr->stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

/* By SUSV spec, the stackaddr is the base address, the lowest addressable
 * byte address. This is not the same as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) &&
	    (((uintptr_t)stackaddr % vm_page_size) == 0) &&
	    ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		attr->freeStackOnExit = 0;
		attr->fastpath = 0;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
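/*
 * Illustrative example, not part of the original source, showing the
 * address conventions above: pthread_attr_setstack() takes the lowest
 * address of the region, while pthread_attr_setstackaddr() takes the
 * high base the stack grows down from. Assuming a page-aligned buffer
 * `addr` of `size` bytes:
 *
 *	pthread_attr_setstack(&attr, addr, size);
 *	// is equivalent, per the conversion above, to:
 *	pthread_attr_setstackaddr(&attr, (void *)((uintptr_t)addr + size));
 *	pthread_attr_setstacksize(&attr, size);
 */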
/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr,
			  size_t guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* Guardsize of 0 is valid, it means no guard */
		if ((guardsize % vm_page_size) == 0) {
			attr->guardsize = guardsize;
			attr->fastpath = 0;
			return (0);
		} else
			return (EINVAL);
	}
	return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr,
			  size_t *guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = attr->guardsize;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Create and start execution of a new thread.
 */
static void
_pthread_body(pthread_t self)
{
	_pthread_set_self(self);
	_pthread_exit(self, (self->fun)(self->arg));
}
void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int pflags)
{
	pthread_t pself;
	pthread_attr_t *attrs = &_pthread_attr_default;
	char * stackaddr;

	if ((pflags & PTHREAD_START_CUSTOM) == 0) {
		stackaddr = (char *)self;
		_pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
		LOCK(_pthread_list_lock);
		if (pflags & PTHREAD_START_SETSCHED) {
			self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
			self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
		}
		/* These are not joinable threads */
		if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
			self->detached &= ~PTHREAD_CREATE_JOINABLE;
			self->detached |= PTHREAD_CREATE_DETACHED;
		}
	} else
		LOCK(_pthread_list_lock);
	self->kernel_thread = kport;
	self->fun = fun;
	self->arg = funarg;

	/* Add to the pthread list */
	if (self->parentcheck == 0) {
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
		__kdebug_trace(0x900000c, self, 0, 0, 3, 0);
		_pthread_count++;
	}
	self->childrun = 1;
	UNLOCK(_pthread_list_lock);
#if defined(__i386__) || defined(__x86_64__)
	_pthread_set_self(self);
#endif

	pself = pthread_self();
	__kdebug_trace(0x9000030, self, pflags, 0, 0, 0);

	_pthread_exit(self, (self->fun)(self->arg));
}
static int
_pthread_create(pthread_t t,
		const pthread_attr_t *attrs,
		void *stack,
		const mach_port_t kernel_thread)
{
	int res;
	res = 0;

	do
	{
		memset(t, 0, sizeof(*t));
		t->tsd[0] = t;
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
		t->guardsize = attrs->guardsize;
		t->kernel_thread = kernel_thread;
		t->detached = attrs->detached;
		t->inherit = attrs->inherit;
		t->policy = attrs->policy;
		t->param = attrs->param;
		t->freeStackOnExit = attrs->freeStackOnExit;
		t->mutexes = (struct _pthread_mutex *)NULL;
		t->sig = _PTHREAD_SIG;
		t->reply_port = MACH_PORT_NULL;
		t->cthread_self = NULL;
		LOCK_INIT(t->lock);
		t->plist.tqe_next = (struct _pthread *)0;
		t->plist.tqe_prev = (struct _pthread **)0;
		t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
		t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
		t->death = SEMAPHORE_NULL;

		if (kernel_thread != MACH_PORT_NULL)
			(void)pthread_setschedparam(t, t->policy, &t->param);
	} while (0);
	return (res);
}
void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
{
	mach_vm_offset_t stackaddr = (mach_vm_offset_t)(uintptr_t)stack;

	if (nozero == 0) {
		memset(t, 0, sizeof(*t));
		t->plist.tqe_next = (struct _pthread *)0;
		t->plist.tqe_prev = (struct _pthread **)0;
	}
	t->schedset = attrs->schedset;
	LOCK_INIT(t->lock);
	if (kernalloc != 0) {
		stackaddr = (mach_vm_offset_t)(uintptr_t)t;

		/* if allocated from kernel set values appropriately */
		t->stacksize = stacksize;
		t->stackaddr = (void *)(uintptr_t)stackaddr;
		t->freeStackOnExit = 1;
		t->freeaddr = (void *)(uintptr_t)(stackaddr - stacksize - vm_page_size);
		t->freesize = pthreadsize + stacksize + vm_page_size;
	} else {
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
	}
	t->guardsize = attrs->guardsize;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->param = attrs->param;
	t->mutexes = (struct _pthread_mutex *)NULL;
	t->sig = _PTHREAD_SIG;
	t->reply_port = MACH_PORT_NULL;
	t->cthread_self = NULL;
	t->tsd[0] = t;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
	t->death = SEMAPHORE_NULL;
	t->newstyle = 1;
	t->kernalloc = kernalloc;
}
static void
_pthread_tsd_reinit(pthread_t t)
{
	bzero(&t->tsd[1], (_INTERNAL_POSIX_THREAD_KEYS_END - 1) * sizeof(void *));
}

/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

/* Non-portable public API to know whether this process has (had) at least one
 * thread apart from the main thread. There could be a race if a thread is in
 * the process of being created at the time of the call. It does not tell
 * whether there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
	return (__is_threaded);
}
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;

	if (_pthread_lookup_thread(t, &kport, 0) != 0)
		return (MACH_PORT_NULL);

	return (kport);
}

pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread * p = NULL;

	/* No need to wait as mach port is already known */
	LOCK(_pthread_list_lock);
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p->kernel_thread == kernel_thread)
			break;
	}
	UNLOCK(_pthread_list_lock);
	return (p);
}
size_t
pthread_get_stacksize_np(pthread_t t)
{
	int ret;
	size_t size = 0;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	size = t->stacksize;
	UNLOCK(_pthread_list_lock);
	return (size);
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
	int ret;
	void * addr = NULL;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return ((void *)(uintptr_t)ret);
	}
	addr = t->stackaddr;
	UNLOCK(_pthread_list_lock);
	return (addr);
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
	return t->reply_port;
}
/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	pthread_t self = pthread_self();

	return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
static int
_new_pthread_create_suspended(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg,
	       int create_susp)
{
	pthread_attr_t *attrs;
	void *stack;
	int error;
	unsigned int flags;
	pthread_t t;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;
	task_t self = mach_task_self();
	int kernalloc = 0;
	int susp = create_susp;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	error = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
	 * any change in priority or policy is needed here.
	 */
	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
	    (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
		needresume = 1;
		susp = 1;
	} else
		needresume = 0;

	if ((__oldstyle == 1) || (create_susp != 0)) {
		/* Rosetta or pthread_create_suspended() */
		/* running under rosetta */
		/* Allocate a stack for the thread */
		__kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
		if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
			return (error);
		}
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;
		if (susp) {
			/* Create the Mach thread for this thread */
			PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create thread: %d\n", kern_res);
				return (EINVAL);
			}
		}
		if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			return (error);
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on its way */
		t->arg = arg;
		t->fun = start_routine;
		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
		__kdebug_trace(0x900000c, t, 0, 0, 4, 0);
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, susp, needresume);
		return (0);
	} else {
		flags = 0;
		if (attrs->fastpath == 1)
			kernalloc = 1;

		if (attrs->detached == PTHREAD_CREATE_DETACHED)
			flags |= PTHREAD_START_DETACHED;
		if (attrs->schedset != 0) {
			flags |= PTHREAD_START_SETSCHED;
			flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
			flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
		}

		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		if (kernalloc == 0) {
			/* Allocate a stack for the thread */
			flags |= PTHREAD_START_CUSTOM;
			if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
				return (error);
			}
			/* Send it on its way */
			t->arg = arg;
			t->fun = start_routine;

			__kdebug_trace(0x9000004, t, flags, 0, 0, 0);

			if ((t = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
				_pthread_free_pthread_onstack(t, 1, 0);
				return (EAGAIN);
			}
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
				__kdebug_trace(0x9000008, t, 0, 0, 1, 0);
				free(t);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
				__kdebug_trace(0x900000c, t, 0, 0, 1, 0);
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;
			__kdebug_trace(0x9000014, t, 0, 0, 1, 0);
			return (0);
		} else {
			/* kernel allocation */
			__kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
			if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
				return (EAGAIN);
			/* Now set it up to execute */
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
				__kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
				vm_deallocate(self, (vm_address_t)t, pthreadsize);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
				__kdebug_trace(0x900000c, t, 0, 0, 2, 0);
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;
			__kdebug_trace(0x9000014, t, 0, 0, 2, 0);
			return (0);
		}
	}
}
static int
_pthread_create_suspended(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg,
	       int suspended)
{
	pthread_attr_t *attrs;
	void *stack;
	int res;
	pthread_t t;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	res = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
	 * any change in priority or policy is needed here.
	 */
	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
	    (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
		needresume = 1;
		suspended = 1;
	} else
		needresume = 0;

	do
	{
		/* Allocate a stack for the thread */
		if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
			break;
		}
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;
		if (suspended) {
			/* Create the Mach thread for this thread */
			PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create thread: %d\n", kern_res);
				res = EINVAL; /* Need better error here? */
				break;
			}
		}
		if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			break;
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on its way */
		t->arg = arg;
		t->fun = start_routine;
		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
		__kdebug_trace(0x900000c, t, 0, 0, 5, 0);
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, suspended, needresume);
	} while (0);
	return (res);
}
int
pthread_create(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
}
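/*
 * Illustrative usage sketch, not part of the original source:
 *
 *	static void *worker(void *arg) { return arg; }
 *	...
 *	pthread_t tid;
 *	pthread_attr_t attr;
 *	pthread_attr_init(&attr);
 *	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
 *	if (pthread_create(&tid, &attr, worker, NULL) == 0)
 *		pthread_join(tid, NULL);
 *	pthread_attr_destroy(&attr);
 */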
int
pthread_create_suspended_np(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
/*
 * Make a thread 'undetached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
	int newstyle = 0;
	int ret;

	if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
		return (ret); /* Not a valid thread */

	LOCK(thread->lock);
	newstyle = thread->newstyle;
	if (thread->detached & PTHREAD_CREATE_JOINABLE)
	{
		if (thread->detached & _PTHREAD_EXITED) {
			UNLOCK(thread->lock);
			pthread_join(thread, NULL);
			return (0);
		} else {
			if (newstyle == 0) {
				semaphore_t death = thread->death;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;
				UNLOCK(thread->lock);
				if (death)
					(void) semaphore_signal(death);
			} else {
				mach_port_t joinport = thread->joiner_notify;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;

				UNLOCK(thread->lock);
				if (joinport)
					semaphore_signal(joinport);
			}
			return (0);
		}
	} else {
		UNLOCK(thread->lock);
		return (EINVAL);
	}
}
/*
 * pthread_kill call to system call
 */
extern int __pthread_kill(mach_port_t, int);

int
pthread_kill(pthread_t th, int sig)
{
	int error = 0;
	mach_port_t kport = MACH_PORT_NULL;

	if ((sig < 0) || (sig > NSIG))
		return (EINVAL);

	if (_pthread_lookup_thread(th, &kport, 0) != 0)
		return (ESRCH); /* Not a valid thread */

	error = __pthread_kill(kport, sig);

	if (error == -1)
		error = errno;
	return (error);
}
/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
	pthread_reap_msg_t msg;
	kern_return_t ret;

	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
					      MACH_MSG_TYPE_MOVE_SEND);
	msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
	msg.header.msgh_remote_port = thread_recycle_port;
	msg.header.msgh_local_port = kernel_thread;
	msg.header.msgh_id = 0x44454144; /* 'DEAD' */
	msg.thread = thread;
	ret = mach_msg_send(&msg.header);
	assert(ret == MACH_MSG_SUCCESS);
}
/* Reap the resources for available threads */
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
	mach_port_type_t ptype;
	kern_return_t ret;
	task_t self;

	self = mach_task_self();
	if (kernel_thread != MACH_PORT_DEAD) {
		ret = mach_port_type(self, kernel_thread, &ptype);
		if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
			/* not quite dead yet... */
			return EAGAIN;
		}
		ret = mach_port_deallocate(self, kernel_thread);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_deallocate(kernel_thread) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->reply_port != MACH_PORT_NULL) {
		ret = mach_port_mod_refs(self, th->reply_port,
					 MACH_PORT_RIGHT_RECEIVE, -1);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_mod_refs(reply_port) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->freeStackOnExit) {
		vm_address_t addr = (vm_address_t)th->stackaddr;
		vm_size_t size;

		size = (vm_size_t)th->stacksize + th->guardsize;

		addr -= size;
		ret = vm_deallocate(self, addr, size);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"vm_deallocate(stack) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (value_ptr)
		*value_ptr = th->exit_value;
	if (conforming) {
		if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
			*value_ptr = PTHREAD_CANCELED;
	}
	th->sig = _PTHREAD_NO_SIG;
	free(th);

	return 0;
}
void _pthread_reap_threads(void)
{
	pthread_reap_msg_t msg;
	kern_return_t ret;

	ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
		       sizeof msg, thread_recycle_port,
		       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	while (ret == MACH_MSG_SUCCESS) {
		mach_port_t kernel_thread = msg.header.msgh_remote_port;
		pthread_t thread = msg.thread;

		if (_pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
		{
			/* not dead yet, put it back for someone else to reap, stop here */
			_pthread_become_available(thread, kernel_thread);
			return;
		}
		ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
			       sizeof msg, thread_recycle_port,
			       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	}
}
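/*
 * Illustrative note, not part of the original source: the reap protocol
 * above is a small mailbox. _pthread_become_available() sends a 'DEAD'
 * message carrying the pthread_t and the Mach thread port to
 * thread_recycle_port; _pthread_reap_threads() drains that port with
 * zero-timeout mach_msg() receives and frees each thread whose kernel
 * port has actually become a dead name, re-posting any that are
 * "not quite dead yet".
 */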
/* For compatibility... */

pthread_t
_pthread_self(void) {
	return pthread_self();
}
/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

static void
_pthread_exit(pthread_t self, void *value_ptr)
{
	struct __darwin_pthread_handler_rec *handler;
	kern_return_t kern_res;
	int thread_count;
	int newstyle = self->newstyle;

	/* Keep this thread from receiving any more signals */
	__disable_threadsignal(1);

	__kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);

	/* set cancel state to disable and type to deferred */
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	while ((handler = self->__cleanup_stack) != 0)
	{
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	if (newstyle == 0) {
		_pthread_reap_threads();

		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			mach_port_t death = self->death;
			self->exit_value = value_ptr;
			UNLOCK(self->lock);
			/* the joiner will need a kernel thread reference, leave ours for it */
			if (death) {
				PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
				if (kern_res != KERN_SUCCESS)
					fprintf(stderr,
						"semaphore_signal(death) failed: %s\n",
						mach_error_string(kern_res));
			}
			LOCK(_pthread_list_lock);
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
		} else {
			UNLOCK(self->lock);
			LOCK(_pthread_list_lock);
			TAILQ_REMOVE(&__pthread_head, self, plist);
			__kdebug_trace(0x9000010, self, 0, 0, 5, 0);
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
			/* with no joiner, we let become available consume our cached ref */
			_pthread_become_available(self, self->kernel_thread);
		}

		if (thread_count <= 0)
			exit(0);

		/* Use a new reference to terminate ourselves. Should never return. */
		PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
		fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
			mach_error_string(kern_res));
	} else {
		semaphore_t joinsem = SEMAPHORE_NULL;

		if ((self->joiner_notify == NULL) && (self->detached & PTHREAD_CREATE_JOINABLE))
			joinsem = new_sem_from_pool();
		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		self->exit_value = value_ptr;
		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			if (self->joiner_notify == NULL) {
				self->joiner_notify = joinsem;
				joinsem = SEMAPHORE_NULL;
			}
			UNLOCK(self->lock);
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 0, 1);
		} else {
			UNLOCK(self->lock);
			/* with no joiner, we let become available consume our cached ref */
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 1, 1);
		}
	}
}
void
pthread_exit(void *value_ptr)
{
	pthread_t self = pthread_self();
	if (self->wqthread != 0)
		workqueue_exit(self, self->cur_workq, self->cur_workitem);
	else
		_pthread_exit(self, value_ptr);
}
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
		      int *policy,
		      struct sched_param *param)
{
	int ret;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	if (policy != 0)
		*policy = thread->policy;
	if (param != 0)
		*param = thread->param;
	UNLOCK(_pthread_list_lock);

	return (0);
}
/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
		      int policy,
		      const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy)
	{
		case SCHED_OTHER:
			bases.ts.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.ts;
			count = POLICY_TIMESHARE_BASE_COUNT;
			break;
		case SCHED_FIFO:
			bases.fifo.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.fifo;
			count = POLICY_FIFO_BASE_COUNT;
			break;
		case SCHED_RR:
			bases.rr.base_priority = param->sched_priority;
			/* quantum isn't public yet */
			bases.rr.quantum = param->quantum;
			base = (policy_base_t)&bases.rr;
			count = POLICY_RR_BASE_COUNT;
			break;
		default:
			return (EINVAL);
	}
	ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
	if (ret != KERN_SUCCESS)
		return (EINVAL);
	thread->policy = policy;
	thread->param = *param;
	return (0);
}
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
	      pthread_t t2)
{
	return (t1 == t2);
}
__private_extern__ void
_pthread_set_self(pthread_t p)
{
	extern void __pthread_set_self(pthread_t);

	if (p == 0) {
		bzero(&_thread, sizeof(struct _pthread));
		p = &_thread;
	}
	p->tsd[0] = p;
	__pthread_set_self(p);
}
void
cthread_set_self(void *cself)
{
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		_pthread_set_self(cself);
		return;
	}
	self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		return (void *)self;
	}
	return self->cthread_self;
}
/*
 * cancellation handler for pthread once as the init routine can have a
 * cancellation point. In that case we need to restore the spin unlock
 */
void
__pthread_once_cancel_handler(pthread_once_t *once_control)
{
	_spin_unlock(&once_control->lock);
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
	     void (*init_routine)(void))
{
	_spin_lock(&once_control->lock);
	if (once_control->sig == _PTHREAD_ONCE_SIG_init)
	{
		pthread_cleanup_push(__pthread_once_cancel_handler, once_control);
		(*init_routine)();
		pthread_cleanup_pop(0);
		once_control->sig = _PTHREAD_ONCE_SIG;
	}
	_spin_unlock(&once_control->lock);
	return (0); /* Spec defines no possible errors! */
}
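/*
 * Illustrative usage sketch, not part of the original source:
 *
 *	static pthread_once_t init_once = PTHREAD_ONCE_INIT;
 *	static void init_tables(void) { ... one-time setup ... }
 *	...
 *	pthread_once(&init_once, init_tables);	// safe from any thread
 */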
/*
 * Insert a cancellation point in a thread.
 */
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
	LOCK(thread->lock);
	if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
	{
		UNLOCK(thread->lock);
		if (isconforming)
			pthread_exit(PTHREAD_CANCELED);
		else
			pthread_exit(0);
	}
	UNLOCK(thread->lock);
}
int
pthread_getconcurrency(void)
{
	return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return EINVAL;
	pthread_concurrency = new_level;
	return (0);
}
/*
 * Perform package initialization - called automatically when application starts
 */
__private_extern__ int
pthread_init(void)
{
	pthread_attr_t *attrs;
	pthread_t thread;
	kern_return_t kr;
	host_priority_info_data_t priority_info;
	host_info_t info;
	host_flavor_t flavor;
	host_t host;
	mach_msg_type_number_t count;
	int mib[2];
	size_t len;
	void *stackaddr;

	pthreadsize = round_page(sizeof (struct _pthread));
	count = HOST_PRIORITY_INFO_COUNT;
	info = (host_info_t)&priority_info;
	flavor = HOST_PRIORITY_INFO;
	host = mach_host_self();
	kr = host_info(host, flavor, info, &count);
	if (kr != KERN_SUCCESS)
		printf("host_info failed (%d); probably need privilege.\n", kr);

	default_priority = priority_info.user_priority;
	min_priority = priority_info.minimum_priority;
	max_priority = priority_info.maximum_priority;

	attrs = &_pthread_attr_default;
	pthread_attr_init(attrs);

	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	thread = &_thread;
	TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
	_pthread_set_self(thread);

	/* In case of dyld reset the tsd keys from 1 - 10 */
	_pthread_keys_init();

	mib[0] = CTL_KERN;
	mib[1] = KERN_USRSTACK;
	len = sizeof (stackaddr);
	if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
		stackaddr = (void *)USRSTACK;
	_pthread_create(thread, attrs, stackaddr, mach_thread_self());
	thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

	_init_cpu_capabilities();
	if (_NumCPUs() > 1)
		_spin_tries = MP_SPIN_TRIES;

	mach_port_deallocate(mach_task_self(), host);

#if defined(__ppc__)
	IF_ROSETTA() {
		__oldstyle = 1;
	}
#endif
#if defined(__arm__)
	__oldstyle = 1;
#endif

#if defined(_OBJC_PAGE_BASE_ADDRESS)
	{
		vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
		kr = vm_map(mach_task_self(),
			&objcRTPage, vm_page_size * 4, vm_page_size - 1,
			VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
			MACH_PORT_NULL,
			(vm_address_t)0, FALSE,
			(vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
			VM_INHERIT_DEFAULT);
		/* We ignore the return result here. The ObjC runtime will just have to deal. */
	}
#endif

	mig_init(1);	/* enable multi-threaded mig interfaces */
	if (__oldstyle == 0) {
#if defined(__i386__) || defined(__x86_64__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
#endif
	}
	return 0;
}
int sched_yield(void)
{
	swtch_pri(0);
	return 0;
}
/* This used to be the "magic" that gets the initialization routine called when the application starts */
static int _do_nothing(void) { return 0; }
int (*_cthread_init_routine)(void) = _do_nothing;
/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t
new_sem_from_pool(void) {
	kern_return_t res;
	semaphore_t sem;
	int i;

	LOCK(sem_pool_lock);
	if (sem_pool_current == sem_pool_count) {
		sem_pool_count += 16;
		sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
		for (i = sem_pool_current; i < sem_pool_count; i++) {
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
		}
	}
	sem = sem_pool[sem_pool_current++];
	UNLOCK(sem_pool_lock);
	return sem;
}
/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
	LOCK(sem_pool_lock);
	sem_pool[--sem_pool_current] = sem;
	UNLOCK(sem_pool_lock);
}

static void sem_pool_reset(void) {
	LOCK(sem_pool_lock);
	sem_pool_count = 0;
	sem_pool_current = 0;
	sem_pool = NULL;
	UNLOCK(sem_pool_lock);
}
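/*
 * Illustrative note, not part of the original source: the pool is a
 * simple LIFO and callers pair the two helpers, e.g.
 *
 *	semaphore_t s = new_sem_from_pool();
 *	... use s as a one-shot wakeup (e.g. joiner_notify) ...
 *	restore_sem_to_pool(s);
 *
 * so Mach semaphores are created once and recycled instead of being
 * destroyed after every use.
 */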
__private_extern__ void _pthread_fork_child(pthread_t p) {
	/* Just in case somebody had it locked... */
	UNLOCK(sem_pool_lock);
	sem_pool_reset();
	/* No need to hold the pthread_list_lock as no one other than this
	 * thread is present at this time
	 */
	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
	_pthread_count = 1;
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
	pthread_t self = pthread_self();

	switch (state) {
		case PTHREAD_CANCEL_ENABLE:
			if (conforming)
				__pthread_canceled(1);
			break;
		case PTHREAD_CANCEL_DISABLE:
			if (conforming)
				__pthread_canceled(2);
			break;
		default:
			return EINVAL;
	}

	self = pthread_self();
	LOCK(self->lock);
	if (oldstate)
		*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;
	UNLOCK(self->lock);
	if (!conforming)
		_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
	return (0);
}
/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
{
	LOCK(self->lock);
	self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
	self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
	if (value_ptr == PTHREAD_CANCELED) {
		self->detached |= _PTHREAD_WASCANCEL;
	}
	UNLOCK(self->lock);
}
int
_pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
{
	kern_return_t res;
	int detached = 0, ret;

	__kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
	/* This covers the scenario where the joiner was waiting for the thread
	 * and pthread_detach() happened on that thread. The semaphore will then
	 * trigger, but by the time the joiner runs the target thread could be
	 * freed. So we need to make sure that the thread is still in the list
	 * and is joinable before we continue with the join.
	 */
	LOCK(_pthread_list_lock);
	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
		/* the thread might be a detached thread */
		UNLOCK(_pthread_list_lock);
		return (ESRCH);
	}
	/* It is still a joinable thread and needs to be reaped */
	TAILQ_REMOVE(&__pthread_head, thread, plist);
	__kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
	UNLOCK(_pthread_list_lock);

	if (value_ptr)
		*value_ptr = thread->exit_value;
	if (conforming) {
		if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) {
			*value_ptr = PTHREAD_CANCELED;
		}
	}
	if (thread->reply_port != MACH_PORT_NULL) {
		res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
		if (res != KERN_SUCCESS)
			fprintf(stderr,"mach_port_mod_refs(reply_port) failed: %s\n",mach_error_string(res));
		thread->reply_port = MACH_PORT_NULL;
	}
	if (thread->freeStackOnExit) {
		thread->sig = _PTHREAD_NO_SIG;
		__kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
		vm_deallocate(mach_task_self(), (vm_address_t)thread, pthreadsize);
	} else {
		thread->sig = _PTHREAD_NO_SIG;
		__kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
		free(thread);
	}
	return (0);
}
/* ALWAYS called with list lock and return with list lock */
int
_pthread_find_thread(pthread_t thread)
{
	pthread_t p;

loop:
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p == thread) {
			if (thread->kernel_thread == MACH_PORT_NULL) {
				UNLOCK(_pthread_list_lock);
				sched_yield();
				LOCK(_pthread_list_lock);
				goto loop;
			}
			return (0);
		}
	}
	return (ESRCH);
}
int
_pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
{
	mach_port_t kport;
	int ret = 0;

	if (thread == NULL)
		return (ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
		UNLOCK(_pthread_list_lock);
		return (EINVAL);
	}
	kport = thread->kernel_thread;
	UNLOCK(_pthread_list_lock);
	if (portp != NULL)
		*portp = kport;
	return (0);
}
/* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
{
	attrp->stacksize = DEFAULT_STACK_SIZE;
	attrp->istimeshare = 1;
	attrp->importance = 0;
	attrp->affinity = 0;
	attrp->queueprio = WORK_QUEUE_NORMALIZER;
	attrp->sig = PTHEAD_WRKQUEUE_ATTR_SIG;
	return (0);
}

int
pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG)
	{
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
#ifdef NOTYET /* [ */
int
pthread_workqueue_attr_getstacksize_np(const pthread_workqueue_attr_t * attr, size_t * stacksizep)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*stacksizep = attr->stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_setstacksize_np(pthread_workqueue_attr_t * attr, size_t stacksize)
{
	if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stacksize = stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_getthreadtimeshare_np(const pthread_workqueue_attr_t * attr, int * istimesharep)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*istimesharep = attr->istimeshare;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_settthreadtimeshare_np(pthread_workqueue_attr_t * attr, int istimeshare)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		if (istimeshare != 0)
			attr->istimeshare = istimeshare;
		else
			attr->istimeshare = 0;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_getthreadimportance_np(const pthread_workqueue_attr_t * attr, int * importancep)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*importancep = attr->importance;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_settthreadimportance_np(pthread_workqueue_attr_t * attr, int importance)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
		attr->importance = importance;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_getthreadaffinity_np(const pthread_workqueue_attr_t * attr, int * affinityp)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*affinityp = attr->affinity;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_settthreadaffinity_np(pthread_workqueue_attr_t * attr, int affinity)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
		attr->affinity = affinity;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

#endif /* NOTYET ] */
int
pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*qpriop = (attr->queueprio - WORK_QUEUE_NORMALIZER);
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}

int
pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
{
	/* only -2 to +2 is valid */
	if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && (qprio <= 2) && (qprio >= -2)) {
		attr->queueprio = (qprio + WORK_QUEUE_NORMALIZER);
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
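/*
 * Illustrative note, not part of the original source: the public queue
 * priority is biased by WORK_QUEUE_NORMALIZER so the stored field stays
 * non-negative and can index the priority-queue table. Assuming a
 * normalizer value of 2, the caller-visible range -2..+2 maps to a
 * stored 0..4, e.g.:
 *
 *	pthread_workqueue_attr_setqueuepriority_np(&wqattr, -2);
 *	// stores (-2 + WORK_QUEUE_NORMALIZER); the getter subtracts
 *	// the normalizer back out and returns -2 again.
 */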
/* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */

static void
workqueue_list_lock(void)
{
	OSSpinLockLock(&__workqueue_list_lock);
}

static void
workqueue_list_unlock(void)
{
	OSSpinLockUnlock(&__workqueue_list_lock);
}

int
pthread_workqueue_init_np(void)
{
	int ret;

	workqueue_list_lock();
	ret = _pthread_work_internal_init();
	workqueue_list_unlock();

	return (ret);
}
static int
_pthread_work_internal_init(void)
{
	int i, error;
	pthread_workqueue_head_t headp;
	pthread_workitem_t witemp;
	pthread_workqueue_t wq;

	if (kernel_workq_setup == 0) {
#if defined(__i386__) || defined(__x86_64__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
#endif

		_pthread_wq_attr_default.stacksize = DEFAULT_STACK_SIZE;
		_pthread_wq_attr_default.istimeshare = 1;
		_pthread_wq_attr_default.importance = 0;
		_pthread_wq_attr_default.affinity = 0;
		_pthread_wq_attr_default.queueprio = WORK_QUEUE_NORMALIZER;
		_pthread_wq_attr_default.sig = PTHEAD_WRKQUEUE_ATTR_SIG;

		for( i = 0; i < WQ_NUM_PRIO_QS; i++) {
			headp = __pthread_wq_head_tbl[i];
			TAILQ_INIT(&headp->wqhead);
			headp->next_workq = 0;
		}

		/* create work item and workqueue pools */
		witemp = (struct _pthread_workitem *)malloc(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
		bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE));
		for (i = 0; i < WORKITEM_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
		}
		wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
		bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
		for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
		}

		if ((error = __workq_open()) != 0) {
			TAILQ_INIT(&__pthread_workitem_pool_head);
			TAILQ_INIT(&__pthread_workqueue_pool_head);
			free(witemp);
			free(wq);
			return (ENOMEM);
		}

		kernel_workq_setup = 1;
	}
	return (0);
}
2351 /* This routine is called with list lock held */
2352 static pthread_workitem_t
2353 alloc_workitem(void)
2355 pthread_workitem_t witem
;
2357 if (TAILQ_EMPTY(&__pthread_workitem_pool_head
)) {
2358 workqueue_list_unlock();
2359 witem
= malloc(sizeof(struct _pthread_workitem
));
2360 workqueue_list_lock();
2362 witem
= TAILQ_FIRST(&__pthread_workitem_pool_head
);
2363 TAILQ_REMOVE(&__pthread_workitem_pool_head
, witem
, item_entry
);
2368 /* This routine is called with list lock held */
2370 free_workitem(pthread_workitem_t witem
)
2372 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head
, witem
, item_entry
);
2375 /* This routine is called with list lock held */
2376 static pthread_workqueue_t
2377 alloc_workqueue(void)
2379 pthread_workqueue_t wq
;
2381 if (TAILQ_EMPTY(&__pthread_workqueue_pool_head
)) {
2382 workqueue_list_unlock();
2383 wq
= malloc(sizeof(struct _pthread_workqueue
));
2384 workqueue_list_lock();
2386 wq
= TAILQ_FIRST(&__pthread_workqueue_pool_head
);
2387 TAILQ_REMOVE(&__pthread_workqueue_pool_head
, wq
, wq_list
);
/* This routine is called with list lock held */
static void
free_workqueue(pthread_workqueue_t wq)
{
	TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
}
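/*
 * The four routines above implement a simple free-list cache: the pools are
 * pre-sized in _pthread_work_internal_init(), an allocation takes the TAILQ
 * head when the pool is non-empty and falls back to malloc() (dropping the
 * list lock around the call) only when it is empty, and freed entries are
 * recycled onto the tail rather than returned to the allocator.
 */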
static void
_pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
{
	bzero(wq, sizeof(struct _pthread_workqueue));

	if (attr != NULL) {
		wq->stacksize = attr->stacksize;
		wq->istimeshare = attr->istimeshare;
		wq->importance = attr->importance;
		wq->affinity = attr->affinity;
		wq->queueprio = attr->queueprio;
	} else {
		wq->stacksize = DEFAULT_STACK_SIZE;
		wq->istimeshare = 1;
		wq->importance = 0;
		wq->affinity = 0;
		wq->queueprio = WORK_QUEUE_NORMALIZER;
	}
	LOCK_INIT(wq->lock);
	TAILQ_INIT(&wq->item_listhead);
	TAILQ_INIT(&wq->item_kernhead);
	wq->wq_list.tqe_next = 0;
	wq->wq_list.tqe_prev = 0;
	wq->sig = PTHEAD_WRKQUEUE_SIG;
	wq->headp = __pthread_wq_head_tbl[wq->queueprio];
}
static int
valid_workq(pthread_workqueue_t workq)
{
	if (workq->sig == PTHEAD_WRKQUEUE_SIG)
		return(1);
	else
		return(0);
}
/* called with list lock */
static void
pick_nextworkqueue_droplock(void)
{
	int i, curwqprio, val, found;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t workq;
	pthread_workqueue_t nworkq = NULL;

loop:
	while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
		found = 0;
		for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
			wqreadyprio = i;	/* because there is nothing else higher to run */
			headp = __pthread_wq_head_tbl[i];

			if (TAILQ_EMPTY(&headp->wqhead))
				continue;
			workq = headp->next_workq;
			if (workq == NULL)
				workq = TAILQ_FIRST(&headp->wqhead);
			curwqprio = workq->queueprio;
			nworkq = workq;	/* starting pt */
			while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL)
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
				val = post_nextworkitem(workq);

				if (val != 0) {
					/* things could have changed, so reassess */
					/* If the kernel queue is full, skip */
					if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
						break;
					/* If anything with higher prio arrived, then reevaluate */
					if (wqreadyprio < curwqprio)
						goto loop;	/* we need to reevaluate */
					/* we can post some more work items */
					found = 1;
				}

				/* cannot use workq here as it could be freed */
				if (TAILQ_EMPTY(&headp->wqhead))
					break;
				/* if we found nothing to run and there is only one workqueue in the list, skip */
				if ((val == 0) && (workq == headp->next_workq))
					break;
				workq = headp->next_workq;
				if (workq == NULL)
					workq = TAILQ_FIRST(&headp->wqhead);
				curwqprio = workq->queueprio;
				/* if we found nothing to run and are back at the workq where we started */
				if ((val == 0) && (workq == nworkq))
					break;
			}
			if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
				break;
		}
		/* nothing found to run? */
		if (found == 0)
			break;
	}
	workqueue_list_unlock();
}
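/*
 * Scheduling summary for the routine above: starting at the highest-priority
 * bucket, the loop round-robins through the workqueues in each bucket,
 * posting one item at a time to the kernel until KERNEL_WORKQ_ELEM_MAX items
 * are outstanding, a higher-priority queue becomes ready (wqreadyprio), or
 * nothing further is runnable. The list lock is dropped on exit, hence the
 * name.
 */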
static int
post_nextworkitem(pthread_workqueue_t workq)
{
	int error;
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
		return(0);
	}
	if (TAILQ_EMPTY(&workq->item_listhead)) {
		return(0);
	}
	witem = TAILQ_FIRST(&workq->item_listhead);
	headp = workq->headp;
	if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		/* Also a barrier when nothing is pending needs to be handled */
		/* Nothing to wait for */
		if (workq->kq_count != 0) {
			witem->flags |= PTH_WQITEM_APPLIED;
			workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
			workq->barrier_count = workq->kq_count;
#if WQ_TRACE
			__kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		} else {
#if WQ_TRACE
			__kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
#endif
			if (witem->func != NULL) {
				workqueue_list_unlock();
				func = witem->func;
				(*func)(workq, witem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
			free_workitem(witem);
			return(1);
		}
	} else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
#if WQ_TRACE
		__kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		witem->flags |= PTH_WQITEM_APPLIED;
		workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
		workq->barrier_count = workq->kq_count;
		workq->term_callback = witem->func;
		workq->term_callarg = witem->func_arg;
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
			if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
#endif
			}
			free_workitem(witem);
			workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
#endif
			headp = __pthread_wq_head_tbl[workq->queueprio];
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			workq->sig = 0;
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
			free_workqueue(workq);
			return(1);
		} else {
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_TRACE
			__kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
#endif
		}
		return(1);
	} else {
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
#endif
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
		TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
		if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
			workq->kq_count++;
			witem->flags |= PTH_WQITEM_KERN_COUNT;
		}
		OSAtomicIncrement32(&kernel_workq_count);
		workqueue_list_unlock();
		if ((error = __workq_ops(WQOPS_QUEUE_ADD, witem, 0)) == -1) {
			/* the kernel rejected the item: back it out and requeue */
			OSAtomicDecrement32(&kernel_workq_count);
			workqueue_list_lock();
#if WQ_TRACE
			__kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
			TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
			if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
				workq->flags |= PTHREAD_WORKQ_REQUEUED;
		} else
			workqueue_list_lock();
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
		return(1);
	}
	/* no one should come here */
	printf("error in logic for next workitem\n");
	return(0);
}
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
{
	void * ret;
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workqueue_t workq;
#if WQ_DEBUG
	pthread_t pself;
#endif

	workq = item->workq;
	if (reuse == 0) {
		/* reuse is 0 when a thread is newly created to run a workitem */
		_pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
		self->parentcheck = 1;

		/* These are not joinable threads */
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;
#if defined(__i386__) || defined(__x86_64__)
		_pthread_set_self(self);
#endif
#if WQ_TRACE
		__kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
#endif
		self->kernel_thread = kport;
		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
		/* Add to the pthread list */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if WQ_TRACE
		__kdebug_trace(0x900000c, self, 0, 0, 10, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
	} else {
		/* reuse is 1 when a thread is reused to run another work item */
#if WQ_TRACE
		__kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
#endif
		/* reset all tsd from 1 to KEYS_MAX */
		_pthread_tsd_reinit(self);

		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
	}

#if WQ_DEBUG
	if (reuse == 0) {
		pself = pthread_self();
		if (self != pself) {
#if WQ_TRACE
			__kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
#endif
			printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
			_pthread_set_self(self);
			pself = pthread_self();
			if (self != pself)
				printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
		}
	} else {
		pself = pthread_self();
		if (self != pself) {
			printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
		}
	}
#endif /* WQ_DEBUG */

	self->cur_workq = workq;
	self->cur_workitem = item;
	OSAtomicDecrement32(&kernel_workq_count);

	ret = (*self->fun)(self->arg);

	workqueue_exit(self, workq, item);
}
static void
workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	workqueue_list_lock();

	TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
	workq->kq_count--;
#if WQ_TRACE
	__kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
#endif
	free_workitem(item);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
#if WQ_TRACE
		__kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
#endif
		if (workq->barrier_count <= 0) {
			/* Need to remove barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY | PTH_WQITEM_APPLIED)) == 0)
				printf("Incorrect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and a callback is registered, run it */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
			free_workitem(baritem);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
#if WQ_TRACE
			__kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
#endif
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
#endif
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				workq->sig = 0;
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
			} else {
				/* if there is a higher prio schedulable item, reset wqreadyprio */
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
					wqreadyprio = workq->queueprio;
			}
		}
	}
#if WQ_TRACE
	else {
		__kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
	}

	__kdebug_trace(0x900005c, self, item, 0, 0, 0);
#endif /* WQ_TRACE */

	pick_nextworkqueue_droplock();
	_pthread_workq_return(self);
}
static void
_pthread_workq_return(pthread_t self)
{
	struct __darwin_pthread_handler_rec *handler;
	int value = 0;
	int * value_ptr = &value;

	/* set cancel state to disable and type to deferred */
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	/* keep this thread from receiving any more signals */
	__disable_threadsignal(1);

	/* pop and run any remaining cleanup handlers */
	while ((handler = self->__cleanup_stack) != 0) {
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	__workq_ops(WQOPS_THREAD_RETURN, NULL, 0);

	/* This is the way to terminate the thread */
	_pthread_exit(self, NULL);
}
/* returns 0 if it handles it, otherwise 1 */
static int
handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
		if (workq->barrier_count <= 0) {
			/* Need to remove barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY | PTH_WQITEM_APPLIED)) == 0)
				printf("Incorrect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and a callback is registered, run it */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER)
			    && (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
			free_workitem(baritem);
			free_workitem(item);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
#if WQ_TRACE
			__kdebug_trace(0x9000058, pthread_self(), item, item->func_arg, 0, 0);
#endif
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
#endif
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				workq->sig = 0;
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
				pick_nextworkqueue_droplock();
				return(0);
			}
			/* if there is a higher prio schedulable item, reset wqreadyprio */
			if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
				wqreadyprio = workq->queueprio;
			/* note: the removed item was already returned to the pool above */
			pick_nextworkqueue_droplock();
			return(0);
		}
	}
	return(1);
}
/* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
{
	pthread_workqueue_t wq;
	pthread_workqueue_head_t headp;

	if ((attr != NULL) && (attr->sig != PTHEAD_WRKQUEUE_ATTR_SIG)) {
		return(EINVAL);
	}

	if (__is_threaded == 0)
		__is_threaded = 1;

	workqueue_list_lock();
	if (kernel_workq_setup == 0) {
		int ret = _pthread_work_internal_init();
		if (ret != 0) {
			workqueue_list_unlock();
			return(ret);
		}
	}

	wq = alloc_workqueue();

	_pthread_workq_init(wq, attr);

	headp = __pthread_wq_head_tbl[wq->queueprio];
	TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
	if (headp->next_workq == NULL) {
		headp->next_workq = TAILQ_FIRST(&headp->wqhead);
	}

	workqueue_list_unlock();

	*workqp = wq;

	return(0);
}
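/*
 * Illustrative sketch (not part of the library): typical creation of a
 * workqueue with default attributes. Error handling is minimal and the
 * header exporting these _np symbols is assumed.
 *
 *	pthread_workqueue_t wq;
 *	int err = pthread_workqueue_create_np(&wq, NULL);
 *	if (err != 0)
 *		fprintf(stderr, "workqueue create failed: %d\n", err);
 */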
int
pthread_workqueue_destroy_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg)
{
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;
	witem->func = callback_func;
	witem->func_arg = callback_arg;
	witem->flags = PTH_WQITEM_DESTROY;

	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED)) == 0) {
		workq->flags |= PTHREAD_WORKQ_IN_TERMINATE;
		/* If nothing queued or running, destroy now */
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (TAILQ_EMPTY(&workq->item_kernhead))) {
			workq->flags |= (PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED);
			headp = __pthread_wq_head_tbl[workq->queueprio];
			workq->term_callback = callback_func;
			workq->term_callarg = callback_arg;
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			workq->sig = 0;
			free_workitem(witem);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
#if WQ_TRACE
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 3, 0);
#endif
			free_workqueue(workq);
			workqueue_list_unlock();
			return(0);
		}
		/* otherwise queue a DESTROY item to run after pending work drains */
		TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
	} else {
		free_workitem(witem);
		workqueue_list_unlock();
		return(EINPROGRESS);
	}
	workqueue_list_unlock();
	return(0);
}
int
pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep)
{
	pthread_workitem_t witem;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->func = (void (*)(pthread_workqueue_t, void *))workitem_func;
	witem->func_arg = workitem_arg;
	witem->flags = 0;
	witem->workq = workq;
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;

	/* alloc workitem can drop the lock, check the state */
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		free_workitem(witem);
		workqueue_list_unlock();
		return(ESRCH);
	}

	if (itemhandlep != NULL)
		*itemhandlep = (pthread_workitem_handle_t *)witem;
	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
		wqreadyprio = workq->queueprio;

	pick_nextworkqueue_droplock();

	return(0);
}
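/*
 * Illustrative sketch (not part of the library): queueing a work item on an
 * existing workqueue. my_work_fn and my_arg are hypothetical names.
 *
 *	static void my_work_fn(void * arg) { do_work(arg); }	// do_work is hypothetical
 *
 *	pthread_workitem_handle_t handle;
 *	pthread_workqueue_additem_np(wq, my_work_fn, my_arg, &handle);
 */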
int
pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle)
{
	pthread_workitem_t item;
	pthread_workqueue_head_t headp;
	int error;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		workqueue_list_unlock();
		return(ESRCH);
	}

	/* the item may still be on the user-level list ... */
	TAILQ_FOREACH(item, &workq->item_listhead, item_entry) {
		if (item == (pthread_workitem_t)itemhandle) {
			TAILQ_REMOVE(&workq->item_listhead, item, item_entry);
			if ((item->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) == (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) {
				workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
				workq->barrier_count = 0;
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead)))) {
					wqreadyprio = workq->queueprio;
				}
			} else if ((item->flags & PTH_WQITEM_KERN_COUNT) == PTH_WQITEM_KERN_COUNT) {
				workq->kq_count--;
				item->flags |= PTH_WQITEM_REMOVED;
				if (handle_removeitem(workq, item) == 0)
					return(0);
			}
			item->flags |= PTH_WQITEM_NOTINLIST;
			free_workitem(item);
			workqueue_list_unlock();
			return(0);
		}
	}

	/* ... or it may already have been handed to the kernel */
	TAILQ_FOREACH(item, &workq->item_kernhead, item_entry) {
		if (item == (pthread_workitem_t)itemhandle) {
			workqueue_list_unlock();
			if ((error = __workq_ops(WQOPS_QUEUE_REMOVE, item, 0)) == 0) {
				workqueue_list_lock();
				TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
				OSAtomicDecrement32(&kernel_workq_count);
				workq->kq_count--;
				item->flags |= PTH_WQITEM_REMOVED;
				if (handle_removeitem(workq, item) != 0) {
					free_workitem(item);
					pick_nextworkqueue_droplock();
				}
				return(0);
			} else {
				/* the kernel already owns the item; it cannot be removed now */
				return(EBUSY);
			}
		}
	}
	workqueue_list_unlock();
	return(ESRCH);
}
int
pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, __unused int waitforcallback, pthread_workitem_handle_t *itemhandlep)
{
	pthread_workitem_t witem;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;
	witem->func = callback_func;
	witem->func_arg = callback_arg;
	witem->flags = PTH_WQITEM_BARRIER;

	/* alloc workitem can drop the lock, check the state */
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		free_workitem(witem);
		workqueue_list_unlock();
		return(ESRCH);
	}

	if (itemhandlep != NULL)
		*itemhandlep = (pthread_workitem_handle_t *)witem;
	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
		wqreadyprio = workq->queueprio;

	pick_nextworkqueue_droplock();

	return(0);
}
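/*
 * Illustrative sketch (not part of the library): a barrier fences the items
 * queued before it; barrier_done (hypothetical) runs once they have drained.
 *
 *	static void barrier_done(pthread_workqueue_t q, void * arg) { notify(arg); }	// notify is hypothetical
 *
 *	pthread_workitem_handle_t bh;
 *	pthread_workqueue_addbarrier_np(wq, barrier_done, NULL, 0, &bh);
 */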
int
pthread_workqueue_suspend_np(pthread_workqueue_t workq)
{
	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}
	workqueue_list_lock();
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		workqueue_list_unlock();
		return(ESRCH);
	}

	workq->flags |= PTHREAD_WORKQ_SUSPEND;
	workq->suspend_count++;
	workqueue_list_unlock();
	return(0);
}
int
pthread_workqueue_resume_np(pthread_workqueue_t workq)
{
	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}
	workqueue_list_lock();
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		workqueue_list_unlock();
		return(ESRCH);
	}

	workq->suspend_count--;
	if (workq->suspend_count <= 0) {
		workq->flags &= ~PTHREAD_WORKQ_SUSPEND;
		if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
			wqreadyprio = workq->queueprio;

		pick_nextworkqueue_droplock();
	} else {
		workqueue_list_unlock();
	}
	return(0);
}
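/*
 * Illustrative sketch (not part of the library): suspends nest via
 * suspend_count, so every suspend must be balanced by a resume before the
 * queue posts items again.
 *
 *	pthread_workqueue_suspend_np(wq);	// queue stops posting new items
 *	update_shared_state();			// hypothetical
 *	pthread_workqueue_resume_np(wq);	// suspend_count back to 0; queue runs
 */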
#else /* !BUILDING_VARIANT ] [ */
extern int __unix_conforming;
extern int _pthread_count;
extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);

#endif /* !BUILDING_VARIANT ] */
__private_extern__ void
__posix_join_cleanup(void *arg)
{
	pthread_t thread = (pthread_t)arg;
	int already_exited, res;
	void * dummy;
	semaphore_t death;
	mach_port_t joinport;
	int newstyle;

	LOCK(thread->lock);
	already_exited = (thread->detached & _PTHREAD_EXITED);

	newstyle = thread->newstyle;

#if WQ_TRACE
	__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
#endif
	if (newstyle == 0) {
		death = thread->death;
		if (!already_exited){
			thread->joiner = (struct _pthread *)NULL;
			UNLOCK(thread->lock);
			restore_sem_to_pool(death);
		} else {
			UNLOCK(thread->lock);
			while ((res = _pthread_reap_thread(thread,
					thread->kernel_thread,
					&dummy, 1)) == EAGAIN) {
				sched_yield();
			}
			restore_sem_to_pool(death);
		}
	} else {
		/* leave another thread to join */
		thread->joiner = (struct _pthread *)NULL;
		UNLOCK(thread->lock);
	}
}
/*
 * Wait for a thread to terminate and obtain its exit value.
 */
/*
int
pthread_join(pthread_t thread, void **value_ptr)

moved to pthread_cancelable.c */
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
	int state;

	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if (_pthread_lookup_thread(thread, NULL, 0) != 0)
		return(ESRCH);

#if __DARWIN_UNIX03
	LOCK(thread->lock);
	state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
	UNLOCK(thread->lock);
	if (state & PTHREAD_CANCEL_ENABLE)
		__pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
	thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
	return (0);
}
void
pthread_testcancel(void)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
	_pthread_testcancel(self, 1);
#else /* __DARWIN_UNIX03 */
	_pthread_testcancel(self, 0);
#endif /* __DARWIN_UNIX03 */
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	return (_pthread_setcancelstate_internal(state, oldstate, 1));
#else /* __DARWIN_UNIX03 */
	return (_pthread_setcancelstate_internal(state, oldstate, 0));
#endif /* __DARWIN_UNIX03 */
}
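/*
 * Illustrative sketch (not part of the library): callers typically disable
 * cancellation around non-reentrant work and then restore the prior state.
 * do_critical_work is hypothetical.
 *
 *	int old;
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
 *	do_critical_work();
 *	pthread_setcancelstate(old, NULL);
 *	pthread_testcancel();			// act on any pending cancel
 */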
/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if ((type != PTHREAD_CANCEL_DEFERRED) &&
	    (type != PTHREAD_CANCEL_ASYNCHRONOUS))
		return EINVAL;
	self = pthread_self();
	LOCK(self->lock);
	if (oldtype)
		*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state |= type;
	UNLOCK(self->lock);
#if !__DARWIN_UNIX03
	_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
#endif /* !__DARWIN_UNIX03 */
	return (0);
}
int
pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
{
#if __DARWIN_UNIX03
	int err = 0;

	if (__pthread_sigmask(how, set, oset) == -1) {
		err = errno;
	}
	return(err);
#else /* __DARWIN_UNIX03 */
	return(__pthread_sigmask(how, set, oset));
#endif /* __DARWIN_UNIX03 */
}
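/*
 * Illustrative sketch (not part of the library): blocking SIGUSR1 in the
 * calling thread; unlike sigprocmask(), this is the thread-scoped call.
 *
 *	sigset_t mask;
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGUSR1);
 *	pthread_sigmask(SIG_BLOCK, &mask, NULL);
 */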
/*
int
sigwait(const sigset_t * set, int * sig)

moved to pthread_cancelable.c */