/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */
#include "pthread_internals.h"
#include "pthread_workqueue.h"

#include <stdio.h>	/* For printf(). */
#include <errno.h>	/* For __mach_errno_addr() prototype. */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#define	__APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#include <libkern/OSAtomic.h>
#include <libkern/OSCrossEndian.h>

#ifndef BUILDING_VARIANT /* [ */
__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void *stack, size_t stacksize, int kernalloc, int nozero);
static void _pthread_tsd_reinit(pthread_t t);
static int _new_pthread_create_suspended(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg,
		int create_susp);

/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;
int _pthread_find_thread(pthread_t thread);

/* Mach message used to notify that a thread needs to be reaped */

typedef struct _pthread_reap_msg_t {
	mach_msg_header_t header;
	pthread_t thread;
	mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;
int __unix_conforming = 0;
__private_extern__ size_t pthreadsize = 0;

/* under rosetta we will use old style creation of threads */
static int __oldstyle = 0;

__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
/* Same implementation as LOCK, but without the __is_threaded check */
__private_extern__ void
_spin_lock_retry(pthread_lock_t *lock)
{
	int tries = _spin_tries;

	do {
		if (tries-- > 0)
			continue;
		syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
		tries = _spin_tries;
	} while (!_spin_lock_try(lock));
}

extern mach_port_t thread_recycle_port;
/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
static void _pthread_exit(pthread_t self, void *value_ptr);
int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static pthread_attr_t _pthread_attr_default = {0};
static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t *attr);
static int handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item);
static int kernel_workq_setup = 0;
static volatile int32_t kernel_workq_count = 0;
static volatile unsigned int user_workq_count = 0;
#define KERNEL_WORKQ_ELEM_MAX	64	/* Max number of elements in the kernel */
static int wqreadyprio = 0;	/* current highest prio queue ready with items */

__private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
__private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);

struct _pthread_workqueue_head __pthread_workq0_head;
struct _pthread_workqueue_head __pthread_workq1_head;
struct _pthread_workqueue_head __pthread_workq2_head;
struct _pthread_workqueue_head __pthread_workq3_head;
struct _pthread_workqueue_head __pthread_workq4_head;
pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head, &__pthread_workq4_head};

static void workqueue_list_lock(void);
static void workqueue_list_unlock(void);
static int valid_workq(pthread_workqueue_t);
static void pick_nextworkqueue_droplock(void);
static int post_nextworkitem(pthread_workqueue_t workq);
static void _pthread_workq_return(pthread_t self);
static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
void _pthread_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, pthread_workitem_t item, int reuse);
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, pthread_workitem_t item, int reuse);
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, int flags);
static pthread_workitem_t alloc_workitem(void);
static void free_workitem(pthread_workitem_t);
static pthread_workqueue_t alloc_workqueue(void);
static void free_workqueue(pthread_workqueue_t);
static int _pthread_work_internal_init(void);
static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);

/* workq_ops commands */
#define WQOPS_QUEUE_ADD		1
#define WQOPS_QUEUE_REMOVE	2
#define WQOPS_THREAD_RETURN	4
/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags);

#define PTHREAD_START_CUSTOM		0x01000000
#define PTHREAD_START_SETSCHED		0x02000000
#define PTHREAD_START_DETACHED		0x04000000
#define PTHREAD_START_POLICY_BITSHIFT	16
#define PTHREAD_START_POLICY_MASK	0xff
#define PTHREAD_START_IMPORTANCE_MASK	0xffff
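
/*
 * Illustrative sketch (not part of the original source): how the flags word
 * described above is packed before being handed to __bsdthread_create().
 * This mirrors the packing done later in _new_pthread_create_suspended();
 * the helper name is hypothetical.
 */
static unsigned int
_example_pack_bsdthread_flags(const pthread_attr_t *attrs)
{
	unsigned int flags = 0;

	if (attrs->detached == PTHREAD_CREATE_DETACHED)
		flags |= PTHREAD_START_DETACHED;	/* flags byte, bits 24-31 */
	if (attrs->schedset != 0) {
		flags |= PTHREAD_START_SETSCHED;
		/* policy byte, bits 16-23 */
		flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
		/* importance (priority) in the low 16 bits */
		flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
	}
	return flags;
}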
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
extern pthread_t __bsdthread_create(void (*func)(void *), void *func_arg, void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);

#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__) || defined(__x86_64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#elif defined(__arm__)
static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
#else
#error Need to define a stack address hint for this architecture
#endif
/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
 * The guard pages for stack-overflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr), then no guard pages
 * are set up for the thread.
 */
static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
	kern_return_t kr;
	vm_address_t stackaddr;
	size_t guardsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);
	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		return 0;
	}

	guardsize = attrs->guardsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
			attrs->stacksize + guardsize,
			vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
				&stackaddr, attrs->stacksize + guardsize,
				VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}
	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);
	return 0;
}
static int
_pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
{
	pthread_t t;
	kern_return_t kr;
	vm_address_t stackaddr;
	size_t guardsize, allocsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);

	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		t = (pthread_t)malloc(pthreadsize);
		_pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
		t->freeStackOnExit = 0;
		*thread = t;
		return 0;
	}

	guardsize = attrs->guardsize;
	allocsize = attrs->stacksize + guardsize + pthreadsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
			allocsize,
			vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
				&stackaddr, allocsize,
				VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}
	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);

	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);

	t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
	_pthread_struct_init(t, attrs, *stack, 0, 0, 1);
	t->freesize = allocsize;
	t->freeaddr = (void *)stackaddr;
	t->freeStackOnExit = 1;
	*thread = t;
	return 0;
}
static kern_return_t
_pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
{
	kern_return_t res = 0;
	vm_address_t freeaddr;
	size_t freesize;
	int thread_count;
	mach_port_t kport;
	task_t self = mach_task_self();
	semaphore_t joinsem = SEMAPHORE_NULL;

	__kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
	kport = t->kernel_thread;
	joinsem = t->joiner_notify;

	if (t->freeStackOnExit) {
		freeaddr = (vm_address_t)t->freeaddr;
		if (freestruct)
			freesize = t->stacksize + t->guardsize + pthreadsize;
		else
			freesize = t->stacksize + t->guardsize;
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				/* if parent has not returned from create yet keep pthread_t */
				__kdebug_trace(0x9000010, t, 0, 0, 1, 0);
				if (t->parentcheck == 0)
					freesize -= pthreadsize;
			}
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);

			__kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
			if (thread_count <= 0)
				exit(0);
			else
				__bsdthread_terminate(freeaddr, freesize, kport, joinsem);
		} else {
			__kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
			res = vm_deallocate(mach_task_self(), freeaddr, freesize);
		}
	} else {
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				__kdebug_trace(0x9000010, t, 0, 0, 2, 0);
			}
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
			if (freestruct) {
				__kdebug_trace(0x9000008, t, 0, 0, 2, 0);
				free(t);
			}
			__kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
			if (thread_count <= 0)
				exit(0);
			else
				__bsdthread_terminate(NULL, 0, kport, joinsem);
		} else if (freestruct) {
			t->sig = _PTHREAD_NO_SIG;
			__kdebug_trace(0x9000024, t, 0, 0, 2, 0);
			free(t);
		}
	}
	return (res);
}
/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		attr->sig = 0;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*detachstate = attr->detached;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*inheritsched = attr->inherit;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*param = attr->param;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*policy = attr->policy;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}
/* Retain the existing stack size of 512K and do not depend on the main thread's default stack size */
static const size_t DEFAULT_STACK_SIZE = (512 * 1024);

/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
	attr->stacksize = DEFAULT_STACK_SIZE;
	attr->stackaddr = NULL;
	attr->sig = _PTHREAD_ATTR_SIG;
	attr->param.sched_priority = default_priority;
	attr->param.quantum = 10; /* quantum isn't public yet */
	attr->detached = PTHREAD_CREATE_JOINABLE;
	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->freeStackOnExit = 1;
	attr->fastpath = 1;
	attr->schedset = 0;
	attr->guardsize = vm_page_size;
	return (0);
}
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
		    (detachstate == PTHREAD_CREATE_DETACHED)) {
			attr->detached = detachstate;
			return (0);
		} else
			return (EINVAL);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
		    (inheritsched == PTHREAD_EXPLICIT_SCHED)) {
			attr->inherit = inheritsched;
			return (0);
		} else
			return (EINVAL);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if ((policy == SCHED_OTHER) ||
		    (policy == SCHED_RR) ||
		    (policy == SCHED_FIFO)) {
			attr->policy = policy;
			attr->schedset = 1;
			return (0);
		} else
			return (EINVAL);
	} else
		return (EINVAL); /* Not an attribute structure! */
}
/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			/* No attribute yet for the scope */
			return (0);
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			return (ENOTSUP);
		}
	}
	return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
		attr->stackaddr = stackaddr;
		attr->freeStackOnExit = 0;
		attr->fastpath = 0;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = attr->stacksize;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stacksize = stacksize;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}
int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = attr->stacksize;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

/* By the SUSv3 spec, the stackaddr is the base address, the lowest addressable
 * byte address. This is not the same as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) &&
	    (((uintptr_t)stackaddr % vm_page_size) == 0) &&
	    ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		attr->freeStackOnExit = 0;
		attr->fastpath = 0;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}
/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* Guardsize of 0 is valid; it means no guard */
		if ((guardsize % vm_page_size) == 0) {
			attr->guardsize = guardsize;
			attr->fastpath = 0;
			return (0);
		} else
			return (EINVAL);
	}
	return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = attr->guardsize;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Create and start execution of a new thread.
 */
static void
_pthread_body(pthread_t self)
{
	_pthread_set_self(self);
	_pthread_exit(self, (self->fun)(self->arg));
}

void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int pflags)
{
	pthread_t pself;
	pthread_attr_t *attrs = &_pthread_attr_default;
	void *stackaddr = self;

	if ((pflags & PTHREAD_START_CUSTOM) == 0) {
		_pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
		LOCK(_pthread_list_lock);
		if (pflags & PTHREAD_START_SETSCHED) {
			self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
			self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
		}
		/* These are not joinable threads */
		if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
			self->detached &= ~PTHREAD_CREATE_JOINABLE;
			self->detached |= PTHREAD_CREATE_DETACHED;
		}
	} else
		LOCK(_pthread_list_lock);
	self->kernel_thread = kport;
	self->fun = fun;
	self->arg = funarg;

	/* Add to the pthread list */
	if (self->parentcheck == 0) {
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
		__kdebug_trace(0x900000c, self, 0, 0, 3, 0);
		_pthread_count++;
	}
	UNLOCK(_pthread_list_lock);
#if defined(__i386__) || defined(__x86_64__)
	_pthread_set_self(self);
#endif
	pself = pthread_self();

	__kdebug_trace(0x9000030, self, pflags, 0, 0, 0);

	_pthread_exit(self, (self->fun)(self->arg));
}
static int
_pthread_create(pthread_t t,
		const pthread_attr_t *attrs,
		void *stack,
		const mach_port_t kernel_thread)
{
	int res = 0;

	memset(t, 0, sizeof(*t));

	t->stacksize = attrs->stacksize;
	t->stackaddr = (void *)stack;
	t->guardsize = attrs->guardsize;
	t->kernel_thread = kernel_thread;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->param = attrs->param;
	t->freeStackOnExit = attrs->freeStackOnExit;
	t->mutexes = (struct _pthread_mutex *)NULL;
	t->sig = _PTHREAD_SIG;
	t->reply_port = MACH_PORT_NULL;
	t->cthread_self = NULL;
	t->plist.tqe_next = (struct _pthread *)0;
	t->plist.tqe_prev = (struct _pthread **)0;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
	t->death = SEMAPHORE_NULL;

	if (kernel_thread != MACH_PORT_NULL)
		(void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);

	return (res);
}
void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void *stack, size_t stacksize, int kernalloc, int nozero)
{
	mach_vm_offset_t stackaddr = (mach_vm_offset_t)stack;

	memset(t, 0, sizeof(*t));
	t->plist.tqe_next = (struct _pthread *)0;
	t->plist.tqe_prev = (struct _pthread **)0;

	t->schedset = attrs->schedset;

	if (kernalloc != 0) {
		stackaddr = (mach_vm_offset_t)t;

		/* if allocated from kernel set values appropriately */
		t->stacksize = stacksize;
		t->stackaddr = stackaddr;
		t->freeStackOnExit = 1;
		t->freeaddr = stackaddr - stacksize - vm_page_size;
		t->freesize = pthreadsize + stacksize + vm_page_size;
	} else {
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
	}
	t->guardsize = attrs->guardsize;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->param = attrs->param;
	t->mutexes = (struct _pthread_mutex *)NULL;
	t->sig = _PTHREAD_SIG;
	t->reply_port = MACH_PORT_NULL;
	t->cthread_self = NULL;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
	t->death = SEMAPHORE_NULL;
	t->kernalloc = kernalloc;
}

static void
_pthread_tsd_reinit(pthread_t t)
{
	bzero(&t->tsd[1], (_INTERNAL_POSIX_THREAD_KEYS_END - 1) * sizeof(void *));
}
/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

/* Non-portable public API to know whether this process has (had) at least one thread
 * apart from the main thread. There could be a race if there is a thread in the process of
 * creation at the time of the call. It does not tell whether there is more than one thread
 * at this point of time.
 */
int
pthread_is_threaded_np(void)
{
	return (__is_threaded);
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;

	if (_pthread_lookup_thread(t, &kport, 0) != 0)
		return (MACH_PORT_NULL);

	return (kport);
}

pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread *p = NULL;

	/* No need to wait as mach port is already known */
	LOCK(_pthread_list_lock);
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p->kernel_thread == kernel_thread)
			break;
	}
	UNLOCK(_pthread_list_lock);

	return p;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
	int ret;
	size_t size = 0;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	size = t->stacksize;
	UNLOCK(_pthread_list_lock);
	return (size);
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
	int ret;
	void *addr = NULL;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return ((void *)(uintptr_t)ret);
	}
	addr = t->stackaddr;
	UNLOCK(_pthread_list_lock);
	return (addr);
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
	return t->reply_port;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	pthread_t self = pthread_self();

	return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
static int
_new_pthread_create_suspended(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg,
		int create_susp)
{
	pthread_attr_t *attrs;
	void *stack;
	int error;
	unsigned int flags;
	pthread_t t;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;
	task_t self = mach_task_self();
	int kernalloc = 0;
	int susp = create_susp;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	error = 0;

	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
	    (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
		needresume = 1;
		susp = 1;
	} else
		needresume = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
	 * any change in priority or policy is needed here.
	 */
	if ((__oldstyle == 1) || (create_susp != 0)) {
		/* Rosetta or pthread_create_suspended() */
		/* running under rosetta */
		/* Allocate a stack for the thread */
		__kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
		if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
			return (error);
		}
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;
		if (susp) {
			/* Create the Mach thread for this thread */
			PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create thread: %d\n", kern_res);
				return (EINVAL);
			}
		}
		if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			return (error);
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on its way */
		t->arg = arg;
		t->fun = start_routine;
		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
		__kdebug_trace(0x900000c, t, 0, 0, 4, 0);
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, susp, needresume);
		return (0);
	} else {
		flags = 0;
		if (attrs->fastpath == 1)
			kernalloc = 1;

		if (attrs->detached == PTHREAD_CREATE_DETACHED)
			flags |= PTHREAD_START_DETACHED;
		if (attrs->schedset != 0) {
			flags |= PTHREAD_START_SETSCHED;
			flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
			flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
		}

		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		if (kernalloc == 0) {
			/* Allocate a stack for the thread */
			flags |= PTHREAD_START_CUSTOM;
			if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
				return (error);
			}
			/* Send it on its way */
			t->arg = arg;
			t->fun = start_routine;

			__kdebug_trace(0x9000004, t, flags, 0, 0, 0);

			if ((t = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
				_pthread_free_pthread_onstack(t, 1, 0);
				return (EAGAIN);
			}
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
				__kdebug_trace(0x9000008, t, 0, 0, 1, 0);
				free(t);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
				__kdebug_trace(0x900000c, t, 0, 0, 1, 0);
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;
			__kdebug_trace(0x9000014, t, 0, 0, 1, 0);
			return (0);
		} else {
			/* kernel allocation */
			__kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
			if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
				return (EAGAIN);
			/* Now set it up to execute */
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
				__kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
				vm_deallocate(self, (vm_address_t)t, pthreadsize);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
				__kdebug_trace(0x900000c, t, 0, 0, 2, 0);
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;
			__kdebug_trace(0x9000014, t, 0, 0, 2, 0);
			return (0);
		}
	}
}
static int
_pthread_create_suspended(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg,
		int suspended)
{
	pthread_attr_t *attrs;
	void *stack;
	int res;
	pthread_t t;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	res = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
	 * any change in priority or policy is needed here.
	 */
	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
	    (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
		needresume = 1;
		suspended = 1;
	} else
		needresume = 0;

	/* Allocate a stack for the thread */
	if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
		return (res);
	}
	t = (pthread_t)malloc(sizeof(struct _pthread));
	*thread = t;
	if (suspended) {
		/* Create the Mach thread for this thread */
		PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
		if (kern_res != KERN_SUCCESS)
		{
			printf("Can't create thread: %d\n", kern_res);
			res = EINVAL; /* Need better error here? */
			return (res);
		}
	}
	if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
	{
		return (res);
	}
	set_malloc_singlethreaded(0);
	__is_threaded = 1;

	/* Send it on its way */
	t->arg = arg;
	t->fun = start_routine;
	/* Now set it up to execute */
	LOCK(_pthread_list_lock);
	TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
	__kdebug_trace(0x900000c, t, 0, 0, 5, 0);
	_pthread_count++;
	UNLOCK(_pthread_list_lock);
	_pthread_setup(t, _pthread_body, stack, suspended, needresume);
	return (0);
}

int
pthread_create(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg)
{
	return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg)
{
	return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
	int newstyle = 0;
	int ret;

	if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
		return (ret); /* Not a valid thread */

	LOCK(thread->lock);
	newstyle = thread->newstyle;
	if (thread->detached & PTHREAD_CREATE_JOINABLE)
	{
		if (thread->detached & _PTHREAD_EXITED) {
			UNLOCK(thread->lock);
			pthread_join(thread, NULL);
			return (0);
		} else {
			if (newstyle == 0) {
				semaphore_t death = thread->death;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;
				UNLOCK(thread->lock);
				(void) semaphore_signal(death);
			} else {
				mach_port_t joinport = thread->joiner_notify;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;

				UNLOCK(thread->lock);
				semaphore_signal(joinport);
			}
			return (0);
		}
	} else {
		UNLOCK(thread->lock);
		return (EINVAL);
	}
}

/*
 * pthread_kill call to system call
 */
extern int __pthread_kill(mach_port_t, int);

int
pthread_kill(pthread_t th, int sig)
{
	int error = 0;
	mach_port_t kport = MACH_PORT_NULL;

	if ((sig < 0) || (sig > NSIG))
		return (EINVAL);

	if (_pthread_lookup_thread(th, &kport, 0) != 0)
		return (ESRCH); /* Not a valid thread */

	error = __pthread_kill(kport, sig);

	return (error);
}
/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
	pthread_reap_msg_t msg;
	kern_return_t ret;

	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
					      MACH_MSG_TYPE_MOVE_SEND);
	msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
	msg.header.msgh_remote_port = thread_recycle_port;
	msg.header.msgh_local_port = kernel_thread;
	msg.header.msgh_id = 0x44454144; /* 'DEAD' */
	msg.thread = thread;
	ret = mach_msg_send(&msg.header);
	assert(ret == MACH_MSG_SUCCESS);
}

/* Reap the resources for available threads */
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
	mach_port_type_t ptype;
	kern_return_t ret;
	task_t self;

	self = mach_task_self();
	if (kernel_thread != MACH_PORT_DEAD) {
		ret = mach_port_type(self, kernel_thread, &ptype);
		if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
			/* not quite dead yet... */
			return EAGAIN;
		}
		ret = mach_port_deallocate(self, kernel_thread);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_deallocate(kernel_thread) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->reply_port != MACH_PORT_NULL) {
		ret = mach_port_mod_refs(self, th->reply_port,
					 MACH_PORT_RIGHT_RECEIVE, -1);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_mod_refs(reply_port) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->freeStackOnExit) {
		vm_address_t addr = (vm_address_t)th->stackaddr;
		vm_size_t size;

		size = (vm_size_t)th->stacksize + th->guardsize;

		addr -= size;
		ret = vm_deallocate(self, addr, size);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"vm_deallocate(stack) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (value_ptr)
		*value_ptr = th->exit_value;
	if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
		*value_ptr = PTHREAD_CANCELED;
	th->sig = _PTHREAD_NO_SIG;

	return 0;
}
void _pthread_reap_threads(void)
{
	pthread_reap_msg_t msg;
	kern_return_t ret;

	ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
		       sizeof msg, thread_recycle_port,
		       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	while (ret == MACH_MSG_SUCCESS) {
		mach_port_t kernel_thread = msg.header.msgh_remote_port;
		pthread_t thread = msg.thread;

		if (_pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
		{
			/* not dead yet, put it back for someone else to reap, stop here */
			_pthread_become_available(thread, kernel_thread);
			return;
		}
		ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
			       sizeof msg, thread_recycle_port,
			       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	}
}

/* For compatibility... */

pthread_t
_pthread_self(void) {
	return pthread_self();
}
1518 int __disable_threadsignal(int);
1521 _pthread_exit(pthread_t self
, void *value_ptr
)
1523 struct __darwin_pthread_handler_rec
*handler
;
1524 kern_return_t kern_res
;
1526 int newstyle
= self
->newstyle
;
1528 /* Make this thread not to receive any signals */
1529 __disable_threadsignal(1);
1532 __kdebug_trace(0x900001c, self
, newstyle
, 0, 0, 0);
1535 /* set cancel state to disable and type to deferred */
1536 _pthread_setcancelstate_exit(self
, value_ptr
, __unix_conforming
);
1538 while ((handler
= self
->__cleanup_stack
) != 0)
1540 (handler
->__routine
)(handler
->__arg
);
1541 self
->__cleanup_stack
= handler
->__next
;
1543 _pthread_tsd_cleanup(self
);
1545 if (newstyle
== 0) {
1546 _pthread_reap_threads();
1549 self
->detached
|= _PTHREAD_EXITED
;
1551 if (self
->detached
& PTHREAD_CREATE_JOINABLE
) {
1552 mach_port_t death
= self
->death
;
1553 self
->exit_value
= value_ptr
;
1555 /* the joiner will need a kernel thread reference, leave ours for it */
1557 PTHREAD_MACH_CALL(semaphore_signal(death
), kern_res
);
1558 if (kern_res
!= KERN_SUCCESS
)
1560 "semaphore_signal(death) failed: %s\n",
1561 mach_error_string(kern_res
));
1563 LOCK(_pthread_list_lock
);
1564 thread_count
= --_pthread_count
;
1565 UNLOCK(_pthread_list_lock
);
1568 LOCK(_pthread_list_lock
);
1569 TAILQ_REMOVE(&__pthread_head
, self
, plist
);
1571 __kdebug_trace(0x9000010, self
, 0, 0, 5, 0);
1573 thread_count
= --_pthread_count
;
1574 UNLOCK(_pthread_list_lock
);
1575 /* with no joiner, we let become available consume our cached ref */
1576 _pthread_become_available(self
, self
->kernel_thread
);
1579 if (thread_count
<= 0)
1582 /* Use a new reference to terminate ourselves. Should never return. */
1583 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res
);
1584 fprintf(stderr
, "thread_terminate(mach_thread_self()) failed: %s\n",
1585 mach_error_string(kern_res
));
1587 semaphore_t joinsem
= SEMAPHORE_NULL
;
1589 if ((self
->joiner_notify
== NULL
) && (self
->detached
& PTHREAD_CREATE_JOINABLE
))
1590 joinsem
= new_sem_from_pool();
1592 self
->detached
|= _PTHREAD_EXITED
;
1594 self
->exit_value
= value_ptr
;
1595 if (self
->detached
& PTHREAD_CREATE_JOINABLE
) {
1596 if (self
->joiner_notify
== NULL
) {
1597 self
->joiner_notify
= joinsem
;
1598 joinsem
= SEMAPHORE_NULL
;
1601 if (joinsem
!= SEMAPHORE_NULL
)
1602 restore_sem_to_pool(joinsem
);
1603 _pthread_free_pthread_onstack(self
, 0, 1);
1606 /* with no joiner, we let become available consume our cached ref */
1607 if (joinsem
!= SEMAPHORE_NULL
)
1608 restore_sem_to_pool(joinsem
);
1609 _pthread_free_pthread_onstack(self
, 1, 1);
1616 pthread_exit(void *value_ptr
)
1618 pthread_t self
= pthread_self();
1619 if (self
->wqthread
!= 0)
1620 workqueue_exit(self
, self
->cur_workq
, self
->cur_workitem
);
1622 _pthread_exit(self
, value_ptr
);
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
		      int *policy,
		      struct sched_param *param)
{
	int ret;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	*policy = thread->policy;
	*param = thread->param;
	UNLOCK(_pthread_list_lock);
	return (0);
}

/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
static int
pthread_setschedparam_internal(pthread_t thread,
		      mach_port_t kport,
		      int policy,
		      const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy) {
	case SCHED_OTHER:
		bases.ts.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.ts;
		count = POLICY_TIMESHARE_BASE_COUNT;
		break;
	case SCHED_FIFO:
		bases.fifo.base_priority = param->sched_priority;
		base = (policy_base_t)&bases.fifo;
		count = POLICY_FIFO_BASE_COUNT;
		break;
	case SCHED_RR:
		bases.rr.base_priority = param->sched_priority;
		/* quantum isn't public yet */
		bases.rr.quantum = param->quantum;
		base = (policy_base_t)&bases.rr;
		count = POLICY_RR_BASE_COUNT;
		break;
	default:
		return (EINVAL);
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	if (ret != KERN_SUCCESS)
		return (EINVAL);
	return (0);
}

int
pthread_setschedparam(pthread_t t,
		      int policy,
		      const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int error = 0;

	if (t != pthread_self() && t != &_thread) { /* since the main thread will not get deallocated from underneath us */
		if (_pthread_lookup_thread(t, &kport, 0) != 0)
			return (ESRCH);
	} else
		kport = t->kernel_thread;

	error = pthread_setschedparam_internal(t, kport, policy, param);
	if (error == 0) {
		/* ensure the thread is still valid */
		LOCK(_pthread_list_lock);
		if ((error = _pthread_find_thread(t)) != 0) {
			UNLOCK(_pthread_list_lock);
			return (error);
		}
		t->policy = policy;
		t->param = *param;
		UNLOCK(_pthread_list_lock);
	}
	return (error);
}

/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}
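
/*
 * Usage sketch (illustrative, not part of the original source): priorities
 * for a policy fall in a band of +/-16 around default_priority, so a
 * portable caller queries the bounds instead of hard-coding them.  The
 * helper name is hypothetical.
 */
static int
_example_clamp_priority(int policy, int prio)
{
	int lo = sched_get_priority_min(policy);
	int hi = sched_get_priority_max(policy);

	if (prio < lo)
		return lo;
	if (prio > hi)
		return hi;
	return prio;
}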
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
	      pthread_t t2)
{
	return (t1 == t2);
}

__private_extern__ void
_pthread_set_self(pthread_t p)
{
	extern void __pthread_set_self(pthread_t);

	if (p == 0) {
		bzero(&_thread, sizeof(struct _pthread));
		p = &_thread;
	}
	p->tsd[0] = p;
	__pthread_set_self(p);
}

void
cthread_set_self(void *cself)
{
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		_pthread_set_self(cself);
		return;
	}
	self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		return (void *)self;
	}
	return self->cthread_self;
}
/*
 * cancellation handler for pthread_once, as the init routine can have a
 * cancellation point. In that case we need to release the once spin lock.
 */
void
__pthread_once_cancel_handler(pthread_once_t *once_control)
{
	_spin_unlock(&once_control->lock);
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
	     void (*init_routine)(void))
{
	_spin_lock(&once_control->lock);
	if (once_control->sig == _PTHREAD_ONCE_SIG_init)
	{
		pthread_cleanup_push(__pthread_once_cancel_handler, once_control);
		(*init_routine)();
		pthread_cleanup_pop(0);
		once_control->sig = _PTHREAD_ONCE_SIG;
	}
	_spin_unlock(&once_control->lock);
	return (0); /* Spec defines no possible errors! */
}
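
/*
 * Usage sketch (illustrative, not part of the original source): typical
 * one-time initialization through pthread_once().  The names below are
 * hypothetical.
 */
static pthread_once_t _example_once = PTHREAD_ONCE_INIT;
static int _example_ready = 0;

static void
_example_init_routine(void)
{
	/* Runs exactly once, even if many threads race into pthread_once(). */
	_example_ready = 1;
}

static int
_example_get_ready(void)
{
	(void)pthread_once(&_example_once, _example_init_routine);
	return _example_ready;
}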
/*
 * Insert a cancellation point in a thread.
 */
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
	LOCK(thread->lock);
	if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
	{
		UNLOCK(thread->lock);
		pthread_exit(PTHREAD_CANCELED);
	}
	UNLOCK(thread->lock);
}

int
pthread_getconcurrency(void)
{
	return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
	pthread_concurrency = new_level;
	return (0);
}
/*
 * Perform package initialization - called automatically when the application starts
 */
__private_extern__ int
pthread_init(void)
{
	pthread_attr_t *attrs;
	pthread_t thread = &_thread;
	kern_return_t kr;
	host_priority_info_data_t priority_info;
	host_info_t info;
	host_flavor_t flavor;
	host_t host;
	mach_msg_type_number_t count;
	int mib[2];
	size_t len;
	void *stackaddr;

	pthreadsize = round_page(sizeof(struct _pthread));
	count = HOST_PRIORITY_INFO_COUNT;
	info = (host_info_t)&priority_info;
	flavor = HOST_PRIORITY_INFO;
	host = mach_host_self();
	kr = host_info(host, flavor, info, &count);
	if (kr != KERN_SUCCESS)
		printf("host_info failed (%d); probably need privilege.\n", kr);
	else {
		default_priority = priority_info.user_priority;
		min_priority = priority_info.minimum_priority;
		max_priority = priority_info.maximum_priority;
	}
	attrs = &_pthread_attr_default;
	pthread_attr_init(attrs);

	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
	_pthread_set_self(thread);

	/* In case of dyld reset the tsd keys from 1 - 10 */
	_pthread_keys_init();

	mib[0] = CTL_KERN;
	mib[1] = KERN_USRSTACK;
	len = sizeof(stackaddr);
	if (sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0)
		stackaddr = (void *)USRSTACK;
	_pthread_create(thread, attrs, stackaddr, mach_thread_self());
	thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

	_init_cpu_capabilities();

	_spin_tries = MP_SPIN_TRIES;

	mach_port_deallocate(mach_task_self(), host);

#if defined(__ppc__)
#endif
#if defined(__arm__)
#endif

#if defined(_OBJC_PAGE_BASE_ADDRESS)
	{
		vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
		kr = vm_map(mach_task_self(),
			    &objcRTPage, vm_page_size * 4, vm_page_size - 1,
			    VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
			    MEMORY_OBJECT_NULL,
			    (vm_address_t)0, FALSE,
			    (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
			    VM_INHERIT_DEFAULT);
		/* We ignore the return result here. The ObjC runtime will just have to deal. */
	}
#endif

	mig_init(1);	/* enable multi-threaded mig interfaces */
	if (__oldstyle == 0) {
#if defined(__i386__) || defined(__x86_64__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
#endif
	}
	return 0;
}
int sched_yield(void)
{
	swtch_pri(0);
	return 0;
}

/* This used to be the "magic" that gets the initialization routine called when the application starts */
static int _do_nothing(void) { return 0; }
int (*_cthread_init_routine)(void) = _do_nothing;

/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t
new_sem_from_pool(void) {
	kern_return_t res;
	semaphore_t sem;
	int i;

	LOCK(sem_pool_lock);
	if (sem_pool_current == sem_pool_count) {
		sem_pool_count += 16;
		sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
		for (i = sem_pool_current; i < sem_pool_count; i++) {
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
		}
	}
	sem = sem_pool[sem_pool_current++];
	UNLOCK(sem_pool_lock);
	return sem;
}

/* Put a semaphore back into the pool */
__private_extern__ void
restore_sem_to_pool(semaphore_t sem) {
	LOCK(sem_pool_lock);
	sem_pool[--sem_pool_current] = sem;
	UNLOCK(sem_pool_lock);
}
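
/*
 * Usage sketch (illustrative, not part of the original source): the pool
 * hands out pre-created Mach semaphores so exit/join paths can avoid a
 * semaphore_create() call.  The helper name is hypothetical.
 */
static void
_example_sem_pool_roundtrip(void)
{
	semaphore_t sem;

	sem = new_sem_from_pool();	/* grows the pool in chunks of 16 when empty */
	/* use sem, e.g. paired semaphore_signal()/semaphore_wait() calls */
	restore_sem_to_pool(sem);	/* hand it back instead of destroying it */
}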
static void sem_pool_reset(void) {
	LOCK(sem_pool_lock);
	sem_pool_count = 0;
	sem_pool_current = 0;
	sem_pool = NULL;
	UNLOCK(sem_pool_lock);
}

__private_extern__ void _pthread_fork_child(pthread_t p) {
	/* Just in case somebody had it locked... */
	UNLOCK(sem_pool_lock);
	sem_pool_reset();
	/* No need to hold the pthread_list_lock as no one other than this
	 * thread is present at this time
	 */
	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
	_pthread_count = 1;
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
	pthread_t self = pthread_self();

	switch (state) {
	case PTHREAD_CANCEL_ENABLE:
		__pthread_canceled(1);
		break;
	case PTHREAD_CANCEL_DISABLE:
		__pthread_canceled(2);
		break;
	default:
		return EINVAL;
	}

	self = pthread_self();
	*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;

	_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
	return (0);
}

/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming)
{
	self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
	self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
	if (value_ptr == PTHREAD_CANCELED) {
		self->detached |= _PTHREAD_WASCANCEL;
	}
}
int
_pthread_join_cleanup(pthread_t thread, void **value_ptr, int conforming)
{
	kern_return_t res;
	int detached = 0, ret;

	__kdebug_trace(0x9000028, thread, 0, 0, 1, 0);

	/* The scenario is that the joiner was waiting for the thread and the
	 * pthread_detach happened on that thread. The semaphore will then
	 * trigger, but by the time the joiner runs, the target thread could
	 * already be freed. So we need to make sure that the thread is still
	 * in the list and is joinable before we continue with the join.
	 */
	LOCK(_pthread_list_lock);
	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
		/* the thread might be a detached thread */
		UNLOCK(_pthread_list_lock);
		return (ESRCH);
	}
	/* It is still a joinable thread and needs to be reaped */
	TAILQ_REMOVE(&__pthread_head, thread, plist);
	__kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
	UNLOCK(_pthread_list_lock);

	if (value_ptr)
		*value_ptr = thread->exit_value;
	if (conforming) {
		if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) {
			*value_ptr = PTHREAD_CANCELED;
		}
	}
	if (thread->reply_port != MACH_PORT_NULL) {
		res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
		if (res != KERN_SUCCESS)
			fprintf(stderr, "mach_port_mod_refs(reply_port) failed: %s\n", mach_error_string(res));
		thread->reply_port = MACH_PORT_NULL;
	}
	if (thread->freeStackOnExit) {
		thread->sig = _PTHREAD_NO_SIG;
		__kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
		vm_deallocate(mach_task_self(), thread, pthreadsize);
	} else {
		thread->sig = _PTHREAD_NO_SIG;
		__kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
		free(thread);
	}
	return (0);
}
/* ALWAYS called with list lock held, and returns with list lock held */
int
_pthread_find_thread(pthread_t thread)
{
	pthread_t p;

loop:
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p == thread) {
			if (thread->kernel_thread == MACH_PORT_NULL) {
				UNLOCK(_pthread_list_lock);
				sched_yield();
				LOCK(_pthread_list_lock);
				goto loop;
			}
			return (0);
		}
	}
	return (ESRCH);
}

int
_pthread_lookup_thread(pthread_t thread, mach_port_t *portp, int only_joinable)
{
	mach_port_t kport;
	int ret = 0;

	if (thread == NULL)
		return (ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
		UNLOCK(_pthread_list_lock);
		return (EINVAL);
	}
	kport = thread->kernel_thread;
	UNLOCK(_pthread_list_lock);
	if (portp != NULL)
		*portp = kport;
	return (0);
}
/* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_attr_init_np(pthread_workqueue_attr_t *attrp)
{
	attrp->stacksize = DEFAULT_STACK_SIZE;
	attrp->istimeshare = 1;
	attrp->importance = 0;
	attrp->affinity = 0;
	attrp->queueprio = WORK_QUEUE_NORMALIZER;
	attrp->sig = PTHEAD_WRKQUEUE_ATTR_SIG;
	return (0);
}

int
pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t *attr)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG)
	{
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}
#ifdef NOTYET /* [ */
int
pthread_workqueue_attr_getstacksize_np(const pthread_workqueue_attr_t *attr, size_t *stacksizep)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*stacksizep = attr->stacksize;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_workqueue_attr_setstacksize_np(pthread_workqueue_attr_t *attr, size_t stacksize)
{
	if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stacksize = stacksize;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_workqueue_attr_getthreadtimeshare_np(const pthread_workqueue_attr_t *attr, int *istimesharep)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*istimesharep = attr->istimeshare;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_workqueue_attr_settthreadtimeshare_np(pthread_workqueue_attr_t *attr, int istimeshare)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		if (istimeshare != 0)
			attr->istimeshare = istimeshare;
		else
			attr->istimeshare = 0;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_workqueue_attr_getthreadimportance_np(const pthread_workqueue_attr_t *attr, int *importancep)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*importancep = attr->importance;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_workqueue_attr_settthreadimportance_np(pthread_workqueue_attr_t *attr, int importance)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		attr->importance = importance;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_workqueue_attr_getthreadaffinity_np(const pthread_workqueue_attr_t *attr, int *affinityp)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*affinityp = attr->affinity;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_workqueue_attr_settthreadaffinity_np(pthread_workqueue_attr_t *attr, int affinity)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		attr->affinity = affinity;
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

#endif /* NOTYET ] */
int
pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t *attr, int *qpriop)
{
	if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
		*qpriop = (attr->queueprio - WORK_QUEUE_NORMALIZER);
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}

int
pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t *attr, int qprio)
{
	/* only -2 to +2 is valid */
	if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && (qprio <= 2) && (qprio >= -2)) {
		attr->queueprio = (qprio + WORK_QUEUE_NORMALIZER);
		return (0);
	} else
		return (EINVAL); /* Not an attribute structure! */
}
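
/*
 * Usage sketch (illustrative, not part of the original source): queue
 * priorities are exposed as -2..+2 and stored internally offset by
 * WORK_QUEUE_NORMALIZER, so a set/get round trip returns the same value.
 * The helper name is hypothetical.
 */
static int
_example_wq_attr_priority_roundtrip(void)
{
	pthread_workqueue_attr_t wqattr;
	int qprio = 0;

	pthread_workqueue_attr_init_np(&wqattr);
	if (pthread_workqueue_attr_setqueuepriority_np(&wqattr, -1) != 0)
		return (EINVAL);	/* out-of-range values are rejected */
	pthread_workqueue_attr_getqueuepriority_np(&wqattr, &qprio);	/* qprio == -1 */
	pthread_workqueue_attr_destroy_np(&wqattr);
	return qprio;
}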
/* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */

static void
workqueue_list_lock(void)
{
	OSSpinLockLock(&__workqueue_list_lock);
}

static void
workqueue_list_unlock(void)
{
	OSSpinLockUnlock(&__workqueue_list_lock);
}

int
pthread_workqueue_init_np(void)
{
	int ret;

	workqueue_list_lock();
	ret = _pthread_work_internal_init();
	workqueue_list_unlock();

	return (ret);
}
2340 pthread_workqueue_head_t headp
;
2341 pthread_workitem_t witemp
;
2342 pthread_workqueue_t wq
;
2344 if (kernel_workq_setup
== 0) {
2345 #if defined(__i386__) || defined(__x86_64__)
2346 __bsdthread_register(thread_start
, start_wqthread
, round_page(sizeof(struct _pthread
)));
2348 __bsdthread_register(_pthread_start
, _pthread_wqthread
, round_page(sizeof(struct _pthread
)));
2351 _pthread_wq_attr_default
.stacksize
= DEFAULT_STACK_SIZE
;
2352 _pthread_wq_attr_default
.istimeshare
= 1;
2353 _pthread_wq_attr_default
.importance
= 0;
2354 _pthread_wq_attr_default
.affinity
= 0;
2355 _pthread_wq_attr_default
.queueprio
= WORK_QUEUE_NORMALIZER
;
2356 _pthread_wq_attr_default
.sig
= PTHEAD_WRKQUEUE_ATTR_SIG
;
2358 for( i
= 0; i
< WQ_NUM_PRIO_QS
; i
++) {
2359 headp
= __pthread_wq_head_tbl
[i
];
2360 TAILQ_INIT(&headp
->wqhead
);
2361 headp
->next_workq
= 0;
2364 /* create work item and workqueue pools */
2365 witemp
= (struct _pthread_workitem
*)malloc(sizeof(struct _pthread_workitem
) * WORKITEM_POOL_SIZE
);
2366 bzero(witemp
, (sizeof(struct _pthread_workitem
) * WORKITEM_POOL_SIZE
));
2367 for (i
= 0; i
< WORKITEM_POOL_SIZE
; i
++) {
2368 TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head
, &witemp
[i
], item_entry
);
2370 wq
= (struct _pthread_workqueue
*)malloc(sizeof(struct _pthread_workqueue
) * WORKQUEUE_POOL_SIZE
);
2371 bzero(wq
, (sizeof(struct _pthread_workqueue
) * WORKQUEUE_POOL_SIZE
));
2372 for (i
= 0; i
< WORKQUEUE_POOL_SIZE
; i
++) {
2373 TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head
, &wq
[i
], wq_list
);
2376 if (error
= __workq_open()) {
2377 TAILQ_INIT(&__pthread_workitem_pool_head
);
2378 TAILQ_INIT(&__pthread_workqueue_pool_head
);
2383 kernel_workq_setup
= 1;
/* This routine is called with list lock held */
static pthread_workitem_t
alloc_workitem(void)
{
	pthread_workitem_t witem;

	if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
		workqueue_list_unlock();
		witem = malloc(sizeof(struct _pthread_workitem));
		workqueue_list_lock();
	} else {
		witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
		TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
	}
	return(witem);
}
/* This routine is called with list lock held */
static void
free_workitem(pthread_workitem_t witem)
{
	TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
}
/* This routine is called with list lock held */
static pthread_workqueue_t
alloc_workqueue(void)
{
	pthread_workqueue_t wq;

	if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
		workqueue_list_unlock();
		wq = malloc(sizeof(struct _pthread_workqueue));
		workqueue_list_lock();
	} else {
		wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
		TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
	}
	return(wq);
}
/* This routine is called with list lock held */
static void
free_workqueue(pthread_workqueue_t wq)
{
	TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
}
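/*
 * Initialize a workqueue structure either from the caller-supplied
 * attribute or, when attr is NULL, from the built-in defaults, and hook
 * it up to its priority bucket.
 */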
static void
_pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
{
	bzero(wq, sizeof(struct _pthread_workqueue));
	if (attr != NULL) {
		wq->stacksize = attr->stacksize;
		wq->istimeshare = attr->istimeshare;
		wq->importance = attr->importance;
		wq->affinity = attr->affinity;
		wq->queueprio = attr->queueprio;
	} else {
		wq->stacksize = DEFAULT_STACK_SIZE;
		wq->istimeshare = 1;
		wq->importance = 0;
		wq->affinity = 0;
		wq->queueprio = WORK_QUEUE_NORMALIZER;
	}
	LOCK_INIT(wq->lock);
	wq->flags = 0;
	TAILQ_INIT(&wq->item_listhead);
	TAILQ_INIT(&wq->item_kernhead);
	wq->wq_list.tqe_next = 0;
	wq->wq_list.tqe_prev = 0;
	wq->sig = PTHEAD_WRKQUEUE_SIG;
	wq->headp = __pthread_wq_head_tbl[wq->queueprio];
}
static int
valid_workq(pthread_workqueue_t workq)
{
	if (workq->sig == PTHEAD_WRKQUEUE_SIG)
		return(1);
	else
		return(0);
}
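/*
 * Scheduler helper: walks the priority buckets from highest to lowest and
 * posts pending work items to the kernel until the kernel queue is full
 * or nothing runnable is left.  Called with the list lock held; drops it
 * before returning.
 */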
/* called with list lock */
static void
pick_nextworkqueue_droplock()
{
	int i, curwqprio, val, found;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t workq;
	pthread_workqueue_t nworkq = NULL;

loop:
	while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
		found = 0;
		for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
			wqreadyprio = i;	/* because there is nothing else higher to run */
			headp = __pthread_wq_head_tbl[i];

			if (TAILQ_EMPTY(&headp->wqhead))
				continue;
			workq = headp->next_workq;
			if (workq == NULL)
				workq = TAILQ_FIRST(&headp->wqhead);
			curwqprio = workq->queueprio;
			nworkq = workq;		/* starting pt */
			while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL)
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
				val = post_nextworkitem(workq);

				if (val != 0) {
					/* things could have changed, so reassess */
					/* If the kernel queue is full, skip */
					if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
						break;
					/* If anything with higher prio arrived, then reevaluate */
					if (wqreadyprio < curwqprio)
						goto loop;	/* we need to reevaluate again */
					/* we can post some more work items */
					found = 1;
				}

				/* cannot use workq here as it could be freed */
				if (TAILQ_EMPTY(&headp->wqhead))
					break;
				/* if we found nothing to run and only one workqueue in the list, skip */
				if ((val == 0) && (workq == headp->next_workq))
					break;
				workq = headp->next_workq;
				if (workq == NULL)
					workq = TAILQ_FIRST(&headp->wqhead);
				curwqprio = workq->queueprio;
				/* if we found nothing to run and are back at the workq where we started */
				if ((val == 0) && (workq == nworkq))
					break;
			}
			if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
				break;
		}
		/* nothing found to run? */
		if (found == 0)
			break;
	}
	workqueue_list_unlock();
}
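/*
 * Posts the next eligible item of a workqueue to the kernel.  Handles
 * three cases: a barrier item (hold the queue until the in-kernel items
 * drain), a destroy item (tear the queue down once it is idle), and a
 * regular item (hand it to the kernel via __workq_ops).  Returns nonzero
 * when it made progress.  Called and returns with the list lock held,
 * though it may drop it around callbacks and the kernel call.
 */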
static int
post_nextworkitem(pthread_workqueue_t workq)
{
	int error;
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
		return(0);
	}
	if (TAILQ_EMPTY(&workq->item_listhead)) {
		return(0);
	}
	witem = TAILQ_FIRST(&workq->item_listhead);
	headp = workq->headp;
	if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {

		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		/* Also a barrier when nothing is there needs to be handled */
		/* Nothing to wait for */
		if (workq->kq_count != 0) {
			witem->flags |= PTH_WQITEM_APPLIED;
			workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
			workq->barrier_count = workq->kq_count;
#if WQ_TRACE
			__kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		} else {
#if WQ_TRACE
			__kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
#endif
			if (witem->func != NULL) {
				workqueue_list_unlock();
				func = witem->func;
				(*func)(workq, witem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
			witem->flags = 0;
			free_workitem(witem);
			return(1);
		}
	} else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
#if WQ_TRACE
		__kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		witem->flags |= PTH_WQITEM_APPLIED;
		workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
		workq->barrier_count = workq->kq_count;
		workq->term_callback = witem->func;
		workq->term_callarg = witem->func_arg;
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
			if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
#endif
			}
			witem->flags = 0;
			free_workitem(witem);
			workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
#endif
			headp = __pthread_wq_head_tbl[workq->queueprio];
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			workq->sig = 0;
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
			free_workqueue(workq);
			return(1);
		} else {
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_TRACE
			__kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
#endif
		}
		return(1);
	} else {
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
#endif
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
		TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
		if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
			workq->kq_count++;
			witem->flags |= PTH_WQITEM_KERN_COUNT;
		}
		OSAtomicIncrement32(&kernel_workq_count);
		workqueue_list_unlock();
		if (( error =__workq_ops(WQOPS_QUEUE_ADD, witem, 0)) == -1) {
			OSAtomicDecrement32(&kernel_workq_count);
			workqueue_list_lock();
#if WQ_TRACE
			__kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
			TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
			if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
				workq->flags |= PTHREAD_WORKQ_REQUEUED;
		} else
			workqueue_list_lock();
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
		return(1);
	}
	/* no one should come here */
	printf("error in logic for next workitem\n");
	return(0);
}
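/*
 * Entry point for a workqueue worker thread coming up (or back) from the
 * kernel.  When 'reuse' is 0 the pthread structure for the new thread is
 * initialized and linked onto the global thread list; when it is nonzero
 * an existing worker is being recycled and only its TSD and work item
 * bindings are reset.  The work item's function is then invoked.
 */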
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
{
	void * ret;
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workqueue_t workq;
#if WQ_DEBUG
	pthread_t pself;
#endif

	workq = item->workq;
	if (reuse == 0) {
		/* reuse is set to 0, when a thread is newly created to run a workitem */
		_pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
		self->parentcheck = 1;

		/* These are not joinable threads */
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;
#if defined(__i386__) || defined(__x86_64__)
		_pthread_set_self(self);
#endif
#if WQ_TRACE
		__kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
#endif
		self->kernel_thread = kport;
		self->fun = item->func;
		self->arg = item->func_arg;
		/* Add to the pthread list */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if WQ_TRACE
		__kdebug_trace(0x900000c, self, 0, 0, 10, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
	} else {
		/* reuse is set to 1, when a thread is reused to run another work item */
#if WQ_TRACE
		__kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
#endif
		/* reset all tsd from 1 to KEYS_MAX */
		_pthread_tsd_reinit(self);

		self->fun = item->func;
		self->arg = item->func_arg;
	}

#if WQ_DEBUG
	if (reuse == 0) {
		pself = pthread_self();
		if (self != pself) {
#if WQ_TRACE
			__kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
#endif
			printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
			_pthread_set_self(self);
			pself = pthread_self();
			if (self != pself)
				printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
		}
	} else {
		pself = pthread_self();
		if (self != pself) {
			printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
		}
	}
#endif /* WQ_DEBUG */

	self->cur_workq = workq;
	self->cur_workitem = item;
	OSAtomicDecrement32(&kernel_workq_count);

	ret = (*self->fun)(self->arg);

	workqueue_exit(self, workq, item);
}
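/*
 * Completion path for a work item: returns the item to the pool, finishes
 * any barrier or termination processing for its workqueue, then picks the
 * next runnable workqueue and parks the worker thread back in the kernel.
 */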
static void
workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	workqueue_list_lock();

	TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
	workq->kq_count--;
#if WQ_TRACE
	__kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
#endif
	item->flags = 0;
	free_workitem(item);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
#if WQ_TRACE
		__kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
#endif
		if (workq->barrier_count <= 0 ) {
			/* Need to remove barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY | PTH_WQITEM_APPLIED)) == 0)
				printf("Incorrect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and a callback is registered, run that */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
			baritem->flags = 0;
			free_workitem(baritem);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
#if WQ_TRACE
			__kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
#endif
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
#endif
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				workq->sig = 0;
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
			} else {
				/* if there are higher prio schedulable items reset to wqreadyprio */
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
					wqreadyprio = workq->queueprio;
			}
		} else {
#if WQ_TRACE
			__kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
#endif
		}
	}
#if WQ_TRACE
	__kdebug_trace(0x900005c, self, item, 0, 0, 0);
#endif
	pick_nextworkqueue_droplock();
	_pthread_workq_return(self);
}
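/*
 * Final return path for a worker thread: disables cancellation and
 * signals, runs any cleanup handlers and TSD destructors, tells the
 * kernel the thread is done with its item (WQOPS_THREAD_RETURN), and
 * exits the thread.
 */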
static void
_pthread_workq_return(pthread_t self)
{
	struct __darwin_pthread_handler_rec *handler;
	int value = 0;
	int * value_ptr=&value;

	/* set cancel state to disable and type to deferred */
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	/* Make this thread not receive any signals */
	__disable_threadsignal(1);

	while ((handler = self->__cleanup_stack) != 0)
	{
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	__workq_ops(WQOPS_THREAD_RETURN, NULL, 0);

	/* This is the way to terminate the thread */
	_pthread_exit(self, NULL);
}
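/*
 * Barrier/termination bookkeeping for an item that is being removed while
 * a barrier is in effect.  Mirrors the barrier handling in
 * workqueue_exit(); when this returns 0 it has freed the item and dropped
 * the list lock itself, otherwise the caller does both.
 */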
/* returns 0 if it handles it, otherwise 1 */
static int
handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
		if (workq->barrier_count <= 0 ) {
			/* Need to remove barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY | PTH_WQITEM_APPLIED)) == 0)
				printf("Incorrect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and a callback is registered, run that */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER)
				&& (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
			baritem->flags = 0;
			free_workitem(baritem);
			/* the removed item is retired here; the caller must not free it again */
			item->flags = 0;
			free_workitem(item);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
#if WQ_TRACE
			__kdebug_trace(0x9000058, pthread_self(), item, item->func_arg, 0, 0);
#endif
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
#endif
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				workq->sig = 0;
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
				pick_nextworkqueue_droplock();
				return(0);
			} else {
				/* if there are higher prio schedulable items reset to wqreadyprio */
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
					wqreadyprio = workq->queueprio;
				/* item was already freed above */
				pick_nextworkqueue_droplock();
				return(0);
			}
		}
	}
	return(1);
}
/* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
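/*
 * A minimal usage sketch of this non-portable API, based only on the
 * signatures below (illustrative, not part of the library; error handling
 * omitted, and work_fn/done_cb and their arguments are hypothetical):
 *
 *	static void work_fn(void *arg);
 *	static void done_cb(pthread_workqueue_t wq, void *arg);
 *
 *	pthread_workqueue_t wq;
 *	pthread_workitem_handle_t handle;
 *
 *	pthread_workqueue_create_np(&wq, NULL);            // NULL attr selects the defaults
 *	pthread_workqueue_additem_np(wq, work_fn, NULL, &handle);
 *	pthread_workqueue_destroy_np(wq, done_cb, NULL);   // may return EINPROGRESS
 */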
int
pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
{
	pthread_workqueue_t wq;
	pthread_workqueue_head_t headp;

	if ((attr != NULL) && (attr->sig != PTHEAD_WRKQUEUE_ATTR_SIG)) {
		return(EINVAL);
	}

	if (__is_threaded == 0)
		__is_threaded = 1;

	workqueue_list_lock();
	if (kernel_workq_setup == 0) {
		int ret = _pthread_work_internal_init();
		if (ret != 0) {
			workqueue_list_unlock();
			return(ret);
		}
	}

	wq = alloc_workqueue();

	_pthread_workq_init(wq, attr);

	headp = __pthread_wq_head_tbl[wq->queueprio];
	TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
	if (headp->next_workq == NULL) {
		headp->next_workq = TAILQ_FIRST(&headp->wqhead);
	}

	workqueue_list_unlock();

	*workqp = wq;

	return(0);
}
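/*
 * Queue a destroy request.  If the workqueue is already idle it is torn
 * down immediately (running callback_func, if any); otherwise a
 * PTH_WQITEM_DESTROY item is queued behind the pending work.
 */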
int
pthread_workqueue_destroy_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg)
{
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;
	witem->func = callback_func;
	witem->func_arg = callback_arg;
	witem->flags = PTH_WQITEM_DESTROY;

	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED)) == 0) {
		workq->flags |= PTHREAD_WORKQ_IN_TERMINATE;
		/* If nothing queued or running, destroy now */
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (TAILQ_EMPTY(&workq->item_kernhead))) {
			workq->flags |= (PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED);
			headp = __pthread_wq_head_tbl[workq->queueprio];
			workq->term_callback = callback_func;
			workq->term_callarg = callback_arg;
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			workq->sig = 0;
			free_workitem(witem);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
#if WQ_TRACE
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 3, 0);
#endif
			free_workqueue(workq);
			workqueue_list_unlock();
			return(0);
		} else
			TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
	} else {
		/* destruction is already under way; the extra item is not needed */
		free_workitem(witem);
		workqueue_list_unlock();
		return(EINPROGRESS);
	}
	workqueue_list_unlock();
	return(0);
}
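/*
 * Queue a work item.  The item is linked onto the workqueue's pending
 * list and the scheduler then hands items to the kernel in priority
 * order.  Fails if the workqueue is invalid or already being terminated.
 */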
int
pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep)
{
	pthread_workitem_t witem;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->func = workitem_func;
	witem->func_arg = workitem_arg;
	witem->flags = 0;
	witem->workq = workq;
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;

	/* alloc workitem can drop the lock, check the state */
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		free_workitem(witem);
		workqueue_list_unlock();
		return(ESRCH);
	}

	if (itemhandlep != NULL)
		*itemhandlep = (pthread_workitem_handle_t *)witem;
	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
		wqreadyprio = workq->queueprio;

	pick_nextworkqueue_droplock();

	return(0);
}
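/*
 * Remove a previously queued item.  An item still on the pending list is
 * simply unlinked; an item already handed to the kernel is pulled back
 * with WQOPS_QUEUE_REMOVE when possible.
 */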
int
pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle)
{
	pthread_workitem_t item, baritem;
	pthread_workqueue_head_t headp;
	int error;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		workqueue_list_unlock();
		return(ESRCH);
	}

	TAILQ_FOREACH(item, &workq->item_listhead, item_entry) {
		if (item == (pthread_workitem_t)itemhandle) {
			TAILQ_REMOVE(&workq->item_listhead, item, item_entry);
			if ((item->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) == (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) {
				workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
				workq->barrier_count = 0;
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead)))) {
					wqreadyprio = workq->queueprio;
				}
			} else if ((item->flags & PTH_WQITEM_KERN_COUNT) == PTH_WQITEM_KERN_COUNT) {
				/* the kernel has already been told about this item */
				item->flags |= PTH_WQITEM_REMOVED;
				if (handle_removeitem(workq, item) == 0)
					return(0);
			}
			item->flags |= PTH_WQITEM_NOTINLIST;
			free_workitem(item);
			workqueue_list_unlock();
			return(0);
		}
	}

	TAILQ_FOREACH(item, &workq->item_kernhead, item_entry) {
		if (item == (pthread_workitem_t)itemhandle) {
			workqueue_list_unlock();
			if ((error = __workq_ops(WQOPS_QUEUE_REMOVE, item, 0)) == 0) {
				workqueue_list_lock();
				TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
				OSAtomicDecrement32(&kernel_workq_count);
				workq->kq_count--;
				item->flags |= PTH_WQITEM_REMOVED;
				if (handle_removeitem(workq, item) != 0) {
					free_workitem(item);
					pick_nextworkqueue_droplock();
				}
				return(0);
			}
			/* the kernel already owns the item and it could not be pulled back */
			return(EBUSY);
		}
	}

	workqueue_list_unlock();
	return(EINVAL);
}
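/*
 * Queue a barrier item: work queued after the barrier is held back until
 * everything queued before it has drained.  The optional callback runs
 * when the barrier itself is reached.
 */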
int
pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, __unused int waitforcallback, pthread_workitem_handle_t *itemhandlep)
{
	pthread_workitem_t witem;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;
	witem->func = callback_func;
	witem->func_arg = callback_arg;
	witem->flags = PTH_WQITEM_BARRIER;

	/* alloc workitem can drop the lock, check the state */
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		free_workitem(witem);
		workqueue_list_unlock();
		return(ESRCH);
	}

	if (itemhandlep != NULL)
		*itemhandlep = (pthread_workitem_handle_t *)witem;

	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
		wqreadyprio = workq->queueprio;

	pick_nextworkqueue_droplock();

	return(0);
}
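/*
 * Suspend/resume: suspension is counted, and a suspended workqueue is
 * skipped by post_nextworkitem() until the count drops back to zero.
 */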
int
pthread_workqueue_suspend_np(pthread_workqueue_t workq)
{
	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}
	workqueue_list_lock();
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		workqueue_list_unlock();
		return(ESRCH);
	}

	workq->flags |= PTHREAD_WORKQ_SUSPEND;
	workq->suspend_count++;
	workqueue_list_unlock();
	return(0);
}
int
pthread_workqueue_resume_np(pthread_workqueue_t workq)
{
	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}
	workqueue_list_lock();
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		workqueue_list_unlock();
		return(ESRCH);
	}

	workq->suspend_count--;
	if (workq->suspend_count <= 0) {
		workq->flags &= ~PTHREAD_WORKQ_SUSPEND;
		if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
			wqreadyprio = workq->queueprio;

		pick_nextworkqueue_droplock();
	} else
		workqueue_list_unlock();

	return(0);
}
#else /* !BUILDING_VARIANT ] [ */
extern int __unix_conforming;
extern int _pthread_count;
extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);

#endif /* !BUILDING_VARIANT ] */
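/*
 * Cancellation cleanup handler used by the cancellable pthread_join()
 * path (the join itself lives in pthread_cancelable.c): releases the join
 * state and, for old-style threads that have already exited, reaps the
 * kernel thread.
 */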
#if __DARWIN_UNIX03

__private_extern__ void
__posix_join_cleanup(void *arg)
{
	pthread_t thread = (pthread_t)arg;
	int already_exited, res;
	void * dummy;
	semaphore_t death;
	mach_port_t joinport;
	int newstyle = 0;

	LOCK(thread->lock);
	already_exited = (thread->detached & _PTHREAD_EXITED);

	newstyle = thread->newstyle;
#if WQ_TRACE
	__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
#endif
	if (newstyle == 0) {
		death = thread->death;
		if (!already_exited){
			thread->joiner = (struct _pthread *)NULL;
			UNLOCK(thread->lock);
			restore_sem_to_pool(death);
		} else {
			UNLOCK(thread->lock);
			while ((res = _pthread_reap_thread(thread,
					thread->kernel_thread,
					&dummy, 1)) == EAGAIN)
			{
				sched_yield();
			}
			restore_sem_to_pool(death);
		}
	} else {
		/* leave another thread to join */
		thread->joiner = (struct _pthread *)NULL;
		UNLOCK(thread->lock);
	}
}

#endif /* __DARWIN_UNIX03 */
/*
 * Wait for a thread to terminate and obtain its exit value.
 *
 * int
 * pthread_join(pthread_t thread, void **value_ptr)
 *
 * moved to pthread_cancelable.c
 */
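/*
 * Request cancellation of a thread.  Under UNIX03 the target is first
 * validated against the global thread list; the pending-cancel bit is
 * then set and, if cancellation is enabled, the kernel thread is marked
 * so that cancellation points will act on it.
 */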
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if (_pthread_lookup_thread(thread, NULL, 0) != 0)
		return(ESRCH);

#if __DARWIN_UNIX03
	int state;

	LOCK(thread->lock);
	state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
	UNLOCK(thread->lock);
	if (state & PTHREAD_CANCEL_ENABLE)
		__pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
	thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
	return (0);
}
void
pthread_testcancel(void)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
	_pthread_testcancel(self, 1);
#else /* __DARWIN_UNIX03 */
	_pthread_testcancel(self, 0);
#endif /* __DARWIN_UNIX03 */
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	return (_pthread_setcancelstate_internal(state, oldstate, 1));
#else /* __DARWIN_UNIX03 */
	return (_pthread_setcancelstate_internal(state, oldstate, 0));
#endif /* __DARWIN_UNIX03 */
}
/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if ((type != PTHREAD_CANCEL_DEFERRED) &&
	    (type != PTHREAD_CANCEL_ASYNCHRONOUS))
		return EINVAL;
	self = pthread_self();
	LOCK(self->lock);
	if (oldtype)
		*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state |= type;
	UNLOCK(self->lock);
#if !__DARWIN_UNIX03
	_pthread_testcancel(self, 0); /* See if we need to 'die' now... */
#endif /* __DARWIN_UNIX03 */
	return (0);
}
int
pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
{
#if __DARWIN_UNIX03
	int err = 0;

	if (__pthread_sigmask(how, set, oset) == -1) {
		err = errno;
	}
	return(err);
#else /* __DARWIN_UNIX03 */
	return(__pthread_sigmask(how, set, oset));
#endif /* __DARWIN_UNIX03 */
}
/*
 * int
 * sigwait(const sigset_t * set, int * sig)
 *
 * moved to pthread_cancelable.c
 */