/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * POSIX Pthread Library
 */
#include "pthread_internals.h"
#include "pthread_workqueue.h"

#include <stdio.h>	/* For printf(). */
#include <errno.h>	/* For __mach_errno_addr() prototype. */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#define	__APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
#include <libkern/OSAtomic.h>
#include <libkern/OSCrossEndian.h>
extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);
#ifndef BUILDING_VARIANT /* [ */

__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);

int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];
/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
static void _pthread_tsd_reinit(pthread_t t);
static int _new_pthread_create_suspended(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg,
		int create_susp);
/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;
int _pthread_find_thread(pthread_t thread);
/* Mach message used to notify that a thread needs to be reaped */

typedef struct _pthread_reap_msg_t {
	mach_msg_header_t header;
	pthread_t thread;
	mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;
int __unix_conforming = 0;
__private_extern__ size_t pthreadsize = 0;

/* under rosetta we will use old style creation of threads */
static int __oldstyle = 0;

__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
/* Same implementation as LOCK, but without the __is_threaded check */
int _spin_tries = 0;
extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
__private_extern__ void
_spin_lock_retry(pthread_lock_t *lock)
{
	int tries = _spin_tries;
	do {
		if (tries-- > 0)
			continue;
		syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
		tries = _spin_tries;
	} while (!_spin_lock_try(lock));
}

extern mach_port_t thread_recycle_port;
/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/

static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
static void _pthread_exit(pthread_t self, void *value_ptr);
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static pthread_attr_t _pthread_attr_default = {0};
static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr);
static int kernel_workq_setup = 0;
static volatile int32_t kernel_workq_count = 0;
static volatile unsigned int user_workq_count = 0;
#define KERNEL_WORKQ_ELEM_MAX	64	/* Max number of elements in the kernel */
static int wqreadyprio = 0;	/* current highest prio queue ready with items */

static int __pthread_workqueue_affinity = 1;	/* 0 means no affinity */
__private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
__private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);

struct _pthread_workqueue_head __pthread_workq0_head;
struct _pthread_workqueue_head __pthread_workq1_head;
struct _pthread_workqueue_head __pthread_workq2_head;
pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head};

static void workqueue_list_lock(void);
static void workqueue_list_unlock(void);
static int valid_workq(pthread_workqueue_t);
static void pick_nextworkqueue_droplock(void);
static int post_nextworkitem(pthread_workqueue_t workq);
static void _pthread_workq_return(pthread_t self);
static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
static pthread_workitem_t alloc_workitem(void);
static void free_workitem(pthread_workitem_t);
static pthread_workqueue_t alloc_workqueue(void);
static void free_workqueue(pthread_workqueue_t);
static int _pthread_work_internal_init(void);
static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);

void pthread_workqueue_atfork_prepare(void);
void pthread_workqueue_atfork_parent(void);
void pthread_workqueue_atfork_child(void);

extern void dispatch_atfork_prepare(void);
extern void dispatch_atfork_parent(void);
extern void dispatch_atfork_child(void);
/* workq_kernreturn commands */
#define WQOPS_QUEUE_ADD 1
#define WQOPS_QUEUE_REMOVE 2
#define WQOPS_THREAD_RETURN 4
#define WQOPS_THREAD_SETCONC 8

/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
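
/*
 * Illustrative sketch (not part of the original source): packing a scheduling
 * policy and priority into the flags word per the layout above, mirroring what
 * _new_pthread_create_suspended() does further down. Variable names here are
 * hypothetical.
 *
 *	unsigned int flags = PTHREAD_START_SETSCHED;
 *	flags |= ((policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
 *	flags |= (sched_priority & PTHREAD_START_IMPORTANCE_MASK);
 */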
void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);

void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);

#define PTHREAD_START_CUSTOM	0x01000000
#define PTHREAD_START_SETSCHED	0x02000000
#define PTHREAD_START_DETACHED	0x04000000
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), int, void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), __uint64_t);
extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid( void );
extern int __pthread_canceled(int);
extern void _pthread_keys_init(void);
extern int __pthread_kill(mach_port_t, int);
extern int __pthread_markcancel(int);
extern int __workq_open(void);

#define WORKQUEUE_OVERCOMMIT 0x10000

extern int __workq_kernreturn(int, pthread_workitem_t, int, int);
#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__) || defined(__x86_64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#elif defined(__arm__)
static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
#else
#error Need to define a stack address hint for this architecture
#endif

/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
 * The guardpages for stackoverflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr), then there are no guardpages
 * set up for the thread.
 */
static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
	kern_return_t kr;
	vm_address_t stackaddr;
	size_t guardsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);
	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		return 0;
	}

	guardsize = attrs->guardsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
			attrs->stacksize + guardsize,
			vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
				&stackaddr, attrs->stacksize + guardsize,
				VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}
	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	if (guardsize)
		kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);
	return 0;
}
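
/*
 * Resulting layout, as an illustrative sketch: the guard page(s) occupy the
 * lowest addresses of the mapping and *stack is returned as the highest
 * address, since the stack grows downward from there toward the guard:
 *
 *	stackaddr          stackaddr + guardsize                      *stack
 *	|-- guard (VM_PROT_NONE) --|-- usable stack (grows down) <-------|
 */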
static int
_pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
{
	kern_return_t kr;
	pthread_t t;
	vm_address_t stackaddr;
	size_t guardsize, allocsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);

	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		t = (pthread_t)malloc(pthreadsize);
		_pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
		t->freeStackOnExit = 0;
		*thread = t;
		return 0;
	}

	guardsize = attrs->guardsize;
	allocsize = attrs->stacksize + guardsize + pthreadsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
			allocsize,
			vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
				&stackaddr, allocsize,
				VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}
	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	if (guardsize)
		kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);

	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);

	t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
	_pthread_struct_init(t, attrs, *stack, 0, 0, 1);
	t->freesize = allocsize;
	t->freeaddr = (void *)stackaddr;
	t->freeStackOnExit = 1;
	*thread = t;

	return 0;
}
static kern_return_t
_pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
{
	kern_return_t res = 0;
	vm_address_t freeaddr;
	size_t freesize;
	task_t self = mach_task_self();
	int thread_count;
	mach_port_t kport;
	semaphore_t joinsem = SEMAPHORE_NULL;

	__kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);

	kport = t->kernel_thread;
	joinsem = t->joiner_notify;

	if (t->freeStackOnExit) {
		freeaddr = (vm_address_t)t->freeaddr;
		if (freestruct)
			freesize = t->stacksize + t->guardsize + pthreadsize;
		else
			freesize = t->stacksize + t->guardsize;
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				/* if parent has not returned from create yet keep pthread_t */
				__kdebug_trace(0x9000010, t, 0, 0, 1, 0);
				if (t->parentcheck == 0)
					freesize -= pthreadsize;
			}
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);

			__kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
			if (thread_count <= 0)
				exit(0);
			else {
				__bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
				LIBC_ABORT("thread %p didn't terminate", t);
			}
		} else {
			__kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
			res = vm_deallocate(mach_task_self(), freeaddr, freesize);
		}
	} else {
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				__kdebug_trace(0x9000010, t, 0, 0, 2, 0);
			}
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);

			if (freestruct) {
				__kdebug_trace(0x9000008, t, 0, 0, 2, 0);
				free(t);
			}

			__kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
			if (thread_count <= 0)
				exit(0);
			else {
				__bsdthread_terminate(NULL, 0, kport, joinsem);
				LIBC_ABORT("thread %p didn't terminate", t);
			}
		} else if (freestruct) {
			t->sig = _PTHREAD_NO_SIG;
			__kdebug_trace(0x9000024, t, 0, 0, 2, 0);
			free(t);
		}
	}
	return(res);
}
/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		attr->sig = 0;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
			    int *detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*detachstate = attr->detached;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
			     int *inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*inheritsched = attr->inherit;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
			   struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*param = attr->param;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
			    int *policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*policy = attr->policy;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/* Retain the existing stack size of 512K and not depend on Main thread default stack size */
static const size_t DEFAULT_STACK_SIZE = (512*1024);
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
	attr->stacksize = DEFAULT_STACK_SIZE;
	attr->stackaddr = NULL;
	attr->sig = _PTHREAD_ATTR_SIG;
	attr->param.sched_priority = default_priority;
	attr->param.quantum = 10; /* quantum isn't public yet */
	attr->detached = PTHREAD_CREATE_JOINABLE;
	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->freeStackOnExit = 1;
	attr->fastpath = 1;
	attr->schedset = 0;
	attr->guardsize = vm_page_size;
	return (0);
}
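
/*
 * Usage sketch (illustrative only; worker_fn is hypothetical):
 *
 *	pthread_attr_t attr;
 *	pthread_t tid;
 *	pthread_attr_init(&attr);
 *	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *	pthread_attr_setstacksize(&attr, 1024 * 1024);
 *	pthread_create(&tid, &attr, worker_fn, NULL);
 *	pthread_attr_destroy(&attr);
 */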
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
			    int detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
		    (detachstate == PTHREAD_CREATE_DETACHED))
		{
			attr->detached = detachstate;
			return (0);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
			     int inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
		    (inheritsched == PTHREAD_EXPLICIT_SCHED))
		{
			attr->inherit = inheritsched;
			return (0);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
			   const struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
			    int policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((policy == SCHED_OTHER) ||
		    (policy == SCHED_RR) ||
		    (policy == SCHED_FIFO))
		{
			attr->policy = policy;
			attr->schedset = 1;
			return (0);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
			int scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			/* No attribute yet for the scope */
			return (0);
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			return (ENOTSUP);
		}
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(const pthread_attr_t *attr,
			int *scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
		attr->stackaddr = stackaddr;
		attr->freeStackOnExit = 0;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = attr->stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stacksize = stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = attr->stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
/* By SUSv3 spec, the stackaddr is the base address, the lowest addressable
 * byte address. This is not the same as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) &&
	    (((uintptr_t)stackaddr % vm_page_size) == 0) &&
	    ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		attr->freeStackOnExit = 0;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
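
/*
 * Illustrative contrast of the two entry points, assuming a page-aligned
 * buffer buf of page-multiple size sz (both hypothetical): setstack takes the
 * lowest address, while setstackaddr takes the base (highest) address.
 *
 *	pthread_attr_setstack(&attr, buf, sz);
 *	pthread_attr_setstackaddr(&attr, (char *)buf + sz);
 */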
/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr,
			    size_t guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* Guardsize of 0 is valid, it means no guard */
		if ((guardsize % vm_page_size) == 0) {
			attr->guardsize = guardsize;
			return (0);
		}
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr,
			    size_t *guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = attr->guardsize;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Create and start execution of a new thread.
 */
static void
_pthread_body(pthread_t self)
{
	_pthread_set_self(self);
	_pthread_exit(self, (self->fun)(self->arg));
}
void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int pflags)
{
	pthread_t pself;
	pthread_attr_t *attrs = &_pthread_attr_default;
	char *stackaddr;

	if ((pflags & PTHREAD_START_CUSTOM) == 0) {
		stackaddr = (char *)self;
		_pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
		LOCK(_pthread_list_lock);
		if (pflags & PTHREAD_START_SETSCHED) {
			self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
			self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
		}
		/* These are not joinable threads */
		if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
			self->detached &= ~PTHREAD_CREATE_JOINABLE;
			self->detached |= PTHREAD_CREATE_DETACHED;
		}
	} else {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
		LOCK(_pthread_list_lock);
	}
	self->kernel_thread = kport;
	self->fun = fun;
	self->arg = funarg;

	/* Add to the pthread list */
	if (self->parentcheck == 0) {
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
		__kdebug_trace(0x900000c, self, 0, 0, 3, 0);
		_pthread_count++;
	}
	self->childrun = 1;
	UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_start\n");
#endif

	pself = pthread_self();
	if (self != pself)
		LIBC_ABORT("self %p != pself %p", self, pself);

	__kdebug_trace(0x9000030, self, pflags, 0, 0, 0);

	_pthread_exit(self, (self->fun)(self->arg));
}
static int
_pthread_create(pthread_t t,
		const pthread_attr_t *attrs,
		void *stack,
		const mach_port_t kernel_thread)
{
	int res = 0;

	memset(t, 0, sizeof(*t));

	t->stacksize = attrs->stacksize;
	t->stackaddr = (void *)stack;
	t->guardsize = attrs->guardsize;
	t->kernel_thread = kernel_thread;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->param = attrs->param;
	t->freeStackOnExit = attrs->freeStackOnExit;
	t->mutexes = (struct _pthread_mutex *)NULL;
	t->sig = _PTHREAD_SIG;
	t->reply_port = MACH_PORT_NULL;
	t->cthread_self = NULL;
	LOCK_INIT(t->lock);
	t->plist.tqe_next = (struct _pthread *)0;
	t->plist.tqe_prev = (struct _pthread **)0;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
	t->death = SEMAPHORE_NULL;

	if (kernel_thread != MACH_PORT_NULL)
		(void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);

	return (res);
}
static void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
{
	mach_vm_offset_t stackaddr = (mach_vm_offset_t)(long)stack;

	if (nozero == 0) {
		memset(t, 0, sizeof(*t));
		t->plist.tqe_next = (struct _pthread *)0;
		t->plist.tqe_prev = (struct _pthread **)0;
	}
	t->schedset = attrs->schedset;
	t->tsd[0] = t;
	if (kernalloc != 0) {
		stackaddr = (mach_vm_offset_t)(long)t;

		/* if allocated from kernel set values appropriately */
		t->stacksize = stacksize;
		t->stackaddr = (void *)(long)stackaddr;
		t->freeStackOnExit = 1;
		t->freeaddr = (void *)(long)(stackaddr - stacksize - vm_page_size);
		t->freesize = pthreadsize + stacksize + vm_page_size;
	} else {
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
	}
	t->guardsize = attrs->guardsize;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->param = attrs->param;
	t->mutexes = (struct _pthread_mutex *)NULL;
	t->sig = _PTHREAD_SIG;
	t->reply_port = MACH_PORT_NULL;
	t->cthread_self = NULL;
	LOCK_INIT(t->lock);
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
	t->death = SEMAPHORE_NULL;
	t->newstyle = 1;
	t->kernalloc = kernalloc;
}
static void
_pthread_tsd_reinit(pthread_t t)
{
	bzero(&t->tsd[1], (_INTERNAL_POSIX_THREAD_KEYS_END-1) * sizeof(void *));
}
/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

/* Non-portable public API to learn whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread is
 * being created at the time of the call, so it does not tell whether there is
 * more than one thread at this exact point in time.
 */
int
pthread_is_threaded_np(void)
{
	return (__is_threaded);
}
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;

	if (t == NULL)
		goto out;

	/*
	 * If the call is on self, return the kernel port. We cannot
	 * add this bypass for main thread as it might have exited,
	 * and we should not return stale port info.
	 */
	if (t == pthread_self())
	{
		kport = t->kernel_thread;
		goto out;
	}

	if (_pthread_lookup_thread(t, &kport, 0) != 0)
		return((mach_port_t)0);

out:
	return(kport);
}
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread * p = NULL;

	/* No need to wait as mach port is already known */
	LOCK(_pthread_list_lock);
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p->kernel_thread == kernel_thread)
			break;
	}
	UNLOCK(_pthread_list_lock);
	return p;
}
size_t
pthread_get_stacksize_np(pthread_t t)
{
	int ret, nestingDepth = 0;
	size_t size = 0;
	vm_address_t address = 0;
	vm_size_t region_size = 0;
	struct vm_region_submap_info_64 info;
	mach_msg_type_number_t count;

	if ( t == pthread_self() || t == &_thread ) //since the main thread will not get de-allocated from underneath us
		return t->stacksize;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	size = t->stacksize;
	UNLOCK(_pthread_list_lock);

	return(size);
}
void *
pthread_get_stackaddr_np(pthread_t t)
{
	int ret;
	void *addr = NULL;

	if (t == NULL)
		return((void *)(long)ESRCH);

	if(t == pthread_self() || t == &_thread) //since the main thread will not get deallocated from underneath us
		return t->stackaddr;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return((void *)(long)ret);
	}
	addr = t->stackaddr;
	UNLOCK(_pthread_list_lock);

	return(addr);
}
mach_port_t
_pthread_reply_port(pthread_t t)
{
	return t->reply_port;
}
/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	pthread_t self = pthread_self();

	return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
/* if we are passed in a pthread_t that is NULL, then we return
   the current thread's thread_id. So folks don't have to call
   pthread_self, in addition to us doing it, if they just want
   their thread_id.
*/
int
pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
{
	int rval = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return(EINVAL);
	} else if (thread == NULL || thread == self) {
		*thread_id = self->thread_id;
		return rval;
	}

	LOCK(_pthread_list_lock);
	if ((rval = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(rval);
	}
	*thread_id = thread->thread_id;
	UNLOCK(_pthread_list_lock);
	return rval;
}
#endif
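
/*
 * Usage sketch (illustrative): passing NULL asks for the calling thread's id.
 *
 *	__uint64_t tid;
 *	pthread_threadid_np(NULL, &tid);
 */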
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	int rval = 0;

	if (thread == NULL)
		return(ESRCH);

	LOCK(_pthread_list_lock);
	if ((rval = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(rval);
	}
	strlcpy(threadname, thread->pthread_name, len);
	UNLOCK(_pthread_list_lock);
	return rval;
}
int
pthread_setname_np(const char *threadname)
{
	int rval = 0;
	size_t len;

	len = strlen(threadname);
	rval = sysctlbyname("kern.threadname", NULL, 0, threadname, len);
	if (rval == 0) {
		strlcpy((pthread_self())->pthread_name, threadname, len+1);
	}
	return rval;
}
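
/*
 * Usage sketch (illustrative): a thread names itself, and anyone holding its
 * pthread_t can read the name back.
 *
 *	pthread_setname_np("worker");
 *	char name[64];
 *	pthread_getname_np(pthread_self(), name, sizeof(name));
 */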
static int
_new_pthread_create_suspended(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg,
	       int create_susp)
{
	pthread_attr_t *attrs;
	void *stack;
	int error;
	unsigned int flags = 0;
	pthread_t t, t2;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;
	task_t self = mach_task_self();
	int kernalloc = 0;
	int susp = create_susp;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	error = 0;

	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
		(attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
		needresume = 1;
		create_susp = 1;
	} else
		needresume = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
	 * any change in priority or policy is needed here.
	 */
	if ((__oldstyle == 1) || (create_susp != 0)) {
		/* Rosetta or pthread_create_suspended() */
		/* running under rosetta */
		/* Allocate a stack for the thread */
		__kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
		if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
			return(error);
		}
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;

		/* Create the Mach thread for this thread */
		PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
		if (kern_res != KERN_SUCCESS)
		{
			printf("Can't create thread: %d\n", kern_res);
			return(EINVAL);
		}

		if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			return(error);
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on its way */
		t->arg = arg;
		t->fun = start_routine;

		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
		__kdebug_trace(0x900000c, t, 0, 0, 4, 0);
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, susp, needresume);
		return(0);
	} else {
		if (attrs->fastpath == 1)
			kernalloc = 1;

		if (attrs->detached == PTHREAD_CREATE_DETACHED)
			flags |= PTHREAD_START_DETACHED;
		if (attrs->schedset != 0) {
			flags |= PTHREAD_START_SETSCHED;
			flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
			flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
		}

		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		if (kernalloc == 0) {
			/* Allocate a stack for the thread */
			flags |= PTHREAD_START_CUSTOM;
			if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
				return(error);
			}
			/* Send it on its way */
			t->arg = arg;
			t->fun = start_routine;

			__kdebug_trace(0x9000004, t, flags, 0, 0, 0);

			if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
				_pthread_free_pthread_onstack(t, 1, 0);
				return (EAGAIN);
			}
			else
				t = t2;
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
				__kdebug_trace(0x9000008, t, 0, 0, 1, 0);
				if(t->freeStackOnExit)
					vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
				else
					free(t);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
				__kdebug_trace(0x900000c, t, 0, 0, 1, 0);
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;
			__kdebug_trace(0x9000014, t, 0, 0, 1, 0);
			return (0);
		} else {
			/* kernel allocation */
			__kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
			if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
				return (EAGAIN);
			/* Now set it up to execute */
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
				__kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
				vm_deallocate(self, (mach_vm_address_t)(long)t, pthreadsize);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
				__kdebug_trace(0x900000c, t, 0, 0, 2, 0);
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;
			__kdebug_trace(0x9000014, t, 0, 0, 2, 0);
			return(0);
		}
	}
}
static int
_pthread_create_suspended(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg,
	       int suspended)
{
	pthread_attr_t *attrs;
	void *stack;
	int res;
	pthread_t t;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	res = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
	 * any change in priority or policy is needed here.
	 */
	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
		(attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
		needresume = 1;
		suspended = 1;
	} else
		needresume = 0;

	do
	{
		/* Allocate a stack for the thread */
		if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
			break;
		}
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;
		if (suspended) {
			/* Create the Mach thread for this thread */
			PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create thread: %d\n", kern_res);
				res = EINVAL; /* Need better error here? */
				break;
			}
		}
		if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			break;
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on its way */
		t->arg = arg;
		t->fun = start_routine;
		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
		__kdebug_trace(0x900000c, t, 0, 0, 5, 0);
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, suspended, needresume);
	} while (0);
	return (res);
}
int
pthread_create(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
	int newstyle = 0;
	int ret;

	if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
		return (ret); /* Not a valid thread */

	LOCK(thread->lock);
	newstyle = thread->newstyle;
	if (thread->detached & PTHREAD_CREATE_JOINABLE)
	{
		if (thread->detached & _PTHREAD_EXITED) {
			UNLOCK(thread->lock);
			pthread_join(thread, NULL);
			return 0;
		} else {
			if (newstyle == 0) {
				semaphore_t death = thread->death;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;
				UNLOCK(thread->lock);
				if (death)
					(void) semaphore_signal(death);
			} else {
				mach_port_t joinport = thread->joiner_notify;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;

				UNLOCK(thread->lock);
				if (joinport) {
					semaphore_signal(joinport);
				}
			}
			return(0);
		}
	} else {
		UNLOCK(thread->lock);
		return (EINVAL);
	}
}
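
/*
 * Usage sketch (illustrative): a thread detaching itself so its resources are
 * reclaimed at exit without a pthread_join().
 *
 *	pthread_detach(pthread_self());
 */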
/*
 * pthread_kill call to system call
 */
int
pthread_kill(pthread_t th, int sig)
{
	int error = 0;
	mach_port_t kport = MACH_PORT_NULL;

	if ((sig < 0) || (sig > NSIG))
		return(EINVAL);

	if (_pthread_lookup_thread(th, &kport, 0) != 0)
		return (ESRCH); /* Not a valid thread */

	/* if the thread is a workqueue thread, just return error */
	if ((th->wqthread != 0) && (th->wqkillset == 0)) {
		return(ENOTSUP);
	}

	error = __pthread_kill(kport, sig);

	if (error == -1)
		error = errno;
	return(error);
}
int
__pthread_workqueue_setkill(int enable)
{
	pthread_t self = pthread_self();

	LOCK(self->lock);
	if (enable == 0)
		self->wqkillset = 0;
	else
		self->wqkillset = 1;
	UNLOCK(self->lock);

	return(0);
}
/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
	pthread_reap_msg_t msg;
	kern_return_t ret;

	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
					      MACH_MSG_TYPE_MOVE_SEND);
	msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
	msg.header.msgh_remote_port = thread_recycle_port;
	msg.header.msgh_local_port = kernel_thread;
	msg.header.msgh_id = 0x44454144; /* 'DEAD' */
	msg.thread = thread;
	ret = mach_msg_send(&msg.header);
	assert(ret == MACH_MSG_SUCCESS);
}
/* Reap the resources for available threads */
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
	mach_port_type_t ptype;
	kern_return_t ret;
	task_t self;

	self = mach_task_self();
	if (kernel_thread != MACH_PORT_DEAD) {
		ret = mach_port_type(self, kernel_thread, &ptype);
		if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
			/* not quite dead yet... */
			return EAGAIN;
		}
		ret = mach_port_deallocate(self, kernel_thread);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_deallocate(kernel_thread) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->reply_port != MACH_PORT_NULL) {
		ret = mach_port_mod_refs(self, th->reply_port,
					 MACH_PORT_RIGHT_RECEIVE, -1);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_mod_refs(reply_port) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->freeStackOnExit) {
		vm_address_t addr = (vm_address_t)th->stackaddr;
		vm_size_t size;

		size = (vm_size_t)th->stacksize + th->guardsize;

		addr -= size;
		ret = vm_deallocate(self, addr, size);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"vm_deallocate(stack) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (value_ptr)
		*value_ptr = th->exit_value;
	if (conforming) {
		if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
			*value_ptr = PTHREAD_CANCELED;
	}
	th->sig = _PTHREAD_NO_SIG;
	free(th);

	return 0;
}
void _pthread_reap_threads(void)
{
	pthread_reap_msg_t msg;
	kern_return_t ret;

	ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
			sizeof msg, thread_recycle_port,
			MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	while (ret == MACH_MSG_SUCCESS) {
		mach_port_t kernel_thread = msg.header.msgh_remote_port;
		pthread_t thread = msg.thread;

		/* deal with race with thread_create_running() */
		if (kernel_thread == MACH_PORT_NULL &&
		    kernel_thread != thread->kernel_thread) {
			kernel_thread = thread->kernel_thread;
		}

		if ( kernel_thread == MACH_PORT_NULL ||
		     _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
		{
			/* not dead yet, put it back for someone else to reap, stop here */
			_pthread_become_available(thread, kernel_thread);
			return;
		}

		ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
				sizeof msg, thread_recycle_port,
				MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	}
}
/* For compatibility... */

pthread_t
_pthread_self(void) {
	return pthread_self();
}
/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

static void
_pthread_exit(pthread_t self, void *value_ptr)
{
	struct __darwin_pthread_handler_rec *handler;
	kern_return_t kern_res;
	int thread_count;
	int newstyle = self->newstyle;

	/* Make this thread not receive any signals */
	__disable_threadsignal(1);

	__kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);

	/* set cancel state to disable and type to deferred */
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	while ((handler = self->__cleanup_stack) != 0)
	{
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	if (newstyle == 0) {
		_pthread_reap_threads();

		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			mach_port_t death = self->death;
			self->exit_value = value_ptr;
			UNLOCK(self->lock);
			/* the joiner will need a kernel thread reference, leave ours for it */
			if (death) {
				PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
				if (kern_res != KERN_SUCCESS)
					fprintf(stderr,
						"semaphore_signal(death) failed: %s\n",
						mach_error_string(kern_res));
			}
			LOCK(_pthread_list_lock);
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
		} else {
			UNLOCK(self->lock);
			LOCK(_pthread_list_lock);
			TAILQ_REMOVE(&__pthread_head, self, plist);
			__kdebug_trace(0x9000010, self, 0, 0, 5, 0);
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
			/* with no joiner, we let become available consume our cached ref */
			_pthread_become_available(self, self->kernel_thread);
		}

		if (thread_count <= 0)
			exit(0);

		/* Use a new reference to terminate ourselves. Should never return. */
		PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
		fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
				mach_error_string(kern_res));
	} else {
		semaphore_t joinsem = SEMAPHORE_NULL;

		if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
			joinsem = new_sem_from_pool();
		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		self->exit_value = value_ptr;
		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			if (self->joiner_notify == (mach_port_t)0) {
				self->joiner_notify = joinsem;
				joinsem = SEMAPHORE_NULL;
			}
			UNLOCK(self->lock);
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 0, 1);
		} else {
			UNLOCK(self->lock);
			/* with no joiner, we let become available consume our cached ref */
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 1, 1);
		}
	}
	LIBC_ABORT("thread %p didn't exit", self);
}
void
pthread_exit(void *value_ptr)
{
	pthread_t self = pthread_self();
	/* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */
	if (self->wqthread == 0) {
		_pthread_exit(self, value_ptr);
	} else {
		LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
	}
}
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
		      int *policy,
		      struct sched_param *param)
{
	int ret;

	if (thread == NULL)
		return(ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	if (policy != 0)
		*policy = thread->policy;
	if (param != 0)
		*param = thread->param;
	UNLOCK(_pthread_list_lock);

	return(0);
}
/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
static int
pthread_setschedparam_internal(pthread_t thread,
		      mach_port_t kport,
		      int policy,
		      const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy)
	{
		case SCHED_OTHER:
			bases.ts.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.ts;
			count = POLICY_TIMESHARE_BASE_COUNT;
			break;
		case SCHED_FIFO:
			bases.fifo.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.fifo;
			count = POLICY_FIFO_BASE_COUNT;
			break;
		case SCHED_RR:
			bases.rr.base_priority = param->sched_priority;
			/* quantum isn't public yet */
			bases.rr.quantum = param->quantum;
			base = (policy_base_t)&bases.rr;
			count = POLICY_RR_BASE_COUNT;
			break;
		default:
			return (EINVAL);
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	if (ret != KERN_SUCCESS)
		return (EINVAL);
	return (0);
}
int
pthread_setschedparam(pthread_t t,
		      int policy,
		      const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int error = 0;
	int bypass = 1;

	if (t != pthread_self() && t != &_thread ) { //since the main thread will not get de-allocated from underneath us
		bypass = 0;
		if (_pthread_lookup_thread(t, &kport, 0) != 0)
			return(ESRCH);
	} else
		kport = t->kernel_thread;

	error = pthread_setschedparam_internal(t, kport, policy, param);
	if (error == 0) {
		if (bypass == 0) {
			/* ensure the thread is still valid */
			LOCK(_pthread_list_lock);
			if ((error = _pthread_find_thread(t)) != 0) {
				UNLOCK(_pthread_list_lock);
				return(error);
			}
			t->policy = policy;
			t->param = *param;
			UNLOCK(_pthread_list_lock);
		} else {
			t->policy = policy;
			t->param = *param;
		}
	}
	return(error);
}
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
	      pthread_t t2)
{
	return (t1 == t2);
}
__private_extern__ void
_pthread_set_self(pthread_t p)
{
	extern void __pthread_set_self(pthread_t);

	if (p == 0) {
		if (_thread.tsd[0] != 0)
			bzero(&_thread, sizeof(struct _pthread));
		p = &_thread;
	}
	p->tsd[0] = p;
	__pthread_set_self(p);
}
void
cthread_set_self(void *cself)
{
	pthread_t self = pthread_self();

	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		_pthread_set_self(cself);
		return;
	}
	self->cthread_self = cself;
}
void *
ur_cthread_self(void) {
	pthread_t self = pthread_self();

	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		return (void *)self;
	}
	return self->cthread_self;
}
/*
 * Cancellation handler for pthread_once, as the init routine can have a
 * cancellation point. In that case we need to restore the spin unlock.
 */
static void
__pthread_once_cancel_handler(pthread_once_t *once_control)
{
	_spin_unlock(&once_control->lock);
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
	     void (*init_routine)(void))
{
	_spin_lock(&once_control->lock);
	if (once_control->sig == _PTHREAD_ONCE_SIG_init)
	{
		pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
		(*init_routine)();
		pthread_cleanup_pop(0);
		once_control->sig = _PTHREAD_ONCE_SIG;
	}
	_spin_unlock(&once_control->lock);
	return (0); /* Spec defines no possible errors! */
}
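
/*
 * Usage sketch (illustrative; init_subsystem is hypothetical): the initializer
 * runs exactly once no matter how many threads race through pthread_once().
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *	pthread_once(&once, init_subsystem);
 */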
/*
 * Insert a cancellation point in a thread.
 */
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
	LOCK(thread->lock);
	if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
	{
		UNLOCK(thread->lock);
		if (isconforming)
			pthread_exit(PTHREAD_CANCELED);
		else
			pthread_exit(0);
	}
	UNLOCK(thread->lock);
}
int
pthread_getconcurrency(void)
{
	return(pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
	pthread_concurrency = new_level;
	return(0);
}
/*
 * Perform package initialization - called automatically when application starts
 */
__private_extern__ int
pthread_init(void)
{
	pthread_attr_t *attrs;
	pthread_t thread;
	kern_return_t kr;
	host_priority_info_data_t priority_info;
	host_info_t info;
	host_flavor_t flavor;
	host_t host;
	mach_msg_type_number_t count;
	int mib[2];
	int ncpus = 0;
	size_t len;
	void *stackaddr;

	pthreadsize = round_page(sizeof (struct _pthread));
	count = HOST_PRIORITY_INFO_COUNT;
	info = (host_info_t)&priority_info;
	flavor = HOST_PRIORITY_INFO;
	host = mach_host_self();
	kr = host_info(host, flavor, info, &count);
	if (kr != KERN_SUCCESS)
		printf("host_info failed (%d); probably need privilege.\n", kr);
	else {
		default_priority = priority_info.user_priority;
		min_priority = priority_info.minimum_priority;
		max_priority = priority_info.maximum_priority;
	}
	attrs = &_pthread_attr_default;
	pthread_attr_init(attrs);

	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	thread = &_thread;
	TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
	_pthread_set_self(thread);
	__kdebug_trace(0x900000c, thread, 0, 0, 10, 0);

	/* In case of dyld reset the tsd keys from 1 - 10 */
	_pthread_keys_init();

	mib[0] = CTL_KERN;
	mib[1] = KERN_USRSTACK;
	len = sizeof (stackaddr);
	if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
		stackaddr = (void *)USRSTACK;
	_pthread_create(thread, attrs, stackaddr, mach_thread_self());
	thread->stacksize = DFLSSIZ; //initialize main thread's stacksize based on vmparam.h
	thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

	_init_cpu_capabilities();
	if ((ncpus = _NumCPUs()) > 1)
		_spin_tries = MP_SPIN_TRIES;

	workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;

	mach_port_deallocate(mach_task_self(), host);

#if defined(__ppc__)
	IF_ROSETTA() {
		__oldstyle = 1;
	}
#endif

#if defined(_OBJC_PAGE_BASE_ADDRESS)
	{
		vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
		kr = vm_map(mach_task_self(),
			&objcRTPage, vm_page_size * 4, vm_page_size - 1,
			VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use?
			MACH_PORT_NULL,
			(vm_address_t)0, FALSE,
			(vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
			VM_INHERIT_DEFAULT);
		/* We ignore the return result here. The ObjC runtime will just have to deal. */
	}
#endif

	mig_init(1);		/* enable multi-threaded mig interfaces */
	if (__oldstyle == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (__uint64_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (__uint64_t)thread);
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (__uint64_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (__uint64_t)thread);
#endif
	}

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if( (thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_init\n");
#endif
	return 0;
}
int
sched_yield(void)
{
	swtch_pri(0);
	return 0;
}

/* This used to be the "magic" that gets the initialization routine called when the application starts */
static int _do_nothing(void) { return 0; }
int (*_cthread_init_routine)(void) = _do_nothing;
/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t
new_sem_from_pool(void) {
	kern_return_t res;
	semaphore_t sem;
	int i;

	LOCK(sem_pool_lock);
	if (sem_pool_current == sem_pool_count) {
		sem_pool_count += 16;
		sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
		for (i = sem_pool_current; i < sem_pool_count; i++) {
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
		}
	}
	sem = sem_pool[sem_pool_current++];
	UNLOCK(sem_pool_lock);
	return sem;
}
/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
	LOCK(sem_pool_lock);
	sem_pool[--sem_pool_current] = sem;
	UNLOCK(sem_pool_lock);
}
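
/*
 * Usage sketch (illustrative): callers check a semaphore out of the pool,
 * wait/signal on it, and return it; the pool grows in batches of 16 on demand.
 *
 *	semaphore_t s = new_sem_from_pool();
 *	// ... semaphore_wait(s) / semaphore_signal(s) ...
 *	restore_sem_to_pool(s);
 */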
static void sem_pool_reset(void) {
	LOCK(sem_pool_lock);
	sem_pool_count = 0;
	sem_pool_current = 0;
	sem_pool = NULL;
	UNLOCK(sem_pool_lock);
}
__private_extern__ void _pthread_fork_child(pthread_t p) {
	/* Just in case somebody had it locked... */
	UNLOCK(sem_pool_lock);
	sem_pool_reset();
	/* No need to hold the pthread_list_lock as no one other than this
	 * thread is present at this time
	 */
	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
	__kdebug_trace(0x900000c, p, 0, 0, 10, 0);
	_pthread_count = 1;

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if( (p->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_fork_child\n");
#endif
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
	pthread_t self = pthread_self();

	switch (state) {
		case PTHREAD_CANCEL_ENABLE:
			if (conforming)
				__pthread_canceled(1);
			break;
		case PTHREAD_CANCEL_DISABLE:
			if (conforming)
				__pthread_canceled(2);
			break;
		default:
			return EINVAL;
	}

	self = pthread_self();
	LOCK(self->lock);
	if (oldstate)
		*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;
	UNLOCK(self->lock);
	if (!conforming)
		_pthread_testcancel(self, 0); /* See if we need to 'die' now... */
	return (0);
}
/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
{
	LOCK(self->lock);
	self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
	self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
	if (value_ptr == PTHREAD_CANCELED) {
		self->detached |= _PTHREAD_WASCANCEL;
	}
	UNLOCK(self->lock);
}
int
_pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
{
	kern_return_t res;
	int detached = 0, ret;

	__kdebug_trace(0x9000028, thread, 0, 0, 1, 0);

	/* The scenario where the joiner was waiting for the thread and
	 * the pthread detach happened on that thread. Then the semaphore
	 * will trigger but by the time joiner runs, the target thread could be
	 * freed. So we need to make sure that the thread is still in the list
	 * and is joinable before we continue with the join.
	 */
	LOCK(_pthread_list_lock);
	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
		/* the thread might be a detached thread */
		UNLOCK(_pthread_list_lock);
		return(ESRCH);
	}
	/* It is still a joinable thread and needs to be reaped */
	TAILQ_REMOVE(&__pthread_head, thread, plist);
	__kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
	UNLOCK(_pthread_list_lock);

	if (value_ptr)
		*value_ptr = thread->exit_value;
	if (conforming) {
		if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
			*value_ptr = PTHREAD_CANCELED;
		}
	}
	if (thread->reply_port != MACH_PORT_NULL) {
		res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
		if (res != KERN_SUCCESS)
			fprintf(stderr,"mach_port_mod_refs(reply_port) failed: %s\n",mach_error_string(res));
		thread->reply_port = MACH_PORT_NULL;
	}
	if (thread->freeStackOnExit) {
		thread->sig = _PTHREAD_NO_SIG;
		__kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
		vm_deallocate(mach_task_self(), (mach_vm_address_t)(long)thread, pthreadsize);
	} else {
		thread->sig = _PTHREAD_NO_SIG;
		__kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
		free(thread);
	}
	return(0);
}
/* ALWAYS called with list lock and return with list lock */
int
_pthread_find_thread(pthread_t thread)
{
	pthread_t p;

loop:
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p == thread) {
			if (thread->kernel_thread == MACH_PORT_NULL) {
				UNLOCK(_pthread_list_lock);
				sched_yield();
				LOCK(_pthread_list_lock);
				goto loop;
			}
			return(0);
		}
	}
	return(ESRCH);
}
int
_pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
{
	mach_port_t kport;
	int ret = 0;

	if (thread == NULL)
		return(ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
		UNLOCK(_pthread_list_lock);
		return(EINVAL);
	}
	kport = thread->kernel_thread;
	UNLOCK(_pthread_list_lock);
	if (portp != NULL)
		*portp = kport;
	return(0);
}
/* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
{
	attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
	attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG;
	attrp->overcommit = 0;
	return(0);
}
int
pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG)
	{
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*qpriop = attr->queueprio;
		return(0);
	} else
		return(EINVAL); /* Not an attribute structure! */
}
int
pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
{
	int error = 0;

	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		switch (qprio) {
		case WORKQ_HIGH_PRIOQUEUE:
		case WORKQ_DEFAULT_PRIOQUEUE:
		case WORKQ_LOW_PRIOQUEUE:
			attr->queueprio = qprio;
			break;
		default:
			error = EINVAL;
		}
	} else {
		error = EINVAL;
	}
	return (error);
}
int
pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*ocommp = attr->overcommit;
		return(0);
	} else
		return(EINVAL); /* Not an attribute structure! */
}
int
pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
{
	int error = 0;

	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		attr->overcommit = ocomm;
	} else
		error = EINVAL;
	return (error);
}
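/*
 * Usage sketch for the attribute API above (illustrative only, not part of
 * the library build; error handling abbreviated):
 */
#if 0
static int
make_high_prio_overcommit_attr(pthread_workqueue_attr_t *attrp)
{
	int err;

	if ((err = pthread_workqueue_attr_init_np(attrp)) != 0)
		return err;
	if ((err = pthread_workqueue_attr_setqueuepriority_np(attrp, WORKQ_HIGH_PRIOQUEUE)) != 0)
		return err;
	return pthread_workqueue_attr_setovercommit_np(attrp, 1);
}
#endif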
/* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */
static void
workqueue_list_lock()
{
	OSSpinLockLock(&__workqueue_list_lock);
}

static void
workqueue_list_unlock()
{
	OSSpinLockUnlock(&__workqueue_list_lock);
}
int
pthread_workqueue_init_np()
{
	int ret;

	workqueue_list_lock();
	ret = _pthread_work_internal_init();
	workqueue_list_unlock();

	return(ret);
}
int
pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
{
	int error = 0;

	if (queue < 0 || queue > WORKQ_NUM_PRIOQUEUE)
		return(EINVAL);

	error = __workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);

	if (error == -1)
		return(errno);
	return(0);
}
void
pthread_workqueue_atfork_prepare(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386, x86_64 only
	 */
	dispatch_atfork_prepare();
}

void
pthread_workqueue_atfork_parent(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386, x86_64 only
	 */
	dispatch_atfork_parent();
}

void
pthread_workqueue_atfork_child(void)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	/*
	 * NOTE: workq additions here are for
	 * i386, x86_64 and arm only, as ppc
	 * does not support kernel workqueues
	 */
	__workqueue_list_lock = OS_SPINLOCK_INIT;
	if (kernel_workq_setup != 0) {
		kernel_workq_setup = 0;
		_pthread_work_internal_init();
	}
#endif
	dispatch_atfork_child();
}
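/*
 * The three handlers above keep workqueue state consistent across fork():
 * the child cannot inherit workqueue threads, so it resets the list lock
 * and forces the kernel setup to be redone lazily. A minimal sketch of how
 * such handlers are wired up (illustrative; in practice libSystem performs
 * the registration, not client code):
 */
#if 0
	pthread_atfork(pthread_workqueue_atfork_prepare,
	    pthread_workqueue_atfork_parent,
	    pthread_workqueue_atfork_child);
#endif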
static int
_pthread_work_internal_init(void)
{
	int i, error;
	pthread_workqueue_head_t headp;
	pthread_workitem_t witemp;
	pthread_workqueue_t wq;

	if (kernel_workq_setup == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), NULL, NULL, NULL);
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, NULL, NULL);
#endif
		_pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		_pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;

		for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
			headp = __pthread_wq_head_tbl[i];
			TAILQ_INIT(&headp->wqhead);
			headp->next_workq = 0;
		}

		/* create work item and workqueue pools */
		witemp = (struct _pthread_workitem *)malloc(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
		bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE));
		for (i = 0; i < WORKITEM_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
		}
		wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
		bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
		for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
		}

		if ((error = __workq_open()) != 0) {
			/* kernel rejected workqueues: undo the pool setup */
			TAILQ_INIT(&__pthread_workitem_pool_head);
			TAILQ_INIT(&__pthread_workqueue_pool_head);
			free(witemp);
			free(wq);
			return(ENOTSUP);
		}
		kernel_workq_setup = 1;
	}
	return(0);
}
/* This routine is called with the list lock held */
static pthread_workitem_t
alloc_workitem(void)
{
	pthread_workitem_t witem;

	if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
		/* the pool is empty: drop the spinlock across malloc() */
		workqueue_list_unlock();
		witem = malloc(sizeof(struct _pthread_workitem));
		witem->gencount = 0;
		workqueue_list_lock();
	} else {
		witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
		TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
	}
	return(witem);
}
/* This routine is called with the list lock held */
static void
free_workitem(pthread_workitem_t witem)
{
	TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
}
/* This routine is called with the list lock held */
static pthread_workqueue_t
alloc_workqueue(void)
{
	pthread_workqueue_t wq;

	if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
		/* the pool is empty: drop the spinlock across malloc() */
		workqueue_list_unlock();
		wq = malloc(sizeof(struct _pthread_workqueue));
		workqueue_list_lock();
	} else {
		wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
		TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
	}
	return(wq);
}
/* This routine is called with the list lock held */
static void
free_workqueue(pthread_workqueue_t wq)
{
	TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
}
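/*
 * The four routines above share one pattern: a TAILQ of free structures
 * used as a pool, refilled with malloc() only when empty, and with the
 * spinlock dropped around malloc() since it can block. Generic shape
 * (illustrative only; pool_head, entry, drop_pool_lock() and
 * reacquire_pool_lock() are placeholder names):
 */
#if 0
	if (TAILQ_EMPTY(&pool_head)) {
		drop_pool_lock();		/* never call malloc() under a spinlock */
		elem = malloc(sizeof(*elem));
		reacquire_pool_lock();		/* callers must revalidate state here */
	} else {
		elem = TAILQ_FIRST(&pool_head);
		TAILQ_REMOVE(&pool_head, elem, entry);
	}
#endif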
static void
_pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
{
	bzero(wq, sizeof(struct _pthread_workqueue));
	if (attr != NULL) {
		wq->queueprio = attr->queueprio;
		wq->overcommit = attr->overcommit;
	} else {
		wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		wq->overcommit = 0;
	}
	LOCK_INIT(wq->lock);
	TAILQ_INIT(&wq->item_listhead);
	TAILQ_INIT(&wq->item_kernhead);
	__kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
	wq->wq_list.tqe_next = 0;
	wq->wq_list.tqe_prev = 0;
	wq->sig = PTHREAD_WORKQUEUE_SIG;
	wq->headp = __pthread_wq_head_tbl[wq->queueprio];
}
int
valid_workq(pthread_workqueue_t workq)
{
	if (workq->sig == PTHREAD_WORKQUEUE_SIG)
		return(1);
	else
		return(0);
}
/* called with the list lock held; drops it before returning */
static void
pick_nextworkqueue_droplock()
{
	int i, curwqprio, val, found;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t workq;
	pthread_workqueue_t nworkq = NULL;

	__kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
loop:
	while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
		found = 0;
		for (i = 0; i < WQ_NUM_PRIO_QS; i++) {
			wqreadyprio = i;	/* because there is nothing else higher to run */
			headp = __pthread_wq_head_tbl[i];

			if (TAILQ_EMPTY(&headp->wqhead))
				continue;
			workq = headp->next_workq;
			if (workq == NULL)
				workq = TAILQ_FIRST(&headp->wqhead);
			curwqprio = workq->queueprio;
			nworkq = workq;		/* starting point */
			while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
				/* round robin: advance the per-priority cursor, wrapping at the end */
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL)
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
				__kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
				val = post_nextworkitem(workq);

				if (val != 0) {
					/* things could have changed, so reassess */
					/* if the kernel queue is full, skip */
					if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
						break;
					/* if anything with higher prio arrived, then re-evaluate */
					if (wqreadyprio < curwqprio)
						goto loop;	/* we need to re-evaluate again */
					/* we can post some more work items */
					found = 1;
				}

				/* cannot use workq here as it could be freed */
				if (TAILQ_EMPTY(&headp->wqhead))
					break;
				/* if we found nothing to run and there is only one workqueue in the list, skip */
				if ((val == 0) && (workq == headp->next_workq))
					break;
				workq = headp->next_workq;
				if (workq == NULL)
					workq = TAILQ_FIRST(&headp->wqhead);
				curwqprio = workq->queueprio;
				/* if we found nothing to run and are back at the workq where we started */
				if ((val == 0) && (workq == nworkq))
					break;
			}
			if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
				break;
		}
		/* nothing found to run? */
		if (found == 0)
			break;
	}
	workqueue_list_unlock();
}
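/*
 * The scheduling loop above is a per-priority round robin: each bucket
 * keeps a next_workq cursor that advances with TAILQ_NEXT() and wraps to
 * TAILQ_FIRST() when it falls off the end of the list. Reduced to its core
 * (illustrative only):
 */
#if 0
	headp->next_workq = TAILQ_NEXT(workq, wq_list);
	if (headp->next_workq == NULL)
		headp->next_workq = TAILQ_FIRST(&headp->wqhead);
#endif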
static int
post_nextworkitem(pthread_workqueue_t workq)
{
	int error, prio;
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
		return(0);
	}
	__kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
	if (TAILQ_EMPTY(&workq->item_listhead)) {
		return(0);
	}
	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
		return(0);

	witem = TAILQ_FIRST(&workq->item_listhead);
	headp = workq->headp;
	__kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
	if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
		__kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		/* A barrier when nothing is outstanding also needs to be handled */
		/* Nothing to wait for */
		if (workq->kq_count != 0) {
			witem->flags |= PTH_WQITEM_APPLIED;
			workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
			workq->barrier_count = workq->kq_count;
			__kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
			return(1);
		} else {
			__kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
			if (witem->func != NULL) {
				/* since we are going to drop the list lock */
				witem->flags |= PTH_WQITEM_APPLIED;
				workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))witem->func;
				(*func)(workq, witem->func_arg);
				__kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
				workqueue_list_lock();
				workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
			}
			TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
			free_workitem(witem);
			__kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
			return(1);
		}
	} else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
		__kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		witem->flags |= PTH_WQITEM_APPLIED;
		workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
		workq->barrier_count = workq->kq_count;
		workq->term_callback = (void (*)(struct _pthread_workqueue *, void *))witem->func;
		workq->term_callarg = witem->func_arg;
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
			if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
			}
			free_workitem(witem);
			workq->flags |= PTHREAD_WORKQ_DESTROYED;
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
			headp = __pthread_wq_head_tbl[workq->queueprio];
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
			free_workqueue(workq);
			return(1);
		} else {
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
		}
		__kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
		return(1);
	} else {
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
		TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
		if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
			workq->kq_count++;
			witem->flags |= PTH_WQITEM_KERN_COUNT;
		}
		OSAtomicIncrement32(&kernel_workq_count);
		workqueue_list_unlock();

		prio = workq->queueprio;
		if (workq->overcommit != 0) {
			prio |= WORKQUEUE_OVERCOMMIT;
		}

		if ((error = __workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
			/* kernel refused the item: requeue it at the head of the list */
			OSAtomicDecrement32(&kernel_workq_count);
			workqueue_list_lock();
			__kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
			TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
			if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
				workq->flags |= PTHREAD_WORKQ_REQUEUED;
		} else
			workqueue_list_lock();
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
		return(1);
	}
	/* no one should come here */
	printf("error in logic for next workitem\n");
	LIBC_ABORT("error in logic for next workitem");
	return(0);
}
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
{
	int ret;
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workqueue_t workq;
#if WQ_DEBUG
	pthread_t pself;
#endif

	workq = item->workq;
	if (reuse == 0) {
		/* reuse is set to 0 when a thread is newly created to run a workitem */
		_pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
		self->wqthread = 1;
		self->wqkillset = 0;
		self->parentcheck = 1;

		/* These are not joinable threads */
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
		__kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
		self->kernel_thread = kport;
		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
		/* Add to the pthread list */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
		__kdebug_trace(0x900000c, self, 0, 0, 10, 0);
		_pthread_count++;
		UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
			printf("Failed to set thread_id in pthread_wqthread\n");
#endif
	} else {
		/* reuse is set to 1 when a thread is reused to run another work item */
		__kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
		/* reset all tsd from 1 to KEYS_MAX */
		if (self == NULL)
			LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);

		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
	}

#if WQ_DEBUG
	if (reuse == 0) {
		pself = pthread_self();
		if (self != pself) {
			__kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
			printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
			_pthread_set_self(self);
			pself = pthread_self();
			if (self != pself)
				printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
		}
	} else {
		pself = pthread_self();
		if (self != pself) {
			printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
			LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
		}
	}
#endif /* WQ_DEBUG */

	self->cur_workq = workq;
	self->cur_workitem = item;
	OSAtomicDecrement32(&kernel_workq_count);

	ret = (int)(intptr_t)(*self->fun)(self->arg);

	/* If we reach here without going through the above initialization path then don't go through
	 * with the teardown code path (e.g. setjmp/longjmp). Instead just exit this thread.
	 */
	if (self != pthread_self()) {
		pthread_exit(PTHREAD_CANCELED);
	}

	workqueue_exit(self, workq, item);
}
static void
workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	workqueue_list_lock();

	TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
	workq->kq_count--;
	__kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
	free_workitem(item);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
		__kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
		if (workq->barrier_count <= 0) {
			/* Need to remove the barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY | PTH_WQITEM_APPLIED)) == 0)
				printf("Incorrect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and a callback is registered, run it */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
			free_workitem(baritem);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
			__kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
			} else {
				/* if there is a higher prio schedulable item, reset to wqreadyprio */
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
					wqreadyprio = workq->queueprio;
			}
		}
	} else {
		__kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
	}
	__kdebug_trace(0x900005c, self, item, 0, 0, 0);
	pick_nextworkqueue_droplock();
	_pthread_workq_return(self);
}
static void
_pthread_workq_return(pthread_t self)
{
	__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);

	/* This is the way to terminate the thread */
	_pthread_exit(self, NULL);
}
/* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
{
	pthread_workqueue_t wq;
	pthread_workqueue_head_t headp;

#if defined(__ppc__)
	IF_ROSETTA() {
		return(ENOTSUP);
	}
#endif
	if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
		return(EINVAL);
	}

	if (__is_threaded == 0)
		__is_threaded = 1;

	workqueue_list_lock();
	if (kernel_workq_setup == 0) {
		int ret = _pthread_work_internal_init();
		if (ret != 0) {
			workqueue_list_unlock();
			return(ret);
		}
	}

	wq = alloc_workqueue();

	_pthread_workq_init(wq, attr);

	headp = __pthread_wq_head_tbl[wq->queueprio];
	TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
	if (headp->next_workq == NULL) {
		headp->next_workq = TAILQ_FIRST(&headp->wqhead);
	}

	workqueue_list_unlock();

	*workqp = wq;

	return(0);
}
int
pthread_workqueue_additem_np(pthread_workqueue_t workq, void (*workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
{
	pthread_workitem_t witem;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->func = workitem_func;
	witem->func_arg = workitem_arg;
	witem->workq = workq;
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;

	/* alloc_workitem() can drop the lock, check the state */
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		free_workitem(witem);
		workqueue_list_unlock();
		return(ESRCH);
	}

	if (itemhandlep != NULL)
		*itemhandlep = (pthread_workitem_handle_t *)witem;
	if (gencountp != NULL)
		*gencountp = witem->gencount;
	__kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
	__kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);

	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
		wqreadyprio = workq->queueprio;

	pick_nextworkqueue_droplock();

	return(0);
}
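/*
 * End-to-end usage sketch for the two public entry points above
 * (illustrative only; my_work() and submit_work() are placeholder names
 * and error handling is abbreviated):
 */
#if 0
static void
my_work(void *arg)
{
	/* runs on a kernel-provided workqueue thread */
}

static int
submit_work(void)
{
	pthread_workqueue_t wq;
	pthread_workitem_handle_t handle;
	unsigned int gencount;
	int err;

	if ((err = pthread_workqueue_create_np(&wq, NULL)) != 0)
		return err;
	return pthread_workqueue_additem_np(wq, my_work, NULL, &handle, &gencount);
}
#endif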
int
pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
{
	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	if (ocommp != NULL)
		*ocommp = workq->overcommit;

	return(0);
}
/* DEPRECATED
int pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle, unsigned int gencount)
int pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, pthread_workitem_handle_t *itemhandlep, unsigned int *gencountp)
int pthread_workqueue_suspend_np(pthread_workqueue_t workq)
int pthread_workqueue_resume_np(pthread_workqueue_t workq)
*/
#else /* !BUILDING_VARIANT ] [ */
extern int __unix_conforming;
extern int _pthread_count;
extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);

#endif /* !BUILDING_VARIANT ] */
__private_extern__ void
__posix_join_cleanup(void *arg)
{
	pthread_t thread = (pthread_t)arg;
	int already_exited, res;
	void * dummy;
	semaphore_t death;
	int newstyle;
	mach_port_t joinport;

	LOCK(thread->lock);
	already_exited = (thread->detached & _PTHREAD_EXITED);

	newstyle = thread->newstyle;
	__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
	if (newstyle == 0) {
		death = thread->death;
		if (!already_exited) {
			thread->joiner = (struct _pthread *)NULL;
			UNLOCK(thread->lock);
			restore_sem_to_pool(death);
		} else {
			UNLOCK(thread->lock);
			while ((res = _pthread_reap_thread(thread,
					thread->kernel_thread,
					&dummy, 1)) == EAGAIN)
			{
				sched_yield();
			}
			restore_sem_to_pool(death);
		}
	} else {
		/* leave another thread to join */
		thread->joiner = (struct _pthread *)NULL;
		UNLOCK(thread->lock);
	}
}
/*
 * Wait for a thread to terminate and obtain its exit value.
 */
/*
int
pthread_join(pthread_t thread, void **value_ptr)

moved to pthread_cancelable.c */
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if (_pthread_lookup_thread(thread, NULL, 0) != 0)
		return(ESRCH);

	/* if the thread is a workqueue thread, then return an error */
	if (thread->wqthread != 0) {
		return(ENOTSUP);
	}
#if __DARWIN_UNIX03
	int state;

	LOCK(thread->lock);
	state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
	UNLOCK(thread->lock);
	if (state & PTHREAD_CANCEL_ENABLE)
		__pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
	thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
	return (0);
}
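/*
 * Usage sketch for pthread_cancel() (illustrative only): a joiner sees
 * PTHREAD_CANCELED as the exit value of a canceled thread, as arranged by
 * _pthread_join_cleanup() above. worker() is a placeholder.
 */
#if 0
static void *
worker(void *arg)
{
	for (;;)
		pthread_testcancel();	/* deferred cancellation point */
	return NULL;
}

static void
cancel_and_reap(void)
{
	pthread_t t;
	void *res;

	if (pthread_create(&t, NULL, worker, NULL) != 0)
		return;
	(void)pthread_cancel(t);
	if (pthread_join(t, &res) == 0 && res == PTHREAD_CANCELED) {
		/* the thread was canceled before returning normally */
	}
}
#endif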
void
pthread_testcancel(void)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
	_pthread_testcancel(self, 1);
#else /* __DARWIN_UNIX03 */
	_pthread_testcancel(self, 0);
#endif /* __DARWIN_UNIX03 */
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	return (_pthread_setcancelstate_internal(state, oldstate, 1));
#else /* __DARWIN_UNIX03 */
	return (_pthread_setcancelstate_internal(state, oldstate, 0));
#endif /* __DARWIN_UNIX03 */
}
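/*
 * Typical use of pthread_setcancelstate() (illustrative only): disable
 * cancellation around a section that must not be torn down mid-flight,
 * then restore the caller's previous state.
 */
#if 0
	int oldstate;

	(void)pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	/* ... code that must not be interrupted by cancellation ... */
	(void)pthread_setcancelstate(oldstate, NULL);
#endif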
/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if ((type != PTHREAD_CANCEL_DEFERRED) &&
	    (type != PTHREAD_CANCEL_ASYNCHRONOUS))
		return EINVAL;
	self = pthread_self();
	LOCK(self->lock);
	if (oldtype)
		*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state |= type;
	UNLOCK(self->lock);
#if !__DARWIN_UNIX03
	_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
#endif /* !__DARWIN_UNIX03 */
	return (0);
}
int
pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
{
#if __DARWIN_UNIX03
	int err = 0;

	if (__pthread_sigmask(how, set, oset) == -1) {
		err = errno;
	}
	return(err);
#else /* __DARWIN_UNIX03 */
	return(__pthread_sigmask(how, set, oset));
#endif /* __DARWIN_UNIX03 */
}
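/*
 * Usage sketch for pthread_sigmask() (illustrative only): block SIGUSR1 in
 * the calling thread, keeping the old mask so it can be restored later.
 * Note the conforming variant returns the error code rather than setting
 * errno. block_sigusr1() is a placeholder name.
 */
#if 0
static int
block_sigusr1(sigset_t *osetp)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	return pthread_sigmask(SIG_BLOCK, &set, osetp);
}
#endif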
/*
int
sigwait(const sigset_t * set, int * sig)

moved to pthread_cancelable.c */