/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */
52 #include "pthread_internals.h"
53 #include "pthread_workqueue.h"
56 #include <stdio.h> /* For printf(). */
58 #include <errno.h> /* For __mach_errno_addr() prototype. */
61 #include <sys/resource.h>
62 #include <sys/sysctl.h>
63 #include <sys/queue.h>
65 #include <machine/vmparam.h>
66 #include <mach/vm_statistics.h>
67 #include <mach/mach_init.h>
68 #define __APPLE_API_PRIVATE
69 #include <machine/cpu_capabilities.h>
70 #include <libkern/OSAtomic.h>
72 #include <libkern/OSCrossEndian.h>
extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);

#ifndef BUILDING_VARIANT /* [ */
__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);

int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];
/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);
static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void *stack, size_t stacksize, int kernalloc, int nozero);
static int _new_pthread_create_suspended(pthread_t *thread,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *),
		void *arg,
		int create_susp);

/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;
int _pthread_find_thread(pthread_t thread);
/* Mach message used to notify that a thread needs to be reaped */

typedef struct _pthread_reap_msg_t {
	mach_msg_header_t header;
	pthread_t thread;
	mach_msg_trailer_t trailer;
} pthread_reap_msg_t;

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};
/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;
int __unix_conforming = 0;
__private_extern__ size_t pthreadsize = 0;

/* under rosetta we will use old style creation of threads */
static int __oldstyle = 0;

__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
/* Same implementation as LOCK, but without the __is_threaded check */
extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
__private_extern__ void
_spin_lock_retry(pthread_lock_t *lock)
{
	int tries = _spin_tries;
	do {
		if (tries-- > 0)
			continue;
		syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
		tries = _spin_tries;
	} while (!_spin_lock_try(lock));
}
static mach_port_t thread_recycle_port = MACH_PORT_NULL;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static pthread_attr_t _pthread_attr_default = {0};
static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t *attr);
static int kernel_workq_setup = 0;
static volatile int32_t kernel_workq_count = 0;
static volatile unsigned int user_workq_count = 0;	/* number of outstanding workqueues */
static volatile unsigned int user_workitem_count = 0;	/* number of outstanding workitems */
#define KERNEL_WORKQ_ELEM_MAX 64	/* Max number of elements in the kernel */
static int wqreadyprio = 0;	/* current highest prio queue ready with items */
__private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
__private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);

static struct _pthread_workitem *__workqueue_pool_ptr;
static size_t __workqueue_pool_size = 0;
static int __workqueue_nitems = 0;

struct _pthread_workqueue_head __pthread_workq0_head;
struct _pthread_workqueue_head __pthread_workq1_head;
struct _pthread_workqueue_head __pthread_workq2_head;
struct _pthread_workqueue_head __pthread_workq3_head;
pthread_workqueue_head_t __pthread_wq_head_tbl[WORKQ_NUM_PRIOQUEUE] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head};
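/*
 * Note added for clarity, assuming the usual WORKQ_*_PRIOQUEUE numbering
 * (HIGH = 0 ... BG = 3): the index into this table is the workqueue
 * priority band, so __pthread_workq0_head serves the high-priority band
 * down to __pthread_workq3_head for background. pthread_init() seeds
 * workq_targetconc[] for the same four bands, one concurrency slot per CPU.
 */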
static void workqueue_list_lock(void);
static void workqueue_list_unlock(void);
static int valid_workq(pthread_workqueue_t);
static void pick_nextworkqueue_droplock(void);
static int post_nextworkitem(pthread_workqueue_t workq);
static void _pthread_workq_return(pthread_t self);
static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, pthread_workitem_t item, int reuse);
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags);
static pthread_workitem_t alloc_workitem(void);
static void free_workitem(pthread_workitem_t);
static void grow_workitem(void);
static pthread_workqueue_t alloc_workqueue(void);
static void free_workqueue(pthread_workqueue_t);
static int _pthread_work_internal_init(void);
static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);
void _pthread_fork_child_postinit();

void pthread_workqueue_atfork_prepare(void);
void pthread_workqueue_atfork_parent(void);
void pthread_workqueue_atfork_child(void);

extern void dispatch_atfork_prepare(void);
extern void dispatch_atfork_parent(void);
extern void dispatch_atfork_child(void);
/* workq_kernreturn commands */
#define WQOPS_QUEUE_ADD 1
#define WQOPS_QUEUE_REMOVE 2
#define WQOPS_THREAD_RETURN 4
#define WQOPS_THREAD_SETCONC 8

/*
 * Flags field passed to bsdthread_create and back in pthread_start
 * 31  <---------------------------------> 0
 * _________________________________________
 * | flags(8) | policy(8) | importance(16) |
 * -----------------------------------------
 */
void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int flags);

void _pthread_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, pthread_workitem_t item, int reuse);

#define PTHREAD_START_CUSTOM		0x01000000
#define PTHREAD_START_SETSCHED		0x02000000
#define PTHREAD_START_DETACHED		0x04000000
#define PTHREAD_START_POLICY_BITSHIFT	16
#define PTHREAD_START_POLICY_MASK	0xff
#define PTHREAD_START_IMPORTANCE_MASK	0xffff
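/*
 * Illustrative sketch (added; not in the original source): how the flags
 * word diagrammed above gets packed. For a detached thread with an
 * explicitly-set SCHED_RR policy at priority 31, the creation path builds:
 *
 *	unsigned int flags = PTHREAD_START_DETACHED | PTHREAD_START_SETSCHED;
 *	flags |= ((SCHED_RR & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
 *	flags |= (31 & PTHREAD_START_IMPORTANCE_MASK);
 *
 * which mirrors how _new_pthread_create_suspended() composes flags before
 * calling __bsdthread_create(), and how _pthread_start() unpacks them.
 */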
static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
extern pthread_t __bsdthread_create(void *(*func)(void *), void *func_arg, void *stack, pthread_t thread, unsigned int flags);
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), int, void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *, __uint64_t);
extern int __bsdthread_terminate(void *freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid(void);
extern int __pthread_canceled(int);
extern void _pthread_keys_init(void);
extern int __pthread_kill(mach_port_t, int);
extern int __pthread_markcancel(int);
extern int __workq_open(void);

#define WORKQUEUE_OVERCOMMIT 0x10000

extern int __workq_kernreturn(int, pthread_workitem_t, int, int);
#if defined(__ppc__) || defined(__ppc64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__) || defined(__x86_64__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#elif defined(__arm__)
static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
#else
#error Need to define a stack address hint for this architecture
#endif
/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
 * The guard pages for stack-overflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr) then there are no guard pages
 * set up for the thread.
 */

static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
	kern_return_t kr;
	vm_address_t stackaddr;
	size_t guardsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);
	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		return 0;
	}

	guardsize = attrs->guardsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
			attrs->stacksize + guardsize,
			vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
				&stackaddr, attrs->stacksize + guardsize,
				VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}
	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	if (guardsize)
		kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);
	return 0;
}
static int
_pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread)
{
	pthread_t t;
	kern_return_t kr;
	vm_address_t stackaddr;
	size_t guardsize, allocsize;

	assert(attrs->stacksize >= PTHREAD_STACK_MIN);

	if (attrs->stackaddr != NULL) {
		/* No guard pages setup in this case */
		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
		*stack = attrs->stackaddr;
		t = (pthread_t)malloc(pthreadsize);
		_pthread_struct_init(t, attrs, attrs->stackaddr, 0, 0, 0);
		t->freeStackOnExit = 0;
		t->freeaddr = 0;
		t->freesize = 0;
		*thread = t;
		return 0;
	}

	guardsize = attrs->guardsize;
	allocsize = attrs->stacksize + guardsize + pthreadsize;
	stackaddr = PTHREAD_STACK_HINT;
	kr = vm_map(mach_task_self(), &stackaddr,
			allocsize,
			vm_page_size - 1,
			VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
			0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
			VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		kr = vm_allocate(mach_task_self(),
				&stackaddr, allocsize,
				VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return EAGAIN;
	}
	/* The guard page is at the lowest address */
	/* The stack base is the highest address */
	if (guardsize)
		kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);

	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);

	t = (pthread_t)(stackaddr + attrs->stacksize + guardsize);
	_pthread_struct_init(t, attrs, *stack, 0, 0, 1);
	t->kernalloc = 0;
	t->freesize = allocsize;
	t->freeaddr = (void *)stackaddr;
	t->freeStackOnExit = 1;
	*thread = t;

	return 0;
}
static kern_return_t
_pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
{
	kern_return_t res = 0;
	vm_address_t freeaddr;
	size_t freesize;
	int thread_count;
	mach_port_t kport;
	semaphore_t joinsem = SEMAPHORE_NULL;

#if PTH_TRACE
	__kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
#endif
	kport = t->kernel_thread;
	joinsem = t->joiner_notify;

	if (t->freeStackOnExit) {
		freeaddr = (vm_address_t)t->freeaddr;
		if (freestruct)
			freesize = t->stacksize + t->guardsize + pthreadsize;
		else
			freesize = t->stacksize + t->guardsize;
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
				/* if parent has not returned from create yet keep pthread_t */
#if PTH_LISTTRACE
				__kdebug_trace(0x9000010, t, 0, 0, 1, 0);
#endif
				if (t->parentcheck == 0)
					freesize -= pthreadsize;
			}
			t->childexit = 1;
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);

#if PTH_TRACE
			__kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
#endif
			if (thread_count <= 0)
				exit(0);
			else
				__bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
			LIBC_ABORT("thread %p didn't terminate", t);
		} else {
#if PTH_TRACE
			__kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
#endif
			res = vm_deallocate(mach_task_self(), freeaddr, freesize);
		}
	} else {
		if (termthread) {
			mig_dealloc_reply_port(MACH_PORT_NULL);
			LOCK(_pthread_list_lock);
			if (freestruct != 0) {
				TAILQ_REMOVE(&__pthread_head, t, plist);
#if PTH_LISTTRACE
				__kdebug_trace(0x9000010, t, 0, 0, 2, 0);
#endif
			}
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);

			if (freestruct) {
#if PTH_TRACE
				__kdebug_trace(0x9000008, t, 0, 0, 2, 0);
#endif
				free(t);
			}

#if PTH_TRACE
			__kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
#endif
			if (thread_count <= 0)
				exit(0);
			else
				__bsdthread_terminate(NULL, 0, kport, joinsem);
			LIBC_ABORT("thread %p didn't terminate", t);
		} else if (freestruct) {
			t->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
			__kdebug_trace(0x9000024, t, 0, 0, 2, 0);
#endif
			free(t);
		}
	}
	return (res);
}
/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		attr->sig = 0;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*detachstate = attr->detached;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*inheritsched = attr->inherit;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*param = attr->param;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		*policy = attr->policy;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/* Retain the existing stack size of 512K and not depend on Main thread default stack size */
static const size_t DEFAULT_STACK_SIZE = (512 * 1024);

/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
	attr->stacksize = DEFAULT_STACK_SIZE;
	attr->stackaddr = NULL;
	attr->sig = _PTHREAD_ATTR_SIG;
	attr->param.sched_priority = default_priority;
	attr->param.quantum = 10; /* quantum isn't public yet */
	attr->detached = PTHREAD_CREATE_JOINABLE;
	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->freeStackOnExit = 1;
	attr->fastpath = 1;
	attr->schedset = 0;
	attr->guardsize = vm_page_size;
	return (0);
}
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
		    (detachstate == PTHREAD_CREATE_DETACHED))
		{
			attr->detached = detachstate;
			return (0);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
		    (inheritsched == PTHREAD_EXPLICIT_SCHED))
		{
			attr->inherit = inheritsched;
			return (0);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		/* TODO: Validate sched_param fields */
		attr->param = *param;
		attr->schedset = 1;
		return (0);
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
	if (attr->sig == _PTHREAD_ATTR_SIG)
	{
		if ((policy == SCHED_OTHER) ||
		    (policy == SCHED_RR) ||
		    (policy == SCHED_FIFO))
		{
			attr->policy = policy;
			attr->schedset = 1;
			return (0);
		} else
		{
			return (EINVAL);
		}
	} else
	{
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		if (scope == PTHREAD_SCOPE_SYSTEM) {
			/* No attribute yet for the scope */
			return (0);
		} else if (scope == PTHREAD_SCOPE_PROCESS) {
			return (ENOTSUP);
		}
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(const pthread_attr_t *attr, int *scope)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*scope = PTHREAD_SCOPE_SYSTEM;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = attr->stackaddr;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
		attr->stackaddr = stackaddr;
		attr->freeStackOnExit = 0;
		attr->fastpath = 0;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stacksize = attr->stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stacksize = stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
		*stacksize = attr->stacksize;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
/* By SUSV spec, the stackaddr is the base address, the lowest addressable
 * byte address. This is not the same as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
	if ((attr->sig == _PTHREAD_ATTR_SIG) &&
	    (((uintptr_t)stackaddr % vm_page_size) == 0) &&
	    ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
		attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
		attr->stacksize = stacksize;
		attr->freeStackOnExit = 0;
		attr->fastpath = 0;
		return (0);
	} else {
		return (EINVAL); /* Not an attribute structure! */
	}
}
/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		/* Guardsize of 0 is valid, it means no guard */
		if ((guardsize % vm_page_size) == 0) {
			attr->guardsize = guardsize;
			attr->fastpath = 0;
			return (0);
		} else
			return (EINVAL);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
	if (attr->sig == _PTHREAD_ATTR_SIG) {
		*guardsize = attr->guardsize;
		return (0);
	}
	return (EINVAL); /* Not an attribute structure! */
}
/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
	_pthread_set_self(self);
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in _pthread_body\n");
#endif
	_pthread_exit(self, (self->fun)(self->arg));
}
void
_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *funarg, size_t stacksize, unsigned int pflags)
{
	pthread_t pself;
	pthread_attr_t *attrs = &_pthread_attr_default;
	char *stackaddr;

	if ((pflags & PTHREAD_START_CUSTOM) == 0) {
		stackaddr = (char *)self;
		_pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
		LOCK(_pthread_list_lock);
		if (pflags & PTHREAD_START_SETSCHED) {
			self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
			self->param.sched_priority = (pflags & PTHREAD_START_IMPORTANCE_MASK);
		}
		/* These are not joinable threads */
		if ((pflags & PTHREAD_START_DETACHED) == PTHREAD_START_DETACHED) {
			self->detached &= ~PTHREAD_CREATE_JOINABLE;
			self->detached |= PTHREAD_CREATE_DETACHED;
		}
	} else {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
		LOCK(_pthread_list_lock);
	}
	self->kernel_thread = kport;
	self->fun = fun;
	self->arg = funarg;

	/* Add to the pthread list */
	if (self->parentcheck == 0) {
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if PTH_LISTTRACE
		__kdebug_trace(0x900000c, self, 0, 0, 3, 0);
#endif
		_pthread_count++;
	}
	self->childrun = 1;
	UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_start\n");
#endif

	pself = pthread_self();
	if (self != pself)
		LIBC_ABORT("self %p != pself %p", self, pself);

#if PTH_TRACE
	__kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
#endif

	_pthread_exit(self, (self->fun)(self->arg));
}
static int
_pthread_create(pthread_t t,
		const pthread_attr_t *attrs,
		void *stack,
		const mach_port_t kernel_thread)
{
	int res;
	res = 0;

	do {
		memset(t, 0, sizeof(*t));
		t->tsd[0] = t;
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
		t->guardsize = attrs->guardsize;
		t->kernel_thread = kernel_thread;
		t->detached = attrs->detached;
		t->inherit = attrs->inherit;
		t->policy = attrs->policy;
		t->param = attrs->param;
		t->freeStackOnExit = attrs->freeStackOnExit;
		t->sig = _PTHREAD_SIG;
		t->reply_port = MACH_PORT_NULL;
		t->cthread_self = NULL;
		t->plist.tqe_next = (struct _pthread *)0;
		t->plist.tqe_prev = (struct _pthread **)0;
		t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
		t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
		t->death = SEMAPHORE_NULL;

		if (kernel_thread != MACH_PORT_NULL)
			(void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
	} while (0);
	return (res);
}
static void
_pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void *stack, size_t stacksize, int kernalloc, int nozero)
{
	mach_vm_offset_t stackaddr = (mach_vm_offset_t)(uintptr_t)stack;

	if (nozero == 0) {
		memset(t, 0, sizeof(*t));
		t->plist.tqe_next = (struct _pthread *)0;
		t->plist.tqe_prev = (struct _pthread **)0;
	}
	t->schedset = attrs->schedset;
	t->tsd[0] = t;
	if (kernalloc != 0) {
		stackaddr = (mach_vm_offset_t)(uintptr_t)t;

		/* if allocated from kernel set values appropriately */
		t->stacksize = stacksize;
		t->stackaddr = (void *)(uintptr_t)stackaddr;
		t->freeStackOnExit = 1;
		t->freeaddr = (void *)(uintptr_t)(stackaddr - stacksize - vm_page_size);
		t->freesize = pthreadsize + stacksize + vm_page_size;
	} else {
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
	}
	t->guardsize = attrs->guardsize;
	t->detached = attrs->detached;
	t->inherit = attrs->inherit;
	t->policy = attrs->policy;
	t->param = attrs->param;
	t->sig = _PTHREAD_SIG;
	t->reply_port = MACH_PORT_NULL;
	t->cthread_self = NULL;
	t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
	t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
	t->death = SEMAPHORE_NULL;
	t->newstyle = 1;
	t->kernalloc = kernalloc;
}
/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
	return __is_threaded;
}

/* Non-portable public api to know whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread is
 * in the process of creation at the time of the call. It does not tell whether
 * there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
	return (__is_threaded);
}
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
	mach_port_t kport = MACH_PORT_NULL;

	if (t == NULL)
		goto out;

	/*
	 * If the call is on self, return the kernel port. We cannot
	 * add this bypass for main thread as it might have exited,
	 * and we should not return stale port info.
	 */
	if (t == pthread_self())
	{
		kport = t->kernel_thread;
		goto out;
	}

	if (_pthread_lookup_thread(t, &kport, 0) != 0)
		return ((mach_port_t)0);
out:
	return (kport);
}
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
	struct _pthread *p = NULL;

	/* No need to wait as mach port is already known */
	LOCK(_pthread_list_lock);
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p->kernel_thread == kernel_thread)
			break;
	}
	UNLOCK(_pthread_list_lock);
	return p;
}
size_t
pthread_get_stacksize_np(pthread_t t)
{
	int ret;
	size_t size = 0;

	if (t == NULL)
		return (ESRCH);

	if (t == pthread_self() || t == &_thread)	// since the main thread will not get de-allocated from underneath us
	{
		size = t->stacksize;
		return size;
	}

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	size = t->stacksize;
	UNLOCK(_pthread_list_lock);

	return (size);
}
void *
pthread_get_stackaddr_np(pthread_t t)
{
	int ret;
	void *addr = NULL;

	if (t == NULL)
		return ((void *)(uintptr_t)ESRCH);

	if (t == pthread_self() || t == &_thread)	// since the main thread will not get deallocated from underneath us
		return t->stackaddr;

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(t)) != 0) {
		UNLOCK(_pthread_list_lock);
		return ((void *)(uintptr_t)ret);
	}
	addr = t->stackaddr;
	UNLOCK(_pthread_list_lock);

	return (addr);
}
mach_port_t
_pthread_reply_port(pthread_t t)
{
	return t->reply_port;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
	pthread_t self = pthread_self();

	return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
/* if we are passed in a pthread_t that is NULL, then we return
   the current thread's thread_id. So folks don't have to call
   pthread_self, in addition to us doing it, if they just want
   their thread_id.
*/
int
pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
{
	int rval = 0;
	pthread_t self = pthread_self();

	if (thread_id == NULL) {
		return (EINVAL);
	} else if (thread == NULL || thread == self) {
		*thread_id = self->thread_id;
		return rval;
	}

	LOCK(_pthread_list_lock);
	if ((rval = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (rval);
	}
	*thread_id = thread->thread_id;
	UNLOCK(_pthread_list_lock);
	return rval;
}
#endif
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
	int rval = 0;

	if (thread == NULL)
		return (ESRCH);

	LOCK(_pthread_list_lock);
	if ((rval = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (rval);
	}
	strlcpy(threadname, thread->pthread_name, len);
	UNLOCK(_pthread_list_lock);
	return rval;
}
int
pthread_setname_np(const char *threadname)
{
	int rval = 0;
	size_t len;

	len = strlen(threadname);

	/* prototype is in pthread_internals.h */
	rval = proc_setthreadname((void *)threadname, len);
	if (rval == 0) {
		strlcpy((pthread_self())->pthread_name, threadname, MAXTHREADNAMESIZE);
	}
	return rval;
}
static int
_new_pthread_create_suspended(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg,
	       int create_susp)
{
	pthread_attr_t *attrs;
	void *stack;
	int error;
	unsigned int flags;
	pthread_t t, t2;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;
	task_t self = mach_task_self();
	int kernalloc = 0;
	int susp = create_susp;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	error = 0;

	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
	    (attrs->param.sched_priority != default_priority)) && (create_susp == 0)) {
		needresume = 1;
		susp = 1;
	} else
		needresume = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
	 * any change in priority or policy is needed here.
	 */
	if ((__oldstyle == 1) || (create_susp != 0)) {
		/* Rosetta or pthread_create_suspended() */
		/* running under rosetta */
		/* Allocate a stack for the thread */
#if PTH_TRACE
		__kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
#endif
		if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
			return (error);
		}
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;
		if (susp) {
			/* Create the Mach thread for this thread */
			PTHREAD_MACH_CALL(thread_create(self, &kernel_thread), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create thread: %d\n", kern_res);
				return (EINVAL);
			}
		}
		if ((error = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			return (error);
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on it's way */
		t->arg = arg;
		t->fun = start_routine;
		t->newstyle = 0;
		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
#if PTH_LISTTRACE
		__kdebug_trace(0x900000c, t, 0, 0, 4, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, susp, needresume);
		return (0);
	} else {
		flags = 0;
		if (attrs->fastpath == 1)
			kernalloc = 1;

		if (attrs->detached == PTHREAD_CREATE_DETACHED)
			flags |= PTHREAD_START_DETACHED;
		if (attrs->schedset != 0) {
			flags |= PTHREAD_START_SETSCHED;
			flags |= ((attrs->policy & PTHREAD_START_POLICY_MASK) << PTHREAD_START_POLICY_BITSHIFT);
			flags |= (attrs->param.sched_priority & PTHREAD_START_IMPORTANCE_MASK);
		}

		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		if (kernalloc == 0) {
			/* Allocate a stack for the thread */
			flags |= PTHREAD_START_CUSTOM;
			if ((error = _pthread_create_pthread_onstack(attrs, &stack, &t)) != 0) {
				return (error);
			}
			/* Send it on it's way */
			t->arg = arg;
			t->fun = start_routine;
			t->newstyle = 1;

#if PTH_TRACE
			__kdebug_trace(0x9000004, t, flags, 0, 0, 0);
#endif

			if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
				_pthread_free_pthread_onstack(t, 1, 0);
				return (EAGAIN);
			}
			else
				t = t2;
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
#if PTH_TRACE
				__kdebug_trace(0x9000008, t, 0, 0, 1, 0);
#endif
				if (t->freeStackOnExit)
					vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
				else
					free(t);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
#if PTH_LISTTRACE
				__kdebug_trace(0x900000c, t, 0, 0, 1, 0);
#endif
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;

#if PTH_TRACE
			__kdebug_trace(0x9000014, t, 0, 0, 1, 0);
#endif
			return (0);
		} else {
			/* kernel allocation */
#if PTH_TRACE
			__kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
#endif
			if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
				return (EAGAIN);
			/* Now set it up to execute */
			LOCK(_pthread_list_lock);
			t->parentcheck = 1;
			if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
				/* detached child exited, mop up */
				UNLOCK(_pthread_list_lock);
#if PTH_TRACE
				__kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
#endif
				vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
			} else if (t->childrun == 0) {
				TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
				_pthread_count++;
#if PTH_LISTTRACE
				__kdebug_trace(0x900000c, t, 0, 0, 2, 0);
#endif
				UNLOCK(_pthread_list_lock);
			} else
				UNLOCK(_pthread_list_lock);

			*thread = t;

#if PTH_TRACE
			__kdebug_trace(0x9000014, t, 0, 0, 2, 0);
#endif
			return (0);
		}
	}
}
static int
_pthread_create_suspended(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg,
	       int suspended)
{
	pthread_attr_t *attrs;
	void *stack;
	int res;
	pthread_t t;
	kern_return_t kern_res;
	mach_port_t kernel_thread = MACH_PORT_NULL;
	int needresume;

	if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
	{	/* Set up default parameters */
		attrs = &_pthread_attr_default;
	} else if (attrs->sig != _PTHREAD_ATTR_SIG) {
		return EINVAL;
	}
	res = 0;

	/* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
	 * any change in priority or policy is needed here.
	 */
	if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
	    (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
		needresume = 1;
		suspended = 1;
	} else
		needresume = 0;

	do
	{
		/* Allocate a stack for the thread */
		if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
			break;
		}
		t = (pthread_t)malloc(sizeof(struct _pthread));
		*thread = t;
		if (suspended) {
			/* Create the Mach thread for this thread */
			PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
			if (kern_res != KERN_SUCCESS)
			{
				printf("Can't create thread: %d\n", kern_res);
				res = EINVAL; /* Need better error here? */
				break;
			}
		}
		if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
		{
			break;
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;

		/* Send it on it's way */
		t->arg = arg;
		t->fun = start_routine;
		/* Now set it up to execute */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
#if PTH_LISTTRACE
		__kdebug_trace(0x900000c, t, 0, 0, 5, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, suspended, needresume);
	} while (0);
	return (res);
}
int
pthread_create(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _new_pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
	       const pthread_attr_t *attr,
	       void *(*start_routine)(void *),
	       void *arg)
{
	return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
/*
 * Make a thread 'undetached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
	int newstyle = 0;
	int ret;

	if ((ret = _pthread_lookup_thread(thread, NULL, 1)) != 0)
		return (ret); /* Not a valid thread */

	LOCK(thread->lock);
	newstyle = thread->newstyle;
	if (thread->detached & PTHREAD_CREATE_JOINABLE)
	{
		if (thread->detached & _PTHREAD_EXITED) {
			UNLOCK(thread->lock);
			pthread_join(thread, NULL);
			return 0;
		} else {
			if (newstyle == 0) {
				semaphore_t death = thread->death;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;
				UNLOCK(thread->lock);
				if (death)
					(void)semaphore_signal(death);
			} else {
				mach_port_t joinport = thread->joiner_notify;

				thread->detached &= ~PTHREAD_CREATE_JOINABLE;
				thread->detached |= PTHREAD_CREATE_DETACHED;

				UNLOCK(thread->lock);
				if (joinport) {
					semaphore_signal(joinport);
				}
			}
			return (0);
		}
	} else {
		UNLOCK(thread->lock);
		return (EINVAL);
	}
}
/*
 * pthread_kill call to system call
 */
int
pthread_kill(pthread_t th, int sig)
{
	int error = 0;
	mach_port_t kport = MACH_PORT_NULL;

	if ((sig < 0) || (sig > NSIG))
		return (EINVAL);

	if (_pthread_lookup_thread(th, &kport, 0) != 0)
		return (ESRCH); /* Not a valid thread */

	/* if the thread is a workqueue thread, just return error */
	if ((th->wqthread != 0) && (th->wqkillset == 0)) {
		return (ENOTSUP);
	}

	error = __pthread_kill(kport, sig);

	if (error == -1)
		error = errno;
	return (error);
}

int
__pthread_workqueue_setkill(int enable)
{
	pthread_t self = pthread_self();

	LOCK(self->lock);
	if (enable == 0)
		self->wqkillset = 0;
	else
		self->wqkillset = 1;
	UNLOCK(self->lock);
	return (0);
}
/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
	pthread_reap_msg_t msg;
	kern_return_t ret;

	msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
					      MACH_MSG_TYPE_MOVE_SEND);
	msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
	msg.header.msgh_remote_port = thread_recycle_port;
	msg.header.msgh_local_port = kernel_thread;
	msg.header.msgh_id = 0x44454144; /* 'DEAD' */
	msg.thread = thread;
	ret = mach_msg_send(&msg.header);
	assert(ret == MACH_MSG_SUCCESS);
}
/* Reap the resources for available threads */
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming) {
	mach_port_type_t ptype;
	kern_return_t ret;
	task_t self;

	self = mach_task_self();
	if (kernel_thread != MACH_PORT_DEAD) {
		ret = mach_port_type(self, kernel_thread, &ptype);
		if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
			/* not quite dead yet... */
			return EAGAIN;
		}
		ret = mach_port_deallocate(self, kernel_thread);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_deallocate(kernel_thread) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->reply_port != MACH_PORT_NULL) {
		ret = mach_port_mod_refs(self, th->reply_port,
					 MACH_PORT_RIGHT_RECEIVE, -1);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"mach_port_mod_refs(reply_port) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (th->freeStackOnExit) {
		vm_address_t addr = (vm_address_t)th->stackaddr;
		vm_size_t size;

		size = (vm_size_t)th->stacksize + th->guardsize;

		addr -= size;
		ret = vm_deallocate(self, addr, size);
		if (ret != KERN_SUCCESS) {
			fprintf(stderr,
				"vm_deallocate(stack) failed: %s\n",
				mach_error_string(ret));
		}
	}

	if (value_ptr)
		*value_ptr = th->exit_value;
	if (conforming) {
		if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
			*value_ptr = PTHREAD_CANCELED;
		th->sig = _PTHREAD_NO_SIG;
	}

	if (th != &_thread)
		free(th);

	return 0;
}
void _pthread_reap_threads(void)
{
	pthread_reap_msg_t msg;
	kern_return_t ret;

	ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
			sizeof msg, thread_recycle_port,
			MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	while (ret == MACH_MSG_SUCCESS) {
		mach_port_t kernel_thread = msg.header.msgh_remote_port;
		pthread_t thread = msg.thread;

		/* deal with race with thread_create_running() */
		if (kernel_thread == MACH_PORT_NULL &&
		    kernel_thread != thread->kernel_thread) {
			kernel_thread = thread->kernel_thread;
		}

		if ( kernel_thread == MACH_PORT_NULL ||
		     _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
		{
			/* not dead yet, put it back for someone else to reap, stop here */
			_pthread_become_available(thread, kernel_thread);
			return;
		}

		ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
				sizeof msg, thread_recycle_port,
				MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
	}
}
/* For compatibility... */

pthread_t
_pthread_self(void) {
	return pthread_self();
}
/*
 * Terminate a thread.
 */
int __disable_threadsignal(int);

static void
_pthread_exit(pthread_t self, void *value_ptr)
{
	struct __darwin_pthread_handler_rec *handler;
	kern_return_t kern_res;
	int thread_count;
	int newstyle = self->newstyle;

	/* Make this thread not to receive any signals */
	__disable_threadsignal(1);

#if PTH_TRACE
	__kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
#endif

	/* set cancel state to disable and type to deferred */
	_pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);

	while ((handler = self->__cleanup_stack) != 0)
	{
		(handler->__routine)(handler->__arg);
		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);

	if (newstyle == 0) {
		_pthread_reap_threads();

		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			mach_port_t death = self->death;
			self->exit_value = value_ptr;
			UNLOCK(self->lock);
			/* the joiner will need a kernel thread reference, leave ours for it */
			if (death) {
				PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
				if (kern_res != KERN_SUCCESS)
					fprintf(stderr,
						"semaphore_signal(death) failed: %s\n",
						mach_error_string(kern_res));
			}
			LOCK(_pthread_list_lock);
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
		} else {
			UNLOCK(self->lock);
			LOCK(_pthread_list_lock);
			TAILQ_REMOVE(&__pthread_head, self, plist);
#if PTH_LISTTRACE
			__kdebug_trace(0x9000010, self, 0, 0, 5, 0);
#endif
			thread_count = --_pthread_count;
			UNLOCK(_pthread_list_lock);
			/* with no joiner, we let become available consume our cached ref */
			_pthread_become_available(self, self->kernel_thread);
		}

		if (thread_count <= 0)
			exit(0);

		/* Use a new reference to terminate ourselves. Should never return. */
		PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
		fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
				mach_error_string(kern_res));
	} else {
		semaphore_t joinsem = SEMAPHORE_NULL;

		if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
			joinsem = new_sem_from_pool();
		LOCK(self->lock);
		self->detached |= _PTHREAD_EXITED;

		self->exit_value = value_ptr;
		if (self->detached & PTHREAD_CREATE_JOINABLE) {
			if (self->joiner_notify == (mach_port_t)0) {
				self->joiner_notify = joinsem;
				joinsem = SEMAPHORE_NULL;
			}
			UNLOCK(self->lock);
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 0, 1);
		} else {
			UNLOCK(self->lock);
			/* with no joiner, we let become available consume our cached ref */
			if (joinsem != SEMAPHORE_NULL)
				restore_sem_to_pool(joinsem);
			_pthread_free_pthread_onstack(self, 1, 1);
		}
	}
	LIBC_ABORT("thread %p didn't exit", self);
}
void
pthread_exit(void *value_ptr)
{
	pthread_t self = pthread_self();
	/* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */
	if (self->wqthread == 0) {
		_pthread_exit(self, value_ptr);
	} else {
		LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
	}
}
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
		      int *policy,
		      struct sched_param *param)
{
	int ret;

	if (thread == NULL)
		return (ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	if (policy != 0)
		*policy = thread->policy;
	if (param != 0)
		*param = thread->param;
	UNLOCK(_pthread_list_lock);
	return (0);
}
/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
static int
pthread_setschedparam_internal(pthread_t thread,
		      mach_port_t kport,
		      int policy,
		      const struct sched_param *param)
{
	policy_base_data_t bases;
	policy_base_t base;
	mach_msg_type_number_t count;
	kern_return_t ret;

	switch (policy)
	{
		case SCHED_OTHER:
			bases.ts.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.ts;
			count = POLICY_TIMESHARE_BASE_COUNT;
			break;
		case SCHED_FIFO:
			bases.fifo.base_priority = param->sched_priority;
			base = (policy_base_t)&bases.fifo;
			count = POLICY_FIFO_BASE_COUNT;
			break;
		case SCHED_RR:
			bases.rr.base_priority = param->sched_priority;
			/* quantum isn't public yet */
			bases.rr.quantum = param->quantum;
			base = (policy_base_t)&bases.rr;
			count = POLICY_RR_BASE_COUNT;
			break;
		default:
			return (EINVAL);
	}
	ret = thread_policy(kport, policy, base, count, TRUE);
	if (ret != KERN_SUCCESS)
		return (EINVAL);
	return (0);
}
int
pthread_setschedparam(pthread_t t,
		      int policy,
		      const struct sched_param *param)
{
	mach_port_t kport = MACH_PORT_NULL;
	int error = 0;
	int bypass = 1;

	if (t != pthread_self() && t != &_thread) {	// since the main thread will not get de-allocated from underneath us
		bypass = 0;
		if (_pthread_lookup_thread(t, &kport, 0) != 0)
			return (ESRCH);
	} else
		kport = t->kernel_thread;

	error = pthread_setschedparam_internal(t, kport, policy, param);
	if (error == 0) {
		if (bypass == 0) {
			/* ensure the thread is still valid */
			LOCK(_pthread_list_lock);
			if ((error = _pthread_find_thread(t)) != 0) {
				UNLOCK(_pthread_list_lock);
				return (error);
			}
			t->policy = policy;
			t->param = *param;
			UNLOCK(_pthread_list_lock);
		} else {
			t->policy = policy;
			t->param = *param;
		}
	}
	return (error);
}
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
	return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
	return default_priority + 16;
}
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
	      pthread_t t2)
{
	return (t1 == t2);
}
// Force LLVM not to optimise this to a call to __pthread_set_self, if it does
// then _pthread_set_self won't be bound when secondary threads try and start up.
void __attribute__((noinline))
_pthread_set_self(pthread_t p)
{
	extern void __pthread_set_self(void *);

	if (p == 0) {
		if (_thread.tsd[0] != 0) {
			bzero(&_thread, sizeof(struct _pthread));
		}
		p = &_thread;
	}

	p->tsd[0] = p;
	__pthread_set_self(&p->tsd[0]);
}
void
cthread_set_self(void *cself)
{
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		_pthread_set_self(cself);
		return;
	}
	self->cthread_self = cself;
}
void *
ur_cthread_self(void) {
	pthread_t self = pthread_self();
	if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
		return (void *)self;
	}
	return self->cthread_self;
}
/*
 * cancellation handler for pthread once as the init routine can have a
 * cancellation point. In that case we need to restore the spin unlock
 */
void
__pthread_once_cancel_handler(pthread_once_t *once_control)
{
	_spin_unlock(&once_control->lock);
}
/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
	     void (*init_routine)(void))
{
	_spin_lock(&once_control->lock);
	if (once_control->sig == _PTHREAD_ONCE_SIG_init)
	{
		pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
		(*init_routine)();
		pthread_cleanup_pop(0);
		once_control->sig = _PTHREAD_ONCE_SIG;
	}
	_spin_unlock(&once_control->lock);
	return (0); /* Spec defines no possible errors! */
}
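/*
 * Illustrative usage (added sketch): the control word must be statically
 * initialized with PTHREAD_ONCE_INIT so sig starts as _PTHREAD_ONCE_SIG_init:
 *
 *	static pthread_once_t once = PTHREAD_ONCE_INIT;
 *	static void init_tables(void);	// runs at most once
 *
 *	pthread_once(&once, init_tables);
 */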
/*
 * Insert a cancellation point in a thread.
 */
__private_extern__ void
_pthread_testcancel(pthread_t thread, int isconforming)
{
	LOCK(thread->lock);
	if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
	    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
	{
		UNLOCK(thread->lock);
		if (isconforming)
			pthread_exit(PTHREAD_CANCELED);
		else
			pthread_exit(0);
	}
	UNLOCK(thread->lock);
}
int
pthread_getconcurrency(void)
{
	return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
	if (new_level < 0)
		return EINVAL;
	pthread_concurrency = new_level;
	return (0);
}
/*
 * Perform package initialization - called automatically when application starts
 */
int
pthread_init(void)
{
	pthread_attr_t *attrs;
	pthread_t thread;
	kern_return_t kr;
	host_priority_info_data_t priority_info;
	host_info_t info;
	host_flavor_t flavor;
	host_t host;
	mach_msg_type_number_t count;
	int mib[2];
	int ncpus = 0;
	size_t len;
	void *stackaddr;

	pthreadsize = round_page(sizeof(struct _pthread));
	count = HOST_PRIORITY_INFO_COUNT;
	info = (host_info_t)&priority_info;
	flavor = HOST_PRIORITY_INFO;
	host = mach_host_self();
	kr = host_info(host, flavor, info, &count);
	if (kr != KERN_SUCCESS)
		printf("host_info failed (%d); probably need privilege.\n", kr);
	else {
		default_priority = priority_info.user_priority;
		min_priority = priority_info.minimum_priority;
		max_priority = priority_info.maximum_priority;
	}
	attrs = &_pthread_attr_default;
	pthread_attr_init(attrs);

	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	thread = &_thread;
	TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
	_pthread_set_self(thread);
#if PTH_LISTTRACE
	__kdebug_trace(0x900000c, thread, 0, 0, 10, 0);
#endif

	/* In case of dyld reset the tsd keys from 1 - 10 */
	_pthread_keys_init();

	mib[0] = CTL_KERN;
	mib[1] = KERN_USRSTACK;
	len = sizeof(stackaddr);
	if (sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0)
		stackaddr = (void *)USRSTACK;
	_pthread_create(thread, attrs, stackaddr, mach_thread_self());
	thread->stacksize = DFLSSIZ;	// initialize main thread's stacksize based on vmparam.h
	thread->detached = PTHREAD_CREATE_JOINABLE | _PTHREAD_CREATE_PARENT;

	_init_cpu_capabilities();
	if ((ncpus = _NumCPUs()) > 1)
		_spin_tries = MP_SPIN_TRIES;

	workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;
	workq_targetconc[WORKQ_BG_PRIOQUEUE] = ncpus;

	mach_port_deallocate(mach_task_self(), host);

#if defined(__ppc__)
	IF_ROSETTA() {
		__oldstyle = 1;
	}
#endif

#if defined(_OBJC_PAGE_BASE_ADDRESS)
	{
		vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS;
		kr = vm_map(mach_task_self(),
			    &objcRTPage, vm_page_size * 4, vm_page_size - 1,
			    VM_FLAGS_FIXED | VM_MAKE_TAG(0),	// Which tag to use?
			    MACH_PORT_NULL,
			    (vm_address_t)0, FALSE,
			    (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
			    VM_INHERIT_DEFAULT);
		/* We ignore the return result here. The ObjC runtime will just have to deal. */
	}
#endif

	//added so that thread_recycle_port is initialized on new launch.
	_pthread_fork_child_postinit();
	mig_init(1);	/* enable multi-threaded mig interfaces */
	if (__oldstyle == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&thread->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (uintptr_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (uintptr_t)thread);
#endif
	}

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_init\n");
#endif
	return 0;
}
int
sched_yield(void)
{
	swtch_pri(0);
	return 0;
}
/* This used to be the "magic" that gets the initialization routine called when the application starts */
/*
 * (These have been moved to setenv.c, so we can use it to fix a less than 10.5
 * crt1.o issue)
 * static int _do_nothing(void) { return 0; }
 * int (*_cthread_init_routine)(void) = _do_nothing;
 */
/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t
new_sem_from_pool(void) {
	kern_return_t res;
	semaphore_t sem;
	int i;

	LOCK(sem_pool_lock);
	if (sem_pool_current == sem_pool_count) {
		sem_pool_count += 16;
		sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
		for (i = sem_pool_current; i < sem_pool_count; i++) {
			PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
		}
	}
	sem = sem_pool[sem_pool_current++];
	UNLOCK(sem_pool_lock);
	return sem;
}
/* Put a semaphore back into the pool */
__private_extern__ void
restore_sem_to_pool(semaphore_t sem) {
	LOCK(sem_pool_lock);
	sem_pool[--sem_pool_current] = sem;
	UNLOCK(sem_pool_lock);
}
static void sem_pool_reset(void) {
	LOCK(sem_pool_lock);
	sem_pool_count = 0;
	sem_pool_current = 0;
	sem_pool = NULL;
	UNLOCK(sem_pool_lock);
}
__private_extern__ void _pthread_fork_child(pthread_t p) {
	/* Just in case somebody had it locked... */
	UNLOCK(sem_pool_lock);
	sem_pool_reset();
	/* No need to hold the pthread_list_lock as no one other than this
	 * thread is present at this time
	 */
	TAILQ_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
#if PTH_LISTTRACE
	__kdebug_trace(0x900000c, p, 0, 0, 10, 0);
#endif
	_pthread_count = 1;

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	if ((p->thread_id = __thread_selfid()) == (__uint64_t)-1)
		printf("Failed to set thread_id in pthread_fork_child\n");
#endif
}
void _pthread_fork_child_postinit() {
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &thread_recycle_port);
	if (kr != KERN_SUCCESS) {
		abort();
	}
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
{
	pthread_t self = pthread_self();

	switch (state) {
		case PTHREAD_CANCEL_ENABLE:
			if (conforming)
				__pthread_canceled(1);
			break;
		case PTHREAD_CANCEL_DISABLE:
			if (conforming)
				__pthread_canceled(2);
			break;
		default:
			return EINVAL;
	}

	self = pthread_self();
	LOCK(self->lock);
	if (oldstate)
		*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;
	UNLOCK(self->lock);
	if (!conforming)
		_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
	return (0);
}
/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
static void
_pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming)
{
	LOCK(self->lock);
	self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
	self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
	if (value_ptr == PTHREAD_CANCELED) {
		self->detached |= _PTHREAD_WASCANCEL;
	}
	UNLOCK(self->lock);
}
int
_pthread_join_cleanup(pthread_t thread, void **value_ptr, int conforming)
{
	kern_return_t res;
	int ret;

#if PTH_TRACE
	__kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
#endif
	/* The scenario where the joiner was waiting for the thread and
	 * the pthread detach happened on that thread. Then the semaphore
	 * will trigger but by the time joiner runs, the target thread could be
	 * freed. So we need to make sure that the thread is still in the list
	 * and is joinable before we continue with the join.
	 */
	LOCK(_pthread_list_lock);
	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return (ret);
	}
	if ((thread->detached & PTHREAD_CREATE_JOINABLE) == 0) {
		/* the thread might be a detached thread */
		UNLOCK(_pthread_list_lock);
		return (ESRCH);
	}
	/* It is still a joinable thread and needs to be reaped */
	TAILQ_REMOVE(&__pthread_head, thread, plist);
#if PTH_LISTTRACE
	__kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
#endif
	UNLOCK(_pthread_list_lock);

	if (value_ptr)
		*value_ptr = thread->exit_value;
	if (conforming) {
		if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
		    (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
			*value_ptr = PTHREAD_CANCELED;
		}
	}
	if (thread->reply_port != MACH_PORT_NULL) {
		res = mach_port_mod_refs(mach_task_self(), thread->reply_port, MACH_PORT_RIGHT_RECEIVE, -1);
		if (res != KERN_SUCCESS)
			fprintf(stderr, "mach_port_mod_refs(reply_port) failed: %s\n", mach_error_string(res));
		thread->reply_port = MACH_PORT_NULL;
	}
	if (thread->freeStackOnExit) {
		thread->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
		__kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
#endif
		vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)thread, pthreadsize);
	} else {
		thread->sig = _PTHREAD_NO_SIG;
#if PTH_TRACE
		__kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
#endif
		free(thread);
	}
	return (0);
}
/* ALWAYS called with list lock and return with list lock */
int
_pthread_find_thread(pthread_t thread)
{
	pthread_t p;

loop:
	TAILQ_FOREACH(p, &__pthread_head, plist) {
		if (p == thread) {
			if (thread->kernel_thread == MACH_PORT_NULL) {
				UNLOCK(_pthread_list_lock);
				sched_yield();
				LOCK(_pthread_list_lock);
				goto loop;
			}
			return (0);
		}
	}
	return (ESRCH);
}
int
_pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
{
	mach_port_t kport;
	int ret = 0;

	if (thread == NULL)
		return(ESRCH);

	LOCK(_pthread_list_lock);

	if ((ret = _pthread_find_thread(thread)) != 0) {
		UNLOCK(_pthread_list_lock);
		return(ret);
	}
	if ((only_joinable != 0) && ((thread->detached & PTHREAD_CREATE_DETACHED) != 0)) {
		UNLOCK(_pthread_list_lock);
		return(EINVAL);
	}
	kport = thread->kernel_thread;
	UNLOCK(_pthread_list_lock);
	if (portp != NULL)
		*portp = kport;
	return(0);
}
/* XXXXXXXXXXXXX Pthread Workqueue Attributes XXXXXXXXXXXXXXXXXX */
int
pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
{
	attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
	attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG;
	attrp->overcommit = 0;
	return(0);
}
int
pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*qpriop = attr->queueprio;
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
{
	int error = 0;

	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		switch (qprio) {
			case WORKQ_HIGH_PRIOQUEUE:
			case WORKQ_DEFAULT_PRIOQUEUE:
			case WORKQ_LOW_PRIOQUEUE:
			case WORKQ_BG_PRIOQUEUE:
				attr->queueprio = qprio;
				break;
			default:
				error = EINVAL;
		}
	} else {
		error = EINVAL;
	}
	return(error);
}
int
pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp)
{
	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		*ocommp = attr->overcommit;
		return(0);
	} else {
		return(EINVAL); /* Not an attribute structure! */
	}
}
int
pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
{
	int error = 0;

	if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
		attr->overcommit = ocomm;
	} else {
		error = EINVAL;
	}
	return(error);
}
/* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */

static void
workqueue_list_lock(void)
{
	OSSpinLockLock(&__workqueue_list_lock);
}

static void
workqueue_list_unlock(void)
{
	OSSpinLockUnlock(&__workqueue_list_lock);
}
int
pthread_workqueue_init_np(void)
{
	int ret;

	workqueue_list_lock();
	ret = _pthread_work_internal_init();
	workqueue_list_unlock();

	return(ret);
}
int
pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
{
	int error = 0;

	if (queue < 0 || queue > WORKQ_NUM_PRIOQUEUE)
		return(EINVAL);

	error = __workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);

	if (error == -1)
		return(errno);
	return(0);
}
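/*
 * Illustrative call (sketch only, not compiled): ask the kernel to target a
 * concurrency level of 4 threads for the default priority queue:
 *
 *	pthread_workqueue_requestconcurrency_np(WORKQ_DEFAULT_PRIOQUEUE, 4);
 */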
void
pthread_workqueue_atfork_prepare(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_prepare();
}

void
pthread_workqueue_atfork_parent(void)
{
	/*
	 * NOTE: Any workq additions here
	 * should be for i386,x86_64 only
	 */
	dispatch_atfork_parent();
}
void
pthread_workqueue_atfork_child(void)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	/*
	 * NOTE: workq additions here
	 * are for i386, x86_64 and arm only,
	 * as ppc does not support them
	 */
	__workqueue_list_lock = OS_SPINLOCK_INIT;
	if (kernel_workq_setup != 0) {
		kernel_workq_setup = 0;
		_pthread_work_internal_init();
	}
#endif
	dispatch_atfork_child();
}
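/*
 * These three hooks are presumably invoked around fork(2) by the libSystem
 * fork wrapper in the usual prepare/parent/child order (an assumption; the
 * caller is not visible in this file). The child hook resets the list lock
 * and clears kernel_workq_setup so the forked process rebuilds workqueue
 * state from scratch on first use.
 */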
static int
_pthread_work_internal_init(void)
{
	int i, error;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t wq;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
	pthread_t self = pthread_self();
#endif

	if (kernel_workq_setup == 0) {
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		__bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&self->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&self->tsd[0]));
#else
		__bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, NULL, 0);
#endif

		_pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		_pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;

		for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++) {
			headp = __pthread_wq_head_tbl[i];
			TAILQ_INIT(&headp->wqhead);
			headp->next_workq = 0;
		}

		__workqueue_pool_ptr = NULL;
		__workqueue_pool_size = round_page(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);

		__workqueue_pool_ptr = (struct _pthread_workitem *)mmap(NULL, __workqueue_pool_size,
				PROT_READ|PROT_WRITE,
				MAP_ANON | MAP_PRIVATE,
				-1,
				0);

		if (__workqueue_pool_ptr == MAP_FAILED) {
			/* Not expected to fail, if it does, always malloc for work items */
			__workqueue_nitems = WORKITEM_POOL_SIZE;
			__workqueue_pool_ptr = NULL;
		} else
			__workqueue_nitems = 0;

		/* sets up the workitem pool */
		grow_workitem();

		/* since the size is less than a page, leaving this in malloc pool */
		wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
		bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
		for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
			TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
		}

		if ((error = __workq_open()) != 0) {
			TAILQ_INIT(&__pthread_workitem_pool_head);
			TAILQ_INIT(&__pthread_workqueue_pool_head);
			if (__workqueue_pool_ptr != NULL) {
				munmap((void *)__workqueue_pool_ptr, __workqueue_pool_size);
			}
			free(wq);
			return(ENOMEM);
		}

		kernel_workq_setup = 1;
	}
	return(0);
}
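/*
 * Summary of the setup above: the first caller registers the workqueue
 * thread entry points with the kernel via __bsdthread_register(), then
 * primes two free pools: a page-aligned mmap()ed arena of work items
 * (grown in WORKITEM_CHUNK_SIZE steps up to WORKITEM_POOL_SIZE) and a
 * malloc()ed array of WORKQUEUE_POOL_SIZE workqueue structures. If the
 * mmap fails, work items simply fall back to malloc().
 */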
/* This routine is called with list lock held */
static pthread_workitem_t
alloc_workitem(void)
{
	pthread_workitem_t witem;

	if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
		/* the chunk size is set so some multiple of it is pool size */
		if (__workqueue_nitems < WORKITEM_POOL_SIZE) {
			grow_workitem();
		} else {
			workqueue_list_unlock();
			witem = malloc(sizeof(struct _pthread_workitem));
			workqueue_list_lock();
			witem->fromcache = 0;
			goto out;
		}
	}
	witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
	TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
	witem->fromcache = 1;
out:
	witem->item_entry.tqe_next = 0;
	witem->item_entry.tqe_prev = 0;
	user_workitem_count++;

	return(witem);
}
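/*
 * Caution for callers: alloc_workitem() may drop and re-take the list lock
 * around malloc(), so any workqueue state examined before the call must be
 * re-validated afterwards (pthread_workqueue_additem_np() below does this).
 */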
/* This routine is called with list lock held */
static void
free_workitem(pthread_workitem_t witem)
{
	user_workitem_count--;
	witem->flags = 0;
	if (witem->fromcache != 0)
		TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
	else
		free(witem);
}
static void
grow_workitem(void)
{
	pthread_workitem_t witemp;
	int i;

	witemp = &__workqueue_pool_ptr[__workqueue_nitems];
	bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_CHUNK_SIZE));
	for (i = 0; i < WORKITEM_CHUNK_SIZE; i++) {
		witemp[i].fromcache = 1;
		TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
	}
	__workqueue_nitems += WORKITEM_CHUNK_SIZE;
}
/* This routine is called with list lock held */
static pthread_workqueue_t
alloc_workqueue(void)
{
	pthread_workqueue_t wq;

	if (TAILQ_EMPTY(&__pthread_workqueue_pool_head)) {
		workqueue_list_unlock();
		wq = malloc(sizeof(struct _pthread_workqueue));
		workqueue_list_lock();
	} else {
		wq = TAILQ_FIRST(&__pthread_workqueue_pool_head);
		TAILQ_REMOVE(&__pthread_workqueue_pool_head, wq, wq_list);
	}
	user_workq_count++;
	return(wq);
}
/* This routine is called with list lock held */
static void
free_workqueue(pthread_workqueue_t wq)
{
	user_workq_count--;
	TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, wq, wq_list);
}
static void
_pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr)
{
	bzero(wq, sizeof(struct _pthread_workqueue));
	if (attr != NULL) {
		wq->queueprio = attr->queueprio;
		wq->overcommit = attr->overcommit;
	} else {
		wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
		wq->overcommit = 0;
	}
	LOCK_INIT(wq->lock);
	wq->flags = 0;
	TAILQ_INIT(&wq->item_listhead);
	TAILQ_INIT(&wq->item_kernhead);
#if WQ_LISTTRACE
	__kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
#endif
	wq->wq_list.tqe_next = 0;
	wq->wq_list.tqe_prev = 0;
	wq->sig = PTHREAD_WORKQUEUE_SIG;
	wq->headp = __pthread_wq_head_tbl[wq->queueprio];
}
static int
valid_workq(pthread_workqueue_t workq)
{
	if (workq->sig == PTHREAD_WORKQUEUE_SIG)
		return(1);
	else
		return(0);
}
/* called with list lock */
static void
pick_nextworkqueue_droplock(void)
{
	int i, curwqprio, val, found;
	pthread_workqueue_head_t headp;
	pthread_workqueue_t workq;
	pthread_workqueue_t nworkq = NULL;

#if WQ_TRACE
	__kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
#endif
loop:
	while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
		found = 0;
		for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++) {
			wqreadyprio = i;	/* because there is nothing else higher to run */
			headp = __pthread_wq_head_tbl[i];

			if (TAILQ_EMPTY(&headp->wqhead))
				continue;
			workq = headp->next_workq;
			if (workq == NULL)
				workq = TAILQ_FIRST(&headp->wqhead);
			curwqprio = workq->queueprio;
			nworkq = workq;		/* starting pt */
			while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL)
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
#if WQ_TRACE
				__kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
#endif
				val = post_nextworkitem(workq);

				if (val != 0) {
					/* things could have changed, so reassess */
					/* If kernel queue is full, skip */
					if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
						break;
					/* If anything with higher prio arrived, then reevaluate */
					if (wqreadyprio < curwqprio)
						goto loop;	/* we need to re-evaluate again */
					/* we can post some more work items */
					found = 1;
				}

				/* cannot use workq here as it could be freed */
				if (TAILQ_EMPTY(&headp->wqhead))
					break;
				/* if we found nothing to run and only one workqueue in the list, skip */
				if ((val == 0) && (workq == headp->next_workq))
					break;
				workq = headp->next_workq;
				if (workq == NULL)
					workq = TAILQ_FIRST(&headp->wqhead);
				curwqprio = workq->queueprio;
				/* if we found nothing to run and back to workq where we started */
				if ((val == 0) && (workq == nworkq))
					break;
			}
			if (kernel_workq_count >= KERNEL_WORKQ_ELEM_MAX)
				break;
		}
		/* nothing found to run? */
		if (found == 0)
			break;
	}
	workqueue_list_unlock();
}
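/*
 * Scheduling sketch: the loop above walks the priority buckets from highest
 * to lowest, round-robins the workqueues within a bucket via
 * headp->next_workq, and keeps posting items until the kernel queue fills
 * (KERNEL_WORKQ_ELEM_MAX) or nothing runnable remains. A bump of
 * wqreadyprio by a concurrent enqueue restarts the scan at the higher
 * priority. The list lock is always dropped on the way out.
 */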
static int
post_nextworkitem(pthread_workqueue_t workq)
{
	int error, prio;
	pthread_workitem_t witem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
		return(0);
	}
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
#endif
	if (TAILQ_EMPTY(&workq->item_listhead)) {
		return(0);
	}
	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
		return(0);

	witem = TAILQ_FIRST(&workq->item_listhead);
	headp = workq->headp;
#if WQ_TRACE
	__kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
#endif
	if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
#if WQ_TRACE
		__kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		/* Also barrier when nothing is there needs to be handled */
		/* Nothing to wait for */
		if (workq->kq_count != 0) {
			witem->flags |= PTH_WQITEM_APPLIED;
			workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
			workq->barrier_count = workq->kq_count;
#if WQ_TRACE
			__kdebug_trace(0x9000064, 1, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		} else {
#if WQ_TRACE
			__kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
#endif
			if (witem->func != NULL) {
				/* since we are going to drop list lock */
				witem->flags |= PTH_WQITEM_APPLIED;
				workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))witem->func;
				(*func)(workq, witem->func_arg);
#if WQ_TRACE
				__kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
#endif
				workqueue_list_lock();
				workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
			}
			TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(witem);
#if WQ_TRACE
			__kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
#endif
			return(1);
		}
	} else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
#if WQ_TRACE
		__kdebug_trace(0x9000068, 1, workq->kq_count, 0, 0, 0);
#endif
		if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
			return(0);
		}
		witem->flags |= PTH_WQITEM_APPLIED;
		workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
		workq->barrier_count = workq->kq_count;
		workq->term_callback = (void (*)(struct _pthread_workqueue *, void *))witem->func;
		workq->term_callarg = witem->func_arg;
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
			if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
#endif
			}
			free_workitem(witem);
			workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 1, 0);
#endif
			headp = __pthread_wq_head_tbl[workq->queueprio];
			if (headp->next_workq == workq) {
				headp->next_workq = TAILQ_NEXT(workq, wq_list);
				if (headp->next_workq == NULL) {
					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
					if (headp->next_workq == workq)
						headp->next_workq = NULL;
				}
			}
			workq->sig = 0;
			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
			if (workq->term_callback != NULL) {
				workqueue_list_unlock();
				(*workq->term_callback)(workq, workq->term_callarg);
				workqueue_list_lock();
			}
			free_workqueue(workq);
			return(1);
		} else {
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		}
#if WQ_TRACE
		__kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
#endif
		return(1);
	} else {
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
#endif
		TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
		__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
		TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
		if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
			workq->kq_count++;
			witem->flags |= PTH_WQITEM_KERN_COUNT;
		}
		OSAtomicIncrement32(&kernel_workq_count);
		workqueue_list_unlock();

		prio = workq->queueprio;
		if (workq->overcommit != 0) {
			prio |= WORKQUEUE_OVERCOMMIT;
		}

		if ((error = __workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
			OSAtomicDecrement32(&kernel_workq_count);
			workqueue_list_lock();
#if WQ_TRACE
			__kdebug_trace(0x900007c, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
			TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
			TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
				workq->flags |= PTHREAD_WORKQ_REQUEUED;
		} else
			workqueue_list_lock();
#if WQ_TRACE
		__kdebug_trace(0x9000060, witem, workq, witem->func_arg, workq->kq_count, 0);
#endif
		return(1);
	}
	/* no one should come here */
	printf("error in logic for next workitem\n");
	LIBC_ABORT("error in logic for next workitem");
	return(0);
}
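/*
 * post_nextworkitem() recap: the head item is handled one of three ways --
 * a barrier item stalls the queue until in-kernel items drain (or runs its
 * callback immediately if none are pending), a destroy item tears the
 * workqueue down once it is idle, and an ordinary item is moved to
 * item_kernhead and handed to the kernel with WQOPS_QUEUE_ADD, being
 * requeued at the head if that call fails.
 */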
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse)
{
	int ret;
	pthread_attr_t *attrs = &_pthread_attr_default;
	pthread_workqueue_t workq;
#if WQ_DEBUG
	pthread_t pself;
#endif

	workq = item->workq;
	if (reuse == 0) {
		/* reuse is set to 0, when a thread is newly created to run a workitem */
		_pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
		self->wqthread = 1;
		self->wqkillset = 0;
		self->parentcheck = 1;

		/* These are not joinable threads */
		self->detached &= ~PTHREAD_CREATE_JOINABLE;
		self->detached |= PTHREAD_CREATE_DETACHED;
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		_pthread_set_self(self);
#endif
#if WQ_TRACE
		__kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
#endif
		self->kernel_thread = kport;
		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
		/* Add to the pthread list */
		LOCK(_pthread_list_lock);
		TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
#if PTH_LISTTRACE
		__kdebug_trace(0x900000c, self, 0, 0, 10, 0);
#endif
		_pthread_count++;
		UNLOCK(_pthread_list_lock);

#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
		if ((self->thread_id = __thread_selfid()) == (__uint64_t)-1)
			printf("Failed to set thread_id in pthread_wqthread\n");
#endif
	} else {
		/* reuse is set to 1, when a thread is reused to run another work item */
#if WQ_TRACE
		__kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
#endif
		/* reset all tsd from 1 to KEYS_MAX */
		if (self == NULL)
			LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);

		self->fun = (void *(*)(void *))item->func;
		self->arg = item->func_arg;
	}

#if WQ_DEBUG
	if (reuse == 0) {
		pself = pthread_self();
		if (self != pself) {
#if WQ_TRACE
			__kdebug_trace(0x9000078, self, pself, item->func_arg, 0, 0);
#endif
			printf("pthread_self not set: pself %p, passed in %p\n", pself, self);
			_pthread_set_self(self);
			pself = pthread_self();
			if (self != pself)
				printf("(2)pthread_self not set: pself %p, passed in %p\n", pself, self);
			pself = self;
		}
	} else {
		pself = pthread_self();
		if (self != pself) {
			printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
			LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
		}
	}
#endif /* WQ_DEBUG */

	self->cur_workq = workq;
	self->cur_workitem = item;
	OSAtomicDecrement32(&kernel_workq_count);

	ret = (int)(intptr_t)(*self->fun)(self->arg);

	/* If we reach here without going through the above initialization path then don't go through
	 * with the teardown code path ( e.g. setjmp/longjmp ). Instead just exit this thread.
	 */
	if (self != pthread_self()) {
		pthread_exit(PTHREAD_CANCELED);
	}

	workqueue_exit(self, workq, item);
}
static void
workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
{
	pthread_workitem_t baritem;
	pthread_workqueue_head_t headp;
	void (*func)(pthread_workqueue_t, void *);

	workqueue_list_lock();

	TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
	workq->kq_count--;
#if WQ_TRACE
	__kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
#endif
	free_workitem(item);

	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
		workq->barrier_count--;
#if WQ_TRACE
		__kdebug_trace(0x9000084, self, workq->barrier_count, workq->kq_count, 1, 0);
#endif
		if (workq->barrier_count <= 0) {
			/* Need to remove barrier item from the list */
			baritem = TAILQ_FIRST(&workq->item_listhead);
#if WQ_DEBUG
			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY | PTH_WQITEM_APPLIED)) == 0)
				printf("Incorrect bar item being removed in barrier processing\n");
#endif /* WQ_DEBUG */
			/* if the front item is a barrier and call back is registered, run that */
			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
				workqueue_list_unlock();
				func = (void (*)(pthread_workqueue_t, void *))baritem->func;
				(*func)(workq, baritem->func_arg);
				workqueue_list_lock();
			}
			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
#if WQ_LISTTRACE
			__kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif
			free_workitem(baritem);
			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
#if WQ_TRACE
			__kdebug_trace(0x9000058, self, item, item->func_arg, 0, 0);
#endif
			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
				headp = __pthread_wq_head_tbl[workq->queueprio];
				workq->flags |= PTHREAD_WORKQ_DESTROYED;
#if WQ_TRACE
				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
#endif
				if (headp->next_workq == workq) {
					headp->next_workq = TAILQ_NEXT(workq, wq_list);
					if (headp->next_workq == NULL) {
						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
						if (headp->next_workq == workq)
							headp->next_workq = NULL;
					}
				}
				workq->sig = 0;
				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
				if (workq->term_callback != NULL) {
					workqueue_list_unlock();
					(*workq->term_callback)(workq, workq->term_callarg);
					workqueue_list_lock();
				}
				free_workqueue(workq);
			} else {
				/* if there are higher prio schedulable items, reset to wqreadyprio */
				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
					wqreadyprio = workq->queueprio;
			}
		}
	}
#if WQ_TRACE
	else {
		__kdebug_trace(0x9000070, self, 2, item->func_arg, workq->barrier_count, 0);
	}

	__kdebug_trace(0x900005c, self, item, 0, 0, 0);
#endif
	pick_nextworkqueue_droplock();
	_pthread_workq_return(self);
}
static void
_pthread_workq_return(pthread_t self)
{
	__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);

	/* This is the way to terminate the thread */
	_pthread_exit(self, NULL);
}
/* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */

int
pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueue_attr_t * attr)
{
	pthread_workqueue_t wq;
	pthread_workqueue_head_t headp;

#if defined(__ppc__)
	IF_ROSETTA() {
		return(ENOTSUP);
	}
#endif
	if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
		return(EINVAL);
	}

	if (__is_threaded == 0)
		__is_threaded = 1;

	workqueue_list_lock();
	if (kernel_workq_setup == 0) {
		int ret = _pthread_work_internal_init();
		if (ret != 0) {
			workqueue_list_unlock();
			return(ret);
		}
	}

	wq = alloc_workqueue();

	_pthread_workq_init(wq, attr);

	headp = __pthread_wq_head_tbl[wq->queueprio];
	TAILQ_INSERT_TAIL(&headp->wqhead, wq, wq_list);
	if (headp->next_workq == NULL) {
		headp->next_workq = TAILQ_FIRST(&headp->wqhead);
	}

	workqueue_list_unlock();

	*workqp = wq;

	return(0);
}
int
pthread_workqueue_additem_np(pthread_workqueue_t workq, void (*workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
{
	pthread_workitem_t witem;

	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	workqueue_list_lock();

	/*
	 * Allocate the workitem here as it can drop the lock.
	 * Also we can evaluate the workqueue state only once.
	 */
	witem = alloc_workitem();
	witem->func = workitem_func;
	witem->func_arg = workitem_arg;
	witem->workq = workq;

	/* alloc workitem can drop the lock, check the state */
	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
		free_workitem(witem);
		workqueue_list_unlock();
		if (itemhandlep != NULL)
			*itemhandlep = 0;
		return(ESRCH);
	}

	if (itemhandlep != NULL)
		*itemhandlep = (pthread_workitem_handle_t *)witem;
	if (gencountp != NULL)
		*gencountp = 0;
#if WQ_TRACE
	__kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
#endif
	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
#if WQ_LISTTRACE
	__kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
#endif

	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
		wqreadyprio = workq->queueprio;

	pick_nextworkqueue_droplock();

	return(0);
}
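/*
 * Illustrative end-to-end use of the queue API above (sketch only, not
 * compiled; work and some_arg are placeholders, error handling elided):
 *
 *	static void work(void * arg) { ... }
 *
 *	pthread_workqueue_t wq;
 *	pthread_workitem_handle_t handle;
 *	unsigned int gen;
 *
 *	pthread_workqueue_create_np(&wq, NULL);
 *	pthread_workqueue_additem_np(wq, work, some_arg, &handle, &gen);
 */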
int
pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
{
	if (valid_workq(workq) == 0) {
		return(EINVAL);
	}

	if (ocommp != NULL)
		*ocommp = workq->overcommit;
	return(0);
}
#else /* !BUILDING_VARIANT ] [ */
extern int __unix_conforming;
extern int _pthread_count;
extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr, int conforming);

#endif /* !BUILDING_VARIANT ] */
#if __DARWIN_UNIX03

__private_extern__ void
__posix_join_cleanup(void *arg)
{
	pthread_t thread = (pthread_t)arg;
	int already_exited, res;
	void * dummy;
	semaphore_t death;
	int newstyle;

	LOCK(thread->lock);
	already_exited = (thread->detached & _PTHREAD_EXITED);

	newstyle = thread->newstyle;

#if PTH_TRACE
	__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
#endif
	if (newstyle == 0) {
		death = thread->death;
		if (!already_exited) {
			thread->joiner = (struct _pthread *)NULL;
			UNLOCK(thread->lock);
			restore_sem_to_pool(death);
		} else {
			UNLOCK(thread->lock);
			while ((res = _pthread_reap_thread(thread,
					thread->kernel_thread,
					&dummy, 1)) == EAGAIN)
			{
				sched_yield();
			}
			restore_sem_to_pool(death);
		}
	} else {
		/* leave another thread to join */
		thread->joiner = (struct _pthread *)NULL;
		UNLOCK(thread->lock);
	}
}

#endif /* __DARWIN_UNIX03 */
/*
 * Wait for a thread to terminate and obtain its exit value.
 */
/*
int
pthread_join(pthread_t thread, void **value_ptr)

moved to pthread_cancelable.c */
int
pthread_cancel(pthread_t thread)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if (_pthread_lookup_thread(thread, NULL, 0) != 0)
		return(ESRCH);

	/* if the thread is a workqueue thread, then return error */
	if (thread->wqthread != 0) {
		return(ENOTSUP);
	}
#if __DARWIN_UNIX03
	int state;

	LOCK(thread->lock);
	state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
	UNLOCK(thread->lock);
	if (state & PTHREAD_CANCEL_ENABLE)
		__pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
	thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
	return(0);
}
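/*
 * Illustrative use (sketch only, not compiled): request cancellation and
 * reap the target; the join reports PTHREAD_CANCELED as the exit value if
 * the cancel was acted on:
 *
 *	void * res;
 *	pthread_cancel(t);
 *	pthread_join(t, &res);
 *	if (res == PTHREAD_CANCELED) ...
 */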
void
pthread_testcancel(void)
{
	pthread_t self = pthread_self();

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
	_pthread_testcancel(self, 1);
#else /* __DARWIN_UNIX03 */
	_pthread_testcancel(self, 0);
#endif /* __DARWIN_UNIX03 */
}
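/*
 * Illustrative use (sketch only, not compiled): a compute loop with no
 * blocking calls can poll for a pending cancel once per iteration;
 * do_some_work is a placeholder:
 *
 *	for (;;) {
 *		do_some_work();
 *		pthread_testcancel();	// terminates here if a cancel is pending
 *	}
 */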
/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	return (_pthread_setcancelstate_internal(state, oldstate, 1));
#else /* __DARWIN_UNIX03 */
	return (_pthread_setcancelstate_internal(state, oldstate, 0));
#endif /* __DARWIN_UNIX03 */
}
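/*
 * Illustrative use (sketch only, not compiled): shield a critical section
 * from cancellation, then restore the caller's previous state:
 *
 *	int old;
 *	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
 *	... critical section ...
 *	pthread_setcancelstate(old, NULL);
 */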
/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
	pthread_t self;

#if __DARWIN_UNIX03
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

	if ((type != PTHREAD_CANCEL_DEFERRED) &&
	    (type != PTHREAD_CANCEL_ASYNCHRONOUS))
		return(EINVAL);
	self = pthread_self();
	LOCK(self->lock);
	if (oldtype)
		*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
	self->cancel_state |= type;
	UNLOCK(self->lock);
#if !__DARWIN_UNIX03
	_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
#endif /* __DARWIN_UNIX03 */
	return(0);
}
int
pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
{
#if __DARWIN_UNIX03
	int err = 0;

	if (__pthread_sigmask(how, set, oset) == -1) {
		err = errno;
	}
	return(err);
#else /* __DARWIN_UNIX03 */
	return(__pthread_sigmask(how, set, oset));
#endif /* __DARWIN_UNIX03 */
}
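/*
 * Illustrative use (sketch only, not compiled): block SIGUSR1 in the calling
 * thread. Note that on the UNIX03 path above the error is returned rather
 * than left in errno, matching the POSIX pthread_sigmask() contract:
 *
 *	sigset_t mask;
 *	sigemptyset(&mask);
 *	sigaddset(&mask, SIGUSR1);
 *	int err = pthread_sigmask(SIG_BLOCK, &mask, NULL);
 */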
/*
int
sigwait(const sigset_t * set, int * sig)

moved to pthread_cancelable.c */