2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 * POSIX Pthread Library
52 #include "pthread_internals.h"
55 #include <stdio.h> /* For printf(). */
57 #include <errno.h> /* For __mach_errno_addr() prototype. */
59 #include <sys/resource.h>
60 #include <sys/sysctl.h>
61 #include <sys/queue.h>
62 #include <sys/syscall.h>
63 #include <machine/vmparam.h>
64 #include <mach/vm_statistics.h>
65 #define __APPLE_API_PRIVATE
66 #include <machine/cpu_capabilities.h>
69 #ifndef BUILDING_VARIANT /* [ */
71 __private_extern__
struct __pthread_list __pthread_head
= LIST_HEAD_INITIALIZER(&__pthread_head
);
73 /* Per-thread kernel support */
74 extern void _pthread_set_self(pthread_t
);
75 extern void mig_init(int);
77 /* Get CPU capabilities from the kernel */
78 __private_extern__
void _init_cpu_capabilities(void);
80 /* Needed to tell the malloc subsystem we're going multithreaded */
81 extern void set_malloc_singlethreaded(int);
83 /* Used when we need to call into the kernel with no reply port */
84 extern pthread_lock_t reply_port_lock
;
86 /* Mach message used to notify that a thread needs to be reaped */
88 typedef struct _pthread_reap_msg_t
{
89 mach_msg_header_t header
;
91 mach_msg_trailer_t trailer
;
94 /* We'll implement this when the main thread is a pthread */
95 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
96 static struct _pthread _thread
= {0};
98 /* This global should be used (carefully) by anyone needing to know if a
99 ** pthread has been created.
101 int __is_threaded
= 0;
102 /* _pthread_count is protected by _pthread_list_lock */
103 static int _pthread_count
= 1;
104 int __unix_conforming
= 0;
107 __private_extern__ pthread_lock_t _pthread_list_lock
= LOCK_INITIALIZER
;
109 /* Same implementation as LOCK, but without the __is_threaded check */
111 __private_extern__
void _spin_lock_retry(pthread_lock_t
*lock
)
113 int tries
= _spin_tries
;
117 syscall_thread_switch(THREAD_NULL
, SWITCH_OPTION_DEPRESS
, 1);
119 } while(!_spin_lock_try(lock
));
122 extern mach_port_t thread_recycle_port
;
124 /* These are used to keep track of a semaphore pool shared by mutexes and condition
128 static semaphore_t
*sem_pool
= NULL
;
129 static int sem_pool_count
= 0;
130 static int sem_pool_current
= 0;
131 static pthread_lock_t sem_pool_lock
= LOCK_INITIALIZER
;
133 static int default_priority
;
134 static int max_priority
;
135 static int min_priority
;
136 static int pthread_concurrency
;
138 static void _pthread_exit(pthread_t self
, void *value_ptr
);
141 * [Internal] stack support
143 size_t _pthread_stack_size
= 0;
144 #define STACK_LOWEST(sp) ((sp) & ~__pthread_stack_mask)
145 #define STACK_RESERVED (sizeof (struct _pthread))
148 /* The stack grows towards lower addresses:
149 |<----------------user stack|struct _pthread|
150 ^STACK_LOWEST ^STACK_START ^STACK_BASE
153 #define STACK_BASE(sp) (((sp) | __pthread_stack_mask) + 1)
154 #define STACK_START(stack_low) (STACK_BASE(stack_low) - STACK_RESERVED)
155 #define STACK_SELF(sp) STACK_START(sp)
157 #if defined(__ppc__) || defined(__ppc64__)
158 static const vm_address_t PTHREAD_STACK_HINT
= 0xF0000000;
159 #elif defined(__i386__)
160 static const vm_address_t PTHREAD_STACK_HINT
= 0xB0000000;
162 #error Need to define a stack address hint for this architecture
165 /* Set the base address to use as the stack pointer, before adjusting due to the ABI
166 * The guardpages for stackoverflow protection is also allocated here
167 * If the stack was already allocated(stackaddr in attr) then there are no guardpages
168 * set up for the thread
172 _pthread_allocate_stack(pthread_attr_t
*attrs
, void **stack
)
175 vm_address_t stackaddr
;
178 assert(attrs
->stacksize
>= PTHREAD_STACK_MIN
);
179 if (attrs
->stackaddr
!= NULL
) {
180 /* No guard pages setup in this case */
181 assert(((uintptr_t)attrs
->stackaddr
% vm_page_size
) == 0);
182 *stack
= attrs
->stackaddr
;
186 guardsize
= attrs
->guardsize
;
187 stackaddr
= PTHREAD_STACK_HINT
;
188 kr
= vm_map(mach_task_self(), &stackaddr
,
189 attrs
->stacksize
+ guardsize
,
191 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
, MEMORY_OBJECT_NULL
,
192 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
,
194 if (kr
!= KERN_SUCCESS
)
195 kr
= vm_allocate(mach_task_self(),
196 &stackaddr
, attrs
->stacksize
+ guardsize
,
197 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
);
198 if (kr
!= KERN_SUCCESS
) {
201 /* The guard page is at the lowest address */
202 /* The stack base is the highest address */
204 kr
= vm_protect(mach_task_self(), stackaddr
, guardsize
, FALSE
, VM_PROT_NONE
);
205 *stack
= (void *)(stackaddr
+ attrs
->stacksize
+ guardsize
);
208 vm_address_t cur_stack
= (vm_address_t
)0;
209 if (free_stacks
== 0)
211 /* Allocating guard pages is done by doubling
212 * the actual stack size, since STACK_BASE() needs
213 * to have stacks aligned on stack_size. Allocating just
214 * one page takes as much memory as allocating more pages
215 * since it will remain one entry in the vm map.
216 * Besides, allocating more than one page allows tracking the
217 * overflow pattern when the overflow is bigger than one page.
219 #ifndef NO_GUARD_PAGES
220 # define GUARD_SIZE(a) (2*(a))
221 # define GUARD_MASK(a) (((a)<<1) | 1)
223 # define GUARD_SIZE(a) (a)
224 # define GUARD_MASK(a) (a)
226 while (lowest_stack
> GUARD_SIZE(__pthread_stack_size
))
228 lowest_stack
-= GUARD_SIZE(__pthread_stack_size
);
229 /* Ensure stack is there */
230 kr
= vm_allocate(mach_task_self(),
232 GUARD_SIZE(__pthread_stack_size
),
234 #ifndef NO_GUARD_PAGES
235 if (kr
== KERN_SUCCESS
) {
236 kr
= vm_protect(mach_task_self(),
238 __pthread_stack_size
,
239 FALSE
, VM_PROT_NONE
);
240 lowest_stack
+= __pthread_stack_size
;
241 if (kr
== KERN_SUCCESS
)
245 if (kr
== KERN_SUCCESS
)
249 if (lowest_stack
> 0)
250 free_stacks
= (vm_address_t
*)lowest_stack
;
253 /* Too bad. We'll just have to take what comes.
254 Use vm_map instead of vm_allocate so we can
255 specify alignment. */
256 kr
= vm_map(mach_task_self(), &lowest_stack
,
257 GUARD_SIZE(__pthread_stack_size
),
258 GUARD_MASK(__pthread_stack_mask
),
259 TRUE
/* anywhere */, MEMORY_OBJECT_NULL
,
260 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
,
262 /* This really shouldn't fail and if it does I don't
264 #ifndef NO_GUARD_PAGES
265 if (kr
== KERN_SUCCESS
) {
266 kr
= vm_protect(mach_task_self(),
268 __pthread_stack_size
,
269 FALSE
, VM_PROT_NONE
);
270 lowest_stack
+= __pthread_stack_size
;
273 free_stacks
= (vm_address_t
*)lowest_stack
;
276 *free_stacks
= 0; /* No other free stacks */
278 cur_stack
= STACK_START((vm_address_t
) free_stacks
);
279 free_stacks
= (vm_address_t
*)*free_stacks
;
280 cur_stack
= _adjust_sp(cur_stack
); /* Machine dependent stack fudging */
285 static pthread_attr_t _pthread_attr_default
= {0};
288 * Destroy a thread attribute structure
291 pthread_attr_destroy(pthread_attr_t
*attr
)
293 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
298 return (EINVAL
); /* Not an attribute structure! */
303 * Get the 'detach' state from a thread attribute structure.
304 * Note: written as a helper function for info hiding
307 pthread_attr_getdetachstate(const pthread_attr_t
*attr
,
310 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
312 *detachstate
= attr
->detached
;
316 return (EINVAL
); /* Not an attribute structure! */
321 * Get the 'inherit scheduling' info from a thread attribute structure.
322 * Note: written as a helper function for info hiding
325 pthread_attr_getinheritsched(const pthread_attr_t
*attr
,
328 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
330 *inheritsched
= attr
->inherit
;
334 return (EINVAL
); /* Not an attribute structure! */
339 * Get the scheduling parameters from a thread attribute structure.
340 * Note: written as a helper function for info hiding
343 pthread_attr_getschedparam(const pthread_attr_t
*attr
,
344 struct sched_param
*param
)
346 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
348 *param
= attr
->param
;
352 return (EINVAL
); /* Not an attribute structure! */
357 * Get the scheduling policy from a thread attribute structure.
358 * Note: written as a helper function for info hiding
361 pthread_attr_getschedpolicy(const pthread_attr_t
*attr
,
364 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
366 *policy
= attr
->policy
;
370 return (EINVAL
); /* Not an attribute structure! */
374 /* Retain the existing stack size of 512K and not depend on Main thread default stack size */
375 static const size_t DEFAULT_STACK_SIZE
= (512*1024);
377 * Initialize a thread attribute structure to default values.
380 pthread_attr_init(pthread_attr_t
*attr
)
382 attr
->stacksize
= DEFAULT_STACK_SIZE
;
383 attr
->stackaddr
= NULL
;
384 attr
->sig
= _PTHREAD_ATTR_SIG
;
385 attr
->param
.sched_priority
= default_priority
;
386 attr
->param
.quantum
= 10; /* quantum isn't public yet */
387 attr
->detached
= PTHREAD_CREATE_JOINABLE
;
388 attr
->inherit
= _PTHREAD_DEFAULT_INHERITSCHED
;
389 attr
->policy
= _PTHREAD_DEFAULT_POLICY
;
390 attr
->freeStackOnExit
= TRUE
;
391 attr
->guardsize
= vm_page_size
;
396 * Set the 'detach' state in a thread attribute structure.
397 * Note: written as a helper function for info hiding
400 pthread_attr_setdetachstate(pthread_attr_t
*attr
,
403 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
405 if ((detachstate
== PTHREAD_CREATE_JOINABLE
) ||
406 (detachstate
== PTHREAD_CREATE_DETACHED
))
408 attr
->detached
= detachstate
;
416 return (EINVAL
); /* Not an attribute structure! */
421 * Set the 'inherit scheduling' state in a thread attribute structure.
422 * Note: written as a helper function for info hiding
425 pthread_attr_setinheritsched(pthread_attr_t
*attr
,
428 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
430 if ((inheritsched
== PTHREAD_INHERIT_SCHED
) ||
431 (inheritsched
== PTHREAD_EXPLICIT_SCHED
))
433 attr
->inherit
= inheritsched
;
441 return (EINVAL
); /* Not an attribute structure! */
446 * Set the scheduling paramters in a thread attribute structure.
447 * Note: written as a helper function for info hiding
450 pthread_attr_setschedparam(pthread_attr_t
*attr
,
451 const struct sched_param
*param
)
453 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
455 /* TODO: Validate sched_param fields */
456 attr
->param
= *param
;
460 return (EINVAL
); /* Not an attribute structure! */
465 * Set the scheduling policy in a thread attribute structure.
466 * Note: written as a helper function for info hiding
469 pthread_attr_setschedpolicy(pthread_attr_t
*attr
,
472 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
474 if ((policy
== SCHED_OTHER
) ||
475 (policy
== SCHED_RR
) ||
476 (policy
== SCHED_FIFO
))
478 attr
->policy
= policy
;
486 return (EINVAL
); /* Not an attribute structure! */
491 * Set the scope for the thread.
492 * We currently only provide PTHREAD_SCOPE_SYSTEM
495 pthread_attr_setscope(pthread_attr_t
*attr
,
498 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
499 if (scope
== PTHREAD_SCOPE_SYSTEM
) {
500 /* No attribute yet for the scope */
502 } else if (scope
== PTHREAD_SCOPE_PROCESS
) {
506 return (EINVAL
); /* Not an attribute structure! */
510 * Get the scope for the thread.
511 * We currently only provide PTHREAD_SCOPE_SYSTEM
514 pthread_attr_getscope(pthread_attr_t
*attr
,
517 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
518 *scope
= PTHREAD_SCOPE_SYSTEM
;
521 return (EINVAL
); /* Not an attribute structure! */
524 /* Get the base stack address of the given thread */
526 pthread_attr_getstackaddr(const pthread_attr_t
*attr
, void **stackaddr
)
528 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
529 *stackaddr
= attr
->stackaddr
;
532 return (EINVAL
); /* Not an attribute structure! */
537 pthread_attr_setstackaddr(pthread_attr_t
*attr
, void *stackaddr
)
539 if ((attr
->sig
== _PTHREAD_ATTR_SIG
) && (((uintptr_t)stackaddr
% vm_page_size
) == 0)) {
540 attr
->stackaddr
= stackaddr
;
541 attr
->freeStackOnExit
= FALSE
;
544 return (EINVAL
); /* Not an attribute structure! */
549 pthread_attr_getstacksize(const pthread_attr_t
*attr
, size_t *stacksize
)
551 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
552 *stacksize
= attr
->stacksize
;
555 return (EINVAL
); /* Not an attribute structure! */
560 pthread_attr_setstacksize(pthread_attr_t
*attr
, size_t stacksize
)
562 if ((attr
->sig
== _PTHREAD_ATTR_SIG
) && ((stacksize
% vm_page_size
) == 0) && (stacksize
>= PTHREAD_STACK_MIN
)) {
563 attr
->stacksize
= stacksize
;
566 return (EINVAL
); /* Not an attribute structure! */
571 pthread_attr_getstack(const pthread_attr_t
*attr
, void **stackaddr
, size_t * stacksize
)
573 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
574 *stackaddr
= (void *)((uintptr_t)attr
->stackaddr
- attr
->stacksize
);
575 *stacksize
= attr
->stacksize
;
578 return (EINVAL
); /* Not an attribute structure! */
582 /* By SUSV spec, the stackaddr is the base address, the lowest addressable
583 * byte address. This is not the same as in pthread_attr_setstackaddr.
586 pthread_attr_setstack(pthread_attr_t
*attr
, void *stackaddr
, size_t stacksize
)
588 if ((attr
->sig
== _PTHREAD_ATTR_SIG
) &&
589 (((uintptr_t)stackaddr
% vm_page_size
) == 0) &&
590 ((stacksize
% vm_page_size
) == 0) && (stacksize
>= PTHREAD_STACK_MIN
)) {
591 attr
->stackaddr
= (void *)((uintptr_t)stackaddr
+ stacksize
);
592 attr
->stacksize
= stacksize
;
593 attr
->freeStackOnExit
= FALSE
;
596 return (EINVAL
); /* Not an attribute structure! */
602 * Set the guardsize attribute in the attr.
605 pthread_attr_setguardsize(pthread_attr_t
*attr
,
608 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
609 /* Guardsize of 0 is valid, ot means no guard */
610 if ((guardsize
% vm_page_size
) == 0) {
611 attr
->guardsize
= guardsize
;
616 return (EINVAL
); /* Not an attribute structure! */
620 * Get the guardsize attribute in the attr.
623 pthread_attr_getguardsize(const pthread_attr_t
*attr
,
626 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
627 *guardsize
= attr
->guardsize
;
630 return (EINVAL
); /* Not an attribute structure! */
635 * Create and start execution of a new thread.
639 _pthread_body(pthread_t self
)
641 _pthread_set_self(self
);
642 _pthread_exit(self
, (self
->fun
)(self
->arg
));
646 _pthread_create(pthread_t t
,
647 const pthread_attr_t
*attrs
,
649 const mach_port_t kernel_thread
)
656 memset(t
, 0, sizeof(*t
));
658 t
->stacksize
= attrs
->stacksize
;
659 t
->stackaddr
= (void *)stack
;
660 t
->guardsize
= attrs
->guardsize
;
661 t
->kernel_thread
= kernel_thread
;
662 t
->detached
= attrs
->detached
;
663 t
->inherit
= attrs
->inherit
;
664 t
->policy
= attrs
->policy
;
665 t
->param
= attrs
->param
;
666 t
->freeStackOnExit
= attrs
->freeStackOnExit
;
667 t
->mutexes
= (struct _pthread_mutex
*)NULL
;
668 t
->sig
= _PTHREAD_SIG
;
669 t
->reply_port
= MACH_PORT_NULL
;
670 t
->cthread_self
= NULL
;
672 t
->plist
.le_next
= (struct _pthread
*)0;
673 t
->plist
.le_prev
= (struct _pthread
**)0;
674 t
->cancel_state
= PTHREAD_CANCEL_ENABLE
| PTHREAD_CANCEL_DEFERRED
;
675 t
->__cleanup_stack
= (struct __darwin_pthread_handler_rec
*)NULL
;
676 t
->death
= SEMAPHORE_NULL
;
678 if (kernel_thread
!= MACH_PORT_NULL
)
679 pthread_setschedparam(t
, t
->policy
, &t
->param
);
684 /* Need to deprecate this in future */
686 _pthread_is_threaded(void)
688 return __is_threaded
;
691 /* Non portable public api to know whether this process has(had) atleast one thread
692 * apart from main thread. There could be race if there is a thread in the process of
693 * creation at the time of call . It does not tell whether there are more than one thread
694 * at this point of time.
697 pthread_is_threaded_np(void)
699 return (__is_threaded
);
703 pthread_mach_thread_np(pthread_t t
)
705 thread_t kernel_thread
;
707 /* Wait for the creator to initialize it */
708 while ((kernel_thread
= t
->kernel_thread
) == MACH_PORT_NULL
)
711 return kernel_thread
;
715 pthread_get_stacksize_np(pthread_t t
)
721 pthread_get_stackaddr_np(pthread_t t
)
727 _pthread_reply_port(pthread_t t
)
729 return t
->reply_port
;
733 /* returns non-zero if the current thread is the main thread */
735 pthread_main_np(void)
737 pthread_t self
= pthread_self();
739 return ((self
->detached
& _PTHREAD_CREATE_PARENT
) == _PTHREAD_CREATE_PARENT
);
743 _pthread_create_suspended(pthread_t
*thread
,
744 const pthread_attr_t
*attr
,
745 void *(*start_routine
)(void *),
749 pthread_attr_t
*attrs
;
753 kern_return_t kern_res
;
754 mach_port_t kernel_thread
= MACH_PORT_NULL
;
757 if ((attrs
= (pthread_attr_t
*)attr
) == (pthread_attr_t
*)NULL
)
758 { /* Set up default paramters */
759 attrs
= &_pthread_attr_default
;
760 } else if (attrs
->sig
!= _PTHREAD_ATTR_SIG
) {
765 /* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
766 * any change in priority or policy is needed here.
768 if (((attrs
->policy
!= _PTHREAD_DEFAULT_POLICY
) ||
769 (attrs
->param
.sched_priority
!= default_priority
)) && (suspended
== 0)) {
777 /* Allocate a stack for the thread */
778 if ((res
= _pthread_allocate_stack(attrs
, &stack
)) != 0) {
781 t
= (pthread_t
)malloc(sizeof(struct _pthread
));
784 /* Create the Mach thread for this thread */
785 PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread
), kern_res
);
786 if (kern_res
!= KERN_SUCCESS
)
788 printf("Can't create thread: %d\n", kern_res
);
789 res
= EINVAL
; /* Need better error here? */
793 if ((res
= _pthread_create(t
, attrs
, stack
, kernel_thread
)) != 0)
797 set_malloc_singlethreaded(0);
800 /* Send it on it's way */
802 t
->fun
= start_routine
;
803 /* Now set it up to execute */
804 LOCK(_pthread_list_lock
);
805 LIST_INSERT_HEAD(&__pthread_head
, t
, plist
);
807 UNLOCK(_pthread_list_lock
);
808 _pthread_setup(t
, _pthread_body
, stack
, suspended
, needresume
);
814 pthread_create(pthread_t
*thread
,
815 const pthread_attr_t
*attr
,
816 void *(*start_routine
)(void *),
819 return _pthread_create_suspended(thread
, attr
, start_routine
, arg
, 0);
823 pthread_create_suspended_np(pthread_t
*thread
,
824 const pthread_attr_t
*attr
,
825 void *(*start_routine
)(void *),
828 return _pthread_create_suspended(thread
, attr
, start_routine
, arg
, 1);
832 * Make a thread 'undetached' - no longer 'joinable' with other threads.
835 pthread_detach(pthread_t thread
)
837 if (thread
->sig
== _PTHREAD_SIG
)
840 if (thread
->detached
& PTHREAD_CREATE_JOINABLE
)
842 if (thread
->detached
& _PTHREAD_EXITED
) {
843 UNLOCK(thread
->lock
);
844 pthread_join(thread
, NULL
);
847 semaphore_t death
= thread
->death
;
849 thread
->detached
&= ~PTHREAD_CREATE_JOINABLE
;
850 thread
->detached
|= PTHREAD_CREATE_DETACHED
;
851 UNLOCK(thread
->lock
);
853 (void) semaphore_signal(death
);
857 UNLOCK(thread
->lock
);
861 return (ESRCH
); /* Not a valid thread */
867 * pthread_kill call to system call
870 extern int __pthread_kill(mach_port_t
, int);
879 if ((sig
< 0) || (sig
> NSIG
))
882 if (th
&& (th
->sig
== _PTHREAD_SIG
)) {
883 error
= __pthread_kill(pthread_mach_thread_np(th
), sig
);
892 /* Announce that there are pthread resources ready to be reclaimed in a */
893 /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
894 /* thread underneath is terminated right away. */
896 void _pthread_become_available(pthread_t thread
, mach_port_t kernel_thread
) {
897 pthread_reap_msg_t msg
;
900 msg
.header
.msgh_bits
= MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND
,
901 MACH_MSG_TYPE_MOVE_SEND
);
902 msg
.header
.msgh_size
= sizeof msg
- sizeof msg
.trailer
;
903 msg
.header
.msgh_remote_port
= thread_recycle_port
;
904 msg
.header
.msgh_local_port
= kernel_thread
;
905 msg
.header
.msgh_id
= 0x44454144; /* 'DEAD' */
907 ret
= mach_msg_send(&msg
.header
);
908 assert(ret
== MACH_MSG_SUCCESS
);
911 /* Reap the resources for available threads */
913 int _pthread_reap_thread(pthread_t th
, mach_port_t kernel_thread
, void **value_ptr
) {
914 mach_port_type_t ptype
;
918 self
= mach_task_self();
919 if (kernel_thread
!= MACH_PORT_DEAD
) {
920 ret
= mach_port_type(self
, kernel_thread
, &ptype
);
921 if (ret
== KERN_SUCCESS
&& ptype
!= MACH_PORT_TYPE_DEAD_NAME
) {
922 /* not quite dead yet... */
925 ret
= mach_port_deallocate(self
, kernel_thread
);
926 if (ret
!= KERN_SUCCESS
) {
928 "mach_port_deallocate(kernel_thread) failed: %s\n",
929 mach_error_string(ret
));
933 if (th
->reply_port
!= MACH_PORT_NULL
) {
934 ret
= mach_port_mod_refs(self
, th
->reply_port
,
935 MACH_PORT_RIGHT_RECEIVE
, -1);
936 if (ret
!= KERN_SUCCESS
) {
938 "mach_port_mod_refs(reply_port) failed: %s\n",
939 mach_error_string(ret
));
943 if (th
->freeStackOnExit
) {
944 vm_address_t addr
= (vm_address_t
)th
->stackaddr
;
947 size
= (vm_size_t
)th
->stacksize
+ th
->guardsize
;
950 ret
= vm_deallocate(self
, addr
, size
);
951 if (ret
!= KERN_SUCCESS
) {
953 "vm_deallocate(stack) failed: %s\n",
954 mach_error_string(ret
));
959 *value_ptr
= th
->exit_value
;
968 void _pthread_reap_threads(void)
970 pthread_reap_msg_t msg
;
973 ret
= mach_msg(&msg
.header
, MACH_RCV_MSG
|MACH_RCV_TIMEOUT
, 0,
974 sizeof msg
, thread_recycle_port
,
975 MACH_MSG_TIMEOUT_NONE
, MACH_PORT_NULL
);
976 while (ret
== MACH_MSG_SUCCESS
) {
977 mach_port_t kernel_thread
= msg
.header
.msgh_remote_port
;
978 pthread_t thread
= msg
.thread
;
980 if (_pthread_reap_thread(thread
, kernel_thread
, (void **)0) == EAGAIN
)
982 /* not dead yet, put it back for someone else to reap, stop here */
983 _pthread_become_available(thread
, kernel_thread
);
986 ret
= mach_msg(&msg
.header
, MACH_RCV_MSG
|MACH_RCV_TIMEOUT
, 0,
987 sizeof msg
, thread_recycle_port
,
988 MACH_MSG_TIMEOUT_NONE
, MACH_PORT_NULL
);
992 /* For compatibility... */
996 return pthread_self();
1000 * Terminate a thread.
1003 _pthread_exit(pthread_t self
, void *value_ptr
)
1005 struct __darwin_pthread_handler_rec
*handler
;
1006 kern_return_t kern_res
;
1009 /* Make this thread not to receive any signals */
1012 while ((handler
= self
->__cleanup_stack
) != 0)
1014 (handler
->__routine
)(handler
->__arg
);
1015 self
->__cleanup_stack
= handler
->__next
;
1017 _pthread_tsd_cleanup(self
);
1019 _pthread_reap_threads();
1022 self
->detached
|= _PTHREAD_EXITED
;
1024 if (self
->detached
& PTHREAD_CREATE_JOINABLE
) {
1025 mach_port_t death
= self
->death
;
1026 self
->exit_value
= value_ptr
;
1028 /* the joiner will need a kernel thread reference, leave ours for it */
1030 PTHREAD_MACH_CALL(semaphore_signal(death
), kern_res
);
1031 if (kern_res
!= KERN_SUCCESS
)
1033 "semaphore_signal(death) failed: %s\n",
1034 mach_error_string(kern_res
));
1036 LOCK(_pthread_list_lock
);
1037 thread_count
= --_pthread_count
;
1038 UNLOCK(_pthread_list_lock
);
1041 LOCK(_pthread_list_lock
);
1042 LIST_REMOVE(self
, plist
);
1043 thread_count
= --_pthread_count
;
1044 UNLOCK(_pthread_list_lock
);
1045 /* with no joiner, we let become available consume our cached ref */
1046 _pthread_become_available(self
, pthread_mach_thread_np(self
));
1049 if (thread_count
<= 0)
1052 /* Use a new reference to terminate ourselves. Should never return. */
1053 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res
);
1054 fprintf(stderr
, "thread_terminate(mach_thread_self()) failed: %s\n",
1055 mach_error_string(kern_res
));
/* Public entry: terminate the calling thread with the given exit value. */
void
pthread_exit(void *value_ptr)
{
	_pthread_exit(pthread_self(), value_ptr);
}
1066 * Get the scheduling policy and scheduling paramters for a thread.
1069 pthread_getschedparam(pthread_t thread
,
1071 struct sched_param
*param
)
1073 if (thread
->sig
== _PTHREAD_SIG
)
1075 *policy
= thread
->policy
;
1076 *param
= thread
->param
;
1080 return (ESRCH
); /* Not a valid thread structure */
1085 * Set the scheduling policy and scheduling paramters for a thread.
1088 pthread_setschedparam(pthread_t thread
,
1090 const struct sched_param
*param
)
1092 policy_base_data_t bases
;
1094 mach_msg_type_number_t count
;
1097 if (thread
->sig
== _PTHREAD_SIG
)
1102 bases
.ts
.base_priority
= param
->sched_priority
;
1103 base
= (policy_base_t
)&bases
.ts
;
1104 count
= POLICY_TIMESHARE_BASE_COUNT
;
1107 bases
.fifo
.base_priority
= param
->sched_priority
;
1108 base
= (policy_base_t
)&bases
.fifo
;
1109 count
= POLICY_FIFO_BASE_COUNT
;
1112 bases
.rr
.base_priority
= param
->sched_priority
;
1113 /* quantum isn't public yet */
1114 bases
.rr
.quantum
= param
->quantum
;
1115 base
= (policy_base_t
)&bases
.rr
;
1116 count
= POLICY_RR_BASE_COUNT
;
1121 ret
= thread_policy(pthread_mach_thread_np(thread
), policy
, base
, count
, TRUE
);
1122 if (ret
!= KERN_SUCCESS
)
1126 thread
->policy
= policy
;
1127 thread
->param
= *param
;
1131 return (ESRCH
); /* Not a valid thread structure */
1136 * Get the minimum priority for the given policy
1139 sched_get_priority_min(int policy
)
1141 return default_priority
- 16;
1145 * Get the maximum priority for the given policy
1148 sched_get_priority_max(int policy
)
1150 return default_priority
+ 16;
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
	      pthread_t t2)
{
	return (t1 == t2);
}
1163 __private_extern__
void
1164 _pthread_set_self(pthread_t p
)
1166 extern void __pthread_set_self(pthread_t
);
1168 bzero(&_thread
, sizeof(struct _pthread
));
1172 __pthread_set_self(p
);
1176 cthread_set_self(void *cself
)
1178 pthread_t self
= pthread_self();
1179 if ((self
== (pthread_t
)NULL
) || (self
->sig
!= _PTHREAD_SIG
)) {
1180 _pthread_set_self(cself
);
1183 self
->cthread_self
= cself
;
1187 ur_cthread_self(void) {
1188 pthread_t self
= pthread_self();
1189 if ((self
== (pthread_t
)NULL
) || (self
->sig
!= _PTHREAD_SIG
)) {
1190 return (void *)self
;
1192 return self
->cthread_self
;
1196 * Execute a function exactly one time in a thread-safe fashion.
1199 pthread_once(pthread_once_t
*once_control
,
1200 void (*init_routine
)(void))
1202 _spin_lock(&once_control
->lock
);
1203 if (once_control
->sig
== _PTHREAD_ONCE_SIG_init
)
1206 once_control
->sig
= _PTHREAD_ONCE_SIG
;
1208 _spin_unlock(&once_control
->lock
);
1209 return (ESUCCESS
); /* Spec defines no possible errors! */
1213 * Insert a cancellation point in a thread.
1215 __private_extern__
void
1216 _pthread_testcancel(pthread_t thread
, int isconforming
)
1219 if ((thread
->cancel_state
& (PTHREAD_CANCEL_ENABLE
|_PTHREAD_CANCEL_PENDING
)) ==
1220 (PTHREAD_CANCEL_ENABLE
|_PTHREAD_CANCEL_PENDING
))
1222 UNLOCK(thread
->lock
);
1224 pthread_exit(PTHREAD_CANCELED
);
1228 UNLOCK(thread
->lock
);
1234 pthread_getconcurrency(void)
1236 return(pthread_concurrency
);
1240 pthread_setconcurrency(int new_level
)
1242 pthread_concurrency
= new_level
;
1247 * Perform package initialization - called automatically when application starts
1253 pthread_attr_t
*attrs
;
1256 host_basic_info_data_t basic_info
;
1257 host_priority_info_data_t priority_info
;
1259 host_flavor_t flavor
;
1261 mach_msg_type_number_t count
;
1267 count
= HOST_PRIORITY_INFO_COUNT
;
1268 info
= (host_info_t
)&priority_info
;
1269 flavor
= HOST_PRIORITY_INFO
;
1270 host
= mach_host_self();
1271 kr
= host_info(host
, flavor
, info
, &count
);
1272 if (kr
!= KERN_SUCCESS
)
1273 printf("host_info failed (%d); probably need privilege.\n", kr
);
1275 default_priority
= priority_info
.user_priority
;
1276 min_priority
= priority_info
.minimum_priority
;
1277 max_priority
= priority_info
.maximum_priority
;
1279 attrs
= &_pthread_attr_default
;
1280 pthread_attr_init(attrs
);
1282 LIST_INIT(&__pthread_head
);
1283 LOCK_INIT(_pthread_list_lock
);
1285 LIST_INSERT_HEAD(&__pthread_head
, thread
, plist
);
1286 _pthread_set_self(thread
);
1289 mib
[1] = KERN_USRSTACK
;
1290 len
= sizeof (stackaddr
);
1291 if (sysctl (mib
, 2, &stackaddr
, &len
, NULL
, 0) != 0)
1292 stackaddr
= (void *)USRSTACK
;
1293 _pthread_create(thread
, attrs
, stackaddr
, mach_thread_self());
1294 thread
->detached
= PTHREAD_CREATE_JOINABLE
|_PTHREAD_CREATE_PARENT
;
1296 /* See if we're on a multiprocessor and set _spin_tries if so. */
1299 len
= sizeof(numcpus
);
1300 if (sysctl(mib
, 2, &numcpus
, &len
, NULL
, 0) == 0) {
1302 _spin_tries
= MP_SPIN_TRIES
;
1305 count
= HOST_BASIC_INFO_COUNT
;
1306 info
= (host_info_t
)&basic_info
;
1307 flavor
= HOST_BASIC_INFO
;
1308 kr
= host_info(host
, flavor
, info
, &count
);
1309 if (kr
!= KERN_SUCCESS
)
1310 printf("host_info failed (%d)\n", kr
);
1312 if (basic_info
.avail_cpus
> 1)
1313 _spin_tries
= MP_SPIN_TRIES
;
1317 mach_port_deallocate(mach_task_self(), host
);
1319 _init_cpu_capabilities();
1321 #if defined(_OBJC_PAGE_BASE_ADDRESS)
1323 vm_address_t objcRTPage
= (vm_address_t
)_OBJC_PAGE_BASE_ADDRESS
;
1324 kr
= vm_map(mach_task_self(),
1325 &objcRTPage
, vm_page_size
* 4, vm_page_size
- 1,
1326 VM_FLAGS_FIXED
| VM_MAKE_TAG(0), // Which tag to use?
1328 (vm_address_t
)0, FALSE
,
1329 (vm_prot_t
)0, VM_PROT_READ
| VM_PROT_WRITE
| VM_PROT_EXECUTE
,
1330 VM_INHERIT_DEFAULT
);
1331 /* We ignore the return result here. The ObjC runtime will just have to deal. */
1335 mig_init(1); /* enable multi-threaded mig interfaces */
1339 int sched_yield(void)
1345 /* This is the "magic" that gets the initialization routine called when the application starts */
1346 int (*_cthread_init_routine
)(void) = pthread_init
;
1348 /* Get a semaphore from the pool, growing it if necessary */
1350 __private_extern__ semaphore_t
new_sem_from_pool(void) {
1355 LOCK(sem_pool_lock
);
1356 if (sem_pool_current
== sem_pool_count
) {
1357 sem_pool_count
+= 16;
1358 sem_pool
= realloc(sem_pool
, sem_pool_count
* sizeof(semaphore_t
));
1359 for (i
= sem_pool_current
; i
< sem_pool_count
; i
++) {
1360 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool
[i
], SYNC_POLICY_FIFO
, 0), res
);
1363 sem
= sem_pool
[sem_pool_current
++];
1364 UNLOCK(sem_pool_lock
);
1368 /* Put a semaphore back into the pool */
1369 __private_extern__
void restore_sem_to_pool(semaphore_t sem
) {
1370 LOCK(sem_pool_lock
);
1371 sem_pool
[--sem_pool_current
] = sem
;
1372 UNLOCK(sem_pool_lock
);
1375 static void sem_pool_reset(void) {
1376 LOCK(sem_pool_lock
);
1378 sem_pool_current
= 0;
1380 UNLOCK(sem_pool_lock
);
/* Re-initialize pthread library state in the child process after fork();
 * 'p' is the (sole surviving) calling thread, re-inserted into a freshly
 * initialized thread list.
 * NOTE(review): this chunk appears truncated — a call such as
 * sem_pool_reset() between the UNLOCK and the list re-init, plus the
 * closing brace, seem to be missing; confirm against the full source. */
__private_extern__ void _pthread_fork_child(pthread_t p) {
	/* Just in case somebody had it locked... */
	UNLOCK(sem_pool_lock);
	/* No need to hold the pthread_list_lock as no one other than this
	 * thread is present at this time */
	LIST_INIT(&__pthread_head);
	LOCK_INIT(_pthread_list_lock);
	LIST_INSERT_HEAD(&__pthread_head, p, plist);
#else /* !BUILDING_VARIANT ] [ */
/* Variant builds share these definitions from the base library rather
 * than redefining them. */
extern int __unix_conforming;
extern pthread_lock_t _pthread_list_lock;
extern void _pthread_testcancel(pthread_t thread, int isconforming);
extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr);
#endif /* !BUILDING_VARIANT ] */
/* Cancellation cleanup handler pushed around the __semwait_signal() wait
 * in pthread_join() (conforming variant): undoes the join bookkeeping if
 * the joining thread is cancelled mid-wait, and returns the death
 * semaphore to the pool.
 * NOTE(review): this chunk is visibly truncated — the opening brace,
 * local declarations (e.g. 'death', 'dummy'), the LOCK(thread->lock),
 * the else branch structure and the reap loop's arguments/body are
 * missing; confirm against the full source before editing. */
static void __posix_join_cleanup(void *arg)
	pthread_t thread = (pthread_t)arg;
	int already_exited, res;
	death = thread->death;
	/* _PTHREAD_EXITED set means the target already ran its exit path. */
	already_exited = (thread->detached & _PTHREAD_EXITED);
	if (!already_exited){
		/* Target still running: stop being its joiner so it does not
		 * signal a semaphore we are about to return to the pool. */
		thread->joiner = (struct _pthread *)NULL;
		UNLOCK(thread->lock);
		restore_sem_to_pool(death);
		UNLOCK(thread->lock);
		/* Target already exited: reap the kernel thread, retrying
		 * while the kernel reports it is not yet reapable. */
		while ((res = _pthread_reap_thread(thread,
				thread->kernel_thread,
		restore_sem_to_pool(death);
#endif /* __DARWIN_UNIX03 */
/*
 * Wait for a thread to terminate and obtain its exit value.
 */
/* NOTE(review): this chunk is visibly truncated — the return-type line,
 * the second parameter line (apparently "void **value_ptr", which is
 * referenced below), the LOCK(thread->lock) calls, the error returns
 * (ESRCH/EINVAL paths) and most braces are missing.  Comments below
 * annotate only the visible logic; confirm against the full source. */
pthread_join(pthread_t thread,
	kern_return_t kern_res;
	/* Conforming builds latch UNIX03 conformance mode on first use. */
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */
	/* Validate the handle via the pthread signature word. */
	if (thread->sig == _PTHREAD_SIG)
		semaphore_t death = new_sem_from_pool(); /* in case we need it */
		/* Only a joinable thread with no death semaphore assigned
		 * (i.e. nobody else joining it) qualifies. */
		if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
		    thread->death == SEMAPHORE_NULL)
			pthread_t self = pthread_self();
			assert(thread->joiner == NULL);
			/* Refuse self-join and two-thread join cycles, which
			 * would deadlock. */
			if (thread != self && (self == NULL || self->joiner != thread))
				int already_exited = (thread->detached & _PTHREAD_EXITED);
				/* Register ourselves as joiner and hand the target the
				 * death semaphore it will signal when it exits. */
				thread->death = death;
				thread->joiner = self;
				UNLOCK(thread->lock);
				if (!already_exited)
					/* Wait for it to signal... */
					/* Conforming: cancellable wait with cleanup to undo
					 * the join bookkeeping if we are cancelled. */
					pthread_cleanup_push(__posix_join_cleanup, (void *)thread);
						res = __semwait_signal(death, 0, 0, 0, 0, 0);
					} while ((res < 0) && (errno == EINTR));
					pthread_cleanup_pop(0);
#else /* __DARWIN_UNIX03 */
					/* Wait for it to signal... */
						PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
					} while (kern_res != KERN_SUCCESS);
#endif /* __DARWIN_UNIX03 */
				/* An enabled, pending cancellation makes the join report
				 * PTHREAD_CANCELED as the result. */
				if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) == (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
					res = PTHREAD_CANCELED;
#endif /* __DARWIN_UNIX03 */
				/* Remove the dead thread from the global list... */
				LOCK(_pthread_list_lock);
				LIST_REMOVE(thread, plist);
				UNLOCK(_pthread_list_lock);
				/* ... and wait for it to really be dead */
				while ((res = _pthread_reap_thread(thread,
						thread->kernel_thread,
						value_ptr)) == EAGAIN)
			UNLOCK(thread->lock);
		UNLOCK(thread->lock);
	restore_sem_to_pool(death);
/* Request cancellation of 'thread'.
 * NOTE(review): this chunk is truncated — the return-type line, the
 * opening "#if __DARWIN_UNIX03", the local 'state' declaration, the
 * LOCK(thread->lock) and the ESRCH/return statements are missing;
 * confirm against the full source. */
pthread_cancel(pthread_t thread)
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */
	/* Validate the handle via the pthread signature word. */
	if (thread->sig == _PTHREAD_SIG)
		/* Latch the pending bit and snapshot the combined state. */
		state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
		UNLOCK(thread->lock);
		/* Only notify the kernel thread when cancellation is
		 * currently enabled for the target. */
		if (state & PTHREAD_CANCEL_ENABLE)
			__pthread_markcancel(thread->kernel_thread);
#else /* __DARWIN_UNIX03 */
	/* Legacy variant: just record the pending cancellation. */
	thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
#endif /* __DARWIN_UNIX03 */
/* Create a cancellation point in the calling thread: acts on any pending
 * cancellation request for the current thread.
 * NOTE(review): the return-type line, the opening "#if __DARWIN_UNIX03"
 * and the function braces appear truncated in this chunk. */
pthread_testcancel(void)
	pthread_t self = pthread_self();
	if (__unix_conforming == 0)
		__unix_conforming = 1;
	/* Conforming (UNIX03) semantics. */
	_pthread_testcancel(self, 1);
#else /* __DARWIN_UNIX03 */
	/* Legacy semantics. */
	_pthread_testcancel(self, 0);
#endif /* __DARWIN_UNIX03 */
/*
 * Query/update the cancelability 'state' of a thread
 */
/* NOTE(review): this chunk is truncated — the function signature's
 * return type, the "switch (state)" header, the break/default (EINVAL)
 * handling, the LOCK/UNLOCK of self->lock, the "if (oldstate)" guard and
 * the return statements are missing; confirm against the full source. */
pthread_setcancelstate(int state, int *oldstate)
	pthread_t self = pthread_self();
	if (__unix_conforming == 0)
		__unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */
	case PTHREAD_CANCEL_ENABLE:
		/* Conforming builds inform the kernel of the new state. */
		__pthread_canceled(1);
#endif /* __DARWIN_UNIX03 */
	case PTHREAD_CANCEL_DISABLE:
		__pthread_canceled(2);
#endif /* __DARWIN_UNIX03 */
	self = pthread_self();
	/* Report the previous state bits, then splice in the new ones. */
	*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
	self->cancel_state |= state;
#if !__DARWIN_UNIX03
	_pthread_testcancel(self, 0);	/* See if we need to 'die' now... */
#endif /* __DARWIN_UNIX03 */
1605 * Query/update the cancelability 'type' of a thread
1608 pthread_setcanceltype(int type
, int *oldtype
)
1610 pthread_t self
= pthread_self();
1613 if (__unix_conforming
== 0)
1614 __unix_conforming
= 1;
1615 #endif /* __DARWIN_UNIX03 */
1617 if ((type
!= PTHREAD_CANCEL_DEFERRED
) &&
1618 (type
!= PTHREAD_CANCEL_ASYNCHRONOUS
))
1620 self
= pthread_self();
1623 *oldtype
= self
->cancel_state
& _PTHREAD_CANCEL_TYPE_MASK
;
1624 self
->cancel_state
&= ~_PTHREAD_CANCEL_TYPE_MASK
;
1625 self
->cancel_state
|= type
;
1627 #if !__DARWIN_UNIX03
1628 _pthread_testcancel(self
, 0); /* See if we need to 'die' now... */
1629 #endif /* __DARWIN_UNIX03 */