/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * POSIX Pthread Library
 */

#include <stdio.h>              /* For printf(). */
#include <errno.h>              /* For __mach_errno_addr() prototype. */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/syscall.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>

#include "pthread_internals.h"

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);

/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
static int _pthread_count = 1;

static pthread_lock_t _pthread_count_lock = LOCK_INITIALIZER;

/* Same implementation as LOCK, but without the __is_threaded check */
__private_extern__ void
_spin_lock_retry(pthread_lock_t *lock)
{
    int tries = _spin_tries;

    do {
        if (tries-- > 0)
            continue;
        /* Spin budget exhausted; briefly depress priority and yield before retrying */
        syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
        tries = _spin_tries;
    } while (!_spin_lock_try(lock));
}

extern mach_port_t thread_recycle_port;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;

/*
 * [Internal] stack support
 */
size_t _pthread_stack_size = 0;
#define STACK_LOWEST(sp)        ((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED          (sizeof (struct _pthread))

#ifdef STACK_GROWS_UP

/* The stack grows towards higher addresses:
   |struct _pthread|user stack---------------->|
   ^STACK_BASE     ^STACK_START
*/

#define STACK_BASE(sp)          STACK_LOWEST(sp)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) + STACK_RESERVED)
#define STACK_SELF(sp)          STACK_BASE(sp)

#else /* STACK_GROWS_UP */

/* The stack grows towards lower addresses:
   |<----------------user stack|struct _pthread|
   ^STACK_LOWEST               ^STACK_START    ^STACK_BASE
*/

#define STACK_BASE(sp)          (((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)          STACK_START(sp)

#endif /* STACK_GROWS_UP */

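/*
 * Worked example (illustrative only, assuming stacks grow down and a
 * hypothetical __pthread_stack_mask of 0x7FFFF, i.e. 512KB-aligned
 * stacks): for sp == 0xB0040000,
 *
 *      STACK_BASE(sp)  == ((0xB0040000 | 0x7FFFF) + 1) == 0xB0080000
 *      STACK_START(sp) == 0xB0080000 - sizeof (struct _pthread)
 *
 * so the struct _pthread sits at the very top of the allocation and the
 * usable stack runs downward from just below it.
 */
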
#if defined(__ppc__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error Need to define a stack address hint for this architecture
#endif

/* Set the base address to use as the stack pointer, before adjusting due to the ABI */
static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
    kern_return_t kr;

#if 1
    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        assert(((vm_address_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
        *stack = attrs->stackaddr;
        return 0;
    }

    *((vm_address_t *)stack) = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), (vm_address_t *)stack,
                attrs->stacksize + vm_page_size,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         (vm_address_t *)stack, attrs->stacksize + vm_page_size,
                         VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
#ifdef STACK_GROWS_UP
    /* The guard page is the page one higher than the stack */
    /* The stack base is at the lowest address */
    kr = vm_protect(mach_task_self(), *stack + attrs->stacksize, vm_page_size, FALSE, VM_PROT_NONE);
#else
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    kr = vm_protect(mach_task_self(), (vm_address_t)*stack, vm_page_size, FALSE, VM_PROT_NONE);
    *stack += attrs->stacksize + vm_page_size;
#endif

#else /* legacy allocator, retained for reference */
    vm_address_t cur_stack = (vm_address_t)0;
    if (free_stacks == 0)
    {
        /* Allocating guard pages is done by doubling
         * the actual stack size, since STACK_BASE() needs
         * to have stacks aligned on stack_size. Allocating just
         * one page takes as much memory as allocating more pages
         * since it will remain one entry in the vm map.
         * Besides, allocating more than one page allows tracking the
         * overflow pattern when the overflow is bigger than one page.
         */
#ifndef NO_GUARD_PAGES
# define GUARD_SIZE(a)  (2*(a))
# define GUARD_MASK(a)  (((a)<<1) | 1)
#else
# define GUARD_SIZE(a)  (a)
# define GUARD_MASK(a)  (a)
#endif
        while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
        {
            lowest_stack -= GUARD_SIZE(__pthread_stack_size);
            /* Ensure stack is there */
            kr = vm_allocate(mach_task_self(),
                             &lowest_stack,
                             GUARD_SIZE(__pthread_stack_size),
                             FALSE);
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack+__pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
                if (kr == KERN_SUCCESS)
                    break;
            }
#else
            if (kr == KERN_SUCCESS)
                break;
#endif
        }
        if (lowest_stack > 0)
            free_stacks = (vm_address_t *)lowest_stack;
        else
        {
            /* Too bad. We'll just have to take what comes.
               Use vm_map instead of vm_allocate so we can
               specify alignment. */
            kr = vm_map(mach_task_self(), &lowest_stack,
                        GUARD_SIZE(__pthread_stack_size),
                        GUARD_MASK(__pthread_stack_mask),
                        TRUE /* anywhere */, MEMORY_OBJECT_NULL,
                        0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                        VM_INHERIT_DEFAULT);
            /* This really shouldn't fail and if it does I don't
               know what to do. */
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack+__pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
            }
#endif
            free_stacks = (vm_address_t *)lowest_stack;
        }
        *free_stacks = 0; /* No other free stacks */
    }
    cur_stack = STACK_START((vm_address_t) free_stacks);
    free_stacks = (vm_address_t *)*free_stacks;
    cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
#endif
    return 0;
}

static pthread_attr_t _pthread_attr_default = {0};

/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        attr->sig = 0;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

static const size_t DEFAULT_STACK_SIZE = DFLSSIZ;

/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->freeStackOnExit = TRUE;
    return (ESUCCESS);
}

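/*
 * Illustrative usage sketch (not part of the library): initialize an
 * attribute structure, override the defaults set above, and create a
 * detached thread.  worker_main is a hypothetical start routine.
 */
#if 0
#include <pthread.h>

static void *worker_main(void *arg)
{
    return arg;
}

static int spawn_detached_worker(void)
{
    pthread_attr_t attr;
    pthread_t tid;
    int err;

    pthread_attr_init(&attr);   /* picks up the defaults above */
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    err = pthread_create(&tid, &attr, worker_main, NULL);
    pthread_attr_destroy(&attr);
    return err;
}
#endif
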
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (ESUCCESS);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0) &&
        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        attr->stacksize = stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

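/*
 * Illustrative sketch (not part of the library): supplying our own stack
 * via pthread_attr_setstack.  Per the checks above, the address must be
 * page-aligned and the size a page multiple of at least PTHREAD_STACK_MIN,
 * and the library will not free it on exit (freeStackOnExit is cleared),
 * so the caller owns the memory.  create_on_private_stack is hypothetical.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

static int create_on_private_stack(pthread_t *tid,
                                   void *(*fn)(void *), void *arg)
{
    size_t size = 16 * getpagesize();   /* page multiple, >= PTHREAD_STACK_MIN */
    void *stack = valloc(size);         /* valloc() returns page-aligned memory */
    pthread_attr_t attr;
    int err;

    if (stack == NULL)
        return ENOMEM;
    pthread_attr_init(&attr);
    err = pthread_attr_setstack(&attr, stack, size);
    if (err == 0)
        err = pthread_create(tid, &attr, fn, arg);
    pthread_attr_destroy(&attr);
    return err;
}
#endif
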
/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
    _pthread_set_self(self);
    pthread_exit((self->fun)(self->arg));
}

static int
_pthread_create(pthread_t t,
                const pthread_attr_t *attrs,
                void *stack,
                const mach_port_t kernel_thread)
{
    int res = ESUCCESS;

    memset(t, 0, sizeof(*t));
    t->stacksize = attrs->stacksize;
    t->stackaddr = (void *)stack;
    t->kernel_thread = kernel_thread;
    t->detached = attrs->detached;
    t->inherit = attrs->inherit;
    t->policy = attrs->policy;
    t->param = attrs->param;
    t->freeStackOnExit = attrs->freeStackOnExit;
    t->mutexes = (struct _pthread_mutex *)NULL;
    t->sig = _PTHREAD_SIG;
    t->reply_port = MACH_PORT_NULL;
    t->cthread_self = NULL;
    t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
    t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
    t->death = SEMAPHORE_NULL;
    /* Only a real kernel thread can have its scheduling applied now */
    if (kernel_thread != MACH_PORT_NULL)
        pthread_setschedparam(t, t->policy, &t->param);
    return (res);
}

/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to know whether this process has (had) at least one
 * thread apart from the main thread. There could be a race if a thread is in
 * the process of being created at the time of the call. It does not tell
 * whether there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return (__is_threaded);
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    thread_t kernel_thread;

    /* Wait for the creator to initialize it */
    while ((kernel_thread = t->kernel_thread) == MACH_PORT_NULL)
        sched_yield();

    return kernel_thread;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    return t->stacksize;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    return t->stackaddr;
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}

static int
_pthread_create_suspended(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *),
                          void *arg,
                          int suspended)
{
    pthread_attr_t *attrs;
    void *stack;
    int res;
    int needresume = 0;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }

    /* In default policy (ie SCHED_OTHER) only sched_priority is used. Check whether
     * any change in priority or policy is needed here.
     */
    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
         (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
        needresume = 1;
        suspended = 1;
    }

    /* Allocate a stack for the thread */
    if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
        return (res);
    }
    t = (pthread_t)malloc(sizeof(struct _pthread));
    *thread = t;

    /* Create the Mach thread for this thread */
    PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
    if (kern_res != KERN_SUCCESS)
    {
        printf("Can't create thread: %d\n", kern_res);
        res = EINVAL; /* Need better error here? */
        return (res);
    }

    if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
    {
        return (res);
    }

    set_malloc_singlethreaded(0);
    __is_threaded = 1;
    LOCK(_pthread_count_lock);
    _pthread_count++;
    UNLOCK(_pthread_count_lock);

    /* Send it on its way */
    t->arg = arg;
    t->fun = start_routine;
    /* Now set it up to execute */
    _pthread_setup(t, _pthread_body, stack, suspended, needresume);

    return (res);
}

int
pthread_create(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
                            const pthread_attr_t *attr,
                            void *(*start_routine)(void *),
                            void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}

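/*
 * Illustrative sketch (not part of the library): a thread created with
 * pthread_create_suspended_np does not run until its underlying Mach
 * thread is resumed.  This assumes the usual thread_resume() from
 * <mach/mach.h> applied to the port from pthread_mach_thread_np();
 * start_suspended is a hypothetical helper.
 */
#if 0
#include <pthread.h>
#include <mach/mach.h>

static int start_suspended(void *(*fn)(void *), void *arg)
{
    pthread_t tid;
    int err;

    err = pthread_create_suspended_np(&tid, NULL, fn, arg);
    if (err == 0)
        (void)thread_resume(pthread_mach_thread_np(tid));
    return err;
}
#endif
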
/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached & PTHREAD_CREATE_JOINABLE)
        {
            if (thread->detached & _PTHREAD_EXITED) {
                /* Already exited; a join will reclaim its resources */
                UNLOCK(thread->lock);
                pthread_join(thread, NULL);
                return (ESUCCESS);
            } else {
                semaphore_t death = thread->death;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;
                UNLOCK(thread->lock);
                if (death)
                    (void) semaphore_signal(death);
                return (ESUCCESS);
            }
        } else {
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else {
        return (ESRCH); /* Not a valid thread */
    }
}

/*
 * pthread_kill call to system call
 */
extern int __pthread_kill(mach_port_t, int);

int
pthread_kill(pthread_t th, int sig)
{
    int error = 0;

    if ((sig < 0) || (sig > NSIG))
        return (EINVAL);

    if (th && (th->sig == _PTHREAD_SIG)) {
        error = __pthread_kill(pthread_mach_thread_np(th), sig);
        if (error == -1)
            error = errno;
        return (error);
    }
    else
        return (ESRCH);
}

/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
                                          MACH_MSG_TYPE_MOVE_SEND);
    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = kernel_thread;
    msg.header.msgh_id = (int)thread;
    ret = mach_msg_send(&msg.header);
    assert(ret == MACH_MSG_SUCCESS);
}

/* Reap the resources for available threads */
static
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) {
    mach_port_type_t ptype;
    kern_return_t ret;
    task_t self;

    self = mach_task_self();
    if (kernel_thread != MACH_PORT_DEAD) {
        ret = mach_port_type(self, kernel_thread, &ptype);
        if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
            /* not quite dead yet... */
            return EAGAIN;
        }
        ret = mach_port_deallocate(self, kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_deallocate(kernel_thread) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->reply_port != MACH_PORT_NULL) {
        ret = mach_port_mod_refs(self, th->reply_port,
                                 MACH_PORT_RIGHT_RECEIVE, -1);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_mod_refs(reply_port) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->freeStackOnExit) {
        vm_address_t addr = (vm_address_t)th->stackaddr;
        vm_size_t size;

        size = (vm_size_t)th->stacksize + vm_page_size;

#if !defined(STACK_GROWS_UP)
        /* stackaddr points at the top; back up to the allocation base */
        addr -= size;
#endif
        ret = vm_deallocate(self, addr, size);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "vm_deallocate(stack) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (value_ptr)
        *value_ptr = th->exit_value;

    if (th != &_thread)
        free(th);

    return ESUCCESS;
}

static
void _pthread_reap_threads(void)
{
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                   sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                   MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    while (ret == MACH_MSG_SUCCESS) {
        mach_port_t kernel_thread = msg.header.msgh_remote_port;
        pthread_t thread = (pthread_t)msg.header.msgh_id;

        if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN)
        {
            /* not dead yet, put it back for someone else to reap, stop here */
            _pthread_become_available(thread, kernel_thread);
            return;
        }
        ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                       sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }
}

/* For compatibility... */

pthread_t
_pthread_self(void) {
    return pthread_self();
}

/*
 * Terminate a thread.
 */
void
pthread_exit(void *value_ptr)
{
    struct _pthread_handler_rec *handler;
    pthread_t self = pthread_self();
    kern_return_t kern_res;
    int thread_count;

    /* Keep this thread from receiving any further signals */

    while ((handler = self->cleanup_stack) != 0)
    {
        (handler->routine)(handler->arg);
        self->cleanup_stack = handler->next;
    }
    _pthread_tsd_cleanup(self);

    _pthread_reap_threads();

    LOCK(self->lock);
    self->detached |= _PTHREAD_EXITED;

    if (self->detached & PTHREAD_CREATE_JOINABLE) {
        mach_port_t death = self->death;
        self->exit_value = value_ptr;
        UNLOCK(self->lock);
        /* the joiner will need a kernel thread reference, leave ours for it */
        if (death) {
            PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
            if (kern_res != KERN_SUCCESS)
                fprintf(stderr,
                        "semaphore_signal(death) failed: %s\n",
                        mach_error_string(kern_res));
        }
    } else {
        UNLOCK(self->lock);
        /* with no joiner, we let become available consume our cached ref */
        _pthread_become_available(self, pthread_mach_thread_np(self));
    }

    LOCK(_pthread_count_lock);
    thread_count = --_pthread_count;
    UNLOCK(_pthread_count_lock);
    if (thread_count <= 0)
        exit(0);

    /* Use a new reference to terminate ourselves. Should never return. */
    PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
    fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
            mach_error_string(kern_res));
    exit(-1);
}

/*
 * Wait for a thread to terminate and obtain its exit value.
 */
int
pthread_join(pthread_t thread,
             void **value_ptr)
{
    kern_return_t kern_res;
    int res = ESUCCESS;

    if (thread->sig == _PTHREAD_SIG)
    {
        semaphore_t death = new_sem_from_pool(); /* in case we need it */

        LOCK(thread->lock);
        if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
            thread->death == SEMAPHORE_NULL)
        {
            pthread_t self = pthread_self();

            assert(thread->joiner == NULL);
            if (thread != self && (self == NULL || self->joiner != thread))
            {
                int already_exited = (thread->detached & _PTHREAD_EXITED);

                thread->death = death;
                thread->joiner = self;
                UNLOCK(thread->lock);

                if (!already_exited)
                {
                    /* Wait for it to signal... */
                    do {
                        PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
                    } while (kern_res != KERN_SUCCESS);
                }

                /* ... and wait for it to really be dead */
                while ((res = _pthread_reap_thread(thread,
                                                   thread->kernel_thread,
                                                   value_ptr)) == EAGAIN)
                {
                    sched_yield();
                }
            } else {
                UNLOCK(thread->lock);
                res = EDEADLK;
            }
        } else {
            UNLOCK(thread->lock);
            res = EINVAL;
        }
        restore_sem_to_pool(death);
        return res;
    }
    return ESRCH;
}

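/*
 * Illustrative sketch (not part of the library): the usual create/join
 * round trip.  Note from the code above that joining a detached thread,
 * joining yourself, or cross-joining a pair of threads yields EINVAL or
 * EDEADLK rather than hanging.  compute and run_and_join are hypothetical.
 */
#if 0
#include <pthread.h>

static void *compute(void *arg)
{
    return (void *)((long)arg * 2);
}

static long run_and_join(void)
{
    pthread_t tid;
    void *result = NULL;

    if (pthread_create(&tid, NULL, compute, (void *)21) != 0)
        return -1;
    if (pthread_join(tid, &result) != 0)
        return -1;
    return (long)result;    /* 42 */
}
#endif
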
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        *policy = thread->policy;
        *param = thread->param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
                      int policy,
                      const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    if (thread->sig == _PTHREAD_SIG)
    {
        switch (policy)
        {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return (EINVAL);
        }
        thread->policy = policy;
        thread->param = *param;
        ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
        if (ret != KERN_SUCCESS)
        {
            return (EINVAL);
        }
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

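/*
 * Illustrative sketch (not part of the library): bumping a thread to
 * SCHED_RR at the maximum advertised priority.  sched_get_priority_max
 * is defined just below as default_priority + 16.  make_round_robin is
 * hypothetical.
 */
#if 0
#include <pthread.h>

static int make_round_robin(pthread_t tid)
{
    struct sched_param param = { 0 };

    param.sched_priority = sched_get_priority_max(SCHED_RR);
    return pthread_setschedparam(tid, SCHED_RR, &param);
}
#endif
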
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}

/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
              pthread_t t2)
{
    return (t1 == t2);
}

void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();

    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        _pthread_set_self(cself);
        return;
    }
    self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();

    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        return (void *)self;
    }
    return self->cthread_self;
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
             void (*init_routine)(void))
{
    LOCK(once_control->lock);
    if (once_control->sig == _PTHREAD_ONCE_SIG_init)
    {
        (*init_routine)();
        once_control->sig = _PTHREAD_ONCE_SIG;
    }
    UNLOCK(once_control->lock);
    return (ESUCCESS); /* Spec defines no possible errors! */
}

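/*
 * Illustrative sketch (not part of the library): pthread_once guarantees
 * init_subsystem runs exactly once no matter how many threads race into
 * get_handle first.  init_subsystem and get_handle are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;
static void *global_handle;

static void init_subsystem(void)
{
    global_handle = /* ... one-time setup ... */ (void *)0;
}

static void *get_handle(void)
{
    pthread_once(&init_once, init_subsystem);
    return global_handle;
}
#endif
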
int
pthread_cancel(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
        return (ESUCCESS);
    } else
    {
        return (ESRCH);
    }
}

/*
 * Insert a cancellation point in a thread.
 */
static void
_pthread_testcancel(pthread_t thread)
{
    LOCK(thread->lock);
    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
        (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
    {
        UNLOCK(thread->lock);
        pthread_exit(0);
    }
    UNLOCK(thread->lock);
}

void
pthread_testcancel(void)
{
    pthread_t self = pthread_self();

    _pthread_testcancel(self);
}

/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;

    LOCK(self->lock);
    *oldstate = self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK;
    if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_STATE_MASK) | state;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return (err);
}

/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;

    LOCK(self->lock);
    *oldtype = self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK;
    if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK) | type;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return (err);
}

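/*
 * Illustrative sketch (not part of the library): temporarily blocking
 * cancellation around a non-reentrant critical section, then restoring
 * the previous state and polling the deferred cancel with
 * pthread_testcancel.  critical_section is hypothetical.
 */
#if 0
#include <pthread.h>

static void critical_section(void)
{
    int oldstate, ignored;

    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
    /* ... work that must not be interrupted by cancellation ... */
    pthread_setcancelstate(oldstate, &ignored);
    pthread_testcancel();   /* honor any cancel that arrived meanwhile */
}
#endif
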
int
pthread_getconcurrency(void)
{
    return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
    pthread_concurrency = new_level;
    return (ESUCCESS);
}

/*
 * Perform package initialization - called automatically when application starts
 */
static int
pthread_init(void)
{
    pthread_attr_t *attrs;
    pthread_t thread;
    kern_return_t kr;
    host_basic_info_data_t basic_info;
    host_priority_info_data_t priority_info;
    host_info_t info;
    host_flavor_t flavor;
    host_t host;
    mach_msg_type_number_t count;
    int mib[2];
    size_t len;
    int numcpus;

    count = HOST_PRIORITY_INFO_COUNT;
    info = (host_info_t)&priority_info;
    flavor = HOST_PRIORITY_INFO;
    host = mach_host_self();
    kr = host_info(host, flavor, info, &count);
    if (kr != KERN_SUCCESS)
        printf("host_info failed (%d); probably need privilege.\n", kr);
    else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    attrs = &_pthread_attr_default;
    pthread_attr_init(attrs);

    thread = &_thread;
    _pthread_set_self(thread);
    _pthread_create(thread, attrs, (void *)USRSTACK, mach_thread_self());
    thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

    /* See if we're on a multiprocessor and set _spin_tries if so. */
    mib[0] = CTL_HW;
    mib[1] = HW_NCPU;
    len = sizeof(numcpus);
    if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
        if (numcpus > 1) {
            _spin_tries = MP_SPIN_TRIES;
        }
    } else {
        count = HOST_BASIC_INFO_COUNT;
        info = (host_info_t)&basic_info;
        flavor = HOST_BASIC_INFO;
        kr = host_info(host, flavor, info, &count);
        if (kr != KERN_SUCCESS)
            printf("host_info failed (%d)\n", kr);
        else if (basic_info.avail_cpus > 1)
            _spin_tries = MP_SPIN_TRIES;
    }

    mach_port_deallocate(mach_task_self(), host);

    _init_cpu_capabilities(); /* check for vector unit, cache line size etc */

#if defined(__ppc__)
    /* Use fsqrt instruction in sqrt() if available. */
    if (_cpu_capabilities & kHasFsqrt) {
        extern size_t hw_sqrt_len;
        extern double sqrt( double );
        extern double hw_sqrt( double );
        extern void sys_icache_invalidate(void *, size_t);

        memcpy((void *)sqrt, (void *)hw_sqrt, hw_sqrt_len);
        sys_icache_invalidate((void *)sqrt, hw_sqrt_len);
    }
#endif

    mig_init(1); /* enable multi-threaded mig interfaces */
    return 0;
}

int sched_yield(void)
{
    swtch_pri(0);
    return 0;
}

/* This is the "magic" that gets the initialization routine called when the application starts */
int (*_cthread_init_routine)(void) = pthread_init;

/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
    kern_return_t res;
    semaphore_t sem;
    int i;

    LOCK(sem_pool_lock);
    if (sem_pool_current == sem_pool_count) {
        sem_pool_count += 16;
        sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
        for (i = sem_pool_current; i < sem_pool_count; i++) {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
        }
    }
    sem = sem_pool[sem_pool_current++];
    UNLOCK(sem_pool_lock);
    return sem;
}

/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    sem_pool[--sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}

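/*
 * Illustrative note: the pool is a simple LIFO of pre-created Mach
 * semaphores.  pthread_join above shows the intended pattern -- grab one
 * up front with new_sem_from_pool(), use it for at most one wait/signal
 * handshake, and hand it back with restore_sem_to_pool() on every exit
 * path, e.g.:
 *
 *      semaphore_t death = new_sem_from_pool();
 *      ...
 *      restore_sem_to_pool(death);
 */
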
static void sem_pool_reset(void) {
    LOCK(sem_pool_lock);
    sem_pool_count = 0;
    sem_pool_current = 0;
    sem_pool = NULL;
    UNLOCK(sem_pool_lock);
}

__private_extern__ void _pthread_fork_child(void) {
    /* Just in case somebody had it locked... */
    UNLOCK(sem_pool_lock);
    sem_pool_reset();
    UNLOCK(_pthread_count_lock);
    _pthread_count = 1;
}