2 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
5 * Permission to use, copy, modify, and distribute this software and
6 * its documentation for any purpose and without fee is hereby granted,
7 * provided that the above copyright notice appears in all copies and
8 * that both the copyright notice and this permission notice appear in
9 * supporting documentation.
11 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
12 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
13 * FOR A PARTICULAR PURPOSE.
15 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
16 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
17 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
18 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
19 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
27 * POSIX Pthread Library
32 #include <stdio.h> /* For printf(). */
34 #include <errno.h> /* For __mach_errno_addr() prototype. */
36 #include <sys/resource.h>
37 #include <sys/sysctl.h>
38 #include <machine/vmparam.h>
39 #include <mach/vm_statistics.h>
41 #include "pthread_internals.h"
43 /* Per-thread kernel support */
44 extern void _pthread_set_self(pthread_t
);
45 extern void mig_init(int);
47 /* Needed to tell the malloc subsystem we're going multithreaded */
48 extern void set_malloc_singlethreaded(int);
50 /* Used when we need to call into the kernel with no reply port */
51 extern pthread_lock_t reply_port_lock
;
54 * [Internal] stack support
57 size_t _pthread_stack_size
= 0;
59 int _cpu_has_altivec
= 0;
61 /* This global should be used (carefully) by anyone needing to know if a pthread has been
64 int __is_threaded
= 0;
66 /* These are used to keep track of a semaphore pool shared by mutexes and condition
70 static semaphore_t
*sem_pool
= NULL
;
71 static int sem_pool_count
= 0;
72 static int sem_pool_current
= 0;
73 static pthread_lock_t sem_pool_lock
= LOCK_INITIALIZER
;
75 static int default_priority
;
76 static int max_priority
;
77 static int min_priority
;
79 extern mach_port_t thread_recycle_port
;
#define STACK_LOWEST(sp)	((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED		(sizeof (struct _pthread))

/* NOTE(review): the #ifdef/#else/#endif around the two variants was lost in
 * extraction; without it both STACK_BASE/START/SELF sets are redefined.
 * Restored here — confirm the guard symbol against the original. */
#ifdef STACK_GROWS_UP
/* The stack grows towards higher addresses:
   |struct _pthread|user stack---------------->|
   ^STACK_BASE     ^STACK_START */
#define STACK_BASE(sp)		STACK_LOWEST(sp)
#define STACK_START(stack_low)	(STACK_BASE(stack_low) + STACK_RESERVED)
#define STACK_SELF(sp)		STACK_BASE(sp)
#else /* !STACK_GROWS_UP */
/* The stack grows towards lower addresses:
   |<----------------user stack|struct _pthread|
   ^STACK_LOWEST               ^STACK_START    ^STACK_BASE */
#define STACK_BASE(sp)		(((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)	(STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)		STACK_START(sp)
#endif /* STACK_GROWS_UP */
108 /* This is the struct used to recycle (or terminate) a thread */
109 /* We stash the thread port into the reply port of the message */
112 mach_msg_header_t header
;
113 mach_msg_trailer_t trailer
;
116 /* Set the base address to use as the stack pointer, before adjusting due to the ABI */
119 _pthread_allocate_stack(pthread_attr_t
*attrs
, vm_address_t
*stack
)
123 assert(attrs
->stacksize
>= PTHREAD_STACK_MIN
);
124 if (attrs
->stackaddr
!= NULL
) {
125 assert(((vm_offset_t
)(attrs
->stackaddr
) & (vm_page_size
- 1)) == 0);
126 *stack
= (vm_address_t
)attrs
->stackaddr
;
129 kr
= vm_allocate(mach_task_self(), stack
, attrs
->stacksize
+ vm_page_size
, VM_MAKE_TAG(VM_MEMORY_STACK
)| TRUE
);
130 if (kr
!= KERN_SUCCESS
) {
133 #ifdef STACK_GROWS_UP
134 /* The guard page is the page one higher than the stack */
135 /* The stack base is at the lowest address */
136 kr
= vm_protect(mach_task_self(), *stack
+ attrs
->stacksize
, vm_page_size
, FALSE
, VM_PROT_NONE
);
138 /* The guard page is at the lowest address */
139 /* The stack base is the highest address */
140 kr
= vm_protect(mach_task_self(), *stack
, vm_page_size
, FALSE
, VM_PROT_NONE
);
141 *stack
+= attrs
->stacksize
+ vm_page_size
;
145 vm_address_t cur_stack
= (vm_address_t
)0;
146 if (free_stacks
== 0)
148 /* Allocating guard pages is done by doubling
149 * the actual stack size, since STACK_BASE() needs
150 * to have stacks aligned on stack_size. Allocating just
151 * one page takes as much memory as allocating more pages
152 * since it will remain one entry in the vm map.
153 * Besides, allocating more than one page allows tracking the
154 * overflow pattern when the overflow is bigger than one page.
156 #ifndef NO_GUARD_PAGES
157 # define GUARD_SIZE(a) (2*(a))
158 # define GUARD_MASK(a) (((a)<<1) | 1)
160 # define GUARD_SIZE(a) (a)
161 # define GUARD_MASK(a) (a)
163 while (lowest_stack
> GUARD_SIZE(__pthread_stack_size
))
165 lowest_stack
-= GUARD_SIZE(__pthread_stack_size
);
166 /* Ensure stack is there */
167 kr
= vm_allocate(mach_task_self(),
169 GUARD_SIZE(__pthread_stack_size
),
171 #ifndef NO_GUARD_PAGES
172 if (kr
== KERN_SUCCESS
) {
173 # ifdef STACK_GROWS_UP
174 kr
= vm_protect(mach_task_self(),
175 lowest_stack
+__pthread_stack_size
,
176 __pthread_stack_size
,
177 FALSE
, VM_PROT_NONE
);
178 # else /* STACK_GROWS_UP */
179 kr
= vm_protect(mach_task_self(),
181 __pthread_stack_size
,
182 FALSE
, VM_PROT_NONE
);
183 lowest_stack
+= __pthread_stack_size
;
184 # endif /* STACK_GROWS_UP */
185 if (kr
== KERN_SUCCESS
)
189 if (kr
== KERN_SUCCESS
)
193 if (lowest_stack
> 0)
194 free_stacks
= (vm_address_t
*)lowest_stack
;
197 /* Too bad. We'll just have to take what comes.
198 Use vm_map instead of vm_allocate so we can
199 specify alignment. */
200 kr
= vm_map(mach_task_self(), &lowest_stack
,
201 GUARD_SIZE(__pthread_stack_size
),
202 GUARD_MASK(__pthread_stack_mask
),
203 TRUE
/* anywhere */, MEMORY_OBJECT_NULL
,
204 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
,
206 /* This really shouldn't fail and if it does I don't
208 #ifndef NO_GUARD_PAGES
209 if (kr
== KERN_SUCCESS
) {
210 # ifdef STACK_GROWS_UP
211 kr
= vm_protect(mach_task_self(),
212 lowest_stack
+__pthread_stack_size
,
213 __pthread_stack_size
,
214 FALSE
, VM_PROT_NONE
);
215 # else /* STACK_GROWS_UP */
216 kr
= vm_protect(mach_task_self(),
218 __pthread_stack_size
,
219 FALSE
, VM_PROT_NONE
);
220 lowest_stack
+= __pthread_stack_size
;
221 # endif /* STACK_GROWS_UP */
224 free_stacks
= (vm_address_t
*)lowest_stack
;
227 *free_stacks
= 0; /* No other free stacks */
229 cur_stack
= STACK_START((vm_address_t
) free_stacks
);
230 free_stacks
= (vm_address_t
*)*free_stacks
;
231 cur_stack
= _adjust_sp(cur_stack
); /* Machine dependent stack fudging */
237 * Destroy a thread attribute structure
240 pthread_attr_destroy(pthread_attr_t
*attr
)
242 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
247 return (EINVAL
); /* Not an attribute structure! */
252 * Get the 'detach' state from a thread attribute structure.
253 * Note: written as a helper function for info hiding
256 pthread_attr_getdetachstate(const pthread_attr_t
*attr
,
259 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
261 *detachstate
= attr
->detached
;
265 return (EINVAL
); /* Not an attribute structure! */
270 * Get the 'inherit scheduling' info from a thread attribute structure.
271 * Note: written as a helper function for info hiding
274 pthread_attr_getinheritsched(const pthread_attr_t
*attr
,
277 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
279 *inheritsched
= attr
->inherit
;
283 return (EINVAL
); /* Not an attribute structure! */
288 * Get the scheduling parameters from a thread attribute structure.
289 * Note: written as a helper function for info hiding
292 pthread_attr_getschedparam(const pthread_attr_t
*attr
,
293 struct sched_param
*param
)
295 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
297 *param
= attr
->param
;
301 return (EINVAL
); /* Not an attribute structure! */
306 * Get the scheduling policy from a thread attribute structure.
307 * Note: written as a helper function for info hiding
310 pthread_attr_getschedpolicy(const pthread_attr_t
*attr
,
313 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
315 *policy
= attr
->policy
;
319 return (EINVAL
); /* Not an attribute structure! */
323 static const size_t DEFAULT_STACK_SIZE
= DFLSSIZ
;
325 * Initialize a thread attribute structure to default values.
328 pthread_attr_init(pthread_attr_t
*attr
)
330 attr
->stacksize
= DEFAULT_STACK_SIZE
;
331 attr
->stackaddr
= NULL
;
332 attr
->sig
= _PTHREAD_ATTR_SIG
;
333 attr
->policy
= _PTHREAD_DEFAULT_POLICY
;
334 attr
->param
.sched_priority
= default_priority
;
335 attr
->param
.quantum
= 10; /* quantum isn't public yet */
336 attr
->inherit
= _PTHREAD_DEFAULT_INHERITSCHED
;
337 attr
->detached
= PTHREAD_CREATE_JOINABLE
;
338 attr
->freeStackOnExit
= TRUE
;
343 * Set the 'detach' state in a thread attribute structure.
344 * Note: written as a helper function for info hiding
347 pthread_attr_setdetachstate(pthread_attr_t
*attr
,
350 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
352 if ((detachstate
== PTHREAD_CREATE_JOINABLE
) ||
353 (detachstate
== PTHREAD_CREATE_DETACHED
))
355 attr
->detached
= detachstate
;
363 return (EINVAL
); /* Not an attribute structure! */
368 * Set the 'inherit scheduling' state in a thread attribute structure.
369 * Note: written as a helper function for info hiding
372 pthread_attr_setinheritsched(pthread_attr_t
*attr
,
375 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
377 if ((inheritsched
== PTHREAD_INHERIT_SCHED
) ||
378 (inheritsched
== PTHREAD_EXPLICIT_SCHED
))
380 attr
->inherit
= inheritsched
;
388 return (EINVAL
); /* Not an attribute structure! */
393 * Set the scheduling paramters in a thread attribute structure.
394 * Note: written as a helper function for info hiding
397 pthread_attr_setschedparam(pthread_attr_t
*attr
,
398 const struct sched_param
*param
)
400 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
402 /* TODO: Validate sched_param fields */
403 attr
->param
= *param
;
407 return (EINVAL
); /* Not an attribute structure! */
412 * Set the scheduling policy in a thread attribute structure.
413 * Note: written as a helper function for info hiding
416 pthread_attr_setschedpolicy(pthread_attr_t
*attr
,
419 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
421 if ((policy
== SCHED_OTHER
) ||
422 (policy
== SCHED_RR
) ||
423 (policy
== SCHED_FIFO
))
425 attr
->policy
= policy
;
433 return (EINVAL
); /* Not an attribute structure! */
438 * Set the scope for the thread.
439 * We currently only provide PTHREAD_SCOPE_SYSTEM
442 pthread_attr_setscope(pthread_attr_t
*attr
,
445 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
446 if (scope
== PTHREAD_SCOPE_SYSTEM
) {
447 /* No attribute yet for the scope */
449 } else if (scope
== PTHREAD_SCOPE_PROCESS
) {
453 return (EINVAL
); /* Not an attribute structure! */
457 * Get the scope for the thread.
458 * We currently only provide PTHREAD_SCOPE_SYSTEM
461 pthread_attr_getscope(pthread_attr_t
*attr
,
464 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
465 *scope
= PTHREAD_SCOPE_SYSTEM
;
468 return (EINVAL
); /* Not an attribute structure! */
471 /* Get the base stack address of the given thread */
473 pthread_attr_getstackaddr(const pthread_attr_t
*attr
, void **stackaddr
)
475 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
476 *stackaddr
= attr
->stackaddr
;
479 return (EINVAL
); /* Not an attribute structure! */
484 pthread_attr_setstackaddr(pthread_attr_t
*attr
, void *stackaddr
)
486 if ((attr
->sig
== _PTHREAD_ATTR_SIG
) && (((vm_offset_t
)stackaddr
& (vm_page_size
- 1)) == 0)) {
487 attr
->stackaddr
= stackaddr
;
488 attr
->freeStackOnExit
= FALSE
;
491 return (EINVAL
); /* Not an attribute structure! */
496 pthread_attr_getstacksize(const pthread_attr_t
*attr
, size_t *stacksize
)
498 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
499 *stacksize
= attr
->stacksize
;
502 return (EINVAL
); /* Not an attribute structure! */
507 pthread_attr_setstacksize(pthread_attr_t
*attr
, size_t stacksize
)
509 if ((attr
->sig
== _PTHREAD_ATTR_SIG
) && ((stacksize
% vm_page_size
) == 0) && (stacksize
>= PTHREAD_STACK_MIN
)) {
510 attr
->stacksize
= stacksize
;
513 return (EINVAL
); /* Not an attribute structure! */
517 pthread_t _cachedThread
= (pthread_t
)0;
519 void _clear_thread_cache(void) {
520 _cachedThread
= (pthread_t
)0;
524 * Create and start execution of a new thread.
528 _pthread_body(pthread_t self
)
530 _clear_thread_cache();
531 _pthread_set_self(self
);
532 pthread_exit((self
->fun
)(self
->arg
));
536 _pthread_create(pthread_t t
,
537 const pthread_attr_t
*attrs
,
539 const mach_port_t kernel_thread
)
542 kern_return_t kern_res
;
546 memset(t
, 0, sizeof(*t
));
547 t
->stacksize
= attrs
->stacksize
;
548 t
->stackaddr
= (void *)stack
;
549 t
->kernel_thread
= kernel_thread
;
550 t
->detached
= attrs
->detached
;
551 t
->inherit
= attrs
->inherit
;
552 t
->policy
= attrs
->policy
;
553 t
->param
= attrs
->param
;
554 t
->freeStackOnExit
= attrs
->freeStackOnExit
;
555 t
->mutexes
= (struct _pthread_mutex
*)NULL
;
556 t
->sig
= _PTHREAD_SIG
;
557 t
->reply_port
= MACH_PORT_NULL
;
558 t
->cthread_self
= NULL
;
560 t
->cancel_state
= PTHREAD_CANCEL_ENABLE
| PTHREAD_CANCEL_DEFERRED
;
561 t
->cleanup_stack
= (struct _pthread_handler_rec
*)NULL
;
562 pthread_setschedparam(t
, t
->policy
, &t
->param
);
563 /* Create control semaphores */
564 if (t
->detached
== PTHREAD_CREATE_JOINABLE
)
566 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
570 if (kern_res
!= KERN_SUCCESS
)
572 printf("Can't create 'death' semaphore: %d\n", kern_res
);
573 res
= EINVAL
; /* Need better error here? */
576 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
580 if (kern_res
!= KERN_SUCCESS
)
582 printf("Can't create 'joiners' semaphore: %d\n", kern_res
);
583 res
= EINVAL
; /* Need better error here? */
589 t
->death
= MACH_PORT_NULL
;
596 _pthread_is_threaded(void)
598 return __is_threaded
;
602 pthread_mach_thread_np(pthread_t t
)
604 return t
->kernel_thread
;
608 pthread_get_stacksize_np(pthread_t t
)
614 pthread_get_stackaddr_np(pthread_t t
)
620 _pthread_reply_port(pthread_t t
)
622 return t
->reply_port
;
626 _pthread_create_suspended(pthread_t
*thread
,
627 const pthread_attr_t
*attr
,
628 void *(*start_routine
)(void *),
632 pthread_attr_t _attr
, *attrs
;
636 kern_return_t kern_res
;
637 mach_port_t kernel_thread
;
638 if ((attrs
= (pthread_attr_t
*)attr
) == (pthread_attr_t
*)NULL
)
639 { /* Set up default paramters */
641 pthread_attr_init(attrs
);
642 } else if (attrs
->sig
!= _PTHREAD_ATTR_SIG
) {
648 /* Allocate a stack for the thread */
649 if ((res
= _pthread_allocate_stack(attrs
, &stack
)) != 0) {
652 t
= (pthread_t
)malloc(sizeof(struct _pthread
));
654 /* Create the Mach thread for this thread */
655 PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread
), kern_res
);
656 if (kern_res
!= KERN_SUCCESS
)
658 printf("Can't create thread: %d\n", kern_res
);
659 res
= EINVAL
; /* Need better error here? */
662 if ((res
= _pthread_create(t
, attrs
, stack
, kernel_thread
)) != 0)
667 t
->fun
= start_routine
;
668 /* Now set it up to execute */
669 _pthread_setup(t
, _pthread_body
, stack
);
670 /* Send it on it's way */
671 set_malloc_singlethreaded(0);
673 if (suspended
== 0) {
674 PTHREAD_MACH_CALL(thread_resume(kernel_thread
), kern_res
);
676 if (kern_res
!= KERN_SUCCESS
)
678 printf("Can't resume thread: %d\n", kern_res
);
679 res
= EINVAL
; /* Need better error here? */
687 pthread_create(pthread_t
*thread
,
688 const pthread_attr_t
*attr
,
689 void *(*start_routine
)(void *),
692 return _pthread_create_suspended(thread
, attr
, start_routine
, arg
, 0);
696 pthread_create_suspended_np(pthread_t
*thread
,
697 const pthread_attr_t
*attr
,
698 void *(*start_routine
)(void *),
701 return _pthread_create_suspended(thread
, attr
, start_routine
, arg
, 1);
705 * Make a thread 'undetached' - no longer 'joinable' with other threads.
708 pthread_detach(pthread_t thread
)
710 kern_return_t kern_res
;
713 if (thread
->sig
== _PTHREAD_SIG
)
716 if (thread
->detached
== PTHREAD_CREATE_JOINABLE
)
718 thread
->detached
= PTHREAD_CREATE_DETACHED
;
719 num_joiners
= thread
->num_joiners
;
720 death
= thread
->death
;
721 thread
->death
= MACH_PORT_NULL
;
722 UNLOCK(thread
->lock
);
724 { /* Have to tell these guys this thread can't be joined with */
726 PTHREAD_MACH_CALL(semaphore_signal_all(thread
->joiners
), kern_res
);
728 /* Destroy 'control' semaphores */
729 PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
730 thread
->joiners
), kern_res
);
731 PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
736 UNLOCK(thread
->lock
);
741 return (ESRCH
); /* Not a valid thread */
745 /* Announce that there is a thread ready to be reclaimed for pthread_create */
746 /* or terminated by pthread_exit. If the thread is reused, it will have its */
747 /* thread state set and will continue in the thread body function. If it is */
748 /* terminated, it will be yanked out from under the mach_msg() call. */
750 static void _pthread_become_available(pthread_t thread
) {
751 recycle_msg_t msg
= { { 0 } };
754 msg
.header
.msgh_size
= sizeof msg
- sizeof msg
.trailer
;
755 msg
.header
.msgh_remote_port
= thread_recycle_port
;
756 msg
.header
.msgh_local_port
= MACH_PORT_NULL
;
757 msg
.header
.msgh_id
= (int)thread
;
758 msg
.header
.msgh_bits
= MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND
, 0);
759 ret
= mach_msg(&msg
.header
, MACH_SEND_MSG
, msg
.header
.msgh_size
, 0,
760 MACH_PORT_NULL
, MACH_MSG_TIMEOUT_NONE
,
763 ret
= thread_suspend(thread
->kernel_thread
);
765 /* We should never get here */
768 /* Check to see if any threads are available. Return immediately */
770 static kern_return_t
_pthread_check_for_available_threads(recycle_msg_t
*msg
) {
771 return mach_msg(&msg
->header
, MACH_RCV_MSG
|MACH_RCV_TIMEOUT
, 0,
772 sizeof(recycle_msg_t
), thread_recycle_port
, 0,
776 /* Terminate all available threads and deallocate their stacks */
777 static void _pthread_reap_threads(void) {
779 recycle_msg_t msg
= { { 0 } };
780 while(_pthread_check_for_available_threads(&msg
) == KERN_SUCCESS
) {
781 pthread_t th
= (pthread_t
)msg
.header
.msgh_id
;
782 mach_port_t kernel_thread
= th
->kernel_thread
;
783 mach_port_t reply_port
= th
->reply_port
;
784 vm_size_t size
= (vm_size_t
)th
->stacksize
+ vm_page_size
;
785 vm_address_t addr
= (vm_address_t
)th
->stackaddr
;
786 #if !defined(STACK_GROWS_UP)
789 ret
= thread_terminate(kernel_thread
);
790 if (ret
!= KERN_SUCCESS
) {
791 fprintf(stderr
, "thread_terminate() failed: %s\n",
792 mach_error_string(ret
));
794 ret
= mach_port_destroy(mach_task_self(), reply_port
);
795 if (ret
!= KERN_SUCCESS
) {
797 "mach_port_destroy(thread_reply) failed: %s\n",
798 mach_error_string(ret
));
800 if (th
->freeStackOnExit
) {
801 ret
= vm_deallocate(mach_task_self(), addr
, size
);
802 if (ret
!= KERN_SUCCESS
) {
804 "vm_deallocate(stack) failed: %s\n",
805 mach_error_string(ret
));
817 return (void *)((unsigned)&dummy
& ~ (PTHREAD_STACK_MIN
- 1));
820 extern pthread_t
_pthread_self(void);
825 void * myStack
= (void *)0;
826 pthread_t cachedThread
= _cachedThread
;
828 myStack
= stackAddress();
829 if ((void *)((unsigned)(cachedThread
->stackaddr
- 1) & ~ (PTHREAD_STACK_MIN
- 1)) == myStack
) {
833 _cachedThread
= _pthread_self();
834 return _cachedThread
;
838 * Terminate a thread.
841 pthread_exit(void *value_ptr
)
843 pthread_t self
= pthread_self();
844 struct _pthread_handler_rec
*handler
;
845 kern_return_t kern_res
;
847 _clear_thread_cache();
848 while ((handler
= self
->cleanup_stack
) != 0)
850 (handler
->routine
)(handler
->arg
);
851 self
->cleanup_stack
= handler
->next
;
853 _pthread_tsd_cleanup(self
);
855 if (self
->detached
== PTHREAD_CREATE_JOINABLE
)
857 self
->detached
= _PTHREAD_EXITED
;
858 self
->exit_value
= value_ptr
;
859 num_joiners
= self
->num_joiners
;
864 PTHREAD_MACH_CALL(semaphore_signal_all(self
->joiners
), kern_res
);
866 PTHREAD_MACH_CALL(semaphore_wait(self
->death
), kern_res
);
869 /* Destroy thread & reclaim resources */
872 PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self
->joiners
), kern_res
);
873 PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self
->death
), kern_res
);
875 if (self
->detached
== _PTHREAD_CREATE_PARENT
) {
876 exit((int)(self
->exit_value
));
879 _pthread_reap_threads();
881 _pthread_become_available(self
);
885 * Wait for a thread to terminate and obtain its exit value.
888 pthread_join(pthread_t thread
,
891 kern_return_t kern_res
;
892 if (thread
->sig
== _PTHREAD_SIG
)
895 if (thread
->detached
== PTHREAD_CREATE_JOINABLE
)
897 thread
->num_joiners
++;
898 UNLOCK(thread
->lock
);
899 PTHREAD_MACH_CALL(semaphore_wait(thread
->joiners
), kern_res
);
901 thread
->num_joiners
--;
903 if (thread
->detached
== _PTHREAD_EXITED
)
905 if (thread
->num_joiners
== 0)
906 { /* Give the result to this thread */
909 *value_ptr
= thread
->exit_value
;
911 UNLOCK(thread
->lock
);
913 PTHREAD_MACH_CALL(semaphore_signal(thread
->death
), kern_res
);
916 { /* This 'joiner' missed the catch! */
917 UNLOCK(thread
->lock
);
921 { /* The thread has become anti-social! */
922 UNLOCK(thread
->lock
);
927 return (ESRCH
); /* Not a valid thread */
932 * Get the scheduling policy and scheduling paramters for a thread.
935 pthread_getschedparam(pthread_t thread
,
937 struct sched_param
*param
)
939 if (thread
->sig
== _PTHREAD_SIG
)
941 *policy
= thread
->policy
;
942 *param
= thread
->param
;
946 return (ESRCH
); /* Not a valid thread structure */
951 * Set the scheduling policy and scheduling paramters for a thread.
954 pthread_setschedparam(pthread_t thread
,
956 const struct sched_param
*param
)
958 policy_base_data_t bases
;
960 mach_msg_type_number_t count
;
963 if (thread
->sig
== _PTHREAD_SIG
)
968 bases
.ts
.base_priority
= param
->sched_priority
;
969 base
= (policy_base_t
)&bases
.ts
;
970 count
= POLICY_TIMESHARE_BASE_COUNT
;
973 bases
.fifo
.base_priority
= param
->sched_priority
;
974 base
= (policy_base_t
)&bases
.fifo
;
975 count
= POLICY_FIFO_BASE_COUNT
;
978 bases
.rr
.base_priority
= param
->sched_priority
;
979 /* quantum isn't public yet */
980 bases
.rr
.quantum
= param
->quantum
;
981 base
= (policy_base_t
)&bases
.rr
;
982 count
= POLICY_RR_BASE_COUNT
;
987 thread
->policy
= policy
;
988 thread
->param
= *param
;
989 ret
= thread_policy(thread
->kernel_thread
, policy
, base
, count
, TRUE
);
990 if (ret
!= KERN_SUCCESS
)
997 return (ESRCH
); /* Not a valid thread structure */
1002 * Get the minimum priority for the given policy
1005 sched_get_priority_min(int policy
)
1007 return default_priority
- 16;
1011 * Get the maximum priority for the given policy
1014 sched_get_priority_max(int policy
)
1016 return default_priority
+ 16;
1020 * Determine if two thread identifiers represent the same thread.
1023 pthread_equal(pthread_t t1
,
1030 cthread_set_self(void *cself
)
1032 pthread_t self
= pthread_self();
1033 if ((self
== (pthread_t
)NULL
) || (self
->sig
!= _PTHREAD_SIG
)) {
1034 _pthread_set_self(cself
);
1037 self
->cthread_self
= cself
;
1041 ur_cthread_self(void) {
1042 pthread_t self
= pthread_self();
1043 if ((self
== (pthread_t
)NULL
) || (self
->sig
!= _PTHREAD_SIG
)) {
1044 return (void *)self
;
1046 return self
->cthread_self
;
1050 * Execute a function exactly one time in a thread-safe fashion.
1053 pthread_once(pthread_once_t
*once_control
,
1054 void (*init_routine
)(void))
1056 LOCK(once_control
->lock
);
1057 if (once_control
->sig
== _PTHREAD_ONCE_SIG_init
)
1060 once_control
->sig
= _PTHREAD_ONCE_SIG
;
1062 UNLOCK(once_control
->lock
);
1063 return (ESUCCESS
); /* Spec defines no possible errors! */
1070 pthread_cancel(pthread_t thread
)
1072 if (thread
->sig
== _PTHREAD_SIG
)
1074 thread
->cancel_state
|= _PTHREAD_CANCEL_PENDING
;
1083 * Insert a cancellation point in a thread.
1086 _pthread_testcancel(pthread_t thread
)
1089 if ((thread
->cancel_state
& (PTHREAD_CANCEL_ENABLE
|_PTHREAD_CANCEL_PENDING
)) ==
1090 (PTHREAD_CANCEL_ENABLE
|_PTHREAD_CANCEL_PENDING
))
1092 UNLOCK(thread
->lock
);
1095 UNLOCK(thread
->lock
);
1099 pthread_testcancel(void)
1101 pthread_t self
= pthread_self();
1102 _pthread_testcancel(self
);
1106 * Query/update the cancelability 'state' of a thread
1109 pthread_setcancelstate(int state
, int *oldstate
)
1111 pthread_t self
= pthread_self();
1114 *oldstate
= self
->cancel_state
& _PTHREAD_CANCEL_STATE_MASK
;
1115 if ((state
== PTHREAD_CANCEL_ENABLE
) || (state
== PTHREAD_CANCEL_DISABLE
))
1117 self
->cancel_state
= (self
->cancel_state
& _PTHREAD_CANCEL_STATE_MASK
) | state
;
1123 _pthread_testcancel(self
); /* See if we need to 'die' now... */
1128 * Query/update the cancelability 'type' of a thread
1131 pthread_setcanceltype(int type
, int *oldtype
)
1133 pthread_t self
= pthread_self();
1136 *oldtype
= self
->cancel_state
& _PTHREAD_CANCEL_TYPE_MASK
;
1137 if ((type
== PTHREAD_CANCEL_DEFERRED
) || (type
== PTHREAD_CANCEL_ASYNCHRONOUS
))
1139 self
->cancel_state
= (self
->cancel_state
& _PTHREAD_CANCEL_TYPE_MASK
) | type
;
1145 _pthread_testcancel(self
); /* See if we need to 'die' now... */
1150 * Perform package initialization - called automatically when application starts
1153 /* We'll implement this when the main thread is a pthread */
1154 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
1156 static struct _pthread _thread
= {0};
1161 pthread_attr_t _attr
, *attrs
;
1164 host_basic_info_data_t basic_info
;
1165 host_priority_info_data_t priority_info
;
1167 host_flavor_t flavor
;
1168 mach_msg_type_number_t count
;
1171 int hasvectorunit
, numcpus
;
1173 count
= HOST_PRIORITY_INFO_COUNT
;
1174 info
= (host_info_t
)&priority_info
;
1175 flavor
= HOST_PRIORITY_INFO
;
1176 kr
= host_info(mach_host_self(), flavor
, info
, &count
);
1177 if (kr
!= KERN_SUCCESS
)
1178 printf("host_info failed (%d); probably need privilege.\n", kr
);
1180 default_priority
= priority_info
.user_priority
;
1181 min_priority
= priority_info
.minimum_priority
;
1182 max_priority
= priority_info
.maximum_priority
;
1185 pthread_attr_init(attrs
);
1186 _clear_thread_cache();
1187 _pthread_set_self(&_thread
);
1189 _pthread_create(&_thread
, attrs
, USRSTACK
, mach_thread_self());
1190 thread
= (pthread_t
)malloc(sizeof(struct _pthread
));
1191 memcpy(thread
, &_thread
, sizeof(struct _pthread
));
1192 _clear_thread_cache();
1193 _pthread_set_self(thread
);
1194 thread
->detached
= _PTHREAD_CREATE_PARENT
;
1196 /* See if we're on a multiprocessor and set _spin_tries if so. */
1199 len
= sizeof(numcpus
);
1200 if (sysctl(mib
, 2, &numcpus
, &len
, NULL
, 0) == 0) {
1202 _spin_tries
= SPIN_TRIES
;
1205 count
= HOST_BASIC_INFO_COUNT
;
1206 info
= (host_info_t
)&basic_info
;
1207 flavor
= HOST_BASIC_INFO
;
1208 kr
= host_info(mach_host_self(), flavor
, info
, &count
);
1209 if (kr
!= KERN_SUCCESS
)
1210 printf("host_info failed (%d)\n", kr
);
1212 if (basic_info
.avail_cpus
> 1)
1213 _spin_tries
= SPIN_TRIES
;
1214 /* This is a crude test */
1215 if (basic_info
.cpu_subtype
>= CPU_SUBTYPE_POWERPC_7400
)
1216 _cpu_has_altivec
= 1;
1220 mib
[1] = HW_VECTORUNIT
;
1221 len
= sizeof(hasvectorunit
);
1222 if (sysctl(mib
, 2, &hasvectorunit
, &len
, NULL
, 0) == 0) {
1223 _cpu_has_altivec
= hasvectorunit
;
1225 mig_init(1); /* enable multi-threaded mig interfaces */
1229 int sched_yield(void)
1235 /* This is the "magic" that gets the initialization routine called when the application starts */
1236 int (*_cthread_init_routine
)(void) = pthread_init
;
1238 /* Get a semaphore from the pool, growing it if necessary */
1240 __private_extern__ semaphore_t
new_sem_from_pool(void) {
1245 LOCK(sem_pool_lock
);
1246 if (sem_pool_current
== sem_pool_count
) {
1247 sem_pool_count
+= 16;
1248 sem_pool
= realloc(sem_pool
, sem_pool_count
* sizeof(semaphore_t
));
1249 for (i
= sem_pool_current
; i
< sem_pool_count
; i
++) {
1250 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool
[i
], SYNC_POLICY_FIFO
, 0), res
);
1253 sem
= sem_pool
[sem_pool_current
++];
1254 UNLOCK(sem_pool_lock
);
1258 /* Put a semaphore back into the pool */
1259 __private_extern__
void restore_sem_to_pool(semaphore_t sem
) {
1260 LOCK(sem_pool_lock
);
1261 sem_pool
[--sem_pool_current
] = sem
;
1262 UNLOCK(sem_pool_lock
);
1265 static void sem_pool_reset(void) {
1266 LOCK(sem_pool_lock
);
1268 sem_pool_current
= 0;
1270 UNLOCK(sem_pool_lock
);
1273 __private_extern__
void _pthread_fork_child(void) {
1274 /* Just in case somebody had it locked... */
1275 UNLOCK(sem_pool_lock
);