/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * POSIX Pthread Library
 */
#include <stdio.h>    /* For printf(). */
#include <stdlib.h>   /* For malloc()/realloc()/free(). */
#include <errno.h>    /* For __mach_errno_addr() prototype. */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>

#include "pthread_internals.h"
/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;
/*
 * [Internal] stack support
 */

size_t _pthread_stack_size = 0;

int _cpu_has_altivec = 0;
/* This global should be used (carefully) by anyone needing to know if a pthread has been
** created.
*/
int __is_threaded = 0;
/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;
static int default_priority;
static int max_priority;
static int min_priority;

extern mach_port_t thread_recycle_port;
#define STACK_LOWEST(sp)    ((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED      (sizeof (struct _pthread))

#ifdef STACK_GROWS_UP

/* The stack grows towards higher addresses:
     |struct _pthread|user stack---------------->|
     ^STACK_BASE     ^STACK_START
*/

#define STACK_BASE(sp)          STACK_LOWEST(sp)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) + STACK_RESERVED)
#define STACK_SELF(sp)          STACK_BASE(sp)

#else

/* The stack grows towards lower addresses:
     |<----------------user stack|struct _pthread|
     ^STACK_LOWEST               ^STACK_START    ^STACK_BASE
*/

#define STACK_BASE(sp)          (((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)          STACK_START(sp)

#endif
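/*
 * Worked example (illustrative, not part of the original source): assume
 * stacks aligned on 128KB with __pthread_stack_mask == 0x1ffff and the
 * stack growing down.  For any sp within a stack, say sp == 0x00234567:
 *
 *     STACK_LOWEST(sp) == 0x00220000              (bottom of the area)
 *     STACK_BASE(sp)   == 0x00240000              (one past the top)
 *     STACK_START(low) == 0x00240000 - STACK_RESERVED
 *
 * so the struct _pthread sits at the very top of the area and
 * STACK_SELF() can recover the current thread from its stack pointer.
 */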
/* Set the base address to use as the stack pointer, before adjusting due to the ABI */

static int
_pthread_allocate_stack(pthread_attr_t *attrs, vm_address_t *stack)
{
    kern_return_t kr;
#if 1
    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        assert(((vm_offset_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
        *stack = (vm_address_t)attrs->stackaddr;
        return 0;
    }
    kr = vm_allocate(mach_task_self(), stack, attrs->stacksize + vm_page_size,
                     VM_MAKE_TAG(VM_MEMORY_STACK) | TRUE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
#ifdef STACK_GROWS_UP
    /* The guard page is the page one higher than the stack */
    /* The stack base is at the lowest address */
    kr = vm_protect(mach_task_self(), *stack + attrs->stacksize, vm_page_size,
                    FALSE, VM_PROT_NONE);
#else
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    kr = vm_protect(mach_task_self(), *stack, vm_page_size, FALSE, VM_PROT_NONE);
    *stack += attrs->stacksize + vm_page_size;
#endif
#else
    vm_address_t cur_stack = (vm_address_t)0;
    if (free_stacks == 0)
    {
        /* Allocating guard pages is done by doubling
         * the actual stack size, since STACK_BASE() needs
         * to have stacks aligned on stack_size. Allocating just
         * one page takes as much memory as allocating more pages
         * since it will remain one entry in the vm map.
         * Besides, allocating more than one page allows tracking the
         * overflow pattern when the overflow is bigger than one page.
         */
#ifndef NO_GUARD_PAGES
# define GUARD_SIZE(a)  (2*(a))
# define GUARD_MASK(a)  (((a)<<1) | 1)
#else
# define GUARD_SIZE(a)  (a)
# define GUARD_MASK(a)  (a)
#endif
        while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
        {
            lowest_stack -= GUARD_SIZE(__pthread_stack_size);
            /* Ensure stack is there */
            kr = vm_allocate(mach_task_self(),
                             &lowest_stack,
                             GUARD_SIZE(__pthread_stack_size),
                             FALSE);
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack+__pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
                if (kr == KERN_SUCCESS)
                    break;
            }
#else
            if (kr == KERN_SUCCESS)
                break;
#endif
        }
        if (lowest_stack > 0)
            free_stacks = (vm_address_t *)lowest_stack;
        else
        {
            /* Too bad. We'll just have to take what comes.
               Use vm_map instead of vm_allocate so we can
               specify alignment. */
            kr = vm_map(mach_task_self(), &lowest_stack,
                        GUARD_SIZE(__pthread_stack_size),
                        GUARD_MASK(__pthread_stack_mask),
                        TRUE /* anywhere */, MEMORY_OBJECT_NULL,
                        0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                        VM_INHERIT_DEFAULT);
            /* This really shouldn't fail and if it does I don't
               know what to do. */
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack+__pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
            }
#endif
            free_stacks = (vm_address_t *)lowest_stack;
        }
        *free_stacks = 0; /* No other free stacks */
    }
    cur_stack = STACK_START((vm_address_t) free_stacks);
    free_stacks = (vm_address_t *)*free_stacks;
    cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
#endif
    return 0;
}
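/*
 * Worked example for the (disabled) free-list path above (illustrative,
 * values assumed): with __pthread_stack_size == 0x20000 (128KB) and guard
 * pages enabled, GUARD_SIZE gives a 0x40000 (256KB) carve-out per stack;
 * the 128KB on the overflow side is vm_protect()ed to VM_PROT_NONE and
 * the other 128KB is the usable stack.  GUARD_MASK widens the alignment
 * mask to match the doubled allocation.
 */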
/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
static const size_t DEFAULT_STACK_SIZE = DFLSSIZ;
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->freeStackOnExit = TRUE;
    return (ESUCCESS);
}
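/*
 * Typical use of the attribute calls in this file (illustrative sketch,
 * not part of the original source; error handling elided, worker() is
 * user code):
 *
 *     pthread_attr_t attr;
 *     pthread_t th;
 *     pthread_attr_init(&attr);
 *     pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *     pthread_create(&th, &attr, worker, NULL);
 *     pthread_attr_destroy(&attr);
 */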
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (ESUCCESS);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}
/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}
/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}
int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}
int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}
int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}
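/*
 * Note the two validity checks above (example values assumed): with 4KB
 * pages, a request of 0x10000 (64KB) passes both tests, while 0x10800
 * fails with EINVAL because it is not a multiple of vm_page_size, and
 * anything below PTHREAD_STACK_MIN fails the minimum-size test.
 */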
/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
    _pthread_set_self(self);
    pthread_exit((self->fun)(self->arg));
}
static int
_pthread_create(pthread_t t,
                const pthread_attr_t *attrs,
                const vm_address_t stack,
                const mach_port_t kernel_thread)
{
    int res;
    kern_return_t kern_res;
    res = ESUCCESS;
    do
    {
        memset(t, 0, sizeof(*t));
        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
        t->kernel_thread = kernel_thread;
        t->detached = attrs->detached;
        t->inherit = attrs->inherit;
        t->policy = attrs->policy;
        t->param = attrs->param;
        t->freeStackOnExit = attrs->freeStackOnExit;
        t->mutexes = (struct _pthread_mutex *)NULL;
        t->sig = _PTHREAD_SIG;
        t->reply_port = MACH_PORT_NULL;
        t->cthread_self = NULL;
        LOCK_INIT(t->lock);
        t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
        t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
        pthread_setschedparam(t, t->policy, &t->param);
        /* Create control semaphores */
        if (t->detached == PTHREAD_CREATE_JOINABLE)
        {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
                                               &t->death,
                                               SYNC_POLICY_FIFO,
                                               0), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create 'death' semaphore: %d\n", kern_res);
                res = EINVAL; /* Need better error here? */
                break;
            }
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(),
                                               &t->joiners,
                                               SYNC_POLICY_FIFO,
                                               0), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create 'joiners' semaphore: %d\n", kern_res);
                res = EINVAL; /* Need better error here? */
                break;
            }
            t->num_joiners = 0;
        } else
        {
            t->death = MACH_PORT_NULL;
        }
    } while (0);
    return (res);
}
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    return t->kernel_thread;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    return t->stacksize;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    return t->stackaddr;
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}
static int
_pthread_create_suspended(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *),
                          void *arg,
                          int suspended)
{
    pthread_attr_t _attr, *attrs;
    vm_address_t stack;
    int res;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread;
    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_attr;
        pthread_attr_init(attrs);
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = ESUCCESS;
    do
    {
        /* Allocate a stack for the thread */
        if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
            break;
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;
        /* Create the Mach thread for this thread */
        PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
        if (kern_res != KERN_SUCCESS)
        {
            printf("Can't create thread: %d\n", kern_res);
            res = EINVAL; /* Need better error here? */
            break;
        }
        if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            break;
        }
        t->arg = arg;
        t->fun = start_routine;
        /* Now set it up to execute */
        _pthread_setup(t, _pthread_body, stack);
        /* Send it on its way */
        set_malloc_singlethreaded(0);
        __is_threaded = 1;
        if (suspended == 0) {
            PTHREAD_MACH_CALL(thread_resume(kernel_thread), kern_res);
        }
        if (kern_res != KERN_SUCCESS)
        {
            printf("Can't resume thread: %d\n", kern_res);
            res = EINVAL; /* Need better error here? */
            break;
        }
    } while (0);
    return (res);
}
int
pthread_create(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
                            const pthread_attr_t *attr,
                            void *(*start_routine)(void *),
                            void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
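/*
 * Illustrative sketch (not part of the original source): creating a
 * thread suspended lets the caller adjust it before it first runs, e.g.
 *
 *     pthread_t th;
 *     pthread_create_suspended_np(&th, NULL, worker, NULL);
 *     // ...inspect or reprioritize th here...
 *     thread_resume(pthread_mach_thread_np(th));
 *
 * worker is user code; the thread_resume() pairs with the thread_create()
 * above, since Mach creates kernel threads in a suspended state.
 */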
/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
    kern_return_t kern_res;
    int num_joiners;
    mach_port_t death;
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached == PTHREAD_CREATE_JOINABLE)
        {
            thread->detached = PTHREAD_CREATE_DETACHED;
            num_joiners = thread->num_joiners;
            death = thread->death;
            thread->death = MACH_PORT_NULL;
            UNLOCK(thread->lock);
            if (num_joiners > 0)
            {
                /* Wake up a joiner */
                PTHREAD_MACH_CALL(semaphore_signal(thread->joiners), kern_res);
            }
            /* Destroy 'control' semaphores */
            PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
                                                thread->joiners), kern_res);
            PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(),
                                                death), kern_res);
            return (ESUCCESS);
        } else if (thread->detached == _PTHREAD_EXITED) {
            UNLOCK(thread->lock);
            pthread_join(thread, NULL);
            return (ESUCCESS);
        } else
        {
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else
    {
        return (ESRCH); /* Not a valid thread */
    }
}
/* Announce that there is a thread ready to be reclaimed for pthread_create */
/* or terminated by pthread_exit. If the thread is reused, it will have its */
/* thread state set and will continue in the thread body function. If it is */
/* terminated, it will be yanked out from under the mach_msg() call. */

static void _pthread_become_available(pthread_t thread) {
    mach_msg_empty_rcv_t msg = { { 0 } };
    kern_return_t ret;

    if (thread->reply_port == MACH_PORT_NULL) {
        thread->reply_port = mach_reply_port();
    }
    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = MACH_PORT_NULL;
    msg.header.msgh_id = (int)thread;
    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
    ret = mach_msg(&msg.header, MACH_SEND_MSG | MACH_RCV_MSG,
                   msg.header.msgh_size, sizeof msg,
                   thread->reply_port, MACH_MSG_TIMEOUT_NONE,
                   MACH_PORT_NULL);
    while (1) {
        ret = thread_suspend(thread->kernel_thread);
    }
    /* We should never get here */
}
/* Check to see if any threads are available. Return immediately */

static kern_return_t _pthread_check_for_available_threads(mach_msg_empty_rcv_t *msg) {
    return mach_msg(&msg->header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                    sizeof(mach_msg_empty_rcv_t), thread_recycle_port, 0,
                    MACH_PORT_NULL);
}
/* Terminate all available threads and deallocate their stacks */
static void _pthread_reap_threads(void) {
    kern_return_t ret;
    mach_msg_empty_rcv_t msg = { { 0 } };
    while ((ret = _pthread_check_for_available_threads(&msg)) == KERN_SUCCESS) {
        pthread_t th = (pthread_t)msg.header.msgh_id;
        mach_port_t kernel_thread = th->kernel_thread;
        mach_port_t reply_port = th->reply_port;
        vm_size_t size = (vm_size_t)th->stacksize + vm_page_size;
        vm_address_t addr = (vm_address_t)th->stackaddr;
#if !defined(STACK_GROWS_UP)
        addr -= size;
#endif
        ret = thread_terminate(kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr, "thread_terminate() failed: %s\n",
                    mach_error_string(ret));
        }
        ret = mach_port_destroy(mach_task_self(), reply_port);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_destroy(thread_reply) failed: %s\n",
                    mach_error_string(ret));
        }
        if (th->freeStackOnExit) {
            ret = vm_deallocate(mach_task_self(), addr, size);
            if (ret != KERN_SUCCESS) {
                fprintf(stderr,
                        "vm_deallocate(stack) failed: %s\n",
                        mach_error_string(ret));
            }
        }
        free(th);
    }
    assert(ret == MACH_RCV_TIMED_OUT);
}
/* For compatibility... */

pthread_t
_pthread_self(void) {
    return pthread_self();
}
/*
 * Terminate a thread.
 */
void
pthread_exit(void *value_ptr)
{
    pthread_t self = pthread_self();
    struct _pthread_handler_rec *handler;
    kern_return_t kern_res;
    int num_joiners;
    while ((handler = self->cleanup_stack) != 0)
    {
        (handler->routine)(handler->arg);
        self->cleanup_stack = handler->next;
    }
    _pthread_tsd_cleanup(self);
    LOCK(self->lock);
    if (self->detached == PTHREAD_CREATE_JOINABLE)
    {
        self->detached = _PTHREAD_EXITED;
        self->exit_value = value_ptr;
        num_joiners = self->num_joiners;
        UNLOCK(self->lock);
        if (num_joiners > 0)
        {
            /* POSIX says that multiple pthread_join() calls on */
            /* the same thread are undefined so we just wake up */
            /* the first one to join */
            PTHREAD_MACH_CALL(semaphore_signal(self->joiners), kern_res);
        }
        do {
            PTHREAD_MACH_CALL(semaphore_wait(self->death), kern_res);
        } while (kern_res == KERN_ABORTED);
    } else
    {
        UNLOCK(self->lock);
    }
    /* Destroy thread & reclaim resources */
    if (self->death)
    {
        PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self->joiners), kern_res);
        PTHREAD_MACH_CALL(semaphore_destroy(mach_task_self(), self->death), kern_res);
    }
    if (self->detached == _PTHREAD_CREATE_PARENT) {
        exit((int)(self->exit_value));
    }

    _pthread_reap_threads();

    _pthread_become_available(self);
}
/*
 * Wait for a thread to terminate and obtain its exit value.
 */
int
pthread_join(pthread_t thread,
             void **value_ptr)
{
    kern_return_t kern_res;
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached == PTHREAD_CREATE_JOINABLE)
        {
            thread->num_joiners++;
            UNLOCK(thread->lock);
            do {
                PTHREAD_MACH_CALL(semaphore_wait(thread->joiners), kern_res);
            } while (kern_res == KERN_ABORTED);
            LOCK(thread->lock);
            thread->num_joiners--;
        }
        if (thread->detached == _PTHREAD_EXITED)
        {
            if (thread->num_joiners == 0)
            {   /* Give the result to this thread */
                if (value_ptr)
                {
                    *value_ptr = thread->exit_value;
                }
                UNLOCK(thread->lock);
                PTHREAD_MACH_CALL(semaphore_signal(thread->death), kern_res);
                return (ESUCCESS);
            } else
            {   /* This 'joiner' missed the catch! */
                UNLOCK(thread->lock);
                return (ESRCH);
            }
        } else
        {   /* The thread has become anti-social! */
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else
    {
        return (ESRCH); /* Not a valid thread */
    }
}
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        *policy = thread->policy;
        *param = thread->param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}
/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
                      int policy,
                      const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    if (thread->sig == _PTHREAD_SIG)
    {
        switch (policy)
        {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return (EINVAL);
        }
        thread->policy = policy;
        thread->param = *param;
        ret = thread_policy(thread->kernel_thread, policy, base, count, TRUE);
        if (ret != KERN_SUCCESS)
        {
            return (EINVAL);
        }
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}
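/*
 * Note: for every policy the reported range is a band of +/-16 around the
 * host's default timeshare priority, so with (for example) a
 * default_priority of 31, callers would see min 15 and max 47.
 */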
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
              pthread_t t2)
{
    return (t1 == t2);
}
void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        _pthread_set_self(cself);
        return;
    }
    self->cthread_self = cself;
}
void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        return (void *)self;
    }
    return self->cthread_self;
}
/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
             void (*init_routine)(void))
{
    LOCK(once_control->lock);
    if (once_control->sig == _PTHREAD_ONCE_SIG_init)
    {
        (*init_routine)();
        once_control->sig = _PTHREAD_ONCE_SIG;
    }
    UNLOCK(once_control->lock);
    return (ESUCCESS); /* Spec defines no possible errors! */
}
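/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *     static pthread_once_t once = PTHREAD_ONCE_INIT;
 *     static void init_tables(void) { ... }   // runs exactly once
 *     ...
 *     pthread_once(&once, init_tables);
 *
 * Every caller after the first finds sig == _PTHREAD_ONCE_SIG and
 * returns without invoking init_tables again.
 */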
/*
 * Cancel a thread
 */
int
pthread_cancel(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
        return (ESUCCESS);
    } else
    {
        return (ESRCH);
    }
}
/*
 * Insert a cancellation point in a thread.
 */
static void
_pthread_testcancel(pthread_t thread)
{
    LOCK(thread->lock);
    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
        (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
    {
        UNLOCK(thread->lock);
        pthread_exit(0);
    }
    UNLOCK(thread->lock);
}
void
pthread_testcancel(void)
{
    pthread_t self = pthread_self();
    _pthread_testcancel(self);
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
    if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
    {
        /* Clear the old state bits before installing the new state */
        self->cancel_state = (self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK) | state;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return (err);
}
/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;
    LOCK(self->lock);
    *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
    if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
    {
        /* Clear the old type bits before installing the new type */
        self->cancel_state = (self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK) | type;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return (err);
}
/*
 * Perform package initialization - called automatically when application starts
 */

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};
static int
pthread_init(void)
{
    pthread_attr_t _attr, *attrs;
    pthread_t thread;
    kern_return_t kr;
    host_basic_info_data_t basic_info;
    host_priority_info_data_t priority_info;
    host_info_t info;
    host_flavor_t flavor;
    mach_msg_type_number_t count;
    int mib[2];
    size_t len;
    int hasvectorunit, numcpus;

    count = HOST_PRIORITY_INFO_COUNT;
    info = (host_info_t)&priority_info;
    flavor = HOST_PRIORITY_INFO;
    kr = host_info(mach_host_self(), flavor, info, &count);
    if (kr != KERN_SUCCESS)
        printf("host_info failed (%d); probably need privilege.\n", kr);
    else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    attrs = &_attr;
    pthread_attr_init(attrs);
    _pthread_set_self(&_thread);

    _pthread_create(&_thread, attrs, USRSTACK, mach_thread_self());
    thread = &_thread;
    thread->detached = _PTHREAD_CREATE_PARENT;

    /* See if we're on a multiprocessor and set _spin_tries if so. */
    mib[0] = CTL_HW;
    mib[1] = HW_NCPU;
    len = sizeof(numcpus);
    if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
        if (numcpus > 1) {
            _spin_tries = MP_SPIN_TRIES;
        }
    } else {
        count = HOST_BASIC_INFO_COUNT;
        info = (host_info_t)&basic_info;
        flavor = HOST_BASIC_INFO;
        kr = host_info(mach_host_self(), flavor, info, &count);
        if (kr != KERN_SUCCESS)
            printf("host_info failed (%d)\n", kr);
        else {
            if (basic_info.avail_cpus > 1)
                _spin_tries = MP_SPIN_TRIES;
            /* This is a crude test */
            if (basic_info.cpu_subtype >= CPU_SUBTYPE_POWERPC_7400)
                _cpu_has_altivec = 1;
        }
    }
    mib[0] = CTL_HW;
    mib[1] = HW_VECTORUNIT;
    len = sizeof(hasvectorunit);
    if (sysctl(mib, 2, &hasvectorunit, &len, NULL, 0) == 0) {
        _cpu_has_altivec = hasvectorunit;
    }
    mig_init(1); /* enable multi-threaded mig interfaces */
    return 0;
}
int sched_yield(void)
{
    swtch_pri(0);
    return 0;
}
/* This is the "magic" that gets the initialization routine called when the application starts */
int (*_cthread_init_routine)(void) = pthread_init;
/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
    kern_return_t res;
    semaphore_t sem;
    int i;

    LOCK(sem_pool_lock);
    if (sem_pool_current == sem_pool_count) {
        sem_pool_count += 16;
        sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
        for (i = sem_pool_current; i < sem_pool_count; i++) {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
        }
    }
    sem = sem_pool[sem_pool_current++];
    UNLOCK(sem_pool_lock);
    return sem;
}
/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    sem_pool[--sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}
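/*
 * The pool above is a LIFO cache (illustrative sketch, not part of the
 * original source): mutex/condition code typically does
 *
 *     semaphore_t s = new_sem_from_pool();  // grows the pool by 16 if empty
 *     // ...block on and signal s...
 *     restore_sem_to_pool(s);               // push it back for reuse
 *
 * so the cost of semaphore_create() kernel calls is amortized across many
 * mutex and condition-variable operations.
 */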
static void sem_pool_reset(void) {
    LOCK(sem_pool_lock);
    sem_pool_count = 0;
    sem_pool_current = 0;
    sem_pool = NULL;
    UNLOCK(sem_pool_lock);
}
__private_extern__ void _pthread_fork_child(void) {
    /* Just in case somebody had it locked... */
    UNLOCK(sem_pool_lock);
    sem_pool_reset();
}
);