/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */
#include <stdio.h>      /* For printf(). */
#include <errno.h>      /* For __mach_errno_addr() prototype. */
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/syscall.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>

#include "pthread_internals.h"
/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
static int _pthread_count = 1;

static pthread_lock_t _pthread_count_lock = LOCK_INITIALIZER;
/* Same implementation as LOCK, but without the __is_threaded check */
__private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
{
    int tries = _spin_tries;
    do {
        if (tries-- > 0)
            continue;
        syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
        tries = _spin_tries;
    } while (!_spin_lock_try(lock));
}
/* Apparently, bcopy doesn't declare _cpu_has_altivec anymore */
int _cpu_has_altivec = 0;

extern mach_port_t thread_recycle_port;
/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/
static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;
/*
 * [Internal] stack support
 */
size_t _pthread_stack_size = 0;
#define STACK_LOWEST(sp)        ((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED          (sizeof (struct _pthread))

#ifdef STACK_GROWS_UP
/* The stack grows towards higher addresses:
   |struct _pthread|user stack---------------->|
   ^STACK_BASE     ^STACK_START
*/
#define STACK_BASE(sp)          STACK_LOWEST(sp)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) + STACK_RESERVED)
#define STACK_SELF(sp)          STACK_BASE(sp)
#else  /* STACK_GROWS_UP */
/* The stack grows towards lower addresses:
   |<----------------user stack|struct _pthread|
   ^STACK_LOWEST               ^STACK_START    ^STACK_BASE
*/
#define STACK_BASE(sp)          (((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)  (STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)          STACK_START(sp)
#endif /* STACK_GROWS_UP */
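
/*
 * Worked example (illustrative sketch, not part of the original source):
 * assume a hypothetical stack alignment of 512KB, i.e. __pthread_stack_mask
 * == 0x0007FFFF, a downward-growing stack, and some in-stack address
 * sp == 0xB0123456. Then:
 *
 *     STACK_BASE(sp)  == 0xB0180000   (one past the aligned region)
 *     STACK_START(sp) == 0xB0180000 - sizeof(struct _pthread)
 *     STACK_SELF(sp)  == STACK_START(sp)
 *
 * The struct _pthread sits at the top of the aligned region, so a thread
 * can locate its own descriptor from any of its stack addresses with pure
 * arithmetic, no table lookup required.
 */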

#if defined(__ppc__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error Need to define a stack address hint for this architecture
#endif

/* Set the base address to use as the stack pointer, before adjusting due to the ABI */

static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
    kern_return_t kr;
#if 1
    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        assert(((vm_address_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
        *stack = attrs->stackaddr;
        return 0;
    }

    *((vm_address_t *)stack) = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), (vm_address_t *)stack,
                attrs->stacksize + vm_page_size,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         (vm_address_t *)stack, attrs->stacksize + vm_page_size,
                         VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
#ifdef STACK_GROWS_UP
    /* The guard page is the page one higher than the stack */
    /* The stack base is at the lowest address */
    kr = vm_protect(mach_task_self(), *stack + attrs->stacksize, vm_page_size, FALSE, VM_PROT_NONE);
#else
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    kr = vm_protect(mach_task_self(), (vm_address_t)*stack, vm_page_size, FALSE, VM_PROT_NONE);
    *stack += attrs->stacksize + vm_page_size;
#endif
#else
    vm_address_t cur_stack = (vm_address_t)0;
    if (free_stacks == 0)
    {
        /* Allocating guard pages is done by doubling
         * the actual stack size, since STACK_BASE() needs
         * to have stacks aligned on stack_size. Allocating just
         * one page takes as much memory as allocating more pages
         * since it will remain one entry in the vm map.
         * Besides, allocating more than one page allows tracking the
         * overflow pattern when the overflow is bigger than one page.
         */
#ifndef NO_GUARD_PAGES
# define GUARD_SIZE(a)  (2*(a))
# define GUARD_MASK(a)  (((a)<<1) | 1)
#else
# define GUARD_SIZE(a)  (a)
# define GUARD_MASK(a)  (a)
#endif
        while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
        {
            lowest_stack -= GUARD_SIZE(__pthread_stack_size);
            /* Ensure stack is there */
            kr = vm_allocate(mach_task_self(),
                             &lowest_stack,
                             GUARD_SIZE(__pthread_stack_size),
                             FALSE);
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack+__pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
                if (kr == KERN_SUCCESS)
                    break;
            }
#else
            if (kr == KERN_SUCCESS)
                break;
#endif
        }
        if (lowest_stack > 0)
            free_stacks = (vm_address_t *)lowest_stack;
        else
        {
            /* Too bad. We'll just have to take what comes.
               Use vm_map instead of vm_allocate so we can
               specify alignment. */
            kr = vm_map(mach_task_self(), &lowest_stack,
                        GUARD_SIZE(__pthread_stack_size),
                        GUARD_MASK(__pthread_stack_mask),
                        TRUE /* anywhere */, MEMORY_OBJECT_NULL,
                        0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                        VM_INHERIT_DEFAULT);
            /* This really shouldn't fail and if it does I don't
               know what to do. */
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
# ifdef STACK_GROWS_UP
                kr = vm_protect(mach_task_self(),
                                lowest_stack+__pthread_stack_size,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
# else /* STACK_GROWS_UP */
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
# endif /* STACK_GROWS_UP */
            }
#endif
            free_stacks = (vm_address_t *)lowest_stack;
            lowest_stack = 0;
        }
        *free_stacks = 0; /* No other free stacks */
    }
    cur_stack = STACK_START((vm_address_t) free_stacks);
    free_stacks = (vm_address_t *)*free_stacks;
    cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
#endif
    return 0;
}
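
/*
 * Illustrative sketch (not in the original source) of the layout produced
 * by the live path above for the usual downward-growing case, where the
 * requested stacksize is N bytes:
 *
 *     low addresses                                    high addresses
 *     | 1 guard page (VM_PROT_NONE) |  N bytes of usable stack  |
 *     ^ address returned by vm_map/vm_allocate
 *                                    returned *stack points here ^
 *       (base + stacksize + vm_page_size)
 *
 * A runaway stack eventually faults on the protected page instead of
 * silently overwriting whatever happens to be mapped below it.
 */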

static pthread_attr_t _pthread_attr_default = {0};

/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

static const size_t DEFAULT_STACK_SIZE = DFLSSIZ;

/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->freeStackOnExit = TRUE;
    return (ESUCCESS);
}
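
/*
 * Usage sketch (illustrative only, not part of the library). Typical
 * lifetime of an attribute object using the accessors defined in this
 * file; error checking is omitted for brevity:
 *
 *     pthread_attr_t attr;
 *
 *     pthread_attr_init(&attr);
 *     pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
 *     pthread_attr_setstacksize(&attr, 8 * vm_page_size);
 *     ... pass &attr to pthread_create(), possibly several times ...
 *     pthread_attr_destroy(&attr);
 *
 * 8 * vm_page_size is an arbitrary example value; the setter accepts it
 * only if it is page-aligned and at least PTHREAD_STACK_MIN.
 */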

/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (ESUCCESS);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}

int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0) &&
        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        attr->stacksize = stacksize;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}
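
/*
 * Usage sketch (illustrative only): the setters above reject a stackaddr
 * that is not page-aligned and a stacksize that is not a whole number of
 * pages, so a caller-supplied stack is typically obtained from the VM
 * system directly. Error checking omitted; the size is an arbitrary
 * example value:
 *
 *     vm_address_t buf = 0;
 *     size_t size = 16 * vm_page_size;
 *     pthread_attr_t attr;
 *
 *     vm_allocate(mach_task_self(), &buf, size, TRUE);
 *     pthread_attr_init(&attr);
 *     pthread_attr_setstack(&attr, (void *)buf, size);
 *
 * pthread_attr_setstack() also clears freeStackOnExit, so the caller owns
 * the memory and must vm_deallocate() it after the thread is joined.
 */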

/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
    _pthread_set_self(self);
    pthread_exit((self->fun)(self->arg));
}

static int
_pthread_create(pthread_t t,
                const pthread_attr_t *attrs,
                void *stack,
                const mach_port_t kernel_thread)
{
    int res = 0;

    memset(t, 0, sizeof(*t));
    t->stacksize = attrs->stacksize;
    t->stackaddr = (void *)stack;
    t->kernel_thread = kernel_thread;
    t->detached = attrs->detached;
    t->inherit = attrs->inherit;
    t->policy = attrs->policy;
    t->param = attrs->param;
    t->freeStackOnExit = attrs->freeStackOnExit;
    t->mutexes = (struct _pthread_mutex *)NULL;
    t->sig = _PTHREAD_SIG;
    t->reply_port = MACH_PORT_NULL;
    t->cthread_self = NULL;
    LOCK_INIT(t->lock);
    t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
    t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
    t->death = SEMAPHORE_NULL;

    if (kernel_thread != MACH_PORT_NULL)
        pthread_setschedparam(t, t->policy, &t->param);

    return (res);
}

/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to know whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread
 * is in the process of being created at the time of the call; it does not
 * tell whether there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return (__is_threaded);
}

mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    thread_t kernel_thread;

    /* Wait for the creator to initialize it */
    while ((kernel_thread = t->kernel_thread) == MACH_PORT_NULL)
        sched_yield();

    return kernel_thread;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    return t->stacksize;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    return t->stackaddr;
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}

/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}

static int
_pthread_create_suspended(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *),
                          void *arg,
                          int suspended)
{
    pthread_attr_t *attrs;
    void *stack;
    int res;
    int needresume;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = 0;

    /* In the default policy (ie SCHED_OTHER) only sched_priority is used.
     * Check here whether any change in priority or policy is needed.
     */
    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
         (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
        needresume = 1;
        suspended = 1;
    } else
        needresume = 0;

    do
    {
        /* Allocate a stack for the thread */
        if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
            break;
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;

        /* Create the Mach thread for this thread */
        PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
        if (kern_res != KERN_SUCCESS)
        {
            printf("Can't create thread: %d\n", kern_res);
            res = EINVAL; /* Need better error here? */
            break;
        }
        if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            break;
        }
        set_malloc_singlethreaded(0);
        __is_threaded = 1;

        LOCK(_pthread_count_lock);
        _pthread_count++;
        UNLOCK(_pthread_count_lock);

        /* Send it on its way */
        t->arg = arg;
        t->fun = start_routine;
        /* Now set it up to execute */
        _pthread_setup(t, _pthread_body, stack, suspended, needresume);
    } while (0);
    return (res);
}

int
pthread_create(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
                            const pthread_attr_t *attr,
                            void *(*start_routine)(void *),
                            void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
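
/*
 * Usage sketch (illustrative only): the non-portable suspended variant
 * creates the thread stopped, so the creator can, for example, adjust
 * scheduling before the body runs. worker() is hypothetical.
 *
 *     pthread_t th;
 *
 *     pthread_create_suspended_np(&th, NULL, worker, NULL);
 *     ... the thread exists but has not started executing worker() ...
 *     thread_resume(pthread_mach_thread_np(th));
 *     pthread_join(th, NULL);
 */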

/*
 * Make a thread 'detached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached & PTHREAD_CREATE_JOINABLE)
        {
            if (thread->detached & _PTHREAD_EXITED) {
                UNLOCK(thread->lock);
                pthread_join(thread, NULL);
                return (ESUCCESS);
            } else {
                semaphore_t death = thread->death;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;
                UNLOCK(thread->lock);
                if (death)
                    (void) semaphore_signal(death);
                return (ESUCCESS);
            }
        } else {
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else {
        return (ESRCH); /* Not a valid thread */
    }
}

/*
 * pthread_kill call to system call
 */
int
pthread_kill(pthread_t th, int sig)
{
    int error = 0;

    if ((sig < 0) || (sig > NSIG))
        return (EINVAL);

    if (th && (th->sig == _PTHREAD_SIG)) {
        error = __pthread_kill(pthread_mach_thread_np(th), sig);
        if (error == -1)
            error = errno;
        return (error);
    }
    return (ESRCH);
}

/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
                                          MACH_MSG_TYPE_MOVE_SEND);
    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = kernel_thread;
    msg.header.msgh_id = (int)thread;
    ret = mach_msg_send(&msg.header);
    assert(ret == MACH_MSG_SUCCESS);
}
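
/*
 * Illustrative note (not in the original source): the recycle message
 * overloads standard mach_msg header fields as a tiny data structure.
 * msgh_local_port carries the dead thread's kernel port (moved as a send
 * right) and msgh_id carries the pthread_t itself, so no message body is
 * needed. _pthread_reap_threads() below unpacks the pair from the same
 * two fields.
 */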

/* Reap the resources for available threads */
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) {
    mach_port_type_t ptype;
    kern_return_t ret;
    task_t self;

    self = mach_task_self();
    if (kernel_thread != MACH_PORT_DEAD) {
        ret = mach_port_type(self, kernel_thread, &ptype);
        if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
            /* not quite dead yet... */
            return EAGAIN;
        }
        ret = mach_port_deallocate(self, kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_deallocate(kernel_thread) failed: %s\n",
                    mach_error_string(ret));
        }
    }
    if (th->reply_port != MACH_PORT_NULL) {
        ret = mach_port_mod_refs(self, th->reply_port,
                                 MACH_PORT_RIGHT_RECEIVE, -1);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_mod_refs(reply_port) failed: %s\n",
                    mach_error_string(ret));
        }
    }
    if (th->freeStackOnExit) {
        vm_address_t addr = (vm_address_t)th->stackaddr;
        vm_size_t size;

        size = (vm_size_t)th->stacksize + vm_page_size;

#if !defined(STACK_GROWS_UP)
        addr -= size;
#endif
        ret = vm_deallocate(self, addr, size);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "vm_deallocate(stack) failed: %s\n",
                    mach_error_string(ret));
        }
    }
    if (value_ptr)
        *value_ptr = th->exit_value;
    if (th != &_thread)
        free(th);
    return ESUCCESS;
}

void _pthread_reap_threads(void)
{
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                   sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                   MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    while (ret == MACH_MSG_SUCCESS) {
        mach_port_t kernel_thread = msg.header.msgh_remote_port;
        pthread_t thread = (pthread_t)msg.header.msgh_id;

        if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN)
        {
            /* not dead yet, put it back for someone else to reap, stop here */
            _pthread_become_available(thread, kernel_thread);
            return;
        }
        ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                       sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }
}

/* For compatibility... */

pthread_t
_pthread_self() {
    return pthread_self();
}

/*
 * Terminate a thread.
 */
void
pthread_exit(void *value_ptr)
{
    struct _pthread_handler_rec *handler;
    pthread_t self = pthread_self();
    kern_return_t kern_res;
    int thread_count;

    /* Make this thread stop receiving any signals */

    while ((handler = self->cleanup_stack) != 0)
    {
        (handler->routine)(handler->arg);
        self->cleanup_stack = handler->next;
    }
    _pthread_tsd_cleanup(self);

    _pthread_reap_threads();

    LOCK(self->lock);
    self->detached |= _PTHREAD_EXITED;

    if (self->detached & PTHREAD_CREATE_JOINABLE) {
        mach_port_t death = self->death;
        self->exit_value = value_ptr;
        UNLOCK(self->lock);
        /* the joiner will need a kernel thread reference, leave ours for it */
        if (death) {
            PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
            if (kern_res != KERN_SUCCESS)
                fprintf(stderr,
                        "semaphore_signal(death) failed: %s\n",
                        mach_error_string(kern_res));
        }
    } else {
        UNLOCK(self->lock);
        /* with no joiner, we let become available consume our cached ref */
        _pthread_become_available(self, pthread_mach_thread_np(self));
    }

    LOCK(_pthread_count_lock);
    thread_count = --_pthread_count;
    UNLOCK(_pthread_count_lock);
    if (thread_count <= 0)
        exit(0);

    /* Use a new reference to terminate ourselves. Should never return. */
    PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
    fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
            mach_error_string(kern_res));
    exit(-1);
}

/*
 * Wait for a thread to terminate and obtain its exit value.
 */
int
pthread_join(pthread_t thread,
             void **value_ptr)
{
    kern_return_t kern_res;
    int res = ESUCCESS;

    if (thread->sig == _PTHREAD_SIG)
    {
        semaphore_t death = new_sem_from_pool(); /* in case we need it */

        LOCK(thread->lock);
        if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
            thread->death == SEMAPHORE_NULL)
        {
            pthread_t self = pthread_self();

            assert(thread->joiner == NULL);
            if (thread != self && (self == NULL || self->joiner != thread))
            {
                int already_exited = (thread->detached & _PTHREAD_EXITED);

                thread->death = death;
                thread->joiner = self;
                UNLOCK(thread->lock);

                if (!already_exited)
                {
                    /* Wait for it to signal... */
                    do {
                        PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
                    } while (kern_res != KERN_SUCCESS);
                }

                /* ... and wait for it to really be dead */
                while ((res = _pthread_reap_thread(thread,
                                                   thread->kernel_thread,
                                                   value_ptr)) == EAGAIN)
                {
                    sched_yield();
                }
            } else {
                UNLOCK(thread->lock);
                res = EDEADLK;
            }
        } else {
            UNLOCK(thread->lock);
            res = EINVAL;
        }
        restore_sem_to_pool(death);
        return res;
    }
    return ESRCH;
}

/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        *policy = thread->policy;
        *param = thread->param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
                      int policy,
                      const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    if (thread->sig == _PTHREAD_SIG)
    {
        switch (policy)
        {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return (EINVAL);
        }
        thread->policy = policy;
        thread->param = *param;
        ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
        if (ret != KERN_SUCCESS)
        {
            return (EINVAL);
        }
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}

/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}
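
/*
 * Illustrative note (not in the original source): both functions ignore
 * the policy argument; every policy gets the same band of 33 priorities
 * centered on the host's default. For example, with a default_priority
 * of 31, the valid sched_priority range for SCHED_FIFO, SCHED_RR and
 * SCHED_OTHER alike would be 15..47.
 */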

/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
              pthread_t t2)
{
    return (t1 == t2);
}

void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        _pthread_set_self(cself);
        return;
    }
    self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        return (void *)self;
    }
    return self->cthread_self;
}

/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
             void (*init_routine)(void))
{
    LOCK(once_control->lock);
    if (once_control->sig == _PTHREAD_ONCE_SIG_init)
    {
        (*init_routine)();
        once_control->sig = _PTHREAD_ONCE_SIG;
    }
    UNLOCK(once_control->lock);
    return (ESUCCESS); /* Spec defines no possible errors! */
}
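
/*
 * Usage sketch (illustrative only): init_state() and api_entry_point() are
 * hypothetical. Any number of threads may race into pthread_once(); the
 * lock above guarantees init_state() runs exactly once before any caller
 * returns.
 *
 *     static pthread_once_t once = PTHREAD_ONCE_INIT;
 *
 *     static void init_state(void) { ... one-time setup ... }
 *
 *     void api_entry_point(void) {
 *         pthread_once(&once, init_state);
 *         ... state is guaranteed initialized here ...
 *     }
 */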

int
pthread_cancel(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
        return (ESUCCESS);
    } else
    {
        return (ESRCH);
    }
}

/*
 * Insert a cancellation point in a thread.
 */
static void
_pthread_testcancel(pthread_t thread)
{
    LOCK(thread->lock);
    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
        (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
    {
        UNLOCK(thread->lock);
        pthread_exit(0);
    }
    UNLOCK(thread->lock);
}

void
pthread_testcancel(void)
{
    pthread_t self = pthread_self();
    _pthread_testcancel(self);
}

/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;

    LOCK(self->lock);
    *oldstate = self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK;
    if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_STATE_MASK) | state;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return err;
}
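
/*
 * Usage sketch (illustrative only): the usual pattern is to disable
 * cancellation around a non-reentrant critical region and restore the
 * previous state afterwards, letting a pending cancel fire at the restore
 * via the _pthread_testcancel() call above.
 *
 *     int old, ignored;
 *
 *     pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
 *     ... region that must not be torn down mid-way ...
 *     pthread_setcancelstate(old, &ignored);
 *
 * Note that oldstate is dereferenced unconditionally here, so callers must
 * always pass a valid pointer.
 */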

/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;

    LOCK(self->lock);
    *oldtype = self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK;
    if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK) | type;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return err;
}

int
pthread_getconcurrency(void)
{
    return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
    pthread_concurrency = new_level;
    return (ESUCCESS);
}

/*
 * Perform package initialization - called automatically when application starts
 */

extern int _cpu_capabilities;

#define kHasAltivec 0x01
#define kCache32    0x04
#define kUseDcba    0x20
#define kNoDcba     0x40

static int
pthread_init(void)
{
    pthread_attr_t *attrs;
    pthread_t thread;
    kern_return_t kr;
    host_basic_info_data_t basic_info;
    host_priority_info_data_t priority_info;
    host_info_t info;
    host_flavor_t flavor;
    host_t host;
    mach_msg_type_number_t count;
    int mib[2];
    size_t len;
    int hasvectorunit, numcpus;
    int dynamic_choice;
    extern int _bcopy_initialize(void);

    count = HOST_PRIORITY_INFO_COUNT;
    info = (host_info_t)&priority_info;
    flavor = HOST_PRIORITY_INFO;
    host = mach_host_self();
    kr = host_info(host, flavor, info, &count);
    if (kr != KERN_SUCCESS)
        printf("host_info failed (%d); probably need privilege.\n", kr);

    default_priority = priority_info.user_priority;
    min_priority = priority_info.minimum_priority;
    max_priority = priority_info.maximum_priority;

    attrs = &_pthread_attr_default;
    pthread_attr_init(attrs);

    thread = &_thread;
    _pthread_set_self(thread);
    _pthread_create(thread, attrs, (void *)USRSTACK, mach_thread_self());
    thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

    /* See if we're on a multiprocessor and set _spin_tries if so. */
    mib[0] = CTL_HW;
    mib[1] = HW_NCPU;
    len = sizeof(numcpus);
    if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
        if (numcpus > 1) {
            _spin_tries = MP_SPIN_TRIES;
        }
    } else {
        count = HOST_BASIC_INFO_COUNT;
        info = (host_info_t)&basic_info;
        flavor = HOST_BASIC_INFO;
        kr = host_info(host, flavor, info, &count);
        if (kr != KERN_SUCCESS)
            printf("host_info failed (%d)\n", kr);
        else {
            if (basic_info.avail_cpus > 1)
                _spin_tries = MP_SPIN_TRIES;
            /* This is a crude test */
            if (basic_info.cpu_subtype >= CPU_SUBTYPE_POWERPC_7400)
                _cpu_has_altivec = 1;
        }
    }
    mach_port_deallocate(mach_task_self(), host);

    mib[0] = CTL_HW;
    mib[1] = HW_VECTORUNIT;
    len = sizeof(hasvectorunit);
    if (sysctl(mib, 2, &hasvectorunit, &len, NULL, 0) == 0) {
        _cpu_has_altivec = hasvectorunit;
    }
    if (_cpu_has_altivec) {     // G4, let bcopy decide whether to use dcba
        _cpu_capabilities = kCache32 + kHasAltivec;
    } else {                    // G3, no altivec and no dcba
        _cpu_capabilities = kCache32 + kNoDcba;
    }
    dynamic_choice = _bcopy_initialize();  // returns 0, kUseDcba, or kNoDcba
    _cpu_capabilities |= dynamic_choice;   // remember dynamic choice, if any
    mig_init(1); /* enable multi-threaded mig interfaces */
    return 0;
}

int sched_yield(void)
{
    swtch_pri(0);
    return 0;
}

/* This is the "magic" that gets the initialization routine called when the application starts */
int (*_cthread_init_routine)(void) = pthread_init;

/* Get a semaphore from the pool, growing it if necessary */

__private_extern__ semaphore_t new_sem_from_pool(void) {
    kern_return_t res;
    semaphore_t sem;
    int i;

    LOCK(sem_pool_lock);
    if (sem_pool_current == sem_pool_count) {
        sem_pool_count += 16;
        sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
        for (i = sem_pool_current; i < sem_pool_count; i++) {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
        }
    }
    sem = sem_pool[sem_pool_current++];
    UNLOCK(sem_pool_lock);
    return sem;
}

/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    sem_pool[--sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}
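
/*
 * Illustrative note (not in the original source): the pool is a simple
 * LIFO of pre-created Mach semaphores, guarded by sem_pool_lock, so hot
 * paths avoid a semaphore_create() round-trip into the kernel.
 * pthread_join() above shows the intended pattern:
 *
 *     semaphore_t death = new_sem_from_pool();
 *     ... use it, possibly not at all ...
 *     restore_sem_to_pool(death);
 */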

static void sem_pool_reset(void) {
    LOCK(sem_pool_lock);
    sem_pool_count = 0;
    sem_pool_current = 0;
    sem_pool = NULL;
    UNLOCK(sem_pool_lock);
}

__private_extern__ void _pthread_fork_child(void) {
    /* Just in case somebody had it locked... */
    UNLOCK(sem_pool_lock);
    sem_pool_reset();
    UNLOCK(_pthread_count_lock);
    _pthread_count = 1;
}