2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
 * Copyright 1991-1997 by Open Software Foundation, Inc.
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
49 * POSIX Pthread Library
52 #include "pthread_internals.h"
55 #include <stdio.h> /* For printf(). */
57 #include <errno.h> /* For __mach_errno_addr() prototype. */
59 #include <sys/resource.h>
60 #include <sys/sysctl.h>
61 #include <sys/queue.h>
62 #include <sys/syscall.h>
63 #include <machine/vmparam.h>
64 #include <mach/vm_statistics.h>
65 #define __APPLE_API_PRIVATE
66 #include <machine/cpu_capabilities.h>
68 __private_extern__
struct __pthread_list __pthread_head
= LIST_HEAD_INITIALIZER(&__pthread_head
);
70 /* Per-thread kernel support */
71 extern void _pthread_set_self(pthread_t
);
72 extern void mig_init(int);
74 /* Get CPU capabilities from the kernel */
75 __private_extern__
void _init_cpu_capabilities(void);
77 /* Needed to tell the malloc subsystem we're going multithreaded */
78 extern void set_malloc_singlethreaded(int);
80 /* Used when we need to call into the kernel with no reply port */
81 extern pthread_lock_t reply_port_lock
;
83 /* We'll implement this when the main thread is a pthread */
84 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
85 static struct _pthread _thread
= {0};
87 /* This global should be used (carefully) by anyone needing to know if a
88 ** pthread has been created.
90 int __is_threaded
= 0;
91 /* _pthread_count is protected by _pthread_list_lock */
92 static int _pthread_count
= 1;
94 __private_extern__ pthread_lock_t _pthread_list_lock
= LOCK_INITIALIZER
;
96 /* Same implementation as LOCK, but without the __is_threaded check */
98 __private_extern__
void _spin_lock_retry(pthread_lock_t
*lock
)
100 int tries
= _spin_tries
;
104 syscall_thread_switch(THREAD_NULL
, SWITCH_OPTION_DEPRESS
, 1);
106 } while(!_spin_lock_try(lock
));
109 extern mach_port_t thread_recycle_port
;
111 /* These are used to keep track of a semaphore pool shared by mutexes and condition
115 static semaphore_t
*sem_pool
= NULL
;
116 static int sem_pool_count
= 0;
117 static int sem_pool_current
= 0;
118 static pthread_lock_t sem_pool_lock
= LOCK_INITIALIZER
;
120 static int default_priority
;
121 static int max_priority
;
122 static int min_priority
;
123 static int pthread_concurrency
;
126 * [Internal] stack support
128 size_t _pthread_stack_size
= 0;
129 #define STACK_LOWEST(sp) ((sp) & ~__pthread_stack_mask)
130 #define STACK_RESERVED (sizeof (struct _pthread))
133 /* The stack grows towards lower addresses:
134 |<----------------user stack|struct _pthread|
135 ^STACK_LOWEST ^STACK_START ^STACK_BASE
138 #define STACK_BASE(sp) (((sp) | __pthread_stack_mask) + 1)
139 #define STACK_START(stack_low) (STACK_BASE(stack_low) - STACK_RESERVED)
140 #define STACK_SELF(sp) STACK_START(sp)
143 static const vm_address_t PTHREAD_STACK_HINT
= 0xF0000000;
144 #elif defined(__i386__)
145 static const vm_address_t PTHREAD_STACK_HINT
= 0xB0000000;
147 #error Need to define a stack address hint for this architecture
150 /* Set the base address to use as the stack pointer, before adjusting due to the ABI
151 * The guardpages for stackoverflow protection is also allocated here
152 * If the stack was already allocated(stackaddr in attr) then there are no guardpages
153 * set up for the thread
157 _pthread_allocate_stack(pthread_attr_t
*attrs
, void **stack
)
162 assert(attrs
->stacksize
>= PTHREAD_STACK_MIN
);
163 if (attrs
->stackaddr
!= NULL
) {
164 /* No guard pages setup in this case */
165 assert(((vm_address_t
)(attrs
->stackaddr
) & (vm_page_size
- 1)) == 0);
166 *stack
= attrs
->stackaddr
;
170 guardsize
= attrs
->guardsize
;
171 *((vm_address_t
*)stack
) = PTHREAD_STACK_HINT
;
172 kr
= vm_map(mach_task_self(), (vm_address_t
*)stack
,
173 attrs
->stacksize
+ guardsize
,
175 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
, MEMORY_OBJECT_NULL
,
176 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
,
178 if (kr
!= KERN_SUCCESS
)
179 kr
= vm_allocate(mach_task_self(),
180 (vm_address_t
*)stack
, attrs
->stacksize
+ guardsize
,
181 VM_MAKE_TAG(VM_MEMORY_STACK
)| VM_FLAGS_ANYWHERE
);
182 if (kr
!= KERN_SUCCESS
) {
185 /* The guard page is at the lowest address */
186 /* The stack base is the highest address */
188 kr
= vm_protect(mach_task_self(), (vm_address_t
)*stack
, guardsize
, FALSE
, VM_PROT_NONE
);
189 *stack
+= attrs
->stacksize
+ guardsize
;
192 vm_address_t cur_stack
= (vm_address_t
)0;
193 if (free_stacks
== 0)
195 /* Allocating guard pages is done by doubling
196 * the actual stack size, since STACK_BASE() needs
197 * to have stacks aligned on stack_size. Allocating just
198 * one page takes as much memory as allocating more pages
199 * since it will remain one entry in the vm map.
200 * Besides, allocating more than one page allows tracking the
201 * overflow pattern when the overflow is bigger than one page.
203 #ifndef NO_GUARD_PAGES
204 # define GUARD_SIZE(a) (2*(a))
205 # define GUARD_MASK(a) (((a)<<1) | 1)
207 # define GUARD_SIZE(a) (a)
208 # define GUARD_MASK(a) (a)
210 while (lowest_stack
> GUARD_SIZE(__pthread_stack_size
))
212 lowest_stack
-= GUARD_SIZE(__pthread_stack_size
);
213 /* Ensure stack is there */
214 kr
= vm_allocate(mach_task_self(),
216 GUARD_SIZE(__pthread_stack_size
),
218 #ifndef NO_GUARD_PAGES
219 if (kr
== KERN_SUCCESS
) {
220 kr
= vm_protect(mach_task_self(),
222 __pthread_stack_size
,
223 FALSE
, VM_PROT_NONE
);
224 lowest_stack
+= __pthread_stack_size
;
225 if (kr
== KERN_SUCCESS
)
229 if (kr
== KERN_SUCCESS
)
233 if (lowest_stack
> 0)
234 free_stacks
= (vm_address_t
*)lowest_stack
;
237 /* Too bad. We'll just have to take what comes.
238 Use vm_map instead of vm_allocate so we can
239 specify alignment. */
240 kr
= vm_map(mach_task_self(), &lowest_stack
,
241 GUARD_SIZE(__pthread_stack_size
),
242 GUARD_MASK(__pthread_stack_mask
),
243 TRUE
/* anywhere */, MEMORY_OBJECT_NULL
,
244 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
,
246 /* This really shouldn't fail and if it does I don't
248 #ifndef NO_GUARD_PAGES
249 if (kr
== KERN_SUCCESS
) {
250 kr
= vm_protect(mach_task_self(),
252 __pthread_stack_size
,
253 FALSE
, VM_PROT_NONE
);
254 lowest_stack
+= __pthread_stack_size
;
257 free_stacks
= (vm_address_t
*)lowest_stack
;
260 *free_stacks
= 0; /* No other free stacks */
262 cur_stack
= STACK_START((vm_address_t
) free_stacks
);
263 free_stacks
= (vm_address_t
*)*free_stacks
;
264 cur_stack
= _adjust_sp(cur_stack
); /* Machine dependent stack fudging */
269 static pthread_attr_t _pthread_attr_default
= {0};
272 * Destroy a thread attribute structure
275 pthread_attr_destroy(pthread_attr_t
*attr
)
277 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
282 return (EINVAL
); /* Not an attribute structure! */
287 * Get the 'detach' state from a thread attribute structure.
288 * Note: written as a helper function for info hiding
291 pthread_attr_getdetachstate(const pthread_attr_t
*attr
,
294 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
296 *detachstate
= attr
->detached
;
300 return (EINVAL
); /* Not an attribute structure! */
305 * Get the 'inherit scheduling' info from a thread attribute structure.
306 * Note: written as a helper function for info hiding
309 pthread_attr_getinheritsched(const pthread_attr_t
*attr
,
312 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
314 *inheritsched
= attr
->inherit
;
318 return (EINVAL
); /* Not an attribute structure! */
323 * Get the scheduling parameters from a thread attribute structure.
324 * Note: written as a helper function for info hiding
327 pthread_attr_getschedparam(const pthread_attr_t
*attr
,
328 struct sched_param
*param
)
330 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
332 *param
= attr
->param
;
336 return (EINVAL
); /* Not an attribute structure! */
341 * Get the scheduling policy from a thread attribute structure.
342 * Note: written as a helper function for info hiding
345 pthread_attr_getschedpolicy(const pthread_attr_t
*attr
,
348 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
350 *policy
= attr
->policy
;
354 return (EINVAL
); /* Not an attribute structure! */
358 /* Retain the existing stack size of 512K and not depend on Main thread default stack size */
359 static const size_t DEFAULT_STACK_SIZE
= (512*1024);
361 * Initialize a thread attribute structure to default values.
364 pthread_attr_init(pthread_attr_t
*attr
)
366 attr
->stacksize
= DEFAULT_STACK_SIZE
;
367 attr
->stackaddr
= NULL
;
368 attr
->sig
= _PTHREAD_ATTR_SIG
;
369 attr
->param
.sched_priority
= default_priority
;
370 attr
->param
.quantum
= 10; /* quantum isn't public yet */
371 attr
->detached
= PTHREAD_CREATE_JOINABLE
;
372 attr
->inherit
= _PTHREAD_DEFAULT_INHERITSCHED
;
373 attr
->policy
= _PTHREAD_DEFAULT_POLICY
;
374 attr
->freeStackOnExit
= TRUE
;
375 attr
->guardsize
= vm_page_size
;
380 * Set the 'detach' state in a thread attribute structure.
381 * Note: written as a helper function for info hiding
384 pthread_attr_setdetachstate(pthread_attr_t
*attr
,
387 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
389 if ((detachstate
== PTHREAD_CREATE_JOINABLE
) ||
390 (detachstate
== PTHREAD_CREATE_DETACHED
))
392 attr
->detached
= detachstate
;
400 return (EINVAL
); /* Not an attribute structure! */
405 * Set the 'inherit scheduling' state in a thread attribute structure.
406 * Note: written as a helper function for info hiding
409 pthread_attr_setinheritsched(pthread_attr_t
*attr
,
412 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
414 if ((inheritsched
== PTHREAD_INHERIT_SCHED
) ||
415 (inheritsched
== PTHREAD_EXPLICIT_SCHED
))
417 attr
->inherit
= inheritsched
;
425 return (EINVAL
); /* Not an attribute structure! */
430 * Set the scheduling paramters in a thread attribute structure.
431 * Note: written as a helper function for info hiding
434 pthread_attr_setschedparam(pthread_attr_t
*attr
,
435 const struct sched_param
*param
)
437 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
439 /* TODO: Validate sched_param fields */
440 attr
->param
= *param
;
444 return (EINVAL
); /* Not an attribute structure! */
449 * Set the scheduling policy in a thread attribute structure.
450 * Note: written as a helper function for info hiding
453 pthread_attr_setschedpolicy(pthread_attr_t
*attr
,
456 if (attr
->sig
== _PTHREAD_ATTR_SIG
)
458 if ((policy
== SCHED_OTHER
) ||
459 (policy
== SCHED_RR
) ||
460 (policy
== SCHED_FIFO
))
462 attr
->policy
= policy
;
470 return (EINVAL
); /* Not an attribute structure! */
475 * Set the scope for the thread.
476 * We currently only provide PTHREAD_SCOPE_SYSTEM
479 pthread_attr_setscope(pthread_attr_t
*attr
,
482 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
483 if (scope
== PTHREAD_SCOPE_SYSTEM
) {
484 /* No attribute yet for the scope */
486 } else if (scope
== PTHREAD_SCOPE_PROCESS
) {
490 return (EINVAL
); /* Not an attribute structure! */
494 * Get the scope for the thread.
495 * We currently only provide PTHREAD_SCOPE_SYSTEM
498 pthread_attr_getscope(pthread_attr_t
*attr
,
501 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
502 *scope
= PTHREAD_SCOPE_SYSTEM
;
505 return (EINVAL
); /* Not an attribute structure! */
508 /* Get the base stack address of the given thread */
510 pthread_attr_getstackaddr(const pthread_attr_t
*attr
, void **stackaddr
)
512 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
513 *stackaddr
= attr
->stackaddr
;
516 return (EINVAL
); /* Not an attribute structure! */
521 pthread_attr_setstackaddr(pthread_attr_t
*attr
, void *stackaddr
)
523 if ((attr
->sig
== _PTHREAD_ATTR_SIG
) && (((vm_offset_t
)stackaddr
& (vm_page_size
- 1)) == 0)) {
524 attr
->stackaddr
= stackaddr
;
525 attr
->freeStackOnExit
= FALSE
;
528 return (EINVAL
); /* Not an attribute structure! */
533 pthread_attr_getstacksize(const pthread_attr_t
*attr
, size_t *stacksize
)
535 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
536 *stacksize
= attr
->stacksize
;
539 return (EINVAL
); /* Not an attribute structure! */
544 pthread_attr_setstacksize(pthread_attr_t
*attr
, size_t stacksize
)
546 if ((attr
->sig
== _PTHREAD_ATTR_SIG
) && ((stacksize
% vm_page_size
) == 0) && (stacksize
>= PTHREAD_STACK_MIN
)) {
547 attr
->stacksize
= stacksize
;
550 return (EINVAL
); /* Not an attribute structure! */
555 pthread_attr_getstack(const pthread_attr_t
*attr
, void **stackaddr
, size_t * stacksize
)
557 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
558 u_int32_t addr
= (u_int32_t
)attr
->stackaddr
;
560 addr
-= attr
->stacksize
;
561 *stackaddr
= (void *)addr
;
562 *stacksize
= attr
->stacksize
;
565 return (EINVAL
); /* Not an attribute structure! */
569 /* By SUSV spec, the stackaddr is the base address, the lowest addressable
570 * byte address. This is not the same as in pthread_attr_setstackaddr.
573 pthread_attr_setstack(pthread_attr_t
*attr
, void *stackaddr
, size_t stacksize
)
575 if ((attr
->sig
== _PTHREAD_ATTR_SIG
) &&
576 (((vm_offset_t
)stackaddr
& (vm_page_size
- 1)) == 0) &&
577 ((stacksize
% vm_page_size
) == 0) && (stacksize
>= PTHREAD_STACK_MIN
)) {
578 u_int32_t addr
= (u_int32_t
)stackaddr
;
581 attr
->stackaddr
= (void *)addr
;
582 attr
->stacksize
= stacksize
;
583 attr
->freeStackOnExit
= FALSE
;
586 return (EINVAL
); /* Not an attribute structure! */
592 * Set the guardsize attribute in the attr.
595 pthread_attr_setguardsize(pthread_attr_t
*attr
,
598 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
599 /* Guardsize of 0 is valid, ot means no guard */
600 if ((guardsize
% vm_page_size
) == 0) {
601 attr
->guardsize
= guardsize
;
606 return (EINVAL
); /* Not an attribute structure! */
610 * Get the guardsize attribute in the attr.
613 pthread_attr_getguardsize(const pthread_attr_t
*attr
,
616 if (attr
->sig
== _PTHREAD_ATTR_SIG
) {
617 *guardsize
= attr
->guardsize
;
620 return (EINVAL
); /* Not an attribute structure! */
625 * Create and start execution of a new thread.
629 _pthread_body(pthread_t self
)
631 _pthread_set_self(self
);
632 pthread_exit((self
->fun
)(self
->arg
));
636 _pthread_create(pthread_t t
,
637 const pthread_attr_t
*attrs
,
639 const mach_port_t kernel_thread
)
646 memset(t
, 0, sizeof(*t
));
648 t
->stacksize
= attrs
->stacksize
;
649 t
->stackaddr
= (void *)stack
;
650 t
->guardsize
= attrs
->guardsize
;
651 t
->kernel_thread
= kernel_thread
;
652 t
->detached
= attrs
->detached
;
653 t
->inherit
= attrs
->inherit
;
654 t
->policy
= attrs
->policy
;
655 t
->param
= attrs
->param
;
656 t
->freeStackOnExit
= attrs
->freeStackOnExit
;
657 t
->mutexes
= (struct _pthread_mutex
*)NULL
;
658 t
->sig
= _PTHREAD_SIG
;
659 t
->reply_port
= MACH_PORT_NULL
;
660 t
->cthread_self
= NULL
;
662 t
->plist
.le_next
= (struct _pthread
*)0;
663 t
->plist
.le_prev
= (struct _pthread
**)0;
664 t
->cancel_state
= PTHREAD_CANCEL_ENABLE
| PTHREAD_CANCEL_DEFERRED
;
665 t
->cleanup_stack
= (struct _pthread_handler_rec
*)NULL
;
666 t
->death
= SEMAPHORE_NULL
;
668 if (kernel_thread
!= MACH_PORT_NULL
)
669 pthread_setschedparam(t
, t
->policy
, &t
->param
);
674 /* Need to deprecate this in future */
676 _pthread_is_threaded(void)
678 return __is_threaded
;
681 /* Non portable public api to know whether this process has(had) atleast one thread
682 * apart from main thread. There could be race if there is a thread in the process of
683 * creation at the time of call . It does not tell whether there are more than one thread
684 * at this point of time.
687 pthread_is_threaded_np(void)
689 return (__is_threaded
);
693 pthread_mach_thread_np(pthread_t t
)
695 thread_t kernel_thread
;
697 /* Wait for the creator to initialize it */
698 while ((kernel_thread
= t
->kernel_thread
) == MACH_PORT_NULL
)
701 return kernel_thread
;
705 pthread_get_stacksize_np(pthread_t t
)
711 pthread_get_stackaddr_np(pthread_t t
)
717 _pthread_reply_port(pthread_t t
)
719 return t
->reply_port
;
723 /* returns non-zero if the current thread is the main thread */
725 pthread_main_np(void)
727 pthread_t self
= pthread_self();
729 return ((self
->detached
& _PTHREAD_CREATE_PARENT
) == _PTHREAD_CREATE_PARENT
);
733 _pthread_create_suspended(pthread_t
*thread
,
734 const pthread_attr_t
*attr
,
735 void *(*start_routine
)(void *),
739 pthread_attr_t
*attrs
;
743 kern_return_t kern_res
;
744 mach_port_t kernel_thread
= MACH_PORT_NULL
;
747 if ((attrs
= (pthread_attr_t
*)attr
) == (pthread_attr_t
*)NULL
)
748 { /* Set up default paramters */
749 attrs
= &_pthread_attr_default
;
750 } else if (attrs
->sig
!= _PTHREAD_ATTR_SIG
) {
755 /* In default policy (ie SCHED_OTHER) only sched_priority is used. Check for
756 * any change in priority or policy is needed here.
758 if (((attrs
->policy
!= _PTHREAD_DEFAULT_POLICY
) ||
759 (attrs
->param
.sched_priority
!= default_priority
)) && (suspended
== 0)) {
767 /* Allocate a stack for the thread */
768 if ((res
= _pthread_allocate_stack(attrs
, &stack
)) != 0) {
771 t
= (pthread_t
)malloc(sizeof(struct _pthread
));
774 /* Create the Mach thread for this thread */
775 PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread
), kern_res
);
776 if (kern_res
!= KERN_SUCCESS
)
778 printf("Can't create thread: %d\n", kern_res
);
779 res
= EINVAL
; /* Need better error here? */
783 if ((res
= _pthread_create(t
, attrs
, stack
, kernel_thread
)) != 0)
787 set_malloc_singlethreaded(0);
790 /* Send it on it's way */
792 t
->fun
= start_routine
;
793 /* Now set it up to execute */
794 LOCK(_pthread_list_lock
);
795 LIST_INSERT_HEAD(&__pthread_head
, t
, plist
);
797 UNLOCK(_pthread_list_lock
);
798 _pthread_setup(t
, _pthread_body
, stack
, suspended
, needresume
);
804 pthread_create(pthread_t
*thread
,
805 const pthread_attr_t
*attr
,
806 void *(*start_routine
)(void *),
809 return _pthread_create_suspended(thread
, attr
, start_routine
, arg
, 0);
813 pthread_create_suspended_np(pthread_t
*thread
,
814 const pthread_attr_t
*attr
,
815 void *(*start_routine
)(void *),
818 return _pthread_create_suspended(thread
, attr
, start_routine
, arg
, 1);
822 * Make a thread 'undetached' - no longer 'joinable' with other threads.
825 pthread_detach(pthread_t thread
)
827 if (thread
->sig
== _PTHREAD_SIG
)
830 if (thread
->detached
& PTHREAD_CREATE_JOINABLE
)
832 if (thread
->detached
& _PTHREAD_EXITED
) {
833 UNLOCK(thread
->lock
);
834 pthread_join(thread
, NULL
);
837 semaphore_t death
= thread
->death
;
839 thread
->detached
&= ~PTHREAD_CREATE_JOINABLE
;
840 thread
->detached
|= PTHREAD_CREATE_DETACHED
;
841 UNLOCK(thread
->lock
);
843 (void) semaphore_signal(death
);
847 UNLOCK(thread
->lock
);
851 return (ESRCH
); /* Not a valid thread */
857 * pthread_kill call to system call
860 extern int __pthread_kill(mach_port_t
, int);
869 if ((sig
< 0) || (sig
> NSIG
))
872 if (th
&& (th
->sig
== _PTHREAD_SIG
)) {
873 error
= __pthread_kill(pthread_mach_thread_np(th
), sig
);
882 /* Announce that there are pthread resources ready to be reclaimed in a */
883 /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
884 /* thread underneath is terminated right away. */
886 void _pthread_become_available(pthread_t thread
, mach_port_t kernel_thread
) {
887 mach_msg_empty_rcv_t msg
;
890 msg
.header
.msgh_bits
= MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND
,
891 MACH_MSG_TYPE_MOVE_SEND
);
892 msg
.header
.msgh_size
= sizeof msg
- sizeof msg
.trailer
;
893 msg
.header
.msgh_remote_port
= thread_recycle_port
;
894 msg
.header
.msgh_local_port
= kernel_thread
;
895 msg
.header
.msgh_id
= (int)thread
;
896 ret
= mach_msg_send(&msg
.header
);
897 assert(ret
== MACH_MSG_SUCCESS
);
900 /* Reap the resources for available threads */
902 int _pthread_reap_thread(pthread_t th
, mach_port_t kernel_thread
, void **value_ptr
) {
903 mach_port_type_t ptype
;
907 self
= mach_task_self();
908 if (kernel_thread
!= MACH_PORT_DEAD
) {
909 ret
= mach_port_type(self
, kernel_thread
, &ptype
);
910 if (ret
== KERN_SUCCESS
&& ptype
!= MACH_PORT_TYPE_DEAD_NAME
) {
911 /* not quite dead yet... */
914 ret
= mach_port_deallocate(self
, kernel_thread
);
915 if (ret
!= KERN_SUCCESS
) {
917 "mach_port_deallocate(kernel_thread) failed: %s\n",
918 mach_error_string(ret
));
922 if (th
->reply_port
!= MACH_PORT_NULL
) {
923 ret
= mach_port_mod_refs(self
, th
->reply_port
,
924 MACH_PORT_RIGHT_RECEIVE
, -1);
925 if (ret
!= KERN_SUCCESS
) {
927 "mach_port_mod_refs(reply_port) failed: %s\n",
928 mach_error_string(ret
));
932 if (th
->freeStackOnExit
) {
933 vm_address_t addr
= (vm_address_t
)th
->stackaddr
;
936 size
= (vm_size_t
)th
->stacksize
+ th
->guardsize
;
939 ret
= vm_deallocate(self
, addr
, size
);
940 if (ret
!= KERN_SUCCESS
) {
942 "vm_deallocate(stack) failed: %s\n",
943 mach_error_string(ret
));
948 *value_ptr
= th
->exit_value
;
957 void _pthread_reap_threads(void)
959 mach_msg_empty_rcv_t msg
;
962 ret
= mach_msg(&msg
.header
, MACH_RCV_MSG
|MACH_RCV_TIMEOUT
, 0,
963 sizeof(mach_msg_empty_rcv_t
), thread_recycle_port
,
964 MACH_MSG_TIMEOUT_NONE
, MACH_PORT_NULL
);
965 while (ret
== MACH_MSG_SUCCESS
) {
966 mach_port_t kernel_thread
= msg
.header
.msgh_remote_port
;
967 pthread_t thread
= (pthread_t
)msg
.header
.msgh_id
;
969 if (_pthread_reap_thread(thread
, kernel_thread
, (void **)0) == EAGAIN
)
971 /* not dead yet, put it back for someone else to reap, stop here */
972 _pthread_become_available(thread
, kernel_thread
);
975 ret
= mach_msg(&msg
.header
, MACH_RCV_MSG
|MACH_RCV_TIMEOUT
, 0,
976 sizeof(mach_msg_empty_rcv_t
), thread_recycle_port
,
977 MACH_MSG_TIMEOUT_NONE
, MACH_PORT_NULL
);
981 /* For compatibility... */
985 return pthread_self();
989 * Terminate a thread.
992 pthread_exit(void *value_ptr
)
994 struct _pthread_handler_rec
*handler
;
995 pthread_t self
= pthread_self();
996 kern_return_t kern_res
;
999 /* Make this thread not to receive any signals */
1002 while ((handler
= self
->cleanup_stack
) != 0)
1004 (handler
->routine
)(handler
->arg
);
1005 self
->cleanup_stack
= handler
->next
;
1007 _pthread_tsd_cleanup(self
);
1009 _pthread_reap_threads();
1012 self
->detached
|= _PTHREAD_EXITED
;
1014 if (self
->detached
& PTHREAD_CREATE_JOINABLE
) {
1015 mach_port_t death
= self
->death
;
1016 self
->exit_value
= value_ptr
;
1018 /* the joiner will need a kernel thread reference, leave ours for it */
1020 PTHREAD_MACH_CALL(semaphore_signal(death
), kern_res
);
1021 if (kern_res
!= KERN_SUCCESS
)
1023 "semaphore_signal(death) failed: %s\n",
1024 mach_error_string(kern_res
));
1026 LOCK(_pthread_list_lock
);
1027 thread_count
= --_pthread_count
;
1028 UNLOCK(_pthread_list_lock
);
1031 LOCK(_pthread_list_lock
);
1032 LIST_REMOVE(self
, plist
);
1033 thread_count
= --_pthread_count
;
1034 UNLOCK(_pthread_list_lock
);
1035 /* with no joiner, we let become available consume our cached ref */
1036 _pthread_become_available(self
, pthread_mach_thread_np(self
));
1039 if (thread_count
<= 0)
1042 /* Use a new reference to terminate ourselves. Should never return. */
1043 PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res
);
1044 fprintf(stderr
, "thread_terminate(mach_thread_self()) failed: %s\n",
1045 mach_error_string(kern_res
));
1050 * Wait for a thread to terminate and obtain its exit value.
1053 pthread_join(pthread_t thread
,
1056 kern_return_t kern_res
;
1059 if (thread
->sig
== _PTHREAD_SIG
)
1061 semaphore_t death
= new_sem_from_pool(); /* in case we need it */
1064 if ((thread
->detached
& PTHREAD_CREATE_JOINABLE
) &&
1065 thread
->death
== SEMAPHORE_NULL
)
1067 pthread_t self
= pthread_self();
1069 assert(thread
->joiner
== NULL
);
1070 if (thread
!= self
&& (self
== NULL
|| self
->joiner
!= thread
))
1072 int already_exited
= (thread
->detached
& _PTHREAD_EXITED
);
1074 thread
->death
= death
;
1075 thread
->joiner
= self
;
1076 UNLOCK(thread
->lock
);
1078 if (!already_exited
)
1080 /* Wait for it to signal... */
1082 PTHREAD_MACH_CALL(semaphore_wait(death
), kern_res
);
1083 } while (kern_res
!= KERN_SUCCESS
);
1086 LOCK(_pthread_list_lock
);
1087 LIST_REMOVE(thread
, plist
);
1088 UNLOCK(_pthread_list_lock
);
1089 /* ... and wait for it to really be dead */
1090 while ((res
= _pthread_reap_thread(thread
,
1091 thread
->kernel_thread
,
1092 value_ptr
)) == EAGAIN
)
1097 UNLOCK(thread
->lock
);
1101 UNLOCK(thread
->lock
);
1104 restore_sem_to_pool(death
);
1111 * Get the scheduling policy and scheduling paramters for a thread.
1114 pthread_getschedparam(pthread_t thread
,
1116 struct sched_param
*param
)
1118 if (thread
->sig
== _PTHREAD_SIG
)
1120 *policy
= thread
->policy
;
1121 *param
= thread
->param
;
1125 return (ESRCH
); /* Not a valid thread structure */
1130 * Set the scheduling policy and scheduling paramters for a thread.
1133 pthread_setschedparam(pthread_t thread
,
1135 const struct sched_param
*param
)
1137 policy_base_data_t bases
;
1139 mach_msg_type_number_t count
;
1142 if (thread
->sig
== _PTHREAD_SIG
)
1147 bases
.ts
.base_priority
= param
->sched_priority
;
1148 base
= (policy_base_t
)&bases
.ts
;
1149 count
= POLICY_TIMESHARE_BASE_COUNT
;
1152 bases
.fifo
.base_priority
= param
->sched_priority
;
1153 base
= (policy_base_t
)&bases
.fifo
;
1154 count
= POLICY_FIFO_BASE_COUNT
;
1157 bases
.rr
.base_priority
= param
->sched_priority
;
1158 /* quantum isn't public yet */
1159 bases
.rr
.quantum
= param
->quantum
;
1160 base
= (policy_base_t
)&bases
.rr
;
1161 count
= POLICY_RR_BASE_COUNT
;
1166 ret
= thread_policy(pthread_mach_thread_np(thread
), policy
, base
, count
, TRUE
);
1167 if (ret
!= KERN_SUCCESS
)
1171 thread
->policy
= policy
;
1172 thread
->param
= *param
;
1176 return (ESRCH
); /* Not a valid thread structure */
1181 * Get the minimum priority for the given policy
1184 sched_get_priority_min(int policy
)
1186 return default_priority
- 16;
1190 * Get the maximum priority for the given policy
1193 sched_get_priority_max(int policy
)
1195 return default_priority
+ 16;
1199 * Determine if two thread identifiers represent the same thread.
1202 pthread_equal(pthread_t t1
,
1208 __private_extern__
void
1209 _pthread_set_self(pthread_t p
)
1211 extern void __pthread_set_self(pthread_t
);
1213 bzero(&_thread
, sizeof(struct _pthread
));
1217 __pthread_set_self(p
);
1221 cthread_set_self(void *cself
)
1223 pthread_t self
= pthread_self();
1224 if ((self
== (pthread_t
)NULL
) || (self
->sig
!= _PTHREAD_SIG
)) {
1225 _pthread_set_self(cself
);
1228 self
->cthread_self
= cself
;
1232 ur_cthread_self(void) {
1233 pthread_t self
= pthread_self();
1234 if ((self
== (pthread_t
)NULL
) || (self
->sig
!= _PTHREAD_SIG
)) {
1235 return (void *)self
;
1237 return self
->cthread_self
;
1241 * Execute a function exactly one time in a thread-safe fashion.
1244 pthread_once(pthread_once_t
*once_control
,
1245 void (*init_routine
)(void))
1247 _spin_lock(&once_control
->lock
);
1248 if (once_control
->sig
== _PTHREAD_ONCE_SIG_init
)
1251 once_control
->sig
= _PTHREAD_ONCE_SIG
;
1253 _spin_unlock(&once_control
->lock
);
1254 return (ESUCCESS
); /* Spec defines no possible errors! */
1261 pthread_cancel(pthread_t thread
)
1263 if (thread
->sig
== _PTHREAD_SIG
)
1265 thread
->cancel_state
|= _PTHREAD_CANCEL_PENDING
;
1274 * Insert a cancellation point in a thread.
1277 _pthread_testcancel(pthread_t thread
)
1280 if ((thread
->cancel_state
& (PTHREAD_CANCEL_ENABLE
|_PTHREAD_CANCEL_PENDING
)) ==
1281 (PTHREAD_CANCEL_ENABLE
|_PTHREAD_CANCEL_PENDING
))
1283 UNLOCK(thread
->lock
);
1286 UNLOCK(thread
->lock
);
1290 pthread_testcancel(void)
1292 pthread_t self
= pthread_self();
1293 _pthread_testcancel(self
);
1297 * Query/update the cancelability 'state' of a thread
1300 pthread_setcancelstate(int state
, int *oldstate
)
1302 pthread_t self
= pthread_self();
1306 *oldstate
= self
->cancel_state
& ~_PTHREAD_CANCEL_STATE_MASK
;
1307 if ((state
== PTHREAD_CANCEL_ENABLE
) || (state
== PTHREAD_CANCEL_DISABLE
))
1309 self
->cancel_state
= (self
->cancel_state
& _PTHREAD_CANCEL_STATE_MASK
) | state
;
1315 _pthread_testcancel(self
); /* See if we need to 'die' now... */
1320 * Query/update the cancelability 'type' of a thread
1323 pthread_setcanceltype(int type
, int *oldtype
)
1325 pthread_t self
= pthread_self();
1329 *oldtype
= self
->cancel_state
& ~_PTHREAD_CANCEL_TYPE_MASK
;
1330 if ((type
== PTHREAD_CANCEL_DEFERRED
) || (type
== PTHREAD_CANCEL_ASYNCHRONOUS
))
1332 self
->cancel_state
= (self
->cancel_state
& _PTHREAD_CANCEL_TYPE_MASK
) | type
;
1338 _pthread_testcancel(self
); /* See if we need to 'die' now... */
1343 pthread_getconcurrency(void)
1345 return(pthread_concurrency
);
1349 pthread_setconcurrency(int new_level
)
1351 pthread_concurrency
= new_level
;
1356 * Perform package initialization - called automatically when application starts
1362 pthread_attr_t
*attrs
;
1365 host_basic_info_data_t basic_info
;
1366 host_priority_info_data_t priority_info
;
1368 host_flavor_t flavor
;
1370 mach_msg_type_number_t count
;
1375 count
= HOST_PRIORITY_INFO_COUNT
;
1376 info
= (host_info_t
)&priority_info
;
1377 flavor
= HOST_PRIORITY_INFO
;
1378 host
= mach_host_self();
1379 kr
= host_info(host
, flavor
, info
, &count
);
1380 if (kr
!= KERN_SUCCESS
)
1381 printf("host_info failed (%d); probably need privilege.\n", kr
);
1383 default_priority
= priority_info
.user_priority
;
1384 min_priority
= priority_info
.minimum_priority
;
1385 max_priority
= priority_info
.maximum_priority
;
1387 attrs
= &_pthread_attr_default
;
1388 pthread_attr_init(attrs
);
1390 LIST_INIT(&__pthread_head
);
1391 LOCK_INIT(_pthread_list_lock
);
1393 LIST_INSERT_HEAD(&__pthread_head
, thread
, plist
);
1394 _pthread_set_self(thread
);
1395 _pthread_create(thread
, attrs
, (void *)USRSTACK
, mach_thread_self());
1396 thread
->detached
= PTHREAD_CREATE_JOINABLE
|_PTHREAD_CREATE_PARENT
;
1398 /* See if we're on a multiprocessor and set _spin_tries if so. */
1401 len
= sizeof(numcpus
);
1402 if (sysctl(mib
, 2, &numcpus
, &len
, NULL
, 0) == 0) {
1404 _spin_tries
= MP_SPIN_TRIES
;
1407 count
= HOST_BASIC_INFO_COUNT
;
1408 info
= (host_info_t
)&basic_info
;
1409 flavor
= HOST_BASIC_INFO
;
1410 kr
= host_info(host
, flavor
, info
, &count
);
1411 if (kr
!= KERN_SUCCESS
)
1412 printf("host_info failed (%d)\n", kr
);
1414 if (basic_info
.avail_cpus
> 1)
1415 _spin_tries
= MP_SPIN_TRIES
;
1419 mach_port_deallocate(mach_task_self(), host
);
1421 _init_cpu_capabilities();
1423 #if defined(__ppc__)
1425 /* Use fsqrt instruction in sqrt() if available. */
1426 if (_cpu_capabilities
& kHasFsqrt
) {
1427 extern size_t hw_sqrt_len
;
1428 extern double sqrt( double );
1429 extern double hw_sqrt( double );
1430 extern void sys_icache_invalidate(void *, size_t);
1432 memcpy ( (void *)sqrt
, (void *)hw_sqrt
, hw_sqrt_len
);
1433 sys_icache_invalidate((void *)sqrt
, hw_sqrt_len
);
1437 mig_init(1); /* enable multi-threaded mig interfaces */
1441 int sched_yield(void)
1447 /* This is the "magic" that gets the initialization routine called when the application starts */
1448 int (*_cthread_init_routine
)(void) = pthread_init
;
1450 /* Get a semaphore from the pool, growing it if necessary */
1452 __private_extern__ semaphore_t
new_sem_from_pool(void) {
1457 LOCK(sem_pool_lock
);
1458 if (sem_pool_current
== sem_pool_count
) {
1459 sem_pool_count
+= 16;
1460 sem_pool
= realloc(sem_pool
, sem_pool_count
* sizeof(semaphore_t
));
1461 for (i
= sem_pool_current
; i
< sem_pool_count
; i
++) {
1462 PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool
[i
], SYNC_POLICY_FIFO
, 0), res
);
1465 sem
= sem_pool
[sem_pool_current
++];
1466 UNLOCK(sem_pool_lock
);
1470 /* Put a semaphore back into the pool */
1471 __private_extern__
void restore_sem_to_pool(semaphore_t sem
) {
1472 LOCK(sem_pool_lock
);
1473 sem_pool
[--sem_pool_current
] = sem
;
1474 UNLOCK(sem_pool_lock
);
1477 static void sem_pool_reset(void) {
1478 LOCK(sem_pool_lock
);
1480 sem_pool_current
= 0;
1482 UNLOCK(sem_pool_lock
);
1485 __private_extern__
void _pthread_fork_child(pthread_t p
) {
1486 /* Just in case somebody had it locked... */
1487 UNLOCK(sem_pool_lock
);
1489 /* No need to hold the pthread_list_lock as no one other than this
1490 * thread is present at this time
1492 LIST_INIT(&__pthread_head
);
1493 LOCK_INIT(_pthread_list_lock
);
1494 LIST_INSERT_HEAD(&__pthread_head
, p
, plist
);