/*
 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * POSIX Pthread Library
 */
#include "pthread_internals.h"

#include <assert.h>
#include <stdio.h>	/* For printf(). */
#include <stdlib.h>
#include <errno.h>	/* For __mach_errno_addr() prototype. */
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/syscall.h>
#include <machine/vmparam.h>
#include <mach/vm_statistics.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
__private_extern__ struct __pthread_list __pthread_head = LIST_HEAD_INITIALIZER(&__pthread_head);

/* Per-thread kernel support */
extern void _pthread_set_self(pthread_t);
extern void mig_init(int);

/* Get CPU capabilities from the kernel */
__private_extern__ void _init_cpu_capabilities(void);

/* Needed to tell the malloc subsystem we're going multithreaded */
extern void set_malloc_singlethreaded(int);

/* Used when we need to call into the kernel with no reply port */
extern pthread_lock_t reply_port_lock;

/* We'll implement this when the main thread is a pthread */
/* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
static struct _pthread _thread = {0};

/* This global should be used (carefully) by anyone needing to know if a
** pthread has been created.
*/
int __is_threaded = 0;
/* _pthread_count is protected by _pthread_list_lock */
static int _pthread_count = 1;

__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
/* Same implementation as LOCK, but without the __is_threaded check */
int _spin_tries = 0;
__private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
{
    int tries = _spin_tries;
    do {
        if (tries-- > 0)
            continue;
        syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_DEPRESS, 1);
        tries = _spin_tries;
    } while (!_spin_lock_try(lock));
}
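/*
 * Illustrative sketch (not part of this library): the same spin-then-yield
 * idea shown standalone with a plain int flag, GCC atomic builtins, and
 * sched_yield() in place of the Mach thread_switch() depression above.
 * example_lock/example_spin_lock are hypothetical names.
 */
#if 0
#include <sched.h>

static volatile int example_lock = 0;

static void example_spin_lock(volatile int *lock)
{
    int tries = 10;                 /* spin budget before yielding */
    while (__sync_lock_test_and_set(lock, 1)) {
        if (tries-- > 0)
            continue;               /* burn a few iterations first */
        sched_yield();              /* then give the CPU away, as above */
        tries = 10;
    }
}

static void example_spin_unlock(volatile int *lock)
{
    __sync_lock_release(lock);      /* store 0 with release semantics */
}
#endif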
extern mach_port_t thread_recycle_port;

/* These are used to keep track of a semaphore pool shared by mutexes and condition
** variables.
*/

static semaphore_t *sem_pool = NULL;
static int sem_pool_count = 0;
static int sem_pool_current = 0;
static pthread_lock_t sem_pool_lock = LOCK_INITIALIZER;

static int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;
/*
 * [Internal] stack support
 */
size_t _pthread_stack_size = 0;
#define STACK_LOWEST(sp)	((sp) & ~__pthread_stack_mask)
#define STACK_RESERVED		(sizeof (struct _pthread))

/* The stack grows towards lower addresses:
   |<----------------user stack|struct _pthread|
   ^STACK_LOWEST               ^STACK_START    ^STACK_BASE
   ^STACK_SELF  */

#define STACK_BASE(sp)		(((sp) | __pthread_stack_mask) + 1)
#define STACK_START(stack_low)	(STACK_BASE(stack_low) - STACK_RESERVED)
#define STACK_SELF(sp)		STACK_START(sp)
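/*
 * Worked example (illustrative; assumes a 512KB-aligned stack, so
 * __pthread_stack_mask == 0x7FFFF). For sp == 0xB0040000:
 *   STACK_LOWEST(sp) == 0xB0000000   (round down to the stack boundary)
 *   STACK_BASE(sp)   == 0xB0080000   (one past the highest address)
 *   STACK_START(sp)  == 0xB0080000 - sizeof(struct _pthread)
 * i.e. the struct _pthread lives at the very top of the allocation and
 * user frames grow downward from STACK_START.
 */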
#if defined(__ppc__)
static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
#elif defined(__i386__)
static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
#else
#error Need to define a stack address hint for this architecture
#endif
/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
 * The guard pages for stack-overflow protection are also allocated here.
 * If the stack was already allocated (stackaddr in attr), then there are no guard pages
 * set up for the thread.
 */

static int
_pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
{
    kern_return_t kr;
    size_t guardsize;
#if 1
    assert(attrs->stacksize >= PTHREAD_STACK_MIN);
    if (attrs->stackaddr != NULL) {
        /* No guard pages setup in this case */
        assert(((vm_address_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
        *stack = attrs->stackaddr;
        return 0;
    }

    guardsize = attrs->guardsize;
    *((vm_address_t *)stack) = PTHREAD_STACK_HINT;
    kr = vm_map(mach_task_self(), (vm_address_t *)stack,
                attrs->stacksize + guardsize,
                vm_page_size - 1,
                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE, MEMORY_OBJECT_NULL,
                0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                VM_INHERIT_DEFAULT);
    if (kr != KERN_SUCCESS)
        kr = vm_allocate(mach_task_self(),
                         (vm_address_t *)stack, attrs->stacksize + guardsize,
                         VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
    if (kr != KERN_SUCCESS) {
        return EAGAIN;
    }
    /* The guard page is at the lowest address */
    /* The stack base is the highest address */
    if (guardsize)
        kr = vm_protect(mach_task_self(), (vm_address_t)*stack, guardsize, FALSE, VM_PROT_NONE);
    *stack += attrs->stacksize + guardsize;
#else
    vm_address_t cur_stack = (vm_address_t)0;
    if (free_stacks == 0)
    {
        /* Allocating guard pages is done by doubling
         * the actual stack size, since STACK_BASE() needs
         * to have stacks aligned on stack_size. Allocating just
         * one page takes as much memory as allocating more pages
         * since it will remain one entry in the vm map.
         * Besides, allocating more than one page allows tracking the
         * overflow pattern when the overflow is bigger than one page.
         */
#ifndef NO_GUARD_PAGES
# define GUARD_SIZE(a)	(2*(a))
# define GUARD_MASK(a)	(((a)<<1) | 1)
#else
# define GUARD_SIZE(a)	(a)
# define GUARD_MASK(a)	(a)
#endif
        while (lowest_stack > GUARD_SIZE(__pthread_stack_size))
        {
            lowest_stack -= GUARD_SIZE(__pthread_stack_size);
            /* Ensure stack is there */
            kr = vm_allocate(mach_task_self(),
                             &lowest_stack,
                             GUARD_SIZE(__pthread_stack_size),
                             FALSE);
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
                if (kr == KERN_SUCCESS)
                    break;
            }
#else
            if (kr == KERN_SUCCESS)
                break;
#endif
        }
        if (lowest_stack > 0)
            free_stacks = (vm_address_t *)lowest_stack;
        else
        {
            /* Too bad. We'll just have to take what comes.
               Use vm_map instead of vm_allocate so we can
               specify alignment. */
            kr = vm_map(mach_task_self(), &lowest_stack,
                        GUARD_SIZE(__pthread_stack_size),
                        GUARD_MASK(__pthread_stack_mask),
                        TRUE /* anywhere */, MEMORY_OBJECT_NULL,
                        0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
                        VM_INHERIT_DEFAULT);
            /* This really shouldn't fail and if it does I don't
               know what to do. */
#ifndef NO_GUARD_PAGES
            if (kr == KERN_SUCCESS) {
                kr = vm_protect(mach_task_self(),
                                lowest_stack,
                                __pthread_stack_size,
                                FALSE, VM_PROT_NONE);
                lowest_stack += __pthread_stack_size;
            }
#endif
            free_stacks = (vm_address_t *)lowest_stack;
        }
        *free_stacks = 0; /* No other free stacks */
    }
    cur_stack = STACK_START((vm_address_t) free_stacks);
    free_stacks = (vm_address_t *)*free_stacks;
    cur_stack = _adjust_sp(cur_stack); /* Machine dependent stack fudging */
#endif
    return 0;
}
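/*
 * Illustrative sketch (not part of this library): the guard-page layout
 * built above, shown standalone with plain POSIX mmap()/mprotect(). One
 * PROT_NONE page sits at the low end of the mapping so an overflow traps
 * instead of silently corrupting adjacent memory. alloc_stack_with_guard
 * is a hypothetical name.
 */
#if 0
#include <sys/mman.h>
#include <unistd.h>

static void *alloc_stack_with_guard(size_t stacksize)
{
    size_t guard = (size_t)getpagesize();
    char *base = mmap(NULL, stacksize + guard, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANON, -1, 0);
    if (base == MAP_FAILED)
        return NULL;
    /* Make the lowest page inaccessible; touching it faults. */
    mprotect(base, guard, PROT_NONE);
    return base + guard + stacksize;    /* top of stack, like *stack above */
}
#endif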
static pthread_attr_t _pthread_attr_default = {0};

/*
 * Destroy a thread attribute structure
 */
int
pthread_attr_destroy(pthread_attr_t *attr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'detach' state from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getdetachstate(const pthread_attr_t *attr,
                            int *detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *detachstate = attr->detached;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the 'inherit scheduling' info from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getinheritsched(const pthread_attr_t *attr,
                             int *inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *inheritsched = attr->inherit;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling parameters from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedparam(const pthread_attr_t *attr,
                           struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *param = attr->param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Get the scheduling policy from a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_getschedpolicy(const pthread_attr_t *attr,
                            int *policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        *policy = attr->policy;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/* Retain the existing stack size of 512K and not depend on Main thread default stack size */
static const size_t DEFAULT_STACK_SIZE = (512*1024);
/*
 * Initialize a thread attribute structure to default values.
 */
int
pthread_attr_init(pthread_attr_t *attr)
{
    attr->stacksize = DEFAULT_STACK_SIZE;
    attr->stackaddr = NULL;
    attr->sig = _PTHREAD_ATTR_SIG;
    attr->param.sched_priority = default_priority;
    attr->param.quantum = 10; /* quantum isn't public yet */
    attr->detached = PTHREAD_CREATE_JOINABLE;
    attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
    attr->policy = _PTHREAD_DEFAULT_POLICY;
    attr->freeStackOnExit = TRUE;
    attr->guardsize = vm_page_size;
    return (ESUCCESS);
}
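/*
 * Illustrative sketch (not part of this library): typical use of the attr
 * API whose defaults are set above; a 1MB stack and a detached thread.
 * worker() and spawn_detached_worker() are hypothetical names.
 */
#if 0
#include <pthread.h>

static void *worker(void *arg) { return arg; }

static int spawn_detached_worker(void)
{
    pthread_t tid;
    pthread_attr_t attr;
    int err;

    pthread_attr_init(&attr);   /* fills in the defaults shown above */
    pthread_attr_setstacksize(&attr, 1024 * 1024);  /* must be a multiple of vm_page_size */
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    err = pthread_create(&tid, &attr, worker, NULL);
    pthread_attr_destroy(&attr);
    return err;
}
#endif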
/*
 * Set the 'detach' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setdetachstate(pthread_attr_t *attr,
                            int detachstate)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((detachstate == PTHREAD_CREATE_JOINABLE) ||
            (detachstate == PTHREAD_CREATE_DETACHED))
        {
            attr->detached = detachstate;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the 'inherit scheduling' state in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setinheritsched(pthread_attr_t *attr,
                             int inheritsched)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((inheritsched == PTHREAD_INHERIT_SCHED) ||
            (inheritsched == PTHREAD_EXPLICIT_SCHED))
        {
            attr->inherit = inheritsched;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling parameters in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedparam(pthread_attr_t *attr,
                           const struct sched_param *param)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        /* TODO: Validate sched_param fields */
        attr->param = *param;
        return (ESUCCESS);
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/*
 * Set the scheduling policy in a thread attribute structure.
 * Note: written as a helper function for info hiding
 */
int
pthread_attr_setschedpolicy(pthread_attr_t *attr,
                            int policy)
{
    if (attr->sig == _PTHREAD_ATTR_SIG)
    {
        if ((policy == SCHED_OTHER) ||
            (policy == SCHED_RR) ||
            (policy == SCHED_FIFO))
        {
            attr->policy = policy;
            return (ESUCCESS);
        } else
        {
            return (EINVAL);
        }
    } else
    {
        return (EINVAL); /* Not an attribute structure! */
    }
}
/*
 * Set the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_setscope(pthread_attr_t *attr,
                      int scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        if (scope == PTHREAD_SCOPE_SYSTEM) {
            /* No attribute yet for the scope */
            return (ESUCCESS);
        } else if (scope == PTHREAD_SCOPE_PROCESS) {
            return (ENOTSUP);
        }
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the scope for the thread.
 * We currently only provide PTHREAD_SCOPE_SYSTEM
 */
int
pthread_attr_getscope(pthread_attr_t *attr,
                      int *scope)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *scope = PTHREAD_SCOPE_SYSTEM;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}
/* Get the base stack address of the given thread */
int
pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stackaddr = attr->stackaddr;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
        attr->stackaddr = stackaddr;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        attr->stacksize = stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

int
pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t *stacksize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        u_int32_t addr = (u_int32_t)attr->stackaddr;

        addr -= attr->stacksize;
        *stackaddr = (void *)addr;
        *stacksize = attr->stacksize;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}

/* By SUSv3 spec, the stackaddr is the base address, the lowest addressable
 * byte address. This is not the same as in pthread_attr_setstackaddr.
 */
int
pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr, size_t stacksize)
{
    if ((attr->sig == _PTHREAD_ATTR_SIG) &&
        (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0) &&
        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
        u_int32_t addr = (u_int32_t)stackaddr;

        addr += stacksize;
        attr->stackaddr = (void *)addr;
        attr->stacksize = stacksize;
        attr->freeStackOnExit = FALSE;
        return (ESUCCESS);
    } else {
        return (EINVAL); /* Not an attribute structure! */
    }
}
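/*
 * Illustrative sketch (not part of this library): handing a thread a
 * caller-owned stack. SUSv3 pthread_attr_setstack() takes the LOWEST byte
 * of the region, while the older pthread_attr_setstackaddr() above takes
 * the logical base (high end). create_on_own_stack is a hypothetical name.
 */
#if 0
#include <pthread.h>
#include <errno.h>
#include <sys/mman.h>

static int create_on_own_stack(pthread_t *tid, void *(*fn)(void *))
{
    size_t size = 512 * 1024;   /* page-multiple, >= PTHREAD_STACK_MIN */
    void *low = mmap(NULL, size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANON, -1, 0);
    pthread_attr_t attr;

    if (low == MAP_FAILED)
        return EAGAIN;
    pthread_attr_init(&attr);
    pthread_attr_setstack(&attr, low, size);    /* lowest address of the region */
    return pthread_create(tid, &attr, fn, NULL);    /* no guard page in this mode */
}
#endif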
/*
 * Set the guardsize attribute in the attr.
 */
int
pthread_attr_setguardsize(pthread_attr_t *attr,
                          size_t guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        /* Guardsize of 0 is valid, it means no guard */
        if ((guardsize % vm_page_size) == 0) {
            attr->guardsize = guardsize;
            return (ESUCCESS);
        } else
            return (EINVAL);
    }
    return (EINVAL); /* Not an attribute structure! */
}

/*
 * Get the guardsize attribute in the attr.
 */
int
pthread_attr_getguardsize(const pthread_attr_t *attr,
                          size_t *guardsize)
{
    if (attr->sig == _PTHREAD_ATTR_SIG) {
        *guardsize = attr->guardsize;
        return (ESUCCESS);
    }
    return (EINVAL); /* Not an attribute structure! */
}
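/*
 * Illustrative sketch (not part of this library): tuning the guard region.
 * guardsize_example is a hypothetical name; vm_page_size is the Mach
 * global used throughout this file.
 */
#if 0
static void guardsize_example(void)
{
    pthread_attr_t attr;

    pthread_attr_init(&attr);
    pthread_attr_setguardsize(&attr, 0);                /* 0 is valid: no guard region */
    pthread_attr_setguardsize(&attr, 4 * vm_page_size); /* catch multi-page overflows */
    pthread_attr_destroy(&attr);
}
#endif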
/*
 * Create and start execution of a new thread.
 */

static void
_pthread_body(pthread_t self)
{
    _pthread_set_self(self);
    pthread_exit((self->fun)(self->arg));
}

int
_pthread_create(pthread_t t,
                const pthread_attr_t *attrs,
                void *stack,
                const mach_port_t kernel_thread)
{
    int res;
    res = 0;

    do
    {
        memset(t, 0, sizeof(*t));

        t->stacksize = attrs->stacksize;
        t->stackaddr = (void *)stack;
        t->guardsize = attrs->guardsize;
        t->kernel_thread = kernel_thread;
        t->detached = attrs->detached;
        t->inherit = attrs->inherit;
        t->policy = attrs->policy;
        t->param = attrs->param;
        t->freeStackOnExit = attrs->freeStackOnExit;
        t->mutexes = (struct _pthread_mutex *)NULL;
        t->sig = _PTHREAD_SIG;
        t->reply_port = MACH_PORT_NULL;
        t->cthread_self = NULL;
        LOCK_INIT(t->lock);
        t->plist.le_next = (struct _pthread *)0;
        t->plist.le_prev = (struct _pthread **)0;
        t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
        t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
        t->death = SEMAPHORE_NULL;

        if (kernel_thread != MACH_PORT_NULL)
            pthread_setschedparam(t, t->policy, &t->param);
    } while (0);
    return (res);
}
/* Need to deprecate this in future */
int
_pthread_is_threaded(void)
{
    return __is_threaded;
}

/* Non-portable public API to know whether this process has (or had) at least
 * one thread apart from the main thread. There could be a race if a thread is
 * in the process of creation at the time of the call. It does not tell whether
 * there is more than one thread at this point in time.
 */
int
pthread_is_threaded_np(void)
{
    return (__is_threaded);
}
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
    thread_t kernel_thread;

    /* Wait for the creator to initialize it */
    while ((kernel_thread = t->kernel_thread) == MACH_PORT_NULL)
        sched_yield();

    return kernel_thread;
}

size_t
pthread_get_stacksize_np(pthread_t t)
{
    return t->stacksize;
}

void *
pthread_get_stackaddr_np(pthread_t t)
{
    return t->stackaddr;
}

mach_port_t
_pthread_reply_port(pthread_t t)
{
    return t->reply_port;
}
/* returns non-zero if the current thread is the main thread */
int
pthread_main_np(void)
{
    pthread_t self = pthread_self();

    return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
}
static int
_pthread_create_suspended(pthread_t *thread,
                          const pthread_attr_t *attr,
                          void *(*start_routine)(void *),
                          void *arg,
                          int suspended)
{
    pthread_attr_t *attrs;
    void *stack;
    int res;
    pthread_t t;
    kern_return_t kern_res;
    mach_port_t kernel_thread = MACH_PORT_NULL;
    int needresume;

    if ((attrs = (pthread_attr_t *)attr) == (pthread_attr_t *)NULL)
    {   /* Set up default parameters */
        attrs = &_pthread_attr_default;
    } else if (attrs->sig != _PTHREAD_ATTR_SIG) {
        return EINVAL;
    }
    res = 0;

    /* In default policy (ie SCHED_OTHER) only sched_priority is used. Check if
     * any change in priority or policy is needed here.
     */
    if (((attrs->policy != _PTHREAD_DEFAULT_POLICY) ||
         (attrs->param.sched_priority != default_priority)) && (suspended == 0)) {
        needresume = 1;
        suspended = 1;
    } else
        needresume = 0;

    do
    {
        /* Allocate a stack for the thread */
        if ((res = _pthread_allocate_stack(attrs, &stack)) != 0) {
            break;
        }
        t = (pthread_t)malloc(sizeof(struct _pthread));
        *thread = t;
        if (suspended) {
            /* Create the Mach thread for this thread */
            PTHREAD_MACH_CALL(thread_create(mach_task_self(), &kernel_thread), kern_res);
            if (kern_res != KERN_SUCCESS)
            {
                printf("Can't create thread: %d\n", kern_res);
                res = EINVAL; /* Need better error here? */
                break;
            }
        }
        if ((res = _pthread_create(t, attrs, stack, kernel_thread)) != 0)
        {
            break;
        }
        set_malloc_singlethreaded(0);
        __is_threaded = 1;

        /* Send it on its way */
        t->arg = arg;
        t->fun = start_routine;
        /* Now set it up to execute */
        LOCK(_pthread_list_lock);
        LIST_INSERT_HEAD(&__pthread_head, t, plist);
        _pthread_count++;
        UNLOCK(_pthread_list_lock);
        _pthread_setup(t, _pthread_body, stack, suspended, needresume);
    } while (0);
    return (res);
}
int
pthread_create(pthread_t *thread,
               const pthread_attr_t *attr,
               void *(*start_routine)(void *),
               void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 0);
}

int
pthread_create_suspended_np(pthread_t *thread,
                            const pthread_attr_t *attr,
                            void *(*start_routine)(void *),
                            void *arg)
{
    return _pthread_create_suspended(thread, attr, start_routine, arg, 1);
}
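/*
 * Illustrative sketch (not part of this library): create a worker
 * suspended, finish setup it depends on, then let it run by resuming
 * the underlying Mach thread. start_worker_later is a hypothetical name.
 */
#if 0
#include <pthread.h>
#include <mach/mach.h>

static pthread_t start_worker_later(void *(*fn)(void *), void *arg)
{
    pthread_t t;

    pthread_create_suspended_np(&t, NULL, fn, arg);
    /* ... publish state the worker will read ... */
    thread_resume(pthread_mach_thread_np(t));   /* now it runs */
    return t;
}
#endif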
/*
 * Make a thread 'undetached' - no longer 'joinable' with other threads.
 */
int
pthread_detach(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        LOCK(thread->lock);
        if (thread->detached & PTHREAD_CREATE_JOINABLE)
        {
            if (thread->detached & _PTHREAD_EXITED) {
                UNLOCK(thread->lock);
                pthread_join(thread, NULL);
                return ESUCCESS;
            } else {
                semaphore_t death = thread->death;

                thread->detached &= ~PTHREAD_CREATE_JOINABLE;
                thread->detached |= PTHREAD_CREATE_DETACHED;
                UNLOCK(thread->lock);
                if (death)
                    (void) semaphore_signal(death);
                return (ESUCCESS);
            }
        } else {
            UNLOCK(thread->lock);
            return (EINVAL);
        }
    } else {
        return (ESRCH); /* Not a valid thread */
    }
}
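/*
 * Illustrative sketch (not part of this library): fire-and-forget thread.
 * After pthread_detach() the thread's resources are reclaimed automatically
 * on exit and it must not be joined. worker/fire_and_forget are hypothetical
 * names.
 */
#if 0
#include <pthread.h>

static void *worker(void *arg) { return arg; }

static void fire_and_forget(void)
{
    pthread_t t;

    if (pthread_create(&t, NULL, worker, NULL) == 0)
        pthread_detach(t);
}
#endif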
/*
 * pthread_kill call to system call
 */

extern int __pthread_kill(mach_port_t, int);

int
pthread_kill(pthread_t th, int sig)
{
    int error = 0;

    if ((sig < 0) || (sig > NSIG))
        return (EINVAL);

    if (th && (th->sig == _PTHREAD_SIG)) {
        error = __pthread_kill(pthread_mach_thread_np(th), sig);
        if (error == -1)
            error = errno;
        return (error);
    }
    else
        return (ESRCH);
}
/* Announce that there are pthread resources ready to be reclaimed in a */
/* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
/* thread underneath is terminated right away. */
static
void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) {
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND,
                                          MACH_MSG_TYPE_MOVE_SEND);
    msg.header.msgh_size = sizeof msg - sizeof msg.trailer;
    msg.header.msgh_remote_port = thread_recycle_port;
    msg.header.msgh_local_port = kernel_thread;
    msg.header.msgh_id = (int)thread;
    ret = mach_msg_send(&msg.header);
    assert(ret == MACH_MSG_SUCCESS);
}
/* Reap the resources for available threads */
static
int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) {
    mach_port_type_t ptype;
    kern_return_t ret;
    task_t self;

    self = mach_task_self();
    if (kernel_thread != MACH_PORT_DEAD) {
        ret = mach_port_type(self, kernel_thread, &ptype);
        if (ret == KERN_SUCCESS && ptype != MACH_PORT_TYPE_DEAD_NAME) {
            /* not quite dead yet... */
            return EAGAIN;
        }
        ret = mach_port_deallocate(self, kernel_thread);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_deallocate(kernel_thread) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->reply_port != MACH_PORT_NULL) {
        ret = mach_port_mod_refs(self, th->reply_port,
                                 MACH_PORT_RIGHT_RECEIVE, -1);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "mach_port_mod_refs(reply_port) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (th->freeStackOnExit) {
        vm_address_t addr = (vm_address_t)th->stackaddr;
        vm_size_t size;

        size = (vm_size_t)th->stacksize + th->guardsize;

        addr -= size;
        ret = vm_deallocate(self, addr, size);
        if (ret != KERN_SUCCESS) {
            fprintf(stderr,
                    "vm_deallocate(stack) failed: %s\n",
                    mach_error_string(ret));
        }
    }

    if (value_ptr)
        *value_ptr = th->exit_value;

    if (th != &_thread)
        free(th);

    return ESUCCESS;
}
static
void _pthread_reap_threads(void)
{
    mach_msg_empty_rcv_t msg;
    kern_return_t ret;

    ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                   sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                   MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    while (ret == MACH_MSG_SUCCESS) {
        mach_port_t kernel_thread = msg.header.msgh_remote_port;
        pthread_t thread = (pthread_t)msg.header.msgh_id;

        if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN)
        {
            /* not dead yet, put it back for someone else to reap, stop here */
            _pthread_become_available(thread, kernel_thread);
            return;
        }
        ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                       sizeof(mach_msg_empty_rcv_t), thread_recycle_port,
                       MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    }
}
/* For compatibility... */

pthread_t
_pthread_self(void) {
    return pthread_self();
}
/*
 * Terminate a thread.
 */
void
pthread_exit(void *value_ptr)
{
    struct _pthread_handler_rec *handler;
    pthread_t self = pthread_self();
    kern_return_t kern_res;
    int thread_count;

    /* Make this thread not receive any signals */
    syscall(331, 1);

    while ((handler = self->cleanup_stack) != 0)
    {
        (handler->routine)(handler->arg);
        self->cleanup_stack = handler->next;
    }
    _pthread_tsd_cleanup(self);

    _pthread_reap_threads();

    LOCK(self->lock);
    self->detached |= _PTHREAD_EXITED;

    if (self->detached & PTHREAD_CREATE_JOINABLE) {
        mach_port_t death = self->death;
        self->exit_value = value_ptr;
        UNLOCK(self->lock);
        /* the joiner will need a kernel thread reference, leave ours for it */
        if (death) {
            PTHREAD_MACH_CALL(semaphore_signal(death), kern_res);
            if (kern_res != KERN_SUCCESS)
                fprintf(stderr,
                        "semaphore_signal(death) failed: %s\n",
                        mach_error_string(kern_res));
        }
        LOCK(_pthread_list_lock);
        thread_count = --_pthread_count;
        UNLOCK(_pthread_list_lock);
    } else {
        UNLOCK(self->lock);
        LOCK(_pthread_list_lock);
        LIST_REMOVE(self, plist);
        thread_count = --_pthread_count;
        UNLOCK(_pthread_list_lock);
        /* with no joiner, we let become available consume our cached ref */
        _pthread_become_available(self, pthread_mach_thread_np(self));
    }

    if (thread_count <= 0)
        exit(0);

    /* Use a new reference to terminate ourselves. Should never return. */
    PTHREAD_MACH_CALL(thread_terminate(mach_thread_self()), kern_res);
    fprintf(stderr, "thread_terminate(mach_thread_self()) failed: %s\n",
            mach_error_string(kern_res));
    exit(-1);
}
/*
 * Wait for a thread to terminate and obtain its exit value.
 */
int
pthread_join(pthread_t thread,
             void **value_ptr)
{
    kern_return_t kern_res;
    int res = ESUCCESS;

    if (thread->sig == _PTHREAD_SIG)
    {
        semaphore_t death = new_sem_from_pool(); /* in case we need it */

        LOCK(thread->lock);
        if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
            thread->death == SEMAPHORE_NULL)
        {
            pthread_t self = pthread_self();

            assert(thread->joiner == NULL);
            if (thread != self && (self == NULL || self->joiner != thread))
            {
                int already_exited = (thread->detached & _PTHREAD_EXITED);

                thread->death = death;
                thread->joiner = self;
                UNLOCK(thread->lock);

                if (!already_exited)
                {
                    /* Wait for it to signal... */
                    do {
                        PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
                    } while (kern_res != KERN_SUCCESS);
                }

                LOCK(_pthread_list_lock);
                LIST_REMOVE(thread, plist);
                UNLOCK(_pthread_list_lock);
                /* ... and wait for it to really be dead */
                while ((res = _pthread_reap_thread(thread,
                                                   thread->kernel_thread,
                                                   value_ptr)) == EAGAIN)
                {
                    sched_yield();
                }
            } else {
                UNLOCK(thread->lock);
                res = EDEADLK;
            }
        } else {
            UNLOCK(thread->lock);
            res = EINVAL;
        }
        restore_sem_to_pool(death);
        return res;
    }
    return ESRCH;
}
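/*
 * Illustrative sketch (not part of this library): the value handed to
 * pthread_exit() is what pthread_join() stores through its value_ptr
 * argument. compute/join_example are hypothetical names.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static void *compute(void *arg)
{
    (void)arg;
    pthread_exit((void *)42);   /* equivalent to returning (void *)42 */
}

static void join_example(void)
{
    pthread_t t;
    void *result;

    pthread_create(&t, NULL, compute, NULL);
    pthread_join(t, &result);   /* blocks until compute() exits */
    printf("worker returned %ld\n", (long)result);
}
#endif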
/*
 * Get the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        *policy = thread->policy;
        *param = thread->param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}
/*
 * Set the scheduling policy and scheduling parameters for a thread.
 */
int
pthread_setschedparam(pthread_t thread,
                      int policy,
                      const struct sched_param *param)
{
    policy_base_data_t bases;
    policy_base_t base;
    mach_msg_type_number_t count;
    kern_return_t ret;

    if (thread->sig == _PTHREAD_SIG)
    {
        switch (policy)
        {
        case SCHED_OTHER:
            bases.ts.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.ts;
            count = POLICY_TIMESHARE_BASE_COUNT;
            break;
        case SCHED_FIFO:
            bases.fifo.base_priority = param->sched_priority;
            base = (policy_base_t)&bases.fifo;
            count = POLICY_FIFO_BASE_COUNT;
            break;
        case SCHED_RR:
            bases.rr.base_priority = param->sched_priority;
            /* quantum isn't public yet */
            bases.rr.quantum = param->quantum;
            base = (policy_base_t)&bases.rr;
            count = POLICY_RR_BASE_COUNT;
            break;
        default:
            return (EINVAL);
        }
        ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
        if (ret != KERN_SUCCESS)
        {
            return (EINVAL);
        }
        thread->policy = policy;
        thread->param = *param;
        return (ESUCCESS);
    } else
    {
        return (ESRCH); /* Not a valid thread structure */
    }
}
/*
 * Get the minimum priority for the given policy
 */
int
sched_get_priority_min(int policy)
{
    return default_priority - 16;
}

/*
 * Get the maximum priority for the given policy
 */
int
sched_get_priority_max(int policy)
{
    return default_priority + 16;
}
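/*
 * Illustrative sketch (not part of this library): bump a thread to SCHED_RR
 * at the top of the band reported above. pthread_getschedparam() is called
 * first so fields other than sched_priority (e.g. the private quantum used
 * by the SCHED_RR case) stay initialized. make_round_robin is a hypothetical
 * name.
 */
#if 0
#include <pthread.h>
#include <sched.h>

static int make_round_robin(pthread_t t)
{
    struct sched_param param;
    int policy;

    pthread_getschedparam(t, &policy, &param);
    param.sched_priority = sched_get_priority_max(SCHED_RR);
    return pthread_setschedparam(t, SCHED_RR, &param);
}
#endif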
/*
 * Determine if two thread identifiers represent the same thread.
 */
int
pthread_equal(pthread_t t1,
              pthread_t t2)
{
    return (t1 == t2);
}
__private_extern__ void
_pthread_set_self(pthread_t p)
{
    extern void __pthread_set_self(pthread_t);
    if (p == 0) {
        bzero(&_thread, sizeof(struct _pthread));
        p = &_thread;
    }
    p->tsd[0] = p;
    __pthread_set_self(p);
}
void
cthread_set_self(void *cself)
{
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        _pthread_set_self(cself);
        return;
    }
    self->cthread_self = cself;
}

void *
ur_cthread_self(void) {
    pthread_t self = pthread_self();
    if ((self == (pthread_t)NULL) || (self->sig != _PTHREAD_SIG)) {
        return (void *)self;
    }
    return self->cthread_self;
}
/*
 * Execute a function exactly one time in a thread-safe fashion.
 */
int
pthread_once(pthread_once_t *once_control,
             void (*init_routine)(void))
{
    _spin_lock(&once_control->lock);
    if (once_control->sig == _PTHREAD_ONCE_SIG_init)
    {
        (*init_routine)();
        once_control->sig = _PTHREAD_ONCE_SIG;
    }
    _spin_unlock(&once_control->lock);
    return (ESUCCESS); /* Spec defines no possible errors! */
}
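/*
 * Illustrative sketch (not part of this library): lazy one-time setup.
 * Every caller funnels through init_once(); only the first actually runs
 * init_tables(). once/init_tables/init_once are hypothetical names.
 */
#if 0
#include <pthread.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void init_tables(void) { /* ... build shared state ... */ }

static void init_once(void)
{
    pthread_once(&once, init_tables);
}
#endif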
int
pthread_cancel(pthread_t thread)
{
    if (thread->sig == _PTHREAD_SIG)
    {
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
        return (ESUCCESS);
    } else
    {
        return (ESRCH);
    }
}
/*
 * Insert a cancellation point in a thread.
 */
static void
_pthread_testcancel(pthread_t thread)
{
    LOCK(thread->lock);
    if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
        (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
    {
        UNLOCK(thread->lock);
        pthread_exit(0);
    }
    UNLOCK(thread->lock);
}

void
pthread_testcancel(void)
{
    pthread_t self = pthread_self();
    _pthread_testcancel(self);
}
/*
 * Query/update the cancelability 'state' of a thread
 */
int
pthread_setcancelstate(int state, int *oldstate)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;

    LOCK(self->lock);
    if (oldstate)
        *oldstate = self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK;
    if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_STATE_MASK) | state;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return err;
}
/*
 * Query/update the cancelability 'type' of a thread
 */
int
pthread_setcanceltype(int type, int *oldtype)
{
    pthread_t self = pthread_self();
    int err = ESUCCESS;

    LOCK(self->lock);
    if (oldtype)
        *oldtype = self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK;
    if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS))
    {
        self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK) | type;
    } else
    {
        err = EINVAL;
    }
    UNLOCK(self->lock);
    _pthread_testcancel(self); /* See if we need to 'die' now... */
    return err;
}
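/*
 * Illustrative sketch (not part of this library): shield a critical section
 * from the deferred cancellation implemented above, then poll for a pending
 * cancel. cancellable_worker is a hypothetical name.
 */
#if 0
#include <pthread.h>

static void *cancellable_worker(void *arg)
{
    int old;

    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
    /* ... must-finish work: no cancellation can fire here ... */
    pthread_setcancelstate(old, NULL);

    for (;;) {
        /* ... a chunk of interruptible work ... */
        pthread_testcancel();   /* exits here if pthread_cancel() was called */
    }
    return arg;
}
#endif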
int
pthread_getconcurrency(void)
{
    return (pthread_concurrency);
}

int
pthread_setconcurrency(int new_level)
{
    pthread_concurrency = new_level;
    return (ESUCCESS);
}
/*
 * Perform package initialization - called automatically when application starts
 */

static int
pthread_init(void)
{
    pthread_attr_t *attrs;
    pthread_t thread;
    kern_return_t kr;
    host_basic_info_data_t basic_info;
    host_priority_info_data_t priority_info;
    host_info_t info;
    host_flavor_t flavor;
    host_t host;
    mach_msg_type_number_t count;
    int mib[2];
    size_t len;
    int numcpus;

    count = HOST_PRIORITY_INFO_COUNT;
    info = (host_info_t)&priority_info;
    flavor = HOST_PRIORITY_INFO;
    host = mach_host_self();
    kr = host_info(host, flavor, info, &count);
    if (kr != KERN_SUCCESS)
        printf("host_info failed (%d); probably need privilege.\n", kr);
    else {
        default_priority = priority_info.user_priority;
        min_priority = priority_info.minimum_priority;
        max_priority = priority_info.maximum_priority;
    }
    attrs = &_pthread_attr_default;
    pthread_attr_init(attrs);

    LIST_INIT(&__pthread_head);
    LOCK_INIT(_pthread_list_lock);
    thread = &_thread;
    LIST_INSERT_HEAD(&__pthread_head, thread, plist);
    _pthread_set_self(thread);
    _pthread_create(thread, attrs, (void *)USRSTACK, mach_thread_self());
    thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;

    /* See if we're on a multiprocessor and set _spin_tries if so. */
    mib[0] = CTL_HW;
    mib[1] = HW_NCPU;
    len = sizeof(numcpus);
    if (sysctl(mib, 2, &numcpus, &len, NULL, 0) == 0) {
        if (numcpus > 1) {
            _spin_tries = MP_SPIN_TRIES;
        }
    } else {
        count = HOST_BASIC_INFO_COUNT;
        info = (host_info_t)&basic_info;
        flavor = HOST_BASIC_INFO;
        kr = host_info(host, flavor, info, &count);
        if (kr != KERN_SUCCESS)
            printf("host_info failed (%d)\n", kr);
        else if (basic_info.avail_cpus > 1)
            _spin_tries = MP_SPIN_TRIES;
    }

    mach_port_deallocate(mach_task_self(), host);

    _init_cpu_capabilities();

#if defined(__ppc__)
    /* Use fsqrt instruction in sqrt() if available. */
    if (_cpu_capabilities & kHasFsqrt) {
        extern size_t hw_sqrt_len;
        extern double sqrt( double );
        extern double hw_sqrt( double );
        extern void sys_icache_invalidate(void *, size_t);

        memcpy((void *)sqrt, (void *)hw_sqrt, hw_sqrt_len);
        sys_icache_invalidate((void *)sqrt, hw_sqrt_len);
    }
#endif

    mig_init(1); /* enable multi-threaded mig interfaces */
    return 0;
}
int sched_yield(void)
{
    swtch_pri(0);
    return 0;
}
/* This is the "magic" that gets the initialization routine called when the application starts */
int (*_cthread_init_routine)(void) = pthread_init;
/* Get a semaphore from the pool, growing it if necessary */
__private_extern__ semaphore_t new_sem_from_pool(void) {
    kern_return_t res;
    semaphore_t sem;
    int i;

    LOCK(sem_pool_lock);
    if (sem_pool_current == sem_pool_count) {
        sem_pool_count += 16;
        sem_pool = realloc(sem_pool, sem_pool_count * sizeof(semaphore_t));
        for (i = sem_pool_current; i < sem_pool_count; i++) {
            PTHREAD_MACH_CALL(semaphore_create(mach_task_self(), &sem_pool[i], SYNC_POLICY_FIFO, 0), res);
        }
    }
    sem = sem_pool[sem_pool_current++];
    UNLOCK(sem_pool_lock);
    return sem;
}
/* Put a semaphore back into the pool */
__private_extern__ void restore_sem_to_pool(semaphore_t sem) {
    LOCK(sem_pool_lock);
    sem_pool[--sem_pool_current] = sem;
    UNLOCK(sem_pool_lock);
}
static void sem_pool_reset(void) {
    LOCK(sem_pool_lock);
    sem_pool_count = 0;
    sem_pool_current = 0;
    sem_pool = NULL;
    UNLOCK(sem_pool_lock);
}
__private_extern__ void _pthread_fork_child(pthread_t p) {
    /* Just in case somebody had it locked... */
    UNLOCK(sem_pool_lock);
    sem_pool_reset();
    /* No need to hold the pthread_list_lock as no one other than this
     * thread is present at this time
     */
    LIST_INIT(&__pthread_head);
    LOCK_INIT(_pthread_list_lock);
    LIST_INSERT_HEAD(&__pthread_head, p, plist);
    _pthread_count = 1;
}