X-Git-Url: https://git.saurik.com/apple/libc.git/blobdiff_plain/5b2abdfbf4211b6592cdd02b9507555a0ecbb04b..20d7cd4c186bcbb50f0bb56ce882b5680664d965:/pthreads/pthread.c?ds=inline

diff --git a/pthreads/pthread.c b/pthreads/pthread.c
index 14d7446..b2cc915 100644
--- a/pthreads/pthread.c
+++ b/pthreads/pthread.c
@@ -1,3 +1,25 @@
+/*
+ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
 /*
  * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
  * All Rights Reserved
@@ -27,7 +49,8 @@
  * POSIX Pthread Library
  */
 
-#define __POSIX_LIB__
+#include "pthread_internals.h"
+
 #include <assert.h>
 #include <stdio.h>	/* For printf(). */
 #include <stdlib.h>
@@ -35,22 +58,39 @@
 #include <errno.h>
 #include <sys/time.h>
 #include <sys/resource.h>
+#include <sys/queue.h>
 #include <sys/sysctl.h>
 #include <machine/vmparam.h>
 #include <mach/vm_statistics.h>
+#define __APPLE_API_PRIVATE
+#include <machine/cpu_capabilities.h>
 
-#include "pthread_internals.h"
+
+#ifndef BUILDING_VARIANT /* [ */
+
+__private_extern__ struct __pthread_list __pthread_head = LIST_HEAD_INITIALIZER(&__pthread_head);
 
 /* Per-thread kernel support */
 extern void _pthread_set_self(pthread_t);
 extern void mig_init(int);
 
+/* Get CPU capabilities from the kernel */
+__private_extern__ void _init_cpu_capabilities(void);
+
 /* Needed to tell the malloc subsystem we're going multithreaded */
 extern void set_malloc_singlethreaded(int);
 
 /* Used when we need to call into the kernel with no reply port */
 extern pthread_lock_t reply_port_lock;
 
+/* Mach message used to notify that a thread needs to be reaped */
+
+typedef struct _pthread_reap_msg_t {
+	mach_msg_header_t header;
+	pthread_t thread;
+	mach_msg_trailer_t trailer;
+} pthread_reap_msg_t;
+
 /* We'll implement this when the main thread is a pthread */
 /* Use the local _pthread struct to avoid malloc before our MiG reply port is set */
 static struct _pthread _thread = {0};
@@ -59,9 +99,12 @@ static struct _pthread _thread = {0};
 ** pthread has been created.
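+** A hedged sketch of the idiom this flag enables (the real LOCK() macro
+** lives in pthread_internals.h; this is only illustrative):
+**
+**	if (__is_threaded)		/* any chance of contention? */
+**		_spin_lock(&some_lock);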
 */
 int __is_threaded = 0;
+/* _pthread_count is protected by _pthread_list_lock */
 static int _pthread_count = 1;
 
+int __unix_conforming = 0;
+
-static pthread_lock_t _pthread_count_lock = LOCK_INITIALIZER;
+__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
 
 /* Same implementation as LOCK, but without the __is_threaded check */
 int _spin_tries = 0;
@@ -76,9 +119,6 @@ __private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
 	} while(!_spin_lock_try(lock));
 }
 
-/* Apparently, bcopy doesn't declare _cpu_has_altivec anymore */
-int _cpu_has_altivec = 0;
-
 extern mach_port_t thread_recycle_port;
 
 /* These are used to keep track of a semaphore pool shared by mutexes and condition
@@ -95,6 +135,8 @@
 static int max_priority;
 static int min_priority;
 static int pthread_concurrency;
 
+static void _pthread_exit(pthread_t self, void *value_ptr);
+
 /*
  * [Internal] stack support
  */
@@ -102,18 +144,6 @@
 size_t _pthread_stack_size = 0;
 #define STACK_LOWEST(sp)	((sp) & ~__pthread_stack_mask)
 #define STACK_RESERVED		(sizeof (struct _pthread))
 
-#ifdef STACK_GROWS_UP
-
-/* The stack grows towards higher addresses:
-     |struct _pthread|user stack---------------->|
-     ^STACK_BASE     ^STACK_START
-     ^STACK_SELF
-     ^STACK_LOWEST */
-#define STACK_BASE(sp) STACK_LOWEST(sp)
-#define STACK_START(stack_low) (STACK_BASE(stack_low) + STACK_RESERVED)
-#define STACK_SELF(sp) STACK_BASE(sp)
-
-#else
 
 /* The stack grows towards lower addresses:
     |<----------------user stack|struct _pthread|
@@ -124,9 +154,7 @@ size_t _pthread_stack_size = 0;
 #define STACK_START(stack_low) (STACK_BASE(stack_low) - STACK_RESERVED)
 #define STACK_SELF(sp) STACK_START(sp)
 
-#endif
-
-#if defined(__ppc__)
+#if defined(__ppc__) || defined(__ppc64__)
 static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
 #elif defined(__i386__)
 static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
@@ -134,44 +162,47 @@ static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
 #error Need to define a stack address hint for this architecture
 #endif
 
-/* Set the base address to use as the stack pointer, before adjusting due to the ABI */
+/* Set the base address to use as the stack pointer, before adjusting due to the ABI.
+ * The guard pages for stack-overflow protection are also allocated here.
+ * If the stack was already allocated (stackaddr in attr), then no guard pages
+ * are set up for the thread.
+ */
 static int
 _pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
 {
 	kern_return_t kr;
+	vm_address_t stackaddr;
+	size_t guardsize;
 #if 1
 	assert(attrs->stacksize >= PTHREAD_STACK_MIN);
 	if (attrs->stackaddr != NULL) {
-		assert(((vm_address_t)(attrs->stackaddr) & (vm_page_size - 1)) == 0);
+		/* No guard pages are set up in this case */
+		assert(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
 		*stack = attrs->stackaddr;
 		return 0;
	}
-	*((vm_address_t *)stack) = PTHREAD_STACK_HINT;
-	kr = vm_map(mach_task_self(), (vm_address_t *)stack,
-		attrs->stacksize + vm_page_size,
+
+	guardsize = attrs->guardsize;
+	stackaddr = PTHREAD_STACK_HINT;
+	kr = vm_map(mach_task_self(), &stackaddr,
+		attrs->stacksize + guardsize,
 		vm_page_size-1,
 		VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , MEMORY_OBJECT_NULL,
 		0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
 		VM_INHERIT_DEFAULT);
 	if (kr != KERN_SUCCESS)
 		kr = vm_allocate(mach_task_self(),
-			(vm_address_t *)stack, attrs->stacksize + vm_page_size,
+			&stackaddr, attrs->stacksize + guardsize,
 			VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
 	if (kr != KERN_SUCCESS) {
 		return EAGAIN;
 	}
-#ifdef STACK_GROWS_UP
-	/* The guard page is the page one higher than the stack */
-	/* The stack base is at the lowest address */
-	kr = vm_protect(mach_task_self(), *stack + attrs->stacksize, vm_page_size, FALSE, VM_PROT_NONE);
-#else
 	/* The guard page is at the lowest address */
 	/* The stack base is the highest address */
-	kr = vm_protect(mach_task_self(), (vm_address_t)*stack, vm_page_size, FALSE, VM_PROT_NONE);
-	*stack += attrs->stacksize + vm_page_size;
-#endif
+	if (guardsize)
+		kr = vm_protect(mach_task_self(), stackaddr, guardsize, FALSE, VM_PROT_NONE);
+	*stack = (void *)(stackaddr + attrs->stacksize + guardsize);
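+	/*
+	 * Resulting layout of the mapping created above (the stack grows
+	 * down from *stack):
+	 *
+	 *   stackaddr           stackaddr + guardsize            *stack
+	 *   |<--- guardsize --->|<---------- stacksize --------->|
+	 *   [ VM_PROT_NONE guard ][ usable thread stack          ]
+	 */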
 #else
	vm_address_t cur_stack = (vm_address_t)0;
@@ -202,18 +233,11 @@ _pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
				 FALSE);
 #ifndef	NO_GUARD_PAGES
		if (kr == KERN_SUCCESS) {
-#	ifdef	STACK_GROWS_UP
-		    kr = vm_protect(mach_task_self(),
-				    lowest_stack+__pthread_stack_size,
-				    __pthread_stack_size,
-				    FALSE, VM_PROT_NONE);
-#	else	/* STACK_GROWS_UP */
		    kr = vm_protect(mach_task_self(),
				    lowest_stack,
				    __pthread_stack_size,
				    FALSE, VM_PROT_NONE);
		    lowest_stack += __pthread_stack_size;
-#	endif	/* STACK_GROWS_UP */
		    if (kr == KERN_SUCCESS)
			break;
		}
@@ -239,18 +263,11 @@ _pthread_allocate_stack(pthread_attr_t *attrs, void **stack)
			   know what to do.  */
 #ifndef	NO_GUARD_PAGES
		if (kr == KERN_SUCCESS) {
-#	ifdef	STACK_GROWS_UP
-		    kr = vm_protect(mach_task_self(),
-				    lowest_stack+__pthread_stack_size,
-				    __pthread_stack_size,
-				    FALSE, VM_PROT_NONE);
-#	else	/* STACK_GROWS_UP */
		    kr = vm_protect(mach_task_self(),
				    lowest_stack,
				    __pthread_stack_size,
				    FALSE, VM_PROT_NONE);
		    lowest_stack += __pthread_stack_size;
-#	endif	/* STACK_GROWS_UP */
		}
 #endif
		free_stacks = (vm_address_t *)lowest_stack;
@@ -354,7 +371,8 @@ pthread_attr_getschedpolicy(const pthread_attr_t *attr,
     }
 }
 
-static const size_t DEFAULT_STACK_SIZE = DFLSSIZ;
+/* Retain the existing stack size of 512K, rather than depending on the main
+ * thread's default stack size */
+static const size_t DEFAULT_STACK_SIZE = (512*1024);
 /*
  * Initialize a thread attribute structure to default values.
  */
@@ -364,12 +382,13 @@ pthread_attr_init(pthread_attr_t *attr)
	attr->stacksize = DEFAULT_STACK_SIZE;
	attr->stackaddr = NULL;
	attr->sig = _PTHREAD_ATTR_SIG;
-	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->param.sched_priority = default_priority;
	attr->param.quantum = 10; /* quantum isn't public yet */
-	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
	attr->detached = PTHREAD_CREATE_JOINABLE;
+	attr->inherit = _PTHREAD_DEFAULT_INHERITSCHED;
+	attr->policy = _PTHREAD_DEFAULT_POLICY;
	attr->freeStackOnExit = TRUE;
+	attr->guardsize = vm_page_size;
	return (ESUCCESS);
 }
 
@@ -517,7 +536,7 @@ pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
 int
 pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
 {
-    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0)) {
+    if ((attr->sig == _PTHREAD_ATTR_SIG) && (((uintptr_t)stackaddr % vm_page_size) == 0)) {
         attr->stackaddr = stackaddr;
         attr->freeStackOnExit = FALSE;
         return (ESUCCESS);
@@ -552,7 +571,7 @@ int
 pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * stacksize)
 {
     if (attr->sig == _PTHREAD_ATTR_SIG) {
-        *stackaddr = attr->stackaddr;
+        *stackaddr = (void *)((uintptr_t)attr->stackaddr - attr->stacksize);
         *stacksize = attr->stacksize;
         return (ESUCCESS);
     } else {
@@ -560,21 +579,58 @@ pthread_attr_getstack(const pthread_attr_t *attr, void **stackaddr, size_t * sta
     }
 }
 
+/* By the SUS spec, the stackaddr is the base address, i.e. the lowest
+ * addressable byte of the stack region.
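+ *
+ * A usage sketch (hypothetical caller code; valloc() returns page-aligned
+ * memory):
+ *
+ *	size_t size = 16 * vm_page_size;
+ *	void *base = valloc(size);	/* lowest addressable byte */
+ *	pthread_attr_setstack(&attr, base, size);
+ *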
+ * This is not the same as in pthread_attr_setstackaddr.
+ */
 int
 pthread_attr_setstack(pthread_attr_t *attr,
	void *stackaddr, size_t stacksize)
 {
     if ((attr->sig == _PTHREAD_ATTR_SIG) &&
-        (((vm_offset_t)stackaddr & (vm_page_size - 1)) == 0) &&
-        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
-        attr->stackaddr = stackaddr;
-        attr->freeStackOnExit = FALSE;
+        (((uintptr_t)stackaddr % vm_page_size) == 0) &&
+        ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
+        attr->stackaddr = (void *)((uintptr_t)stackaddr + stacksize);
         attr->stacksize = stacksize;
+        attr->freeStackOnExit = FALSE;
         return (ESUCCESS);
     } else {
         return (EINVAL); /* Not an attribute structure! */
     }
 }
+
+/*
+ * Set the guardsize attribute in the attr.
+ */
+int
+pthread_attr_setguardsize(pthread_attr_t *attr,
+			size_t guardsize)
+{
+    if (attr->sig == _PTHREAD_ATTR_SIG) {
+	/* Guardsize of 0 is valid, it means no guard */
+	if ((guardsize % vm_page_size) == 0) {
+		attr->guardsize = guardsize;
+		return (ESUCCESS);
+	} else
+		return(EINVAL);
+    }
+    return (EINVAL); /* Not an attribute structure! */
+}
+
+/*
+ * Get the guardsize attribute in the attr.
+ */
+int
+pthread_attr_getguardsize(const pthread_attr_t *attr,
+			size_t *guardsize)
+{
+    if (attr->sig == _PTHREAD_ATTR_SIG) {
+	*guardsize = attr->guardsize;
+	return (ESUCCESS);
+    }
+    return (EINVAL); /* Not an attribute structure! */
+}
+
+
 /*
  * Create and start execution of a new thread.
  */
@@ -583,7 +639,7 @@ static void
 _pthread_body(pthread_t self)
 {
	_pthread_set_self(self);
-	pthread_exit((self->fun)(self->arg));
+	_pthread_exit(self, (self->fun)(self->arg));
 }
 
 int
@@ -598,8 +654,10 @@ _pthread_create(pthread_t t,
	do
	{
		memset(t, 0, sizeof(*t));
+		t->tsd[0] = t;
		t->stacksize = attrs->stacksize;
		t->stackaddr = (void *)stack;
+		t->guardsize = attrs->guardsize;
		t->kernel_thread = kernel_thread;
		t->detached = attrs->detached;
		t->inherit = attrs->inherit;
@@ -611,8 +669,10 @@ _pthread_create(pthread_t t,
		t->reply_port = MACH_PORT_NULL;
		t->cthread_self = NULL;
		LOCK_INIT(t->lock);
+		t->plist.le_next = (struct _pthread *)0;
+		t->plist.le_prev = (struct _pthread **)0;
		t->cancel_state = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
-		t->cleanup_stack = (struct _pthread_handler_rec *)NULL;
+		t->__cleanup_stack = (struct __darwin_pthread_handler_rec *)NULL;
		t->death = SEMAPHORE_NULL;
 
		if (kernel_thread != MACH_PORT_NULL)
@@ -736,14 +796,15 @@ _pthread_create_suspended(pthread_t *thread,
		}
		set_malloc_singlethreaded(0);
		__is_threaded = 1;
-		LOCK(_pthread_count_lock);
-		_pthread_count++;
-		UNLOCK(_pthread_count_lock);
 
		/* Send it on its way */
		t->arg = arg;
		t->fun = start_routine;
		/* Now set it up to execute */
+		LOCK(_pthread_list_lock);
+		LIST_INSERT_HEAD(&__pthread_head, t, plist);
+		_pthread_count++;
+		UNLOCK(_pthread_list_lock);
		_pthread_setup(t, _pthread_body, stack, suspended, needresume);
	} while (0);
	return (res);
@@ -806,6 +867,7 @@ pthread_detach(pthread_t thread)
  * pthread_kill call to system call
  */
 
+extern int __pthread_kill(mach_port_t, int);
 
 int
 pthread_kill (
@@ -832,7 +894,7 @@ pthread_kill (
 
 /* thread underneath is terminated right away.
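+ * The reaping handshake: the dying thread queues a pthread_reap_msg_t
+ * carrying its pthread_t on thread_recycle_port (msgh_id 0x44454144,
+ * 'DEAD'); _pthread_reap_threads() later drains that port and deallocates
+ * each dead thread's stack, including its guard area (stacksize +
+ * guardsize below the stack base).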
*/ static void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) { - mach_msg_empty_rcv_t msg; + pthread_reap_msg_t msg; kern_return_t ret; msg.header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, @@ -840,13 +902,14 @@ void _pthread_become_available(pthread_t thread, mach_port_t kernel_thread) { msg.header.msgh_size = sizeof msg - sizeof msg.trailer; msg.header.msgh_remote_port = thread_recycle_port; msg.header.msgh_local_port = kernel_thread; - msg.header.msgh_id = (int)thread; + msg.header.msgh_id = 0x44454144; /* 'DEAD' */ + msg.thread = thread; ret = mach_msg_send(&msg.header); assert(ret == MACH_MSG_SUCCESS); } /* Reap the resources for available threads */ -static +__private_extern__ int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr) { mach_port_type_t ptype; kern_return_t ret; @@ -881,11 +944,9 @@ int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_p vm_address_t addr = (vm_address_t)th->stackaddr; vm_size_t size; - size = (vm_size_t)th->stacksize + vm_page_size; + size = (vm_size_t)th->stacksize + th->guardsize; -#if !defined(STACK_GROWS_UP) addr -= size; -#endif ret = vm_deallocate(self, addr, size); if (ret != KERN_SUCCESS) { fprintf(stderr, @@ -906,15 +967,15 @@ int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_p static void _pthread_reap_threads(void) { - mach_msg_empty_rcv_t msg; + pthread_reap_msg_t msg; kern_return_t ret; ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, - sizeof(mach_msg_empty_rcv_t), thread_recycle_port, + sizeof msg, thread_recycle_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); while (ret == MACH_MSG_SUCCESS) { mach_port_t kernel_thread = msg.header.msgh_remote_port; - pthread_t thread = (pthread_t)msg.header.msgh_id; + pthread_t thread = msg.thread; if (_pthread_reap_thread(thread, kernel_thread, (void **)0) == EAGAIN) { @@ -923,7 +984,7 @@ void _pthread_reap_threads(void) return; } ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0, - sizeof(mach_msg_empty_rcv_t), thread_recycle_port, + sizeof msg, thread_recycle_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); } } @@ -938,21 +999,20 @@ _pthread_self() { /* * Terminate a thread. 
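+ * On the way out, _pthread_exit() first pops and runs every handler on the
+ * __cleanup_stack in LIFO order, then destroys thread-specific data.  A
+ * hedged caller-side sketch (unlock_fn, free_fn, m and buf are
+ * hypothetical):
+ *
+ *	pthread_cleanup_push(unlock_fn, &m);	/* pushed first, runs last */
+ *	pthread_cleanup_push(free_fn, buf);	/* pushed last, runs first */
+ *	...			/* pthread_exit() here would run both */
+ *	pthread_cleanup_pop(1);	/* runs free_fn(buf)   */
+ *	pthread_cleanup_pop(1);	/* runs unlock_fn(&m)  */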
 */
-void
-pthread_exit(void *value_ptr)
+static void
+_pthread_exit(pthread_t self, void *value_ptr)
 {
-	struct _pthread_handler_rec *handler;
-	pthread_t self = pthread_self();
+	struct __darwin_pthread_handler_rec *handler;
	kern_return_t kern_res;
	int thread_count;
 
	/* Make this thread not receive any signals */
	syscall(331,1);
 
-	while ((handler = self->cleanup_stack) != 0)
+	while ((handler = self->__cleanup_stack) != 0)
	{
-		(handler->routine)(handler->arg);
-		self->cleanup_stack = handler->next;
+		(handler->__routine)(handler->__arg);
+		self->__cleanup_stack = handler->__next;
	}
	_pthread_tsd_cleanup(self);
 
@@ -973,15 +1033,19 @@
				       "semaphore_signal(death) failed: %s\n",
				       mach_error_string(kern_res));
		}
+		LOCK(_pthread_list_lock);
+		thread_count = --_pthread_count;
+		UNLOCK(_pthread_list_lock);
	} else {
		UNLOCK(self->lock);
+		LOCK(_pthread_list_lock);
+		LIST_REMOVE(self, plist);
+		thread_count = --_pthread_count;
+		UNLOCK(_pthread_list_lock);
		/* with no joiner, we let become available consume our cached ref */
		_pthread_become_available(self, pthread_mach_thread_np(self));
	}
 
-	LOCK(_pthread_count_lock);
-	thread_count = --_pthread_count;
-	UNLOCK(_pthread_count_lock);
	if (thread_count <= 0)
		exit(0);
 
@@ -992,62 +1056,10 @@
	abort();
 }
 
-/*
- * Wait for a thread to terminate and obtain its exit value.
- */
-int
-pthread_join(pthread_t thread,
-	     void **value_ptr)
+void
+pthread_exit(void *value_ptr)
 {
-	kern_return_t kern_res;
-	int res = ESUCCESS;
-
-	if (thread->sig == _PTHREAD_SIG)
-	{
-		semaphore_t death = new_sem_from_pool(); /* in case we need it */
-
-		LOCK(thread->lock);
-		if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
-			thread->death == SEMAPHORE_NULL)
-		{
-			pthread_t self = pthread_self();
-
-			assert(thread->joiner == NULL);
-			if (thread != self && (self == NULL || self->joiner != thread))
-			{
-				int already_exited = (thread->detached & _PTHREAD_EXITED);
-
-				thread->death = death;
-				thread->joiner = self;
-				UNLOCK(thread->lock);
-
-				if (!already_exited)
-				{
-					/* Wait for it to signal... */
-					do {
-						PTHREAD_MACH_CALL(semaphore_wait(death), kern_res);
-					} while (kern_res != KERN_SUCCESS);
-				}
-
-				/* ...
and wait for it to really be dead */ - while ((res = _pthread_reap_thread(thread, - thread->kernel_thread, - value_ptr)) == EAGAIN) - { - sched_yield(); - } - } else { - UNLOCK(thread->lock); - res = EDEADLK; - } - } else { - UNLOCK(thread->lock); - res = EINVAL; - } - restore_sem_to_pool(death); - return res; - } - return ESRCH; + _pthread_exit(pthread_self(), value_ptr); } /* @@ -1106,13 +1118,13 @@ pthread_setschedparam(pthread_t thread, default: return (EINVAL); } - thread->policy = policy; - thread->param = *param; ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE); if (ret != KERN_SUCCESS) { return (EINVAL); } + thread->policy = policy; + thread->param = *param; return (ESUCCESS); } else { @@ -1148,6 +1160,18 @@ pthread_equal(pthread_t t1, return (t1 == t2); } +__private_extern__ void +_pthread_set_self(pthread_t p) +{ + extern void __pthread_set_self(pthread_t); + if (p == 0) { + bzero(&_thread, sizeof(struct _pthread)); + p = &_thread; + } + p->tsd[0] = p; + __pthread_set_self(p); +} + void cthread_set_self(void *cself) { @@ -1175,100 +1199,36 @@ int pthread_once(pthread_once_t *once_control, void (*init_routine)(void)) { - LOCK(once_control->lock); + _spin_lock(&once_control->lock); if (once_control->sig == _PTHREAD_ONCE_SIG_init) { (*init_routine)(); once_control->sig = _PTHREAD_ONCE_SIG; } - UNLOCK(once_control->lock); + _spin_unlock(&once_control->lock); return (ESUCCESS); /* Spec defines no possible errors! */ } -/* - * Cancel a thread - */ -int -pthread_cancel(pthread_t thread) -{ - if (thread->sig == _PTHREAD_SIG) - { - thread->cancel_state |= _PTHREAD_CANCEL_PENDING; - return (ESUCCESS); - } else - { - return (ESRCH); - } -} - /* * Insert a cancellation point in a thread. */ -static void -_pthread_testcancel(pthread_t thread) +__private_extern__ void +_pthread_testcancel(pthread_t thread, int isconforming) { LOCK(thread->lock); if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) == (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) { UNLOCK(thread->lock); - pthread_exit(0); + if (isconforming) + pthread_exit(PTHREAD_CANCELED); + else + pthread_exit(0); } UNLOCK(thread->lock); } -void -pthread_testcancel(void) -{ - pthread_t self = pthread_self(); - _pthread_testcancel(self); -} -/* - * Query/update the cancelability 'state' of a thread - */ -int -pthread_setcancelstate(int state, int *oldstate) -{ - pthread_t self = pthread_self(); - int err = ESUCCESS; - LOCK(self->lock); - if (oldstate) - *oldstate = self->cancel_state & ~_PTHREAD_CANCEL_STATE_MASK; - if ((state == PTHREAD_CANCEL_ENABLE) || (state == PTHREAD_CANCEL_DISABLE)) - { - self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_STATE_MASK) | state; - } else - { - err = EINVAL; - } - UNLOCK(self->lock); - _pthread_testcancel(self); /* See if we need to 'die' now... */ - return (err); -} - -/* - * Query/update the cancelability 'type' of a thread - */ -int -pthread_setcanceltype(int type, int *oldtype) -{ - pthread_t self = pthread_self(); - int err = ESUCCESS; - LOCK(self->lock); - if (oldtype) - *oldtype = self->cancel_state & ~_PTHREAD_CANCEL_TYPE_MASK; - if ((type == PTHREAD_CANCEL_DEFERRED) || (type == PTHREAD_CANCEL_ASYNCHRONOUS)) - { - self->cancel_state = (self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK) | type; - } else - { - err = EINVAL; - } - UNLOCK(self->lock); - _pthread_testcancel(self); /* See if we need to 'die' now... 
*/ - return (err); -} int pthread_getconcurrency(void) @@ -1287,13 +1247,6 @@ pthread_setconcurrency(int new_level) * Perform package initialization - called automatically when application starts */ -extern int _cpu_capabilities; - -#define kHasAltivec 0x01 -#define kCache32 0x04 -#define kUseDcba 0x20 -#define kNoDcba 0x40 - static int pthread_init(void) { @@ -1308,11 +1261,8 @@ pthread_init(void) mach_msg_type_number_t count; int mib[2]; size_t len; - int hasvectorunit, numcpus; - - extern int _bcopy_initialize(void); - int dynamic_choice; - + int numcpus; + void *stackaddr; count = HOST_PRIORITY_INFO_COUNT; info = (host_info_t)&priority_info; @@ -1323,15 +1273,24 @@ pthread_init(void) printf("host_info failed (%d); probably need privilege.\n", kr); else { default_priority = priority_info.user_priority; - min_priority = priority_info.minimum_priority; - max_priority = priority_info.maximum_priority; + min_priority = priority_info.minimum_priority; + max_priority = priority_info.maximum_priority; } attrs = &_pthread_attr_default; pthread_attr_init(attrs); + LIST_INIT(&__pthread_head); + LOCK_INIT(_pthread_list_lock); thread = &_thread; + LIST_INSERT_HEAD(&__pthread_head, thread, plist); _pthread_set_self(thread); - _pthread_create(thread, attrs, (void *)USRSTACK, mach_thread_self()); + + mib[0] = CTL_KERN; + mib[1] = KERN_USRSTACK; + len = sizeof (stackaddr); + if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0) + stackaddr = (void *)USRSTACK; + _pthread_create(thread, attrs, stackaddr, mach_thread_self()); thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT; /* See if we're on a multiprocessor and set _spin_tries if so. */ @@ -1352,27 +1311,27 @@ pthread_init(void) else { if (basic_info.avail_cpus > 1) _spin_tries = MP_SPIN_TRIES; - /* This is a crude test */ - if (basic_info.cpu_subtype >= CPU_SUBTYPE_POWERPC_7400) - _cpu_has_altivec = 1; } } + mach_port_deallocate(mach_task_self(), host); + + _init_cpu_capabilities(); + +#if defined(_OBJC_PAGE_BASE_ADDRESS) +{ + vm_address_t objcRTPage = (vm_address_t)_OBJC_PAGE_BASE_ADDRESS; + kr = vm_map(mach_task_self(), + &objcRTPage, vm_page_size * 4, vm_page_size - 1, + VM_FLAGS_FIXED | VM_MAKE_TAG(0), // Which tag to use? + MACH_PORT_NULL, + (vm_address_t)0, FALSE, + (vm_prot_t)0, VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, + VM_INHERIT_DEFAULT); + /* We ignore the return result here. The ObjC runtime will just have to deal. */ +} +#endif - mib[0] = CTL_HW; - mib[1] = HW_VECTORUNIT; - len = sizeof(hasvectorunit); - if (sysctl(mib, 2, &hasvectorunit, &len, NULL, 0) == 0) { - _cpu_has_altivec = hasvectorunit; - } - if (_cpu_has_altivec) { // G4, let bcopy decide whether to use dcba - _cpu_capabilities = kCache32 + kHasAltivec; - } else { // G3, no altivec and no dcba - _cpu_capabilities = kCache32 + kNoDcba; - } - - dynamic_choice = _bcopy_initialize(); // returns 0, kUseDcba, or kNoDcba - _cpu_capabilities |= dynamic_choice; // remember dynamic choice, if any mig_init(1); /* enable multi-threaded mig interfaces */ return 0; } @@ -1421,11 +1380,253 @@ static void sem_pool_reset(void) { UNLOCK(sem_pool_lock); } -__private_extern__ void _pthread_fork_child(void) { +__private_extern__ void _pthread_fork_child(pthread_t p) { /* Just in case somebody had it locked... 
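+ * (after fork() only the calling thread exists in the child, so the
+ * semaphore pool, the global thread list and the thread count are all
+ * rebuilt from scratch below)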
*/ UNLOCK(sem_pool_lock); sem_pool_reset(); - UNLOCK(_pthread_count_lock); + /* No need to hold the pthread_list_lock as no one other than this + * thread is present at this time + */ + LIST_INIT(&__pthread_head); + LOCK_INIT(_pthread_list_lock); + LIST_INSERT_HEAD(&__pthread_head, p, plist); _pthread_count = 1; } +#else /* !BUILDING_VARIANT ] [ */ +extern int __unix_conforming; +extern pthread_lock_t _pthread_list_lock; +extern void _pthread_testcancel(pthread_t thread, int isconforming); +extern int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_ptr); + +#endif /* !BUILDING_VARIANT ] */ + +#if __DARWIN_UNIX03 + +static void __posix_join_cleanup(void *arg) +{ + pthread_t thread = (pthread_t)arg; + int already_exited, res; + void * dummy; + semaphore_t death; + + LOCK(thread->lock); + death = thread->death; + already_exited = (thread->detached & _PTHREAD_EXITED); + + if (!already_exited){ + thread->joiner = (struct _pthread *)NULL; + UNLOCK(thread->lock); + restore_sem_to_pool(death); + } else { + UNLOCK(thread->lock); + while ((res = _pthread_reap_thread(thread, + thread->kernel_thread, + &dummy)) == EAGAIN) + { + sched_yield(); + } + restore_sem_to_pool(death); + + } +} + +#endif /* __DARWIN_UNIX03 */ + + +/* + * Wait for a thread to terminate and obtain its exit value. + */ +int +pthread_join(pthread_t thread, + void **value_ptr) +{ + kern_return_t kern_res; + int res = ESUCCESS; + +#if __DARWIN_UNIX03 + if (__unix_conforming == 0) + __unix_conforming = 1; +#endif /* __DARWIN_UNIX03 */ + + if (thread->sig == _PTHREAD_SIG) + { + semaphore_t death = new_sem_from_pool(); /* in case we need it */ + + LOCK(thread->lock); + if ((thread->detached & PTHREAD_CREATE_JOINABLE) && + thread->death == SEMAPHORE_NULL) + { + pthread_t self = pthread_self(); + + assert(thread->joiner == NULL); + if (thread != self && (self == NULL || self->joiner != thread)) + { + int already_exited = (thread->detached & _PTHREAD_EXITED); + + thread->death = death; + thread->joiner = self; + UNLOCK(thread->lock); + + if (!already_exited) + { +#if __DARWIN_UNIX03 + /* Wait for it to signal... */ + pthread_cleanup_push(__posix_join_cleanup, (void *)thread); + do { + res = __semwait_signal(death, 0, 0, 0, 0, 0); + } while ((res < 0) && (errno == EINTR)); + pthread_cleanup_pop(0); + +#else /* __DARWIN_UNIX03 */ + /* Wait for it to signal... */ + do { + PTHREAD_MACH_CALL(semaphore_wait(death), kern_res); + } while (kern_res != KERN_SUCCESS); +#endif /* __DARWIN_UNIX03 */ + } +#if __DARWIN_UNIX03 + else { + if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) == (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) + res = PTHREAD_CANCELED; + } +#endif /* __DARWIN_UNIX03 */ + + LOCK(_pthread_list_lock); + LIST_REMOVE(thread, plist); + UNLOCK(_pthread_list_lock); + /* ... 
and wait for it to really be dead */ + while ((res = _pthread_reap_thread(thread, + thread->kernel_thread, + value_ptr)) == EAGAIN) + { + sched_yield(); + } + } else { + UNLOCK(thread->lock); + res = EDEADLK; + } + } else { + UNLOCK(thread->lock); + res = EINVAL; + } + restore_sem_to_pool(death); + return res; + } + return ESRCH; +} + +/* + * Cancel a thread + */ +int +pthread_cancel(pthread_t thread) +{ +#if __DARWIN_UNIX03 + if (__unix_conforming == 0) + __unix_conforming = 1; +#endif /* __DARWIN_UNIX03 */ + + if (thread->sig == _PTHREAD_SIG) + { +#if __DARWIN_UNIX03 + int state; + LOCK(thread->lock); + state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING; + UNLOCK(thread->lock); + if (state & PTHREAD_CANCEL_ENABLE) + __pthread_markcancel(thread->kernel_thread); +#else /* __DARWIN_UNIX03 */ + thread->cancel_state |= _PTHREAD_CANCEL_PENDING; +#endif /* __DARWIN_UNIX03 */ + return (ESUCCESS); + } else + { + return (ESRCH); + } +} + +void +pthread_testcancel(void) +{ + pthread_t self = pthread_self(); + +#if __DARWIN_UNIX03 + if (__unix_conforming == 0) + __unix_conforming = 1; + _pthread_testcancel(self, 1); +#else /* __DARWIN_UNIX03 */ + _pthread_testcancel(self, 0); +#endif /* __DARWIN_UNIX03 */ + +} +/* + * Query/update the cancelability 'state' of a thread + */ +int +pthread_setcancelstate(int state, int *oldstate) +{ + pthread_t self = pthread_self(); + +#if __DARWIN_UNIX03 + if (__unix_conforming == 0) + __unix_conforming = 1; +#endif /* __DARWIN_UNIX03 */ + + switch (state) { + case PTHREAD_CANCEL_ENABLE: +#if __DARWIN_UNIX03 + __pthread_canceled(1); +#endif /* __DARWIN_UNIX03 */ + break; + case PTHREAD_CANCEL_DISABLE: +#if __DARWIN_UNIX03 + __pthread_canceled(2); +#endif /* __DARWIN_UNIX03 */ + break; + default: + return EINVAL; + } + + self = pthread_self(); + LOCK(self->lock); + if (oldstate) + *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK; + self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK; + self->cancel_state |= state; + UNLOCK(self->lock); +#if !__DARWIN_UNIX03 + _pthread_testcancel(self, 0); /* See if we need to 'die' now... */ +#endif /* __DARWIN_UNIX03 */ + return (0); +} + +/* + * Query/update the cancelability 'type' of a thread + */ +int +pthread_setcanceltype(int type, int *oldtype) +{ + pthread_t self = pthread_self(); + +#if __DARWIN_UNIX03 + if (__unix_conforming == 0) + __unix_conforming = 1; +#endif /* __DARWIN_UNIX03 */ + + if ((type != PTHREAD_CANCEL_DEFERRED) && + (type != PTHREAD_CANCEL_ASYNCHRONOUS)) + return EINVAL; + self = pthread_self(); + LOCK(self->lock); + if (oldtype) + *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK; + self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK; + self->cancel_state |= type; + UNLOCK(self->lock); +#if !__DARWIN_UNIX03 + _pthread_testcancel(self, 0); /* See if we need to 'die' now... */ +#endif /* __DARWIN_UNIX03 */ + return (0); +} +
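
A client of the library sees the changes above through the standard API:
pthread_attr_setguardsize() sizes the VM_PROT_NONE region that
_pthread_allocate_stack() maps below a new thread's stack, and under the
UNIX03-conforming (__DARWIN_UNIX03) variant, joining a cancelled thread
yields PTHREAD_CANCELED. A minimal editorial sketch, not part of the patch
(all calls shown are standard pthreads; "worker" is a made-up name):

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static void *worker(void *arg)
	{
		(void)arg;
		/* Deferred cancellation (the default) takes effect at
		   cancellation points such as pthread_testcancel(). */
		for (;;)
			pthread_testcancel();
		return NULL;			/* not reached */
	}

	int main(void)
	{
		pthread_attr_t attr;
		pthread_t t;
		void *ret;

		pthread_attr_init(&attr);
		/* One guard page below the thread's stack; 0 disables it. */
		pthread_attr_setguardsize(&attr, (size_t)getpagesize());

		if (pthread_create(&t, &attr, worker, NULL) != 0)
			return 1;
		pthread_cancel(t);
		pthread_join(t, &ret);
		printf("worker %s\n",
		    ret == PTHREAD_CANCELED ? "was cancelled" : "exited normally");
		pthread_attr_destroy(&attr);
		return 0;
	}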