X-Git-Url: https://git.saurik.com/apple/libc.git/blobdiff_plain/224c70764cab4e0e39a26aaf3ad3016552f62f55..7b00c0c43f52e9d27168e67a26aac19065cdb40c:/pthreads/pthread.c

diff --git a/pthreads/pthread.c b/pthreads/pthread.c
index 59632e6..ef4527e 100644
--- a/pthreads/pthread.c
+++ b/pthreads/pthread.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
@@ -61,8 +61,10 @@
 #include
 #include
 #include
+#include
 #include
 #include
+#include
 #define __APPLE_API_PRIVATE
 #include
 #include
@@ -71,19 +73,23 @@
 #endif
 
+extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
+extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);
+
 #ifndef BUILDING_VARIANT /* [ */
+
 __private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
 
+int32_t workq_targetconc[WORKQ_NUM_PRIOQUEUE];
+
 /* Per-thread kernel support */
 extern void _pthread_set_self(pthread_t);
 extern void mig_init(int);
 static int _pthread_create_pthread_onstack(pthread_attr_t *attrs, void **stack, pthread_t *thread);
 static kern_return_t _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread);
-void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
-static void _pthread_tsd_reinit(pthread_t t);
+static void _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero);
 static int _new_pthread_create_suspended(pthread_t *thread,
        const pthread_attr_t *attr,
        void *(*start_routine)(void *),
@@ -128,6 +134,7 @@ __private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
 
 /* Same implementation as LOCK, but without the __is_threaded check */
 int _spin_tries = 0;
+extern kern_return_t syscall_thread_switch(mach_port_name_t, int, mach_msg_timeout_t);
 __private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
 {
     int tries = _spin_tries;
@@ -139,7 +146,7 @@ __private_extern__ void _spin_lock_retry(pthread_lock_t *lock)
     } while(!_spin_lock_try(lock));
 }
 
-extern mach_port_t thread_recycle_port;
+static mach_port_t thread_recycle_port = MACH_PORT_NULL;
 
 /* These are used to keep track of a semaphore pool shared by mutexes and condition
 ** variables.
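
The `_spin_lock_retry` loop above busy-waits a bounded number of times and then asks the kernel to run someone else. A minimal standalone sketch of the same spin-then-yield pattern, assuming C11 atomics and using portable `sched_yield()` in place of Libc's `pthread_lock_t` and `syscall_thread_switch()`:

#include <sched.h>
#include <stdatomic.h>

#define SPIN_TRIES 1000                 /* stand-in for _spin_tries / MP_SPIN_TRIES */

static atomic_flag lock_word = ATOMIC_FLAG_INIT;

static void
spin_lock_retry_sketch(void)
{
    int tries = SPIN_TRIES;
    do {
        if (tries-- > 0)
            continue;                   /* busy-wait for a bounded number of attempts */
        sched_yield();                  /* then donate the CPU, as syscall_thread_switch() does above */
        tries = SPIN_TRIES;
    } while (atomic_flag_test_and_set_explicit(&lock_word, memory_order_acquire));
}
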
@@ -157,27 +164,29 @@ static int pthread_concurrency;
 static OSSpinLock __workqueue_list_lock = OS_SPINLOCK_INIT;
 
-static void _pthread_exit(pthread_t self, void *value_ptr);
 int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
+static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
 static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
 static pthread_attr_t _pthread_attr_default = {0};
 static void _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * attr);
-static int handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item);
 static int kernel_workq_setup = 0;
 static volatile int32_t kernel_workq_count = 0;
-static volatile unsigned int user_workq_count = 0;
+static volatile unsigned int user_workq_count = 0;     /* number of outstanding workqueues */
+static volatile unsigned int user_workitem_count = 0;  /* number of outstanding workitems */
 #define KERNEL_WORKQ_ELEM_MAX 64       /* Max number of elements in the kernel */
 static int wqreadyprio = 0;    /* current highest prio queue ready with items */
 
 __private_extern__ struct __pthread_workitem_pool __pthread_workitem_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workitem_pool_head);
 __private_extern__ struct __pthread_workqueue_pool __pthread_workqueue_pool_head = TAILQ_HEAD_INITIALIZER(__pthread_workqueue_pool_head);
 
+static struct _pthread_workitem * __workqueue_pool_ptr;
+static size_t __workqueue_pool_size = 0;
+static int __workqueue_nitems = 0;
+
 struct _pthread_workqueue_head __pthread_workq0_head;
 struct _pthread_workqueue_head __pthread_workq1_head;
 struct _pthread_workqueue_head __pthread_workq2_head;
 struct _pthread_workqueue_head __pthread_workq3_head;
-struct _pthread_workqueue_head __pthread_workq4_head;
-pthread_workqueue_head_t __pthread_wq_head_tbl[WQ_NUM_PRIO_QS] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head, &__pthread_workq4_head};
+pthread_workqueue_head_t __pthread_wq_head_tbl[WORKQ_NUM_PRIOQUEUE] = {&__pthread_workq0_head, &__pthread_workq1_head, &__pthread_workq2_head, &__pthread_workq3_head};
 
 static void workqueue_list_lock(void);
 static void workqueue_list_unlock(void);
@@ -186,20 +195,30 @@ static void pick_nextworkqueue_droplock(void);
 static int post_nextworkitem(pthread_workqueue_t workq);
 static void _pthread_workq_return(pthread_t self);
 static pthread_workqueue_attr_t _pthread_wq_attr_default = {0};
-void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
 extern void start_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
-extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, int flags);
+extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
 static pthread_workitem_t alloc_workitem(void);
 static void free_workitem(pthread_workitem_t);
+static void grow_workitem(void);
 static pthread_workqueue_t alloc_workqueue(void);
 static void free_workqueue(pthread_workqueue_t);
 static int _pthread_work_internal_init(void);
 static void workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item);
+void _pthread_fork_child_postinit();
 
-/* workq_ops commands */
+void pthread_workqueue_atfork_prepare(void);
+void pthread_workqueue_atfork_parent(void);
+void pthread_workqueue_atfork_child(void);
+
+extern void dispatch_atfork_prepare(void);
+extern void dispatch_atfork_parent(void);
+extern void dispatch_atfork_child(void);
+
+/* workq_kernreturn commands */
 #define WQOPS_QUEUE_ADD 1
 #define WQOPS_QUEUE_REMOVE 2
 #define WQOPS_THREAD_RETURN 4
+#define WQOPS_THREAD_SETCONC 8
 
 /*
  * Flags field passed to bsdthread_create and back in pthread_start
@@ -208,8 +227,12 @@
 _________________________________________
 | flags(8) | policy(8) | importance(16) |
 -----------------------------------------
 */
+__private_extern__
 void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags);
+__private_extern__
+void _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_workitem_t item, int reuse);
+
 #define PTHREAD_START_CUSTOM   0x01000000
 #define PTHREAD_START_SETSCHED 0x02000000
 #define PTHREAD_START_DETACHED 0x04000000
@@ -217,13 +240,27 @@ void _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), voi
 #define PTHREAD_START_POLICY_MASK 0xff
 #define PTHREAD_START_IMPORTANCE_MASK 0xffff
 
-extern pthread_t __bsdthread_create(void (*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
+static int pthread_setschedparam_internal(pthread_t, mach_port_t, int, const struct sched_param *);
+extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void * stack, pthread_t thread, unsigned int flags);
+extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, pthread_workitem_t, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t);
 extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
+extern __uint64_t __thread_selfid( void );
+extern int __pthread_canceled(int);
+extern void _pthread_keys_init(void);
+extern int __pthread_kill(mach_port_t, int);
+extern int __pthread_markcancel(int);
+extern int __workq_open(void);
+
+#define WORKQUEUE_OVERCOMMIT 0x10000
+
+extern int __workq_kernreturn(int, pthread_workitem_t, int, int);
 
 #if defined(__ppc__) || defined(__ppc64__)
 static const vm_address_t PTHREAD_STACK_HINT = 0xF0000000;
 #elif defined(__i386__) || defined(__x86_64__)
 static const vm_address_t PTHREAD_STACK_HINT = 0xB0000000;
+#elif defined(__arm__)
 static const vm_address_t PTHREAD_STACK_HINT = 0x30000000;
 #else
 #error Need to define a stack address hint for this architecture
 #endif
@@ -336,12 +373,11 @@ _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
     kern_return_t res = 0;
     vm_address_t freeaddr;
     size_t freesize;
-    task_t self = mach_task_self();
     int thread_count;
     mach_port_t kport;
     semaphore_t joinsem = SEMAPHORE_NULL;
 
-#if WQ_TRACE
+#if PTH_TRACE
     __kdebug_trace(0x900001c, freestruct, termthread, 0, 0, 0);
 #endif
     kport = t->kernel_thread;
@@ -359,7 +395,7 @@ _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
         if (freestruct != 0) {
             TAILQ_REMOVE(&__pthread_head, t, plist);
             /* if parent has not returned from create yet keep pthread_t */
-#if WQ_TRACE
+#if PTH_LISTTRACE
             __kdebug_trace(0x9000010, t, 0, 0, 1, 0);
 #endif
             if (t->parentcheck == 0)
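
The new `__arm__` case for `PTHREAD_STACK_HINT` above supplies a preferred base address for thread stacks. A hedged sketch of how such a hint can be used, with plain `mmap()` standing in for the Mach VM calls that Libc's `_pthread_allocate_stack` actually makes:

#include <stddef.h>
#include <sys/mman.h>

/* Request a stack near a preferred base address. The kernel may ignore
 * the hint; MAP_FIXED is deliberately not used so an occupied range is
 * never clobbered. */
static void *
allocate_stack_near_hint(void *hint, size_t size)
{
    void *addr = mmap(hint, size, PROT_READ | PROT_WRITE,
                      MAP_ANON | MAP_PRIVATE, -1, 0);
    return (addr == MAP_FAILED) ? NULL : addr;
}
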
@@ -369,16 +405,16 @@ _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
         thread_count = --_pthread_count;
         UNLOCK(_pthread_list_lock);
 
-#if WQ_TRACE
+#if PTH_TRACE
         __kdebug_trace(0x9000020, freeaddr, freesize, kport, 1, 0);
 #endif
         if (thread_count <=0)
             exit(0);
         else
-            __bsdthread_terminate(freeaddr, freesize, kport, joinsem);
-        abort();
+            __bsdthread_terminate((void *)freeaddr, freesize, kport, joinsem);
+        LIBC_ABORT("thread %p didn't terminate", t);
     } else {
-#if WQ_TRACE
+#if PTH_TRACE
         __kdebug_trace(0x9000024, freeaddr, freesize, 0, 1, 0);
 #endif
         res = vm_deallocate(mach_task_self(), freeaddr, freesize);
@@ -389,7 +425,7 @@ _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
         LOCK(_pthread_list_lock);
         if (freestruct != 0) {
             TAILQ_REMOVE(&__pthread_head, t, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
             __kdebug_trace(0x9000010, t, 0, 0, 2, 0);
 #endif
         }
@@ -398,7 +434,7 @@ _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
         UNLOCK(_pthread_list_lock);
 
         if (freestruct) {
-#if WQ_TRACE
+#if PTH_TRACE
             __kdebug_trace(0x9000008, t, 0, 0, 2, 0);
 #endif
             free(t);
@@ -406,7 +442,7 @@ _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
         freeaddr = 0;
         freesize = 0;
 
-#if WQ_TRACE
+#if PTH_TRACE
         __kdebug_trace(0x9000020, 0, 0, kport, 2, 0);
 #endif
 
@@ -414,10 +450,10 @@ _pthread_free_pthread_onstack(pthread_t t, int freestruct, int termthread)
             exit(0);
         else
             __bsdthread_terminate(NULL, 0, kport, joinsem);
-        abort();
+        LIBC_ABORT("thread %p didn't terminate", t);
     } else if (freestruct) {
         t->sig = _PTHREAD_NO_SIG;
-#if WQ_TRACE
+#if PTH_TRACE
         __kdebug_trace(0x9000024, t, 0, 0, 2, 0);
 #endif
         free(t);
@@ -791,13 +827,16 @@ static void
 _pthread_body(pthread_t self)
 {
     _pthread_set_self(self);
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+    if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
+        printf("Failed to set thread_id in _pthread_body\n");
+#endif
     _pthread_exit(self, (self->fun)(self->arg));
 }
 
 void
 _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int pflags)
 {
-    int ret;
 #if WQ_DEBUG
     pthread_t pself;
 #endif
@@ -805,8 +844,11 @@ _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * f
     char * stackaddr;
 
     if ((pflags & PTHREAD_START_CUSTOM) == 0) {
-        stackaddr = self;
+        stackaddr = (char *)self;
         _pthread_struct_init(self, attrs, stackaddr, stacksize, 1, 1);
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+        _pthread_set_self(self);
+#endif
         LOCK(_pthread_list_lock);
         if (pflags & PTHREAD_START_SETSCHED) {
             self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
@@ -817,8 +859,12 @@ _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * f
             self->detached &= ~PTHREAD_CREATE_JOINABLE;
             self->detached |= PTHREAD_CREATE_DETACHED;
         }
-    } else
+    } else {
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+        _pthread_set_self(self);
+#endif
         LOCK(_pthread_list_lock);
+    }
     self->kernel_thread = kport;
     self->fun = fun;
     self->arg = funarg;
@@ -826,23 +872,25 @@ _pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * f
     /* Add to the pthread list */
     if (self->parentcheck == 0) {
         TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
         __kdebug_trace(0x900000c, self, 0, 0, 3, 0);
 #endif
         _pthread_count++;
     }
     self->childrun = 1;
     UNLOCK(_pthread_list_lock);
-#if defined(__i386__) || defined(__x86_64__)
-    _pthread_set_self(self);
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+    if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
+        printf("Failed to set thread_id in pthread_start\n");
 #endif
 
 #if WQ_DEBUG
     pself = pthread_self();
     if (self != pself)
-        abort();
+        LIBC_ABORT("self %p != pself %p", self, pself);
 #endif
-#if WQ_TRACE
+#if PTH_TRACE
     __kdebug_trace(0x9000030, self, pflags, 0, 0, 0);
 #endif
 
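The `__thread_selfid()` calls added above cache a global 64-bit thread id in the pthread structure. From user code, the supported way to read that id is `pthread_threadid_np()`, whose implementation appears later in this diff; a small usage sketch:

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
    __uint64_t tid = 0;

    /* NULL means "the calling thread", per the comment in the
     * pthread_threadid_np() implementation below. */
    if (pthread_threadid_np(NULL, &tid) == 0)
        printf("thread_id = %llu\n", (unsigned long long)tid);
    return 0;
}
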
@@ -878,7 +926,7 @@ _pthread_create(pthread_t t,
         t->policy = attrs->policy;
         t->param = attrs->param;
         t->freeStackOnExit = attrs->freeStackOnExit;
-        t->mutexes = (struct _pthread_mutex *)NULL;
+        t->cancel_error = 0;
         t->sig = _PTHREAD_SIG;
         t->reply_port = MACH_PORT_NULL;
         t->cthread_self = NULL;
@@ -890,7 +938,7 @@ _pthread_create(pthread_t t,
         t->death = SEMAPHORE_NULL;
 
         if (kernel_thread != MACH_PORT_NULL)
-            pthread_setschedparam(t, t->policy, &t->param);
+            (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
     } while (0);
     return (res);
 }
@@ -898,7 +946,7 @@ _pthread_create(pthread_t t,
 void
 _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, size_t stacksize, int kernalloc, int nozero)
 {
-    mach_vm_offset_t stackaddr = (mach_vm_offset_t)stack;
+    mach_vm_offset_t stackaddr = (mach_vm_offset_t)(uintptr_t)stack;
 
     if (nozero == 0) {
         memset(t, 0, sizeof(*t));
@@ -908,13 +956,13 @@ _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, siz
     t->schedset = attrs->schedset;
     t->tsd[0] = t;
     if (kernalloc != 0) {
-        stackaddr = (mach_vm_offset_t)t;
+        stackaddr = (mach_vm_offset_t)(uintptr_t)t;
         /* if allocated from kernel set values appropriately */
         t->stacksize = stacksize;
-        t->stackaddr = stackaddr;
+        t->stackaddr = (void *)(uintptr_t)stackaddr;
         t->freeStackOnExit = 1;
-        t->freeaddr = stackaddr - stacksize - vm_page_size;
+        t->freeaddr = (void *)(uintptr_t)(stackaddr - stacksize - vm_page_size);
         t->freesize = pthreadsize + stacksize + vm_page_size;
     } else {
         t->stacksize = attrs->stacksize;
@@ -925,7 +973,7 @@ _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, siz
     t->inherit = attrs->inherit;
     t->policy = attrs->policy;
     t->param = attrs->param;
-    t->mutexes = (struct _pthread_mutex *)NULL;
+    t->cancel_error = 0;
     t->sig = _PTHREAD_SIG;
     t->reply_port = MACH_PORT_NULL;
     t->cthread_self = NULL;
@@ -941,13 +989,6 @@ _pthread_struct_init(pthread_t t, const pthread_attr_t *attrs, void * stack, siz
     t->max_tsd_key = 0;
 }
 
-static void
-_pthread_tsd_reinit(pthread_t t)
-{
-    bzero(&t->tsd[1], (_INTERNAL_POSIX_THREAD_KEYS_END-1) * sizeof(void *));
-}
-
-
 /* Need to deprecate this in future */
 int
 _pthread_is_threaded(void)
@@ -971,9 +1012,24 @@ pthread_mach_thread_np(pthread_t t)
 {
     mach_port_t kport = MACH_PORT_NULL;
 
+    if (t == NULL)
+        goto out;
+
+    /*
+     * If the call is on self, return the kernel port. We cannot
+     * add this bypass for main thread as it might have exited,
+     * and we should not return stale port info.
+     */
+    if (t == pthread_self())
+    {
+        kport = t->kernel_thread;
+        goto out;
+    }
+
     if (_pthread_lookup_thread(t, &kport, 0) != 0)
-        return(NULL);
+        return((mach_port_t)0);
 
+out:
     return(kport);
 }
 
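The `pthread_mach_thread_np()` hunk above adds a fast path when the call targets the current thread. The caller's view is unchanged by the bypass; a short usage sketch:

#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>

int
main(void)
{
    /* For pthread_self() the new bypass returns t->kernel_thread
     * directly instead of taking the thread-list lock. */
    mach_port_t kport = pthread_mach_thread_np(pthread_self());
    printf("kernel thread port: 0x%x\n", (unsigned int)kport);
    return 0;
}
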
@@ -1000,14 +1056,23 @@ pthread_get_stacksize_np(pthread_t t)
     if (t == NULL)
         return(ESRCH);
 
+    if ( t == pthread_self() || t == &_thread ) //since the main thread will not get de-allocated from underneath us
+    {
+        size = t->stacksize;
+        return size;
+    }
+
     LOCK(_pthread_list_lock);
 
     if ((ret = _pthread_find_thread(t)) != 0) {
         UNLOCK(_pthread_list_lock);
         return(ret);
     }
-    size = t->stacksize;
+
+    size = t->stacksize;
     UNLOCK(_pthread_list_lock);
+
     return(size);
 }
 
@@ -1018,13 +1083,16 @@ pthread_get_stackaddr_np(pthread_t t)
     void * addr = NULL;
 
     if (t == NULL)
-        return(ESRCH);
+        return((void *)(uintptr_t)ESRCH);
+
+    if (t == pthread_self() || t == &_thread) //since the main thread will not get deallocated from underneath us
+        return t->stackaddr;
 
     LOCK(_pthread_list_lock);
 
     if ((ret = _pthread_find_thread(t)) != 0) {
         UNLOCK(_pthread_list_lock);
-        return(ret);
+        return((void *)(uintptr_t)ret);
     }
     addr = t->stackaddr;
     UNLOCK(_pthread_list_lock);
@@ -1048,6 +1116,75 @@ pthread_main_np(void)
     return ((self->detached & _PTHREAD_CREATE_PARENT) == _PTHREAD_CREATE_PARENT);
 }
 
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+/* if we are passed in a pthread_t that is NULL, then we return
+   the current thread's thread_id. So folks don't have to call
+   pthread_self, in addition to us doing it, if they just want
+   their thread_id.
+*/
+int
+pthread_threadid_np(pthread_t thread, __uint64_t *thread_id)
+{
+    int rval = 0;
+    pthread_t self = pthread_self();
+
+    if (thread_id == NULL) {
+        return(EINVAL);
+    } else if (thread == NULL || thread == self) {
+        *thread_id = self->thread_id;
+        return rval;
+    }
+
+    LOCK(_pthread_list_lock);
+    if ((rval = _pthread_find_thread(thread)) != 0) {
+        UNLOCK(_pthread_list_lock);
+        return(rval);
+    }
+    *thread_id = thread->thread_id;
+    UNLOCK(_pthread_list_lock);
+    return rval;
+}
+#endif
+
+int
+pthread_getname_np(pthread_t thread, char *threadname, size_t len)
+{
+    int rval;
+    rval = 0;
+
+    if (thread == NULL)
+        return(ESRCH);
+
+    LOCK(_pthread_list_lock);
+    if ((rval = _pthread_find_thread(thread)) != 0) {
+        UNLOCK(_pthread_list_lock);
+        return(rval);
+    }
+    strlcpy(threadname, thread->pthread_name, len);
+    UNLOCK(_pthread_list_lock);
+    return rval;
+}
+
+int
+pthread_setname_np(const char *threadname)
+{
+    int rval;
+    int len;
+
+    rval = 0;
+    len = strlen(threadname);
+
+    /* prototype is in pthread_internals.h */
+    rval = proc_setthreadname((void *)threadname, len);
+    if (rval == 0)
+    {
+        strlcpy((pthread_self())->pthread_name, threadname, MAXTHREADNAMESIZE);
+    }
+    return rval;
+
+}
+
 static int
 _new_pthread_create_suspended(pthread_t *thread,
        const pthread_attr_t *attr,
@@ -1059,7 +1196,7 @@ _new_pthread_create_suspended(pthread_t *thread,
     void *stack;
     int error;
     unsigned int flags;
-    pthread_t t;
+    pthread_t t,t2;
     kern_return_t kern_res;
     mach_port_t kernel_thread = MACH_PORT_NULL;
     int needresume;
@@ -1089,7 +1226,7 @@ _new_pthread_create_suspended(pthread_t *thread,
             /* Rosetta or pthread_create_suspended() */
             /* running under rosetta */
             /* Allocate a stack for the thread */
-#if WQ_TRACE
+#if PTH_TRACE
             __kdebug_trace(0x9000000, create_susp, 0, 0, 0, 0);
 #endif
             if ((error = _pthread_allocate_stack(attrs, &stack)) != 0) {
@@ -1120,7 +1257,7 @@ _new_pthread_create_suspended(pthread_t *thread,
             /* Now set it up to execute */
             LOCK(_pthread_list_lock);
             TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
             __kdebug_trace(0x900000c, t, 0, 0, 4, 0);
 #endif
             _pthread_count++;
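`pthread_setname_np()` above names only the calling thread (hence the one-argument prototype), while `pthread_getname_np()` copies the stored name out under the list lock. A usage sketch:

#include <pthread.h>
#include <stdio.h>

int
main(void)
{
    char name[64] = "";

    pthread_setname_np("worker-0");     /* names the calling thread only */
    if (pthread_getname_np(pthread_self(), name, sizeof(name)) == 0)
        printf("thread name: %s\n", name);
    return 0;
}
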
@@ -1155,27 +1292,31 @@ _new_pthread_create_suspended(pthread_t *thread,
             t->fun = start_routine;
             t->newstyle = 1;
 
-#if WQ_TRACE
+#if PTH_TRACE
             __kdebug_trace(0x9000004, t, flags, 0, 0, 0);
 #endif
 
-            if ((t = __bsdthread_create(start_routine, arg, stack, t, flags)) == -1) {
+            if ((t2 = __bsdthread_create(start_routine, arg, stack, t, flags)) == (pthread_t)-1) {
                 _pthread_free_pthread_onstack(t, 1, 0);
                 return (EAGAIN);
             }
+            else t = t2;
 
             LOCK(_pthread_list_lock);
             t->parentcheck = 1;
             if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
                 /* detached child exited, mop up */
                 UNLOCK(_pthread_list_lock);
-#if WQ_TRACE
+#if PTH_TRACE
                 __kdebug_trace(0x9000008, t, 0, 0, 1, 0);
 #endif
+                if(t->freeStackOnExit)
+                    vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
+                else
                     free(t);
             } else if (t->childrun == 0) {
                 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
                 _pthread_count++;
-#if WQ_TRACE
+#if PTH_LISTTRACE
                 __kdebug_trace(0x900000c, t, 0, 0, 1, 0);
 #endif
                 UNLOCK(_pthread_list_lock);
@@ -1184,17 +1325,17 @@ _new_pthread_create_suspended(pthread_t *thread,
 
             *thread = t;
 
-#if WQ_TRACE
+#if PTH_TRACE
             __kdebug_trace(0x9000014, t, 0, 0, 1, 0);
 #endif
             return (0);
 
         } else {
             /* kernel allocation */
-#if WQ_TRACE
+#if PTH_TRACE
             __kdebug_trace(0x9000018, flags, 0, 0, 0, 0);
 #endif
-            if ((t = __bsdthread_create(start_routine, arg, attrs->stacksize, NULL, flags)) == -1)
+            if ((t = __bsdthread_create(start_routine, arg, (void *)attrs->stacksize, NULL, flags)) == (pthread_t)-1)
                 return (EAGAIN);
             /* Now set it up to execute */
             LOCK(_pthread_list_lock);
@@ -1202,14 +1343,14 @@ _new_pthread_create_suspended(pthread_t *thread,
             if ((t->childexit != 0) && ((t->detached & PTHREAD_CREATE_DETACHED) == PTHREAD_CREATE_DETACHED)) {
                 /* detached child exited, mop up */
                 UNLOCK(_pthread_list_lock);
-#if WQ_TRACE
+#if PTH_TRACE
                 __kdebug_trace(0x9000008, t, pthreadsize, 0, 2, 0);
 #endif
-                vm_deallocate(self, t, pthreadsize);
+                vm_deallocate(self, (mach_vm_address_t)(uintptr_t)t, pthreadsize);
             } else if (t->childrun == 0) {
                 TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
                 _pthread_count++;
-#if WQ_TRACE
+#if PTH_LISTTRACE
                 __kdebug_trace(0x900000c, t, 0, 0, 2, 0);
 #endif
                 UNLOCK(_pthread_list_lock);
@@ -1218,7 +1359,7 @@ _new_pthread_create_suspended(pthread_t *thread,
 
             *thread = t;
 
-#if WQ_TRACE
+#if PTH_TRACE
             __kdebug_trace(0x9000014, t, 0, 0, 2, 0);
 #endif
             return(0);
@@ -1290,7 +1431,7 @@ _pthread_create_suspended(pthread_t *thread,
     /* Now set it up to execute */
     LOCK(_pthread_list_lock);
     TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
     __kdebug_trace(0x900000c, t, 0, 0, 5, 0);
 #endif
     _pthread_count++;
@@ -1370,9 +1511,6 @@ pthread_detach(pthread_t thread)
 /*
  * pthread_kill call to system call
  */
-
-extern int __pthread_kill(mach_port_t, int);
-
 int
 pthread_kill (
     pthread_t th,
@@ -1387,6 +1525,11 @@ pthread_kill (
     if (_pthread_lookup_thread(th, &kport, 0) != 0)
         return (ESRCH); /* Not a valid thread */
 
+    /* if the thread is a workqueue thread, just return error */
+    if ((th->wqthread != 0) && (th->wqkillset ==0)) {
+        return(ENOTSUP);
+    }
+
     error = __pthread_kill(kport, sig);
 
     if (error == -1)
@@ -1394,6 +1537,22 @@ pthread_kill (
     return(error);
 }
 
+int
+__pthread_workqueue_setkill(int enable)
+{
+    pthread_t self = pthread_self();
+
+    LOCK(self->lock);
+    if (enable == 0)
+        self->wqkillset = 0;
+    else
+        self->wqkillset = 1;
+    UNLOCK(self->lock);
+
+    return(0);
+
+}
+
 /* Announce that there are pthread resources ready to be reclaimed in a */
 /* subsequent pthread_exit or reaped by pthread_join. In either case, the Mach */
 /* thread underneath is terminated right away. */
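With the guard added above, `pthread_kill()` on a workqueue thread now fails with ENOTSUP unless `__pthread_workqueue_setkill(1)` was called; ordinary `pthread_create()` threads are unaffected. A race-free sketch of signalling a specific pthread (the `sigwait` pattern is standard POSIX, not part of this diff):

#include <pthread.h>
#include <signal.h>

static void *
worker(void *arg)
{
    sigset_t *set = arg;
    int signo = 0;
    sigwait(set, &signo);       /* park until pthread_kill() targets us */
    return NULL;
}

int
main(void)
{
    static sigset_t set;
    pthread_t th;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &set, NULL);   /* keep SIGUSR1 deliverable via sigwait */

    pthread_create(&th, NULL, worker, &set);
    pthread_kill(th, SIGUSR1);                /* accepted: not a workqueue thread */
    pthread_join(th, NULL);
    return 0;
}
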
@@ -1465,7 +1624,7 @@ int _pthread_reap_thread(pthread_t th, mach_port_t kernel_thread, void **value_p
         *value_ptr = th->exit_value;
     if (conforming) {
         if ((th->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
-            (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING))
+            (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL))
             *value_ptr = PTHREAD_CANCELED;
         th->sig = _PTHREAD_NO_SIG;
     }
@@ -1490,12 +1649,20 @@ void _pthread_reap_threads(void)
         mach_port_t kernel_thread = msg.header.msgh_remote_port;
         pthread_t thread = msg.thread;
 
-        if (_pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
+        /* deal with race with thread_create_running() */
+        if (kernel_thread == MACH_PORT_NULL &&
+            kernel_thread != thread->kernel_thread) {
+            kernel_thread = thread->kernel_thread;
+        }
+
+        if ( kernel_thread == MACH_PORT_NULL ||
+             _pthread_reap_thread(thread, kernel_thread, (void **)0, 0) == EAGAIN)
         {
             /* not dead yet, put it back for someone else to reap, stop here */
             _pthread_become_available(thread, kernel_thread);
             return;
         }
+
         ret = mach_msg(&msg.header, MACH_RCV_MSG|MACH_RCV_TIMEOUT, 0,
                 sizeof msg, thread_recycle_port,
                 MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
@@ -1525,7 +1692,7 @@ _pthread_exit(pthread_t self, void *value_ptr)
     /* Make this thread not to receive any signals */
     __disable_threadsignal(1);
 
-#if WQ_TRACE
+#if PTH_TRACE
     __kdebug_trace(0x900001c, self, newstyle, 0, 0, 0);
 #endif
 
@@ -1564,7 +1731,7 @@ _pthread_exit(pthread_t self, void *value_ptr)
         UNLOCK(self->lock);
         LOCK(_pthread_list_lock);
         TAILQ_REMOVE(&__pthread_head, self, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
         __kdebug_trace(0x9000010, self, 0, 0, 5, 0);
 #endif
         thread_count = --_pthread_count;
@@ -1583,14 +1750,14 @@ _pthread_exit(pthread_t self, void *value_ptr)
     } else {
         semaphore_t joinsem = SEMAPHORE_NULL;
 
-        if ((self->joiner_notify == NULL) && (self->detached & PTHREAD_CREATE_JOINABLE))
+        if ((self->joiner_notify == (mach_port_t)0) && (self->detached & PTHREAD_CREATE_JOINABLE))
             joinsem = new_sem_from_pool();
         LOCK(self->lock);
         self->detached |= _PTHREAD_EXITED;
 
         self->exit_value = value_ptr;
         if (self->detached & PTHREAD_CREATE_JOINABLE) {
-            if (self->joiner_notify == NULL) {
+            if (self->joiner_notify == (mach_port_t)0) {
                 self->joiner_notify = joinsem;
                 joinsem = SEMAPHORE_NULL;
             }
@@ -1606,17 +1773,19 @@ _pthread_exit(pthread_t self, void *value_ptr)
             _pthread_free_pthread_onstack(self, 1, 1);
         }
     }
-    abort();
+    LIBC_ABORT("thread %p didn't exit", self);
 }
 
 void
 pthread_exit(void *value_ptr)
 {
     pthread_t self = pthread_self();
-    if (self->wqthread != 0)
-        workqueue_exit(self, self->cur_workq, self->cur_workitem);
-    else
+    /* if the current thread is a workqueue thread, just crash the app, as per libdispatch folks */
+    if (self->wqthread == 0) {
         _pthread_exit(self, value_ptr);
+    } else {
+        LIBC_ABORT("pthread_exit() may only be called against threads created via pthread_create()");
+    }
 }
 
 /*
@@ -1650,8 +1819,9 @@ pthread_getschedparam(pthread_t thread,
 /*
  * Set the scheduling policy and scheduling parameters for a thread.
 */
-int
-pthread_setschedparam(pthread_t thread,
+static int
+pthread_setschedparam_internal(pthread_t thread,
+    mach_port_t kport,
     int policy,
     const struct sched_param *param)
 {
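The split introduced above separates the kernel `thread_policy()` call (internal) from the public entry point, which now validates the thread and updates `t->policy`/`t->param` under the list lock. The caller-visible API is unchanged; a usage sketch:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

int
main(void)
{
    struct sched_param sp = { 0 };
    int policy = 0;

    pthread_getschedparam(pthread_self(), &policy, &sp);
    sp.sched_priority = sched_get_priority_min(policy);

    if (pthread_setschedparam(pthread_self(), policy, &sp) != 0)
        perror("pthread_setschedparam");
    return 0;
}
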
@@ -1682,14 +1852,48 @@ pthread_setschedparam(pthread_t thread,
         default:
             return (EINVAL);
     }
-    ret = thread_policy(pthread_mach_thread_np(thread), policy, base, count, TRUE);
+    ret = thread_policy(kport, policy, base, count, TRUE);
     if (ret != KERN_SUCCESS)
         return (EINVAL);
-    thread->policy = policy;
-    thread->param = *param;
     return (0);
 }
 
+int
+pthread_setschedparam(pthread_t t,
+    int policy,
+    const struct sched_param *param)
+{
+    mach_port_t kport = MACH_PORT_NULL;
+    int error;
+    int bypass = 1;
+
+    if (t != pthread_self() && t != &_thread ) { //since the main thread will not get de-allocated from underneath us
+        bypass = 0;
+        if (_pthread_lookup_thread(t, &kport, 0) != 0)
+            return(ESRCH);
+    } else
+        kport = t->kernel_thread;
+
+    error = pthread_setschedparam_internal(t, kport, policy, param);
+    if (error == 0) {
+        if (bypass == 0) {
+            /* ensure the thread is still valid */
+            LOCK(_pthread_list_lock);
+            if ((error = _pthread_find_thread(t)) != 0) {
+                UNLOCK(_pthread_list_lock);
+                return(error);
+            }
+            t->policy = policy;
+            t->param = *param;
+            UNLOCK(_pthread_list_lock);
+        } else {
+            t->policy = policy;
+            t->param = *param;
+        }
+    }
+    return(error);
+}
+
 /*
  * Get the minimum priority for the given policy
  */
@@ -1718,16 +1922,21 @@ pthread_equal(pthread_t t1,
     return (t1 == t2);
 }
 
-__private_extern__ void
+// Force LLVM not to optimise this to a call to __pthread_set_self, if it does
+// then _pthread_set_self won't be bound when secondary threads try and start up.
+void __attribute__((noinline))
 _pthread_set_self(pthread_t p)
 {
-    extern void __pthread_set_self(pthread_t);
+    extern void __pthread_set_self(void *);
+
     if (p == 0) {
-        bzero(&_thread, sizeof(struct _pthread));
+        if (_thread.tsd[0] != 0) {
+            bzero(&_thread, sizeof(struct _pthread));
+        }
         p = &_thread;
     }
     p->tsd[0] = p;
-    __pthread_set_self(p);
+    __pthread_set_self(&p->tsd[0]);
 }
 
 void
@@ -1771,7 +1980,7 @@ pthread_once(pthread_once_t *once_control,
     _spin_lock(&once_control->lock);
     if (once_control->sig == _PTHREAD_ONCE_SIG_init)
     {
-        pthread_cleanup_push(__pthread_once_cancel_handler, once_control);
+        pthread_cleanup_push((void (*)(void *))__pthread_once_cancel_handler, once_control);
         (*init_routine)();
         pthread_cleanup_pop(0);
         once_control->sig = _PTHREAD_ONCE_SIG;
@@ -1819,8 +2028,7 @@ pthread_setconcurrency(int new_level)
 /*
  * Perform package initialization - called automatically when application starts
  */
-
-__private_extern__ int
+int
 pthread_init(void)
 {
     pthread_attr_t *attrs;
@@ -1832,6 +2040,7 @@ pthread_init(void)
     host_t host;
     mach_msg_type_number_t count;
     int mib[2];
+    int ncpus = 0;
     size_t len;
     void *stackaddr;
 
@@ -1856,7 +2065,10 @@ pthread_init(void)
     thread = &_thread;
     TAILQ_INSERT_HEAD(&__pthread_head, thread, plist);
     _pthread_set_self(thread);
-
+#if PTH_LISTTRACE
+    __kdebug_trace(0x900000c, thread, 0, 0, 10, 0);
+#endif
+
     /* In case of dyld reset the tsd keys from 1 - 10 */
     _pthread_keys_init();
 
@@ -1866,12 +2078,18 @@ pthread_init(void)
     if (sysctl (mib, 2, &stackaddr, &len, NULL, 0) != 0)
         stackaddr = (void *)USRSTACK;
     _pthread_create(thread, attrs, stackaddr, mach_thread_self());
+    thread->stacksize = DFLSSIZ; //initialize main thread's stacksize based on vmparam.h
     thread->detached = PTHREAD_CREATE_JOINABLE|_PTHREAD_CREATE_PARENT;
 
     _init_cpu_capabilities();
-    if (_NumCPUs() > 1)
+    if ((ncpus = _NumCPUs()) > 1)
         _spin_tries = MP_SPIN_TRIES;
 
+    workq_targetconc[WORKQ_HIGH_PRIOQUEUE] = ncpus;
+    workq_targetconc[WORKQ_DEFAULT_PRIOQUEUE] = ncpus;
+    workq_targetconc[WORKQ_LOW_PRIOQUEUE] = ncpus;
+    workq_targetconc[WORKQ_BG_PRIOQUEUE] = ncpus;
+
     mach_port_deallocate(mach_task_self(), host);
 
 #if defined(__ppc__)
@@ -1879,9 +2097,6 @@ pthread_init(void)
         __oldstyle = 1;
     }
 #endif
-#if defined(__arm__)
-    __oldstyle = 1;
-#endif
 
 #if defined(_OBJC_PAGE_BASE_ADDRESS)
 {
@@ -1896,15 +2111,21 @@ pthread_init(void)
         /* We ignore the return result here. The ObjC runtime will just have to deal. */
 }
 #endif
-
+    //added so that thread_recycle_port is initialized on new launch.
+    _pthread_fork_child_postinit();
     mig_init(1);        /* enable multi-threaded mig interfaces */
     if (__oldstyle == 0) {
-#if defined(__i386__) || defined(__x86_64__)
-        __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+        __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&thread->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&thread->tsd[0]));
 #else
-        __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
+        __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)), NULL, &workq_targetconc[0], (uintptr_t)&thread->tsd[__PTK_LIBDISPATCH_KEY0] - (uintptr_t)thread);
 #endif
     }
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+    if( (thread->thread_id = __thread_selfid()) == (__uint64_t)-1)
+        printf("Failed to set thread_id in pthread_init\n");
+#endif
     return 0;
 }
 
@@ -1915,8 +2136,12 @@ int sched_yield(void)
 }
 
 /* This used to be the "magic" that gets the initialization routine called when the application starts */
-static int _do_nothing(void) { return 0; }
-int (*_cthread_init_routine)(void) = _do_nothing;
+/*
+ * (These have been moved to setenv.c, so we can use it to fix a less than 10.5
+ * crt1.o issue)
+ * static int _do_nothing(void) { return 0; }
+ * int (*_cthread_init_routine)(void) = _do_nothing;
+ */
 
 /* Get a semaphore from the pool, growing it if necessary */
 
@@ -1963,7 +2188,23 @@ __private_extern__ void _pthread_fork_child(pthread_t p) {
     TAILQ_INIT(&__pthread_head);
     LOCK_INIT(_pthread_list_lock);
     TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
+#if PTH_LISTTRACE
+    __kdebug_trace(0x900000c, p, 0, 0, 10, 0);
+#endif
     _pthread_count = 1;
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+    if( (p->thread_id = __thread_selfid()) == (__uint64_t)-1)
+        printf("Failed to set thread_id in pthread_fork_child\n");
+#endif
+}
+
+void _pthread_fork_child_postinit() {
+    kern_return_t kr;
+
+    kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &thread_recycle_port);
+    if (kr != KERN_SUCCESS) {
+        abort();
+    }
 }
 
 /*
@@ -2019,9 +2260,9 @@ int
 _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
 {
     kern_return_t res;
-    int detached = 0, ret;
+    int ret;
 
-#if WQ_TRACE
+#if PTH_TRACE
     __kdebug_trace(0x9000028, thread, 0, 0, 1, 0);
 #endif
     /* The scenario where the joiner was waiting for the thread and
@@ -2044,7 +2285,7 @@ _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
     }
     /* It is still a joinable thread and needs to be reaped */
     TAILQ_REMOVE(&__pthread_head, thread, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
     __kdebug_trace(0x9000010, thread, 0, 0, 3, 0);
 #endif
     UNLOCK(_pthread_list_lock);
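The next hunk adds a `(value_ptr != NULL)` guard before storing PTHREAD_CANCELED, covering exactly the case where a joiner passes NULL. A usage sketch of that case:

#include <pthread.h>
#include <unistd.h>

static void *
victim(void *arg)
{
    (void)arg;
    for (;;)
        pause();            /* pause() is a cancellation point */
    return NULL;
}

int
main(void)
{
    pthread_t th;

    pthread_create(&th, NULL, victim, NULL);
    pthread_cancel(th);

    /* Passing NULL is legal; the guard below keeps _pthread_join_cleanup()
     * from writing PTHREAD_CANCELED through a NULL value_ptr. */
    pthread_join(th, NULL);
    return 0;
}
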
@@ -2053,7 +2294,7 @@ _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
         *value_ptr = thread->exit_value;
     if (conforming) {
         if ((thread->cancel_state & (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) ==
-            (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING)) {
+            (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING) && (value_ptr != NULL)) {
             *value_ptr = PTHREAD_CANCELED;
         }
     }
@@ -2065,13 +2306,13 @@ _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
     }
     if (thread->freeStackOnExit) {
         thread->sig = _PTHREAD_NO_SIG;
-#if WQ_TRACE
+#if PTH_TRACE
         __kdebug_trace(0x9000028, thread, 0, 0, 2, 0);
 #endif
-        vm_deallocate(mach_task_self(), thread, pthreadsize);
+        vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)thread, pthreadsize);
     } else {
         thread->sig = _PTHREAD_NO_SIG;
-#if WQ_TRACE
+#if PTH_TRACE
         __kdebug_trace(0x9000028, thread, 0, 0, 3, 0);
 #endif
         free(thread);
@@ -2130,19 +2371,16 @@ _pthread_lookup_thread(pthread_t thread, mach_port_t * portp, int only_joinable)
 int
 pthread_workqueue_attr_init_np(pthread_workqueue_attr_t * attrp)
 {
-    attrp->stacksize = DEFAULT_STACK_SIZE;
-    attrp->istimeshare = 1;
-    attrp->importance = 0;
-    attrp->affinity = 0;
-    attrp->queueprio = WORK_QUEUE_NORMALIZER;
-    attrp->sig = PTHEAD_WRKQUEUE_ATTR_SIG;
+    attrp->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
+    attrp->sig = PTHREAD_WORKQUEUE_ATTR_SIG;
+    attrp->overcommit = 0;
     return(0);
 }
 
 int
 pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
 {
-    if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG)
+    if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG)
     {
         return (0);
     } else {
@@ -2151,124 +2389,65 @@ pthread_workqueue_attr_destroy_np(pthread_workqueue_attr_t * attr)
     }
 }
 
-#ifdef NOTYET /* [ */
-int
-pthread_workqueue_attr_getstacksize_np(const pthread_workqueue_attr_t * attr, size_t * stacksizep)
-{
-    if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
-        *stacksizep = attr->stacksize;
-        return (0);
-    } else {
-        return (EINVAL); /* Not an attribute structure! */
-    }
-}
-
-int
-pthread_workqueue_attr_setstacksize_np(pthread_workqueue_attr_t * attr, size_t stacksize)
-{
-    if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && ((stacksize % vm_page_size) == 0) && (stacksize >= PTHREAD_STACK_MIN)) {
-        attr->stacksize = stacksize;
-        return (0);
-    } else {
-        return (EINVAL); /* Not an attribute structure! */
-    }
-}
-
-
 int
-pthread_workqueue_attr_getthreadtimeshare_np(const pthread_workqueue_attr_t * attr, int * istimesahrep)
+pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
 {
-    if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
-        *istimesahrep = attr->istimeshare;
+    if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
+        *qpriop = attr->queueprio;
         return (0);
     } else {
         return (EINVAL); /* Not an attribute structure! */
     }
 }
 
-int
-pthread_workqueue_attr_settthreadtimeshare_np(pthread_workqueue_attr_t * attr, int istimeshare)
-{
-    if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
-        if (istimeshare != 0)
-            attr->istimeshare = istimeshare;
-        else
-            attr->istimeshare = 0;
-        return (0);
-    } else {
-        return (EINVAL); /* Not an attribute structure! */
-    }
-}
 
 int
-pthread_workqueue_attr_getthreadimportance_np(const pthread_workqueue_attr_t * attr, int * importancep)
+pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
 {
-    if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
-        *importancep = attr->importance;
-        return (0);
-    } else {
-        return (EINVAL); /* Not an attribute structure! */
-    }
-}
+    int error = 0;
 
-int
-pthread_workqueue_attr_settthreadimportance_np(pthread_workqueue_attr_t * attr, int importance)
-{
-    if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
-        attr->importance = importance;
-        return (0);
+    if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
+        switch(qprio) {
+            case WORKQ_HIGH_PRIOQUEUE:
+            case WORKQ_DEFAULT_PRIOQUEUE:
+            case WORKQ_LOW_PRIOQUEUE:
+            case WORKQ_BG_PRIOQUEUE:
+                attr->queueprio = qprio;
+                break;
+            default:
+                error = EINVAL;
+        }
     } else {
-        return (EINVAL); /* Not an attribute structure! */
+        error = EINVAL;
     }
+    return (error);
 }
 
-int
-pthread_workqueue_attr_getthreadaffinity_np(const pthread_workqueue_attr_t * attr, int * affinityp)
-{
-    if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
-        *affinityp = attr->affinity;
-        return (0);
-    } else {
-        return (EINVAL); /* Not an attribute structure! */
-    }
-}
 
 int
-pthread_workqueue_attr_settthreadaffinity_np(pthread_workqueue_attr_t * attr, int affinity)
+pthread_workqueue_attr_getovercommit_np(const pthread_workqueue_attr_t * attr, int * ocommp)
 {
-    if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG){
-        attr->affinity = affinity;
+    if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
+        *ocommp = attr->overcommit;
         return (0);
     } else {
         return (EINVAL); /* Not an attribute structure! */
     }
 }
-#endif /* NOTYET ] */
 
 int
-pthread_workqueue_attr_getqueuepriority_np(const pthread_workqueue_attr_t * attr, int * qpriop)
+pthread_workqueue_attr_setovercommit_np(pthread_workqueue_attr_t * attr, int ocomm)
 {
-    if (attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) {
-        *qpriop = (attr->queueprio - WORK_QUEUE_NORMALIZER);
-        return (0);
-    } else {
-        return (EINVAL); /* Not an attribute structure! */
-    }
-}
+    int error = 0;
 
-int
-pthread_workqueue_attr_setqueuepriority_np(pthread_workqueue_attr_t * attr, int qprio)
-{
-    /* only -2 to +2 is valid */
-    if ((attr->sig == PTHEAD_WRKQUEUE_ATTR_SIG) && (qprio <= 2) && (qprio >= -2)) {
-        attr->queueprio = (qprio + WORK_QUEUE_NORMALIZER);
-        return (0);
+    if (attr->sig == PTHREAD_WORKQUEUE_ATTR_SIG) {
+        attr->overcommit = ocomm;
     } else {
-        return (EINVAL); /* Not an attribute structure! */
+        error = EINVAL;
     }
+    return (error);
 }
 
-
 /* XXXXXXXXXXXXX Pthread Workqueue support routines XXXXXXXXXXXXXXXXXX */
 
 static void
@@ -2295,50 +2474,117 @@ pthread_workqueue_init_np()
     return(ret);
 }
 
-static int
-_pthread_work_internal_init(void)
+int
+pthread_workqueue_requestconcurrency_np(int queue, int request_concurrency)
 {
-    int i, error;
-    pthread_workqueue_head_t headp;
-    pthread_workitem_t witemp;
-    pthread_workqueue_t wq;
+    int error = 0;
 
-    if (kernel_workq_setup == 0) {
-#if defined(__i386__) || defined(__x86_64__)
-        __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)));
-#else
-        __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)));
-#endif
+    if (queue < 0 || queue > WORKQ_NUM_PRIOQUEUE)
+        return(EINVAL);
 
-        _pthread_wq_attr_default.stacksize = DEFAULT_STACK_SIZE;
-        _pthread_wq_attr_default.istimeshare = 1;
-        _pthread_wq_attr_default.importance = 0;
-        _pthread_wq_attr_default.affinity = 0;
-        _pthread_wq_attr_default.queueprio = WORK_QUEUE_NORMALIZER;
-        _pthread_wq_attr_default.sig = PTHEAD_WRKQUEUE_ATTR_SIG;
+    error =__workq_kernreturn(WQOPS_THREAD_SETCONC, NULL, request_concurrency, queue);
 
-        for( i = 0; i< WQ_NUM_PRIO_QS; i++) {
-            headp = __pthread_wq_head_tbl[i];
-            TAILQ_INIT(&headp->wqhead);
-            headp->next_workq = 0;
-        }
+    if (error == -1)
+        return(errno);
+    return(0);
+}
 
-        /* create work item and workqueue pools */
-        witemp = (struct _pthread_workitem *)malloc(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
-        bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE));
-        for (i = 0; i < WORKITEM_POOL_SIZE; i++) {
-            TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
-        }
-        wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
-        bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
-        for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
-            TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
-        }
+void
+pthread_workqueue_atfork_prepare(void)
+{
+    /*
+     * NOTE: Any workq additions here
+     * should be for i386,x86_64 only
+     */
+    dispatch_atfork_prepare();
+}
+
+void
+pthread_workqueue_atfork_parent(void)
+{
+    /*
+     * NOTE: Any workq additions here
+     * should be for i386,x86_64 only
+     */
+    dispatch_atfork_parent();
+}
+
+void
+pthread_workqueue_atfork_child(void)
+{
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+    /*
+     * NOTE: workq additions here
+     * are for i386,x86_64 only as
+     * ppc and arm do not support it
+     */
+    __workqueue_list_lock = OS_SPINLOCK_INIT;
+    if (kernel_workq_setup != 0){
+        kernel_workq_setup = 0;
+        _pthread_work_internal_init();
+    }
+#endif
+    dispatch_atfork_child();
+}
+
+static int
+_pthread_work_internal_init(void)
+{
+    int i, error;
+    pthread_workqueue_head_t headp;
+    pthread_workqueue_t wq;
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+    pthread_t self = pthread_self();
+#endif
+
+    if (kernel_workq_setup == 0) {
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+        __bsdthread_register(thread_start, start_wqthread, round_page(sizeof(struct _pthread)), _pthread_start, &workq_targetconc[0], (uintptr_t)(&self->tsd[__PTK_LIBDISPATCH_KEY0]) - (uintptr_t)(&self->tsd[0]));
+#else
+        __bsdthread_register(_pthread_start, _pthread_wqthread, round_page(sizeof(struct _pthread)),NULL,NULL,0);
+#endif
+
+        _pthread_wq_attr_default.queueprio = WORKQ_DEFAULT_PRIOQUEUE;
+        _pthread_wq_attr_default.sig = PTHREAD_WORKQUEUE_ATTR_SIG;
+
+        for( i = 0; i< WORKQ_NUM_PRIOQUEUE; i++) {
+            headp = __pthread_wq_head_tbl[i];
+            TAILQ_INIT(&headp->wqhead);
+            headp->next_workq = 0;
+        }
+
+        __workqueue_pool_ptr = NULL;
+        __workqueue_pool_size = round_page(sizeof(struct _pthread_workitem) * WORKITEM_POOL_SIZE);
+
+        __workqueue_pool_ptr = (struct _pthread_workitem *)mmap(NULL, __workqueue_pool_size,
+            PROT_READ|PROT_WRITE,
+            MAP_ANON | MAP_PRIVATE,
+            0,
+            0);
+
+        if (__workqueue_pool_ptr == MAP_FAILED) {
+            /* Not expected to fail, if it does, always malloc for work items */
+            __workqueue_nitems = WORKITEM_POOL_SIZE;
+            __workqueue_pool_ptr = NULL;
+        } else
+            __workqueue_nitems = 0;
+
+        /* sets up the workitem pool */
+        grow_workitem();
+
+        /* since the size is less than a page, leaving this in malloc pool */
+        wq = (struct _pthread_workqueue *)malloc(sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE);
+        bzero(wq, (sizeof(struct _pthread_workqueue) * WORKQUEUE_POOL_SIZE));
+        for (i = 0; i < WORKQUEUE_POOL_SIZE; i++) {
+            TAILQ_INSERT_TAIL(&__pthread_workqueue_pool_head, &wq[i], wq_list);
+        }
 
         if (error = __workq_open()) {
             TAILQ_INIT(&__pthread_workitem_pool_head);
             TAILQ_INIT(&__pthread_workqueue_pool_head);
-            free(witemp);
+            if (__workqueue_pool_ptr != NULL) {
+                munmap((void *)__workqueue_pool_ptr, __workqueue_pool_size);
+            }
             free(wq);
             return(ENOMEM);
         }
@@ -2355,13 +2601,25 @@ alloc_workitem(void)
     pthread_workitem_t witem;
 
     if (TAILQ_EMPTY(&__pthread_workitem_pool_head)) {
-        workqueue_list_unlock();
-        witem = malloc(sizeof(struct _pthread_workitem));
-        workqueue_list_lock();
-    } else {
-        witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
-        TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
+        /* the chunk size is set so some multiple of it is pool size */
+        if (__workqueue_nitems < WORKITEM_POOL_SIZE) {
+            grow_workitem();
+        } else {
+            workqueue_list_unlock();
+            witem = malloc(sizeof(struct _pthread_workitem));
+            workqueue_list_lock();
+            witem->fromcache = 0;
+            goto out;
+        }
     }
+    witem = TAILQ_FIRST(&__pthread_workitem_pool_head);
+    TAILQ_REMOVE(&__pthread_workitem_pool_head, witem, item_entry);
+    witem->fromcache = 1;
+out:
+    witem->flags = 0;
+    witem->item_entry.tqe_next = 0;
+    witem->item_entry.tqe_prev = 0;
+    user_workitem_count++;
     return(witem);
 }
 
@@ -2369,7 +2627,27 @@ alloc_workitem(void)
 static void
 free_workitem(pthread_workitem_t witem)
 {
-    TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
+    user_workitem_count--;
+    witem->flags = 0;
+    if (witem->fromcache != 0)
+        TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, witem, item_entry);
+    else
+        free(witem);
+}
+
+static void
+grow_workitem(void)
+{
+    pthread_workitem_t witemp;
+    int i;
+
+    witemp = &__workqueue_pool_ptr[__workqueue_nitems];
+    bzero(witemp, (sizeof(struct _pthread_workitem) * WORKITEM_CHUNK_SIZE));
+    for (i = 0; i < WORKITEM_CHUNK_SIZE; i++) {
+        witemp[i].fromcache = 1;
+        TAILQ_INSERT_TAIL(&__pthread_workitem_pool_head, &witemp[i], item_entry);
+    }
+    __workqueue_nitems += WORKITEM_CHUNK_SIZE;
 }
 
 /* This routine is called with list lock held */
@@ -2403,32 +2681,29 @@ _pthread_workq_init(pthread_workqueue_t wq, const pthread_workqueue_attr_t * att
 {
     bzero(wq, sizeof(struct _pthread_workqueue));
     if (attr != NULL) {
-        wq->stacksize = attr->stacksize;
-        wq->istimeshare = attr->istimeshare;
-        wq->importance = attr->importance;
-        wq->affinity = attr->affinity;
         wq->queueprio = attr->queueprio;
+        wq->overcommit = attr->overcommit;
     } else {
-        wq->stacksize = DEFAULT_STACK_SIZE;
-        wq->istimeshare = 1;
-        wq->importance = 0;
-        wq->affinity = 0;
-        wq->queueprio = WORK_QUEUE_NORMALIZER;
+        wq->queueprio = WORKQ_DEFAULT_PRIOQUEUE;
+        wq->overcommit = 0;
     }
     LOCK_INIT(wq->lock);
     wq->flags = 0;
     TAILQ_INIT(&wq->item_listhead);
     TAILQ_INIT(&wq->item_kernhead);
+#if WQ_LISTTRACE
+    __kdebug_trace(0x90080ac, wq, &wq->item_listhead, wq->item_listhead.tqh_first, wq->item_listhead.tqh_last, 0);
+#endif
     wq->wq_list.tqe_next = 0;
     wq->wq_list.tqe_prev = 0;
-    wq->sig = PTHEAD_WRKQUEUE_SIG;
+    wq->sig = PTHREAD_WORKQUEUE_SIG;
     wq->headp = __pthread_wq_head_tbl[wq->queueprio];
 }
 
 int
 valid_workq(pthread_workqueue_t workq)
 {
-    if (workq->sig == PTHEAD_WRKQUEUE_SIG)
+    if (workq->sig == PTHREAD_WORKQUEUE_SIG)
         return(1);
     else
         return(0);
@@ -2444,10 +2719,13 @@ pick_nextworkqueue_droplock()
     pthread_workqueue_t workq;
     pthread_workqueue_t nworkq = NULL;
 
+#if WQ_TRACE
+    __kdebug_trace(0x9008098, kernel_workq_count, 0, 0, 0, 0);
+#endif
 loop:
     while (kernel_workq_count < KERNEL_WORKQ_ELEM_MAX) {
         found = 0;
-        for (i = 0; i < WQ_NUM_PRIO_QS; i++)  {
+        for (i = 0; i < WORKQ_NUM_PRIOQUEUE; i++)  {
             wqreadyprio = i;    /* because there is nothing else higher to run */
             headp = __pthread_wq_head_tbl[i];
@@ -2462,6 +2740,9 @@ loop:
             headp->next_workq = TAILQ_NEXT(workq, wq_list);
             if (headp->next_workq == NULL)
                 headp->next_workq = TAILQ_FIRST(&headp->wqhead);
+#if WQ_TRACE
+            __kdebug_trace(0x9008098, kernel_workq_count, workq, 0, 1, 0);
+#endif
             val = post_nextworkitem(workq);
 
             if (val != 0) {
@@ -2504,7 +2785,7 @@ loop:
 static int
 post_nextworkitem(pthread_workqueue_t workq)
 {
-    int error;
+    int error, prio;
     pthread_workitem_t witem;
     pthread_workqueue_head_t headp;
     void (*func)(pthread_workqueue_t, void *);
@@ -2512,12 +2793,24 @@ post_nextworkitem(pthread_workqueue_t workq)
     if ((workq->flags & PTHREAD_WORKQ_SUSPEND) == PTHREAD_WORKQ_SUSPEND) {
         return(0);
     }
+#if WQ_TRACE
+    __kdebug_trace(0x900809c, workq, workq->item_listhead.tqh_first, 0, 1, 0);
+#endif
     if (TAILQ_EMPTY(&workq->item_listhead)) {
         return(0);
     }
+    if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON)
+        return(0);
+
     witem = TAILQ_FIRST(&workq->item_listhead);
     headp = workq->headp;
+#if WQ_TRACE
+    __kdebug_trace(0x900809c, workq, witem, 0, 0xee, 0);
+#endif
     if ((witem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) {
+#if WQ_TRACE
+        __kdebug_trace(0x9000064, workq, 0, 0, 2, 0);
+#endif
 
         if ((witem->flags & PTH_WQITEM_APPLIED) != 0) {
             return(0);
@@ -2537,14 +2830,26 @@ post_nextworkitem(pthread_workqueue_t workq)
             __kdebug_trace(0x9000064, 2, workq->barrier_count, 0, 0, 0);
 #endif
         if (witem->func != NULL) {
+            /* since we are going to drop list lock */
+            witem->flags |= PTH_WQITEM_APPLIED;
+            workq->flags |= PTHREAD_WORKQ_BARRIER_ON;
             workqueue_list_unlock();
-            func = witem->func;
+            func = (void (*)(pthread_workqueue_t, void *))witem->func;
             (*func)(workq, witem->func_arg);
+#if WQ_TRACE
+            __kdebug_trace(0x9000064, 3, workq->barrier_count, 0, 0, 0);
+#endif
             workqueue_list_lock();
+            workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
         }
         TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
-        witem->flags = 0;
+#if WQ_LISTTRACE
+        __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
         free_workitem(witem);
+#if WQ_TRACE
+        __kdebug_trace(0x9000064, 4, workq->barrier_count, 0, 0, 0);
+#endif
         return(1);
     }
     } else if ((witem->flags & PTH_WQITEM_DESTROY) == PTH_WQITEM_DESTROY) {
@@ -2557,16 +2862,18 @@ post_nextworkitem(pthread_workqueue_t workq)
         witem->flags |= PTH_WQITEM_APPLIED;
         workq->flags |= (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON);
         workq->barrier_count = workq->kq_count;
-        workq->term_callback = witem->func;
+        workq->term_callback = (void (*)(struct _pthread_workqueue *,void *))witem->func;
         workq->term_callarg = witem->func_arg;
         TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+        __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
         if ((TAILQ_EMPTY(&workq->item_listhead)) && (workq->kq_count == 0)) {
             if (!(TAILQ_EMPTY(&workq->item_kernhead))) {
 #if WQ_TRACE
                 __kdebug_trace(0x900006c, workq, workq->kq_count, 0, 0xff, 0);
 #endif
             }
-            witem->flags = 0;
             free_workitem(witem);
             workq->flags |= PTHREAD_WORKQ_DESTROYED;
 #if WQ_TRACE
@@ -2590,8 +2897,12 @@ post_nextworkitem(pthread_workqueue_t workq)
             }
             free_workqueue(workq);
             return(1);
-        } else
+        } else {
             TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+            __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
+        }
 #if WQ_TRACE
         __kdebug_trace(0x9000068, 2, workq->barrier_count, 0, 0, 0);
 #endif
@@ -2601,6 +2912,9 @@ post_nextworkitem(pthread_workqueue_t workq)
         __kdebug_trace(0x9000060, witem, workq, witem->func_arg, 0xfff, 0);
 #endif
         TAILQ_REMOVE(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+        __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
         TAILQ_INSERT_TAIL(&workq->item_kernhead, witem, item_entry);
         if ((witem->flags & PTH_WQITEM_KERN_COUNT) == 0) {
             workq->kq_count++;
@@ -2608,7 +2922,13 @@ post_nextworkitem(pthread_workqueue_t workq)
         }
         OSAtomicIncrement32(&kernel_workq_count);
         workqueue_list_unlock();
-        if (( error =__workq_ops(WQOPS_QUEUE_ADD, witem, 0)) == -1) {
+
+        prio = workq->queueprio;
+        if (workq->overcommit != 0) {
+            prio |= WORKQUEUE_OVERCOMMIT;
+        }
+
+        if (( error =__workq_kernreturn(WQOPS_QUEUE_ADD, witem, workq->affinity, prio)) == -1) {
             OSAtomicDecrement32(&kernel_workq_count);
             workqueue_list_lock();
 #if WQ_TRACE
@@ -2616,6 +2936,9 @@ post_nextworkitem(pthread_workqueue_t workq)
 #endif
             TAILQ_REMOVE(&workq->item_kernhead, witem, item_entry);
             TAILQ_INSERT_HEAD(&workq->item_listhead, witem, item_entry);
+#if WQ_LISTTRACE
+            __kdebug_trace(0x90080b0, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
             if ((workq->flags & (PTHREAD_WORKQ_BARRIER_ON | PTHREAD_WORKQ_TERM_ON)) != 0)
                 workq->flags |= PTHREAD_WORKQ_REQUEUED;
         } else
@@ -2628,7 +2951,7 @@ post_nextworkitem(pthread_workqueue_t workq)
     /* no one should come here */
 #if 1
     printf("error in logic for next workitem\n");
-    abort();
+    LIBC_ABORT("error in logic for next workitem");
 #endif
     return(0);
 }
@@ -2639,7 +2962,9 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_w
     int ret;
     pthread_attr_t *attrs = &_pthread_attr_default;
     pthread_workqueue_t workq;
+#if WQ_DEBUG
     pthread_t pself;
+#endif
 
     workq = item->workq;
 
@@ -2647,37 +2972,45 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_w
         /* reuse is set to 0, when a thread is newly created to run a workitem */
         _pthread_struct_init(self, attrs, stackaddr, DEFAULT_STACK_SIZE, 1, 1);
         self->wqthread = 1;
+        self->wqkillset = 0;
         self->parentcheck = 1;
 
         /* These are not joinable threads */
         self->detached &= ~PTHREAD_CREATE_JOINABLE;
         self->detached |= PTHREAD_CREATE_DETACHED;
-#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
         _pthread_set_self(self);
 #endif
 #if WQ_TRACE
         __kdebug_trace(0x9000050, self, item, item->func_arg, 0, 0);
 #endif
         self->kernel_thread = kport;
-        self->fun = item->func;
+        self->fun = (void *(*)(void *))item->func;
         self->arg = item->func_arg;
         /* Add to the pthread list */
         LOCK(_pthread_list_lock);
         TAILQ_INSERT_TAIL(&__pthread_head, self, plist);
-#if WQ_TRACE
+#if PTH_LISTTRACE
         __kdebug_trace(0x900000c, self, 0, 0, 10, 0);
 #endif
         _pthread_count++;
         UNLOCK(_pthread_list_lock);
+
+#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
+        if( (self->thread_id = __thread_selfid()) == (__uint64_t)-1)
+            printf("Failed to set thread_id in pthread_wqthread\n");
+#endif
+
     } else {
         /* reuse is set to 1, when a thread is reused to run another work item */
 #if WQ_TRACE
         __kdebug_trace(0x9000054, self, item, item->func_arg, 0, 0);
 #endif
         /* reset all tsd from 1 to KEYS_MAX */
-        _pthread_tsd_reinit(self);
+        if (self == NULL)
+            LIBC_ABORT("_pthread_wqthread: pthread %p setup to be NULL", self);
 
-        self->fun = item->func;
+        self->fun = (void *(*)(void *))item->func;
         self->arg = item->func_arg;
     }
@@ -2699,7 +3032,7 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_w
         pself = pthread_self();
         if (self != pself) {
             printf("(3)pthread_self not set in reuse: pself %p, passed in %p\n", pself, self);
-            abort();
+            LIBC_ABORT("(3)pthread_self not set in reuse: pself %p, passed in %p", pself, self);
         }
     }
 #endif /* WQ_DEBUG */
@@ -2708,8 +3041,15 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_w
     self->cur_workitem = item;
     OSAtomicDecrement32(&kernel_workq_count);
 
-    ret = (*self->fun)(self->arg);
+    ret = (int)(intptr_t)(*self->fun)(self->arg);
 
+    /* If we reach here without going through the above initialization path then don't go through
+     * with the teardown code path ( e.g. setjmp/longjmp ). Instead just exit this thread.
+     */
+    if(self != pthread_self()) {
+        pthread_exit(PTHREAD_CANCELED);
+    }
+
     workqueue_exit(self, workq, item);
 }
 
@@ -2717,7 +3057,6 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void * stackaddr, pthread_w
 static void
 workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t item)
 {
-    pthread_attr_t *attrs = &_pthread_attr_default;
     pthread_workitem_t baritem;
     pthread_workqueue_head_t headp;
     void (*func)(pthread_workqueue_t, void *);
@@ -2729,7 +3068,6 @@ workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t ite
 #if WQ_TRACE
     __kdebug_trace(0x9000070, self, 1, item->func_arg, workq->kq_count, 0);
 #endif
-    item->flags = 0;
     free_workitem(item);
 
     if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
@@ -2747,12 +3085,14 @@ workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t ite
             /* if the front item is a barrier and call back is registered, run that */
             if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER) && (baritem->func != NULL)) {
                 workqueue_list_unlock();
-                func = baritem->func;
+                func = (void (*)(pthread_workqueue_t, void *))baritem->func;
                 (*func)(workq, baritem->func_arg);
                 workqueue_list_lock();
             }
             TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
-            baritem->flags = 0;
+#if WQ_LISTTRACE
+            __kdebug_trace(0x90080a8, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
             free_workitem(baritem);
             workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
 #if WQ_TRACE
@@ -2801,100 +3141,13 @@ workqueue_exit(pthread_t self, pthread_workqueue_t workq, pthread_workitem_t ite
 static void
 _pthread_workq_return(pthread_t self)
 {
-    struct __darwin_pthread_handler_rec *handler;
-    int value = 0;
-    int * value_ptr=&value;
-
-    /* set cancel state to disable and type to deferred */
-    _pthread_setcancelstate_exit(self, value_ptr, __unix_conforming);
-
-    /* Make this thread not to receive any signals */
-    __disable_threadsignal(1);
-
-    while ((handler = self->__cleanup_stack) != 0)
-    {
-        (handler->__routine)(handler->__arg);
-        self->__cleanup_stack = handler->__next;
-    }
-    _pthread_tsd_cleanup(self);
-
-    __workq_ops(WQOPS_THREAD_RETURN, NULL, 0);
+    __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
 
     /* This is the way to terminate the thread */
     _pthread_exit(self, NULL);
 }
 
-/* returns 0 if it handles it, otherwise 1 */
-static int
-handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item)
-{
-    pthread_workitem_t baritem;
-    pthread_workqueue_head_t headp;
-    void (*func)(pthread_workqueue_t, void *);
-
-    if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
-        workq->barrier_count--;
-        if (workq->barrier_count <= 0 ) {
-            /* Need to remove barrier item from the list */
-            baritem = TAILQ_FIRST(&workq->item_listhead);
-#if WQ_DEBUG
-            if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
-                printf("Incorect bar item being removed in barrier processing\n");
-#endif /* WQ_DEBUG */
-            /* if the front item is a barrier and call back is registered, run that */
-            if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER)
-                && (baritem->func != NULL)) {
-                workqueue_list_unlock();
-                func = baritem->func;
-                (*func)(workq, baritem->func_arg);
-                workqueue_list_lock();
-            }
-            TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
-            baritem->flags = 0;
-            free_workitem(baritem);
-            item->flags = 0;
-            free_workitem(item);
-            workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
-#if WQ_TRACE
-/* returns 0 if it handles it, otherwise 1 */
-static int
-handle_removeitem(pthread_workqueue_t workq, pthread_workitem_t item)
-{
-	pthread_workitem_t baritem;
-	pthread_workqueue_head_t headp;
-	void (*func)(pthread_workqueue_t, void *);
-
-	if ((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == PTHREAD_WORKQ_BARRIER_ON) {
-		workq->barrier_count--;
-		if (workq->barrier_count <= 0 ) {
-			/* Need to remove barrier item from the list */
-			baritem = TAILQ_FIRST(&workq->item_listhead);
-#if WQ_DEBUG
-			if ((baritem->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_DESTROY| PTH_WQITEM_APPLIED)) == 0)
-				printf("Incorect bar item being removed in barrier processing\n");
-#endif /* WQ_DEBUG */
-			/* if the front item is a barrier and call back is registered, run that */
-			if (((baritem->flags & PTH_WQITEM_BARRIER) == PTH_WQITEM_BARRIER)
-				&& (baritem->func != NULL)) {
-				workqueue_list_unlock();
-				func = baritem->func;
-				(*func)(workq, baritem->func_arg);
-				workqueue_list_lock();
-			}
-			TAILQ_REMOVE(&workq->item_listhead, baritem, item_entry);
-			baritem->flags = 0;
-			free_workitem(baritem);
-			item->flags = 0;
-			free_workitem(item);
-			workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
-#if WQ_TRACE
-			__kdebug_trace(0x9000058, pthread_self(), item, item->func_arg, 0, 0);
-#endif
-			if ((workq->flags & PTHREAD_WORKQ_TERM_ON) != 0) {
-				headp = __pthread_wq_head_tbl[workq->queueprio];
-				workq->flags |= PTHREAD_WORKQ_DESTROYED;
-#if WQ_TRACE
-				__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 2, 0);
-#endif
-				if (headp->next_workq == workq) {
-					headp->next_workq = TAILQ_NEXT(workq, wq_list);
-					if (headp->next_workq == NULL) {
-						headp->next_workq = TAILQ_FIRST(&headp->wqhead);
-						if (headp->next_workq == workq)
-							headp->next_workq = NULL;
-					}
-				}
-				TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
-				workq->sig = 0;
-				if (workq->term_callback != NULL) {
-					workqueue_list_unlock();
-					(*workq->term_callback)(workq, workq->term_callarg);
-					workqueue_list_lock();
-				}
-				free_workqueue(workq);
-				pick_nextworkqueue_droplock();
-				return(0);
-			} else {
-				/* if there are higher prio schedulabel item reset to wqreadyprio */
-				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead))))
-					wqreadyprio = workq->queueprio;
-				free_workitem(item);
-				pick_nextworkqueue_droplock();
-				return(0);
-			}
-		}
-	}
-	return(1);
-}
 
 /* XXXXXXXXXXXXX Pthread Workqueue functions XXXXXXXXXXXXXXXXXX */
 
 int
@@ -2903,7 +3156,12 @@ pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueu
 	pthread_workqueue_t wq;
 	pthread_workqueue_head_t headp;
 
-	if ((attr != NULL) && (attr->sig != PTHEAD_WRKQUEUE_ATTR_SIG)) {
+#if defined(__ppc__)
+	IF_ROSETTA() {
+		return(ENOTSUP);
+	}
+#endif
+	if ((attr != NULL) && (attr->sig != PTHREAD_WORKQUEUE_ATTR_SIG)) {
 		return(EINVAL);
 	}
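
For context, here is how a caller might exercise pthread_workqueue_create_np() as revised above. This is an Apple-private API; the sketch assumes the private prototypes are in scope, and the helper name is hypothetical:

    #include <errno.h>
    #include <stdio.h>

    int create_default_queue(pthread_workqueue_t *qp)
    {
        /* A NULL attribute pointer skips the attr->sig check, so a default
         * queue cannot fail the (now correctly spelled)
         * PTHREAD_WORKQUEUE_ATTR_SIG comparison. */
        int err = pthread_workqueue_create_np(qp, NULL);
        if (err != 0)
            fprintf(stderr, "pthread_workqueue_create_np: %d%s\n", err,
                err == ENOTSUP ? " (translated ppc process)" : "");
        return err;
    }
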
@@ -2937,72 +3195,7 @@ pthread_workqueue_create_np(pthread_workqueue_t * workqp, const pthread_workqueu
 }
 
 int
-pthread_workqueue_destroy_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg)
-{
-	pthread_workitem_t witem;
-	pthread_workqueue_head_t headp;
-
-	if (valid_workq(workq) == 0) {
-		return(EINVAL);
-	}
-
-	workqueue_list_lock();
-
-	/*
-	 * Allocate the workitem here as it can drop the lock.
-	 * Also we can evaluate the workqueue state only once.
-	 */
-	witem = alloc_workitem();
-	witem->item_entry.tqe_next = 0;
-	witem->item_entry.tqe_prev = 0;
-	witem->func = callback_func;
-	witem->func_arg = callback_arg;
-	witem->flags = PTH_WQITEM_DESTROY;
-
-	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED)) == 0) {
-		workq->flags |= PTHREAD_WORKQ_IN_TERMINATE;
-		/* If nothing queued or running, destroy now */
-		if ((TAILQ_EMPTY(&workq->item_listhead)) && (TAILQ_EMPTY(&workq->item_kernhead))) {
-			workq->flags |= (PTHREAD_WORKQ_TERM_ON | PTHREAD_WORKQ_DESTROYED);
-			headp = __pthread_wq_head_tbl[workq->queueprio];
-			workq->term_callback = callback_func;
-			workq->term_callarg = callback_arg;
-			if (headp->next_workq == workq) {
-				headp->next_workq = TAILQ_NEXT(workq, wq_list);
-				if (headp->next_workq == NULL) {
-					headp->next_workq = TAILQ_FIRST(&headp->wqhead);
-					if (headp->next_workq == workq)
-						headp->next_workq = NULL;
-				}
-			}
-			TAILQ_REMOVE(&headp->wqhead, workq, wq_list);
-			workq->sig = 0;
-			free_workitem(witem);
-			if (workq->term_callback != NULL) {
-				workqueue_list_unlock();
-				(*workq->term_callback)(workq, workq->term_callarg);
-				workqueue_list_lock();
-			}
-#if WQ_TRACE
-			__kdebug_trace(0x900006c, workq, workq->kq_count, 0, 3, 0);
-#endif
-			free_workqueue(workq);
-			workqueue_list_unlock();
-			return(0);
-		}
-		TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
-	} else {
-		free_workitem(witem);
-		workqueue_list_unlock();
-		return(EINPROGRESS);
-	}
-	workqueue_list_unlock();
-	return(0);
-}
-
-
-int
-pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep)
+pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(void *), void * workitem_arg, pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp)
 {
 	pthread_workitem_t witem;
 
@@ -3019,10 +3212,7 @@ pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(v
 	witem = alloc_workitem();
 	witem->func = workitem_func;
 	witem->func_arg = workitem_arg;
-	witem->flags = 0;
 	witem->workq = workq;
-	witem->item_entry.tqe_next = 0;
-	witem->item_entry.tqe_prev = 0;
 
 	/* alloc workitem can drop the lock, check the state */
 	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
@@ -3034,112 +3224,16 @@ pthread_workqueue_additem_np(pthread_workqueue_t workq, void ( *workitem_func)(v
 
 	if (itemhandlep != NULL)
 		*itemhandlep = (pthread_workitem_handle_t *)witem;
+	if (gencountp != NULL)
+		*gencountp = 0;
+#if WQ_TRACE
+	__kdebug_trace(0x9008090, witem, witem->func, witem->func_arg, workq, 0);
+#endif
 	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
-	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
-		wqreadyprio = workq->queueprio;
-
-	pick_nextworkqueue_droplock();
-
-	return(0);
-}
-
-int
-pthread_workqueue_removeitem_np(pthread_workqueue_t workq, pthread_workitem_handle_t itemhandle)
-{
-	pthread_workitem_t item, baritem;
-	pthread_workqueue_head_t headp;
-	int error;
+#if WQ_LISTTRACE
+	__kdebug_trace(0x90080a4, workq, &workq->item_listhead, workq->item_listhead.tqh_first, workq->item_listhead.tqh_last, 0);
+#endif
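
The widened pthread_workqueue_additem_np() above gains a generation-count out-parameter, which this revision always sets to 0. A usage sketch under the same assumptions as the previous example:

    #include <stdio.h>

    static void do_work(void *arg)
    {
        printf("item ran with %p\n", arg);
    }

    int queue_one_item(pthread_workqueue_t q, void *arg)
    {
        pthread_workitem_handle_t handle;
        unsigned int gencount;

        /* Either out-pointer may be NULL; the function checks both
         * before storing through them. */
        return pthread_workqueue_additem_np(q, do_work, arg, &handle, &gencount);
    }
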
-	if (valid_workq(workq) == 0) {
-		return(EINVAL);
-	}
-
-	workqueue_list_lock();
-	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
-		workqueue_list_unlock();
-		return(ESRCH);
-	}
-
-	TAILQ_FOREACH(item, &workq->item_listhead, item_entry) {
-		if (item == (pthread_workitem_t)itemhandle) {
-			TAILQ_REMOVE(&workq->item_listhead, item, item_entry);
-			if ((item->flags & (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) == (PTH_WQITEM_BARRIER | PTH_WQITEM_APPLIED)) {
-				workq->flags &= ~PTHREAD_WORKQ_BARRIER_ON;
-				workq->barrier_count = 0;
-				if ((workq->queueprio < wqreadyprio) && (!(TAILQ_EMPTY(&workq->item_listhead)))) {
-					wqreadyprio = workq->queueprio;
-				}
-			} else if ((item->flags & PTH_WQITEM_KERN_COUNT) == PTH_WQITEM_KERN_COUNT) {
-				workq->kq_count--;
-				item->flags |= PTH_WQITEM_REMOVED;
-				if (handle_removeitem(workq, item) == 0)
-					return(0);
-			}
-			item->flags |= PTH_WQITEM_NOTINLIST;
-			free_workitem(item);
-			workqueue_list_unlock();
-			return(0);
-		}
-	}
-
-	TAILQ_FOREACH(item, &workq->item_kernhead, item_entry) {
-		if (item == (pthread_workitem_t)itemhandle) {
-			workqueue_list_unlock();
-			if ((error = __workq_ops(WQOPS_QUEUE_REMOVE, item, 0)) == 0) {
-				workqueue_list_lock();
-				TAILQ_REMOVE(&workq->item_kernhead, item, item_entry);
-				OSAtomicDecrement32(&kernel_workq_count);
-				workq->kq_count--;
-				item->flags |= PTH_WQITEM_REMOVED;
-				if (handle_removeitem(workq, item) != 0) {
-					free_workitem(item);
-					pick_nextworkqueue_droplock();
-				}
-				return(0);
-			} else {
-				workqueue_list_unlock();
-				return(EBUSY);
-			}
-		}
-	}
-	workqueue_list_unlock();
-	return(EINVAL);
-}
-
-
-int
-pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func)(pthread_workqueue_t, void *), void * callback_arg, __unused int waitforcallback, pthread_workitem_handle_t *itemhandlep)
-{
-	pthread_workitem_t witem;
-
-	if (valid_workq(workq) == 0) {
-		return(EINVAL);
-	}
-
-	workqueue_list_lock();
-
-	/*
-	 * Allocate the workitem here as it can drop the lock.
-	 * Also we can evaluate the workqueue state only once.
-	 */
-	witem = alloc_workitem();
-	witem->item_entry.tqe_next = 0;
-	witem->item_entry.tqe_prev = 0;
-	witem->func = callback_func;
-	witem->func_arg = callback_arg;
-	witem->flags = PTH_WQITEM_BARRIER;
-
-	/* alloc workitem can drop the lock, check the state */
-	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
-		free_workitem(witem);
-		workqueue_list_unlock();
-		return(ESRCH);
-	}
-
-	if (itemhandlep != NULL)
-		*itemhandlep = (pthread_workitem_handle_t *)witem;
-
-	TAILQ_INSERT_TAIL(&workq->item_listhead, witem, item_entry);
 	if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
 		wqreadyprio = workq->queueprio;
 
@@ -3148,49 +3242,18 @@ pthread_workqueue_addbarrier_np(pthread_workqueue_t workq, void (* callback_func
 	return(0);
 }
 
-int
-pthread_workqueue_suspend_np(pthread_workqueue_t workq)
+int 
+pthread_workqueue_getovercommit_np(pthread_workqueue_t workq, unsigned int *ocommp)
 {
-	if (valid_workq(workq) == 0) {
-		return(EINVAL);
-	}
-	workqueue_list_lock();
-	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
-		workqueue_list_unlock();
-		return(ESRCH);
-	}
+	if (valid_workq(workq) == 0) {
+		return(EINVAL);
+	}
 
-	workq->flags |= PTHREAD_WORKQ_SUSPEND;
-	workq->suspend_count++;
-	workqueue_list_unlock();
+	if (ocommp != NULL)
+		*ocommp = workq->overcommit;
 
 	return(0);
 }
 
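
pthread_workqueue_getovercommit_np(), which lands above in place of the removed suspend/resume pair, validates the queue and then copies a single field without ever taking workqueue_list_lock(). A sketch of a caller, with the private prototype assumed:

    #include <stdio.h>

    int print_overcommit(pthread_workqueue_t q)
    {
        unsigned int overcommit = 0;
        int err = pthread_workqueue_getovercommit_np(q, &overcommit);

        if (err == 0)   /* EINVAL means q failed the valid_workq() check */
            printf("overcommit = %u\n", overcommit);
        return err;
    }

Since the function performs no list manipulation, a read of one word can safely skip the lock, unlike the suspend/resume paths it replaces.
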
-int
-pthread_workqueue_resume_np(pthread_workqueue_t workq)
-{
-	if (valid_workq(workq) == 0) {
-		return(EINVAL);
-	}
-	workqueue_list_lock();
-	if ((workq->flags & (PTHREAD_WORKQ_IN_TERMINATE | PTHREAD_WORKQ_DESTROYED)) != 0) {
-		workqueue_list_unlock();
-		return(ESRCH);
-	}
-
-	workq->suspend_count--;
-	if (workq->suspend_count <= 0) {
-		workq->flags &= ~PTHREAD_WORKQ_SUSPEND;
-		if (((workq->flags & PTHREAD_WORKQ_BARRIER_ON) == 0) && (workq->queueprio < wqreadyprio))
-			wqreadyprio = workq->queueprio;
-
-		pick_nextworkqueue_droplock();
-	} else
-		workqueue_list_unlock();
-
-
-	return(0);
-}
 
 #else /* !BUILDING_VARIANT ] [ */
 extern int __unix_conforming;
@@ -3210,8 +3273,7 @@ __posix_join_cleanup(void *arg)
 	int already_exited, res;
 	void * dummy;
 	semaphore_t death;
-	mach_port_t joinport;
-	int newstyle = 0;
+	int newstyle;
 
 	LOCK(thread->lock);
 	already_exited = (thread->detached & _PTHREAD_EXITED);
@@ -3221,7 +3283,7 @@ __posix_join_cleanup(void *arg)
 #if WQ_TRACE
 	__kdebug_trace(0x900002c, thread, newstyle, 0, 0, 0);
 #endif
-	if (newstyle = 0) {
+	if (newstyle == 0) {
 		death = thread->death;
 		if (!already_exited){
 			thread->joiner = (struct _pthread *)NULL;
@@ -3273,6 +3335,10 @@ pthread_cancel(pthread_t thread)
 	if (_pthread_lookup_thread(thread, NULL, 0) != 0)
 		return(ESRCH);
 
+	/* if the thread is a workqueue thread, then return error */
+	if (thread->wqthread != 0) {
+		return(ENOTSUP);
+	}
 #if __DARWIN_UNIX03
 	int state;
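
The __posix_join_cleanup() hunk above also fixes a classic C defect: "if (newstyle = 0)" assigns rather than compares, so the branch can never be taken and the flag is clobbered as a side effect. A standalone reproduction (not libc code):

    #include <stdio.h>

    int main(void)
    {
        int newstyle = 1;

        if (newstyle = 0)   /* BUG: assigns 0; condition is always false */
            puts("old-style join path");
        printf("after buggy test: newstyle = %d\n", newstyle);  /* 0 */

        newstyle = 1;
        if (newstyle == 0)  /* FIX: compares; the flag keeps its value */
            puts("old-style join path");
        printf("after fixed test: newstyle = %d\n", newstyle);  /* 1 */
        return 0;
    }

With -Wparentheses (enabled by -Wall in gcc and clang), the buggy form is flagged unless the assignment is wrapped in an extra pair of parentheses, which is how defects of this kind are normally caught.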