* Copyright (c) 2000-2013 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_LICENSE_HEADER_END@
*/
/*
- * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
- * All Rights Reserved
- *
- * Permission to use, copy, modify, and distribute this software and
- * its documentation for any purpose and without fee is hereby granted,
- * provided that the above copyright notice appears in all copies and
- * that both the copyright notice and this permission notice appear in
- * supporting documentation.
- *
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE.
- *
+ * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
* IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
*/
/*
* MkLinux
* POSIX Pthread Library
*/
+#include "resolver.h"
#include "internal.h"
#include "private.h"
#include "workqueue_private.h"
#include "introspection_private.h"
#include "qos_private.h"
+#include "tsd_private.h"
#include <stdlib.h>
#include <errno.h>
#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
-#include <libkern/OSAtomic.h>
#include <_simple.h>
#include <platform/string.h>
extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
extern void __exit(int) __attribute__((noreturn));
+extern int __pthread_kill(mach_port_t, int);
+
+extern struct _pthread _thread;
+extern int default_priority;
-static void (*exitf)(int) = __exit;
-__private_extern__ void* (*_pthread_malloc)(size_t) = NULL;
-__private_extern__ void (*_pthread_free)(void *) = NULL;
//
// Global variables
//
+static void (*exitf)(int) = __exit;
+PTHREAD_NOEXPORT void* (*_pthread_malloc)(size_t) = NULL;
+PTHREAD_NOEXPORT void (*_pthread_free)(void *) = NULL;
+
+#if PTHREAD_DEBUG_LOG
+#include <fcntl.h>
+int _pthread_debuglog;
+uint64_t _pthread_debugstart;
+#endif
+
// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;
// _pthread_list_lock protects _pthread_count, access to the __pthread_head
// list, and the parentcheck, childrun and childexit flags of the pthread
// structure. Externally imported by pthread_cancelable.c.
-__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
-__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
+PTHREAD_NOEXPORT _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
+PTHREAD_NOEXPORT struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
static int _pthread_count = 1;
#if PTHREAD_LAYOUT_SPI
mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
-#define pthreadsize ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
-static pthread_attr_t _pthread_attr_default = {0};
+/*
+ * The pthread may be offset into a page. In that event, by contract
+ * with the kernel, the allocation will extend PTHREAD_SIZE from the
+ * start of the next page. There's also one page worth of allocation
+ * below stacksize for the guard page. <rdar://problem/19941744>
+ */
+#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
+#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
+#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
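+/*
+ * Illustrative layout for a library-allocated thread (a hedged sketch; the
+ * actual addresses come from mach_vm_map and the page rounding above):
+ *
+ *  PTHREAD_ALLOCADDR                                              stackaddr
+ *  | guard page | stack, grows down (stacksize) | struct _pthread (PTHREAD_SIZE) |
+ *  |<------------------------ PTHREAD_ALLOCSIZE ------------------------------>|
+ */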
+
+static pthread_attr_t _pthread_attr_default = { };
// The main thread's pthread_t
-static struct _pthread _thread __attribute__((aligned(4096))) = {0};
+PTHREAD_NOEXPORT struct _pthread _thread __attribute__((aligned(64))) = { };
-static int default_priority;
+PTHREAD_NOEXPORT int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;
// work queue support data
static void (*__libdispatch_workerfunction)(pthread_priority_t) = NULL;
static void (*__libdispatch_keventfunction)(void **events, int *nevents) = NULL;
+static void (*__libdispatch_workloopfunction)(uint64_t *workloop_id, void **events, int *nevents) = NULL;
static int __libdispatch_offset;
// supported feature set
int __pthread_supported_features;
+static bool __workq_newapi;
//
// Function prototypes
static int _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack);
static int _pthread_deallocate(pthread_t t);
-static void _pthread_terminate(pthread_t t);
+static void _pthread_terminate_invoke(pthread_t t);
-static void _pthread_struct_init(pthread_t t,
+static inline void _pthread_struct_init(pthread_t t,
const pthread_attr_t *attrs,
void *stack,
size_t stacksize,
- int kernalloc);
+ void *freeaddr,
+ size_t freesize);
-extern void _pthread_set_self(pthread_t);
+static inline void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);
static void _pthread_dealloc_reply_port(pthread_t t);
+static void _pthread_dealloc_special_reply_port(pthread_t t);
-static inline void __pthread_add_thread(pthread_t t, bool parent);
+static inline void __pthread_add_thread(pthread_t t, const pthread_attr_t *attr, bool parent, bool from_mach_thread);
static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);
-static int _pthread_find_thread(pthread_t thread);
-
static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
-static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static inline void _pthread_introspection_thread_create(pthread_t t, bool destroy);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr, size_t freesize, bool destroy);
static inline void _pthread_introspection_thread_destroy(pthread_t t);
+extern void _pthread_set_self(pthread_t);
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start
-void pthread_workqueue_atfork_child(void);
-
-static bool __workq_newapi;
-
/* Compatibility: previous pthread API used WORKQUEUE_OVERCOMMIT to request overcommit threads from
* the kernel. This definition is kept here, in userspace only, to perform the compatibility shim
* from old API requests to the new kext conventions.
#define WORKQUEUE_OVERCOMMIT 0x10000
/*
- * Flags filed passed to bsdthread_create and back in pthread_start
+ * Flags field passed to bsdthread_create and back in pthread_start
31 <---------------------------------> 0
_________________________________________
| flags(8) | policy(8) | importance(16) |
-----------------------------------------
*/
-#define PTHREAD_START_CUSTOM 0x01000000
-#define PTHREAD_START_SETSCHED 0x02000000
-#define PTHREAD_START_DETACHED 0x04000000
-#define PTHREAD_START_QOSCLASS 0x08000000
-#define PTHREAD_START_QOSCLASS_MASK 0xffffff
+#define PTHREAD_START_CUSTOM 0x01000000
+#define PTHREAD_START_SETSCHED 0x02000000
+#define PTHREAD_START_DETACHED 0x04000000
+#define PTHREAD_START_QOSCLASS 0x08000000
+#define PTHREAD_START_TSD_BASE_SET 0x10000000
+#define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
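+// Illustrative unpacking of the word above (the policy unpacking mirrors
+// _pthread_start below; the importance field occupies the low bits):
+//   policy     = (pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK;
+//   importance = pflags & PTHREAD_START_IMPORTANCE_MASK;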
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, void *, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t);
extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid( void );
-extern int __pthread_canceled(int);
-extern int __pthread_kill(mach_port_t, int);
extern int __workq_open(void);
extern int __workq_kernreturn(int, void *, int, int);
#error no PTHREAD_STACK_HINT for this architecture
#endif
-#if defined(__i386__) && defined(static_assert)
-// Check for regression of <rdar://problem/13249323>
-static_assert(offsetof(struct _pthread, err_no) == 68);
-#endif
+// Check that the offsets of the _PTHREAD_STRUCT_DIRECT_*_OFFSET values haven't changed
+_Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
+ == offsetof(struct _pthread, thread_id),
+ "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
// Allocate a thread structure, stack and guard page.
//
size_t allocsize = 0;
size_t guardsize = 0;
size_t stacksize = 0;
-
+
PTHREAD_ASSERT(attrs->stacksize >= PTHREAD_STACK_MIN);
*thread = NULL;
*stack = NULL;
-
+
// Allocate a pthread structure if necessary
-
+
if (attrs->stackaddr != NULL) {
PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
*stack = attrs->stackaddr;
- allocsize = pthreadsize;
+ allocsize = PTHREAD_SIZE;
} else {
guardsize = attrs->guardsize;
stacksize = attrs->stacksize;
- allocsize = stacksize + guardsize + pthreadsize;
+ allocsize = stacksize + guardsize + PTHREAD_SIZE;
}
-
+
kr = mach_vm_map(mach_task_self(),
&allocaddr,
allocsize,
*stack = t;
}
}
-
+
if (t != NULL) {
- _pthread_struct_init(t, attrs, *stack, attrs->stacksize, 0);
- t->freeaddr = (void *)allocaddr;
- t->freesize = allocsize;
+ _pthread_struct_init(t, attrs,
+ *stack, attrs->stacksize,
+ allocaddr, allocsize);
*thread = t;
res = 0;
} else {
return 0;
}
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wreturn-stack-address"
+
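+// Returns an address inside the caller's frame, i.e. a coarse approximation
+// of the current stack pointer. Used by _pthread_terminate below to decide
+// how much of the main thread's stack can be returned to the kernel; the
+// pragmas silence the (here intentional) -Wreturn-stack-address warning.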
+PTHREAD_NOINLINE
+static void*
+_pthread_current_stack_address(void)
+{
+ int a;
+ return &a;
+}
+
+#pragma clang diagnostic pop
+
// Terminates the thread if called from the currently running thread.
-PTHREAD_NORETURN
+PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
static void
_pthread_terminate(pthread_t t)
{
PTHREAD_ASSERT(t == pthread_self());
-
+
uintptr_t freeaddr = (uintptr_t)t->freeaddr;
size_t freesize = t->freesize;
+ // the size of just the stack
+ size_t freesize_stack = t->freesize;
+
+ // We usually pass our structure+stack to bsdthread_terminate to free, but
+ // if we get told to keep the pthread_t structure around then we need to
+ // adjust the free size and addr in the pthread_t to just refer to the
+ // structure and not the stack. If we do end up deallocating the
+ // structure, this is useless work since no one can read the result, but we
+ // can't do it after the call to pthread_remove_thread because it isn't
+ // safe to dereference t after that.
+ if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
+ // Check to ensure the pthread structure itself is part of the
+ // allocation described by freeaddr/freesize, in which case we split and
+ // only deallocate the area below the pthread structure. In the event of a
+ // custom stack, the freeaddr/size will be the pthread structure itself, in
+ // which case we shouldn't free anything (the final else case).
+ freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);
+
+ // describe just the remainder for deallocation when the pthread_t goes away
+ t->freeaddr += freesize_stack;
+ t->freesize -= freesize_stack;
+ } else if (t == &_thread){
+ freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
+ uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
+ freesize_stack = stackborder - freeaddr;
+ } else {
+ freesize_stack = 0;
+ }
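+	// Worked example with illustrative numbers only: freeaddr = 0x100000,
+	// freesize = 0x81000 and t at 0x180000 give freesize_stack = 0x80000,
+	// leaving the pthread_t describing [0x180000, 0x181000) so that the
+	// structure can be freed separately once any joiner is done with it.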
+
mach_port_t kport = _pthread_kernel_thread(t);
semaphore_t joinsem = t->joiner_notify;
+ _pthread_dealloc_special_reply_port(t);
_pthread_dealloc_reply_port(t);
- // If the pthread_t sticks around after the __bsdthread_terminate, we'll
- // need to free it later
-
- // After the call to __pthread_remove_thread, it is only safe to
- // dereference the pthread_t structure if EBUSY has been returned.
+ // After the call to __pthread_remove_thread, it is not safe to
+ // dereference the pthread_t structure.
bool destroy, should_exit;
destroy = (__pthread_remove_thread(t, true, &should_exit) != EBUSY);
- if (t == &_thread) {
- // Don't free the main thread.
- freesize = 0;
- } else if (!destroy) {
- // We were told to keep the pthread_t structure around. In the common
- // case, the pthread structure itself is part of the allocation
- // described by freeaddr/freesize, in which case we need to split and
- // only deallocate the area below the pthread structure. In the event
- // of a custom stack, the freeaddr/size will be the pthread structure
- // itself, in which case we shouldn't free anything.
- if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
- freesize = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);
- t->freeaddr += freesize;
- t->freesize -= freesize;
- } else {
- freesize = 0;
- }
+ if (!destroy || t == &_thread) {
+ // Use the adjusted freesize of just the stack that we computed above.
+ freesize = freesize_stack;
}
+
+ // Check if there is nothing to free because the thread has a custom
+ // stack allocation and is joinable.
if (freesize == 0) {
freeaddr = 0;
}
PTHREAD_ABORT("thread %p didn't terminate", t);
}
-int
+PTHREAD_NORETURN
+static void
+_pthread_terminate_invoke(pthread_t t)
+{
+ _pthread_terminate(t);
+}
+
+int
pthread_attr_destroy(pthread_attr_t *attr)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
int ret = EINVAL;
return 0;
}
-int
+int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
int ret = EINVAL;
/*
* Create and start execution of a new thread.
*/
-
+PTHREAD_NOINLINE PTHREAD_NORETURN
static void
-_pthread_body(pthread_t self)
+_pthread_body(pthread_t self, bool needs_tsd_base_set)
{
- _pthread_set_self(self);
- __pthread_add_thread(self, false);
- _pthread_exit(self, (self->fun)(self->arg));
+ _pthread_set_self_internal(self, needs_tsd_base_set);
+ __pthread_add_thread(self, NULL, false, false);
+ void *result = (self->fun)(self->arg);
+
+ _pthread_exit(self, result);
}
+PTHREAD_NORETURN
void
-_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *arg, size_t stacksize, unsigned int pflags)
+_pthread_start(pthread_t self,
+ mach_port_t kport,
+ void *(*fun)(void *),
+ void *arg,
+ size_t stacksize,
+ unsigned int pflags)
{
if ((pflags & PTHREAD_START_CUSTOM) == 0) {
- uintptr_t stackaddr = self;
- _pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize, 1);
+ void *stackaddr = self;
+ _pthread_struct_init(self, &_pthread_attr_default,
+ stackaddr, stacksize,
+ PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));
if (pflags & PTHREAD_START_SETSCHED) {
self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
}
- _pthread_set_kernel_thread(self, kport);
+ bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);
+
+#if DEBUG
+ PTHREAD_ASSERT(MACH_PORT_VALID(kport));
+ PTHREAD_ASSERT(_pthread_kernel_thread(self) == kport);
+#endif
+ // will mark the thread initialized
+ _pthread_markcancel_if_canceled(self, kport);
+
self->fun = fun;
self->arg = arg;
-
- _pthread_body(self);
+
+ _pthread_body(self, !thread_tsd_bsd_set);
}
-static void
+PTHREAD_ALWAYS_INLINE
+static inline void
_pthread_struct_init(pthread_t t,
const pthread_attr_t *attrs,
void *stackaddr,
size_t stacksize,
- int kernalloc)
+ void *freeaddr,
+ size_t freesize)
{
+#if DEBUG
+ PTHREAD_ASSERT(t->sig != _PTHREAD_SIG);
+#endif
+
t->sig = _PTHREAD_SIG;
t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
- LOCK_INIT(t->lock);
+ _PTHREAD_LOCK_INIT(t->lock);
- t->stacksize = stacksize;
t->stackaddr = stackaddr;
-
- t->kernalloc = kernalloc;
- if (kernalloc){
- /*
- * The pthread may be offset into a page. In that event, by contract
- * with the kernel, the allocation will extend pthreadsize from the
- * start of the next page. There's also one page worth of allocation
- * below stacksize for the guard page. <rdar://problem/19941744>
- */
- t->freeaddr = (stackaddr - stacksize) - vm_page_size;
- t->freesize = (round_page((uintptr_t)stackaddr) + pthreadsize) - (uintptr_t)t->freeaddr;
- }
+ t->stacksize = stacksize;
+ t->freeaddr = freeaddr;
+ t->freesize = freesize;
t->guardsize = attrs->guardsize;
t->detached = attrs->detached;
return __is_threaded;
}
-/* Non portable public api to know whether this process has(had) atleast one thread
+/* Non portable public API to know whether this process has (or had) at least one thread
 * apart from the main thread. There could be a race if a thread is in the process of
 * being created at the time of the call. It does not tell whether there is more than one
 * thread at this point in time.
return __is_threaded;
}
+
+PTHREAD_NOEXPORT_VARIANT
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
mach_port_t kport = MACH_PORT_NULL;
-
- if (t == pthread_self()) {
- /*
- * If the call is on self, return the kernel port. We cannot
- * add this bypass for main thread as it might have exited,
- * and we should not return stale port info.
- */
- kport = _pthread_kernel_thread(t);
- } else {
- (void)_pthread_lookup_thread(t, &kport, 0);
- }
-
+ (void)_pthread_is_valid(t, 0, &kport);
return kport;
}
+PTHREAD_NOEXPORT_VARIANT
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
struct _pthread *p = NULL;
/* No need to wait as mach port is already known */
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
TAILQ_FOREACH(p, &__pthread_head, plist) {
if (_pthread_kernel_thread(p) == kernel_thread) {
}
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return p;
}
+PTHREAD_NOEXPORT_VARIANT
size_t
pthread_get_stacksize_np(pthread_t t)
{
- int ret;
size_t size = 0;
if (t == NULL) {
return ESRCH; // XXX bug?
}
-
- // since the main thread will not get de-allocated from underneath us
+
+#if !defined(__arm__) && !defined(__arm64__)
+ // The default rlimit based allocations will be provided with a stacksize
+ // of the current limit and a freesize of the max. However, custom
+ // allocations will just have the guard page to free. If we aren't in the
+ // latter case, call into rlimit to determine the current stack size. In
+ // the event that the current limit == max limit then we'll fall down the
+ // fast path, but since it's unlikely that the limit is going to be lowered
+	// after it's been changed to the max, we should be fine.
+ //
+ // Of course, on arm rlim_cur == rlim_max and there's only the one guard
+ // page. So, we can skip all this there.
+ if (t == &_thread && t->stacksize + vm_page_size != t->freesize) {
+ // We want to call getrlimit() just once, as it's relatively expensive
+ static size_t rlimit_stack;
+
+ if (rlimit_stack == 0) {
+ struct rlimit limit;
+ int ret = getrlimit(RLIMIT_STACK, &limit);
+
+ if (ret == 0) {
+ rlimit_stack = (size_t) limit.rlim_cur;
+ }
+ }
+
+ if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
+ return t->stacksize;
+ } else {
+ return rlimit_stack;
+ }
+ }
+#endif /* !defined(__arm__) && !defined(__arm64__) */
+
if (t == pthread_self() || t == &_thread) {
return t->stacksize;
}
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
- ret = _pthread_find_thread(t);
- if (ret == 0) {
+ if (_pthread_is_valid_locked(t)) {
size = t->stacksize;
} else {
- size = ret; // XXX bug?
+ size = ESRCH; // XXX bug?
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return size;
}
+PTHREAD_NOEXPORT_VARIANT
void *
pthread_get_stackaddr_np(pthread_t t)
{
- int ret;
void *addr = NULL;
if (t == NULL) {
return (void *)(uintptr_t)ESRCH; // XXX bug?
}
-
+
// since the main thread will not get de-allocated from underneath us
if (t == pthread_self() || t == &_thread) {
return t->stackaddr;
}
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
- ret = _pthread_find_thread(t);
- if (ret == 0) {
+ if (_pthread_is_valid_locked(t)) {
addr = t->stackaddr;
} else {
- addr = (void *)(uintptr_t)ret; // XXX bug?
+ addr = (void *)(uintptr_t)ESRCH; // XXX bug?
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return addr;
}
+
static mach_port_t
_pthread_reply_port(pthread_t t)
{
}
}
+static mach_port_t
+_pthread_special_reply_port(pthread_t t)
+{
+ void *p;
+ if (t == NULL) {
+ p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY);
+ } else {
+ p = t->tsd[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY];
+ }
+ return (mach_port_t)(uintptr_t)p;
+}
+
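+// Release the receive right for the thread's Mach "special reply" port so it
+// is destroyed along with the thread (assuming, as the mod_refs(...,
+// MACH_PORT_RIGHT_RECEIVE, -1) below implies, that the TSD slot holds a
+// receive right).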
+static void
+_pthread_dealloc_special_reply_port(pthread_t t)
+{
+ mach_port_t special_reply_port = _pthread_special_reply_port(t);
+ if (special_reply_port != MACH_PORT_NULL) {
+ mach_port_mod_refs(mach_task_self(), special_reply_port,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ }
+}
+
pthread_t
pthread_main_thread_np(void)
{
/* if we are passed in a pthread_t that is NULL, then we return
the current thread's thread_id. So folks don't have to call
- pthread_self, in addition to us doing it, if they just want
+ pthread_self, in addition to us doing it, if they just want
their thread_id.
*/
+PTHREAD_NOEXPORT_VARIANT
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
if (thread == NULL || thread == self) {
*thread_id = self->thread_id;
} else {
- LOCK(_pthread_list_lock);
- res = _pthread_find_thread(thread);
- if (res == 0) {
+ _PTHREAD_LOCK(_pthread_list_lock);
+ if (!_pthread_is_valid_locked(thread)) {
+ res = ESRCH;
+ } else if (thread->thread_id == 0) {
+ res = EINVAL;
+ } else {
*thread_id = thread->thread_id;
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
}
return res;
}
+PTHREAD_NOEXPORT_VARIANT
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
- int res;
+ int res = 0;
if (thread == NULL) {
return ESRCH;
}
- LOCK(_pthread_list_lock);
- res = _pthread_find_thread(thread);
- if (res == 0) {
+ _PTHREAD_LOCK(_pthread_list_lock);
+ if (_pthread_is_valid_locked(thread)) {
strlcpy(threadname, thread->pthread_name, len);
+ } else {
+ res = ESRCH;
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return res;
}
+
int
pthread_setname_np(const char *name)
{
PTHREAD_ALWAYS_INLINE
static inline void
-__pthread_add_thread(pthread_t t, bool parent)
+__pthread_add_thread(pthread_t t, const pthread_attr_t *attrs,
+ bool parent, bool from_mach_thread)
{
bool should_deallocate = false;
bool should_add = true;
- LOCK(_pthread_list_lock);
+ mach_port_t kport = _pthread_kernel_thread(t);
+ if (os_slowpath(!MACH_PORT_VALID(kport))) {
+ PTHREAD_CLIENT_CRASH(kport,
+ "Unable to allocate thread port, possible port leak");
+ }
+
+ if (from_mach_thread) {
+ _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
+ } else {
+ _PTHREAD_LOCK(_pthread_list_lock);
+ }
// The parent and child threads race to add the thread to the list.
// When called by the parent:
// child got here first, don't add.
should_add = false;
}
-
+
// If the child exits before we check in then it has to keep
// the thread structure memory alive so our dereferences above
// are valid. If it's a detached thread, then no joiner will
if (should_add) {
TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
_pthread_count++;
+
+ /*
+	 * Set some initial values in the pthread structure that we already
+	 * know, in case folks try to read them before the thread can set them.
+ */
+ if (parent && attrs && attrs->schedset == 0) {
+ t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = attrs->qosclass;
+ }
}
- UNLOCK(_pthread_list_lock);
+ if (from_mach_thread){
+ _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
+ } else {
+ _PTHREAD_UNLOCK(_pthread_list_lock);
+ }
if (parent) {
- _pthread_introspection_thread_create(t, should_deallocate);
+ if (!from_mach_thread) {
+ // PR-26275485: Mach threads will likely crash trying to run
+ // introspection code. Since the fall out from the introspection
+ // code not seeing the injected thread is likely less than crashing
+ // in the introspection code, just don't make the call.
+ _pthread_introspection_thread_create(t, should_deallocate);
+ }
if (should_deallocate) {
_pthread_deallocate(t);
}
__pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
{
int ret = 0;
-
+
bool should_remove = true;
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
// When a thread removes itself:
// - Set the childexit flag indicating that the thread has exited.
// - Update the running thread count.
// When another thread removes a joinable thread:
// - CAREFUL not to dereference the thread before verifying that the
- // reference is still valid using _pthread_find_thread().
+ // reference is still valid using _pthread_is_valid_locked().
// - Remove the thread from the list.
if (child) {
should_remove = false;
}
*should_exit = (--_pthread_count <= 0);
- } else {
- ret = _pthread_find_thread(t);
- if (ret == 0) {
- // If we found a thread but it's not joinable, bail.
- if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
- should_remove = false;
- ret = ESRCH;
- }
- }
+ } else if (!_pthread_is_valid_locked(t)) {
+ ret = ESRCH;
+ should_remove = false;
+ } else if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
+ // If we found a thread but it's not joinable, bail.
+ ret = ESRCH;
+ should_remove = false;
+ } else if (t->parentcheck == 0) {
+ // If we're not the child thread *and* the parent has not finished
+ // creating the thread yet, then we are another thread that's joining
+ // and we cannot deallocate the pthread.
+ ret = EBUSY;
}
if (should_remove) {
TAILQ_REMOVE(&__pthread_head, t, plist);
}
- UNLOCK(_pthread_list_lock);
-
+ _PTHREAD_UNLOCK(_pthread_list_lock);
+
return ret;
}
-int
-pthread_create(pthread_t *thread,
+static int
+_pthread_create(pthread_t *thread,
const pthread_attr_t *attr,
void *(*start_routine)(void *),
- void *arg)
-{
+ void *arg,
+ bool from_mach_thread)
+{
pthread_t t = NULL;
unsigned int flags = 0;
__is_threaded = 1;
void *stack;
-
+
if (attrs->fastpath) {
// kernel will allocate thread and stack, pass stacksize.
stack = (void *)attrs->stacksize;
pthread_t t2;
t2 = __bsdthread_create(start_routine, arg, stack, t, flags);
if (t2 == (pthread_t)-1) {
+ if (errno == EMFILE) {
+ PTHREAD_CLIENT_CRASH(0,
+ "Unable to allocate thread port, possible port leak");
+ }
if (flags & PTHREAD_START_CUSTOM) {
// free the thread and stack if we allocated it
_pthread_deallocate(t);
t = t2;
}
- __pthread_add_thread(t, true);
-
- // XXX if a thread is created detached and exits, t will be invalid
+ __pthread_add_thread(t, attrs, true, from_mach_thread);
+
+ // n.b. if a thread is created detached and exits, t will be invalid
*thread = t;
return 0;
}
+int
+pthread_create(pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *),
+ void *arg)
+{
+ return _pthread_create(thread, attr, start_routine, arg, false);
+}
+
+int
+pthread_create_from_mach_thread(pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *),
+ void *arg)
+{
+ return _pthread_create(thread, attr, start_routine, arg, true);
+}
+
+PTHREAD_NORETURN
+static void
+_pthread_suspended_body(pthread_t self)
+{
+ _pthread_set_self(self);
+ __pthread_add_thread(self, NULL, false, false);
+ _pthread_exit(self, (self->fun)(self->arg));
+}
+
int
pthread_create_suspended_np(pthread_t *thread,
const pthread_attr_t *attr,
if (res) {
return res;
}
-
+
*thread = t;
kern_return_t kr;
_pthread_set_kernel_thread(t, kernel_thread);
(void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
-
+
__is_threaded = 1;
t->arg = arg;
t->fun = start_routine;
- __pthread_add_thread(t, true);
+ t->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
+ __pthread_add_thread(t, NULL, true, false);
// Set up a suspended thread.
- _pthread_setup(t, _pthread_body, stack, 1, 0);
+ _pthread_setup(t, _pthread_suspended_body, stack, 1, 0);
return res;
}
-int
+
+PTHREAD_NOEXPORT_VARIANT
+int
pthread_detach(pthread_t thread)
{
- int res;
+ int res = 0;
bool join = false;
semaphore_t sema = SEMAPHORE_NULL;
- res = _pthread_lookup_thread(thread, NULL, 1);
- if (res) {
- return res; // Not a valid thread to detach.
+ if (!_pthread_is_valid(thread, PTHREAD_IS_VALID_LOCK_THREAD, NULL)) {
+ return ESRCH; // Not a valid thread to detach.
}
- LOCK(thread->lock);
- if (thread->detached & PTHREAD_CREATE_JOINABLE) {
- if (thread->detached & _PTHREAD_EXITED) {
- // Join the thread if it's already exited.
- join = true;
- } else {
- thread->detached &= ~PTHREAD_CREATE_JOINABLE;
- thread->detached |= PTHREAD_CREATE_DETACHED;
- sema = thread->joiner_notify;
- }
- } else {
+ if ((thread->detached & PTHREAD_CREATE_DETACHED) ||
+ !(thread->detached & PTHREAD_CREATE_JOINABLE)) {
res = EINVAL;
+ } else if (thread->detached & _PTHREAD_EXITED) {
+ // Join the thread if it's already exited.
+ join = true;
+ } else {
+ thread->detached &= ~PTHREAD_CREATE_JOINABLE;
+ thread->detached |= PTHREAD_CREATE_DETACHED;
+ sema = thread->joiner_notify;
}
- UNLOCK(thread->lock);
+
+ _PTHREAD_UNLOCK(thread->lock);
if (join) {
pthread_join(thread, NULL);
return res;
}
-int
+PTHREAD_NOEXPORT_VARIANT
+int
pthread_kill(pthread_t th, int sig)
-{
+{
if (sig < 0 || sig > NSIG) {
return EINVAL;
}
mach_port_t kport = MACH_PORT_NULL;
- if (_pthread_lookup_thread(th, &kport, 0) != 0) {
+ if (!_pthread_is_valid(th, 0, &kport)) {
return ESRCH; // Not a valid thread.
}
return ret;
}
-int
+PTHREAD_NOEXPORT_VARIANT
+int
__pthread_workqueue_setkill(int enable)
{
pthread_t self = pthread_self();
- LOCK(self->lock);
+ _PTHREAD_LOCK(self->lock);
self->wqkillset = enable ? 1 : 0;
- UNLOCK(self->lock);
+ _PTHREAD_UNLOCK(self->lock);
return 0;
}
-static void *
-__pthread_get_exit_value(pthread_t t, int conforming)
-{
- const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
- void *value = t->exit_value;
- if (conforming) {
- if ((t->cancel_state & flags) == flags) {
- value = PTHREAD_CANCELED;
- }
- }
- return value;
-}
/* For compatibility... */
int __disable_threadsignal(int);
PTHREAD_NORETURN
-static void
+static void
_pthread_exit(pthread_t self, void *value_ptr)
{
struct __darwin_pthread_handler_rec *handler;
}
_pthread_tsd_cleanup(self);
- LOCK(self->lock);
+ _PTHREAD_LOCK(self->lock);
self->detached |= _PTHREAD_EXITED;
self->exit_value = value_ptr;
self->joiner_notify == SEMAPHORE_NULL) {
self->joiner_notify = (semaphore_t)os_get_cached_semaphore();
}
- UNLOCK(self->lock);
+ _PTHREAD_UNLOCK(self->lock);
// Clear per-thread semaphore cache
os_put_cached_semaphore(SEMAPHORE_NULL);
- _pthread_terminate(self);
+ _pthread_terminate_invoke(self);
}
void
}
}
-int
-pthread_getschedparam(pthread_t thread,
+
+PTHREAD_NOEXPORT_VARIANT
+int
+pthread_getschedparam(pthread_t thread,
int *policy,
struct sched_param *param)
{
- int ret;
+ int ret = 0;
if (thread == NULL) {
return ESRCH;
}
-
- LOCK(_pthread_list_lock);
- ret = _pthread_find_thread(thread);
- if (ret == 0) {
+ _PTHREAD_LOCK(_pthread_list_lock);
+
+ if (_pthread_is_valid_locked(thread)) {
if (policy) {
*policy = thread->policy;
}
if (param) {
*param = thread->param;
}
+ } else {
+ ret = ESRCH;
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return ret;
}
-static int
-pthread_setschedparam_internal(pthread_t thread,
+
+PTHREAD_ALWAYS_INLINE
+static inline int
+pthread_setschedparam_internal(pthread_t thread,
mach_port_t kport,
int policy,
const struct sched_param *param)
return (ret != KERN_SUCCESS) ? EINVAL : 0;
}
-int
+
+PTHREAD_NOEXPORT_VARIANT
+int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
mach_port_t kport = MACH_PORT_NULL;
int bypass = 1;
// since the main thread will not get de-allocated from underneath us
- if (t == pthread_self() || t == &_thread ) {
+ if (t == pthread_self() || t == &_thread) {
kport = _pthread_kernel_thread(t);
} else {
bypass = 0;
- (void)_pthread_lookup_thread(t, &kport, 0);
+ (void)_pthread_is_valid(t, 0, &kport);
}
-
+
res = pthread_setschedparam_internal(t, kport, policy, param);
if (res == 0) {
if (bypass == 0) {
// Ensure the thread is still valid.
- LOCK(_pthread_list_lock);
- res = _pthread_find_thread(t);
- if (res == 0) {
+ _PTHREAD_LOCK(_pthread_list_lock);
+ if (_pthread_is_valid_locked(t)) {
t->policy = policy;
t->param = *param;
+ } else {
+ res = ESRCH;
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
} else {
t->policy = policy;
t->param = *param;
return res;
}
+
int
sched_get_priority_min(int policy)
{
return default_priority + 16;
}
-int
+int
pthread_equal(pthread_t t1, pthread_t t2)
{
return (t1 == t2);
}
-// Force LLVM not to optimise this to a call to __pthread_set_self, if it does
-// then _pthread_set_self won't be bound when secondary threads try and start up.
+/*
+ * Force LLVM not to optimise this into a call to __pthread_set_self; if it
+ * does, then _pthread_set_self won't be bound when secondary threads try to start up.
+ */
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
- extern void __pthread_set_self(void *);
+ return _pthread_set_self_internal(p, true);
+}
+PTHREAD_ALWAYS_INLINE
+static inline void
+_pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
+{
if (p == NULL) {
p = &_thread;
}
-
+
uint64_t tid = __thread_selfid();
if (tid == -1ull) {
PTHREAD_ABORT("failed to set thread_id");
p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
p->thread_id = tid;
- __pthread_set_self(&p->tsd[0]);
+
+ if (needs_tsd_base_set) {
+ _thread_set_tsd_base(&p->tsd[0]);
+ }
+}
+
+
+// <rdar://problem/28984807> pthread_once should have an acquire barrier
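+// (hedged rationale: the acquire load pairs with the barrier _os_once issues
+// when it marks the predicate done, so a thread that skips the slow path is
+// guaranteed to observe the initializer's side effects)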
+PTHREAD_ALWAYS_INLINE
+static inline void
+_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
+{
+ if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
+ _os_once(predicate, context, function);
+ OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
+ }
}
struct _pthread_once_context {
ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}
-int
+PTHREAD_NOEXPORT_VARIANT
+int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
struct _pthread_once_context ctx = { once_control, init_routine };
do {
- os_once(&once_control->once, &ctx, __pthread_once_handler);
+ _os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
return 0;
}
-void
-_pthread_testcancel(pthread_t thread, int isconforming)
-{
- const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
-
- LOCK(thread->lock);
- bool canceled = ((thread->cancel_state & flags) == flags);
- UNLOCK(thread->lock);
-
- if (canceled) {
- pthread_exit(isconforming ? PTHREAD_CANCELED : 0);
- }
-}
-
-void
-_pthread_exit_if_canceled(int error)
-{
- if (__unix_conforming && ((error & 0xff) == EINTR) && (__pthread_canceled(0) == 0)) {
- pthread_t self = pthread_self();
- if (self != NULL) {
- self->cancel_error = error;
- }
- pthread_exit(PTHREAD_CANCELED);
- }
-}
int
pthread_getconcurrency(void)
return 0;
}
-void
-_pthread_set_pfz(uintptr_t address)
+static unsigned long
+_pthread_strtoul(const char *p, const char **endptr, int base)
{
+ uintptr_t val = 0;
+
+ // Expect hex string starting with "0x"
+ if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
+ p += 2;
+ while (1) {
+ char c = *p;
+ if ('0' <= c && c <= '9') {
+ val = (val << 4) + (c - '0');
+ } else if ('a' <= c && c <= 'f') {
+ val = (val << 4) + (c - 'a' + 10);
+ } else if ('A' <= c && c <= 'F') {
+ val = (val << 4) + (c - 'A' + 10);
+ } else {
+ break;
+ }
+ ++p;
+ }
+ }
+
+ *endptr = (char *)p;
+ return val;
+}
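+// Illustrative use, with hypothetical values: given s = "0x1f,0x2000",
+//   const char *end;
+//   unsigned long v = _pthread_strtoul(s, &end, 16);
+// yields v == 0x1f with end left pointing at the ',' separator.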
+
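+// Parse the kernel-provided "main_stack=<stackaddr>,<stacksize>,<allocaddr>,
+// <allocsize>" entry (comma-separated hex fields) from the apple[] vector and
+// then scrub it; returns 1 only on a complete parse. (Field meanings are
+// inferred from the assignments below.)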
+static int
+parse_main_stack_params(const char *apple[],
+ void **stackaddr,
+ size_t *stacksize,
+ void **allocaddr,
+ size_t *allocsize)
+{
+ const char *p = _simple_getenv(apple, "main_stack");
+ if (!p) return 0;
+
+ int ret = 0;
+ const char *s = p;
+
+	*stackaddr = (void *)_pthread_strtoul(s, &s, 16);
+ if (*s != ',') goto out;
+
+ *stacksize = _pthread_strtoul(s + 1, &s, 16);
+ if (*s != ',') goto out;
+
+	*allocaddr = (void *)_pthread_strtoul(s + 1, &s, 16);
+ if (*s != ',') goto out;
+
+ *allocsize = _pthread_strtoul(s + 1, &s, 16);
+ if (*s != ',' && *s != 0) goto out;
+
+ ret = 1;
+out:
+ bzero((char *)p, strlen(p));
+ return ret;
}
-#if !defined(PTHREAD_TARGET_EOS) && !defined(VARIANT_DYLD)
+#if !defined(VARIANT_STATIC)
void *
malloc(size_t sz)
{
_pthread_free(p);
}
}
-#endif
+#endif // VARIANT_STATIC
/*
* Perform package initialization - called automatically when application starts
struct ProgramVars; /* forward reference */
int
-__pthread_init(const struct _libpthread_functions *pthread_funcs, const char *envp[] __unused,
- const char *apple[] __unused, const struct ProgramVars *vars __unused)
+__pthread_init(const struct _libpthread_functions *pthread_funcs,
+ const char *envp[] __unused,
+ const char *apple[],
+ const struct ProgramVars *vars __unused)
{
// Save our provided pushed-down functions
if (pthread_funcs) {
// Set up the main thread structure
//
- void *stackaddr;
- size_t stacksize = DFLSSIZ;
- size_t len = sizeof(stackaddr);
- int mib[] = { CTL_KERN, KERN_USRSTACK };
- if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
- stackaddr = (void *)USRSTACK;
+ // Get the address and size of the main thread's stack from the kernel.
+ void *stackaddr = 0;
+ size_t stacksize = 0;
+ void *allocaddr = 0;
+ size_t allocsize = 0;
+ if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
+ stackaddr == NULL || stacksize == 0) {
+		// Fall back to the previous behavior.
+ size_t len = sizeof(stackaddr);
+ int mib[] = { CTL_KERN, KERN_USRSTACK };
+ if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
+#if defined(__LP64__)
+ stackaddr = (void *)USRSTACK64;
+#else
+ stackaddr = (void *)USRSTACK;
+#endif
+ }
+ stacksize = DFLSSIZ;
+ allocaddr = 0;
+ allocsize = 0;
}
pthread_t thread = &_thread;
pthread_attr_init(&_pthread_attr_default);
- _pthread_struct_init(thread, &_pthread_attr_default, stackaddr, stacksize, 0);
+ _pthread_struct_init(thread, &_pthread_attr_default,
+ stackaddr, stacksize,
+ allocaddr, allocsize);
thread->detached = PTHREAD_CREATE_JOINABLE;
// Finish initialization with common code that is reinvoked on the
// Finishes initialization of main thread attributes.
// Initializes the thread list and add the main thread.
// Calls _pthread_set_self() to prepare the main thread for execution.
- __pthread_fork_child_internal(thread);
-
+ _pthread_main_thread_init(thread);
+
+ struct _pthread_registration_data registration_data;
// Set up kernel entry points with __bsdthread_register.
- pthread_workqueue_atfork_child();
+ _pthread_bsdthread_init(®istration_data);
- // Have pthread_key do its init envvar checks.
+ // Have pthread_key and pthread_mutex do their init envvar checks.
_pthread_key_global_init(envp);
+ _pthread_mutex_global_init(envp, ®istration_data);
+
+#if PTHREAD_DEBUG_LOG
+ _SIMPLE_STRING path = _simple_salloc();
+ _simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
+ _pthread_debuglog = open(_simple_string(path),
+ O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
+ _simple_sfree(path);
+ _pthread_debugstart = mach_absolute_time();
+#endif
return 0;
}
-int
-sched_yield(void)
-{
- swtch_pri(0);
- return 0;
-}
-
PTHREAD_NOEXPORT void
-__pthread_fork_child_internal(pthread_t p)
+_pthread_main_thread_init(pthread_t p)
{
TAILQ_INIT(&__pthread_head);
- LOCK_INIT(_pthread_list_lock);
+ _PTHREAD_LOCK_INIT(_pthread_list_lock);
// Re-use the main thread's static storage if no thread was provided.
if (p == NULL) {
p = &_thread;
}
- LOCK_INIT(p->lock);
+ _PTHREAD_LOCK_INIT(p->lock);
_pthread_set_kernel_thread(p, mach_thread_self());
_pthread_set_reply_port(p, mach_reply_port());
p->__cleanup_stack = NULL;
p->joiner_notify = SEMAPHORE_NULL;
p->joiner = MACH_PORT_NULL;
p->detached |= _PTHREAD_CREATE_PARENT;
- p->tsd[__TSD_SEMAPHORE_CACHE] = SEMAPHORE_NULL;
+ p->tsd[__TSD_SEMAPHORE_CACHE] = (void*)SEMAPHORE_NULL;
+ p->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
// Initialize the list of threads with the new main thread.
TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
_pthread_introspection_thread_start(p);
}
-/*
- * Query/update the cancelability 'state' of a thread
- */
-PTHREAD_NOEXPORT int
-_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
-{
- pthread_t self;
-
- switch (state) {
- case PTHREAD_CANCEL_ENABLE:
- if (conforming) {
- __pthread_canceled(1);
- }
- break;
- case PTHREAD_CANCEL_DISABLE:
- if (conforming) {
- __pthread_canceled(2);
- }
- break;
- default:
- return EINVAL;
- }
-
- self = pthread_self();
- LOCK(self->lock);
- if (oldstate) {
- *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
- }
- self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
- self->cancel_state |= state;
- UNLOCK(self->lock);
- if (!conforming) {
- _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
- }
- return 0;
-}
-
-/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
-static void
-_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
-{
- LOCK(self->lock);
- self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
- self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
- if (value_ptr == PTHREAD_CANCELED) {
-// 4597450: begin
- self->detached |= _PTHREAD_WASCANCEL;
-// 4597450: end
- }
- UNLOCK(self->lock);
-}
-
int
_pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
{
- // Returns ESRCH if the thread was not created joinable.
int ret = __pthread_remove_thread(thread, false, NULL);
- if (ret != 0) {
+ if (ret != 0 && ret != EBUSY) {
+ // Returns ESRCH if the thread was not created joinable.
return ret;
}
-
+
if (value_ptr) {
- *value_ptr = __pthread_get_exit_value(thread, conforming);
+ *value_ptr = _pthread_get_exit_value(thread, conforming);
}
_pthread_introspection_thread_destroy(thread);
- _pthread_deallocate(thread);
+ if (ret != EBUSY) {
+ // __pthread_remove_thread returns EBUSY if the parent has not
+ // finished creating the thread (and is still expecting the pthread_t
+ // to be alive).
+ _pthread_deallocate(thread);
+ }
return 0;
}
-/* ALWAYS called with list lock and return with list lock */
int
-_pthread_find_thread(pthread_t thread)
+sched_yield(void)
{
- if (thread != NULL) {
- pthread_t p;
-loop:
- TAILQ_FOREACH(p, &__pthread_head, plist) {
- if (p == thread) {
- if (_pthread_kernel_thread(thread) == MACH_PORT_NULL) {
- UNLOCK(_pthread_list_lock);
- sched_yield();
- LOCK(_pthread_list_lock);
- goto loop;
- }
- return 0;
- }
- }
- }
- return ESRCH;
+ swtch_pri(0);
+ return 0;
}
-int
-_pthread_lookup_thread(pthread_t thread, mach_port_t *portp, int only_joinable)
+// XXX remove
+void
+cthread_yield(void)
{
- mach_port_t kport = MACH_PORT_NULL;
- int ret;
-
- if (thread == NULL) {
- return ESRCH;
- }
-
- LOCK(_pthread_list_lock);
-
- ret = _pthread_find_thread(thread);
- if (ret == 0) {
- // Fail if we only want joinable threads and the thread found is
- // not in the detached state.
- if (only_joinable != 0 && (thread->detached & PTHREAD_CREATE_DETACHED) != 0) {
- ret = EINVAL;
- } else {
- kport = _pthread_kernel_thread(thread);
- }
- }
-
- UNLOCK(_pthread_list_lock);
-
- if (portp != NULL) {
- *portp = kport;
- }
+ sched_yield();
+}
- return ret;
+void
+pthread_yield_np(void)
+{
+ sched_yield();
}
+
+
+PTHREAD_NOEXPORT_VARIANT
void
_pthread_clear_qos_tsd(mach_port_t thread_port)
{
} else {
pthread_t p;
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
TAILQ_FOREACH(p, &__pthread_head, plist) {
mach_port_t kp = _pthread_kernel_thread(p);
}
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
}
}
+
/***** pthread workqueue support routines *****/
PTHREAD_NOEXPORT void
-pthread_workqueue_atfork_child(void)
+_pthread_bsdthread_init(struct _pthread_registration_data *data)
{
- struct _pthread_registration_data data = {
- .dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *),
- };
+ bzero(data, sizeof(*data));
+ data->version = sizeof(struct _pthread_registration_data);
+ data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
+ data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
+ data->tsd_offset = offsetof(struct _pthread, tsd);
+ data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);
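+	// A hedged reading of the offsets registered here: they tell the kernel
+	// where the TSD area lives inside struct _pthread, and where the dispatch
+	// queue, return-to-kernel and mach_thread_self slots live inside the TSD.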
int rv = __bsdthread_register(thread_start,
- start_wqthread,
- (int)pthreadsize,
- (void*)&data,
- (uintptr_t)sizeof(data),
- data.dispatch_queue_offset);
+ start_wqthread, (int)PTHREAD_SIZE,
+ (void*)data, (uintptr_t)sizeof(*data),
+ data->dispatch_queue_offset);
if (rv > 0) {
+ if ((rv & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
+ PTHREAD_INTERNAL_CRASH(rv,
+ "Missing required support for QOS_CLASS_DEFAULT");
+ }
+ if ((rv & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
+ PTHREAD_INTERNAL_CRASH(rv,
+ "Missing required support for QOS_CLASS_MAINTENANCE");
+ }
__pthread_supported_features = rv;
}
- if (_pthread_priority_get_qos_newest(data.main_qos) != QOS_CLASS_UNSPECIFIED) {
- _pthread_set_main_qos(data.main_qos);
- _thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = data.main_qos;
+ /*
+ * TODO: differentiate between (-1, EINVAL) after fork (which has the side
+ * effect of resetting the child's stack_addr_hint before bailing out) and
+ * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
+ * the latter as fatal.
+ *
+ * <rdar://problem/36451838>
+ */
+
+ pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;
+
+ if (_pthread_priority_get_qos_newest(main_qos) != QOS_CLASS_UNSPECIFIED) {
+ _pthread_set_main_qos(main_qos);
+ _thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = main_qos;
}
if (__libdispatch_workerfunction != NULL) {
}
// workqueue entry point from kernel
+PTHREAD_NORETURN
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *keventlist, int flags, int nkevents)
{
PTHREAD_ASSERT(flags & WQ_FLAG_THREAD_NEWSPI);
- int thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
- int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
- int overcommit = (flags & WQ_FLAG_THREAD_OVERCOMMIT) != 0;
- int kevent = flags & WQ_FLAG_THREAD_KEVENT;
+ bool thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
+ bool overcommit = flags & WQ_FLAG_THREAD_OVERCOMMIT;
+ bool kevent = flags & WQ_FLAG_THREAD_KEVENT;
+ bool workloop = (flags & WQ_FLAG_THREAD_WORKLOOP) &&
+ __libdispatch_workloopfunction != NULL;
PTHREAD_ASSERT((!kevent) || (__libdispatch_keventfunction != NULL));
+ PTHREAD_ASSERT(!workloop || kevent);
pthread_priority_t priority = 0;
unsigned long priority_flags = 0;
priority_flags |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
if (flags & WQ_FLAG_THREAD_EVENT_MANAGER)
priority_flags |= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+ if (kevent)
+ priority_flags |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
- if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
- priority = _pthread_priority_make_version2(thread_class, 0, priority_flags);
- } else {
- priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);
- }
+ int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
+ priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);
- if (thread_reuse == 0) {
+ if (!thread_reuse) {
// New thread created by kernel, needs initialization.
+ void *stackaddr = self;
size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;
- _pthread_struct_init(self, &_pthread_attr_default, (void*)self, stacksize, 1);
+
+ _pthread_struct_init(self, &_pthread_attr_default,
+ stackaddr, stacksize,
+ PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));
_pthread_set_kernel_thread(self, kport);
self->wqthread = 1;
self->wqkillset = 0;
+ self->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
// Not a joinable thread.
self->detached &= ~PTHREAD_CREATE_JOINABLE;
self->detached |= PTHREAD_CREATE_DETACHED;
// Update the running thread count and set childrun bit.
- // XXX this should be consolidated with pthread_body().
- _pthread_set_self(self);
+ bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
+ _pthread_set_self_internal(self, !thread_tsd_base_set);
_pthread_introspection_thread_create(self, false);
- __pthread_add_thread(self, false);
+ __pthread_add_thread(self, NULL, false, false);
}
// If we're running with fine-grained priority, we also need to
PTHREAD_ASSERT(self == pthread_self());
#endif // WQ_DEBUG
- if (kevent){
+ if (workloop) {
+ self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
+ } else if (kevent){
self->fun = (void *(*)(void*))__libdispatch_keventfunction;
} else {
- self->fun = (void *(*)(void *))__libdispatch_workerfunction;
+ self->fun = (void *(*)(void*))__libdispatch_workerfunction;
}
self->arg = (void *)(uintptr_t)thread_class;
- if (kevent && keventlist){
+ if (kevent && keventlist && nkevents > 0){
+ int errors_out;
kevent_errors_retry:
- (*__libdispatch_keventfunction)(&keventlist, &nkevents);
- int errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
+ if (workloop) {
+ kqueue_id_t kevent_id = *(kqueue_id_t*)((char*)keventlist - sizeof(kqueue_id_t));
+ kqueue_id_t kevent_id_in = kevent_id;
+ (__libdispatch_workloopfunction)(&kevent_id, &keventlist, &nkevents);
+ PTHREAD_ASSERT(kevent_id == kevent_id_in || nkevents == 0);
+ errors_out = __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, keventlist, nkevents, 0);
+ } else {
+ (__libdispatch_keventfunction)(&keventlist, &nkevents);
+ errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
+ }
+
if (errors_out > 0){
nkevents = errors_out;
goto kevent_errors_retry;
} else if (errors_out < 0){
PTHREAD_ABORT("kevent return produced an error: %d", errno);
}
- _pthread_exit(self, NULL);
+ goto thexit;
} else if (kevent){
- (*__libdispatch_keventfunction)(NULL, NULL);
+ if (workloop) {
+ (__libdispatch_workloopfunction)(0, NULL, NULL);
+ __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, NULL, 0, -1);
+ } else {
+ (__libdispatch_keventfunction)(NULL, NULL);
+ __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
+ }
- __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
- _pthread_exit(self, NULL);
+ goto thexit;
}
if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
}
__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
+
+thexit:
+ {
+ pthread_priority_t current_priority = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS);
+ if ((current_priority & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) ||
+ (_pthread_priority_get_qos_newest(current_priority) > WQ_THREAD_CLEANUP_QOS)) {
+ // Reset QoS to something low for the cleanup process
+ priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
+ _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
+ }
+ }
+
_pthread_exit(self, NULL);
}
/***** pthread workqueue API for libdispatch *****/
+_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
+ "Kernel and userland should agree on the event list size");
+
void
pthread_workqueue_setdispatchoffset_np(int offset)
{
__libdispatch_offset = offset;
}
-int
-pthread_workqueue_setdispatch_with_kevent_np(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func)
+static int
+pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func,
+ pthread_workqueue_function_kevent_t kevent_func,
+ pthread_workqueue_function_workloop_t workloop_func)
{
int res = EBUSY;
if (__libdispatch_workerfunction == NULL) {
} else {
__libdispatch_workerfunction = queue_func;
__libdispatch_keventfunction = kevent_func;
+ __libdispatch_workloopfunction = workloop_func;
// Prepare the kernel for workq action
(void)__workq_open();
}
int
-_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func, int offset, int flags)
+_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
+ pthread_workqueue_function_kevent_t kevent_func,
+ pthread_workqueue_function_workloop_t workloop_func,
+ int offset, int flags)
{
if (flags != 0) {
return ENOTSUP;
}
-
+
__workq_newapi = true;
__libdispatch_offset = offset;
-
- int rv = pthread_workqueue_setdispatch_with_kevent_np(queue_func, kevent_func);
+
+ int rv = pthread_workqueue_setdispatch_with_workloop_np(queue_func, kevent_func, workloop_func);
return rv;
}
+int
+_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
+ pthread_workqueue_function_kevent_t kevent_func,
+ int offset, int flags)
+{
+ return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
+}
+
int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
int
pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
{
- return pthread_workqueue_setdispatch_with_kevent_np((pthread_workqueue_function2_t)worker_func, NULL);
+ return pthread_workqueue_setdispatch_with_workloop_np((pthread_workqueue_function2_t)worker_func, NULL, NULL);
}
int
_pthread_workqueue_supported(void)
{
+ if (os_unlikely(!__pthread_supported_features)) {
+ PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
+ }
+
return __pthread_supported_features;
}
flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
}
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
+#pragma clang diagnostic pop
} else {
/* Running on the old kernel, queue_priority is what we pass directly to
return res;
}
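+// Ask the kernel whether a worker at the given priority should "narrow"
+// (stop picking up further work); a failed syscall is treated as "keep
+// going". (Interpretation hedged from the WQOPS_SHOULD_NARROW request below.)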
+bool
+_pthread_workqueue_should_narrow(pthread_priority_t pri)
+{
+ int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
+ if (res == -1) {
+ return false;
+ }
+ return res;
+}
+
int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
pthread_introspection_hook_t
pthread_introspection_hook_install(pthread_introspection_hook_t hook)
{
- if (os_slowpath(!hook)) {
- PTHREAD_ABORT("pthread_introspection_hook_install was passed NULL");
- }
pthread_introspection_hook_t prev;
- prev = __sync_swap(&_pthread_introspection_hook, hook);
+ prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook);
return prev;
}
_pthread_introspection_hook_callout_thread_create(pthread_t t, bool destroy)
{
_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
- pthreadsize);
+ PTHREAD_SIZE);
if (!destroy) return;
_pthread_introspection_thread_destroy(t);
}
freesize = t->stacksize + t->guardsize;
freeaddr = t->stackaddr - freesize;
} else {
- freesize = t->freesize - pthreadsize;
+ freesize = t->freesize - PTHREAD_SIZE;
freeaddr = t->freeaddr;
}
_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
void *freeaddr, size_t freesize, bool destroy)
{
if (destroy && freesize) {
- freesize -= pthreadsize;
+ freesize -= PTHREAD_SIZE;
}
_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
freeaddr, freesize);
{
if (t == &_thread) return;
_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
- pthreadsize);
+ PTHREAD_SIZE);
}
static inline void