* Copyright (c) 2000-2013 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this
* file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_LICENSE_HEADER_END@
*/
/*
- * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
- * All Rights Reserved
- *
- * Permission to use, copy, modify, and distribute this software and
- * its documentation for any purpose and without fee is hereby granted,
- * provided that the above copyright notice appears in all copies and
- * that both the copyright notice and this permission notice appear in
- * supporting documentation.
- *
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE.
- *
+ * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
+ * All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
* IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- *
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
*/
/*
* MkLinux
* POSIX Pthread Library
*/
+#include "resolver.h"
#include "internal.h"
#include "private.h"
#include "workqueue_private.h"
#include <machine/vmparam.h>
#define __APPLE_API_PRIVATE
#include <machine/cpu_capabilities.h>
-#include <libkern/OSAtomic.h>
#include <_simple.h>
#include <platform/string.h>
extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
extern void __exit(int) __attribute__((noreturn));
+extern int __pthread_kill(mach_port_t, int);
+
+extern struct _pthread _thread;
+extern int default_priority;
-static void (*exitf)(int) = __exit;
-__private_extern__ void* (*_pthread_malloc)(size_t) = NULL;
-__private_extern__ void (*_pthread_free)(void *) = NULL;
//
// Global variables
//
+static void (*exitf)(int) = __exit;
+PTHREAD_NOEXPORT void* (*_pthread_malloc)(size_t) = NULL;
+PTHREAD_NOEXPORT void (*_pthread_free)(void *) = NULL;
+
+#if PTHREAD_DEBUG_LOG
+#include <fcntl.h>
+int _pthread_debuglog;
+uint64_t _pthread_debugstart;
+#endif
+
// This global should be used (carefully) by anyone needing to know if a
// pthread (other than the main thread) has been created.
int __is_threaded = 0;
// _pthread_list_lock protects _pthread_count, access to the __pthread_head
// list, and the parentcheck, childrun and childexit flags of the pthread
// structure. Externally imported by pthread_cancelable.c.
-__private_extern__ _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
-__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
+PTHREAD_NOEXPORT _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
+PTHREAD_NOEXPORT struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
static int _pthread_count = 1;
#if PTHREAD_LAYOUT_SPI
mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
-/*
+/*
* The pthread may be offset into a page. In that event, by contract
* with the kernel, the allocation will extend PTHREAD_SIZE from the
* start of the next page. There's also one page worth of allocation
- * below stacksize for the guard page. <rdar://problem/19941744>
+ * below stacksize for the guard page. <rdar://problem/19941744>
*/
#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
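/*
 * A worked sketch of the layout the macros above describe, assuming 4K
 * pages, a 32K stack whose top is page-aligned, and a one-page struct
 * _pthread; values and the show_layout() name are illustrative only.
 * The allocation runs from one guard page below the stack up through the
 * page-rounded pthread structure above the stack top.
 */
#include <stdio.h>
#include <stdint.h>

static void
show_layout(void)
{
	const uintptr_t page = 0x1000;        // assumed page size
	const uintptr_t stackaddr = 0x40000;  // top of stack (stack grows down)
	const uintptr_t stacksize = 0x8000;   // 32K of usable stack
	const uintptr_t pthread_size = page;  // page-rounded struct _pthread

	// Mirrors PTHREAD_ALLOCADDR/PTHREAD_ALLOCSIZE for an aligned stackaddr.
	uintptr_t allocaddr = (stackaddr - stacksize) - page;      // guard page base
	uintptr_t allocsize = (stackaddr + pthread_size) - allocaddr;

	printf("guard  [0x%lx, 0x%lx)\n", (unsigned long)allocaddr,
	    (unsigned long)(allocaddr + page));
	printf("stack  [0x%lx, 0x%lx)\n", (unsigned long)(allocaddr + page),
	    (unsigned long)stackaddr);
	printf("struct [0x%lx, 0x%lx)\n", (unsigned long)stackaddr,
	    (unsigned long)(allocaddr + allocsize));
}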
-static pthread_attr_t _pthread_attr_default = {0};
+static pthread_attr_t _pthread_attr_default = { };
// The main thread's pthread_t
-static struct _pthread _thread __attribute__((aligned(64))) = {0};
+PTHREAD_NOEXPORT struct _pthread _thread __attribute__((aligned(64))) = { };
-static int default_priority;
+PTHREAD_NOEXPORT int default_priority;
static int max_priority;
static int min_priority;
static int pthread_concurrency;
// work queue support data
static void (*__libdispatch_workerfunction)(pthread_priority_t) = NULL;
static void (*__libdispatch_keventfunction)(void **events, int *nevents) = NULL;
+static void (*__libdispatch_workloopfunction)(uint64_t *workloop_id, void **events, int *nevents) = NULL;
static int __libdispatch_offset;
// supported feature set
int __pthread_supported_features;
+static bool __workq_newapi;
//
// Function prototypes
static int _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack);
static int _pthread_deallocate(pthread_t t);
-static void _pthread_terminate(pthread_t t);
+static void _pthread_terminate_invoke(pthread_t t);
-static void _pthread_struct_init(pthread_t t,
+static inline void _pthread_struct_init(pthread_t t,
const pthread_attr_t *attrs,
void *stack,
size_t stacksize,
void *freeaddr,
size_t freesize);
-extern void _pthread_set_self(pthread_t);
-static void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);
+static inline void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);
static void _pthread_dealloc_reply_port(pthread_t t);
+static void _pthread_dealloc_special_reply_port(pthread_t t);
-static inline void __pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread);
+static inline void __pthread_add_thread(pthread_t t, const pthread_attr_t *attr, bool parent, bool from_mach_thread);
static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);
-static int _pthread_find_thread(pthread_t thread);
-
static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
-static void _pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
static inline void _pthread_introspection_thread_create(pthread_t t, bool destroy);
static inline void _pthread_introspection_thread_start(pthread_t t);
static inline void _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr, size_t freesize, bool destroy);
static inline void _pthread_introspection_thread_destroy(pthread_t t);
+extern void _pthread_set_self(pthread_t);
extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start
-void pthread_workqueue_atfork_child(void);
-
-static bool __workq_newapi;
-
/* Compatibility: previous pthread API used WORKQUEUE_OVERCOMMIT to request overcommit threads from
* the kernel. This definition is kept here, in userspace only, to perform the compatibility shim
* from old API requests to the new kext conventions.
#define WORKQUEUE_OVERCOMMIT 0x10000
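/*
 * A minimal sketch of that shim, mirroring the translation done later in
 * the addthreads path; shim_legacy_priority() is a hypothetical name, and
 * _PTHREAD_PRIORITY_OVERCOMMIT_FLAG comes from the priority encoding SPI.
 */
static int
shim_legacy_priority(int queue_priority, unsigned long *flags)
{
	*flags = 0;
	if (queue_priority & WORKQUEUE_OVERCOMMIT) {
		// Strip the legacy bit and carry the request as a priority flag.
		queue_priority &= ~WORKQUEUE_OVERCOMMIT;
		*flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
	}
	return queue_priority;
}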
/*
- * Flags filed passed to bsdthread_create and back in pthread_start
+ * Flags field passed to bsdthread_create and back in pthread_start
31 <---------------------------------> 0
_________________________________________
| flags(8) | policy(8) | importance(16) |
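/*
 * A minimal sketch of packing and unpacking the word drawn above (flags in
 * bits 31-24, policy in bits 23-16, importance in bits 15-0); the helper
 * names are hypothetical.
 */
#include <stdint.h>

static inline uint32_t
pack_create_flags(uint8_t flags, uint8_t policy, uint16_t importance)
{
	return ((uint32_t)flags << 24) | ((uint32_t)policy << 16) | importance;
}

static inline uint8_t  create_flags(uint32_t w)      { return (uint8_t)(w >> 24); }
static inline uint8_t  create_policy(uint32_t w)     { return (uint8_t)(w >> 16); }
static inline uint16_t create_importance(uint32_t w) { return (uint16_t)w; }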
extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, void *, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t);
extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
extern __uint64_t __thread_selfid( void );
-extern int __pthread_canceled(int);
-extern int __pthread_kill(mach_port_t, int);
extern int __workq_open(void);
extern int __workq_kernreturn(int, void *, int, int);
size_t allocsize = 0;
size_t guardsize = 0;
size_t stacksize = 0;
-
+
PTHREAD_ASSERT(attrs->stacksize >= PTHREAD_STACK_MIN);
*thread = NULL;
*stack = NULL;
-
+
// Allocate a pthread structure if necessary
-
+
if (attrs->stackaddr != NULL) {
PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
*stack = attrs->stackaddr;
stacksize = attrs->stacksize;
allocsize = stacksize + guardsize + PTHREAD_SIZE;
}
-
+
kr = mach_vm_map(mach_task_self(),
&allocaddr,
allocsize,
*stack = t;
}
}
-
+
if (t != NULL) {
_pthread_struct_init(t, attrs,
*stack, attrs->stacksize,
PTHREAD_NOINLINE
static void*
-_current_stack_address(void)
+_pthread_current_stack_address(void)
{
int a;
return &a;
#pragma clang diagnostic pop
// Terminates the thread if called from the currently running thread.
-PTHREAD_NORETURN PTHREAD_NOINLINE
+PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
static void
_pthread_terminate(pthread_t t)
{
t->freesize -= freesize_stack;
} else if (t == &_thread){
freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
- uintptr_t stackborder = trunc_page((uintptr_t)_current_stack_address());
+ uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
freesize_stack = stackborder - freeaddr;
} else {
freesize_stack = 0;
mach_port_t kport = _pthread_kernel_thread(t);
semaphore_t joinsem = t->joiner_notify;
+ _pthread_dealloc_special_reply_port(t);
_pthread_dealloc_reply_port(t);
// After the call to __pthread_remove_thread, it is not safe to
PTHREAD_ABORT("thread %p didn't terminate", t);
}
-int
+PTHREAD_NORETURN
+static void
+_pthread_terminate_invoke(pthread_t t)
+{
+ _pthread_terminate(t);
+}
+
+int
pthread_attr_destroy(pthread_attr_t *attr)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
int ret = EINVAL;
return 0;
}
-int
+int
pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
int ret = EINVAL;
return ret;
}
-int
+int
pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
int ret = EINVAL;
/*
* Create and start execution of a new thread.
*/
-PTHREAD_NOINLINE
+PTHREAD_NOINLINE PTHREAD_NORETURN
static void
_pthread_body(pthread_t self, bool needs_tsd_base_set)
{
_pthread_set_self_internal(self, needs_tsd_base_set);
- __pthread_add_thread(self, false, false);
+ __pthread_add_thread(self, NULL, false, false);
void *result = (self->fun)(self->arg);
_pthread_exit(self, result);
}
+PTHREAD_NORETURN
void
_pthread_start(pthread_t self,
mach_port_t kport,
bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);
- _pthread_set_kernel_thread(self, kport);
+#if DEBUG
+ PTHREAD_ASSERT(MACH_PORT_VALID(kport));
+ PTHREAD_ASSERT(_pthread_kernel_thread(self) == kport);
+#endif
+ // will mark the thread initialized
+ _pthread_markcancel_if_canceled(self, kport);
+
self->fun = fun;
self->arg = arg;
_pthread_body(self, !thread_tsd_bsd_set);
}
-static void
+PTHREAD_ALWAYS_INLINE
+static inline void
_pthread_struct_init(pthread_t t,
const pthread_attr_t *attrs,
void *stackaddr,
return __is_threaded;
}
-/* Non portable public api to know whether this process has(had) atleast one thread
+/* Non-portable public API to know whether this process has (had) at least one thread
* apart from the main thread. There could be a race if a thread is in the process of
* being created at the time of the call. It does not tell whether there is more than
* one thread at this point in time.
return __is_threaded;
}
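/*
 * A minimal usage sketch; per the race noted above, a zero result only
 * means no second thread had been created at the moment of the call.
 */
#include <pthread.h>
#include <stdio.h>

static void
report_threadedness(void)
{
	if (pthread_is_threaded_np()) {
		printf("at least one thread beyond main was created\n");
	} else {
		printf("single-threaded so far (may race with thread creation)\n");
	}
}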
+
+PTHREAD_NOEXPORT_VARIANT
mach_port_t
pthread_mach_thread_np(pthread_t t)
{
mach_port_t kport = MACH_PORT_NULL;
-
- if (t == pthread_self()) {
- /*
- * If the call is on self, return the kernel port. We cannot
- * add this bypass for main thread as it might have exited,
- * and we should not return stale port info.
- */
- kport = _pthread_kernel_thread(t);
- } else {
- (void)_pthread_lookup_thread(t, &kport, 0);
- }
-
+ (void)_pthread_is_valid(t, 0, &kport);
return kport;
}
+PTHREAD_NOEXPORT_VARIANT
pthread_t
pthread_from_mach_thread_np(mach_port_t kernel_thread)
{
return p;
}
+PTHREAD_NOEXPORT_VARIANT
size_t
pthread_get_stacksize_np(pthread_t t)
{
- int ret;
size_t size = 0;
if (t == NULL) {
if (t == &_thread && t->stacksize + vm_page_size != t->freesize) {
// We want to call getrlimit() just once, as it's relatively expensive
static size_t rlimit_stack;
-
+
if (rlimit_stack == 0) {
struct rlimit limit;
int ret = getrlimit(RLIMIT_STACK, &limit);
-
+
if (ret == 0) {
rlimit_stack = (size_t) limit.rlim_cur;
}
}
-
+
if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
return t->stacksize;
} else {
_PTHREAD_LOCK(_pthread_list_lock);
- ret = _pthread_find_thread(t);
- if (ret == 0) {
+ if (_pthread_is_valid_locked(t)) {
size = t->stacksize;
} else {
- size = ret; // XXX bug?
+ size = ESRCH; // XXX bug?
}
_PTHREAD_UNLOCK(_pthread_list_lock);
return size;
}
+PTHREAD_NOEXPORT_VARIANT
void *
pthread_get_stackaddr_np(pthread_t t)
{
- int ret;
void *addr = NULL;
if (t == NULL) {
return (void *)(uintptr_t)ESRCH; // XXX bug?
}
-
+
// since the main thread will not get de-allocated from underneath us
if (t == pthread_self() || t == &_thread) {
return t->stackaddr;
_PTHREAD_LOCK(_pthread_list_lock);
- ret = _pthread_find_thread(t);
- if (ret == 0) {
+ if (_pthread_is_valid_locked(t)) {
addr = t->stackaddr;
} else {
- addr = (void *)(uintptr_t)ret; // XXX bug?
+ addr = (void *)(uintptr_t)ESRCH; // XXX bug?
}
_PTHREAD_UNLOCK(_pthread_list_lock);
return addr;
}
+
static mach_port_t
_pthread_reply_port(pthread_t t)
{
}
}
+static mach_port_t
+_pthread_special_reply_port(pthread_t t)
+{
+ void *p;
+ if (t == NULL) {
+ p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY);
+ } else {
+ p = t->tsd[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY];
+ }
+ return (mach_port_t)(uintptr_t)p;
+}
+
+static void
+_pthread_dealloc_special_reply_port(pthread_t t)
+{
+ mach_port_t special_reply_port = _pthread_special_reply_port(t);
+ if (special_reply_port != MACH_PORT_NULL) {
+ mach_port_mod_refs(mach_task_self(), special_reply_port,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ }
+}
+
pthread_t
pthread_main_thread_np(void)
{
/* if we are passed in a pthread_t that is NULL, then we return
the current thread's thread_id. So folks don't have to call
- pthread_self, in addition to us doing it, if they just want
+ pthread_self, in addition to us doing it, if they just want
their thread_id.
*/
+PTHREAD_NOEXPORT_VARIANT
int
pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
{
*thread_id = self->thread_id;
} else {
_PTHREAD_LOCK(_pthread_list_lock);
- res = _pthread_find_thread(thread);
- if (res == 0) {
+ if (!_pthread_is_valid_locked(thread)) {
+ res = ESRCH;
+ } else if (thread->thread_id == 0) {
+ res = EINVAL;
+ } else {
*thread_id = thread->thread_id;
}
_PTHREAD_UNLOCK(_pthread_list_lock);
return res;
}
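/*
 * Per the comment above, passing NULL asks for the calling thread's id, so
 * a caller does not need pthread_self(); a minimal usage sketch with a
 * hypothetical function name:
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void
log_current_tid(void)
{
	uint64_t tid = 0;
	if (pthread_threadid_np(NULL, &tid) == 0) {
		printf("tid %llu\n", (unsigned long long)tid);
	}
}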
+PTHREAD_NOEXPORT_VARIANT
int
pthread_getname_np(pthread_t thread, char *threadname, size_t len)
{
- int res;
+ int res = 0;
if (thread == NULL) {
return ESRCH;
}
_PTHREAD_LOCK(_pthread_list_lock);
- res = _pthread_find_thread(thread);
- if (res == 0) {
+ if (_pthread_is_valid_locked(thread)) {
strlcpy(threadname, thread->pthread_name, len);
+ } else {
+ res = ESRCH;
}
_PTHREAD_UNLOCK(_pthread_list_lock);
return res;
}
+
int
pthread_setname_np(const char *name)
{
PTHREAD_ALWAYS_INLINE
static inline void
-__pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread)
+__pthread_add_thread(pthread_t t, const pthread_attr_t *attrs,
+ bool parent, bool from_mach_thread)
{
bool should_deallocate = false;
bool should_add = true;
- if (from_mach_thread){
+ mach_port_t kport = _pthread_kernel_thread(t);
+ if (os_slowpath(!MACH_PORT_VALID(kport))) {
+ PTHREAD_CLIENT_CRASH(kport,
+ "Unable to allocate thread port, possible port leak");
+ }
+
+ if (from_mach_thread) {
_PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
} else {
_PTHREAD_LOCK(_pthread_list_lock);
if (should_add) {
TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
_pthread_count++;
+
+ /*
+ * Set some initial values in the pthread structure that we already know,
+ * in case folks try to read them before the thread itself can set them.
+ */
+ if (parent && attrs && attrs->schedset == 0) {
+ t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = attrs->qosclass;
+ }
}
if (from_mach_thread){
__pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
{
int ret = 0;
-
+
bool should_remove = true;
_PTHREAD_LOCK(_pthread_list_lock);
// - Update the running thread count.
// When another thread removes a joinable thread:
// - CAREFUL not to dereference the thread before verifying that the
- // reference is still valid using _pthread_find_thread().
+ // reference is still valid using _pthread_is_valid_locked().
// - Remove the thread from the list.
if (child) {
should_remove = false;
}
*should_exit = (--_pthread_count <= 0);
- } else {
- ret = _pthread_find_thread(t);
- if (ret == 0) {
- // If we found a thread but it's not joinable, bail.
- if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
- should_remove = false;
- ret = ESRCH;
- }
- }
+ } else if (!_pthread_is_valid_locked(t)) {
+ ret = ESRCH;
+ should_remove = false;
+ } else if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
+ // If we found a thread but it's not joinable, bail.
+ ret = ESRCH;
+ should_remove = false;
+ } else if (t->parentcheck == 0) {
+ // If we're not the child thread *and* the parent has not finished
+ // creating the thread yet, then we are another thread that's joining
+ // and we cannot deallocate the pthread.
+ ret = EBUSY;
}
if (should_remove) {
TAILQ_REMOVE(&__pthread_head, t, plist);
}
_PTHREAD_UNLOCK(_pthread_list_lock);
-
+
return ret;
}
pthread_t t2;
t2 = __bsdthread_create(start_routine, arg, stack, t, flags);
if (t2 == (pthread_t)-1) {
+ if (errno == EMFILE) {
+ PTHREAD_CLIENT_CRASH(0,
+ "Unable to allocate thread port, possible port leak");
+ }
if (flags & PTHREAD_START_CUSTOM) {
// free the thread and stack if we allocated it
_pthread_deallocate(t);
t = t2;
}
- __pthread_add_thread(t, true, from_mach_thread);
+ __pthread_add_thread(t, attrs, true, from_mach_thread);
// n.b. if a thread is created detached and exits, t will be invalid
*thread = t;
return _pthread_create(thread, attr, start_routine, arg, true);
}
+PTHREAD_NORETURN
static void
_pthread_suspended_body(pthread_t self)
{
_pthread_set_self(self);
- __pthread_add_thread(self, false, false);
+ __pthread_add_thread(self, NULL, false, false);
_pthread_exit(self, (self->fun)(self->arg));
}
if (res) {
return res;
}
-
+
*thread = t;
kern_return_t kr;
_pthread_set_kernel_thread(t, kernel_thread);
(void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
-
+
__is_threaded = 1;
t->arg = arg;
t->fun = start_routine;
- __pthread_add_thread(t, true, false);
+ t->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
+ __pthread_add_thread(t, NULL, true, false);
// Set up a suspended thread.
_pthread_setup(t, _pthread_suspended_body, stack, 1, 0);
return res;
}
-int
+
+PTHREAD_NOEXPORT_VARIANT
+int
pthread_detach(pthread_t thread)
{
- int res;
+ int res = 0;
bool join = false;
semaphore_t sema = SEMAPHORE_NULL;
- res = _pthread_lookup_thread(thread, NULL, 1);
- if (res) {
- return res; // Not a valid thread to detach.
+ if (!_pthread_is_valid(thread, PTHREAD_IS_VALID_LOCK_THREAD, NULL)) {
+ return ESRCH; // Not a valid thread to detach.
}
- _PTHREAD_LOCK(thread->lock);
- if (thread->detached & PTHREAD_CREATE_JOINABLE) {
- if (thread->detached & _PTHREAD_EXITED) {
- // Join the thread if it's already exited.
- join = true;
- } else {
- thread->detached &= ~PTHREAD_CREATE_JOINABLE;
- thread->detached |= PTHREAD_CREATE_DETACHED;
- sema = thread->joiner_notify;
- }
- } else {
+ if ((thread->detached & PTHREAD_CREATE_DETACHED) ||
+ !(thread->detached & PTHREAD_CREATE_JOINABLE)) {
res = EINVAL;
+ } else if (thread->detached & _PTHREAD_EXITED) {
+ // Join the thread if it's already exited.
+ join = true;
+ } else {
+ thread->detached &= ~PTHREAD_CREATE_JOINABLE;
+ thread->detached |= PTHREAD_CREATE_DETACHED;
+ sema = thread->joiner_notify;
}
+
_PTHREAD_UNLOCK(thread->lock);
if (join) {
return res;
}
-int
+PTHREAD_NOEXPORT_VARIANT
+int
pthread_kill(pthread_t th, int sig)
-{
+{
if (sig < 0 || sig > NSIG) {
return EINVAL;
}
mach_port_t kport = MACH_PORT_NULL;
- if (_pthread_lookup_thread(th, &kport, 0) != 0) {
+ if (!_pthread_is_valid(th, 0, &kport)) {
return ESRCH; // Not a valid thread.
}
return ret;
}
-int
+PTHREAD_NOEXPORT_VARIANT
+int
__pthread_workqueue_setkill(int enable)
{
pthread_t self = pthread_self();
return 0;
}
-static void *
-__pthread_get_exit_value(pthread_t t, int conforming)
-{
- const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
- void *value = t->exit_value;
- if (conforming) {
- if ((t->cancel_state & flags) == flags) {
- value = PTHREAD_CANCELED;
- }
- }
- return value;
-}
/* For compatibility... */
int __disable_threadsignal(int);
PTHREAD_NORETURN
-static void
+static void
_pthread_exit(pthread_t self, void *value_ptr)
{
struct __darwin_pthread_handler_rec *handler;
// Clear per-thread semaphore cache
os_put_cached_semaphore(SEMAPHORE_NULL);
- _pthread_terminate(self);
+ _pthread_terminate_invoke(self);
}
void
}
}
-int
-pthread_getschedparam(pthread_t thread,
+
+PTHREAD_NOEXPORT_VARIANT
+int
+pthread_getschedparam(pthread_t thread,
int *policy,
struct sched_param *param)
{
- int ret;
+ int ret = 0;
if (thread == NULL) {
return ESRCH;
}
-
+
_PTHREAD_LOCK(_pthread_list_lock);
- ret = _pthread_find_thread(thread);
- if (ret == 0) {
+ if (_pthread_is_valid_locked(thread)) {
if (policy) {
*policy = thread->policy;
}
if (param) {
*param = thread->param;
}
+ } else {
+ ret = ESRCH;
}
_PTHREAD_UNLOCK(_pthread_list_lock);
return ret;
}
-static int
-pthread_setschedparam_internal(pthread_t thread,
+
+PTHREAD_ALWAYS_INLINE
+static inline int
+pthread_setschedparam_internal(pthread_t thread,
mach_port_t kport,
int policy,
const struct sched_param *param)
return (ret != KERN_SUCCESS) ? EINVAL : 0;
}
-int
+
+PTHREAD_NOEXPORT_VARIANT
+int
pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
{
mach_port_t kport = MACH_PORT_NULL;
int bypass = 1;
// since the main thread will not get de-allocated from underneath us
- if (t == pthread_self() || t == &_thread ) {
+ if (t == pthread_self() || t == &_thread) {
kport = _pthread_kernel_thread(t);
} else {
bypass = 0;
- (void)_pthread_lookup_thread(t, &kport, 0);
+ (void)_pthread_is_valid(t, 0, &kport);
}
-
+
res = pthread_setschedparam_internal(t, kport, policy, param);
if (res == 0) {
if (bypass == 0) {
// Ensure the thread is still valid.
_PTHREAD_LOCK(_pthread_list_lock);
- res = _pthread_find_thread(t);
- if (res == 0) {
+ if (_pthread_is_valid_locked(t)) {
t->policy = policy;
t->param = *param;
+ } else {
+ res = ESRCH;
}
_PTHREAD_UNLOCK(_pthread_list_lock);
} else {
return res;
}
+
int
sched_get_priority_min(int policy)
{
return default_priority + 16;
}
-int
+int
pthread_equal(pthread_t t1, pthread_t t2)
{
return (t1 == t2);
}
-/*
+/*
* Force LLVM not to optimise this to a call to __pthread_set_self; if it does,
* then _pthread_set_self won't be bound when secondary threads try to start up.
*/
return _pthread_set_self_internal(p, true);
}
-void
+PTHREAD_ALWAYS_INLINE
+static inline void
_pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
{
if (p == NULL) {
}
}
+
+// <rdar://problem/28984807> pthread_once should have an acquire barrier
+PTHREAD_ALWAYS_INLINE
+static inline void
+_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
+{
+ if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
+ _os_once(predicate, context, function);
+ OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
+ }
+}
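/*
 * Why the acquire barrier matters — a minimal sketch: a thread that loses
 * the race in pthread_once below must still observe every store made by
 * the initializer. The names here are hypothetical.
 */
#include <pthread.h>

static pthread_once_t value_once = PTHREAD_ONCE_INIT;
static int value;

static void value_init(void) { value = 42; }

static int
value_get(void)
{
	// The acquire load in _os_once_acquire pairs with the initializer's
	// release, so this read cannot observe a stale 'value'.
	pthread_once(&value_once, value_init);
	return value;
}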
+
struct _pthread_once_context {
pthread_once_t *pthread_once;
void (*routine)(void);
ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
}
-int
+PTHREAD_NOEXPORT_VARIANT
+int
pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
{
struct _pthread_once_context ctx = { once_control, init_routine };
do {
- os_once(&once_control->once, &ctx, __pthread_once_handler);
+ _os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
} while (once_control->sig == _PTHREAD_ONCE_SIG_init);
return 0;
}
-void
-_pthread_testcancel(pthread_t thread, int isconforming)
-{
- const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
-
- _PTHREAD_LOCK(thread->lock);
- bool canceled = ((thread->cancel_state & flags) == flags);
- _PTHREAD_UNLOCK(thread->lock);
-
- if (canceled) {
- pthread_exit(isconforming ? PTHREAD_CANCELED : 0);
- }
-}
-
-void
-_pthread_exit_if_canceled(int error)
-{
- if (__unix_conforming && ((error & 0xff) == EINTR) && (__pthread_canceled(0) == 0)) {
- pthread_t self = pthread_self();
- if (self != NULL) {
- self->cancel_error = error;
- }
- pthread_exit(PTHREAD_CANCELED);
- }
-}
int
pthread_getconcurrency(void)
_pthread_strtoul(const char *p, const char **endptr, int base)
{
uintptr_t val = 0;
-
+
// Expect hex string starting with "0x"
if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
p += 2;
// Finishes initialization of main thread attributes.
// Initializes the thread list and adds the main thread.
// Calls _pthread_set_self() to prepare the main thread for execution.
- __pthread_fork_child_internal(thread);
-
+ _pthread_main_thread_init(thread);
+
+ struct _pthread_registration_data registration_data;
// Set up kernel entry points with __bsdthread_register.
- pthread_workqueue_atfork_child();
+ _pthread_bsdthread_init(&registration_data);
- // Have pthread_key do its init envvar checks.
+ // Have pthread_key and pthread_mutex do their init envvar checks.
_pthread_key_global_init(envp);
+ _pthread_mutex_global_init(envp, &registration_data);
+
+#if PTHREAD_DEBUG_LOG
+ _SIMPLE_STRING path = _simple_salloc();
+ _simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
+ _pthread_debuglog = open(_simple_string(path),
+ O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
+ _simple_sfree(path);
+ _pthread_debugstart = mach_absolute_time();
+#endif
return 0;
}
-int
-sched_yield(void)
-{
- swtch_pri(0);
- return 0;
-}
-
PTHREAD_NOEXPORT void
-__pthread_fork_child_internal(pthread_t p)
+_pthread_main_thread_init(pthread_t p)
{
TAILQ_INIT(&__pthread_head);
_PTHREAD_LOCK_INIT(_pthread_list_lock);
p->joiner_notify = SEMAPHORE_NULL;
p->joiner = MACH_PORT_NULL;
p->detached |= _PTHREAD_CREATE_PARENT;
- p->tsd[__TSD_SEMAPHORE_CACHE] = SEMAPHORE_NULL;
+ p->tsd[__TSD_SEMAPHORE_CACHE] = (void*)SEMAPHORE_NULL;
+ p->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
// Initialize the list of threads with the new main thread.
TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
_pthread_introspection_thread_start(p);
}
-/*
- * Query/update the cancelability 'state' of a thread
- */
-PTHREAD_NOEXPORT int
-_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
-{
- pthread_t self;
-
- switch (state) {
- case PTHREAD_CANCEL_ENABLE:
- if (conforming) {
- __pthread_canceled(1);
- }
- break;
- case PTHREAD_CANCEL_DISABLE:
- if (conforming) {
- __pthread_canceled(2);
- }
- break;
- default:
- return EINVAL;
- }
-
- self = pthread_self();
- _PTHREAD_LOCK(self->lock);
- if (oldstate) {
- *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
- }
- self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
- self->cancel_state |= state;
- _PTHREAD_UNLOCK(self->lock);
- if (!conforming) {
- _pthread_testcancel(self, 0); /* See if we need to 'die' now... */
- }
- return 0;
-}
-
-/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
-static void
-_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
-{
- _PTHREAD_LOCK(self->lock);
- self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
- self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
- if (value_ptr == PTHREAD_CANCELED) {
-// 4597450: begin
- self->detached |= _PTHREAD_WASCANCEL;
-// 4597450: end
- }
- _PTHREAD_UNLOCK(self->lock);
-}
-
int
_pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
{
- // Returns ESRCH if the thread was not created joinable.
int ret = __pthread_remove_thread(thread, false, NULL);
- if (ret != 0) {
+ if (ret != 0 && ret != EBUSY) {
+ // Returns ESRCH if the thread was not created joinable.
return ret;
}
-
+
if (value_ptr) {
- *value_ptr = __pthread_get_exit_value(thread, conforming);
+ *value_ptr = _pthread_get_exit_value(thread, conforming);
}
_pthread_introspection_thread_destroy(thread);
- _pthread_deallocate(thread);
+ if (ret != EBUSY) {
+ // __pthread_remove_thread returns EBUSY if the parent has not
+ // finished creating the thread (and is still expecting the pthread_t
+ // to be alive).
+ _pthread_deallocate(thread);
+ }
return 0;
}
-/* ALWAYS called with list lock and return with list lock */
int
-_pthread_find_thread(pthread_t thread)
+sched_yield(void)
{
- if (thread != NULL) {
- pthread_t p;
-loop:
- TAILQ_FOREACH(p, &__pthread_head, plist) {
- if (p == thread) {
- if (_pthread_kernel_thread(thread) == MACH_PORT_NULL) {
- _PTHREAD_UNLOCK(_pthread_list_lock);
- sched_yield();
- _PTHREAD_LOCK(_pthread_list_lock);
- goto loop;
- }
- return 0;
- }
- }
- }
- return ESRCH;
+ swtch_pri(0);
+ return 0;
}
-int
-_pthread_lookup_thread(pthread_t thread, mach_port_t *portp, int only_joinable)
+// XXX remove
+void
+cthread_yield(void)
{
- mach_port_t kport = MACH_PORT_NULL;
- int ret;
-
- if (thread == NULL) {
- return ESRCH;
- }
-
- _PTHREAD_LOCK(_pthread_list_lock);
-
- ret = _pthread_find_thread(thread);
- if (ret == 0) {
- // Fail if we only want joinable threads and the thread found is
- // not in the detached state.
- if (only_joinable != 0 && (thread->detached & PTHREAD_CREATE_DETACHED) != 0) {
- ret = EINVAL;
- } else {
- kport = _pthread_kernel_thread(thread);
- }
- }
-
- _PTHREAD_UNLOCK(_pthread_list_lock);
-
- if (portp != NULL) {
- *portp = kport;
- }
+ sched_yield();
+}
- return ret;
+void
+pthread_yield_np(void)
+{
+ sched_yield();
}
+
+
+PTHREAD_NOEXPORT_VARIANT
void
_pthread_clear_qos_tsd(mach_port_t thread_port)
{
}
}
+
/***** pthread workqueue support routines *****/
PTHREAD_NOEXPORT void
-pthread_workqueue_atfork_child(void)
+_pthread_bsdthread_init(struct _pthread_registration_data *data)
{
- struct _pthread_registration_data data = {};
- data.version = sizeof(struct _pthread_registration_data);
- data.dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
- data.tsd_offset = offsetof(struct _pthread, tsd);
+ bzero(data, sizeof(*data));
+ data->version = sizeof(struct _pthread_registration_data);
+ data->dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
+ data->return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
+ data->tsd_offset = offsetof(struct _pthread, tsd);
+ data->mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);
int rv = __bsdthread_register(thread_start,
start_wqthread, (int)PTHREAD_SIZE,
- (void*)&data, (uintptr_t)sizeof(data),
- data.dispatch_queue_offset);
+ (void*)data, (uintptr_t)sizeof(*data),
+ data->dispatch_queue_offset);
if (rv > 0) {
+ if ((rv & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
+ PTHREAD_INTERNAL_CRASH(rv,
+ "Missing required support for QOS_CLASS_DEFAULT");
+ }
+ if ((rv & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
+ PTHREAD_INTERNAL_CRASH(rv,
+ "Missing required support for QOS_CLASS_MAINTENANCE");
+ }
__pthread_supported_features = rv;
}
- pthread_priority_t main_qos = (pthread_priority_t)data.main_qos;
+ /*
+ * TODO: differentiate between (-1, EINVAL) after fork (which has the side
+ * effect of resetting the child's stack_addr_hint before bailing out) and
+ * (-1, EINVAL) because of invalid arguments. We'd probably like to treat
+ * the latter as fatal.
+ *
+ * <rdar://problem/36451838>
+ */
+
+ pthread_priority_t main_qos = (pthread_priority_t)data->main_qos;
if (_pthread_priority_get_qos_newest(main_qos) != QOS_CLASS_UNSPECIFIED) {
_pthread_set_main_qos(main_qos);
}
// workqueue entry point from kernel
+PTHREAD_NORETURN
void
_pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *keventlist, int flags, int nkevents)
{
PTHREAD_ASSERT(flags & WQ_FLAG_THREAD_NEWSPI);
- int thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
- int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
- int overcommit = (flags & WQ_FLAG_THREAD_OVERCOMMIT) != 0;
- int kevent = flags & WQ_FLAG_THREAD_KEVENT;
+ bool thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
+ bool overcommit = flags & WQ_FLAG_THREAD_OVERCOMMIT;
+ bool kevent = flags & WQ_FLAG_THREAD_KEVENT;
+ bool workloop = (flags & WQ_FLAG_THREAD_WORKLOOP) &&
+ __libdispatch_workloopfunction != NULL;
PTHREAD_ASSERT((!kevent) || (__libdispatch_keventfunction != NULL));
+ PTHREAD_ASSERT(!workloop || kevent);
pthread_priority_t priority = 0;
unsigned long priority_flags = 0;
if (kevent)
priority_flags |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
- if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
- priority = _pthread_priority_make_version2(thread_class, 0, priority_flags);
- } else {
- priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);
- }
+ int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
+ priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);
- if (thread_reuse == 0) {
+ if (!thread_reuse) {
// New thread created by kernel, needs initialization.
void *stackaddr = self;
size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;
_pthread_set_kernel_thread(self, kport);
self->wqthread = 1;
self->wqkillset = 0;
+ self->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
// Not a joinable thread.
self->detached &= ~PTHREAD_CREATE_JOINABLE;
bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
_pthread_set_self_internal(self, !thread_tsd_base_set);
_pthread_introspection_thread_create(self, false);
- __pthread_add_thread(self, false, false);
+ __pthread_add_thread(self, NULL, false, false);
}
// If we're running with fine-grained priority, we also need to
PTHREAD_ASSERT(self == pthread_self());
#endif // WQ_DEBUG
- if (kevent){
+ if (workloop) {
+ self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
+ } else if (kevent){
self->fun = (void *(*)(void*))__libdispatch_keventfunction;
} else {
- self->fun = (void *(*)(void *))__libdispatch_workerfunction;
+ self->fun = (void *(*)(void*))__libdispatch_workerfunction;
}
self->arg = (void *)(uintptr_t)thread_class;
if (kevent && keventlist && nkevents > 0){
+ int errors_out;
kevent_errors_retry:
- (*__libdispatch_keventfunction)(&keventlist, &nkevents);
- int errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
+ if (workloop) {
+ kqueue_id_t kevent_id = *(kqueue_id_t*)((char*)keventlist - sizeof(kqueue_id_t));
+ kqueue_id_t kevent_id_in = kevent_id;
+ (__libdispatch_workloopfunction)(&kevent_id, &keventlist, &nkevents);
+ PTHREAD_ASSERT(kevent_id == kevent_id_in || nkevents == 0);
+ errors_out = __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, keventlist, nkevents, 0);
+ } else {
+ (__libdispatch_keventfunction)(&keventlist, &nkevents);
+ errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
+ }
+
if (errors_out > 0){
nkevents = errors_out;
goto kevent_errors_retry;
}
goto thexit;
} else if (kevent){
- (*__libdispatch_keventfunction)(NULL, NULL);
- __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
+ if (workloop) {
+ (__libdispatch_workloopfunction)(0, NULL, NULL);
+ __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, NULL, 0, -1);
+ } else {
+ (__libdispatch_keventfunction)(NULL, NULL);
+ __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
+ }
goto thexit;
}
if ((current_priority & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) ||
(_pthread_priority_get_qos_newest(current_priority) > WQ_THREAD_CLEANUP_QOS)) {
// Reset QoS to something low for the cleanup process
- pthread_priority_t priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
+ priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
_pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
}
}
/***** pthread workqueue API for libdispatch *****/
+_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
+ "Kernel and userland should agree on the event list size");
+
void
pthread_workqueue_setdispatchoffset_np(int offset)
{
__libdispatch_offset = offset;
}
-int
-pthread_workqueue_setdispatch_with_kevent_np(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func)
+static int
+pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func,
+ pthread_workqueue_function_kevent_t kevent_func,
+ pthread_workqueue_function_workloop_t workloop_func)
{
int res = EBUSY;
if (__libdispatch_workerfunction == NULL) {
} else {
__libdispatch_workerfunction = queue_func;
__libdispatch_keventfunction = kevent_func;
+ __libdispatch_workloopfunction = workloop_func;
// Prepare the kernel for workq action
(void)__workq_open();
}
int
-_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func, int offset, int flags)
+_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
+ pthread_workqueue_function_kevent_t kevent_func,
+ pthread_workqueue_function_workloop_t workloop_func,
+ int offset, int flags)
{
if (flags != 0) {
return ENOTSUP;
}
-
+
__workq_newapi = true;
__libdispatch_offset = offset;
-
- int rv = pthread_workqueue_setdispatch_with_kevent_np(queue_func, kevent_func);
+
+ int rv = pthread_workqueue_setdispatch_with_workloop_np(queue_func, kevent_func, workloop_func);
return rv;
}
+int
+_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
+ pthread_workqueue_function_kevent_t kevent_func,
+ int offset, int flags)
+{
+ return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
+}
+
int
_pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
{
int
pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
{
- return pthread_workqueue_setdispatch_with_kevent_np((pthread_workqueue_function2_t)worker_func, NULL);
+ return pthread_workqueue_setdispatch_with_workloop_np((pthread_workqueue_function2_t)worker_func, NULL, NULL);
}
int
_pthread_workqueue_supported(void)
{
+ if (os_unlikely(!__pthread_supported_features)) {
+ PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
+ }
+
return __pthread_supported_features;
}
flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
}
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
+#pragma clang diagnostic pop
} else {
/* Running on the old kernel, queue_priority is what we pass directly to
return res;
}
+bool
+_pthread_workqueue_should_narrow(pthread_priority_t pri)
+{
+ int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
+ if (res == -1) {
+ return false;
+ }
+ return res;
+}
+
int
_pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
{
pthread_introspection_hook_t
pthread_introspection_hook_install(pthread_introspection_hook_t hook)
{
- if (os_slowpath(!hook)) {
- PTHREAD_ABORT("pthread_introspection_hook_install was passed NULL");
- }
pthread_introspection_hook_t prev;
- prev = __sync_swap(&_pthread_introspection_hook, hook);
+ prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook);
return prev;
}