--- /dev/null
+#freebsd = https://github.com/freebsd/freebsd.git
+#netbsd = https://github.com/jsonn/src
+man/pthread.3 freebsd share/man/man3/pthread.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_atfork.3 freebsd share/man/man3/pthread_atfork.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_attr.3 freebsd share/man/man3/pthread_attr.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_cancel.3 freebsd share/man/man3/pthread_cancel.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_cleanup_pop.3 freebsd share/man/man3/pthread_cleanup_pop.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_cleanup_push.3 freebsd share/man/man3/pthread_cleanup_push.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_cond_broadcast.3 freebsd share/man/man3/pthread_cond_broadcast.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_cond_destroy.3 freebsd share/man/man3/pthread_cond_destroy.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_cond_init.3 freebsd share/man/man3/pthread_cond_init.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_cond_signal.3 freebsd share/man/man3/pthread_cond_signal.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_cond_timedwait.3 freebsd share/man/man3/pthread_cond_timedwait.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_cond_wait.3 freebsd share/man/man3/pthread_cond_wait.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_condattr.3 freebsd share/man/man3/pthread_condattr.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_create.3 freebsd share/man/man3/pthread_create.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_detach.3 freebsd share/man/man3/pthread_detach.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_equal.3 freebsd share/man/man3/pthread_equal.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_exit.3 freebsd share/man/man3/pthread_exit.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_getspecific.3 freebsd share/man/man3/pthread_getspecific.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_join.3 freebsd share/man/man3/pthread_join.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_key_create.3 freebsd share/man/man3/pthread_key_create.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_key_delete.3 freebsd share/man/man3/pthread_key_delete.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_mutex_destroy.3 freebsd share/man/man3/pthread_mutex_destroy.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_mutex_init.3 freebsd share/man/man3/pthread_mutex_init.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_mutex_lock.3 freebsd share/man/man3/pthread_mutex_lock.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_mutex_trylock.3 freebsd share/man/man3/pthread_mutex_trylock.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_mutex_unlock.3 freebsd share/man/man3/pthread_mutex_unlock.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_mutexattr.3 netbsd lib/libpthread/pthread_mutexattr.3 55d5c86daab8888c63bdda28ee1dd528025833a5
+man/pthread_once.3 freebsd share/man/man3/pthread_once.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_rwlock_destroy.3 freebsd share/man/man3/pthread_rwlock_destroy.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_rwlock_init.3 freebsd share/man/man3/pthread_rwlock_init.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_rwlock_rdlock.3 freebsd share/man/man3/pthread_rwlock_rdlock.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_rwlock_unlock.3 freebsd share/man/man3/pthread_rwlock_unlock.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_rwlock_wrlock.3 freebsd share/man/man3/pthread_rwlock_wrlock.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_rwlockattr_destroy.3 freebsd share/man/man3/pthread_rwlockattr_destroy.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_rwlockattr_getpshared.3 freebsd share/man/man3/pthread_rwlockattr_getpshared.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_rwlockattr_init.3 freebsd share/man/man3/pthread_rwlockattr_init.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_rwlockattr_setpshared.3 freebsd share/man/man3/pthread_rwlockattr_setpshared.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_self.3 freebsd share/man/man3/pthread_self.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_setspecific.3 freebsd share/man/man3/pthread_setspecific.3 47f557e75db43cad41fe283ec73e82a323268b81
+man/pthread_setcancelstate.3 freebsd share/man/man3/pthread_testcancel.3 7163c791983c0f54683941b66da3004d125a2f52
+man/pthread_sigmask.2 freebsd share/man/man3/pthread_sigmask.3 f0d258568af32f2a815311e3ad3c6d253c67cd6e
+man/pthread_getschedparam.3 freebsd share/man/man3/pthread_schedparam.3 f0d258568af32f2a815311e3ad3c6d253c67cd6e
+man/pthread_kill.2 freebsd share/man/man3/pthread_kill.3 f0d258568af32f2a815311e3ad3c6d253c67cd6e
+man/pthread_setname_np.3 freebsd share/man/man3/pthread_set_name_np.3 f0d258568af32f2a815311e3ad3c6d253c67cd6e
+man/pthread_yield_np.3 freebsd share/man/man3/pthread_yield.3 f0d258568af32f2a815311e3ad3c6d253c67cd6e
const struct pthread_functions_s pthread_internal_functions = {
.pthread_init = _pthread_init,
.fill_procworkqueue = _fill_procworkqueue,
- .workqueue_init_lock = _workqueue_init_lock,
- .workqueue_destroy_lock = _workqueue_destroy_lock,
+ .get_pwq_state_kdp = _get_pwq_state_kdp,
.workqueue_exit = _workqueue_exit,
.workqueue_mark_exiting = _workqueue_mark_exiting,
.workqueue_thread_yielded = _workqueue_thread_yielded,
.workq_reqthreads = _workq_reqthreads,
.thread_qos_from_pthread_priority = _thread_qos_from_pthread_priority,
+ .pthread_priority_canonicalize2 = _pthread_priority_canonicalize,
};
kern_return_t pthread_start(__unused kmod_info_t * ki, __unused void *d)
#define PTHREAD_FEATURE_BSDTHREADCTL 0x04 /* is the bsdthread_ctl syscall available */
#define PTHREAD_FEATURE_SETSELF 0x08 /* is the BSDTHREAD_CTL_SET_SELF command of bsdthread_ctl available */
#define PTHREAD_FEATURE_QOS_MAINTENANCE 0x10 /* is QOS_CLASS_MAINTENANCE available */
-#define PTHREAD_FEATURE_KEVENT 0x20 /* supports direct kevent delivery */
+#define PTHREAD_FEATURE_RESERVED 0x20 /* burnt, shipped in OSX 10.11 & iOS 9 with partial kevent delivery support */
+#define PTHREAD_FEATURE_KEVENT 0x40 /* supports direct kevent delivery */
#define PTHREAD_FEATURE_QOS_DEFAULT 0x40000000 /* the kernel supports QOS_CLASS_DEFAULT */
/* pthread bsdthread_ctl sysctl commands */
-#define BSDTHREAD_CTL_SET_QOS 0x10 /* bsdthread_ctl(BSDTHREAD_CTL_SET_QOS, thread_port, tsd_entry_addr, 0) */
-#define BSDTHREAD_CTL_GET_QOS 0x20 /* bsdthread_ctl(BSDTHREAD_CTL_GET_QOS, thread_port, 0, 0) */
+#define BSDTHREAD_CTL_SET_QOS 0x10 /* bsdthread_ctl(BSDTHREAD_CTL_SET_QOS, thread_port, tsd_entry_addr, 0) */
+#define BSDTHREAD_CTL_GET_QOS 0x20 /* bsdthread_ctl(BSDTHREAD_CTL_GET_QOS, thread_port, 0, 0) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_START 0x40 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread_port, priority, 0) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_END 0x80 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread_port, 0, 0) */
-#define BSDTHREAD_CTL_SET_SELF 0x100 /* bsdthread_ctl(BSDTHREAD_CTL_SET_SELF, priority, voucher, flags) */
+#define BSDTHREAD_CTL_SET_SELF 0x100 /* bsdthread_ctl(BSDTHREAD_CTL_SET_SELF, priority, voucher, flags) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_RESET 0x200 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_RESET, 0, 0, 0) */
#define BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH 0x400 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread_port, priority, 0) */
-#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD 0x401 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD, thread_port, priority, resource) */
-#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET 0x402 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET, 0|1 (?reset_all), resource, 0) */
+#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD 0x401 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD, thread_port, priority, resource) */
+#define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET 0x402 /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET, 0|1 (?reset_all), resource, 0) */
/* qos_class_t is mapped into one of these bits in the bitfield, this mapping now exists here because
* libdispatch requires the QoS class mask of the pthread_priority_t to be a bitfield.
/* userspace <-> kernel registration struct, for passing data to/from the kext during main thread init. */
struct _pthread_registration_data {
- uint64_t version; /* copy-in */
+ /*
+ * version == sizeof(struct _pthread_registration_data)
+ *
+ * The structure can only grow, so we use its size as the version.
+ * Userspace initializes this to the size of its structure and the kext
+ * will copy out the version that was actually consumed.
+ *
+ * n.b. you must make sure the size of this structure isn't LP64-dependent
+ */
+ uint64_t version;
+
uint64_t dispatch_queue_offset; /* copy-in */
- pthread_priority_t main_qos; /* copy-out */
-};
+ uint64_t /* pthread_priority_t */ main_qos; /* copy-out */
+ uint32_t tsd_offset; /* copy-in */
+} __attribute__ ((packed));
#ifdef KERNEL
};
typedef struct ksyn_waitq_element * ksyn_waitq_element_t;
-pthread_priority_t pthread_qos_class_get_priority(int qos) __attribute__((const));
-int pthread_priority_get_qos_class(pthread_priority_t priority) __attribute__((const));
+pthread_priority_t thread_qos_get_pthread_priority(int qos) __attribute__((const));
+int thread_qos_get_class_index(int qos) __attribute__((const));
+int pthread_priority_get_thread_qos(pthread_priority_t priority) __attribute__((const));
int pthread_priority_get_class_index(pthread_priority_t priority) __attribute__((const));
-int qos_get_class_index(int qos) __attribute__((const));
-pthread_priority_t pthread_priority_from_class_index(int index) __attribute__((const));
+pthread_priority_t class_index_get_pthread_priority(int index) __attribute__((const));
+int class_index_get_thread_qos(int index) __attribute__((const));
+int qos_class_get_class_index(int qos) __attribute__((const));
#define PTH_DEFAULT_STACKSIZE 512*1024
#define MAX_PTHREAD_SIZE 64*1024
void _pthread_init(void);
int _fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo);
-void _workqueue_init_lock(proc_t p);
-void _workqueue_destroy_lock(proc_t p);
+uint32_t _get_pwq_state_kdp(proc_t p);
void _workqueue_exit(struct proc *p);
void _workqueue_mark_exiting(struct proc *p);
void _workqueue_thread_yielded(void);
// Resolve a pthread_priority_t to a QoS/relative pri
integer_t _thread_qos_from_pthread_priority(unsigned long pri, unsigned long *flags);
+// Clear out extraneous flags/pri info for putting in voucher
+pthread_priority_t _pthread_priority_canonicalize(pthread_priority_t pri, boolean_t for_propagation);
#endif // KERNEL
#include <kern/assert.h>
pthread_priority_t
-pthread_qos_class_get_priority(int qos)
+thread_qos_get_pthread_priority(int qos)
{
- /* Map the buckets we have in pthread_priority_t into a QoS tier. */
- switch (qos) {
+ /* Map the buckets we have in pthread_priority_t into a QoS tier. */
+ switch (qos) {
case THREAD_QOS_USER_INTERACTIVE: return _pthread_priority_make_newest(QOS_CLASS_USER_INTERACTIVE, 0, 0);
case THREAD_QOS_USER_INITIATED: return _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, 0);
case THREAD_QOS_LEGACY: return _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, 0);
case THREAD_QOS_BACKGROUND: return _pthread_priority_make_newest(QOS_CLASS_BACKGROUND, 0, 0);
case THREAD_QOS_MAINTENANCE: return _pthread_priority_make_newest(QOS_CLASS_MAINTENANCE, 0, 0);
default: return _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
+ }
+}
+
+int
+thread_qos_get_class_index(int qos)
+{
+ switch (qos) {
+ case THREAD_QOS_USER_INTERACTIVE: return 0;
+ case THREAD_QOS_USER_INITIATED: return 1;
+ case THREAD_QOS_LEGACY: return 2;
+ case THREAD_QOS_UTILITY: return 3;
+ case THREAD_QOS_BACKGROUND: return 4;
+ case THREAD_QOS_MAINTENANCE: return 5;
+ default: return 2;
}
}
int
-pthread_priority_get_qos_class(pthread_priority_t priority)
+pthread_priority_get_thread_qos(pthread_priority_t priority)
{
/* Map the buckets we have in pthread_priority_t into a QoS tier. */
switch (_pthread_priority_get_qos_newest(priority)) {
}
}
+int
+pthread_priority_get_class_index(pthread_priority_t priority)
+{
+ return qos_class_get_class_index(_pthread_priority_get_qos_newest(priority));
+}
+
pthread_priority_t
-pthread_priority_from_class_index(int index)
+class_index_get_pthread_priority(int index)
{
qos_class_t qos;
switch (index) {
}
int
-qos_get_class_index(int qos){
+class_index_get_thread_qos(int class)
+{
+ int thread_qos;
+ switch (class) {
+ case 0: thread_qos = THREAD_QOS_USER_INTERACTIVE; break;
+ case 1: thread_qos = THREAD_QOS_USER_INITIATED; break;
+ case 2: thread_qos = THREAD_QOS_LEGACY; break;
+ case 3: thread_qos = THREAD_QOS_UTILITY; break;
+ case 4: thread_qos = THREAD_QOS_BACKGROUND; break;
+ case 5: thread_qos = THREAD_QOS_MAINTENANCE; break;
+ case 6: thread_qos = THREAD_QOS_LAST; break;
+ default:
+ thread_qos = THREAD_QOS_LAST;
+ }
+ return thread_qos;
+}
+
+int
+qos_class_get_class_index(int qos)
+{
switch (qos){
case QOS_CLASS_USER_INTERACTIVE: return 0;
case QOS_CLASS_USER_INITIATED: return 1;
case QOS_CLASS_BACKGROUND: return 4;
case QOS_CLASS_MAINTENANCE: return 5;
default:
- /* Return the utility band if we don't understand the input. */
+ /* Return the default band if we don't understand the input. */
return 2;
}
}
-int
-pthread_priority_get_class_index(pthread_priority_t priority)
-{
- return qos_get_class_index(_pthread_priority_get_qos_newest(priority));
-}
+/**
+ * Shims to help the kernel understand pthread_priority_t
+ */
integer_t
-_thread_qos_from_pthread_priority(unsigned long priority, unsigned long *flags){
- if (flags){
- *flags = (int)_pthread_priority_get_flags(priority) >> _PTHREAD_PRIORITY_FLAGS_SHIFT;
+_thread_qos_from_pthread_priority(unsigned long priority, unsigned long *flags)
+{
+ if (flags != NULL){
+ *flags = (int)_pthread_priority_get_flags(priority);
}
- return pthread_priority_get_qos_class(priority);
+ int thread_qos = pthread_priority_get_thread_qos(priority);
+ if (thread_qos == THREAD_QOS_UNSPECIFIED && flags != NULL){
+ *flags |= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+ }
+ return thread_qos;
+}
+
+pthread_priority_t
+_pthread_priority_canonicalize(pthread_priority_t priority, boolean_t for_propagation)
+{
+ qos_class_t qos_class;
+ int relpri;
+ unsigned long flags = _pthread_priority_get_flags(priority);
+ _pthread_priority_split_newest(priority, qos_class, relpri);
+
+ if (for_propagation) {
+ flags = 0;
+ if (relpri > 0 || relpri < -15) relpri = 0;
+ } else {
+ if (qos_class == QOS_CLASS_UNSPECIFIED) {
+ flags = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+ } else if (flags & (_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG|_PTHREAD_PRIORITY_SCHED_PRI_FLAG)){
+ flags = _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+ qos_class = QOS_CLASS_UNSPECIFIED;
+ } else {
+ flags &= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG|_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+ }
+
+ relpri = 0;
+ }
+
+ return _pthread_priority_make_newest(qos_class, relpri, flags);
}
#undef pthread_rwlockattr_t
#undef pthread_rwlock_t
+#include <sys/cdefs.h>
+
+// <rdar://problem/26158937> panic() should be marked noreturn
+extern void panic(const char *string, ...) __printflike(1,2) __dead2;
+
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/resourcevar.h>
#include <mach/shared_region.h>
#include <libkern/OSAtomic.h>
+#include <libkern/libkern.h>
#include <sys/pthread_shims.h>
#include "kern_internal.h"
-#if DEBUG
-#define kevent_qos_internal kevent_qos_internal_stub
-static int kevent_qos_internal_stub(__unused struct proc *p, __unused int fd,
- __unused user_addr_t changelist, __unused int nchanges,
- __unused user_addr_t eventlist, __unused int nevents,
- __unused user_addr_t data_out, user_size_t *data_available,
- __unused unsigned int flags, int32_t *retval){
- if (data_available){
- static int i = 0;
- switch (i++ % 4) {
- case 0:
- case 2:
- *data_available = *data_available / 2;
- *retval = 4;
- break;
- case 1:
- *data_available = 0;
- *retval = 4;
- break;
- case 3:
- *retval = 0;
- break;
- }
- } else {
- *retval = 0;
- }
- return 0;
-}
-#endif /* DEBUG */
-
-uint32_t pthread_debug_tracing = 1;
-
-SYSCTL_INT(_kern, OID_AUTO, pthread_debug_tracing, CTLFLAG_RW | CTLFLAG_LOCKED,
- &pthread_debug_tracing, 0, "")
-
// XXX: Dirty import for sys/signalvar.h that's wrapped in BSD_KERNEL_PRIVATE
#define sigcantmask (sigmask(SIGKILL) | sigmask(SIGSTOP))
+// XXX: Ditto for thread tags from kern/thread.h
+#define THREAD_TAG_MAINTHREAD 0x1
+#define THREAD_TAG_PTHREAD 0x10
+#define THREAD_TAG_WORKQUEUE 0x20
+
lck_grp_attr_t *pthread_lck_grp_attr;
lck_grp_t *pthread_lck_grp;
lck_attr_t *pthread_lck_attr;
extern void thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64);
extern void workqueue_thread_yielded(void);
-enum run_nextreq_mode {RUN_NEXTREQ_DEFAULT, RUN_NEXTREQ_OVERCOMMIT, RUN_NEXTREQ_DEFERRED_OVERCOMMIT, RUN_NEXTREQ_UNCONSTRAINED, RUN_NEXTREQ_EVENT_MANAGER};
-static boolean_t workqueue_run_nextreq(proc_t p, struct workqueue *wq, thread_t th, enum run_nextreq_mode mode, pthread_priority_t oc_prio);
+enum run_nextreq_mode {
+ RUN_NEXTREQ_DEFAULT,
+ RUN_NEXTREQ_DEFAULT_KEVENT,
+ RUN_NEXTREQ_OVERCOMMIT,
+ RUN_NEXTREQ_OVERCOMMIT_KEVENT,
+ RUN_NEXTREQ_DEFERRED_OVERCOMMIT,
+ RUN_NEXTREQ_UNCONSTRAINED,
+ RUN_NEXTREQ_EVENT_MANAGER,
+ RUN_NEXTREQ_ADD_TIMER
+};
+static thread_t workqueue_run_nextreq(proc_t p, struct workqueue *wq, thread_t th,
+ enum run_nextreq_mode mode, pthread_priority_t prio,
+ bool kevent_bind_via_return);
static boolean_t workqueue_run_one(proc_t p, struct workqueue *wq, boolean_t overcommit, pthread_priority_t priority);
-static void wq_runreq(proc_t p, pthread_priority_t priority, thread_t th, struct threadlist *tl,
- int reuse_thread, int wake_thread, int return_directly);
+static void wq_runreq(proc_t p, thread_t th, struct workqueue *wq,
+ struct threadlist *tl, boolean_t return_directly, boolean_t deferred_kevent);
-static int _setup_wqthread(proc_t p, thread_t th, pthread_priority_t priority, int reuse_thread, struct threadlist *tl);
+static void _setup_wqthread(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *tl, bool first_use);
-static void wq_unpark_continue(void);
-static void wq_unsuspend_continue(void);
+static void reset_priority(struct threadlist *tl, pthread_priority_t pri);
+static pthread_priority_t pthread_priority_from_wq_class_index(struct workqueue *wq, int index);
+
+static void wq_unpark_continue(void* ptr, wait_result_t wait_result) __dead2;
static boolean_t workqueue_addnewthread(struct workqueue *wq, boolean_t ignore_constrained_thread_limit);
-static void workqueue_removethread(struct threadlist *tl, int fromexit);
-static void workqueue_lock_spin(proc_t);
-static void workqueue_unlock(proc_t);
+
+static void workqueue_removethread(struct threadlist *tl, bool fromexit, bool first_use);
+static void workqueue_lock_spin(struct workqueue *);
+static void workqueue_unlock(struct workqueue *);
static boolean_t may_start_constrained_thread(struct workqueue *wq, uint32_t at_priclass, uint32_t my_priclass, boolean_t *start_timer);
-static mach_vm_offset_t stackaddr_hint(proc_t p);
+static mach_vm_offset_t stack_addr_hint(proc_t p, vm_map_t vmap);
int proc_settargetconc(pid_t pid, int queuenum, int32_t targetconc);
int proc_setalltargetconc(pid_t pid, int32_t * targetconcp);
-----------------------------------------
*/
-#define PTHREAD_START_CUSTOM 0x01000000
-#define PTHREAD_START_SETSCHED 0x02000000
-#define PTHREAD_START_DETACHED 0x04000000
-#define PTHREAD_START_QOSCLASS 0x08000000
-#define PTHREAD_START_QOSCLASS_MASK 0xffffff
+#define PTHREAD_START_CUSTOM 0x01000000
+#define PTHREAD_START_SETSCHED 0x02000000
+#define PTHREAD_START_DETACHED 0x04000000
+#define PTHREAD_START_QOSCLASS 0x08000000
+#define PTHREAD_START_TSD_BASE_SET 0x10000000
+#define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
#define BASEPRI_DEFAULT 31
+#pragma mark sysctls
+
+uint32_t wq_yielded_threshold = WQ_YIELDED_THRESHOLD;
+uint32_t wq_yielded_window_usecs = WQ_YIELDED_WINDOW_USECS;
+uint32_t wq_stalled_window_usecs = WQ_STALLED_WINDOW_USECS;
+uint32_t wq_reduce_pool_window_usecs = WQ_REDUCE_POOL_WINDOW_USECS;
+uint32_t wq_max_timer_interval_usecs = WQ_MAX_TIMER_INTERVAL_USECS;
+uint32_t wq_max_threads = WORKQUEUE_MAXTHREADS;
+uint32_t wq_max_constrained_threads = WORKQUEUE_MAXTHREADS / 8;
+uint32_t wq_max_concurrency = 1; // set to ncpus on load
+
+SYSCTL_INT(_kern, OID_AUTO, wq_yielded_threshold, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &wq_yielded_threshold, 0, "");
+
+SYSCTL_INT(_kern, OID_AUTO, wq_yielded_window_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &wq_yielded_window_usecs, 0, "");
+
+SYSCTL_INT(_kern, OID_AUTO, wq_stalled_window_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &wq_stalled_window_usecs, 0, "");
+
+SYSCTL_INT(_kern, OID_AUTO, wq_reduce_pool_window_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &wq_reduce_pool_window_usecs, 0, "");
+
+SYSCTL_INT(_kern, OID_AUTO, wq_max_timer_interval_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &wq_max_timer_interval_usecs, 0, "");
+
+SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &wq_max_threads, 0, "");
+
+SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &wq_max_constrained_threads, 0, "");
+
+#ifdef DEBUG
+SYSCTL_INT(_kern, OID_AUTO, wq_max_concurrency, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &wq_max_concurrency, 0, "");
+
+static int wq_kevent_test SYSCTL_HANDLER_ARGS;
+SYSCTL_PROC(_debug, OID_AUTO, wq_kevent_test, CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE, NULL, 0, wq_kevent_test, 0, "-");
+#endif
+
+static uint32_t wq_init_constrained_limit = 1;
+
+uint32_t pthread_debug_tracing = 1;
+
+SYSCTL_INT(_kern, OID_AUTO, pthread_debug_tracing, CTLFLAG_RW | CTLFLAG_LOCKED,
+ &pthread_debug_tracing, 0, "")
+
+
#pragma mark - Process/Thread Setup/Teardown syscalls
-static mach_vm_offset_t stackaddr_hint(proc_t p __unused){
+static mach_vm_offset_t
+stack_addr_hint(proc_t p, vm_map_t vmap)
+{
mach_vm_offset_t stackaddr;
+ mach_vm_offset_t aslr_offset;
+ bool proc64bit = proc_is64bit(p);
+
+	// We can't safely take random values % something unless it's a power-of-two
+ _Static_assert(powerof2(PTH_DEFAULT_STACKSIZE), "PTH_DEFAULT_STACKSIZE is a power-of-two");
+
#if defined(__i386__) || defined(__x86_64__)
- if (proc_is64bit(p)){
+ if (proc64bit) {
+ // Matches vm_map_get_max_aslr_slide_pages's image shift in xnu
+ aslr_offset = random() % (1 << 28); // about 512 stacks
+ } else {
+ // Actually bigger than the image shift, we've got ~256MB to work with
+ aslr_offset = random() % (16 * PTH_DEFAULT_STACKSIZE);
+ }
+ aslr_offset = vm_map_trunc_page_mask(aslr_offset, vm_map_page_mask(vmap));
+ if (proc64bit) {
// Above nanomalloc range (see NANOZONE_SIGNATURE)
- stackaddr = 0x700000000000;
+ stackaddr = 0x700000000000 + aslr_offset;
} else {
- stackaddr = SHARED_REGION_BASE_I386 + SHARED_REGION_SIZE_I386;
+ stackaddr = SHARED_REGION_BASE_I386 + SHARED_REGION_SIZE_I386 + aslr_offset;
}
#elif defined(__arm__) || defined(__arm64__)
- if (proc_is64bit(p)){
+ // vm_map_get_max_aslr_slide_pages ensures 1MB of slide, we do better
+ aslr_offset = random() % ((proc64bit ? 4 : 2) * PTH_DEFAULT_STACKSIZE);
+ aslr_offset = vm_map_trunc_page_mask((vm_map_offset_t)aslr_offset, vm_map_page_mask(vmap));
+ if (proc64bit) {
// 64 stacks below nanomalloc (see NANOZONE_SIGNATURE)
- stackaddr = 0x170000000 - 64 * PTH_DEFAULT_STACKSIZE;
-#if defined(__arm__)
- } else if (pthread_kern->map_is_1gb(get_task_map(pthread_kern->proc_get_task(p)))){
- stackaddr = SHARED_REGION_BASE_ARM - 32 * PTH_DEFAULT_STACKSIZE;
-#endif
+ stackaddr = 0x170000000 - 64 * PTH_DEFAULT_STACKSIZE - aslr_offset;
} else {
- stackaddr = SHARED_REGION_BASE_ARM + SHARED_REGION_SIZE_ARM;
+ // If you try to slide down from this point, you risk ending up in memory consumed by malloc
+ stackaddr = SHARED_REGION_BASE_ARM - 32 * PTH_DEFAULT_STACKSIZE + aslr_offset;
}
#else
#error Need to define a stack address hint for this architecture
mach_vm_size_t th_guardsize;
mach_vm_offset_t th_stack;
mach_vm_offset_t th_pthread;
+ mach_vm_offset_t th_tsd_base;
mach_port_name_t th_thport;
thread_t th;
vm_map_t vmap = pthread_kern->current_map();
task_t ctask = current_task();
unsigned int policy, importance;
-
+ uint32_t tsd_offset;
+
int isLP64 = 0;
if (pthread_kern->proc_get_register(p) == 0) {
isLP64 = proc_is64bit(p);
th_guardsize = vm_map_page_size(vmap);
- stackaddr = stackaddr_hint(p);
+ stackaddr = pthread_kern->proc_get_stack_addr_hint(p);
kret = pthread_kern->thread_create(ctask, &th);
if (kret != KERN_SUCCESS)
return(ENOMEM);
thread_reference(th);
+ pthread_kern->thread_set_tag(th, THREAD_TAG_PTHREAD);
+
sright = (void *)pthread_kern->convert_thread_to_port(th);
th_thport = pthread_kern->ipc_port_copyout_send(sright, pthread_kern->task_get_ipcspace(ctask));
th_allocsize = th_guardsize + user_stack + pthread_size;
user_stack += PTHREAD_T_OFFSET;
- kret = mach_vm_map(vmap, &stackaddr,
- th_allocsize,
+ kret = mach_vm_map(vmap, &stackaddr,
+ th_allocsize,
page_size-1,
VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , NULL,
0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
if (kret != KERN_SUCCESS){
- kret = mach_vm_allocate(vmap,
+ kret = mach_vm_allocate(vmap,
&stackaddr, th_allocsize,
VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE);
}
PTHREAD_TRACE(TRACE_pthread_thread_create|DBG_FUNC_NONE, 0, 0, 0, 3, 0);
}
-
+
+ tsd_offset = pthread_kern->proc_get_pthread_tsd_offset(p);
+ if (tsd_offset) {
+ th_tsd_base = th_pthread + tsd_offset;
+ kret = pthread_kern->thread_set_tsd_base(th, th_tsd_base);
+ if (kret == KERN_SUCCESS) {
+ flags |= PTHREAD_START_TSD_BASE_SET;
+ }
+ }
+
#if defined(__i386__) || defined(__x86_64__)
/*
* Set up i386 registers & function call.
pthread_priority_t priority = (pthread_priority_t)(flags & PTHREAD_START_QOSCLASS_MASK);
thread_qos_policy_data_t qos;
- qos.qos_tier = pthread_priority_get_qos_class(priority);
+ qos.qos_tier = pthread_priority_get_thread_qos(priority);
qos.tier_importance = (qos.qos_tier == QOS_CLASS_UNSPECIFIED) ? 0 :
_pthread_priority_get_relpri(priority);
mach_vm_offset_t freeaddr;
mach_vm_size_t freesize;
kern_return_t kret;
+ thread_t th = current_thread();
freeaddr = (mach_vm_offset_t)stackaddr;
freesize = size;
PTHREAD_TRACE(TRACE_pthread_thread_terminate|DBG_FUNC_START, freeaddr, freesize, kthport, 0xff, 0);
if ((freesize != (mach_vm_size_t)0) && (freeaddr != (mach_vm_offset_t)0)) {
- kret = mach_vm_deallocate(pthread_kern->current_map(), freeaddr, freesize);
- if (kret != KERN_SUCCESS) {
- PTHREAD_TRACE(TRACE_pthread_thread_terminate|DBG_FUNC_END, kret, 0, 0, 0, 0);
- return(EINVAL);
+ if (pthread_kern->thread_get_tag(th) & THREAD_TAG_MAINTHREAD){
+ vm_map_t user_map = pthread_kern->current_map();
+ freesize = vm_map_trunc_page_mask((vm_map_offset_t)freesize - 1, vm_map_page_mask(user_map));
+ kret = mach_vm_behavior_set(user_map, freeaddr, freesize, VM_BEHAVIOR_REUSABLE);
+ assert(kret == KERN_SUCCESS || kret == KERN_INVALID_ADDRESS);
+ kret = kret ? kret : mach_vm_protect(user_map, freeaddr, freesize, FALSE, VM_PROT_NONE);
+ assert(kret == KERN_SUCCESS || kret == KERN_INVALID_ADDRESS);
+ } else {
+ kret = mach_vm_deallocate(pthread_kern->current_map(), freeaddr, freesize);
+ if (kret != KERN_SUCCESS) {
+ PTHREAD_TRACE(TRACE_pthread_thread_terminate|DBG_FUNC_END, kret, 0, 0, 0, 0);
+ return(EINVAL);
+ }
}
}
- (void) thread_terminate(current_thread());
+ (void) thread_terminate(th);
if (sem != MACH_PORT_NULL) {
kret = pthread_kern->semaphore_signal_internal_trap(sem);
if (kret != KERN_SUCCESS) {
user_addr_t wqthread,
int pthsize,
user_addr_t pthread_init_data,
- user_addr_t targetconc_ptr,
+ user_addr_t pthread_init_data_size,
uint64_t dispatchqueue_offset,
int32_t *retval)
{
+ /* We have to do this first so that it resets after fork */
+ pthread_kern->proc_set_stack_addr_hint(p, (user_addr_t)stack_addr_hint(p, pthread_kern->current_map()));
+
/* prevent multiple registrations */
if (pthread_kern->proc_get_register(p) != 0) {
return(EINVAL);
if (pthread_init_data != 0) {
thread_qos_policy_data_t qos;
- struct _pthread_registration_data data;
- size_t pthread_init_sz = MIN(sizeof(struct _pthread_registration_data), (size_t)targetconc_ptr);
+ struct _pthread_registration_data data = {};
+ size_t pthread_init_sz = MIN(sizeof(struct _pthread_registration_data), (size_t)pthread_init_data_size);
kern_return_t kr = copyin(pthread_init_data, &data, pthread_init_sz);
if (kr != KERN_SUCCESS) {
/* Incoming data from the data structure */
pthread_kern->proc_set_dispatchqueue_offset(p, data.dispatch_queue_offset);
+ if (data.version > offsetof(struct _pthread_registration_data, tsd_offset)
+ && data.tsd_offset < (uint32_t)pthsize) {
+ pthread_kern->proc_set_pthread_tsd_offset(p, data.tsd_offset);
+ }
/* Outgoing data that userspace expects as a reply */
+ data.version = sizeof(struct _pthread_registration_data);
if (pthread_kern->qos_main_thread_active()) {
mach_msg_type_number_t nqos = THREAD_QOS_POLICY_COUNT;
boolean_t gd = FALSE;
}
if (kr == KERN_SUCCESS) {
- data.main_qos = pthread_qos_class_get_priority(qos.qos_tier);
+ data.main_qos = thread_qos_get_pthread_priority(qos.qos_tier);
} else {
data.main_qos = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
}
}
} else {
pthread_kern->proc_set_dispatchqueue_offset(p, dispatchqueue_offset);
- pthread_kern->proc_set_targconc(p, targetconc_ptr);
}
/* return the supported feature set as the return value. */
return NULL;
}
-static inline void
-wq_thread_override_reset(thread_t th, user_addr_t resource)
-{
- struct uthread *uth = pthread_kern->get_bsdthread_info(th);
- struct threadlist *tl = pthread_kern->uthread_get_threadlist(uth);
-
- if (tl) {
- /*
- * Drop all outstanding overrides on this thread, done outside the wq lock
- * because proc_usynch_thread_qos_remove_override_for_resource takes a spinlock that
- * could cause us to panic.
- */
- PTHREAD_TRACE(TRACE_wq_override_reset | DBG_FUNC_NONE, tl->th_workq, 0, 0, 0, 0);
-
- pthread_kern->proc_usynch_thread_qos_reset_override_for_resource(current_task(), uth, 0, resource, THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE);
- }
-}
-
int
_bsdthread_ctl_set_self(struct proc *p, user_addr_t __unused cmd, pthread_priority_t priority, mach_port_name_t voucher, _pthread_set_flags_t flags, int __unused *retval)
{
thread_qos_policy_data_t qos;
mach_msg_type_number_t nqos = THREAD_QOS_POLICY_COUNT;
boolean_t gd = FALSE;
+ bool was_manager_thread = false;
+ thread_t th = current_thread();
+ struct workqueue *wq = NULL;
+ struct threadlist *tl = NULL;
kern_return_t kr;
int qos_rv = 0, voucher_rv = 0, fixedpri_rv = 0;
+ if ((flags & _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND) != 0) {
+ tl = util_get_thread_threadlist_entry(th);
+ if (tl) {
+ wq = tl->th_workq;
+ } else {
+ goto qos;
+ }
+
+ workqueue_lock_spin(wq);
+ if (tl->th_flags & TH_LIST_KEVENT_BOUND) {
+ tl->th_flags &= ~TH_LIST_KEVENT_BOUND;
+ unsigned int kevent_flags = KEVENT_FLAG_WORKQ;
+ if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+ kevent_flags |= KEVENT_FLAG_WORKQ_MANAGER;
+ }
+
+ workqueue_unlock(wq);
+ kevent_qos_internal_unbind(p, class_index_get_thread_qos(tl->th_priority), th, kevent_flags);
+ } else {
+ workqueue_unlock(wq);
+ }
+ }
+
+qos:
if ((flags & _PTHREAD_SET_SELF_QOS_FLAG) != 0) {
- kr = pthread_kern->thread_policy_get(current_thread(), THREAD_QOS_POLICY, (thread_policy_t)&qos, &nqos, &gd);
+ kr = pthread_kern->thread_policy_get(th, THREAD_QOS_POLICY, (thread_policy_t)&qos, &nqos, &gd);
if (kr != KERN_SUCCESS) {
qos_rv = EINVAL;
goto voucher;
}
/* Get the work queue for tracing, also the threadlist for bucket manipluation. */
- struct workqueue *wq = NULL;
- struct threadlist *tl = util_get_thread_threadlist_entry(current_thread());
- if (tl) {
- wq = tl->th_workq;
+ if (!tl) {
+ tl = util_get_thread_threadlist_entry(th);
+ if (tl) wq = tl->th_workq;
}
- PTHREAD_TRACE(TRACE_pthread_set_qos_self | DBG_FUNC_START, wq, qos.qos_tier, qos.tier_importance, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_pthread_set_qos_self | DBG_FUNC_START, wq, qos.qos_tier, qos.tier_importance, 0, 0);
- qos.qos_tier = pthread_priority_get_qos_class(priority);
+ qos.qos_tier = pthread_priority_get_thread_qos(priority);
qos.tier_importance = (qos.qos_tier == QOS_CLASS_UNSPECIFIED) ? 0 : _pthread_priority_get_relpri(priority);
- kr = pthread_kern->thread_policy_set_internal(current_thread(), THREAD_QOS_POLICY, (thread_policy_t)&qos, THREAD_QOS_POLICY_COUNT);
- if (kr != KERN_SUCCESS) {
+ if (qos.qos_tier == QOS_CLASS_UNSPECIFIED) {
qos_rv = EINVAL;
goto voucher;
}
/* If we're a workqueue, the threadlist item priority needs adjusting, along with the bucket we were running in. */
if (tl) {
- workqueue_lock_spin(p);
+ workqueue_lock_spin(wq);
+ bool now_under_constrained_limit = false;
+
+ assert(!(tl->th_flags & TH_LIST_KEVENT_BOUND));
+
+ kr = pthread_kern->thread_set_workq_qos(th, qos.qos_tier, qos.tier_importance);
+ assert(kr == KERN_SUCCESS || kr == KERN_TERMINATED);
/* Fix up counters. */
uint8_t old_bucket = tl->th_priority;
uint8_t new_bucket = pthread_priority_get_class_index(priority);
+ if (old_bucket == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+ was_manager_thread = true;
+ }
uint32_t old_active = OSAddAtomic(-1, &wq->wq_thactive_count[old_bucket]);
OSAddAtomic(1, &wq->wq_thactive_count[new_bucket]);
wq->wq_thscheduled_count[old_bucket]--;
wq->wq_thscheduled_count[new_bucket]++;
+ bool old_overcommit = !(tl->th_flags & TH_LIST_CONSTRAINED);
+ bool new_overcommit = priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
+ if (!old_overcommit && new_overcommit) {
+ wq->wq_constrained_threads_scheduled--;
+ tl->th_flags &= ~TH_LIST_CONSTRAINED;
+ if (wq->wq_constrained_threads_scheduled == wq_max_constrained_threads - 1) {
+ now_under_constrained_limit = true;
+ }
+ } else if (old_overcommit && !new_overcommit) {
+ wq->wq_constrained_threads_scheduled++;
+ tl->th_flags |= TH_LIST_CONSTRAINED;
+ }
+
tl->th_priority = new_bucket;
- /* If we were at the ceiling of non-overcommitted threads for a given bucket, we have to
- * reevaluate whether we should start more work.
+ /* If we were at the ceiling of threads for a given bucket, we have
+ * to reevaluate whether we should start more work.
*/
- if (old_active == wq->wq_reqconc[old_bucket]) {
+ if (old_active == wq->wq_reqconc[old_bucket] || now_under_constrained_limit) {
/* workqueue_run_nextreq will drop the workqueue lock in all exit paths. */
- (void)workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_DEFAULT, 0);
+ (void)workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_DEFAULT, 0, false);
} else {
- workqueue_unlock(p);
+ workqueue_unlock(wq);
+ }
+ } else {
+ kr = pthread_kern->thread_policy_set_internal(th, THREAD_QOS_POLICY, (thread_policy_t)&qos, THREAD_QOS_POLICY_COUNT);
+ if (kr != KERN_SUCCESS) {
+ qos_rv = EINVAL;
}
}
- PTHREAD_TRACE(TRACE_pthread_set_qos_self | DBG_FUNC_END, wq, qos.qos_tier, qos.tier_importance, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_pthread_set_qos_self | DBG_FUNC_END, wq, qos.qos_tier, qos.tier_importance, 0, 0);
}
voucher:
}
fixedpri:
+ if (qos_rv) goto done;
if ((flags & _PTHREAD_SET_SELF_FIXEDPRIORITY_FLAG) != 0) {
thread_extended_policy_data_t extpol = {.timeshare = 0};
- thread_t thread = current_thread();
-
- struct threadlist *tl = util_get_thread_threadlist_entry(thread);
+
+ if (!tl) tl = util_get_thread_threadlist_entry(th);
if (tl) {
/* Not allowed on workqueue threads */
fixedpri_rv = ENOTSUP;
goto done;
}
- kr = pthread_kern->thread_policy_set_internal(thread, THREAD_EXTENDED_POLICY, (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
+ kr = pthread_kern->thread_policy_set_internal(th, THREAD_EXTENDED_POLICY, (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
if (kr != KERN_SUCCESS) {
fixedpri_rv = EINVAL;
goto done;
}
} else if ((flags & _PTHREAD_SET_SELF_TIMESHARE_FLAG) != 0) {
thread_extended_policy_data_t extpol = {.timeshare = 1};
- thread_t thread = current_thread();
-
- struct threadlist *tl = util_get_thread_threadlist_entry(thread);
+
+ if (!tl) tl = util_get_thread_threadlist_entry(th);
if (tl) {
/* Not allowed on workqueue threads */
fixedpri_rv = ENOTSUP;
goto done;
}
- kr = pthread_kern->thread_policy_set_internal(thread, THREAD_EXTENDED_POLICY, (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
+ kr = pthread_kern->thread_policy_set_internal(th, THREAD_EXTENDED_POLICY, (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
if (kr != KERN_SUCCESS) {
fixedpri_rv = EINVAL;
goto done;
}
}
-
+
done:
if (qos_rv && voucher_rv) {
/* Both failed, give that a unique error. */
return ESRCH;
}
- struct uthread *uth = pthread_kern->get_bsdthread_info(th);
- int override_qos = pthread_priority_get_qos_class(priority);
+ int override_qos = pthread_priority_get_thread_qos(priority);
struct threadlist *tl = util_get_thread_threadlist_entry(th);
if (tl) {
- PTHREAD_TRACE(TRACE_wq_override_start | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), 1, priority, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_override_start | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), 1, priority, 0);
}
/* The only failure case here is if we pass a tid and have it lookup the thread, we pass the uthread, so this all always succeeds. */
- pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), uth, 0, override_qos, TRUE, resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
-
+ pthread_kern->proc_usynch_thread_qos_add_override_for_resource_check_owner(th, override_qos, TRUE,
+ resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE, USER_ADDR_NULL, MACH_PORT_NULL);
thread_deallocate(th);
return rv;
}
struct threadlist *tl = util_get_thread_threadlist_entry(th);
if (tl) {
- PTHREAD_TRACE(TRACE_wq_override_end | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), 0, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_override_end | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), 0, 0, 0);
}
pthread_kern->proc_usynch_thread_qos_remove_override_for_resource(current_task(), uth, 0, resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
return rv;
}
-int
-_bsdthread_ctl_qos_override_dispatch(struct proc *p, user_addr_t cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t arg3, int *retval)
-{
- if (arg3 != 0) {
- return EINVAL;
- }
-
- return _bsdthread_ctl_qos_dispatch_asynchronous_override_add(p, cmd, kport, priority, USER_ADDR_NULL, retval);
-}
-
-int
-_bsdthread_ctl_qos_dispatch_asynchronous_override_add(struct proc __unused *p, user_addr_t __unused cmd, mach_port_name_t kport, pthread_priority_t priority, user_addr_t resource, int __unused *retval)
+static int
+_bsdthread_ctl_qos_dispatch_asynchronous_override_add_internal(mach_port_name_t kport, pthread_priority_t priority, user_addr_t resource, user_addr_t ulock_addr)
{
thread_t th;
int rv = 0;
return ESRCH;
}
- struct uthread *uth = pthread_kern->get_bsdthread_info(th);
- int override_qos = pthread_priority_get_qos_class(priority);
+ int override_qos = pthread_priority_get_thread_qos(priority);
struct threadlist *tl = util_get_thread_threadlist_entry(th);
if (!tl) {
return EPERM;
}
- PTHREAD_TRACE(TRACE_wq_override_dispatch | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), 1, priority, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_override_dispatch | DBG_FUNC_NONE, tl->th_workq, thread_tid(th), 1, priority, 0);
- /* The only failure case here is if we pass a tid and have it lookup the thread, we pass the uthread, so this all always succeeds. */
- pthread_kern->proc_usynch_thread_qos_add_override_for_resource(current_task(), uth, 0, override_qos, TRUE, resource, THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE);
+ rv = pthread_kern->proc_usynch_thread_qos_add_override_for_resource_check_owner(th, override_qos, TRUE,
+ resource, THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE, ulock_addr, kport);
thread_deallocate(th);
return rv;
}
+int _bsdthread_ctl_qos_dispatch_asynchronous_override_add(struct proc __unused *p, user_addr_t __unused cmd,
+ mach_port_name_t kport, pthread_priority_t priority, user_addr_t resource, int __unused *retval)
+{
+ return _bsdthread_ctl_qos_dispatch_asynchronous_override_add_internal(kport, priority, resource, USER_ADDR_NULL);
+}
+
+int
+_bsdthread_ctl_qos_override_dispatch(struct proc *p __unused, user_addr_t cmd __unused, mach_port_name_t kport, pthread_priority_t priority, user_addr_t ulock_addr, int __unused *retval)
+{
+ return _bsdthread_ctl_qos_dispatch_asynchronous_override_add_internal(kport, priority, USER_ADDR_NULL, ulock_addr);
+}
+
int
_bsdthread_ctl_qos_override_reset(struct proc *p, user_addr_t cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3, int *retval)
{
int
_bsdthread_ctl_qos_dispatch_asynchronous_override_reset(struct proc __unused *p, user_addr_t __unused cmd, int reset_all, user_addr_t resource, user_addr_t arg3, int __unused *retval)
{
- thread_t th;
- struct threadlist *tl;
- int rv = 0;
-
if ((reset_all && (resource != 0)) || arg3 != 0) {
return EINVAL;
}
- th = current_thread();
- tl = util_get_thread_threadlist_entry(th);
+ thread_t th = current_thread();
+ struct uthread *uth = pthread_kern->get_bsdthread_info(th);
+ struct threadlist *tl = pthread_kern->uthread_get_threadlist(uth);
- if (tl) {
- wq_thread_override_reset(th, reset_all ? THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD : resource);
- } else {
- rv = EPERM;
+ if (!tl) {
+ return EPERM;
}
- return rv;
+ PTHREAD_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_NONE, tl->th_workq, 0, 0, 0, 0);
+
+ resource = reset_all ? THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD : resource;
+ pthread_kern->proc_usynch_thread_qos_reset_override_for_resource(current_task(), uth, 0, resource, THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE);
+
+ return 0;
}
int
_bsdthread_ctl(struct proc *p, user_addr_t cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3, int *retval)
{
switch (cmd) {
- case BSDTHREAD_CTL_SET_QOS:
- return _bsdthread_ctl_set_qos(p, cmd, (mach_port_name_t)arg1, arg2, arg3, retval);
- case BSDTHREAD_CTL_QOS_OVERRIDE_START:
- return _bsdthread_ctl_qos_override_start(p, cmd, (mach_port_name_t)arg1, (pthread_priority_t)arg2, arg3, retval);
- case BSDTHREAD_CTL_QOS_OVERRIDE_END:
- return _bsdthread_ctl_qos_override_end(p, cmd, (mach_port_name_t)arg1, arg2, arg3, retval);
- case BSDTHREAD_CTL_QOS_OVERRIDE_RESET:
- return _bsdthread_ctl_qos_override_reset(p, cmd, arg1, arg2, arg3, retval);
- case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
- return _bsdthread_ctl_qos_override_dispatch(p, cmd, (mach_port_name_t)arg1, (pthread_priority_t)arg2, arg3, retval);
- case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
- return _bsdthread_ctl_qos_dispatch_asynchronous_override_add(p, cmd, (mach_port_name_t)arg1, (pthread_priority_t)arg2, arg3, retval);
- case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET:
- return _bsdthread_ctl_qos_dispatch_asynchronous_override_reset(p, cmd, (int)arg1, arg2, arg3, retval);
- case BSDTHREAD_CTL_SET_SELF:
- return _bsdthread_ctl_set_self(p, cmd, (pthread_priority_t)arg1, (mach_port_name_t)arg2, (_pthread_set_flags_t)arg3, retval);
- default:
- return EINVAL;
+ case BSDTHREAD_CTL_SET_QOS:
+ return _bsdthread_ctl_set_qos(p, cmd, (mach_port_name_t)arg1, arg2, arg3, retval);
+ case BSDTHREAD_CTL_QOS_OVERRIDE_START:
+ return _bsdthread_ctl_qos_override_start(p, cmd, (mach_port_name_t)arg1, (pthread_priority_t)arg2, arg3, retval);
+ case BSDTHREAD_CTL_QOS_OVERRIDE_END:
+ return _bsdthread_ctl_qos_override_end(p, cmd, (mach_port_name_t)arg1, arg2, arg3, retval);
+ case BSDTHREAD_CTL_QOS_OVERRIDE_RESET:
+ return _bsdthread_ctl_qos_override_reset(p, cmd, arg1, arg2, arg3, retval);
+ case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
+ return _bsdthread_ctl_qos_override_dispatch(p, cmd, (mach_port_name_t)arg1, (pthread_priority_t)arg2, arg3, retval);
+ case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
+ return _bsdthread_ctl_qos_dispatch_asynchronous_override_add(p, cmd, (mach_port_name_t)arg1, (pthread_priority_t)arg2, arg3, retval);
+ case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET:
+ return _bsdthread_ctl_qos_dispatch_asynchronous_override_reset(p, cmd, (int)arg1, arg2, arg3, retval);
+ case BSDTHREAD_CTL_SET_SELF:
+ return _bsdthread_ctl_set_self(p, cmd, (pthread_priority_t)arg1, (mach_port_name_t)arg2, (_pthread_set_flags_t)arg3, retval);
+ default:
+ return EINVAL;
}
}
#pragma mark - Workqueue Implementation
-#pragma mark sysctls
-
-uint32_t wq_yielded_threshold = WQ_YIELDED_THRESHOLD;
-uint32_t wq_yielded_window_usecs = WQ_YIELDED_WINDOW_USECS;
-uint32_t wq_stalled_window_usecs = WQ_STALLED_WINDOW_USECS;
-uint32_t wq_reduce_pool_window_usecs = WQ_REDUCE_POOL_WINDOW_USECS;
-uint32_t wq_max_timer_interval_usecs = WQ_MAX_TIMER_INTERVAL_USECS;
-uint32_t wq_max_threads = WORKQUEUE_MAXTHREADS;
-uint32_t wq_max_constrained_threads = WORKQUEUE_MAXTHREADS / 8;
-uint32_t wq_max_concurrency = 1; // set to ncpus on load
-
-SYSCTL_INT(_kern, OID_AUTO, wq_yielded_threshold, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_yielded_threshold, 0, "");
-
-SYSCTL_INT(_kern, OID_AUTO, wq_yielded_window_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_yielded_window_usecs, 0, "");
-
-SYSCTL_INT(_kern, OID_AUTO, wq_stalled_window_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_stalled_window_usecs, 0, "");
-
-SYSCTL_INT(_kern, OID_AUTO, wq_reduce_pool_window_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_reduce_pool_window_usecs, 0, "");
-
-SYSCTL_INT(_kern, OID_AUTO, wq_max_timer_interval_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_max_timer_interval_usecs, 0, "");
-
-SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_max_threads, 0, "");
-
-SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_max_constrained_threads, 0, "");
-
-#ifdef DEBUG
-SYSCTL_INT(_kern, OID_AUTO, wq_max_concurrency, CTLFLAG_RW | CTLFLAG_LOCKED,
- &wq_max_concurrency, 0, "");
-
-static int wq_kevent_test SYSCTL_HANDLER_ARGS;
-SYSCTL_PROC(_debug, OID_AUTO, wq_kevent_test, CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE, NULL, 0, wq_kevent_test, 0, "-");
-#endif
-
-static uint32_t wq_init_constrained_limit = 1;
-
#pragma mark workqueue lock
-void
-_workqueue_init_lock(proc_t p)
-{
- lck_spin_init(pthread_kern->proc_get_wqlockptr(p), pthread_lck_grp, pthread_lck_attr);
- *(pthread_kern->proc_get_wqinitingptr(p)) = FALSE;
-}
-
-void
-_workqueue_destroy_lock(proc_t p)
-{
- lck_spin_destroy(pthread_kern->proc_get_wqlockptr(p), pthread_lck_grp);
+static boolean_t workqueue_lock_spin_is_acquired_kdp(struct workqueue *wq) {
+ return kdp_lck_spin_is_acquired(&wq->wq_lock);
}
-
static void
-workqueue_lock_spin(proc_t p)
+workqueue_lock_spin(struct workqueue *wq)
{
- lck_spin_lock(pthread_kern->proc_get_wqlockptr(p));
+ boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
+ lck_spin_lock(&wq->wq_lock);
+ wq->wq_interrupt_state = interrupt_state;
}
static void
-workqueue_unlock(proc_t p)
+workqueue_unlock(struct workqueue *wq)
{
- lck_spin_unlock(pthread_kern->proc_get_wqlockptr(p));
+ boolean_t interrupt_state = wq->wq_interrupt_state;
+ lck_spin_unlock(&wq->wq_lock);
+ ml_set_interrupts_enabled(interrupt_state);
}
#pragma mark workqueue add timer
}
clock_interval_to_deadline(wq->wq_timer_interval, 1000, &deadline);
- thread_call_enter_delayed(wq->wq_atimer_call, deadline);
+ PTHREAD_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount, wq->wq_flags, wq->wq_timer_interval, 0);
+
+ boolean_t ret = thread_call_enter1_delayed(wq->wq_atimer_delayed_call, wq->wq_atimer_delayed_call, deadline);
+ if (ret) {
+ panic("delayed_call was already enqueued");
+ }
+}
+
+/**
+ * Immediately trigger the workqueue_add_timer
+ */
+static void
+workqueue_interval_timer_trigger(struct workqueue *wq)
+{
+ PTHREAD_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount, wq->wq_flags, 0, 0);
- PTHREAD_TRACE(TRACE_wq_start_add_timer, wq, wq->wq_reqcount, wq->wq_flags, wq->wq_timer_interval, 0);
+ boolean_t ret = thread_call_enter1(wq->wq_atimer_immediate_call, wq->wq_atimer_immediate_call);
+ if (ret) {
+ panic("immediate_call was already enqueued");
+ }
}
/**
return (FALSE);
}
-#define WQ_TIMER_NEEDED(wq, start_timer) do { \
- int oldflags = wq->wq_flags; \
- \
- if ( !(oldflags & (WQ_EXITING | WQ_ATIMER_RUNNING))) { \
- if (OSCompareAndSwap(oldflags, oldflags | WQ_ATIMER_RUNNING, (UInt32 *)&wq->wq_flags)) \
- start_timer = TRUE; \
- } \
-} while (0)
+static inline bool
+WQ_TIMER_DELAYED_NEEDED(struct workqueue *wq)
+{
+ int oldflags;
+retry:
+ oldflags = wq->wq_flags;
+ if ( !(oldflags & (WQ_EXITING | WQ_ATIMER_DELAYED_RUNNING))) {
+ if (OSCompareAndSwap(oldflags, oldflags | WQ_ATIMER_DELAYED_RUNNING, (UInt32 *)&wq->wq_flags)) {
+ return true;
+ } else {
+ goto retry;
+ }
+ }
+ return false;
+}
+
+static inline bool
+WQ_TIMER_IMMEDIATE_NEEDED(struct workqueue *wq)
+{
+ int oldflags;
+retry:
+ oldflags = wq->wq_flags;
+ if ( !(oldflags & (WQ_EXITING | WQ_ATIMER_IMMEDIATE_RUNNING))) {
+ if (OSCompareAndSwap(oldflags, oldflags | WQ_ATIMER_IMMEDIATE_RUNNING, (UInt32 *)&wq->wq_flags)) {
+ return true;
+ } else {
+ goto retry;
+ }
+ }
+ return false;
+}
/**
* handler function for the timer
*/
static void
-workqueue_add_timer(struct workqueue *wq, __unused int param1)
+workqueue_add_timer(struct workqueue *wq, thread_call_t thread_call_self)
{
proc_t p;
boolean_t start_timer = FALSE;
boolean_t retval;
- PTHREAD_TRACE(TRACE_wq_add_timer | DBG_FUNC_START, wq, wq->wq_flags, wq->wq_nthreads, wq->wq_thidlecount, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, wq->wq_flags, wq->wq_nthreads, wq->wq_thidlecount, 0);
p = wq->wq_proc;
- workqueue_lock_spin(p);
+ workqueue_lock_spin(wq);
/*
- * because workqueue_callback now runs w/o taking the workqueue lock
- * we are unsynchronized w/r to a change in state of the running threads...
- * to make sure we always evaluate that change, we allow it to start up
- * a new timer if the current one is actively evalutating the state
- * however, we do not need more than 2 timers fired up (1 active and 1 pending)
- * and we certainly do not want 2 active timers evaluating the state
- * simultaneously... so use WQL_ATIMER_BUSY to serialize the timers...
- * note that WQL_ATIMER_BUSY is in a different flag word from WQ_ATIMER_RUNNING since
- * it is always protected by the workq lock... WQ_ATIMER_RUNNING is evaluated
- * and set atomimcally since the callback function needs to manipulate it
- * w/o holding the workq lock...
+ * There's two tricky issues here.
*
- * !WQ_ATIMER_RUNNING && !WQL_ATIMER_BUSY == no pending timer, no active timer
- * !WQ_ATIMER_RUNNING && WQL_ATIMER_BUSY == no pending timer, 1 active timer
- * WQ_ATIMER_RUNNING && !WQL_ATIMER_BUSY == 1 pending timer, no active timer
- * WQ_ATIMER_RUNNING && WQL_ATIMER_BUSY == 1 pending timer, 1 active timer
+ * First issue: we start the thread_calls that invoke this routine without
+ * the workqueue lock held. The scheduler callback needs to trigger
+ * reevaluation of the number of running threads but shouldn't take that
+ * lock, so we can't use it to synchronize state around the thread_call.
+ * As a result, it might re-enter the thread_call while this routine is
+ * already running. This could cause it to fire a second time and we'll
+ * have two add_timers running at once. Obviously, we don't want that to
+ * keep stacking, so we need to keep it at two timers.
+ *
+ * Solution: use wq_flags (accessed via atomic CAS) to synchronize the
+ * enqueue of the thread_call itself. When a thread needs to trigger the
+ * add_timer, it checks for ATIMER_DELAYED_RUNNING and, when not set, sets
+ * the flag then does a thread_call_enter. We'll then remove that flag
+ * only once we've got the lock and it's safe for the thread_call to be
+ * entered again.
+ *
+ * Second issue: we need to make sure that the two timers don't execute this
+ * routine concurrently. We can't use the workqueue lock for this because
+ * we'll need to drop it during our execution.
+ *
+ * Solution: use WQL_ATIMER_BUSY as a condition variable to indicate that
+ * we are currently executing the routine and the next thread should wait.
+ *
+ * After all that, we arrive at the following four possible states:
+ * !WQ_ATIMER_DELAYED_RUNNING && !WQL_ATIMER_BUSY no pending timer, no active timer
+ * !WQ_ATIMER_DELAYED_RUNNING && WQL_ATIMER_BUSY no pending timer, 1 active timer
+ * WQ_ATIMER_DELAYED_RUNNING && !WQL_ATIMER_BUSY 1 pending timer, no active timer
+ * WQ_ATIMER_DELAYED_RUNNING && WQL_ATIMER_BUSY 1 pending timer, 1 active timer
+ *
+ * Further complication: sometimes we need to trigger this function to run
+ * without delay. Because we aren't under a lock between setting
+ * WQ_ATIMER_DELAYED_RUNNING and calling thread_call_enter, we can't simply
+ * re-enter the thread call: if thread_call_enter() returned false, we
+ * wouldn't be able to distinguish the case where the thread_call had
+ * already fired from the case where it hadn't been entered yet from the
+ * other thread. So, we use a separate thread_call for immediate
+ * invocations, and a separate RUNNING flag, WQ_ATIMER_IMMEDIATE_RUNNING.
*/
+
while (wq->wq_lflags & WQL_ATIMER_BUSY) {
wq->wq_lflags |= WQL_ATIMER_WAITING;
assert_wait((caddr_t)wq, (THREAD_UNINT));
- workqueue_unlock(p);
+ workqueue_unlock(wq);
thread_block(THREAD_CONTINUE_NULL);
- workqueue_lock_spin(p);
+ workqueue_lock_spin(wq);
}
wq->wq_lflags |= WQL_ATIMER_BUSY;
/*
- * the workq lock will protect us from seeing WQ_EXITING change state, but we
- * still need to update this atomically in case someone else tries to start
- * the timer just as we're releasing it
+ * Decide which timer we are and remove the RUNNING flag.
*/
- while ( !(OSCompareAndSwap(wq->wq_flags, (wq->wq_flags & ~WQ_ATIMER_RUNNING), (UInt32 *)&wq->wq_flags)));
+ if (thread_call_self == wq->wq_atimer_delayed_call) {
+ if ((wq->wq_flags & WQ_ATIMER_DELAYED_RUNNING) == 0) {
+ panic("workqueue_add_timer is the delayed timer but the delayed running flag isn't set");
+ }
+ WQ_UNSETFLAG(wq, WQ_ATIMER_DELAYED_RUNNING);
+ } else if (thread_call_self == wq->wq_atimer_immediate_call) {
+ if ((wq->wq_flags & WQ_ATIMER_IMMEDIATE_RUNNING) == 0) {
+ panic("workqueue_add_timer is the immediate timer but the immediate running flag isn't set");
+ }
+ WQ_UNSETFLAG(wq, WQ_ATIMER_IMMEDIATE_RUNNING);
+ } else {
+ panic("workqueue_add_timer can't figure out which timer it is");
+ }
again:
retval = TRUE;
boolean_t add_thread = FALSE;
/*
* check to see if the stall frequency was beyond our tolerance
- * or we have work on the queue, but haven't scheduled any
+ * or we have work on the queue, but haven't scheduled any
* new work within our acceptable time interval because
* there were no idle threads left to schedule
*/
for (uint32_t i = 0; i <= priclass; i++) {
thactive_count += wq->wq_thactive_count[i];
- // XXX why isn't this checking thscheduled_count < thactive_count ?
- if (wq->wq_thscheduled_count[i]) {
+ if (wq->wq_thscheduled_count[i] < wq->wq_thactive_count[i]) {
if (wq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i]))
busycount++;
}
* workqueue_run_nextreq is responsible for
* dropping the workqueue lock in all cases
*/
- retval = workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_DEFAULT, 0);
- workqueue_lock_spin(p);
+ retval = (workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_ADD_TIMER, 0, false) != THREAD_NULL);
+ workqueue_lock_spin(wq);
if (retval == FALSE)
break;
if (wq->wq_thidlecount == 0 && retval == TRUE && add_thread == TRUE)
goto again;
- if (wq->wq_thidlecount == 0 || busycount)
- WQ_TIMER_NEEDED(wq, start_timer);
+ if (wq->wq_thidlecount == 0 || busycount) {
+ start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
+ }
- PTHREAD_TRACE(TRACE_wq_add_timer | DBG_FUNC_NONE, wq, wq->wq_reqcount, wq->wq_thidlecount, busycount, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_NONE, wq, wq->wq_reqcount, wq->wq_thidlecount, busycount, 0);
}
}
}
}
- /*
+ /*
* If we called WQ_TIMER_NEEDED above, then this flag will be set if that
* call marked the timer running. If so, we let the timer interval grow.
* Otherwise, we reset it back to 0.
*/
- if ( !(wq->wq_flags & WQ_ATIMER_RUNNING))
+ if (!(wq->wq_flags & WQ_ATIMER_DELAYED_RUNNING)) {
wq->wq_timer_interval = 0;
+ }
wq->wq_lflags &= ~WQL_ATIMER_BUSY;
if ((wq->wq_flags & WQ_EXITING) || (wq->wq_lflags & WQL_ATIMER_WAITING)) {
/*
- * wakeup the thread hung up in workqueue_exit or workqueue_add_timer waiting for this timer
+ * wakeup the thread hung up in _workqueue_mark_exiting or workqueue_add_timer waiting for this timer
* to finish getting out of the way
*/
wq->wq_lflags &= ~WQL_ATIMER_WAITING;
wakeup(wq);
}
- PTHREAD_TRACE(TRACE_wq_add_timer | DBG_FUNC_END, wq, start_timer, wq->wq_nthreads, wq->wq_thidlecount, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, start_timer, wq->wq_nthreads, wq->wq_thidlecount, 0);
- workqueue_unlock(p);
+ workqueue_unlock(wq);
if (start_timer == TRUE)
workqueue_interval_timer_start(wq);
if ((wq = pthread_kern->proc_get_wqptr(p)) == NULL || wq->wq_reqcount == 0)
return;
-
- workqueue_lock_spin(p);
+
+ workqueue_lock_spin(wq);
if (wq->wq_reqcount) {
uint64_t curtime;
wq->wq_thread_yielded_timestamp = mach_absolute_time();
if (wq->wq_thread_yielded_count < wq_yielded_threshold) {
- workqueue_unlock(p);
+ workqueue_unlock(wq);
return;
}
- PTHREAD_TRACE(TRACE_wq_thread_yielded | DBG_FUNC_START, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_yielded | DBG_FUNC_START, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 0, 0);
wq->wq_thread_yielded_count = 0;
* will eventually get used (if it hasn't already)...
*/
if (wq->wq_reqcount == 0) {
- workqueue_unlock(p);
+ workqueue_unlock(wq);
return;
}
}
if (wq->wq_thidlecount) {
- (void)workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_UNCONSTRAINED, 0);
+ (void)workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_UNCONSTRAINED, 0, false);
/*
* workqueue_run_nextreq is responsible for
* dropping the workqueue lock in all cases
*/
- PTHREAD_TRACE(TRACE_wq_thread_yielded | DBG_FUNC_END, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 1, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_yielded | DBG_FUNC_END, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 1, 0);
return;
}
}
- PTHREAD_TRACE(TRACE_wq_thread_yielded | DBG_FUNC_END, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 2, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_yielded | DBG_FUNC_END, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 2, 0);
}
- workqueue_unlock(p);
+ workqueue_unlock(wq);
}
-
-
static void
workqueue_callback(int type, thread_t thread)
{
if (wq->wq_reqcount) {
/*
- * we have work to do so start up the timer
- * if it's not running... we'll let it sort
- * out whether we really need to start up
- * another thread
+ * We have work to do so start up the timer if it's not
+ * running; it'll sort out whether we need to start another
+ * thread
*/
- WQ_TIMER_NEEDED(wq, start_timer);
+ start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
}
if (start_timer == TRUE) {
workqueue_interval_timer_start(wq);
}
}
- PTHREAD_TRACE1(TRACE_wq_thread_block | DBG_FUNC_START, wq, old_activecount, tl->th_priority, start_timer, thread_tid(thread));
+ PTHREAD_TRACE1_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq, old_activecount, tl->th_priority, start_timer, thread_tid(thread));
break;
}
case SCHED_CALL_UNBLOCK:
* is also held
*/
OSAddAtomic(1, &wq->wq_thactive_count[tl->th_priority]);
-
- PTHREAD_TRACE1(TRACE_wq_thread_block | DBG_FUNC_END, wq, wq->wq_threads_scheduled, tl->th_priority, 0, thread_tid(thread));
-
+
+ PTHREAD_TRACE1_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq, wq->wq_threads_scheduled, tl->th_priority, 0, thread_tid(thread));
+
break;
}
}
#pragma mark thread addition/removal
+static mach_vm_size_t
+_workqueue_allocsize(struct workqueue *wq)
+{
+ proc_t p = wq->wq_proc;
+ mach_vm_size_t guardsize = vm_map_page_size(wq->wq_map);
+ mach_vm_size_t pthread_size =
+ vm_map_round_page_mask(pthread_kern->proc_get_pthsize(p) + PTHREAD_T_OFFSET, vm_map_page_mask(wq->wq_map));
+ return guardsize + PTH_DEFAULT_STACKSIZE + pthread_size;
+}
+
/**
* pop goes the thread
+ *
+ * If fromexit is set, the call is from workqueue_exit(),
+ * so some cleanups are to be avoided.
*/
static void
-workqueue_removethread(struct threadlist *tl, int fromexit)
+workqueue_removethread(struct threadlist *tl, bool fromexit, bool first_use)
{
- struct workqueue *wq;
struct uthread * uth;
+ struct workqueue * wq = tl->th_workq;
- /*
- * If fromexit is set, the call is from workqueue_exit(,
- * so some cleanups are to be avoided.
- */
- wq = tl->th_workq;
-
- TAILQ_REMOVE(&wq->wq_thidlelist, tl, th_entry);
+ if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET){
+ TAILQ_REMOVE(&wq->wq_thidlemgrlist, tl, th_entry);
+ } else {
+ TAILQ_REMOVE(&wq->wq_thidlelist, tl, th_entry);
+ }
if (fromexit == 0) {
+ assert(wq->wq_nthreads && wq->wq_thidlecount);
wq->wq_nthreads--;
wq->wq_thidlecount--;
}
/*
- * Clear the threadlist pointer in uthread so
+ * Clear the threadlist pointer in uthread so
* blocked thread on wakeup for termination will
* not access the thread list as it is going to be
* freed.
}
if (fromexit == 0) {
/* during exit the lock is not held */
- workqueue_unlock(wq->wq_proc);
+ workqueue_unlock(wq);
}
- if ( (tl->th_flags & TH_LIST_SUSPENDED) ) {
+ if ( (tl->th_flags & TH_LIST_NEW) || first_use ) {
/*
- * thread was created, but never used...
+ * thread was created, but never used...
* need to clean up the stack and port ourselves
* since we're not going to spin up through the
* normal exit path triggered from Libc
*/
if (fromexit == 0) {
/* vm map is already deallocated when this is called from exit */
- (void)mach_vm_deallocate(wq->wq_map, tl->th_stackaddr, tl->th_allocsize);
+ (void)mach_vm_deallocate(wq->wq_map, tl->th_stackaddr, _workqueue_allocsize(wq));
}
(void)pthread_kern->mach_port_deallocate(pthread_kern->task_get_ipcspace(wq->wq_task), tl->th_thport);
- PTHREAD_TRACE1(TRACE_wq_thread_suspend | DBG_FUNC_END, wq, (uintptr_t)thread_tid(current_thread()), wq->wq_nthreads, 0xdead, thread_tid(tl->th_thread));
} else {
- PTHREAD_TRACE1(TRACE_wq_thread_park | DBG_FUNC_END, wq, (uintptr_t)thread_tid(current_thread()), wq->wq_nthreads, 0xdead, thread_tid(tl->th_thread));
+ PTHREAD_TRACE1_WQ(TRACE_wq_thread_park | DBG_FUNC_END, wq, (uintptr_t)thread_tid(current_thread()), wq->wq_nthreads, 0xdead, thread_tid(tl->th_thread));
}
/*
* drop our ref on the thread
mach_vm_offset_t stackaddr;
if ((wq->wq_flags & WQ_EXITING) == WQ_EXITING) {
- PTHREAD_TRACE(TRACE_wq_thread_add_during_exit | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_add_during_exit | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
return (FALSE);
}
- if (wq->wq_nthreads >= wq_max_threads || wq->wq_nthreads >= (pthread_kern->config_thread_max - 20)) {
- wq->wq_lflags |= WQL_EXCEEDED_TOTAL_THREAD_LIMIT;
-
- PTHREAD_TRACE(TRACE_wq_thread_limit_exceeded | DBG_FUNC_NONE, wq, wq->wq_nthreads, wq_max_threads,
- pthread_kern->config_thread_max - 20, 0);
+ if (wq->wq_nthreads >= wq_max_threads) {
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_limit_exceeded | DBG_FUNC_NONE, wq, wq->wq_nthreads, wq_max_threads, 0, 0);
return (FALSE);
}
- wq->wq_lflags &= ~WQL_EXCEEDED_TOTAL_THREAD_LIMIT;
if (ignore_constrained_thread_limit == FALSE &&
wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
- /*
+ /*
* If we're not creating this thread to service an overcommit or
* event manager request, then we check to see if we are over our
* constrained thread limit, in which case we error out.
*/
- wq->wq_lflags |= WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
-
- PTHREAD_TRACE(TRACE_wq_thread_constrained_maxed | DBG_FUNC_NONE, wq, wq->wq_constrained_threads_scheduled,
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_constrained_maxed | DBG_FUNC_NONE, wq, wq->wq_constrained_threads_scheduled,
wq_max_constrained_threads, 0, 0);
return (FALSE);
}
- if (wq->wq_constrained_threads_scheduled < wq_max_constrained_threads)
- wq->wq_lflags &= ~WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
wq->wq_nthreads++;
p = wq->wq_proc;
- workqueue_unlock(p);
+ workqueue_unlock(wq);
+
+ tl = kalloc(sizeof(struct threadlist));
+ bzero(tl, sizeof(struct threadlist));
- kret = pthread_kern->thread_create_workq(wq->wq_task, (thread_continue_t)wq_unsuspend_continue, &th);
+ kret = pthread_kern->thread_create_workq_waiting(wq->wq_task, wq_unpark_continue, tl, &th);
if (kret != KERN_SUCCESS) {
- PTHREAD_TRACE(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 0, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 0, 0, 0);
+ kfree(tl, sizeof(struct threadlist));
goto failed;
}
- tl = kalloc(sizeof(struct threadlist));
- bzero(tl, sizeof(struct threadlist));
-
- stackaddr = stackaddr_hint(p);
+ stackaddr = pthread_kern->proc_get_stack_addr_hint(p);
mach_vm_size_t guardsize = vm_map_page_size(wq->wq_map);
- mach_vm_size_t pthread_size =
+ mach_vm_size_t pthread_size =
vm_map_round_page_mask(pthread_kern->proc_get_pthsize(p) + PTHREAD_T_OFFSET, vm_map_page_mask(wq->wq_map));
- tl->th_allocsize = guardsize + PTH_DEFAULT_STACKSIZE + pthread_size;
+ mach_vm_size_t th_allocsize = guardsize + PTH_DEFAULT_STACKSIZE + pthread_size;
kret = mach_vm_map(wq->wq_map, &stackaddr,
- tl->th_allocsize,
- page_size-1,
- VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE , NULL,
- 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
- VM_INHERIT_DEFAULT);
+ th_allocsize, page_size-1,
+ VM_MAKE_TAG(VM_MEMORY_STACK)| VM_FLAGS_ANYWHERE, NULL,
+ 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
+ VM_INHERIT_DEFAULT);
if (kret != KERN_SUCCESS) {
- PTHREAD_TRACE(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 1, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 1, 0, 0);
kret = mach_vm_allocate(wq->wq_map,
- &stackaddr, tl->th_allocsize,
+ &stackaddr, th_allocsize,
VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
}
if (kret == KERN_SUCCESS) {
kret = mach_vm_protect(wq->wq_map, stackaddr, guardsize, FALSE, VM_PROT_NONE);
if (kret != KERN_SUCCESS) {
- (void) mach_vm_deallocate(wq->wq_map, stackaddr, tl->th_allocsize);
- PTHREAD_TRACE(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 2, 0, 0);
+ (void) mach_vm_deallocate(wq->wq_map, stackaddr, th_allocsize);
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 2, 0, 0);
}
}
if (kret != KERN_SUCCESS) {
}
thread_reference(th);
+ pthread_kern->thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
+
sright = (void *)pthread_kern->convert_thread_to_port(th);
tl->th_thport = pthread_kern->ipc_port_copyout_send(sright, pthread_kern->task_get_ipcspace(wq->wq_task));
pthread_kern->thread_static_param(th, TRUE);
- tl->th_flags = TH_LIST_INITED | TH_LIST_SUSPENDED;
+ tl->th_flags = TH_LIST_INITED | TH_LIST_NEW;
tl->th_thread = th;
tl->th_workq = wq;
tl->th_stackaddr = stackaddr;
tl->th_priority = WORKQUEUE_NUM_BUCKETS;
- tl->th_policy = -1;
uth = pthread_kern->get_bsdthread_info(tl->th_thread);
- workqueue_lock_spin(p);
+ workqueue_lock_spin(wq);
pthread_kern->uthread_set_threadlist(uth, tl);
TAILQ_INSERT_TAIL(&wq->wq_thidlelist, tl, th_entry);
wq->wq_thidlecount++;
- PTHREAD_TRACE1(TRACE_wq_thread_suspend | DBG_FUNC_START, wq, wq->wq_nthreads, 0, thread_tid(current_thread()), thread_tid(tl->th_thread));
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
return (TRUE);
failed:
- workqueue_lock_spin(p);
+ workqueue_lock_spin(wq);
wq->wq_nthreads--;
return (FALSE);
uint32_t i;
uint32_t num_cpus;
int error = 0;
- boolean_t need_wakeup = FALSE;
if (pthread_kern->proc_get_register(p) == 0) {
return EINVAL;
wq_max_constrained_threads = limit;
wq_init_constrained_limit = 0;
+
+ if (wq_max_threads > pthread_kern->config_thread_max - 20) {
+ wq_max_threads = pthread_kern->config_thread_max - 20;
+ }
}
- workqueue_lock_spin(p);
if (pthread_kern->proc_get_wqptr(p) == NULL) {
-
- while (*pthread_kern->proc_get_wqinitingptr(p) == TRUE) {
-
- assert_wait((caddr_t)pthread_kern->proc_get_wqinitingptr(p), THREAD_UNINT);
- workqueue_unlock(p);
-
- thread_block(THREAD_CONTINUE_NULL);
-
- workqueue_lock_spin(p);
- }
- if (pthread_kern->proc_get_wqptr(p) != NULL) {
+ if (pthread_kern->proc_init_wqptr_or_wait(p) == FALSE) {
+ assert(pthread_kern->proc_get_wqptr(p) != NULL);
goto out;
}
- *(pthread_kern->proc_get_wqinitingptr(p)) = TRUE;
-
- workqueue_unlock(p);
-
wq_size = sizeof(struct workqueue);
ptr = (char *)kalloc(wq_size);
// though we shouldn't ever read this value for that bucket
wq->wq_reqconc[WORKQUEUE_EVENT_MANAGER_BUCKET] = 1;
- // Always start the event manager at BACKGROUND
- wq->wq_event_manager_priority = (uint32_t)pthread_qos_class_get_priority(THREAD_QOS_BACKGROUND) | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+ // Start the event manager at the priority hinted at by the policy engine
+ int mgr_priority_hint = pthread_kern->task_get_default_manager_qos(current_task());
+ wq->wq_event_manager_priority = (uint32_t)thread_qos_get_pthread_priority(mgr_priority_hint) | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
TAILQ_INIT(&wq->wq_thrunlist);
TAILQ_INIT(&wq->wq_thidlelist);
- wq->wq_atimer_call = thread_call_allocate((thread_call_func_t)workqueue_add_timer, (thread_call_param_t)wq);
+ wq->wq_atimer_delayed_call = thread_call_allocate((thread_call_func_t)workqueue_add_timer, (thread_call_param_t)wq);
+ wq->wq_atimer_immediate_call = thread_call_allocate((thread_call_func_t)workqueue_add_timer, (thread_call_param_t)wq);
- workqueue_lock_spin(p);
+ lck_spin_init(&wq->wq_lock, pthread_lck_grp, pthread_lck_attr);
pthread_kern->proc_set_wqptr(p, wq);
- pthread_kern->proc_set_wqsize(p, wq_size);
- *(pthread_kern->proc_get_wqinitingptr(p)) = FALSE;
- need_wakeup = TRUE;
}
out:
- workqueue_unlock(p);
- if (need_wakeup == TRUE) {
- wakeup(pthread_kern->proc_get_wqinitingptr(p));
- }
return(error);
}
if (wq != NULL) {
- PTHREAD_TRACE(TRACE_wq_pthread_exit|DBG_FUNC_START, wq, 0, 0, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_pthread_exit|DBG_FUNC_START, wq, 0, 0, 0, 0);
- workqueue_lock_spin(p);
+ workqueue_lock_spin(wq);
/*
- * we now arm the timer in the callback function w/o holding the workq lock...
- * we do this by setting WQ_ATIMER_RUNNING via OSCompareAndSwap in order to
- * insure only a single timer if running and to notice that WQ_EXITING has
- * been set (we don't want to start a timer once WQ_EXITING is posted)
+ * We arm the add timer without holding the workqueue lock so we need
+ * to synchronize with any running or soon to be running timers.
*
- * so once we have successfully set WQ_EXITING, we cannot fire up a new timer...
- * therefor no need to clear the timer state atomically from the flags
+ * Threads that intend to arm the timer atomically OR
+ * WQ_ATIMER_{DELAYED,IMMEDIATE}_RUNNING into the wq_flags, only if
+ * WQ_EXITING is not present. So, once we have set WQ_EXITING, we can
+ * be sure that no new RUNNING flags will be set, but still need to
+ * wait for the already running timers to complete.
*
- * since we always hold the workq lock when dropping WQ_ATIMER_RUNNING
- * the check for and sleep until clear is protected
+ * We always hold the workq lock when dropping WQ_ATIMER_RUNNING, so
+ * the check for and sleep until clear is protected.
*/
- while (!(OSCompareAndSwap(wq->wq_flags, (wq->wq_flags | WQ_EXITING), (UInt32 *)&wq->wq_flags)));
+ WQ_SETFLAG(wq, WQ_EXITING);
- if (wq->wq_flags & WQ_ATIMER_RUNNING) {
- if (thread_call_cancel(wq->wq_atimer_call) == TRUE) {
- wq->wq_flags &= ~WQ_ATIMER_RUNNING;
+ if (wq->wq_flags & WQ_ATIMER_DELAYED_RUNNING) {
+ if (thread_call_cancel(wq->wq_atimer_delayed_call) == TRUE) {
+ WQ_UNSETFLAG(wq, WQ_ATIMER_DELAYED_RUNNING);
+ }
+ }
+ if (wq->wq_flags & WQ_ATIMER_IMMEDIATE_RUNNING) {
+ if (thread_call_cancel(wq->wq_atimer_immediate_call) == TRUE) {
+ WQ_UNSETFLAG(wq, WQ_ATIMER_IMMEDIATE_RUNNING);
}
}
- while ((wq->wq_flags & WQ_ATIMER_RUNNING) || (wq->wq_lflags & WQL_ATIMER_BUSY)) {
+ while (wq->wq_flags & (WQ_ATIMER_DELAYED_RUNNING | WQ_ATIMER_IMMEDIATE_RUNNING) ||
+ (wq->wq_lflags & WQL_ATIMER_BUSY)) {
assert_wait((caddr_t)wq, (THREAD_UNINT));
- workqueue_unlock(p);
+ workqueue_unlock(wq);
thread_block(THREAD_CONTINUE_NULL);
- workqueue_lock_spin(p);
+ workqueue_lock_spin(wq);
}
- workqueue_unlock(p);
+ workqueue_unlock(wq);
PTHREAD_TRACE(TRACE_wq_pthread_exit|DBG_FUNC_END, 0, 0, 0, 0, 0);
}
struct workqueue * wq;
struct threadlist * tl, *tlist;
struct uthread *uth;
- int wq_size = 0;
+ size_t wq_size = sizeof(struct workqueue);
wq = pthread_kern->proc_get_wqptr(p);
if (wq != NULL) {
- PTHREAD_TRACE(TRACE_wq_workqueue_exit|DBG_FUNC_START, wq, 0, 0, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_workqueue_exit|DBG_FUNC_START, wq, 0, 0, 0, 0);
- wq_size = pthread_kern->proc_get_wqsize(p);
pthread_kern->proc_set_wqptr(p, NULL);
- pthread_kern->proc_set_wqsize(p, 0);
/*
* Clean up workqueue data structures for threads that exited and
* didn't get a chance to clean up after themselves.
*/
TAILQ_FOREACH_SAFE(tl, &wq->wq_thrunlist, th_entry, tlist) {
+ assert((tl->th_flags & TH_LIST_RUNNING) != 0);
+
pthread_kern->thread_sched_call(tl->th_thread, NULL);
uth = pthread_kern->get_bsdthread_info(tl->th_thread);
kfree(tl, sizeof(struct threadlist));
}
TAILQ_FOREACH_SAFE(tl, &wq->wq_thidlelist, th_entry, tlist) {
- workqueue_removethread(tl, 1);
+ assert((tl->th_flags & TH_LIST_RUNNING) == 0);
+ assert(tl->th_priority != WORKQUEUE_EVENT_MANAGER_BUCKET);
+ workqueue_removethread(tl, true, false);
+ }
+ TAILQ_FOREACH_SAFE(tl, &wq->wq_thidlemgrlist, th_entry, tlist) {
+ assert((tl->th_flags & TH_LIST_RUNNING) == 0);
+ assert(tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET);
+ workqueue_removethread(tl, true, false);
}
- thread_call_free(wq->wq_atimer_call);
+ thread_call_free(wq->wq_atimer_delayed_call);
+ thread_call_free(wq->wq_atimer_immediate_call);
+ lck_spin_destroy(&wq->wq_lock, pthread_lck_grp);
kfree(wq, wq_size);
*/
static int wqops_queue_reqthreads(struct proc *p, int reqcount, pthread_priority_t priority){
struct workqueue *wq;
+ boolean_t start_timer = FALSE;
boolean_t overcommit = (_pthread_priority_get_flags(priority) & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) != 0;
int class = pthread_priority_get_class_index(priority);
return EINVAL;
}
- workqueue_lock_spin(p);
-
+
if ((wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p)) == NULL) {
- workqueue_unlock(p);
-
return EINVAL;
}
+
+ workqueue_lock_spin(wq);
if (overcommit == 0 && event_manager == 0) {
wq->wq_reqcount += reqcount;
wq->wq_requests[class] += reqcount;
- PTHREAD_TRACE(TRACE_wq_req_threads | DBG_FUNC_NONE, wq, priority, wq->wq_requests[class], reqcount, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_req_threads | DBG_FUNC_NONE, wq, priority, wq->wq_requests[class], reqcount, 0);
while (wq->wq_reqcount) {
if (!workqueue_run_one(p, wq, overcommit, 0))
break;
}
- } else if (overcommit){
- PTHREAD_TRACE(TRACE_wq_req_octhreads | DBG_FUNC_NONE, wq, priority, wq->wq_ocrequests[class], reqcount, 0);
+ } else if (overcommit) {
+ PTHREAD_TRACE_WQ(TRACE_wq_req_octhreads | DBG_FUNC_NONE, wq, priority, wq->wq_ocrequests[class], reqcount, 0);
while (reqcount) {
if (!workqueue_run_one(p, wq, overcommit, priority))
}
if (reqcount) {
/*
- * we need to delay starting some of the overcommit requests...
- * we should only fail to create the overcommit threads if
- * we're at the max thread limit... as existing threads
- * return to the kernel, we'll notice the ocrequests
- * and spin them back to user space as the overcommit variety
+ * We need to delay starting some of the overcommit requests.
+ * We'll record the request here and as existing threads return to
+ * the kernel, we'll notice the ocrequests and spin them back to
+ * user space as the overcommit variety.
*/
wq->wq_reqcount += reqcount;
wq->wq_requests[class] += reqcount;
wq->wq_ocrequests[class] += reqcount;
- PTHREAD_TRACE(TRACE_wq_delay_octhreads | DBG_FUNC_NONE, wq, priority, wq->wq_ocrequests[class], reqcount, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_delay_octhreads | DBG_FUNC_NONE, wq, priority, wq->wq_ocrequests[class], reqcount, 0);
- /* if we delayed this thread coming up but we're not constrained
+ /*
+ * If we delayed this thread coming up but we're not constrained
* or at max threads then we need to start the timer so we don't
* risk dropping this request on the floor.
*/
- if ((wq->wq_lflags & (WQL_EXCEEDED_TOTAL_THREAD_LIMIT | WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT)) == 0) {
- boolean_t start_timer = FALSE;
- WQ_TIMER_NEEDED(wq, start_timer);
-
- if (start_timer) {
- workqueue_interval_timer_start(wq);
- }
+ if ((wq->wq_constrained_threads_scheduled < wq_max_constrained_threads) &&
+ (wq->wq_nthreads < wq_max_threads)){
+ start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
}
}
} else if (event_manager) {
- PTHREAD_TRACE(TRACE_wq_req_event_manager | DBG_FUNC_NONE, wq, wq->wq_event_manager_priority, wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET], wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET], 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_req_event_manager | DBG_FUNC_NONE, wq, wq->wq_event_manager_priority, wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET], wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET], 0);
if (wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0){
wq->wq_reqcount += 1;
// We've recorded the request for an event manager thread above. We'll
// let the timer pick it up as we would for a kernel callout. We can
// do a direct add/wakeup when that support is added for the kevent path.
- boolean_t start_timer = FALSE;
- if (wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0)
- WQ_TIMER_NEEDED(wq, start_timer);
- if (start_timer == TRUE)
- workqueue_interval_timer_start(wq);
+ if (wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0){
+ start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
+ }
}
- workqueue_unlock(p);
+
+ if (start_timer) {
+ workqueue_interval_timer_start(wq);
+ }
+
+ workqueue_unlock(wq);
return 0;
}
-/* Used by the kevent system to request threads. Currently count is ignored
- * and we always return one thread per invocation.
+/*
+ * Used by the kevent system to request threads.
+ *
+ * Currently count is ignored and we always return one thread per invocation.
*/
thread_t _workq_reqthreads(struct proc *p, int requests_count, workq_reqthreads_req_t requests){
- boolean_t start_timer = FALSE;
+ thread_t th = THREAD_NULL;
+ boolean_t do_thread_call = FALSE;
+ boolean_t emergency_thread = FALSE;
assert(requests_count > 0);
#if DEBUG
}
#endif // DEBUG
- int error = 0;
struct workqueue *wq;
-
- workqueue_lock_spin(p);
-
if ((wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p)) == NULL) {
- error = EINVAL;
- goto done;
+ return THREAD_NULL;
}
- PTHREAD_TRACE(TRACE_wq_kevent_req_threads | DBG_FUNC_START, wq, requests_count, 0, 0, 0);
+ workqueue_lock_spin(wq);
+
+ PTHREAD_TRACE_WQ(TRACE_wq_kevent_req_threads | DBG_FUNC_START, wq, requests_count, 0, 0, 0);
// Look for overcommit or event-manager-only requests.
boolean_t have_overcommit = FALSE;
}
if (have_overcommit){
- // I can't make this call, since it's not safe from some contexts yet,
- // so just setup a delayed overcommit and let the timer do the work
- //boolean_t success = workqueue_run_one(p, wq, TRUE, priority);
- if (/* !success */ TRUE){
- int class = pthread_priority_get_class_index(priority);
- wq->wq_reqcount += 1;
- wq->wq_requests[class] += 1;
- wq->wq_kevent_ocrequests[class] += 1;
-
- PTHREAD_TRACE(TRACE_wq_req_kevent_octhreads | DBG_FUNC_NONE, wq, priority, wq->wq_kevent_ocrequests[class], 1, 0);
-
- WQ_TIMER_NEEDED(wq, start_timer);
+ if (wq->wq_thidlecount){
+ th = workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_OVERCOMMIT_KEVENT, priority, true);
+ if (th != THREAD_NULL){
+ goto out;
+ } else {
+ workqueue_lock_spin(wq); // reacquire lock
+ }
}
- goto done;
+
+ int class = pthread_priority_get_class_index(priority);
+ wq->wq_reqcount += 1;
+ wq->wq_requests[class] += 1;
+ wq->wq_kevent_ocrequests[class] += 1;
+
+ do_thread_call = WQ_TIMER_IMMEDIATE_NEEDED(wq);
+ goto deferred;
}
// Having no overcommit requests, try to find any request that can start
for (int i = 0; i < requests_count; i++){
workq_reqthreads_req_t req = requests + i;
priority = req->priority;
+ int class = pthread_priority_get_class_index(priority);
if (req->count == 0)
continue;
- int class = pthread_priority_get_class_index(priority);
-
- // Ask if we can start a new thread at the given class. Pass NUM_BUCKETS as
- // my class to indicate we won't reuse this thread
- if (may_start_constrained_thread(wq, class, WORKQUEUE_NUM_BUCKETS, NULL)){
- wq->wq_reqcount += 1;
- wq->wq_requests[class] += 1;
- wq->wq_kevent_requests[class] += 1;
-
- PTHREAD_TRACE(TRACE_wq_req_kevent_threads | DBG_FUNC_NONE, wq, priority, wq->wq_kevent_requests[class], 1, 0);
+ if (!may_start_constrained_thread(wq, class, WORKQUEUE_NUM_BUCKETS, NULL))
+ continue;
- // I can't make this call because it's not yet safe to make from
- // scheduler callout context, so instead we'll just start up the timer
- // which will spin up the thread when it files.
- // workqueue_run_one(p, wq, FALSE, priority);
+ wq->wq_reqcount += 1;
+ wq->wq_requests[class] += 1;
+ wq->wq_kevent_requests[class] += 1;
- WQ_TIMER_NEEDED(wq, start_timer);
+ PTHREAD_TRACE_WQ(TRACE_wq_req_kevent_threads | DBG_FUNC_NONE, wq, priority, wq->wq_kevent_requests[class], 1, 0);
- goto done;
+ if (wq->wq_thidlecount){
+ th = workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_DEFAULT_KEVENT, priority, true);
+ goto out;
+ } else {
+ do_thread_call = WQ_TIMER_IMMEDIATE_NEEDED(wq);
+ goto deferred;
}
}
// Okay, here's the fun case: we can't spin up any of the non-overcommit threads
// that we've seen a request for, so we kick this over to the event manager thread
+ emergency_thread = TRUE;
event_manager:
- PTHREAD_TRACE(TRACE_wq_req_event_manager | DBG_FUNC_NONE, wq, wq->wq_event_manager_priority, wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET], wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET], 0);
-
if (wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0){
wq->wq_reqcount += 1;
wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] = 1;
+ PTHREAD_TRACE_WQ(TRACE_wq_req_event_manager | DBG_FUNC_NONE, wq, 0, wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET], 1, 0);
+ } else {
+ PTHREAD_TRACE_WQ(TRACE_wq_req_event_manager | DBG_FUNC_NONE, wq, 0, wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET], 0, 0);
}
wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] = 1;
- if (wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0)
- WQ_TIMER_NEEDED(wq, start_timer);
+ if (wq->wq_thidlecount && wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0){
+ th = workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_EVENT_MANAGER, 0, true);
+ assert(th != THREAD_NULL);
+ goto out;
+ }
+ do_thread_call = WQ_TIMER_IMMEDIATE_NEEDED(wq);
-done:
- workqueue_unlock(p);
+deferred:
+ workqueue_unlock(wq);
- if (start_timer == TRUE)
- workqueue_interval_timer_start(wq);
+ if (do_thread_call == TRUE){
+ workqueue_interval_timer_trigger(wq);
+ }
- PTHREAD_TRACE(TRACE_wq_kevent_req_threads | DBG_FUNC_END, wq, start_timer, 0, 0, 0);
+out:
+ PTHREAD_TRACE_WQ(TRACE_wq_kevent_req_threads | DBG_FUNC_END, wq, do_thread_call, 0, 0, 0);
- return THREAD_NULL;
+ return emergency_thread ? (void*)-1 : th;
}
static int wqops_thread_return(struct proc *p){
thread_t th = current_thread();
struct uthread *uth = pthread_kern->get_bsdthread_info(th);
- struct threadlist *tl = util_get_thread_threadlist_entry(th);
-
+ struct threadlist *tl = pthread_kern->uthread_get_threadlist(uth);
+
/* reset signal mask on the workqueue thread to default state */
if (pthread_kern->uthread_get_sigmask(uth) != (sigset_t)(~workq_threadmask)) {
pthread_kern->proc_lock(p);
pthread_kern->uthread_set_sigmask(uth, ~workq_threadmask);
pthread_kern->proc_unlock(p);
}
-
- /* dropping WQ override counts has to be done outside the wq lock. */
- wq_thread_override_reset(th, THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD);
-
- workqueue_lock_spin(p);
struct workqueue *wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p);
if (wq == NULL || !tl) {
- workqueue_unlock(p);
-
return EINVAL;
}
- PTHREAD_TRACE(TRACE_wq_runitem | DBG_FUNC_END, wq, 0, 0, 0, 0);
- (void)workqueue_run_nextreq(p, wq, th, RUN_NEXTREQ_DEFAULT, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_START, tl->th_workq, 0, 0, 0, 0);
+
+ /*
+ * This squash call has neat semantics: it removes the specified overrides,
+ * replacing the current requested QoS with the previous effective QoS from
+ * those overrides. This means we won't be preempted due to having our QoS
+ * lowered. Of course, now our understanding of the thread's QoS is wrong,
+ * so we'll adjust below.
+ */
+ int new_qos =
+ pthread_kern->proc_usynch_thread_qos_squash_override_for_resource(th,
+ THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD,
+ THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE);
+
+ workqueue_lock_spin(wq);
+
+ if (tl->th_flags & TH_LIST_KEVENT_BOUND) {
+ unsigned int flags = KEVENT_FLAG_WORKQ;
+ if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+ flags |= KEVENT_FLAG_WORKQ_MANAGER;
+ }
+
+ workqueue_unlock(wq);
+ kevent_qos_internal_unbind(p, class_index_get_thread_qos(tl->th_priority), th, flags);
+ workqueue_lock_spin(wq);
+
+ tl->th_flags &= ~TH_LIST_KEVENT_BOUND;
+ }
+
+ /* Fix up counters from the squash operation. */
+ uint8_t old_bucket = tl->th_priority;
+ uint8_t new_bucket = thread_qos_get_class_index(new_qos);
+
+ if (old_bucket != new_bucket) {
+ OSAddAtomic(-1, &wq->wq_thactive_count[old_bucket]);
+ OSAddAtomic(1, &wq->wq_thactive_count[new_bucket]);
+
+ wq->wq_thscheduled_count[old_bucket]--;
+ wq->wq_thscheduled_count[new_bucket]++;
+
+ tl->th_priority = new_bucket;
+ }
+
+ PTHREAD_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_END, tl->th_workq, new_qos, 0, 0, 0);
+
+ PTHREAD_TRACE_WQ(TRACE_wq_runitem | DBG_FUNC_END, wq, 0, 0, 0, 0);
+
+ (void)workqueue_run_nextreq(p, wq, th, RUN_NEXTREQ_DEFAULT, 0, false);
/*
* workqueue_run_nextreq is responsible for
* dropping the workqueue lock in all cases
int
_workq_kernreturn(struct proc *p,
int options,
- __unused user_addr_t item,
+ user_addr_t item,
int arg2,
int arg3,
int32_t *retval)
*/
pthread_priority_t pri = arg2;
- workqueue_lock_spin(p);
struct workqueue *wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p);
- if (wq == NULL ) {
- workqueue_unlock(p);
+ if (wq == NULL) {
error = EINVAL;
break;
}
+ workqueue_lock_spin(wq);
if (pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG){
// If userspace passes a scheduling priority, that takes precidence
// over any QoS. (So, userspace should take care not to accidenatally
| _PTHREAD_PRIORITY_SCHED_PRI_FLAG | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
}
} else if ((wq->wq_event_manager_priority & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) == 0){
- int cur_qos = pthread_priority_get_qos_class(wq->wq_event_manager_priority);
- int new_qos = pthread_priority_get_qos_class(pri);
- wq->wq_event_manager_priority = (uint32_t)pthread_qos_class_get_priority(MAX(cur_qos, new_qos)) | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+ int cur_qos = pthread_priority_get_thread_qos(wq->wq_event_manager_priority);
+ int new_qos = pthread_priority_get_thread_qos(pri);
+ wq->wq_event_manager_priority = (uint32_t)thread_qos_get_pthread_priority(MAX(cur_qos, new_qos)) | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
}
- workqueue_unlock(p);
+ workqueue_unlock(wq);
break;
}
- case WQOPS_THREAD_KEVENT_RETURN: {
- int32_t kevent_retval;
- int ret = kevent_qos_internal(p, -1, item, arg2, item, arg2, NULL, NULL, KEVENT_FLAG_WORKQ | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS, &kevent_retval);
- // We shouldn't be getting more errors out than events we put in, so
- // reusing the input buffer should always provide enough space
- assert(ret == KERN_SUCCESS && kevent_retval >= 0);
- if (ret != KERN_SUCCESS){
- error = ret;
- break;
- } else if (kevent_retval > 0){
- assert(kevent_retval <= arg2);
- *retval = kevent_retval;
- error = 0;
- break;
+ case WQOPS_THREAD_KEVENT_RETURN:
+ if (item != 0) {
+ int32_t kevent_retval;
+ int ret = kevent_qos_internal(p, -1, item, arg2, item, arg2, NULL, NULL, KEVENT_FLAG_WORKQ | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS, &kevent_retval);
+ // We shouldn't be getting more errors out than events we put in, so
+ // reusing the input buffer should always provide enough space. But,
+ // the assert is commented out since we get errors in edge cases in the
+ // process lifecycle.
+ //assert(ret == KERN_SUCCESS && kevent_retval >= 0);
+ if (ret != KERN_SUCCESS){
+ error = ret;
+ break;
+ } else if (kevent_retval > 0){
+ assert(kevent_retval <= arg2);
+ *retval = kevent_retval;
+ error = 0;
+ break;
+ }
}
- } /* FALLTHROUGH */
- case WQOPS_THREAD_RETURN: {
+ // FALLTHRU
+ case WQOPS_THREAD_RETURN:
error = wqops_thread_return(p);
// NOT REACHED except in case of error
assert(error);
break;
- }
default:
error = EINVAL;
break;
return (FALSE);
}
}
- ran_one = workqueue_run_nextreq(p, wq, THREAD_NULL, overcommit ? RUN_NEXTREQ_OVERCOMMIT : RUN_NEXTREQ_DEFAULT, priority);
+ ran_one = (workqueue_run_nextreq(p, wq, THREAD_NULL, overcommit ? RUN_NEXTREQ_OVERCOMMIT : RUN_NEXTREQ_DEFAULT, priority, false) != THREAD_NULL);
/*
* workqueue_run_nextreq is responsible for
* dropping the workqueue lock in all cases
*/
- workqueue_lock_spin(p);
+ workqueue_lock_spin(wq);
return (ran_one);
}
/*
- * this is a workqueue thread with no more
- * work to do... park it for now
+ * We have no work to do, park ourselves on the idle list.
+ *
+ * Consumes the workqueue lock and does not return.
*/
-static void
+static void __dead2
parkit(struct workqueue *wq, struct threadlist *tl, thread_t thread)
{
- uint32_t us_to_wait;
-
+ assert(thread == tl->th_thread);
+ assert(thread == current_thread());
+
+ uint32_t us_to_wait = 0;
+
TAILQ_REMOVE(&wq->wq_thrunlist, tl, th_entry);
- tl->th_flags &= ~TH_LIST_RUNNING;
- tl->th_flags |= TH_LIST_BLOCKED;
- TAILQ_INSERT_HEAD(&wq->wq_thidlelist, tl, th_entry);
+ tl->th_flags &= ~TH_LIST_RUNNING;
+ tl->th_flags &= ~TH_LIST_KEVENT;
+ assert((tl->th_flags & TH_LIST_KEVENT_BOUND) == 0);
- pthread_kern->thread_sched_call(thread, NULL);
+ if (tl->th_flags & TH_LIST_CONSTRAINED) {
+ wq->wq_constrained_threads_scheduled--;
+ tl->th_flags &= ~TH_LIST_CONSTRAINED;
+ }
OSAddAtomic(-1, &wq->wq_thactive_count[tl->th_priority]);
wq->wq_thscheduled_count[tl->th_priority]--;
wq->wq_threads_scheduled--;
+ uint32_t thidlecount = ++wq->wq_thidlecount;
- if (tl->th_flags & TH_LIST_CONSTRAINED) {
- wq->wq_constrained_threads_scheduled--;
- wq->wq_lflags &= ~WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
- tl->th_flags &= ~TH_LIST_CONSTRAINED;
+ pthread_kern->thread_sched_call(thread, NULL);
+
+ /*
+ * We'd like to always have one manager thread parked so that we can have
+ * low latency when we need to bring a manager thread up. If that idle
+ * thread list is empty, make this thread a manager thread.
+ *
+ * XXX: This doesn't check that there's not a manager thread outstanding,
+ * so it's based on the assumption that most manager callouts will change
+ * their QoS before parking. If that stops being true, this may end up
+ * costing us more than we gain.
+ */
+ if (TAILQ_EMPTY(&wq->wq_thidlemgrlist) &&
+ tl->th_priority != WORKQUEUE_EVENT_MANAGER_BUCKET){
+ reset_priority(tl, pthread_priority_from_wq_class_index(wq, WORKQUEUE_EVENT_MANAGER_BUCKET));
+ tl->th_priority = WORKQUEUE_EVENT_MANAGER_BUCKET;
}
- if (wq->wq_thidlecount < 100)
- us_to_wait = wq_reduce_pool_window_usecs - (wq->wq_thidlecount * (wq_reduce_pool_window_usecs / 100));
- else
- us_to_wait = wq_reduce_pool_window_usecs / 100;
+ if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET){
+ TAILQ_INSERT_HEAD(&wq->wq_thidlemgrlist, tl, th_entry);
+ } else {
+ TAILQ_INSERT_HEAD(&wq->wq_thidlelist, tl, th_entry);
+ }
- wq->wq_thidlecount++;
- wq->wq_lflags &= ~WQL_EXCEEDED_TOTAL_THREAD_LIMIT;
+ PTHREAD_TRACE_WQ(TRACE_wq_thread_park | DBG_FUNC_START, wq,
+ wq->wq_threads_scheduled, wq->wq_thidlecount, us_to_wait, 0);
- assert_wait_timeout_with_leeway((caddr_t)tl, (THREAD_INTERRUPTIBLE),
- TIMEOUT_URGENCY_SYS_BACKGROUND|TIMEOUT_URGENCY_LEEWAY, us_to_wait,
- wq_reduce_pool_window_usecs, NSEC_PER_USEC);
-
- PTHREAD_TRACE1(TRACE_wq_thread_park | DBG_FUNC_START, wq, wq->wq_threads_scheduled, wq->wq_thidlecount, us_to_wait, thread_tid(thread));
+ /*
+ * When we remove the voucher from the thread, we may lose our importance
+ * causing us to get preempted, so we do this after putting the thread on
+ * the idle list. That when, when we get our importance back we'll be able
+ * to use this thread from e.g. the kevent call out to deliver a boosting
+ * message.
+ */
+ workqueue_unlock(wq);
+ kern_return_t kr = pthread_kern->thread_set_voucher_name(MACH_PORT_NULL);
+ assert(kr == KERN_SUCCESS);
+ workqueue_lock_spin(wq);
+
+ if ((tl->th_flags & TH_LIST_RUNNING) == 0) {
+ if (thidlecount < 101) {
+ us_to_wait = wq_reduce_pool_window_usecs - ((thidlecount-2) * (wq_reduce_pool_window_usecs / 100));
+ } else {
+ us_to_wait = wq_reduce_pool_window_usecs / 100;
+ }
+
+ assert_wait_timeout_with_leeway((caddr_t)tl, (THREAD_INTERRUPTIBLE),
+ TIMEOUT_URGENCY_SYS_BACKGROUND|TIMEOUT_URGENCY_LEEWAY, us_to_wait,
+ wq_reduce_pool_window_usecs/10, NSEC_PER_USEC);
+
+ workqueue_unlock(wq);
+
+ thread_block(wq_unpark_continue);
+ panic("thread_block(wq_unpark_continue) returned!");
+ } else {
+ workqueue_unlock(wq);
+
+ /*
+ * While we'd dropped the lock to unset our voucher, someone came
+ * around and made us runnable. But because we weren't waiting on the
+ * event their wakeup() was ineffectual. To correct for that, we just
+ * run the continuation ourselves.
+ */
+ wq_unpark_continue(NULL, THREAD_AWAKENED);
+ }
}
static boolean_t may_start_constrained_thread(struct workqueue *wq, uint32_t at_priclass, uint32_t my_priclass, boolean_t *start_timer){
// Has our most recently blocked thread blocked recently enough that we
// should still consider it busy?
- // XXX should this be wq->wq_thscheduled_count[at_priclass] > thactive_count ?
- if (wq->wq_thscheduled_count[at_priclass]) {
+ if (wq->wq_thscheduled_count[at_priclass] > wq->wq_thactive_count[at_priclass]) {
if (wq_thread_is_busy(mach_absolute_time(), &wq->wq_lastblocked_ts[at_priclass])) {
busycount++;
}
if (my_priclass < WORKQUEUE_NUM_BUCKETS && my_priclass == at_priclass){
/*
- * dont't count this thread as currently active
+ * don't count this thread as currently active
*/
thactive_count--;
}
* to kick off the timer... we need to
* start it now...
*/
- WQ_TIMER_NEEDED(wq, *start_timer);
+ *start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
}
- PTHREAD_TRACE(TRACE_wq_overcommitted|DBG_FUNC_NONE, wq, (start_timer ? 1<<7 : 0) | pthread_priority_from_class_index(at_priclass), thactive_count, busycount, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_overcommitted|DBG_FUNC_NONE, wq, ((start_timer && *start_timer) ? 1 << _PTHREAD_PRIORITY_FLAGS_SHIFT : 0) | class_index_get_pthread_priority(at_priclass), thactive_count, busycount, 0);
return FALSE;
}
return TRUE;
}
-static struct threadlist *pop_from_thidlelist(struct workqueue *wq, uint32_t priclass, int *upcall_flags, int *wake_thread){
- struct threadlist *tl = TAILQ_FIRST(&wq->wq_thidlelist);
- TAILQ_REMOVE(&wq->wq_thidlelist, tl, th_entry);
- wq->wq_thidlecount--;
-
- TAILQ_INSERT_TAIL(&wq->wq_thrunlist, tl, th_entry);
+static struct threadlist *
+pop_from_thidlelist(struct workqueue *wq, uint32_t priclass)
+{
+ assert(wq->wq_thidlecount);
- if ((tl->th_flags & TH_LIST_SUSPENDED) == TH_LIST_SUSPENDED) {
- tl->th_flags &= ~TH_LIST_SUSPENDED;
- *upcall_flags &= ~WQ_FLAG_THREAD_REUSE;
+ struct threadlist *tl = NULL;
- } else if ((tl->th_flags & TH_LIST_BLOCKED) == TH_LIST_BLOCKED) {
- tl->th_flags &= ~TH_LIST_BLOCKED;
- *wake_thread = 1;
+ if (!TAILQ_EMPTY(&wq->wq_thidlemgrlist) &&
+ (priclass == WORKQUEUE_EVENT_MANAGER_BUCKET || TAILQ_EMPTY(&wq->wq_thidlelist))){
+ tl = TAILQ_FIRST(&wq->wq_thidlemgrlist);
+ TAILQ_REMOVE(&wq->wq_thidlemgrlist, tl, th_entry);
+ assert(tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET);
+ } else if (!TAILQ_EMPTY(&wq->wq_thidlelist) &&
+ (priclass != WORKQUEUE_EVENT_MANAGER_BUCKET || TAILQ_EMPTY(&wq->wq_thidlemgrlist))){
+ tl = TAILQ_FIRST(&wq->wq_thidlelist);
+ TAILQ_REMOVE(&wq->wq_thidlelist, tl, th_entry);
+ assert(tl->th_priority != WORKQUEUE_EVENT_MANAGER_BUCKET);
+ } else {
+ panic("pop_from_thidlelist called with no threads available");
}
+ assert((tl->th_flags & TH_LIST_RUNNING) == 0);
+
+ assert(wq->wq_thidlecount);
+ wq->wq_thidlecount--;
+
+ TAILQ_INSERT_TAIL(&wq->wq_thrunlist, tl, th_entry);
+
tl->th_flags |= TH_LIST_RUNNING | TH_LIST_BUSY;
wq->wq_threads_scheduled++;
return tl;
}
+static pthread_priority_t
+pthread_priority_from_wq_class_index(struct workqueue *wq, int index){
+ if (index == WORKQUEUE_EVENT_MANAGER_BUCKET){
+ return wq->wq_event_manager_priority;
+ } else {
+ return class_index_get_pthread_priority(index);
+ }
+}
+
static void
-reset_to_priority(struct threadlist *tl, pthread_priority_t pri){
+reset_priority(struct threadlist *tl, pthread_priority_t pri){
kern_return_t ret;
thread_t th = tl->th_thread;
- if (tl->th_flags & TH_LIST_EVENT_MGR_SCHED_PRI){
- thread_precedence_policy_data_t precedinfo = {
- .importance = 0
- };
- ret = pthread_kern->thread_policy_set_internal(th, THREAD_PRECEDENCE_POLICY, (thread_policy_t)&precedinfo, THREAD_PRECEDENCE_POLICY_COUNT);
+ if ((pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) == 0){
+ ret = pthread_kern->thread_set_workq_qos(th, pthread_priority_get_thread_qos(pri), 0);
assert(ret == KERN_SUCCESS || ret == KERN_TERMINATED);
- tl->th_flags &= ~TH_LIST_EVENT_MGR_SCHED_PRI;
- }
- thread_qos_policy_data_t qosinfo = {
- .qos_tier = pthread_priority_get_qos_class(pri),
- .tier_importance = 0
- };
- ret = pthread_kern->thread_policy_set_internal(th, THREAD_QOS_POLICY, (thread_policy_t)&qosinfo, THREAD_QOS_POLICY_COUNT);
- assert(ret == KERN_SUCCESS || ret == KERN_TERMINATED);
-}
+ if (tl->th_flags & TH_LIST_EVENT_MGR_SCHED_PRI) {
-static void
-reset_to_schedpri(struct threadlist *tl, pthread_priority_t pri){
- kern_return_t ret;
- thread_t th = tl->th_thread;
+ /* Reset priority to default (masked by QoS) */
- thread_qos_policy_data_t qosinfo = {
- .qos_tier = THREAD_QOS_UNSPECIFIED,
- .tier_importance = 0
- };
- ret = pthread_kern->thread_policy_set_internal(th, THREAD_QOS_POLICY, (thread_policy_t)&qosinfo, THREAD_QOS_POLICY_COUNT);
- assert(ret == KERN_SUCCESS || ret == KERN_TERMINATED);
+ ret = pthread_kern->thread_set_workq_pri(th, 31, POLICY_TIMESHARE);
+ assert(ret == KERN_SUCCESS || ret == KERN_TERMINATED);
- thread_precedence_policy_data_t precedinfo = {
- .importance = ((pri & (~_PTHREAD_PRIORITY_FLAGS_MASK)) - BASEPRI_DEFAULT)
- };
- ret = pthread_kern->thread_policy_set_internal(th, THREAD_PRECEDENCE_POLICY, (thread_policy_t)&precedinfo, THREAD_PRECEDENCE_POLICY_COUNT);
- assert(ret == KERN_SUCCESS || ret == KERN_TERMINATED);
+ tl->th_flags &= ~TH_LIST_EVENT_MGR_SCHED_PRI;
+ }
+ } else {
+ ret = pthread_kern->thread_set_workq_qos(th, THREAD_QOS_UNSPECIFIED, 0);
+ assert(ret == KERN_SUCCESS || ret == KERN_TERMINATED);
+ ret = pthread_kern->thread_set_workq_pri(th, (pri & (~_PTHREAD_PRIORITY_FLAGS_MASK)), POLICY_TIMESHARE);
+ assert(ret == KERN_SUCCESS || ret == KERN_TERMINATED);
- tl->th_flags |= TH_LIST_EVENT_MGR_SCHED_PRI;
+ tl->th_flags |= TH_LIST_EVENT_MGR_SCHED_PRI;
+ }
}
/**
* - if provided mode is for overcommit, doesn't consume a reqcount
*
*/
-static boolean_t
+static thread_t
workqueue_run_nextreq(proc_t p, struct workqueue *wq, thread_t thread,
- enum run_nextreq_mode mode, pthread_priority_t oc_prio)
+ enum run_nextreq_mode mode, pthread_priority_t prio,
+ bool kevent_bind_via_return)
{
thread_t th_to_run = THREAD_NULL;
- int wake_thread = 0;
- int upcall_flags = WQ_FLAG_THREAD_REUSE;
+ uint32_t upcall_flags = 0;
uint32_t priclass;
struct threadlist *tl = NULL;
struct uthread *uth = NULL;
boolean_t start_timer = FALSE;
+ if (mode == RUN_NEXTREQ_ADD_TIMER) {
+ mode = RUN_NEXTREQ_DEFAULT;
+ }
+
// valid modes to call this function with
- assert(mode == RUN_NEXTREQ_DEFAULT || mode == RUN_NEXTREQ_OVERCOMMIT || mode == RUN_NEXTREQ_UNCONSTRAINED);
- // may only have a priority if in OVERCOMMIT mode
- assert(mode == RUN_NEXTREQ_OVERCOMMIT || oc_prio == 0);
+ assert(mode == RUN_NEXTREQ_DEFAULT || mode == RUN_NEXTREQ_DEFAULT_KEVENT ||
+ mode == RUN_NEXTREQ_OVERCOMMIT || mode == RUN_NEXTREQ_UNCONSTRAINED ||
+ mode == RUN_NEXTREQ_EVENT_MANAGER || mode == RUN_NEXTREQ_OVERCOMMIT_KEVENT);
+ // may only have a priority if in OVERCOMMIT or DEFAULT_KEVENT mode
+ assert(mode == RUN_NEXTREQ_OVERCOMMIT || mode == RUN_NEXTREQ_OVERCOMMIT_KEVENT ||
+ mode == RUN_NEXTREQ_DEFAULT_KEVENT || prio == 0);
// thread == thread_null means "please spin up a new workqueue thread, we can't reuse this"
// thread != thread_null is thread reuse, and must be the current thread
assert(thread == THREAD_NULL || thread == current_thread());
- PTHREAD_TRACE(TRACE_wq_run_nextitem|DBG_FUNC_START, wq, thread, wq->wq_thidlecount, wq->wq_reqcount, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_run_nextitem|DBG_FUNC_START, wq, thread_tid(thread), wq->wq_thidlecount, wq->wq_reqcount, 0);
if (thread != THREAD_NULL) {
uth = pthread_kern->get_bsdthread_info(thread);
}
/*
- * from here until we drop the workq lock
- * we can't be pre-empted since we hold
- * the lock in spin mode... this is important
- * since we have to independently update the priority that
- * the thread is associated with and the priorty based
- * counters that "workqueue_callback" also changes and bases
- * decisons on.
+ * from here until we drop the workq lock we can't be pre-empted since we
+ * hold the lock in spin mode... this is important since we have to
+ * independently update the priority that the thread is associated with and
+ * the priority based counters that "workqueue_callback" also changes and
+ * bases decisions on.
*/
+ /*
+ * This giant monstrosity does three things:
+ *
+ * - adjusts the mode, if required
+ * - selects the priclass that we'll be servicing
+ * - sets any mode-specific upcall flags
+ *
+ * When possible special-cases should be handled here and converted into
+ * non-special cases.
+ */
if (mode == RUN_NEXTREQ_OVERCOMMIT) {
- priclass = pthread_priority_get_class_index(oc_prio);
+ priclass = pthread_priority_get_class_index(prio);
upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
+ } else if (mode == RUN_NEXTREQ_OVERCOMMIT_KEVENT){
+ priclass = pthread_priority_get_class_index(prio);
+ upcall_flags |= WQ_FLAG_THREAD_KEVENT;
+ } else if (mode == RUN_NEXTREQ_EVENT_MANAGER){
+ assert(wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0);
+ priclass = WORKQUEUE_EVENT_MANAGER_BUCKET;
+ upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
+ if (wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET]){
+ upcall_flags |= WQ_FLAG_THREAD_KEVENT;
+ }
} else if (wq->wq_reqcount == 0){
// no work to do. we'll check again when new work arrives.
goto done;
+ } else if (mode == RUN_NEXTREQ_DEFAULT_KEVENT) {
+ assert(kevent_bind_via_return);
+
+ priclass = pthread_priority_get_class_index(prio);
+ assert(priclass < WORKQUEUE_EVENT_MANAGER_BUCKET);
+ assert(wq->wq_kevent_requests[priclass] > 0);
+
+ upcall_flags |= WQ_FLAG_THREAD_KEVENT;
+ mode = RUN_NEXTREQ_DEFAULT;
} else if (wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] &&
((wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0) ||
(thread != THREAD_NULL && tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET))){
mode = RUN_NEXTREQ_EVENT_MANAGER;
priclass = WORKQUEUE_EVENT_MANAGER_BUCKET;
upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
- if (wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET])
+ if (wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET]){
upcall_flags |= WQ_FLAG_THREAD_KEVENT;
+ }
} else {
// Find highest priority and check for special request types
for (priclass = 0; priclass < WORKQUEUE_EVENT_MANAGER_BUCKET; priclass++) {
}
}
+ assert(mode != RUN_NEXTREQ_EVENT_MANAGER || priclass == WORKQUEUE_EVENT_MANAGER_BUCKET);
+ assert(mode == RUN_NEXTREQ_EVENT_MANAGER || priclass != WORKQUEUE_EVENT_MANAGER_BUCKET);
+
if (mode == RUN_NEXTREQ_DEFAULT /* non-overcommit */){
uint32_t my_priclass = (thread != THREAD_NULL) ? tl->th_priority : WORKQUEUE_NUM_BUCKETS;
if (may_start_constrained_thread(wq, priclass, my_priclass, &start_timer) == FALSE){
* we pick up new work for this specific thread.
*/
th_to_run = thread;
+ upcall_flags |= WQ_FLAG_THREAD_REUSE;
} else if (wq->wq_thidlecount == 0) {
/*
* we have no additional threads waiting to pick up
* work, however, there is additional work to do.
*/
- WQ_TIMER_NEEDED(wq, start_timer);
+ start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
- PTHREAD_TRACE(TRACE_wq_stalled, wq, wq->wq_nthreads, start_timer, 0, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_stalled, wq, wq->wq_nthreads, start_timer, 0, 0);
goto done;
} else {
- // there is both work available and an idle thread, so activate a thread
- tl = pop_from_thidlelist(wq, priclass, &upcall_flags, &wake_thread);
- th_to_run = tl->th_thread;
+ // there is both work available and an idle thread, so activate a thread
+ tl = pop_from_thidlelist(wq, priclass);
+ th_to_run = tl->th_thread;
}
// Adjust counters and thread flags AKA consume the request
// TODO: It would be lovely if OVERCOMMIT consumed reqcount
switch (mode) {
case RUN_NEXTREQ_DEFAULT:
+ case RUN_NEXTREQ_DEFAULT_KEVENT: /* actually mapped to DEFAULT above */
+ case RUN_NEXTREQ_ADD_TIMER: /* actually mapped to DEFAULT above */
case RUN_NEXTREQ_UNCONSTRAINED:
wq->wq_reqcount--;
wq->wq_requests[priclass]--;
}
} else if (mode == RUN_NEXTREQ_UNCONSTRAINED){
if (tl->th_flags & TH_LIST_CONSTRAINED) {
- // XXX: Why aren't we unsetting CONSTRAINED_THREAD_LIMIT here
wq->wq_constrained_threads_scheduled--;
tl->th_flags &= ~TH_LIST_CONSTRAINED;
}
}
/* FALLTHROUGH */
case RUN_NEXTREQ_OVERCOMMIT:
+ case RUN_NEXTREQ_OVERCOMMIT_KEVENT:
if (tl->th_flags & TH_LIST_CONSTRAINED) {
wq->wq_constrained_threads_scheduled--;
tl->th_flags &= ~TH_LIST_CONSTRAINED;
wq->wq_kevent_ocrequests[priclass] <=
wq->wq_requests[priclass]);
+ assert((tl->th_flags & TH_LIST_KEVENT_BOUND) == 0);
+ if (upcall_flags & WQ_FLAG_THREAD_KEVENT) {
+ tl->th_flags |= TH_LIST_KEVENT;
+ } else {
+ tl->th_flags &= ~TH_LIST_KEVENT;
+ }
+
uint32_t orig_class = tl->th_priority;
tl->th_priority = (uint8_t)priclass;
}
wq->wq_thread_yielded_count = 0;
- workqueue_unlock(p);
+ pthread_priority_t outgoing_priority = pthread_priority_from_wq_class_index(wq, tl->th_priority);
+ PTHREAD_TRACE_WQ(TRACE_wq_reset_priority | DBG_FUNC_START, wq, thread_tid(tl->th_thread), outgoing_priority, 0, 0);
+ reset_priority(tl, outgoing_priority);
+ PTHREAD_TRACE_WQ(TRACE_wq_reset_priority | DBG_FUNC_END, wq, thread_tid(tl->th_thread), outgoing_priority, 0, 0);
- pthread_priority_t outgoing_priority;
- if (mode == RUN_NEXTREQ_EVENT_MANAGER){
- outgoing_priority = wq->wq_event_manager_priority;
- } else {
- outgoing_priority = pthread_priority_from_class_index(priclass);
- }
-
- PTHREAD_TRACE(TRACE_wq_reset_priority | DBG_FUNC_START, wq, thread_tid(tl->th_thread), outgoing_priority, 0, 0);
- if (outgoing_priority & _PTHREAD_PRIORITY_SCHED_PRI_FLAG){
- reset_to_schedpri(tl, outgoing_priority & (~_PTHREAD_PRIORITY_FLAGS_MASK));
- } else if (orig_class != priclass) {
- reset_to_priority(tl, outgoing_priority);
- }
- PTHREAD_TRACE(TRACE_wq_reset_priority | DBG_FUNC_END, wq, thread_tid(tl->th_thread), outgoing_priority, 0, 0);
+ /*
+ * persist upcall_flags so that in can be retrieved in setup_wqthread
+ */
+ tl->th_upcall_flags = upcall_flags >> WQ_FLAG_THREAD_PRIOSHIFT;
/*
* if current thread is reused for work request, does not return via unix_syscall
*/
- wq_runreq(p, outgoing_priority, th_to_run, tl, upcall_flags, wake_thread, (thread == th_to_run));
-
- PTHREAD_TRACE(TRACE_wq_run_nextitem|DBG_FUNC_END, wq, thread_tid(th_to_run), mode == RUN_NEXTREQ_OVERCOMMIT, 1, 0);
+ wq_runreq(p, th_to_run, wq, tl, (thread == th_to_run),
+ (upcall_flags & WQ_FLAG_THREAD_KEVENT) && !kevent_bind_via_return);
- return (TRUE);
+ PTHREAD_TRACE_WQ(TRACE_wq_run_nextitem|DBG_FUNC_END, wq, thread_tid(th_to_run), mode == RUN_NEXTREQ_OVERCOMMIT, 1, 0);
-done:
- if (thread != THREAD_NULL){
- parkit(wq,tl,thread);
+ assert(!kevent_bind_via_return || (upcall_flags & WQ_FLAG_THREAD_KEVENT));
+ if (kevent_bind_via_return && (upcall_flags & WQ_FLAG_THREAD_KEVENT)) {
+ tl->th_flags |= TH_LIST_KEVENT_BOUND;
}
- workqueue_unlock(p);
+ workqueue_unlock(wq);
+
+ return th_to_run;
+done:
if (start_timer)
workqueue_interval_timer_start(wq);
- PTHREAD_TRACE(TRACE_wq_run_nextitem | DBG_FUNC_END, wq, thread_tid(thread), start_timer, 3, 0);
+ PTHREAD_TRACE_WQ(TRACE_wq_run_nextitem | DBG_FUNC_END, wq, thread_tid(thread), start_timer, 3, 0);
if (thread != THREAD_NULL){
- thread_block((thread_continue_t)wq_unpark_continue);
+ parkit(wq, tl, thread);
/* NOT REACHED */
}
- return (FALSE);
+ workqueue_unlock(wq);
+
+ return THREAD_NULL;
}
/**
- * Called when a new thread is created
+ * parked thread wakes up
*/
-static void
-wq_unsuspend_continue(void)
+static void __dead2
+wq_unpark_continue(void* __unused ptr, wait_result_t wait_result)
{
- struct uthread *uth = NULL;
- thread_t th_to_unsuspend;
- struct threadlist *tl;
- proc_t p;
+ boolean_t first_use = false;
+ thread_t th = current_thread();
+ proc_t p = current_proc();
- th_to_unsuspend = current_thread();
- uth = pthread_kern->get_bsdthread_info(th_to_unsuspend);
+ struct uthread *uth = pthread_kern->get_bsdthread_info(th);
+ if (uth == NULL) goto done;
- if (uth != NULL && (tl = pthread_kern->uthread_get_threadlist(uth)) != NULL) {
-
- if ((tl->th_flags & (TH_LIST_RUNNING | TH_LIST_BUSY)) == TH_LIST_RUNNING) {
- /*
- * most likely a normal resume of this thread occurred...
- * it's also possible that the thread was aborted after we
- * finished setting it up so that it could be dispatched... if
- * so, thread_bootstrap_return will notice the abort and put
- * the thread on the path to self-destruction
- */
-normal_resume_to_user:
- pthread_kern->thread_sched_call(th_to_unsuspend, workqueue_callback);
- pthread_kern->thread_bootstrap_return();
- }
- /*
- * if we get here, it's because we've been resumed due to
- * an abort of this thread (process is crashing)
- */
- p = current_proc();
+ struct threadlist *tl = pthread_kern->uthread_get_threadlist(uth);
+ if (tl == NULL) goto done;
- workqueue_lock_spin(p);
+ struct workqueue *wq = tl->th_workq;
- if (tl->th_flags & TH_LIST_SUSPENDED) {
- /*
- * thread has been aborted while still on our idle
- * queue... remove it from our domain...
- * workqueue_removethread consumes the lock
- */
- workqueue_removethread(tl, 0);
- pthread_kern->thread_bootstrap_return();
- }
- while ((tl->th_flags & TH_LIST_BUSY)) {
- /*
- * this thread was aborted after we started making
- * it runnable, but before we finished dispatching it...
- * we need to wait for that process to finish,
- * and we need to ask for a wakeup instead of a
- * thread_resume since the abort has already resumed us
- */
- tl->th_flags |= TH_LIST_NEED_WAKEUP;
+ workqueue_lock_spin(wq);
- assert_wait((caddr_t)tl, (THREAD_UNINT));
+ assert(tl->th_flags & TH_LIST_INITED);
- workqueue_unlock(p);
- thread_block(THREAD_CONTINUE_NULL);
- workqueue_lock_spin(p);
- }
- workqueue_unlock(p);
+ if ((tl->th_flags & TH_LIST_NEW)){
+ tl->th_flags &= ~(TH_LIST_NEW);
+ first_use = true;
+ }
+
+ if ((tl->th_flags & (TH_LIST_RUNNING | TH_LIST_BUSY)) == TH_LIST_RUNNING) {
/*
- * we have finished setting up the thread's context...
- * thread_bootstrap_return will take us through the abort path
- * where the thread will self destruct
+ * The normal wakeup path.
*/
- goto normal_resume_to_user;
+ goto return_to_user;
}
- pthread_kern->thread_bootstrap_return();
-}
-/**
- * parked thread wakes up
- */
-static void
-wq_unpark_continue(void)
-{
- struct uthread *uth;
- struct threadlist *tl;
+ if ((tl->th_flags & TH_LIST_RUNNING) == 0 &&
+ wait_result == THREAD_TIMED_OUT &&
+ tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET &&
+ TAILQ_FIRST(&wq->wq_thidlemgrlist) == tl &&
+ TAILQ_NEXT(tl, th_entry) == NULL){
+ /*
+ * If we are the only idle manager and we popped for self-destruction,
+ * then don't actually exit. Instead, free our stack to save some
+ * memory and re-park.
+ */
- thread_t th_to_unpark = current_thread();
+ workqueue_unlock(wq);
- if ((uth = pthread_kern->get_bsdthread_info(th_to_unpark)) == NULL)
- goto done;
- if ((tl = pthread_kern->uthread_get_threadlist(uth)) == NULL)
- goto done;
+ vm_map_t vmap = wq->wq_map;
- /*
- * check if a normal wakeup of this thread occurred... if so, there's no need
- * for any synchronization with the timer and wq_runreq so we just skip all this.
- */
- if ((tl->th_flags & (TH_LIST_RUNNING | TH_LIST_BUSY)) != TH_LIST_RUNNING) {
- proc_t p = current_proc();
+ // Keep this in sync with _setup_wqthread()
+ const vm_size_t guardsize = vm_map_page_size(vmap);
+ const user_addr_t freeaddr = (user_addr_t)tl->th_stackaddr + guardsize;
+ const vm_map_offset_t freesize = vm_map_trunc_page_mask((PTH_DEFAULT_STACKSIZE + guardsize + PTHREAD_T_OFFSET) - 1, vm_map_page_mask(vmap)) - guardsize;
+
+ int kr;
+ kr = mach_vm_behavior_set(vmap, freeaddr, freesize, VM_BEHAVIOR_REUSABLE);
+ assert(kr == KERN_SUCCESS || kr == KERN_INVALID_ADDRESS);
- workqueue_lock_spin(p);
+ workqueue_lock_spin(wq);
if ( !(tl->th_flags & TH_LIST_RUNNING)) {
- /*
- * the timer popped us out and we've not
- * been moved off of the idle list
- * so we should now self-destruct
- *
- * workqueue_removethread consumes the lock
- */
- workqueue_removethread(tl, 0);
- pthread_kern->unix_syscall_return(0);
+ assert_wait((caddr_t)tl, (THREAD_INTERRUPTIBLE));
+
+ workqueue_unlock(wq);
+
+ thread_block(wq_unpark_continue);
+ /* NOT REACHED */
}
+ }
+ if ((tl->th_flags & TH_LIST_RUNNING) == 0) {
+ assert((tl->th_flags & TH_LIST_BUSY) == 0);
/*
- * the timer woke us up, but we have already
- * started to make this a runnable thread,
- * but have not yet finished that process...
- * so wait for the normal wakeup
+ * We were set running, but not for the purposes of actually running.
+ * This could be because the timer elapsed. Or it could be because the
+ * thread aborted. Either way, we need to return to userspace to exit.
+ *
+ * The call to workqueue_removethread will consume the lock.
*/
- while ((tl->th_flags & TH_LIST_BUSY)) {
-
- assert_wait((caddr_t)tl, (THREAD_UNINT));
- workqueue_unlock(p);
+ if (!first_use &&
+ tl->th_priority != qos_class_get_class_index(WQ_THREAD_CLEANUP_QOS)) {
+ // Reset the QoS to something low for the pthread cleanup
+ pthread_priority_t cleanup_pri = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
+ reset_priority(tl, cleanup_pri);
+ }
- thread_block(THREAD_CONTINUE_NULL);
+ workqueue_removethread(tl, 0, first_use);
- workqueue_lock_spin(p);
+ if (first_use){
+ pthread_kern->thread_bootstrap_return();
+ } else {
+ pthread_kern->unix_syscall_return(0);
}
-
- /*
- * we have finished setting up the thread's context
- * now we can return as if we got a normal wakeup
- */
- workqueue_unlock(p);
+ /* NOT REACHED */
}
- pthread_kern->thread_sched_call(th_to_unpark, workqueue_callback);
-
- // FIXME: What's this?
- PTHREAD_TRACE(0xefffd018 | DBG_FUNC_END, tl->th_workq, 0, 0, 0, 0);
+ /*
+ * The timer woke us up or the thread was aborted. However, we have
+ * already started to make this a runnable thread. Wait for that to
+ * finish, then continue to userspace.
+ */
+ while ((tl->th_flags & TH_LIST_BUSY)) {
+ assert_wait((caddr_t)tl, (THREAD_UNINT));
-done:
+ workqueue_unlock(wq);
- // XXX should be using unix_syscall_return(EJUSTRETURN)
- pthread_kern->thread_exception_return();
-}
+ thread_block(THREAD_CONTINUE_NULL);
+ workqueue_lock_spin(wq);
+ }
+return_to_user:
+ workqueue_unlock(wq);
+ _setup_wqthread(p, th, wq, tl, first_use);
+ pthread_kern->thread_sched_call(th, workqueue_callback);
+done:
+ if (first_use){
+ pthread_kern->thread_bootstrap_return();
+ } else {
+ pthread_kern->unix_syscall_return(EJUSTRETURN);
+ }
+ panic("Our attempt to return to userspace failed...");
+}
-static void
-wq_runreq(proc_t p, pthread_priority_t priority, thread_t th, struct threadlist *tl,
- int flags, int wake_thread, int return_directly)
+/* called with workqueue lock held */
+static void
+wq_runreq(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *tl,
+ boolean_t return_directly, boolean_t needs_kevent_bind)
{
- int ret = 0;
- boolean_t need_resume = FALSE;
-
- PTHREAD_TRACE1(TRACE_wq_runitem | DBG_FUNC_START, tl->th_workq, flags, priority, thread_tid(current_thread()), thread_tid(th));
+ PTHREAD_TRACE1_WQ(TRACE_wq_runitem | DBG_FUNC_START, tl->th_workq, 0, 0, thread_tid(current_thread()), thread_tid(th));
- ret = _setup_wqthread(p, th, priority, flags, tl);
-
- if (ret != 0)
- panic("setup_wqthread failed %x\n", ret);
+ unsigned int kevent_flags = KEVENT_FLAG_WORKQ;
+ if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+ kevent_flags |= KEVENT_FLAG_WORKQ_MANAGER;
+ }
if (return_directly) {
- PTHREAD_TRACE(TRACE_wq_run_nextitem|DBG_FUNC_END, tl->th_workq, 0, 0, 4, 0);
+ if (needs_kevent_bind) {
+ assert((tl->th_flags & TH_LIST_KEVENT_BOUND) == 0);
+ tl->th_flags |= TH_LIST_KEVENT_BOUND;
+ }
- // XXX should be using unix_syscall_return(EJUSTRETURN)
- pthread_kern->thread_exception_return();
- panic("wq_runreq: thread_exception_return returned ...\n");
- }
- if (wake_thread) {
- workqueue_lock_spin(p);
-
- tl->th_flags &= ~TH_LIST_BUSY;
- wakeup(tl);
+ workqueue_unlock(wq);
- workqueue_unlock(p);
- } else {
- PTHREAD_TRACE1(TRACE_wq_thread_suspend | DBG_FUNC_END, tl->th_workq, 0, 0, thread_tid(current_thread()), thread_tid(th));
+ if (needs_kevent_bind) {
+ kevent_qos_internal_bind(p, class_index_get_thread_qos(tl->th_priority), th, kevent_flags);
+ }
+
+ /*
+ * For preemption reasons, we want to reset the voucher as late as
+ * possible, so we do it in two places:
+ * - Just before parking (i.e. in parkit())
+ * - Prior to doing the setup for the next workitem (i.e. here)
+ *
+ * Those two places are sufficient to ensure we always reset it before
+ * it goes back out to user space, but be careful to not break that
+ * guarantee.
+ */
+ kern_return_t kr = pthread_kern->thread_set_voucher_name(MACH_PORT_NULL);
+ assert(kr == KERN_SUCCESS);
- workqueue_lock_spin(p);
+ _setup_wqthread(p, th, wq, tl, false);
- if (tl->th_flags & TH_LIST_NEED_WAKEUP) {
- wakeup(tl);
- } else {
- need_resume = TRUE;
- }
+ PTHREAD_TRACE_WQ(TRACE_wq_run_nextitem|DBG_FUNC_END, tl->th_workq, 0, 0, 4, 0);
- tl->th_flags &= ~(TH_LIST_BUSY | TH_LIST_NEED_WAKEUP);
-
- workqueue_unlock(p);
+ pthread_kern->unix_syscall_return(EJUSTRETURN);
+ /* NOT REACHED */
+ }
- if (need_resume) {
- /*
- * need to do this outside of the workqueue spin lock
- * since thread_resume locks the thread via a full mutex
- */
- pthread_kern->thread_resume(th);
- }
+ if (needs_kevent_bind) {
+ // Leave TH_LIST_BUSY set so that the thread can't beat us to calling kevent
+ workqueue_unlock(wq);
+ assert((tl->th_flags & TH_LIST_KEVENT_BOUND) == 0);
+ kevent_qos_internal_bind(p, class_index_get_thread_qos(tl->th_priority), th, kevent_flags);
+ tl->th_flags |= TH_LIST_KEVENT_BOUND;
+ workqueue_lock_spin(wq);
}
+ tl->th_flags &= ~(TH_LIST_BUSY);
+ thread_wakeup_thread(tl,th);
}
-#define KEVENT_LIST_LEN 16
+#define KEVENT_LIST_LEN 16 // WORKQ_KEVENT_EVENT_BUFFER_LEN
#define KEVENT_DATA_SIZE (32 * 1024)
/**
* configures initial thread stack/registers to jump into:
- * _pthread_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *keventlist, int flags, int nkevents);
+ * _pthread_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *keventlist, int upcall_flags, int nkevents);
* to get there we jump through assembily stubs in pthread_asm.s. Those
* routines setup a stack frame, using the current stack pointer, and marshall
* arguments from registers to the stack as required by the ABI.
* |guard page | guardsize
* |-----------| th_stackaddr
*/
-int
-_setup_wqthread(proc_t p, thread_t th, pthread_priority_t priority, int flags, struct threadlist *tl)
+void
+_setup_wqthread(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *tl,
+ bool first_use)
{
- int error = 0;
+ int error;
+ uint32_t upcall_flags;
+
+ pthread_priority_t priority = pthread_priority_from_wq_class_index(wq, tl->th_priority);
const vm_size_t guardsize = vm_map_page_size(tl->th_workq->wq_map);
const vm_size_t stack_gap_min = (proc_is64bit(p) == 0) ? C_32_STK_ALIGN : C_64_REDZONE_LEN;
user_addr_t stack_top_addr = (user_addr_t)((pthread_self_addr - stack_gap_min) & -stack_align_min);
user_addr_t stack_bottom_addr = (user_addr_t)(tl->th_stackaddr + guardsize);
+ user_addr_t wqstart_fnptr = pthread_kern->proc_get_wqthread(p);
+ if (!wqstart_fnptr) {
+ panic("workqueue thread start function pointer is NULL");
+ }
+
/* Put the QoS class value into the lower bits of the reuse_thread register, this is where
* the thread priority used to be stored anyway.
*/
- flags |= (_pthread_priority_get_qos_newest(priority) & WQ_FLAG_THREAD_PRIOMASK);
+ upcall_flags = tl->th_upcall_flags << WQ_FLAG_THREAD_PRIOSHIFT;
+ upcall_flags |= (_pthread_priority_get_qos_newest(priority) & WQ_FLAG_THREAD_PRIOMASK);
- flags |= WQ_FLAG_THREAD_NEWSPI;
+ upcall_flags |= WQ_FLAG_THREAD_NEWSPI;
+
+ uint32_t tsd_offset = pthread_kern->proc_get_pthread_tsd_offset(p);
+ if (tsd_offset) {
+ mach_vm_offset_t th_tsd_base = (mach_vm_offset_t)pthread_self_addr + tsd_offset;
+ kern_return_t kret = pthread_kern->thread_set_tsd_base(th, th_tsd_base);
+ if (kret == KERN_SUCCESS) {
+ upcall_flags |= WQ_FLAG_THREAD_TSD_BASE_SET;
+ }
+ }
+
+ if (first_use) {
+ /*
+ * Pre-fault the first page of the new thread's stack and the page that will
+ * contain the pthread_t structure.
+ */
+ vm_map_t vmap = pthread_kern->current_map();
+ if (vm_map_trunc_page_mask((vm_map_offset_t)(stack_top_addr - C_64_REDZONE_LEN), vm_map_page_mask(vmap)) !=
+ vm_map_trunc_page_mask((vm_map_offset_t)pthread_self_addr, vm_map_page_mask(vmap))){
+ vm_fault( vmap,
+ vm_map_trunc_page_mask((vm_map_offset_t)(stack_top_addr - C_64_REDZONE_LEN), vm_map_page_mask(vmap)),
+ VM_PROT_READ | VM_PROT_WRITE,
+ FALSE,
+ THREAD_UNINT, NULL, 0);
+ }
+ vm_fault( vmap,
+ vm_map_trunc_page_mask((vm_map_offset_t)pthread_self_addr, vm_map_page_mask(vmap)),
+ VM_PROT_READ | VM_PROT_WRITE,
+ FALSE,
+ THREAD_UNINT, NULL, 0);
+ } else {
+ upcall_flags |= WQ_FLAG_THREAD_REUSE;
+ }
user_addr_t kevent_list = NULL;
int kevent_count = 0;
- if (flags & WQ_FLAG_THREAD_KEVENT){
+ if (upcall_flags & WQ_FLAG_THREAD_KEVENT){
kevent_list = pthread_self_addr - KEVENT_LIST_LEN * sizeof(struct kevent_qos_s);
kevent_count = KEVENT_LIST_LEN;
int32_t events_out = 0;
- int ret = kevent_qos_internal(p, -1, NULL, 0, kevent_list, kevent_count,
+ assert(tl->th_flags | TH_LIST_KEVENT_BOUND);
+ unsigned int flags = KEVENT_FLAG_WORKQ | KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE;
+ if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+ flags |= KEVENT_FLAG_WORKQ_MANAGER;
+ }
+ int ret = kevent_qos_internal(p, class_index_get_thread_qos(tl->th_priority), NULL, 0, kevent_list, kevent_count,
kevent_data_buf, &kevent_data_available,
- KEVENT_FLAG_WORKQ | KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_STACK_EVENTS | KEVENT_FLAG_IMMEDIATE,
- &events_out);
+ flags, &events_out);
- // squash any errors into just empty output on non-debug builds
- assert(ret == KERN_SUCCESS && events_out != -1);
+ // turns out there are a lot of edge cases where this will fail, so not enabled by default
+ //assert((ret == KERN_SUCCESS && events_out != -1) || ret == KERN_ABORTED);
+
+ // squash any errors into just empty output on non-debug builds
if (ret != KERN_SUCCESS || events_out == -1){
events_out = 0;
kevent_data_available = KEVENT_DATA_SIZE;
// We shouldn't get data out if there aren't events available
assert(events_out != 0 || kevent_data_available == KEVENT_DATA_SIZE);
- if (events_out >= 0){
- kevent_count = events_out;
- kevent_list = pthread_self_addr - kevent_count * sizeof(struct kevent_qos_s);
-
+ if (events_out > 0){
if (kevent_data_available == KEVENT_DATA_SIZE){
stack_top_addr = (kevent_list - stack_gap_min) & -stack_align_min;
} else {
stack_top_addr = (kevent_data_buf + kevent_data_available - stack_gap_min) & -stack_align_min;
}
+
+ kevent_count = events_out;
} else {
kevent_list = NULL;
kevent_count = 0;
}
#if defined(__i386__) || defined(__x86_64__)
- int isLP64 = proc_is64bit(p);
-
- if (isLP64 == 0) {
+ if (proc_is64bit(p) == 0) {
x86_thread_state32_t state = {
- .eip = (unsigned int)pthread_kern->proc_get_wqthread(p),
+ .eip = (unsigned int)wqstart_fnptr,
.eax = /* arg0 */ (unsigned int)pthread_self_addr,
.ebx = /* arg1 */ (unsigned int)tl->th_thport,
.ecx = /* arg2 */ (unsigned int)stack_bottom_addr,
.edx = /* arg3 */ (unsigned int)kevent_list,
- .edi = /* arg4 */ (unsigned int)flags,
+ .edi = /* arg4 */ (unsigned int)upcall_flags,
.esi = /* arg5 */ (unsigned int)kevent_count,
.esp = (int)((vm_offset_t)stack_top_addr),
};
- (void)pthread_kern->thread_set_wq_state32(th, (thread_state_t)&state);
+ error = pthread_kern->thread_set_wq_state32(th, (thread_state_t)&state);
+ if (error != KERN_SUCCESS) {
+ panic(__func__ ": thread_set_wq_state failed: %d", error);
+ }
} else {
x86_thread_state64_t state64 = {
// x86-64 already passes all the arguments in registers, so we just put them in their final place here
- .rip = (uint64_t)pthread_kern->proc_get_wqthread(p),
+ .rip = (uint64_t)wqstart_fnptr,
.rdi = (uint64_t)pthread_self_addr,
.rsi = (uint64_t)tl->th_thport,
.rdx = (uint64_t)stack_bottom_addr,
.rcx = (uint64_t)kevent_list,
- .r8 = (uint64_t)flags,
+ .r8 = (uint64_t)upcall_flags,
.r9 = (uint64_t)kevent_count,
.rsp = (uint64_t)(stack_top_addr)
error = pthread_kern->thread_set_wq_state64(th, (thread_state_t)&state64);
if (error != KERN_SUCCESS) {
- error = EINVAL;
+ panic(__func__ ": thread_set_wq_state failed: %d", error);
}
}
#else
#error setup_wqthread not defined for this architecture
#endif
-
- return error;
}
#if DEBUG
int activecount;
uint32_t pri;
- workqueue_lock_spin(p);
if ((wq = pthread_kern->proc_get_wqptr(p)) == NULL) {
- error = EINVAL;
- goto out;
+ return EINVAL;
}
+
+ workqueue_lock_spin(wq);
activecount = 0;
for (pri = 0; pri < WORKQUEUE_NUM_BUCKETS; pri++) {
pwqinfo->pwq_blockedthreads = wq->wq_threads_scheduled - activecount;
pwqinfo->pwq_state = 0;
- if (wq->wq_lflags & WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT) {
+ if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
pwqinfo->pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
}
- if (wq->wq_lflags & WQL_EXCEEDED_TOTAL_THREAD_LIMIT) {
+ if (wq->wq_nthreads >= wq_max_threads) {
pwqinfo->pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
}
-out:
- workqueue_unlock(p);
+ workqueue_unlock(wq);
return(error);
}
+/*
+ * Return the pwq_state flags for process `p` for use from the kernel
+ * debugger (kdp) context.
+ *
+ * Returns 0 (no information) when `p` is NULL, when the process has no
+ * workqueue, or when the workqueue spinlock is currently held --
+ * NOTE(review): presumably because kdp cannot block on the lock and the
+ * fields would be unstable; confirm against the kdp call site.
+ *
+ * Reads wq_constrained_threads_scheduled / wq_nthreads without taking
+ * the lock; the limit checks mirror the pwqinfo path elsewhere in this
+ * file.
+ */
+uint32_t
+_get_pwq_state_kdp(proc_t p)
+{
+	if (p == NULL) {
+		return 0;
+	}
+
+	struct workqueue *wq = pthread_kern->proc_get_wqptr(p);
+
+	/* No workqueue, or lock held by someone else: report nothing. */
+	if (wq == NULL || workqueue_lock_spin_is_acquired_kdp(wq)) {
+		return 0;
+	}
+
+	uint32_t pwq_state = WQ_FLAGS_AVAILABLE;
+
+	if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
+		pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
+	}
+
+	if (wq->wq_nthreads >= wq_max_threads) {
+		pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
+	}
+
+	return pwq_state;
+}
+
int
_thread_selfid(__unused struct proc *p, uint64_t *retval)
{
*/
pthread_lck_attr = lck_attr_alloc_init();
- _workqueue_init_lock((proc_t)get_bsdtask_info(kernel_task));
pthread_list_mlock = lck_mtx_alloc_init(pthread_lck_grp, pthread_lck_attr);
pth_global_hashinit();
#endif
#if KERNEL
+#include <vm/vm_kern.h>
+
extern uint32_t pthread_debug_tracing;
+/*
+ * Remove the kernel slide from `ptr` via
+ * vm_kernel_unslide_or_perm_external() so the pointer can be logged in
+ * trace records without exposing the slid kernel address --
+ * NOTE(review): used by the PTHREAD_TRACE*_WQ macros below; marked
+ * __unused because it is only referenced when tracing is compiled in.
+ */
+static __unused void*
+VM_UNSLIDE(void* ptr)
+{
+	vm_offset_t unslid_ptr;
+	vm_kernel_unslide_or_perm_external(ptr, &unslid_ptr);
+	return (void*)unslid_ptr;
+}
+
# define PTHREAD_TRACE(x,a,b,c,d,e) \
{ if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e); } }
{ if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT1(x, a, b, c, d, e); } }
#endif
+# define PTHREAD_TRACE_WQ(x,a,b,c,d,e) \
+ { if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT(x, VM_UNSLIDE(a), b, c, d, e); } }
+
+# define PTHREAD_TRACE1_WQ(x,a,b,c,d,e) \
+ { if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT1(x, VM_UNSLIDE(a), b, c, d, e); } }
+
# define TRACE_CODE(name, subclass, code) \
static const int TRACE_##name = KDBG_CODE(DBG_PTHREAD, subclass, code)
TRACE_CODE(wq_thread_constrained_maxed, _TRACE_SUB_WORKQUEUE, 0x1b);
TRACE_CODE(wq_thread_add_during_exit, _TRACE_SUB_WORKQUEUE, 0x1c);
TRACE_CODE(wq_thread_create_failed, _TRACE_SUB_WORKQUEUE, 0x1d);
+TRACE_CODE(wq_manager_request, _TRACE_SUB_WORKQUEUE, 0x1e);
+TRACE_CODE(wq_thread_create, _TRACE_SUB_WORKQUEUE, 0x1f);
// synch trace points
TRACE_CODE(psynch_mutex_ulock, _TRACE_SUB_MUTEX, 0x0);
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
- <key>OSBundleRequired</key>
- <string>Root</string>
<key>AppleKernelExternalComponent</key>
<true/>
- <key>OSBundleAllowUserLoad</key>
- <true/>
- <key>OSBundleCompatibleVersion</key>
- <string>1.0</string>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleExecutable</key>
<key>CFBundleIconFile</key>
<string></string>
<key>CFBundleIdentifier</key>
- <string>${MODULE_NAME}</string>
+ <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>1</string>
<key>NSHumanReadableCopyright</key>
<string>Copyright © 2012 Apple Inc. All rights reserved.</string>
+ <key>OSBundleAllowUserLoad</key>
+ <true/>
+ <key>OSBundleCompatibleVersion</key>
+ <string>1.0</string>
<key>OSBundleLibraries</key>
<dict>
<key>com.apple.kpi.bsd</key>
<key>com.apple.kpi.unsupported</key>
<string>11.2</string>
</dict>
+ <key>OSBundleRequired</key>
+ <string>Root</string>
</dict>
</plist>
#define WQOPS_THREAD_KEVENT_RETURN 0x40 /* parks the thread after delivering the passed kevent array */
#define WQOPS_SET_EVENT_MANAGER_PRIORITY 0x80 /* max() the provided priority into the priority of the event manager */
-/* flag values for reuse field in the libc side _pthread_wqthread */
-#define WQ_FLAG_THREAD_PRIOMASK 0x0000ffff
-#define WQ_FLAG_THREAD_PRIOSHIFT (8ull)
-#define WQ_FLAG_THREAD_OVERCOMMIT 0x00010000 /* thread is with overcommit prio */
-#define WQ_FLAG_THREAD_REUSE 0x00020000 /* thread is being reused */
-#define WQ_FLAG_THREAD_NEWSPI 0x00040000 /* the call is with new SPIs */
-#define WQ_FLAG_THREAD_KEVENT 0x00080000 /* thread is response to kevent req */
-#define WQ_FLAG_THREAD_EVENT_MANAGER 0x00100000 /* event manager thread */
+/* flag values for upcall flags field, only 8 bits per struct threadlist */
+#define WQ_FLAG_THREAD_PRIOMASK 0x0000ffff
+#define WQ_FLAG_THREAD_PRIOSHIFT 16
+#define WQ_FLAG_THREAD_OVERCOMMIT 0x00010000 /* thread is with overcommit prio */
+#define WQ_FLAG_THREAD_REUSE 0x00020000 /* thread is being reused */
+#define WQ_FLAG_THREAD_NEWSPI 0x00040000 /* the call is with new SPIs */
+#define WQ_FLAG_THREAD_KEVENT 0x00080000 /* thread is response to kevent req */
+#define WQ_FLAG_THREAD_EVENT_MANAGER 0x00100000 /* event manager thread */
+#define WQ_FLAG_THREAD_TSD_BASE_SET 0x00200000 /* tsd base has already been set */
+
+#define WQ_THREAD_CLEANUP_QOS QOS_CLASS_UTILITY
/* These definitions are only available to the kext, to avoid bleeding constants and types across the boundary to
* the userspace library.
#define WORKQUEUE_OVERCOMMIT 0x10000
+/*
+ * A thread which is scheduled may read its own th_priority field without
+ * taking the workqueue lock. Other fields should be assumed to require the
+ * lock.
+ */
struct threadlist {
TAILQ_ENTRY(threadlist) th_entry;
thread_t th_thread;
- int th_flags;
- uint8_t th_priority;
- uint8_t th_policy;
struct workqueue *th_workq;
- mach_vm_size_t th_stacksize;
- mach_vm_size_t th_allocsize;
mach_vm_offset_t th_stackaddr;
mach_port_name_t th_thport;
+ uint16_t th_flags;
+ uint8_t th_upcall_flags;
+ uint8_t th_priority;
};
-#define TH_LIST_INITED 0x01
-#define TH_LIST_RUNNING 0x02
-#define TH_LIST_BLOCKED 0x04
-#define TH_LIST_SUSPENDED 0x08
-#define TH_LIST_BUSY 0x10
-#define TH_LIST_NEED_WAKEUP 0x20
-#define TH_LIST_CONSTRAINED 0x40
-#define TH_LIST_EVENT_MGR_SCHED_PRI 0x80
+#define TH_LIST_INITED 0x01 /* Set at thread creation. */
+#define TH_LIST_RUNNING 0x02 /* On thrunlist, not parked. */
+#define TH_LIST_KEVENT 0x04 /* Thread requested by kevent */
+#define TH_LIST_NEW 0x08 /* First return to userspace */
+#define TH_LIST_BUSY 0x10 /* Removed from idle list but not ready yet. */
+#define TH_LIST_KEVENT_BOUND 0x20 /* Thread bound to kqueues */
+#define TH_LIST_CONSTRAINED 0x40 /* Non-overcommit thread. */
+#define TH_LIST_EVENT_MGR_SCHED_PRI 0x80 /* Non-QoS Event Manager */
struct workqueue {
proc_t wq_proc;
vm_map_t wq_map;
task_t wq_task;
- thread_call_t wq_atimer_call;
- int wq_flags; // updated atomically
- int wq_lflags; // protected by wqueue lock
- uint64_t wq_thread_yielded_timestamp;
+
+ _Atomic uint32_t wq_flags; // updated atomically
+ uint32_t wq_lflags; // protected by wqueue lock
+
+ lck_spin_t wq_lock;
+ boolean_t wq_interrupt_state;
+
+ thread_call_t wq_atimer_delayed_call;
+ thread_call_t wq_atimer_immediate_call;
+
+ uint64_t wq_thread_yielded_timestamp;
uint32_t wq_thread_yielded_count;
uint32_t wq_timer_interval;
uint32_t wq_max_concurrency;
uint32_t wq_constrained_threads_scheduled;
uint32_t wq_nthreads;
uint32_t wq_thidlecount;
+
TAILQ_HEAD(, threadlist) wq_thrunlist;
TAILQ_HEAD(, threadlist) wq_thidlelist;
+ TAILQ_HEAD(, threadlist) wq_thidlemgrlist;
/* Counters for how many requests we have outstanding. The invariants here:
* - reqcount == SUM(requests) + (event manager ? 1 : 0)
* - SUM(ocrequests) + SUM(kevent_requests) + SUM(kevent_ocrequests) <= SUM(requests)
* - # of constrained requests is difference between quantities above
- * i.e. a kevent+overcommit request will incrument reqcount, requests and
+ * i.e. a kevent+overcommit request will increment reqcount, requests and
* kevent_ocrequests only.
*/
uint32_t wq_reqcount;
uint32_t wq_thactive_count[WORKQUEUE_NUM_BUCKETS] __attribute__((aligned(4))); /* must be uint32_t since we OSAddAtomic on these */
uint64_t wq_lastblocked_ts[WORKQUEUE_NUM_BUCKETS] __attribute__((aligned(8))); /* XXX: why per bucket? */
- uint32_t wq_event_manager_priority;
+ uint32_t wq_event_manager_priority;
};
#define WQ_LIST_INITED 0x01
-#define WQ_ATIMER_RUNNING 0x02
-#define WQ_EXITING 0x04
+#define WQ_EXITING 0x02
+#define WQ_ATIMER_DELAYED_RUNNING 0x04
+#define WQ_ATIMER_IMMEDIATE_RUNNING 0x08
+
+#define WQ_SETFLAG(wq, flag) __c11_atomic_fetch_or(&wq->wq_flags, flag, __ATOMIC_SEQ_CST)
+#define WQ_UNSETFLAG(wq, flag) __c11_atomic_fetch_and(&wq->wq_flags, ~flag, __ATOMIC_SEQ_CST)
#define WQL_ATIMER_BUSY 0x01
#define WQL_ATIMER_WAITING 0x02
-#define WQL_EXCEEDED_CONSTRAINED_THREAD_LIMIT 0x04
-#define WQL_EXCEEDED_TOTAL_THREAD_LIMIT 0x08
-
-#define WQ_VECT_SET_BIT(vector, bit) \
- vector[(bit) / 32] |= (1 << ((bit) % 32))
-
-#define WQ_VECT_CLEAR_BIT(vector, bit) \
- vector[(bit) / 32] &= ~(1 << ((bit) % 32))
-
-#define WQ_VECT_TEST_BIT(vector, bit) \
- vector[(bit) / 32] & (1 << ((bit) % 32))
#define WORKQUEUE_MAXTHREADS 512
#define WQ_YIELDED_THRESHOLD 2000
objects = {
/* Begin PBXAggregateTarget section */
+ 92799B441B96A5FD00861404 /* Tests */ = {
+ isa = PBXAggregateTarget;
+ buildConfigurationList = 92799B451B96A5FE00861404 /* Build configuration list for PBXAggregateTarget "Tests" */;
+ buildPhases = (
+ );
+ dependencies = (
+ 925383BB1BD01EED00F745DB /* PBXTargetDependency */,
+ );
+ name = Tests;
+ productName = Tests;
+ };
C90E7AAC15DC3D3300A06D48 /* All */ = {
isa = PBXAggregateTarget;
buildConfigurationList = C90E7AAD15DC3D3300A06D48 /* Build configuration list for PBXAggregateTarget "All" */;
dependencies = (
6E8C16821B14F11800C8987C /* PBXTargetDependency */,
C90E7AB015DC3D3D00A06D48 /* PBXTargetDependency */,
+ C04545BC1C58510F006A53B3 /* PBXTargetDependency */,
C90E7AB215DC3D3D00A06D48 /* PBXTargetDependency */,
);
name = All;
dependencies = (
6E8C16841B14F11B00C8987C /* PBXTargetDependency */,
C98832C615DEB44B00B3308E /* PBXTargetDependency */,
+ C04545BE1C585487006A53B3 /* PBXTargetDependency */,
C98832C815DEB44B00B3308E /* PBXTargetDependency */,
74E594AB1613AD7F006C417B /* PBXTargetDependency */,
C91D01BC162CA80F0002E29A /* PBXTargetDependency */,
6E8C166F1B14F08A00C8987C /* introspection_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4657D4017284F7B007D1847 /* introspection_private.h */; };
6E8C16701B14F08A00C8987C /* tsd_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F415B7513200270056 /* tsd_private.h */; };
6E8C16711B14F08A00C8987C /* posix_sched.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F015B7513200270056 /* posix_sched.h */; };
- 6E8C16721B14F08A00C8987C /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E473BE1719AC305A009C5A52 /* atomic_llsc.h */; };
6E8C16731B14F08A00C8987C /* qos_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C99B17DA189C2E1B00991D38 /* qos_private.h */; };
- 6E8C16741B14F08A00C8987C /* atomic.h in Headers */ = {isa = PBXBuildFile; fileRef = E473BE1819AC305A009C5A52 /* atomic.h */; };
6E8C16751B14F08A00C8987C /* spawn.h in Headers */ = {isa = PBXBuildFile; fileRef = C98C95D818FF1F4E005654FB /* spawn.h */; };
6E8C16761B14F08A00C8987C /* spinlock_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F715B7513200270056 /* spinlock_private.h */; };
6E8C16771B14F08A00C8987C /* workqueue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F915B7513200270056 /* workqueue_private.h */; };
74E5949C1613AAF4006C417B /* pthread_atfork.c in Sources */ = {isa = PBXBuildFile; fileRef = C90E7AB415DC40D900A06D48 /* pthread_atfork.c */; };
74E5949E1613AAF4006C417B /* pthread_asm.s in Sources */ = {isa = PBXBuildFile; fileRef = C99AD87D15DF04D10009A6F8 /* pthread_asm.s */; };
74E594A61613AB10006C417B /* pthread_cancelable_cancel.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A1BF5215C9A9F5006BB313 /* pthread_cancelable_cancel.c */; };
+ 9202B2311D1A5B3F00945880 /* introspection.h in Headers */ = {isa = PBXBuildFile; fileRef = 9202B2301D1A5B3F00945880 /* introspection.h */; };
+ 9202B2321D1A5B6200945880 /* introspection.h in Headers */ = {isa = PBXBuildFile; fileRef = 9202B2301D1A5B3F00945880 /* introspection.h */; settings = {ATTRIBUTES = (Public, ); }; };
+ 924D8EDF1C11833D002AC2BC /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
+ 924D8EE01C11833D002AC2BC /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
+ 924D8EE11C11833E002AC2BC /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
+ 924D8EE21C11833E002AC2BC /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
+ C04545A41C584F4A006A53B3 /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C91B0EB29D005915CE /* resolver.c */; };
+ C04545A51C584F4A006A53B3 /* pthread.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FA15B7513200270056 /* pthread.c */; };
+ C04545A61C584F4A006A53B3 /* pthread_cancelable.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F115B7513200270056 /* pthread_cancelable.c */; };
+ C04545A71C584F4A006A53B3 /* pthread_cancelable_cancel.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A1BF5215C9A9F5006BB313 /* pthread_cancelable_cancel.c */; };
+ C04545A81C584F4A006A53B3 /* pthread_cond.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F215B7513200270056 /* pthread_cond.c */; };
+ C04545A91C584F4A006A53B3 /* pthread_mutex.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F515B7513200270056 /* pthread_mutex.c */; };
+ C04545AA1C584F4A006A53B3 /* pthread_mutex_up.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */; };
+ C04545AB1C584F4A006A53B3 /* pthread_rwlock.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F615B7513200270056 /* pthread_rwlock.c */; };
+ C04545AC1C584F4A006A53B3 /* pthread_support.c in Sources */ = {isa = PBXBuildFile; fileRef = C975D5DC15C9D16B0098ECD8 /* pthread_support.c */; };
+ C04545AD1C584F4A006A53B3 /* pthread_tsd.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F815B7513200270056 /* pthread_tsd.c */; };
+ C04545AE1C584F4A006A53B3 /* thread_setup.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FC15B7513200270056 /* thread_setup.c */; };
+ C04545AF1C584F4A006A53B3 /* qos.c in Sources */ = {isa = PBXBuildFile; fileRef = C9244C1C1860D8EF00075748 /* qos.c */; };
+ C04545B01C584F4A006A53B3 /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
+ C04545B11C584F4A006A53B3 /* pthread_atfork.c in Sources */ = {isa = PBXBuildFile; fileRef = C90E7AB415DC40D900A06D48 /* pthread_atfork.c */; };
+ C04545B21C584F4A006A53B3 /* pthread_asm.s in Sources */ = {isa = PBXBuildFile; fileRef = C99AD87D15DF04D10009A6F8 /* pthread_asm.s */; };
C90E7AA415DC3C9D00A06D48 /* pthread.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FA15B7513200270056 /* pthread.c */; };
C90E7AA515DC3C9D00A06D48 /* pthread_cancelable.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F115B7513200270056 /* pthread_cancelable.c */; };
C90E7AA615DC3C9D00A06D48 /* pthread_cond.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F215B7513200270056 /* pthread_cond.c */; };
C9D75E4216127B3900C2FB26 /* kern_synch.c in Sources */ = {isa = PBXBuildFile; fileRef = C9169DDB1603DE84005A2F8C /* kern_synch.c */; };
E4063CF31906B75A000202F9 /* qos.h in Headers */ = {isa = PBXBuildFile; fileRef = E4063CF21906B4FB000202F9 /* qos.h */; settings = {ATTRIBUTES = (Private, ); }; };
E4657D4117284F7B007D1847 /* introspection_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4657D4017284F7B007D1847 /* introspection_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
- E473BE1919AC305A009C5A52 /* atomic_llsc.h in Headers */ = {isa = PBXBuildFile; fileRef = E473BE1719AC305A009C5A52 /* atomic_llsc.h */; };
- E473BE1A19AC305A009C5A52 /* atomic.h in Headers */ = {isa = PBXBuildFile; fileRef = E473BE1819AC305A009C5A52 /* atomic.h */; };
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
remoteGlobalIDString = 74E594911613AAF4006C417B;
remoteInfo = "libpthread.a eOS";
};
+ 925383BA1BD01EED00F745DB /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = C9A325D915B7347000270056 /* Project object */;
+ proxyType = 1;
+ remoteGlobalIDString = 92B275F01BCE4C5E007D06D7;
+ remoteInfo = darwintests;
+ };
+ C04545BB1C58510F006A53B3 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = C9A325D915B7347000270056 /* Project object */;
+ proxyType = 1;
+ remoteGlobalIDString = C04545A21C584F4A006A53B3;
+ remoteInfo = "libpthread.a generic";
+ };
+ C04545BD1C585487006A53B3 /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = C9A325D915B7347000270056 /* Project object */;
+ proxyType = 1;
+ remoteGlobalIDString = C04545A21C584F4A006A53B3;
+ remoteInfo = "libpthread.a generic";
+ };
C90E7AAF15DC3D3D00A06D48 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = C9A325D915B7347000270056 /* Project object */;
6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pthread_mutex_up.c; sourceTree = "<group>"; };
6EB232C91B0EB29D005915CE /* resolver.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resolver.c; sourceTree = "<group>"; };
6EB232CA1B0EB29D005915CE /* resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = "<group>"; };
- 720A46E21B8D125900AB7950 /* Makefile.common */ = {isa = PBXFileReference; lastKnownFileType = text; path = Makefile.common; sourceTree = "<group>"; };
74E594A41613AAF4006C417B /* libpthread_eOS.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libpthread_eOS.a; sourceTree = BUILT_PRODUCTS_DIR; };
- 9207EB711AA6E008006FFC86 /* wq_kevent_stress.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = wq_kevent_stress.c; sourceTree = "<group>"; };
+ 9202B2301D1A5B3F00945880 /* introspection.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection.h; sourceTree = "<group>"; };
+ 9235CA551CA48D010015C92B /* kext_development.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = kext_development.xcconfig; sourceTree = "<group>"; };
9240BF321AA669C4003C99B4 /* wqtrace.lua */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = wqtrace.lua; path = tools/wqtrace.lua; sourceTree = SOURCE_ROOT; };
- 9264D6831A9D3E010094346B /* atfork.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = atfork.c; sourceTree = "<group>"; };
- 9264D6841A9D3E010094346B /* cond.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = cond.c; sourceTree = "<group>"; };
- 9264D6851A9D3E010094346B /* cond_timed.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = cond_timed.c; sourceTree = "<group>"; };
- 9264D6861A9D3E010094346B /* custom_stack.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = custom_stack.c; sourceTree = "<group>"; };
- 9264D6871A9D3E010094346B /* join.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = join.c; sourceTree = "<group>"; };
- 9264D6881A9D3E010094346B /* Makefile */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.make; path = Makefile; sourceTree = "<group>"; };
- 9264D6891A9D3E010094346B /* maxwidth.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = maxwidth.c; sourceTree = "<group>"; };
- 9264D68A1A9D3E010094346B /* mutex.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = mutex.c; sourceTree = "<group>"; };
- 9264D68B1A9D3E010094346B /* once.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = once.c; sourceTree = "<group>"; };
- 9264D68C1A9D3E010094346B /* rwlock-signal.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = "rwlock-signal.c"; sourceTree = "<group>"; };
- 9264D68D1A9D3E010094346B /* rwlock.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = rwlock.c; sourceTree = "<group>"; };
- 9264D68E1A9D3E010094346B /* tsd.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = tsd.c; sourceTree = "<group>"; };
- 9264D68F1A9D3E010094346B /* wq_block_handoff.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = wq_block_handoff.c; sourceTree = "<group>"; };
- 9264D6901A9D3E010094346B /* wq_kevent.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = wq_kevent.c; sourceTree = "<group>"; };
- 92C577E11A378A85004AF98B /* kext_debug.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = kext_debug.xcconfig; sourceTree = "<group>"; };
- 92C577EA1A378C9C004AF98B /* pthread_debug.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = pthread_debug.xcconfig; sourceTree = "<group>"; };
+ 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pthread_cwd.c; sourceTree = "<group>"; };
A98FE72D19479F7C007718DA /* qos_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = qos_private.h; sourceTree = "<group>"; };
- C90E7A9F15DC3C3800A06D48 /* libpthread.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libpthread.a; sourceTree = BUILT_PRODUCTS_DIR; };
+ C04545881C584493006A53B3 /* run-on-install.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "run-on-install.sh"; sourceTree = "<group>"; };
+ C04545B81C584F4A006A53B3 /* libpthread.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libpthread.a; sourceTree = BUILT_PRODUCTS_DIR; };
+ C04545B91C584F8B006A53B3 /* static.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = static.xcconfig; sourceTree = "<group>"; };
+ C90E7A9F15DC3C3800A06D48 /* libpthread_dyld.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libpthread_dyld.a; sourceTree = BUILT_PRODUCTS_DIR; };
C90E7AB415DC40D900A06D48 /* pthread_atfork.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pthread_atfork.c; sourceTree = "<group>"; };
C9153094167ACAB8006BB094 /* install-symlinks.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-symlinks.sh"; sourceTree = "<group>"; };
C9153095167ACC22006BB094 /* private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = private.h; sourceTree = "<group>"; };
C99AD87D15DF04D10009A6F8 /* pthread_asm.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; path = pthread_asm.s; sourceTree = "<group>"; };
C99B17DA189C2E1B00991D38 /* qos_private.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = qos_private.h; sourceTree = "<group>"; };
C99EA612161F8288003EBC56 /* eos.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = eos.xcconfig; sourceTree = "<group>"; };
+ C9A1998A1C8E271F00CE102A /* tests */ = {isa = PBXFileReference; lastKnownFileType = folder; path = tests; sourceTree = "<group>"; };
C9A1BF5215C9A9F5006BB313 /* pthread_cancelable_cancel.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pthread_cancelable_cancel.c; sourceTree = "<group>"; };
C9A1BF5415C9CB9D006BB313 /* pthread_cancelable_legacy.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pthread_cancelable_legacy.c; sourceTree = "<group>"; };
C9A325E215B7347000270056 /* libsystem_pthread.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libsystem_pthread.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
C9A960B618452CDD00AE10C8 /* install-lldbmacros.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "install-lldbmacros.sh"; sourceTree = "<group>"; };
C9C2212D15FA978D00447568 /* pthread.aliases */ = {isa = PBXFileReference; lastKnownFileType = text; path = pthread.aliases; sourceTree = "<group>"; };
C9C533841607C928009988FA /* kern_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kern_internal.h; sourceTree = "<group>"; };
- C9C5F7381B8CE2D600C873EF /* rwlock-22244050.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = "rwlock-22244050.c"; sourceTree = "<group>"; };
C9CA27D91602813000259F78 /* pthread.kext */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = pthread.kext; sourceTree = BUILT_PRODUCTS_DIR; };
C9CA27DC1602813000259F78 /* Kernel.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Kernel.framework; path = System/Library/Frameworks/Kernel.framework; sourceTree = SDKROOT; };
C9D9E8FE1626248800448CED /* pthread-Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "pthread-Info.plist"; sourceTree = "<group>"; };
C9DCA2A115DC4F2000D057E2 /* install-manpages.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = "<group>"; };
E4063CF21906B4FB000202F9 /* qos.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = qos.h; sourceTree = "<group>"; };
E4657D4017284F7B007D1847 /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = "<group>"; };
- E473BE1719AC305A009C5A52 /* atomic_llsc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomic_llsc.h; sourceTree = "<group>"; };
- E473BE1819AC305A009C5A52 /* atomic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atomic.h; sourceTree = "<group>"; };
E4D962F919086AD600E8A9F2 /* qos.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = qos.h; sourceTree = "<group>"; };
E4D962FC19086C5700E8A9F2 /* install-sys-headers.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-sys-headers.sh"; sourceTree = "<group>"; };
FC30E28D16A747AD00A25B5F /* synch_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = synch_internal.h; sourceTree = "<group>"; };
);
runOnlyForDeploymentPostprocessing = 0;
};
+ C04545B31C584F4A006A53B3 /* Frameworks */ = {
+ isa = PBXFrameworksBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
C90E7A9C15DC3C3800A06D48 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
path = kern;
sourceTree = "<group>";
};
- 9264D6821A9D3E010094346B /* tests */ = {
- isa = PBXGroup;
- children = (
- 720A46E21B8D125900AB7950 /* Makefile.common */,
- 9264D6881A9D3E010094346B /* Makefile */,
- 9264D6831A9D3E010094346B /* atfork.c */,
- 9264D6841A9D3E010094346B /* cond.c */,
- 9264D6851A9D3E010094346B /* cond_timed.c */,
- 9264D6861A9D3E010094346B /* custom_stack.c */,
- 9264D6871A9D3E010094346B /* join.c */,
- 9264D6891A9D3E010094346B /* maxwidth.c */,
- 9264D68A1A9D3E010094346B /* mutex.c */,
- 9264D68B1A9D3E010094346B /* once.c */,
- C9C5F7381B8CE2D600C873EF /* rwlock-22244050.c */,
- 9264D68C1A9D3E010094346B /* rwlock-signal.c */,
- 9264D68D1A9D3E010094346B /* rwlock.c */,
- 9264D68E1A9D3E010094346B /* tsd.c */,
- 9264D68F1A9D3E010094346B /* wq_block_handoff.c */,
- 9264D6901A9D3E010094346B /* wq_kevent.c */,
- 9207EB711AA6E008006FFC86 /* wq_kevent_stress.c */,
- );
- path = tests;
- sourceTree = SOURCE_ROOT;
- };
C9169DD91603DE68005A2F8C /* kern */ = {
isa = PBXGroup;
children = (
C9D70EBE167AC7D100D52713 /* private */,
C948FCC015D187AD00180BF5 /* man */,
C9A325ED15B74FB600270056 /* src */,
- E4027C171AFC2B6D00ACCF91 /* os */,
- 9264D6821A9D3E010094346B /* tests */,
+ C9A1998A1C8E271F00CE102A /* tests */,
9240BF331AA669EB003C99B4 /* tools */,
C9A3260B15B759A100270056 /* xcodescripts */,
C9CA27DA1602813000259F78 /* Frameworks */,
isa = PBXGroup;
children = (
C9A325E215B7347000270056 /* libsystem_pthread.dylib */,
- C90E7A9F15DC3C3800A06D48 /* libpthread.a */,
+ C90E7A9F15DC3C3800A06D48 /* libpthread_dyld.a */,
74E594A41613AAF4006C417B /* libpthread_eOS.a */,
C9CA27D91602813000259F78 /* pthread.kext */,
6E8C16801B14F08A00C8987C /* libsystem_pthread.dylib */,
+ C04545B81C584F4A006A53B3 /* libpthread.a */,
);
name = Products;
sourceTree = "<group>";
C90E7AB415DC40D900A06D48 /* pthread_atfork.c */,
C9A325F115B7513200270056 /* pthread_cancelable.c */,
C9A325F215B7513200270056 /* pthread_cond.c */,
+ 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */,
C9A325F515B7513200270056 /* pthread_mutex.c */,
6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */,
C9A325F615B7513200270056 /* pthread_rwlock.c */,
C9A325FD15B7513700270056 /* pthread */ = {
isa = PBXGroup;
children = (
+ 9202B2301D1A5B3F00945880 /* introspection.h */,
C9A325FE15B7513700270056 /* pthread.h */,
C9A325FF15B7513700270056 /* pthread_impl.h */,
C9A3260015B7513700270056 /* pthread_spis.h */,
C9A3260B15B759A100270056 /* xcodescripts */ = {
isa = PBXGroup;
children = (
+ 9235CA551CA48D010015C92B /* kext_development.xcconfig */,
C91D01BA162893CD0002E29A /* kext.xcconfig */,
- 92C577E11A378A85004AF98B /* kext_debug.xcconfig */,
C9A3260C15B759B600270056 /* pthread.xcconfig */,
- 92C577EA1A378C9C004AF98B /* pthread_debug.xcconfig */,
6E8C16851B14F14000C8987C /* pthread_introspection.xcconfig */,
C99EA612161F8288003EBC56 /* eos.xcconfig */,
+ C04545B91C584F8B006A53B3 /* static.xcconfig */,
C9DCA2A115DC4F2000D057E2 /* install-manpages.sh */,
C9153094167ACAB8006BB094 /* install-symlinks.sh */,
+ C04545881C584493006A53B3 /* run-on-install.sh */,
E4D962FC19086C5700E8A9F2 /* install-sys-headers.sh */,
C9A960B618452CDD00AE10C8 /* install-lldbmacros.sh */,
C979E9FC18A2BF2C000951E5 /* install-codes.sh */,
path = private;
sourceTree = "<group>";
};
- E4027C171AFC2B6D00ACCF91 /* os */ = {
- isa = PBXGroup;
- children = (
- E473BE1819AC305A009C5A52 /* atomic.h */,
- E473BE1719AC305A009C5A52 /* atomic_llsc.h */,
- );
- path = os;
- sourceTree = "<group>";
- };
FC5A372217CEB3D6008C323E /* sys */ = {
isa = PBXGroup;
children = (
isa = PBXHeadersBuildPhase;
buildActionMask = 2147483647;
files = (
- 6E8C16721B14F08A00C8987C /* atomic_llsc.h in Headers */,
- 6E8C16741B14F08A00C8987C /* atomic.h in Headers */,
6E8C16711B14F08A00C8987C /* posix_sched.h in Headers */,
6E8C166F1B14F08A00C8987C /* introspection_private.h in Headers */,
6E8C166C1B14F08A00C8987C /* qos.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
+ C04545B41C584F4A006A53B3 /* Headers */ = {
+ isa = PBXHeadersBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
C90E7A9D15DC3C3800A06D48 /* Headers */ = {
isa = PBXHeadersBuildPhase;
buildActionMask = 2147483647;
C9A1BF4E15C9A594006BB313 /* pthread_impl.h in Headers */,
E4063CF31906B75A000202F9 /* qos.h in Headers */,
C9A1BF4F15C9A598006BB313 /* pthread_spis.h in Headers */,
+ 9202B2321D1A5B6200945880 /* introspection.h in Headers */,
C9A1BF5015C9A59B006BB313 /* sched.h in Headers */,
E4657D4117284F7B007D1847 /* introspection_private.h in Headers */,
C9BB478D15E6ADF700F135B7 /* tsd_private.h in Headers */,
C99AD87B15DEC4BC0009A6F8 /* posix_sched.h in Headers */,
- E473BE1919AC305A009C5A52 /* atomic_llsc.h in Headers */,
C9CCFB9D18B6D0910060CAAE /* qos_private.h in Headers */,
- E473BE1A19AC305A009C5A52 /* atomic.h in Headers */,
C98C95D918FF1F4E005654FB /* spawn.h in Headers */,
C99AD87C15DEC5290009A6F8 /* spinlock_private.h in Headers */,
C9BB478B15E6ABD900F135B7 /* workqueue_private.h in Headers */,
isa = PBXHeadersBuildPhase;
buildActionMask = 2147483647;
files = (
+ 9202B2311D1A5B3F00945880 /* introspection.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXHeadersBuildPhase section */
/* Begin PBXLegacyTarget section */
- 720A46DE1B8D0B7700AB7950 /* Tests */ = {
+ 92B275F01BCE4C5E007D06D7 /* darwintests */ = {
isa = PBXLegacyTarget;
buildArgumentsString = "$(ACTION)";
- buildConfigurationList = 720A46E11B8D0B7700AB7950 /* Build configuration list for PBXLegacyTarget "Tests" */;
+ buildConfigurationList = 92B275F11BCE4C5E007D06D7 /* Build configuration list for PBXLegacyTarget "darwintests" */;
buildPhases = (
);
buildToolPath = /usr/bin/make;
buildWorkingDirectory = tests;
dependencies = (
);
- name = Tests;
+ name = darwintests;
passBuildSettingsInEnvironment = 1;
- productName = Test;
+ productName = darwintests;
};
/* End PBXLegacyTarget section */
productReference = 74E594A41613AAF4006C417B /* libpthread_eOS.a */;
productType = "com.apple.product-type.library.static";
};
- C90E7A9E15DC3C3800A06D48 /* libpthread.a */ = {
+ C04545A21C584F4A006A53B3 /* libpthread.a generic */ = {
+ isa = PBXNativeTarget;
+ buildConfigurationList = C04545B51C584F4A006A53B3 /* Build configuration list for PBXNativeTarget "libpthread.a generic" */;
+ buildPhases = (
+ C04545A31C584F4A006A53B3 /* Sources */,
+ C04545B31C584F4A006A53B3 /* Frameworks */,
+ C04545B41C584F4A006A53B3 /* Headers */,
+ C04545BA1C585034006A53B3 /* Symlink libpthread.a to the loaderd path */,
+ );
+ buildRules = (
+ );
+ dependencies = (
+ );
+ name = "libpthread.a generic";
+ productName = libpthread.a;
+ productReference = C04545B81C584F4A006A53B3 /* libpthread.a */;
+ productType = "com.apple.product-type.library.static";
+ };
+ C90E7A9E15DC3C3800A06D48 /* libpthread.a dyld */ = {
isa = PBXNativeTarget;
- buildConfigurationList = C90E7AA115DC3C3800A06D48 /* Build configuration list for PBXNativeTarget "libpthread.a" */;
+ buildConfigurationList = C90E7AA115DC3C3800A06D48 /* Build configuration list for PBXNativeTarget "libpthread.a dyld" */;
buildPhases = (
C90E7A9B15DC3C3800A06D48 /* Sources */,
C90E7A9C15DC3C3800A06D48 /* Frameworks */,
C90E7A9D15DC3C3800A06D48 /* Headers */,
+ C04545891C5844F8006A53B3 /* Symlink libpthread_dyld.a to libpthread.a */,
);
buildRules = (
);
dependencies = (
);
- name = libpthread.a;
+ name = "libpthread.a dyld";
productName = libpthread.a;
- productReference = C90E7A9F15DC3C3800A06D48 /* libpthread.a */;
+ productReference = C90E7A9F15DC3C3800A06D48 /* libpthread_dyld.a */;
productType = "com.apple.product-type.library.static";
};
C9A325E115B7347000270056 /* libsystem_pthread.dylib */ = {
C9A325D915B7347000270056 /* Project object */ = {
isa = PBXProject;
attributes = {
- LastUpgradeCheck = 0700;
+ LastUpgradeCheck = 0800;
ORGANIZATIONNAME = "";
TargetAttributes = {
- 720A46DE1B8D0B7700AB7950 = {
+ 92799B441B96A5FD00861404 = {
CreatedOnToolsVersion = 7.0;
};
+ 92B275F01BCE4C5E007D06D7 = {
+ CreatedOnToolsVersion = 7.1;
+ };
};
};
buildConfigurationList = C9A325DC15B7347000270056 /* Build configuration list for PBXProject "libpthread" */;
C90E7AAC15DC3D3300A06D48 /* All */,
C91D01B5162892FF0002E29A /* Kext */,
C98832C115DEB44000B3308E /* Embedded */,
- 720A46DE1B8D0B7700AB7950 /* Tests */,
+ 92799B441B96A5FD00861404 /* Tests */,
6E8C16511B14F08A00C8987C /* libsystem_pthread.dylib introspection */,
C9A325E115B7347000270056 /* libsystem_pthread.dylib */,
- C90E7A9E15DC3C3800A06D48 /* libpthread.a */,
+ C04545A21C584F4A006A53B3 /* libpthread.a generic */,
+ C90E7A9E15DC3C3800A06D48 /* libpthread.a dyld */,
74E594911613AAF4006C417B /* libpthread.a eOS */,
C9CA27D81602813000259F78 /* pthread */,
+ 92B275F01BCE4C5E007D06D7 /* darwintests */,
);
};
/* End PBXProject section */
shellPath = /bin/sh;
shellScript = "dtrace -h -C -s \"${SCRIPT_INPUT_FILE_0}\" -o \"${SCRIPT_OUTPUT_FILE_0}\"";
};
+ C04545891C5844F8006A53B3 /* Symlink libpthread_dyld.a to libpthread.a */ = {
+ isa = PBXShellScriptBuildPhase;
+ buildActionMask = 8;
+ files = (
+ );
+ inputPaths = (
+ "$(SRCROOT)/xcodescripts/run-on-install.sh",
+ );
+ name = "Symlink libpthread_dyld.a to libpthread.a";
+ outputPaths = (
+ );
+ runOnlyForDeploymentPostprocessing = 1;
+ shellPath = /bin/sh;
+ shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf libpthread_dyld.a ${DSTROOT}${INSTALL_PATH}/libpthread.a";
+ };
+ C04545BA1C585034006A53B3 /* Symlink libpthread.a to the loaderd path */ = {
+ isa = PBXShellScriptBuildPhase;
+ buildActionMask = 8;
+ files = (
+ );
+ inputPaths = (
+ "$(SRCROOT)/xcodescripts/run-on-install.sh",
+ );
+ name = "Symlink libpthread.a to the loaderd path";
+ outputPaths = (
+ "${DSTROOT}/usr/local/lib/loaderd/libpthread.a",
+ );
+ runOnlyForDeploymentPostprocessing = 1;
+ shellPath = /bin/sh;
+ shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ../../../..${INSTALL_PATH}/libpthread.a ${DSTROOT}/usr/local/lib/loaderd/libpthread.a";
+ };
C979E9FD18A2BF3D000951E5 /* Install Codes file */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 8;
6E8C16581B14F08A00C8987C /* pthread_cond.c in Sources */,
6E8C16591B14F08A00C8987C /* pthread_mutex.c in Sources */,
6E8C165A1B14F08A00C8987C /* pthread_mutex_up.c in Sources */,
+ 924D8EE21C11833E002AC2BC /* pthread_cwd.c in Sources */,
6E8C165B1B14F08A00C8987C /* qos.c in Sources */,
6E8C165C1B14F08A00C8987C /* pthread_rwlock.c in Sources */,
6E8C165D1B14F08A00C8987C /* pthread_tsd.c in Sources */,
74E594991613AAF4006C417B /* pthread_tsd.c in Sources */,
74E5949A1613AAF4006C417B /* thread_setup.c in Sources */,
C9244C1F1860D96E00075748 /* qos.c in Sources */,
+ 924D8EDF1C11833D002AC2BC /* pthread_cwd.c in Sources */,
74E5949C1613AAF4006C417B /* pthread_atfork.c in Sources */,
74E5949E1613AAF4006C417B /* pthread_asm.s in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
+ C04545A31C584F4A006A53B3 /* Sources */ = {
+ isa = PBXSourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ C04545A41C584F4A006A53B3 /* resolver.c in Sources */,
+ C04545A51C584F4A006A53B3 /* pthread.c in Sources */,
+ C04545A61C584F4A006A53B3 /* pthread_cancelable.c in Sources */,
+ C04545A71C584F4A006A53B3 /* pthread_cancelable_cancel.c in Sources */,
+ C04545A81C584F4A006A53B3 /* pthread_cond.c in Sources */,
+ C04545A91C584F4A006A53B3 /* pthread_mutex.c in Sources */,
+ C04545AA1C584F4A006A53B3 /* pthread_mutex_up.c in Sources */,
+ C04545AB1C584F4A006A53B3 /* pthread_rwlock.c in Sources */,
+ C04545AC1C584F4A006A53B3 /* pthread_support.c in Sources */,
+ C04545AD1C584F4A006A53B3 /* pthread_tsd.c in Sources */,
+ C04545AE1C584F4A006A53B3 /* thread_setup.c in Sources */,
+ C04545AF1C584F4A006A53B3 /* qos.c in Sources */,
+ C04545B01C584F4A006A53B3 /* pthread_cwd.c in Sources */,
+ C04545B11C584F4A006A53B3 /* pthread_atfork.c in Sources */,
+ C04545B21C584F4A006A53B3 /* pthread_asm.s in Sources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
C90E7A9B15DC3C3800A06D48 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
C90E7AA915DC3C9D00A06D48 /* pthread_support.c in Sources */,
C90E7AAA15DC3C9D00A06D48 /* pthread_tsd.c in Sources */,
C90E7AAB15DC3C9D00A06D48 /* thread_setup.c in Sources */,
+ 924D8EE01C11833D002AC2BC /* pthread_cwd.c in Sources */,
C90E7AB915DC40D900A06D48 /* pthread_atfork.c in Sources */,
C99AD87F15DF04D10009A6F8 /* pthread_asm.s in Sources */,
);
6EB232CB1B0EB2E2005915CE /* pthread_mutex_up.c in Sources */,
C9244C1D1860D8EF00075748 /* qos.c in Sources */,
C9A1BF4B15C9A578006BB313 /* pthread_rwlock.c in Sources */,
+ 924D8EE11C11833E002AC2BC /* pthread_cwd.c in Sources */,
C9A1BF4C15C9A578006BB313 /* pthread_tsd.c in Sources */,
C9A1BF5315C9A9F5006BB313 /* pthread_cancelable_cancel.c in Sources */,
C9A1BF5515C9CB9D006BB313 /* pthread_cancelable_legacy.c in Sources */,
target = 74E594911613AAF4006C417B /* libpthread.a eOS */;
targetProxy = 74E594AA1613AD7F006C417B /* PBXContainerItemProxy */;
};
+ 925383BB1BD01EED00F745DB /* PBXTargetDependency */ = {
+ isa = PBXTargetDependency;
+ target = 92B275F01BCE4C5E007D06D7 /* darwintests */;
+ targetProxy = 925383BA1BD01EED00F745DB /* PBXContainerItemProxy */;
+ };
+ C04545BC1C58510F006A53B3 /* PBXTargetDependency */ = {
+ isa = PBXTargetDependency;
+ target = C04545A21C584F4A006A53B3 /* libpthread.a generic */;
+ targetProxy = C04545BB1C58510F006A53B3 /* PBXContainerItemProxy */;
+ };
+ C04545BE1C585487006A53B3 /* PBXTargetDependency */ = {
+ isa = PBXTargetDependency;
+ target = C04545A21C584F4A006A53B3 /* libpthread.a generic */;
+ targetProxy = C04545BD1C585487006A53B3 /* PBXContainerItemProxy */;
+ };
C90E7AB015DC3D3D00A06D48 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = C9A325E115B7347000270056 /* libsystem_pthread.dylib */;
};
C90E7AB215DC3D3D00A06D48 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
- target = C90E7A9E15DC3C3800A06D48 /* libpthread.a */;
+ target = C90E7A9E15DC3C3800A06D48 /* libpthread.a dyld */;
targetProxy = C90E7AB115DC3D3D00A06D48 /* PBXContainerItemProxy */;
};
C91D01B9162893070002E29A /* PBXTargetDependency */ = {
};
C98832C815DEB44B00B3308E /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
- target = C90E7A9E15DC3C3800A06D48 /* libpthread.a */;
+ target = C90E7A9E15DC3C3800A06D48 /* libpthread.a dyld */;
targetProxy = C98832C715DEB44B00B3308E /* PBXContainerItemProxy */;
};
/* End PBXTargetDependency section */
};
name = Release;
};
- 6E8C167F1B14F08A00C8987C /* Debug */ = {
+ 74E594A31613AAF4006C417B /* Release */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 6E8C16851B14F14000C8987C /* pthread_introspection.xcconfig */;
+ baseConfigurationReference = C99EA612161F8288003EBC56 /* eos.xcconfig */;
buildSettings = {
- EXECUTABLE_PREFIX = lib;
- PRODUCT_NAME = system_pthread;
};
- name = Debug;
+ name = Release;
};
- 720A46DF1B8D0B7700AB7950 /* Release */ = {
+ 9235CA491CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
- ALWAYS_SEARCH_USER_PATHS = NO;
- CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
- CLANG_CXX_LIBRARY = "libc++";
- CLANG_ENABLE_MODULES = YES;
- CLANG_ENABLE_OBJC_ARC = YES;
- CLANG_WARN_BOOL_CONVERSION = YES;
- CLANG_WARN_CONSTANT_CONVERSION = YES;
- CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
- CLANG_WARN_EMPTY_BODY = YES;
- CLANG_WARN_ENUM_CONVERSION = YES;
- CLANG_WARN_INT_CONVERSION = YES;
- CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
- CLANG_WARN_UNREACHABLE_CODE = YES;
- CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
- COPY_PHASE_STRIP = NO;
- DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
- ENABLE_NS_ASSERTIONS = NO;
- ENABLE_STRICT_OBJC_MSGSEND = YES;
- GCC_C_LANGUAGE_STANDARD = gnu99;
- GCC_NO_COMMON_BLOCKS = YES;
- GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
- GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
- GCC_WARN_UNDECLARED_SELECTOR = YES;
- GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
- GCC_WARN_UNUSED_FUNCTION = YES;
- GCC_WARN_UNUSED_VARIABLE = YES;
- MTL_ENABLE_DEBUG_INFO = NO;
- OTHER_CFLAGS = "";
- OTHER_LDFLAGS = "";
- PRODUCT_NAME = "$(TARGET_NAME)";
- SDKROOT = macosx.internal;
+ COPY_PHASE_STRIP = YES;
};
- name = Release;
+ name = Debug;
};
- 720A46E01B8D0B7700AB7950 /* Debug */ = {
+ 9235CA4A1CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
- ALWAYS_SEARCH_USER_PATHS = NO;
- CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x";
- CLANG_CXX_LIBRARY = "libc++";
- CLANG_ENABLE_MODULES = YES;
- CLANG_ENABLE_OBJC_ARC = YES;
- CLANG_WARN_BOOL_CONVERSION = YES;
- CLANG_WARN_CONSTANT_CONVERSION = YES;
- CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
- CLANG_WARN_EMPTY_BODY = YES;
- CLANG_WARN_ENUM_CONVERSION = YES;
- CLANG_WARN_INT_CONVERSION = YES;
- CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
- CLANG_WARN_UNREACHABLE_CODE = YES;
- CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
- COPY_PHASE_STRIP = NO;
- DEBUGGING_SYMBOLS = YES;
- DEBUG_INFORMATION_FORMAT = dwarf;
- ENABLE_STRICT_OBJC_MSGSEND = YES;
- ENABLE_TESTABILITY = YES;
- GCC_C_LANGUAGE_STANDARD = gnu99;
- GCC_DYNAMIC_NO_PIC = NO;
- GCC_GENERATE_DEBUGGING_SYMBOLS = YES;
- GCC_NO_COMMON_BLOCKS = YES;
- GCC_OPTIMIZATION_LEVEL = 0;
- GCC_PREPROCESSOR_DEFINITIONS = (
- "DEBUG=1",
- "$(inherited)",
- );
- GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
- GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
- GCC_WARN_UNDECLARED_SELECTOR = YES;
- GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
- GCC_WARN_UNUSED_FUNCTION = YES;
- GCC_WARN_UNUSED_VARIABLE = YES;
- MTL_ENABLE_DEBUG_INFO = YES;
- ONLY_ACTIVE_ARCH = YES;
- OTHER_CFLAGS = "";
- OTHER_LDFLAGS = "";
PRODUCT_NAME = "$(TARGET_NAME)";
- SDKROOT = macosx.internal;
};
name = Debug;
};
- 74E594A31613AAF4006C417B /* Release */ = {
+ 9235CA4B1CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = C99EA612161F8288003EBC56 /* eos.xcconfig */;
buildSettings = {
+ PRODUCT_NAME = "$(TARGET_NAME)";
};
- name = Release;
+ name = Debug;
};
- 92C577E21A378AC8004AF98B /* Debug */ = {
+ 9235CA4C1CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
- COPY_PHASE_STRIP = YES;
+ PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Debug;
};
- 92C577E31A378AC8004AF98B /* Debug */ = {
+ 9235CA4D1CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
- COMBINE_HIDPI_IMAGES = YES;
PRODUCT_NAME = "$(TARGET_NAME)";
+ SUPPORTED_PLATFORMS = "macosx iphoneos appletvos watchos";
};
name = Debug;
};
- 92C577E41A378AC8004AF98B /* Debug */ = {
+ 9235CA4E1CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
+ baseConfigurationReference = 6E8C16851B14F14000C8987C /* pthread_introspection.xcconfig */;
buildSettings = {
- COMBINE_HIDPI_IMAGES = YES;
- PRODUCT_NAME = "$(TARGET_NAME)";
+ EXECUTABLE_PREFIX = lib;
+ PRODUCT_NAME = system_pthread;
};
name = Debug;
};
- 92C577E51A378AC8004AF98B /* Debug */ = {
+ 9235CA4F1CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
+ baseConfigurationReference = C9A3260C15B759B600270056 /* pthread.xcconfig */;
buildSettings = {
- COMBINE_HIDPI_IMAGES = YES;
- PRODUCT_NAME = "$(TARGET_NAME)";
+ EXECUTABLE_PREFIX = lib;
+ PRODUCT_NAME = system_pthread;
};
name = Debug;
};
- 92C577E61A378AC8004AF98B /* Debug */ = {
+ 9235CA501CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 92C577EA1A378C9C004AF98B /* pthread_debug.xcconfig */;
+ baseConfigurationReference = C04545B91C584F8B006A53B3 /* static.xcconfig */;
buildSettings = {
- EXECUTABLE_PREFIX = lib;
- PRODUCT_NAME = system_pthread;
+ PRODUCT_NAME = "$(PRODUCT_NAME)";
};
name = Debug;
};
- 92C577E71A378AC8004AF98B /* Debug */ = {
+ 9235CA511CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 92C577EA1A378C9C004AF98B /* pthread_debug.xcconfig */;
+ baseConfigurationReference = C9A3260C15B759B600270056 /* pthread.xcconfig */;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
BUILD_VARIANTS = normal;
GCC_PREPROCESSOR_DEFINITIONS = (
"$(BASE_PREPROCESSOR_MACROS)",
"VARIANT_DYLD=1",
+ "VARIANT_STATIC=1",
);
INSTALL_PATH = /usr/local/lib/dyld;
OTHER_LDFLAGS = "";
- PRODUCT_NAME = pthread;
+ PRODUCT_NAME = pthread_dyld;
};
name = Debug;
};
- 92C577E81A378AC8004AF98B /* Debug */ = {
+ 9235CA521CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = C99EA612161F8288003EBC56 /* eos.xcconfig */;
buildSettings = {
};
name = Debug;
};
- 92C577E91A378AC8004AF98B /* Debug */ = {
+ 9235CA531CA48CEA0015C92B /* Debug */ = {
isa = XCBuildConfiguration;
- baseConfigurationReference = 92C577E11A378A85004AF98B /* kext_debug.xcconfig */;
+ baseConfigurationReference = 9235CA551CA48D010015C92B /* kext_development.xcconfig */;
buildSettings = {
- COMBINE_HIDPI_IMAGES = YES;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+ PRODUCT_BUNDLE_IDENTIFIER = "${MODULE_NAME}";
PRODUCT_NAME = "$(TARGET_NAME)";
SDKROOT = macosx.internal;
};
name = Debug;
};
+ 9235CA541CA48CEA0015C92B /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ PRODUCT_NAME = darwintests;
+ SDKROOT = macosx.internal;
+ SUPPORTED_PLATFORMS = "iphoneos macosx watchos appletvos";
+ };
+ name = Debug;
+ };
+ 92799B461B96A5FE00861404 /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ PRODUCT_NAME = "$(TARGET_NAME)";
+ SUPPORTED_PLATFORMS = "macosx iphoneos appletvos watchos";
+ };
+ name = Release;
+ };
+ 92B275F21BCE4C5E007D06D7 /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ PRODUCT_NAME = darwintests;
+ SDKROOT = macosx.internal;
+ SUPPORTED_PLATFORMS = "iphoneos macosx watchos appletvos";
+ };
+ name = Release;
+ };
+ C04545B61C584F4A006A53B3 /* Release */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = C04545B91C584F8B006A53B3 /* static.xcconfig */;
+ buildSettings = {
+ PRODUCT_NAME = "$(PRODUCT_NAME)";
+ };
+ name = Release;
+ };
C90E7AA015DC3C3800A06D48 /* Release */ = {
isa = XCBuildConfiguration;
baseConfigurationReference = C9A3260C15B759B600270056 /* pthread.xcconfig */;
GCC_PREPROCESSOR_DEFINITIONS = (
"$(BASE_PREPROCESSOR_MACROS)",
"VARIANT_DYLD=1",
+ "VARIANT_STATIC=1",
);
INSTALL_PATH = /usr/local/lib/dyld;
OTHER_LDFLAGS = "";
- PRODUCT_NAME = pthread;
+ PRODUCT_NAME = pthread_dyld;
};
name = Release;
};
C90E7AAE15DC3D3300A06D48 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
- COMBINE_HIDPI_IMAGES = YES;
PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Release;
C91D01B7162892FF0002E29A /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
- COMBINE_HIDPI_IMAGES = YES;
PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Release;
C98832C215DEB44000B3308E /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
- COMBINE_HIDPI_IMAGES = YES;
PRODUCT_NAME = "$(TARGET_NAME)";
};
name = Release;
C9A325E515B7347000270056 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
- COPY_PHASE_STRIP = YES;
};
name = Release;
};
isa = XCBuildConfiguration;
baseConfigurationReference = C91D01BA162893CD0002E29A /* kext.xcconfig */;
buildSettings = {
- COMBINE_HIDPI_IMAGES = YES;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
+ PRODUCT_BUNDLE_IDENTIFIER = "${MODULE_NAME}";
PRODUCT_NAME = "$(TARGET_NAME)";
SDKROOT = macosx.internal;
};
isa = XCConfigurationList;
buildConfigurations = (
6E8C167E1B14F08A00C8987C /* Release */,
- 6E8C167F1B14F08A00C8987C /* Debug */,
+ 9235CA4E1CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
- 720A46E11B8D0B7700AB7950 /* Build configuration list for PBXLegacyTarget "Tests" */ = {
+ 74E594A21613AAF4006C417B /* Build configuration list for PBXNativeTarget "libpthread.a eOS" */ = {
isa = XCConfigurationList;
buildConfigurations = (
- 720A46DF1B8D0B7700AB7950 /* Release */,
- 720A46E01B8D0B7700AB7950 /* Debug */,
+ 74E594A31613AAF4006C417B /* Release */,
+ 9235CA521CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
};
- 74E594A21613AAF4006C417B /* Build configuration list for PBXNativeTarget "libpthread.a eOS" */ = {
+ 92799B451B96A5FE00861404 /* Build configuration list for PBXAggregateTarget "Tests" */ = {
isa = XCConfigurationList;
buildConfigurations = (
- 74E594A31613AAF4006C417B /* Release */,
- 92C577E81A378AC8004AF98B /* Debug */,
+ 92799B461B96A5FE00861404 /* Release */,
+ 9235CA4D1CA48CEA0015C92B /* Debug */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ 92B275F11BCE4C5E007D06D7 /* Build configuration list for PBXLegacyTarget "darwintests" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 92B275F21BCE4C5E007D06D7 /* Release */,
+ 9235CA541CA48CEA0015C92B /* Debug */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Release;
+ };
+ C04545B51C584F4A006A53B3 /* Build configuration list for PBXNativeTarget "libpthread.a generic" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ C04545B61C584F4A006A53B3 /* Release */,
+ 9235CA501CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
- C90E7AA115DC3C3800A06D48 /* Build configuration list for PBXNativeTarget "libpthread.a" */ = {
+ C90E7AA115DC3C3800A06D48 /* Build configuration list for PBXNativeTarget "libpthread.a dyld" */ = {
isa = XCConfigurationList;
buildConfigurations = (
C90E7AA015DC3C3800A06D48 /* Release */,
- 92C577E71A378AC8004AF98B /* Debug */,
+ 9235CA511CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
isa = XCConfigurationList;
buildConfigurations = (
C90E7AAE15DC3D3300A06D48 /* Release */,
- 92C577E31A378AC8004AF98B /* Debug */,
+ 9235CA4A1CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
isa = XCConfigurationList;
buildConfigurations = (
C91D01B7162892FF0002E29A /* Release */,
- 92C577E41A378AC8004AF98B /* Debug */,
+ 9235CA4B1CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
isa = XCConfigurationList;
buildConfigurations = (
C98832C215DEB44000B3308E /* Release */,
- 92C577E51A378AC8004AF98B /* Debug */,
+ 9235CA4C1CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
isa = XCConfigurationList;
buildConfigurations = (
C9A325E515B7347000270056 /* Release */,
- 92C577E21A378AC8004AF98B /* Debug */,
+ 9235CA491CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
isa = XCConfigurationList;
buildConfigurations = (
C9A325E815B7347000270056 /* Release */,
- 92C577E61A378AC8004AF98B /* Debug */,
+ 9235CA4F1CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
isa = XCConfigurationList;
buildConfigurations = (
C9CA27E61602813000259F78 /* Release */,
- 92C577E91A378AC8004AF98B /* Debug */,
+ 9235CA531CA48CEA0015C92B /* Debug */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/share/man/man3/pthread.3,v 1.12.2.4 2001/08/17 13:08:36 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd November 5, 2001
+.Dd August 12, 2014
.Dt PTHREAD 3
.Os Darwin
.Sh NAME
.Nm pthread
.Nd POSIX thread functions
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Sh DESCRIPTION
POSIX threads are a set of functions that support applications with
requirements for multiple flows of control, called
-.Fa threads ,
+.Em threads ,
within a process.
Multithreading is used to improve the performance of a
program.
.Pp
The POSIX thread functions are summarized in this section in the following
groups:
-.Bl -bullet -offset indent
+.Pp
+.Bl -bullet -offset indent -compact
.It
Thread Routines
.It
.It
Cleanup Routines
.El
-.Sh THREAD ROUTINES
-.Bl -tag -width Er
+.Ss Thread Routines
+.Bl -tag -width indent
.It Xo
.Ft int
-.Fn pthread_create "pthread_t *thread" "const pthread_attr_t *attr" "void *(*start_routine)(void *)" "void *arg"
+.Fo pthread_create
+.Fa "pthread_t *thread" "const pthread_attr_t *attr"
+.Fa "void *\*[lp]*start_routine\*[rp]\*[lp]void *\*[rp]" "void *arg"
+.Fc
.Xc
Creates a new thread of execution.
.It Xo
.Ft int
+.Fn pthread_cancel "pthread_t thread"
+.Xc
+Cancels execution of a thread.
+.It Xo
+.Ft int
.Fn pthread_detach "pthread_t thread"
.Xc
Marks a thread for deletion.
Causes the calling thread to wait for the termination of the specified thread.
.It Xo
.Ft int
-.Fn pthread_cancel "pthread_t thread"
+.Fn pthread_kill "pthread_t thread" "int sig"
.Xc
-Cancels execution of a thread.
+Delivers a signal to a specified thread.
.It Xo
.Ft int
-.Fn pthread_once "pthread_once_t *once_control" "void (*init_routine)(void)"
+.Fn pthread_once "pthread_once_t *once_control" "void \*[lp]*init_routine\*[rp]\*[lp]void\*[rp]"
.Xc
Calls an initialization routine once.
.It Xo
.Xc
Returns the thread ID of the calling thread.
.It Xo
-.Ft int
-.Fn pthread_atfork "void (*prepare)(void)" "void (*parent)(void)" "void (*child)(void)"
+.Ft int
+.Fn pthread_setcancelstate "int state" "int *oldstate"
+.Xc
+Sets the current thread's cancelability state.
+.It Xo
+.Ft int
+.Fn pthread_setcanceltype "int type" "int *oldtype"
+.Xc
+Sets the current thread's cancelability type.
+.It Xo
+.Ft void
+.Fn pthread_testcancel void
.Xc
-Registers handlers to be called before and after
-.Fn fork
+Creates a cancellation point in the calling thread.
.El
-.Sh ATTRIBUTE OBJECT ROUTINES
-.Bl -tag -width Er
+.Ss Attribute Object Routines
+.Bl -tag -width indent
.It Xo
.Ft int
.Fn pthread_attr_destroy "pthread_attr_t *attr"
Destroy a thread attributes object.
.It Xo
.Ft int
-.Fn pthread_attr_getinheritsched "const pthread_attr_t *attr" "int *inheritsched"
+.Fo pthread_attr_getinheritsched
+.Fa "const pthread_attr_t *attr" "int *inheritsched"
+.Fc
.Xc
Get the inherit scheduling attribute from a thread attributes object.
.It Xo
.Ft int
-.Fn pthread_attr_getschedparam "const pthread_attr_t *attr" "struct sched_param *param"
+.Fo pthread_attr_getschedparam
+.Fa "const pthread_attr_t *attr" "struct sched_param *param"
+.Fc
.Xc
Get the scheduling parameter attribute from a thread attributes object.
.It Xo
Set the inherit scheduling attribute in a thread attributes object.
.It Xo
.Ft int
-.Fn pthread_attr_setschedparam "pthread_attr_t *attr" "const struct sched_param *param"
+.Fo pthread_attr_setschedparam
+.Fa "pthread_attr_t *attr" "const struct sched_param *param"
+.Fc
.Xc
Set the scheduling parameter attribute in a thread attributes object.
.It Xo
.Xc
Set the detach state in a thread attributes object.
.El
-.Sh MUTEX ROUTINES
-.Bl -tag -width Er
+.Ss Mutex Routines
+.Bl -tag -width indent
.It Xo
.Ft int
.Fn pthread_mutexattr_destroy "pthread_mutexattr_t *attr"
Destroy a mutex attributes object.
.It Xo
.Ft int
+.Fn pthread_mutexattr_getprioceiling "pthread_mutexattr_t *attr" "int *ceiling"
+.Xc
+Obtain priority ceiling attribute of mutex attribute object.
+.It Xo
+.Ft int
+.Fn pthread_mutexattr_getprotocol "pthread_mutexattr_t *attr" "int *protocol"
+.Xc
+Obtain protocol attribute of mutex attribute object.
+.It Xo
+.Ft int
+.Fn pthread_mutexattr_gettype "pthread_mutexattr_t *attr" "int *type"
+.Xc
+Obtain the mutex type attribute in the specified mutex attributes object.
+.It Xo
+.Ft int
.Fn pthread_mutexattr_init "pthread_mutexattr_t *attr"
.Xc
Initialize a mutex attributes object with default values.
.It Xo
.Ft int
+.Fn pthread_mutexattr_setprioceiling "pthread_mutexattr_t *attr" "int ceiling"
+.Xc
+Set priority ceiling attribute of mutex attribute object.
+.It Xo
+.Ft int
+.Fn pthread_mutexattr_setprotocol "pthread_mutexattr_t *attr" "int protocol"
+.Xc
+Set protocol attribute of mutex attribute object.
+.It Xo
+.Ft int
+.Fn pthread_mutexattr_settype "pthread_mutexattr_t *attr" "int type"
+.Xc
+Set the mutex type attribute that is used when a mutex is created.
+.It Xo
+.Ft int
.Fn pthread_mutex_destroy "pthread_mutex_t *mutex"
.Xc
Destroy a mutex.
.It Xo
.Ft int
-.Fn pthread_mutex_init "pthread_mutex_t *mutex" "const pthread_mutexattr_t *attr"
+.Fo pthread_mutex_init
+.Fa "pthread_mutex_t *mutex" "const pthread_mutexattr_t *attr"
+.Fc
.Xc
Initialize a mutex with specified attributes.
.It Xo
.Ft int
.Fn pthread_mutex_trylock "pthread_mutex_t *mutex"
.Xc
-Try to lock a mutex, but don't block if the mutex is locked by another thread,
+Try to lock a mutex, but do not block if the mutex is locked by another thread,
including the current thread.
.It Xo
.Ft int
.Xc
Unlock a mutex.
.El
-.Sh CONDITION VARIABLE ROUTINES
-.Bl -tag -width Er
+.Ss Condition Variable Routines
+.Bl -tag -width indent
.It Xo
.Ft int
-.Fn pthread_condattr_init "pthread_condattr_t *attr"
+.Fn pthread_condattr_destroy "pthread_condattr_t *attr"
.Xc
-Initialize a condition variable attributes object with default values.
+Destroy a condition variable attributes object.
.It Xo
.Ft int
-.Fn pthread_condattr_destroy "pthread_condattr_t *attr"
+.Fn pthread_condattr_init "pthread_condattr_t *attr"
.Xc
-Destroy a condition variable attributes object.
+Initialize a condition variable attributes object with default values.
.It Xo
.Ft int
.Fn pthread_cond_broadcast "pthread_cond_t *cond"
Unblock at least one of the threads blocked on the specified condition variable.
.It Xo
.Ft int
-.Fn pthread_cond_timedwait "pthread_cond_t *cond" "pthread_mutex_t *mutex" "const struct timespec *abstime"
+.Fo pthread_cond_timedwait
+.Fa "pthread_cond_t *cond" "pthread_mutex_t *mutex"
+.Fa "const struct timespec *abstime"
+.Fc
.Xc
-Atomically unlock the specified mutex and block on a condition
-or until the specified time passes.
+Unlock the specified mutex, wait no longer than the specified time for
+a condition, and then relock the mutex.
.It Xo
.Ft int
.Fn pthread_cond_wait "pthread_cond_t *cond" "pthread_mutex_t *mutex"
.Xc
-Atomically unlock the specified mutex and block on a condition.
+Unlock the specified mutex, wait for a condition, and relock the mutex.
.El
-.Sh READ/WRITE LOCK ROUTINES
-.Bl -tag -width Er
+.Ss Read/Write Lock Routines
+.Bl -tag -width indent
.It Xo
.Ft int
.Fn pthread_rwlock_destroy "pthread_rwlock_t *lock"
Destroy a read/write lock object.
.It Xo
.Ft int
-.Fn pthread_rwlock_init "pthread_rwlock_t *lock" "const pthread_rwlockattr_t *attr"
+.Fo pthread_rwlock_init
+.Fa "pthread_rwlock_t *lock" "const pthread_rwlockattr_t *attr"
+.Fc
.Xc
Initialize a read/write lock object.
.It Xo
Destroy a read/write lock attribute object.
.It Xo
.Ft int
-.Fn pthread_rwlockattr_getpshared "const pthread_rwlockattr_t *attr" "int *pshared"
+.Fo pthread_rwlockattr_getpshared
+.Fa "const pthread_rwlockattr_t *attr" "int *pshared"
+.Fc
.Xc
Retrieve the process shared setting for the read/write lock attribute
object.
.Xc
Set the process shared setting for the read/write lock attribute object.
.El
-.Sh PER-THREAD CONTEXT ROUTINES
-.Bl -tag -width Er
+.Ss Per-Thread Context Routines
+.Bl -tag -width indent
.It Xo
.Ft int
-.Fn pthread_key_create "pthread_key_t *key" "void (*routine)(void *)"
+.Fn pthread_key_create "pthread_key_t *key" "void \*[lp]*routine\*[rp]\*[lp]void *\*[rp]"
.Xc
Create a thread-specific data key.
.It Xo
.Xc
Set the thread-specific value for the specified key.
.El
-.Sh CLEANUP ROUTINES
-.Bl -tag -width Er
+.Ss Cleanup Routines
+.Bl -tag -width indent
+.It Xo
+.Ft int
+.Fo pthread_atfork
+.Fa "void \*[lp]*prepare\*[rp]\*[lp]void\*[rp]"
+.Fa "void \*[lp]*parent\*[rp]\*[lp]void\*[rp]"
+.Fa "void \*[lp]*child\*[rp]\*[lp]void\*[rp]"
+.Fc
+.Xc
+Register fork handlers.
.It Xo
.Ft void
.Fn pthread_cleanup_pop "int execute"
stack and optionally invoke it.
.It Xo
.Ft void
-.Fn pthread_cleanup_push "void (*routine)(void *)" "void *routine_arg"
+.Fn pthread_cleanup_push "void \*[lp]*routine\*[rp]\*[lp]void *\*[rp]" "void *routine_arg"
.Xc
Push the specified cancellation cleanup handler onto the calling thread's
cancellation stack.
.Sh SEE ALSO
.Xr pthread_atfork 3 ,
.Xr pthread_attr 3 ,
-.Xr pthread_attr_init_destroy 3 ,
-.Xr pthread_attr_getdetachstate 3 ,
-.Xr pthread_attr_getinheritsched 3 ,
-.Xr pthread_attr_getschedparam 3 ,
-.Xr pthread_attr_getschedpolicy 3 ,
-.Xr pthread_attr_getscope 3 ,
-.Xr pthread_attr_getstackaddr 3 ,
-.Xr pthread_attr_getstacksize 3 ,
-.Xr pthread_attr_init 3 ,
-.Xr pthread_attr_init_destroy 3 ,
-.Xr pthread_attr_setdetachstate 3 ,
-.Xr pthread_attr_setinheritsched 3 ,
-.Xr pthread_attr_setschedparam 3 ,
-.Xr pthread_attr_setschedpolicy 3 ,
-.Xr pthread_attr_setscope 3 ,
-.Xr pthread_attr_setstackaddr 3 ,
-.Xr pthread_attr_setstacksize 3 ,
.Xr pthread_cancel 3 ,
.Xr pthread_cleanup_pop 3 ,
.Xr pthread_cleanup_push 3 ,
.Xr pthread_join 3 ,
.Xr pthread_key_create 3 ,
.Xr pthread_key_delete 3 ,
+.Xr pthread_kill 3 ,
.Xr pthread_mutex_destroy 3 ,
.Xr pthread_mutex_init 3 ,
.Xr pthread_mutex_lock 3 ,
.Xr pthread_rwlockattr_init 3 ,
.Xr pthread_rwlockattr_setpshared 3 ,
.Xr pthread_self 3 ,
-.Xr pthread_setcancelstate 3
-.Xr pthread_setcanceltype 3
-.Xr pthread_setschedparam 3
-.Xr pthread_setspecific 3
+.Xr pthread_setcancelstate 3 ,
+.Xr pthread_setcanceltype 3 ,
+.Xr pthread_setspecific 3 ,
.Xr pthread_testcancel 3
.Sh STANDARDS
The functions with the
-.Fa pthread_
+.Nm pthread_
prefix and not
-.Fa _np
+.Nm _np
suffix or
-.Fa pthread_rwlock
+.Nm pthread_rwlock
prefix conform to
.St -p1003.1-96 .
.Pp
The functions with the
-.Fa pthread_
+.Nm pthread_
prefix and
-.Fa _np
+.Nm _np
suffix are non-portable extensions to POSIX threads.
.Pp
The functions with the
-.Fa pthread_rwlock
+.Nm pthread_rwlock
prefix are extensions created by The Open Group as part of the
.St -susv2 .
.\" Copyright (c) 2004 Apple Computer, Inc.
.\"
-.Dd August 12, 2004
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" Portions of this text are reprinted and reproduced in electronic form
+.\" from IEEE Std 1003.1, 2004 Edition, Standard for Information Technology --
+.\" Portable Operating System Interface (POSIX), The Open Group Base
+.\" Specifications Issue 6, Copyright (C) 2001-2004 by the Institute of
+.\" Electrical and Electronics Engineers, Inc and The Open Group. In the
+.\" event of any discrepancy between this version and the original IEEE and
+.\" The Open Group Standard, the original IEEE and The Open Group Standard is
+.\" the referee document. The original Standard can be obtained online at
+.\" http://www.opengroup.org/unix/online.html.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd June 21, 2004
.Dt PTHREAD_ATFORK 3
.Os
.Sh NAME
.Nd register handlers to be called before and after
.Fn fork
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fn pthread_atfork "void (*prepare)(void)" "void (*parent)(void)" "void (*child)(void)"
+.Fo pthread_atfork
+.Fa "void \*[lp]*prepare\*[rp]\*[lp]void\*[rp]"
+.Fa "void \*[lp]*parent\*[rp]\*[lp]void\*[rp]"
+.Fa "void \*[lp]*child\*[rp]\*[lp]void\*[rp]"
+.Fc
.Sh DESCRIPTION
The
.Fn pthread_atfork
-function is used to register functions to be called before and after
-.Fn fork .
-The
-.Fa prepare
-handler is called before
-.Fn fork ,
-while the
-.Fa parent
-and
-.Fa child
-handlers are called after
-.Fn fork
-in the parent and child process respectively.
-The
+function declares fork handlers to be called before and after
+.Xr fork 2 ,
+in the context of the thread that called
+.Xr fork 2 .
+.Pp
+The handlers registered with
+.Fn pthread_atfork
+are called at the moments in time described below:
+.Bl -tag -width ".Fa prepare"
+.It Fa prepare
+Before
+.Xr fork 2
+processing commences in the parent process.
+If more than one
.Fa prepare
-handlers are called in reverse order of their registration, while
+handler is registered they will be called in the opposite order
+they were registered.
+.It Fa parent
+After
+.Xr fork 2
+completes in the parent process.
+If more than one
.Fa parent
-and
+handler is registered they will be called in the same order
+they were registered.
+.It Fa child
+After
+.Xr fork 2
+processing completes in the child process.
+If more than one
.Fa child
-handlers are called in the order in which they were registered.
-Any of the handlers may be NULL.
+handler is registered they will be called in the same order
+they were registered.
+.El
+.Pp
+If no handling is desired at one or more of these three points,
+a null pointer may be passed as the corresponding fork handler.
.Pp
.Em Important :
only async-signal-safe functions are allowed on the child side of
.Sh RETURN VALUES
If successful, the
.Fn pthread_atfork
-function will return zero; otherwise an error number will be returned to
-indicate the error.
+function will return zero.
+Otherwise an error number will be returned to indicate the error.
.Sh ERRORS
+The
.Fn pthread_atfork
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er ENOMEM
The system lacked the necessary resources to add another handler to the list.
.El
.Sh SEE ALSO
-.Xr fork 2
+.Xr fork 2 ,
+.Xr pthread 3
.Sh STANDARDS
+The
.Fn pthread_atfork
conforms to
.St -p1003.1-96 .
.\" OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
.\" EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_attr.3,v 1.4.2.5 2001/08/17 15:42:51 ru Exp $
-.Dd April 28, 2000
+.\" $FreeBSD$
+.Dd January 8, 2010
.Dt PTHREAD_ATTR 3
.Os
.Sh NAME
-.Nm pthread_attr_destroy ,
-.Nm pthread_attr_getdetachstate ,
-.Nm pthread_attr_getinheritsched ,
-.Nm pthread_attr_getschedparam ,
-.Nm pthread_attr_getschedpolicy ,
-.Nm pthread_attr_getscope ,
-.Nm pthread_attr_getstackaddr ,
-.Nm pthread_attr_getstacksize ,
.Nm pthread_attr_init ,
-.Nm pthread_attr_setdetachstate ,
-.Nm pthread_attr_setinheritsched ,
-.Nm pthread_attr_setschedparam ,
-.Nm pthread_attr_setschedpolicy ,
-.Nm pthread_attr_setscope ,
-.Nm pthread_attr_setstackaddr ,
-.Nm pthread_attr_setstacksize
+.Nm pthread_attr_destroy ,
+.Nm pthread_attr_setstack ,
+.Nm pthread_attr_getstack ,
+.Nm pthread_attr_setguardsize ,
+.Nm pthread_attr_getguardsize
.Nd thread attribute operations
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
+.Ft int
+.Fn pthread_attr_init "pthread_attr_t *attr"
+.Ft int
+.Fn pthread_attr_destroy "pthread_attr_t *attr"
+.Ft int
+.Fn pthread_attr_setstack "pthread_attr_t *attr" "void *stackaddr" "size_t stacksize"
+.Ft int
+.Fn pthread_attr_getstack "const pthread_attr_t * restrict attr" "void ** restrict stackaddr" "size_t * restrict stacksize"
.Ft int
-.Fo pthread_attr_destroy
-.Fa "pthread_attr_t *attr"
-.Fc
+.Fn pthread_attr_setstacksize "pthread_attr_t *attr" "size_t stacksize"
.Ft int
-.Fo pthread_attr_getdetachstate
-.Fa "const pthread_attr_t *attr"
-.Fa "int *detachstate"
-.Fc
+.Fn pthread_attr_getstacksize "const pthread_attr_t *attr" "size_t *stacksize"
.Ft int
-.Fo pthread_attr_getinheritsched
-.Fa "const pthread_attr_t *restrict attr"
-.Fa "int *restrict inheritsched"
-.Fc
+.Fn pthread_attr_setguardsize "pthread_attr_t *attr" "size_t guardsize"
.Ft int
-.Fo pthread_attr_getschedparam
-.Fa "const pthread_attr_t *restrict attr"
-.Fa "struct sched_param *restrict param"
-.Fc
+.Fn pthread_attr_getguardsize "const pthread_attr_t *attr" "size_t *guardsize"
.Ft int
-.Fo pthread_attr_getschedpolicy
-.Fa "const pthread_attr_t *restrict attr"
-.Fa "int *restrict policy"
-.Fc
+.Fn pthread_attr_setstackaddr "pthread_attr_t *attr" "void *stackaddr"
.Ft int
-.Fo pthread_attr_getscope
-.Fa "const pthread_attr_t *restrict attr"
-.Fa "int *restrict contentionscope"
-.Fc
+.Fn pthread_attr_getstackaddr "const pthread_attr_t *attr" "void **stackaddr"
.Ft int
-.Fo pthread_attr_getstackaddr
-.Fa "const pthread_attr_t *restrict attr"
-.Fa "void **restrict stackaddr"
-.Fc
+.Fn pthread_attr_setdetachstate "pthread_attr_t *attr" "int detachstate"
.Ft int
-.Fo pthread_attr_getstacksize
-.Fa "const pthread_attr_t *restrict attr"
-.Fa "size_t *restrict stacksize"
-.Fc
+.Fn pthread_attr_getdetachstate "const pthread_attr_t *attr" "int *detachstate"
.Ft int
-.Fo pthread_attr_init
-.Fa "pthread_attr_t *attr"
-.Fc
+.Fn pthread_attr_setinheritsched "pthread_attr_t *attr" "int inheritsched"
.Ft int
-.Fo pthread_attr_setdetachstate
-.Fa "pthread_attr_t *attr"
-.Fa "int detachstate"
-.Fc
+.Fn pthread_attr_getinheritsched "const pthread_attr_t *attr" "int *inheritsched"
.Ft int
-.Fo pthread_attr_setinheritsched
-.Fa "pthread_attr_t *attr"
-.Fa "int inheritsched"
-.Fc
+.Fn pthread_attr_setschedparam "pthread_attr_t *attr" "const struct sched_param *param"
.Ft int
-.Fo pthread_attr_setschedparam
-.Fa "pthread_attr_t *restrict attr"
-.Fa "const struct sched_param *restrict param"
-.Fc
+.Fn pthread_attr_getschedparam "const pthread_attr_t *attr" "struct sched_param *param"
.Ft int
-.Fo pthread_attr_setschedpolicy
-.Fa "pthread_attr_t *attr"
-.Fa "int policy"
-.Fc
+.Fn pthread_attr_setschedpolicy "pthread_attr_t *attr" "int policy"
.Ft int
-.Fo pthread_attr_setscope
-.Fa "pthread_attr_t *attr"
-.Fa "int contentionscope"
-.Fc
+.Fn pthread_attr_getschedpolicy "const pthread_attr_t *attr" "int *policy"
.Ft int
-.Fo pthread_attr_setstackaddr
-.Fa "pthread_attr_t *attr"
-.Fa "void *stackaddr"
-.Fc
+.Fn pthread_attr_setscope "pthread_attr_t *attr" "int contentionscope"
.Ft int
-.Fo pthread_attr_setstacksize
-.Fa "pthread_attr_t *attr"
-.Fa "size_t stacksize"
-.Fc
+.Fn pthread_attr_getscope "const pthread_attr_t *attr" "int *contentionscope"
.Sh DESCRIPTION
Thread attributes are used to specify parameters to
.Fn pthread_create .
If successful, these functions return 0.
Otherwise, an error number is returned to indicate the error.
.Sh ERRORS
+The
.Fn pthread_attr_init
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.\" ========
.It Bq Er ENOMEM
Out of memory.
.El
.Pp
+The
.Fn pthread_attr_destroy
-will fail if:
-.Bl -tag -width Er
-.\" ========
-.It Bq Er EINVAL
-Invalid value for
-.Fa attr .
-.El
-.Pp
-.Fn pthread_attr_setstacksize
-will fail if:
-.Bl -tag -width Er
-.\" ========
-.It Bq Er EINVAL
-Invalid value for
-.Fa attr .
-.\" ========
-.It Bq Er EINVAL
-.Fa stacksize
-is less than
-.Dv PTHREAD_STACK_MIN .
-.\" ========
-.It Bq Er EINVAL
-.Fa stacksize
-is not a multiple of the system page size.
-.El
-.Pp
-.Fn pthread_attr_setdetachstate
-will fail if:
-.Bl -tag -width Er
-.\" ========
-.It Bq Er EINVAL
-Invalid value for
-.Fa attr
-or
-.Fa detachstate .
-.El
-.Pp
-.Fn pthread_attr_setinheritsched
-will fail if:
-.Bl -tag -width Er
-.\" ========
-.It Bq Er EINVAL
-Invalid value for
-.Fa attr .
-.El
-.Pp
-.Fn pthread_attr_setschedparam
-will fail if:
-.Bl -tag -width Er
-.\" ========
-.It Bq Er EINVAL
-Invalid value for
-.Fa attr .
-.\" ========
-.It Bq Er ENOTSUP
-Invalid value for
-.Fa param .
-.El
-.Pp
-.Fn pthread_attr_setschedpolicy
-will fail if:
-.Bl -tag -width Er
-.\" ========
-.It Bq Er EINVAL
-Invalid value for
-.Fa attr .
-.It Bq Er ENOTSUP
-Invalid or unsupported value for
-.Fa policy .
-.El
-.Pp
-.Fn pthread_attr_setscope
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.\" ========
.It Bq Er EINVAL
Invalid value for
.Fa attr .
-.\" ========
-.It Bq Er ENOTSUP
-Invalid or unsupported value for
-.Fa contentionscope .
.El
.Sh SEE ALSO
+.Xr pthread_attr_affinity_np 3 ,
+.Xr pthread_attr_get_np 3 ,
.Xr pthread_create 3
.Sh STANDARDS
.Fn pthread_attr_init ,
.Fn pthread_attr_setdetachstate ,
and
.Fn pthread_attr_getdetachstate
-conform to
+functions conform to
.St -p1003.1-96
.Pp
+The
.Fn pthread_attr_setinheritsched ,
.Fn pthread_attr_getinheritsched ,
.Fn pthread_attr_setschedparam ,
.Fn pthread_attr_setscope ,
and
.Fn pthread_attr_getscope
-conform to
+functions conform to
.St -susv2
-.\" $FreeBSD: src/lib/libc_r/man/pthread_cancel.3,v 1.3.2.3 2001/03/06 16:46:08 ru Exp $
+.\" $FreeBSD$
.Dd January 17, 1999
.Dt PTHREAD_CANCEL 3
.Os
.Nm pthread_cancel
.Nd cancel execution of a thread
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
.Fn pthread_cancel "pthread_t thread"
.Sh DESCRIPTION
whose value matches no pointer to an object in memory nor the value
.Dv NULL .
.Sh RETURN VALUES
-If successful, the
+If successful, the
.Fn pthread_cancel
functions will return zero.
Otherwise an error number will be returned to
indicate the error.
.Sh ERRORS
+The
.Fn pthread_cancel
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er ESRCH
No thread could be found corresponding to that specified by the given
.Xr pthread_setcanceltype 3 ,
.Xr pthread_testcancel 3
.Sh STANDARDS
+The
.Fn pthread_cancel
-conforms to
+function conforms to
.St -p1003.1-96 .
.Sh AUTHORS
-This man page was written by
-.An David Leonard Aq d@openbsd.org
+This manual page was written by
+.An David Leonard Aq Mt d@openbsd.org
for the
.Ox
implementation of
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_cleanup_pop.3,v 1.5.2.3 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd July 30, 1998
+.Dd October 25, 2014
.Dt PTHREAD_CLEANUP_POP 3
.Os
.Sh NAME
.Nm pthread_cleanup_pop
.Nd call the first cleanup routine
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft void
.Fn pthread_cleanup_pop "int execute"
.Sh DESCRIPTION
The
.Fn pthread_cleanup_pop
-function pops the top cleanup routine off
-of the current thread's cleanup routine stack and, if
+function pops the top cleanup routine off of the current thread's cleanup
+routine stack, and, if
.Fa execute
is non-zero, it will execute the function.
-If there is no cleanup routine,
+If there is no cleanup routine
+then
.Fn pthread_cleanup_pop
does nothing.
.Pp
+The
.Fn pthread_cleanup_pop
-must be paired with a corresponding
+function is implemented as a macro that closes a block.
+Invocations of this function must appear as standalone statements that are
+paired with an earlier call of
.Xr pthread_cleanup_push 3
in the same lexical scope.
.Sh RETURN VALUES
+The
.Fn pthread_cleanup_pop
-does not return any value.
+function does not return any value.
.Sh ERRORS
None
.Sh SEE ALSO
.Xr pthread_cleanup_push 3 ,
.Xr pthread_exit 3
.Sh STANDARDS
+The
.Fn pthread_cleanup_pop
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_cleanup_push.3,v 1.5.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd July 30, 1998
+.Dd October 25, 2014
.Dt PTHREAD_CLEANUP_PUSH 3
.Os
.Sh NAME
.Nm pthread_cleanup_push
.Nd add a cleanup function for thread exit
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft void
-.Fo pthread_cleanup_push
-.Fa "void \*[lp]*routine\*[rp]\*[lp]void *\*[rp]"
-.Fa "void *arg"
-.Fc
+.Fn pthread_cleanup_push "void \*[lp]*cleanup_routine\*[rp]\*[lp]void *\*[rp]" "void *arg"
.Sh DESCRIPTION
The
.Fn pthread_cleanup_push
function adds
-.Fa routine
+.Fa cleanup_routine
to the top of the stack of cleanup handlers that
get called when the current thread exits.
.Pp
When
-.Fa routine
+.Fa cleanup_routine
is called, it is passed
.Fa arg
as its only argument.
+.Pp
+The
.Fn pthread_cleanup_push
-must be paired with a corresponding
-.Xr pthread_cleanup_pop 3
+function is implemented as a macro that opens a new block.
+Invocations of this function must appear as standalone statements that are
+paired with a later call of
+.Xr pthread_cleanup_pop 3
in the same lexical scope.
.Sh RETURN VALUES
+The
.Fn pthread_cleanup_push
-does not return any value.
+function does not return any value.
.Sh ERRORS
None
.Sh SEE ALSO
.Xr pthread_cleanup_pop 3 ,
.Xr pthread_exit 3
.Sh STANDARDS
+The
.Fn pthread_cleanup_push
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_cond_broadcast.3,v 1.5.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd July 28, 1998
.Dt PTHREAD_COND_BROADCAST 3
.Nm pthread_cond_broadcast
.Nd unblock all threads waiting for a condition variable
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
.Fn pthread_cond_broadcast "pthread_cond_t *cond"
.Sh DESCRIPTION
The
.Fn pthread_cond_broadcast
-function unblocks all threads that are waiting for the condition variable
+function unblocks all threads waiting for the condition variable
.Fa cond .
.Sh RETURN VALUES
If successful, the
.Fn pthread_cond_broadcast
-function will return zero.
-Otherwise, an error number will be returned
+function will return zero, otherwise an error number will be returned
to indicate the error.
.Sh ERRORS
+The
.Fn pthread_cond_broadcast
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The value specified by
.Xr pthread_cond_timedwait 3 ,
.Xr pthread_cond_wait 3
.Sh STANDARDS
+The
.Fn pthread_cond_broadcast
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_cond_destroy.3,v 1.6.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd July 28, 1998
.Dt PTHREAD_COND_DESTROY 3
.Nm pthread_cond_destroy
.Nd destroy a condition variable
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
.Fn pthread_cond_destroy "pthread_cond_t *cond"
.Sh DESCRIPTION
.Sh RETURN VALUES
If successful, the
.Fn pthread_cond_destroy
-function will return zero.
-Otherwise, an error number will be returned
+function will return zero, otherwise an error number will be returned
to indicate the error.
.Sh ERRORS
+The
.Fn pthread_cond_destroy
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.It Bq Er EBUSY
-The variable
-.Fa cond
-is locked by another thread.
.It Bq Er EINVAL
The value specified by
.Fa cond
is invalid.
+.It Bq Er EBUSY
+The variable
+.Fa cond
+is locked by another thread.
.El
.Sh SEE ALSO
.Xr pthread_cond_broadcast 3 ,
.Xr pthread_cond_timedwait 3 ,
.Xr pthread_cond_wait 3
.Sh STANDARDS
+The
.Fn pthread_cond_destroy
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_cond_init.3,v 1.6.2.5 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd July 28, 1998
+.Dd November 4, 2006
.Dt PTHREAD_COND_INIT 3
.Os
.Sh NAME
.Nm pthread_cond_init
.Nd create a condition variable
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_cond_init
-.Fa "pthread_cond_t *restrict cond"
-.Fa "const pthread_condattr_t *restrict attr"
-.Fc
+.Fn pthread_cond_init "pthread_cond_t *cond" "const pthread_condattr_t *attr"
.Sh DESCRIPTION
The
.Fn pthread_cond_init
.Fa attr .
If
.Fa attr
-is NULL, the default attributes are used.
+is NULL the default attributes are used.
.Sh RETURN VALUES
If successful, the
.Fn pthread_cond_init
function will return zero and put the new condition variable id into
-.Fa cond .
-Otherwise, an error number will be returned to indicate the error.
+.Fa cond ,
+otherwise an error number will be returned to indicate the error.
.Sh ERRORS
+The
.Fn pthread_cond_init
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.It Bq Er EAGAIN
-The system temporarily lacks the resources to create another condition
-variable.
.It Bq Er EINVAL
The value specified by
.Fa attr
.It Bq Er ENOMEM
The process cannot allocate enough memory to create another condition
variable.
+.It Bq Er EAGAIN
+The system temporarily lacks the resources to create another condition
+variable.
.El
.Sh SEE ALSO
.Xr pthread_cond_broadcast 3 ,
.Xr pthread_cond_destroy 3 ,
.Xr pthread_cond_signal 3 ,
.Xr pthread_cond_timedwait 3 ,
-.Xr pthread_cond_wait 3
+.Xr pthread_cond_wait 3 ,
+.Xr pthread_condattr 3
.Sh STANDARDS
+The
.Fn pthread_cond_init
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_cond_signal.3,v 1.5.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd July 28, 1998
.Dt PTHREAD_COND_SIGNAL 3
.Nm pthread_cond_signal
.Nd unblock a thread waiting for a condition variable
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
.Fn pthread_cond_signal "pthread_cond_t *cond"
.Sh DESCRIPTION
.Sh RETURN VALUES
If successful, the
.Fn pthread_cond_signal
-function will return zero.
-Otherwise, an error number will be returned to indicate the error.
+function will return zero, otherwise an error number will be returned
+to indicate the error.
.Sh ERRORS
+The
.Fn pthread_cond_signal
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The value specified by
.Xr pthread_cond_timedwait 3 ,
.Xr pthread_cond_wait 3
.Sh STANDARDS
+The
.Fn pthread_cond_signal
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_cond_timedwait.3,v 1.8.2.6 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd July 28, 1998
+.Dd May 9, 2010
.Dt PTHREAD_COND_TIMEDWAIT 3
.Os
.Sh NAME
.Nm pthread_cond_timedwait
.Nd "wait on a condition variable for a specific amount of time"
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_cond_timedwait
-.Fa "pthread_cond_t *restrict cond"
-.Fa "pthread_mutex_t *restrict mutex"
-.Fa "const struct timespec *restrict abstime"
-.Fc
+.Fn pthread_cond_timedwait "pthread_cond_t *cond" "pthread_mutex_t *mutex" "const struct timespec *abstime"
.Sh DESCRIPTION
The
.Fn pthread_cond_timedwait
function atomically blocks the current thread waiting on the condition
variable specified by
-.Fa cond
-and unblocks the mutex specified by
+.Fa cond ,
+and releases the mutex specified by
.Fa mutex .
The waiting thread unblocks only after another thread calls
.Xr pthread_cond_signal 3 ,
If successful, the
.Fn pthread_cond_timedwait
function will return zero.
-Otherwise, an error number will be returned to
+Otherwise an error number will be returned to
indicate the error.
.Sh ERRORS
+The
.Fn pthread_cond_timedwait
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The value specified by
.Xr pthread_cond_wait 3 ,
.Xr gettimeofday 2
.Sh STANDARDS
+The
.Fn pthread_cond_timedwait
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_cond_wait.3,v 1.8.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd November 5, 2001
+.Dd February 16, 2006
.Dt PTHREAD_COND_WAIT 3
.Os Darwin
.Sh NAME
.Nm pthread_cond_wait
.Nd wait on a condition variable
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_cond_wait
-.Fa "pthread_cond_t *restrict cond"
-.Fa "pthread_mutex_t *restrict mutex"
-.Fc
+.Fn pthread_cond_wait "pthread_cond_t *cond" "pthread_mutex_t *mutex"
.Sh DESCRIPTION
The
.Fn pthread_cond_wait
-function atomically unlocks the
-.Fa mutex
-and blocks the current thread on the condition specified by the
-.Fa cond
-argument.
-The current thread unblocks only after another thread calls
-.Xr pthread_cond_signal 3
+function atomically blocks the current thread waiting on the condition
+variable specified by
+.Fa cond ,
+and releases the mutex specified by
+.Fa mutex .
+The waiting thread unblocks only after another thread calls
+.Xr pthread_cond_signal 3 ,
or
.Xr pthread_cond_broadcast 3
-with the same condition variable.
-The
-.Fa mutex
-must be locked before calling this function, otherwise the behavior is
-undefined. Before
-.Fn pthread_cond_wait
-returns to the calling function, it re-acquires the
+with the same condition variable, and the current thread reacquires the lock
+on
.Fa mutex .
.Sh RETURN VALUES
If successful, the
.Fn pthread_cond_wait
-function will return zero; otherwise, an error number will be returned to
+function will return zero.
+Otherwise an error number will be returned to
indicate the error.
.Sh ERRORS
+The
.Fn pthread_cond_wait
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The value specified by
.Xr pthread_cond_signal 3 ,
.Xr pthread_cond_timedwait 3
.Sh STANDARDS
+The
.Fn pthread_cond_wait
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
.\" EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_condattr.3,v 1.9 2001/10/01 16:09:09 ru Exp $
-.Dd April 28, 2000
+.\" $FreeBSD$
+.Dd May 9, 2010
.Dt PTHREAD_CONDATTR 3
.Os
.Sh NAME
+.Nm pthread_condattr_init ,
.Nm pthread_condattr_destroy ,
-.Nm pthread_condattr_init
.Nd condition attribute operations
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_condattr_destroy
-.Fa "pthread_condattr_t *attr"
-.Fc
+.Fn pthread_condattr_init "pthread_condattr_t *attr"
.Ft int
-.Fo pthread_condattr_init
-.Fa "pthread_condattr_t *attr"
-.Fc
+.Fn pthread_condattr_destroy "pthread_condattr_t *attr"
.Sh DESCRIPTION
Condition attribute objects are used to specify parameters to
.Fn pthread_cond_init .
.Sh SEE ALSO
.Xr pthread_cond_init 3
.Sh STANDARDS
+The
.Fn pthread_condattr_init
and
.Fn pthread_condattr_destroy
-conform to
+functions conform to
.St -p1003.1-96
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_create.3,v 1.9.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd April 4, 1996
+.Dd March 15, 2014
.Dt PTHREAD_CREATE 3
.Os
.Sh NAME
.Nm pthread_create
.Nd create a new thread
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_create
-.Fa "pthread_t *restrict thread"
-.Fa "const pthread_attr_t *restrict attr"
-.Fa "void *(*start_routine)(void *)"
-.Fa "void *restrict arg"
-.Fc
+.Fn pthread_create "pthread_t *thread" "const pthread_attr_t *attr" "void *(*start_routine)(void *)" "void *arg"
.Sh DESCRIPTION
The
.Fn pthread_create
within a process.
If
.Fa attr
-is NULL, the default attributes are used.
+is
+.Dv NULL ,
+the default attributes are used.
If the attributes specified by
.Fa attr
are modified later, the thread's attributes are not affected.
-Upon successful completion,
+Upon
+successful completion
.Fn pthread_create
will store the ID of the created thread in the location specified by
.Fa thread .
.Pp
-Upon its creation, the thread executes
-.Fa start_routine ,
+The thread is created executing
+.Fa start_routine
with
.Fa arg
as its sole argument.
-If
+If the
.Fa start_routine
returns, the effect is as if there was an implicit call to
-.Fn pthread_exit ,
+.Fn pthread_exit
using the return value of
.Fa start_routine
as the exit status.
When it returns from
.Fn main ,
the effect is as if there was an implicit call to
-.Fn exit ,
+.Fn exit
using the return value of
.Fn main
as the exit status.
The set of signals pending for the new thread is empty.
.El
.Sh RETURN VALUES
-If successful, the
+If successful, the
.Fn pthread_create
function will return zero.
-Otherwise, an error number will be returned to
+Otherwise an error number will be returned to
indicate the error.
.Sh ERRORS
+The
.Fn pthread_create
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EAGAIN
The system lacked the necessary resources to create another thread, or
the system-imposed limit on the total number of threads in a process
[PTHREAD_THREADS_MAX] would be exceeded.
+.It Bq Er EPERM
+The caller does not have appropriate permission to set the required scheduling
+parameters or scheduling policy.
.It Bq Er EINVAL
The value specified by
.Fa attr
.El
.Sh SEE ALSO
.Xr fork 2 ,
+.Xr pthread_attr 3 ,
+.Xr pthread_cancel 3 ,
.Xr pthread_cleanup_pop 3 ,
.Xr pthread_cleanup_push 3 ,
-.Xr pthread_detach 3 ,
.Xr pthread_exit 3 ,
.Xr pthread_join 3
.Sh STANDARDS
+The
.Fn pthread_create
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_detach.3,v 1.6.2.5 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd April 4, 1996
.Dt PTHREAD_DETACH 3
.Nm pthread_detach
.Nd detach a thread
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_detach
-.Fa "pthread_t thread"
-.Fc
+.Fn pthread_detach "pthread_t thread"
.Sh DESCRIPTION
The
.Fn pthread_detach
.Fn pthread_detach
calls on the same target thread is unspecified.
.Sh RETURN VALUES
-If successful, the
+If successful, the
.Fn pthread_detach
function will return zero.
-Otherwise, an error number will be returned to
+Otherwise an error number will be returned to
indicate the error.
Note that the function does not change the value
-of errno, as it did for some drafts of the standard.
+of errno as it did for some drafts of the standard.
These early drafts
also passed a pointer to pthread_t as the argument.
Beware!
.Sh ERRORS
+The
.Fn pthread_detach
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The implementation has detected that the value specified by
.Sh SEE ALSO
.Xr pthread_join 3
.Sh STANDARDS
+The
.Fn pthread_detach
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_equal.3,v 1.4.2.5 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd April 4, 1996
.Dt PTHREAD_EQUAL 3
.Nm pthread_equal
.Nd compare thread IDs
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_equal
-.Fa "pthread_t t1"
-.Fa "pthread_t t2"
-.Fc
+.Fn pthread_equal "pthread_t t1" "pthread_t t2"
.Sh DESCRIPTION
The
.Fn pthread_equal
.Fa t1
and
.Fa t2
-correspond to the same thread. Otherwise, it will return zero.
+correspond to the same thread, otherwise it will return zero.
.Sh ERRORS
None.
.Sh SEE ALSO
.Xr pthread_create 3 ,
.Xr pthread_exit 3
.Sh STANDARDS
+The
.Fn pthread_equal
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_exit.3,v 1.8.2.6 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd April 4, 1996
+.Dd March 15, 2014
.Dt PTHREAD_EXIT 3
.Os
.Sh NAME
.Nm pthread_exit
.Nd terminate the calling thread
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft void
-.Fo pthread_exit
-.Fa "void *value_ptr"
-.Fc
+.Fn pthread_exit "void *value_ptr"
.Sh DESCRIPTION
The
.Fn pthread_exit
is made when a thread other than the thread in which
.Fn main
was first invoked returns from the start routine that was used to create
-it. The function's return value serves as the thread's exit status.
+it.
+The function's return value serves as the thread's exit status.
.Pp
The behavior of
.Fn pthread_exit
.Sh SEE ALSO
.Xr _exit 2 ,
.Xr exit 3 ,
+.Xr pthread_cancel 3 ,
.Xr pthread_create 3 ,
.Xr pthread_join 3
.Sh STANDARDS
+The
.Fn pthread_exit
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
.\" EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_schedparam.3,v 1.2.2.4 2001/08/17 15:42:52 ru Exp $
-.Dd May 1, 2000
+.\" $FreeBSD$
+.\"
+.Dd October 16, 2006
.Dt PTHREAD_SCHEDPARAM 3
.Os
.Sh NAME
-.Nm pthread_getschedparam ,
-.Nm pthread_setschedparam
+.Nm pthread_setschedparam ,
+.Nm pthread_getschedparam
.Nd thread scheduling parameter manipulation
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_getschedparam
-.Fa "pthread_t thread"
-.Fa "int *restrict policy"
-.Fa "struct sched_param *restrict param"
-.Fc
+.Fn pthread_setschedparam "pthread_t thread" "int policy" "const struct sched_param *param"
.Ft int
-.Fo pthread_setschedparam
-.Fa "pthread_t thread"
-.Fa "int policy"
-.Fa "const struct sched_param *param"
-.Fc
+.Fn pthread_getschedparam "pthread_t thread" "int *policy" "struct sched_param *param"
.Sh DESCRIPTION
The
-.Fn pthread_getschedparam
-and
.Fn pthread_setschedparam
-functions get and set the scheduling parameters of individual threads.
+and
+.Fn pthread_getschedparam
+functions set and get the scheduling parameters of individual threads.
The scheduling policy for a thread can either be
.Dv SCHED_FIFO
-(first in, first out) or
+(first in, first out),
.Dv SCHED_RR
-(round-robin).
-The thread priority (accessed via
+(round-robin), or
+.Dv SCHED_OTHER
+(timesharing).
+Valid thread priorities (accessed via
.Va param->sched_priority )
-must be at least
-.Dv PTHREAD_MIN_PRIORITY
-and no more than
-.Dv PTHREAD_MAX_PRIORITY .
+must be within the range returned by the
+.Xr sched_get_priority_min 2
+and
+.Xr sched_get_priority_max 2
+system calls.
.Sh RETURN VALUES
If successful, these functions return 0.
Otherwise, an error number is returned to indicate the error.
.Sh ERRORS
-.Fn pthread_getschedparam
-will fail if:
-.Bl -tag -width Er
-.It Bq Er ESRCH
-Non-existent thread
-.Va thread .
-.El
-.Pp
+The
.Fn pthread_setschedparam
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
Invalid value for
-.Va policy .
+.Fa policy .
.It Bq Er ENOTSUP
Invalid value for scheduling parameters.
.It Bq Er ESRCH
Non-existent thread
-.Va thread .
+.Fa thread .
+.El
+.Pp
+The
+.Fn pthread_getschedparam
+function will fail if:
+.Bl -tag -width Er
+.It Bq Er ESRCH
+Non-existent thread
+.Fa thread .
.El
+.Sh SEE ALSO
+.Xr sched_get_priority_max 2 ,
+.Xr sched_get_priority_min 2
.Sh STANDARDS
+The
.Fn pthread_setschedparam
and
.Fn pthread_getschedparam
-conform to
-.St -susv2
+functions conform to
+.St -susv2 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_getspecific.3,v 1.6.2.3 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd April 4, 1996
.Dt PTHREAD_GETSPECIFIC 3
.Nm pthread_getspecific
.Nd get a thread-specific data value
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft void *
-.Fo pthread_getspecific
-.Fa "pthread_key_t key"
-.Fc
+.Fn pthread_getspecific "pthread_key_t key"
.Sh DESCRIPTION
The
.Fn pthread_getspecific
-function returns the value that is currently bound to the specified
-.Fa key ,
+function returns the value currently bound to the specified
+.Fa key
on behalf of the calling thread.
.Pp
The effect of calling
.Fn pthread_getspecific
with a
.Fa key
-value that was not obtained from
-.Fn pthread_key_create ,
-or after a
+value not obtained from
+.Fn pthread_key_create
+or after
.Fa key
has been deleted with
-.Fn pthread_key_delete ,
+.Fn pthread_key_delete
is undefined.
.Pp
+The
.Fn pthread_getspecific
-may be called from a thread-specific data destructor function.
+function may be called from a thread-specific data destructor function.
.Sh RETURN VALUES
The
.Fn pthread_getspecific
.Fa key .
If no thread-specific data value is associated with
.Fa key ,
-the value NULL is returned.
+then the value NULL is returned.
.Sh ERRORS
None.
.Sh SEE ALSO
.Xr pthread_key_delete 3 ,
.Xr pthread_setspecific 3
.Sh STANDARDS
+The
.Fn pthread_getspecific
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_join.3,v 1.7.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd April 4, 1996
+.Dd January 23, 2010
.Dt PTHREAD_JOIN 3
.Os
.Sh NAME
.Nm pthread_join
.Nd wait for thread termination
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_join
-.Fa "pthread_t thread"
-.Fa "void **value_ptr"
-.Fc
+.Fn pthread_join "pthread_t thread" "void **value_ptr"
.Sh DESCRIPTION
The
.Fn pthread_join
function suspends execution of the calling thread until the target
.Fa thread
-terminates, unless the target
+terminates unless the target
.Fa thread
has already terminated.
.Pp
returns successfully, the target thread has been terminated.
The results
of multiple simultaneous calls to
-.Fn pthread_join ,
-specifying the same target thread, are undefined.
+.Fn pthread_join
+specifying the same target thread are undefined.
If the thread calling
.Fn pthread_join
-is cancelled, the target thread is not detached.
+is cancelled, then the target thread is not detached.
.Pp
.Sh RETURN VALUES
-If successful, the
+If successful, the
.Fn pthread_join
function will return zero.
Otherwise, an error number will be returned to
indicate the error.
.Sh ERRORS
+The
.Fn pthread_join
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.It Bq Er EDEADLK
-A deadlock was detected or the value of
-.Fa thread
-specifies the calling thread.
.It Bq Er EINVAL
The implementation has detected that the value specified by
.Fa thread
No thread could be found corresponding to that specified by the given
thread ID,
.Fa thread .
+.It Bq Er EDEADLK
+A deadlock was detected or the value of
+.Fa thread
+specifies the calling thread.
.El
.Sh SEE ALSO
.Xr wait 2 ,
.Xr pthread_create 3
.Sh STANDARDS
+The
.Fn pthread_join
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_key_create.3,v 1.6.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd April 4, 1996
.Dt PTHREAD_KEY_CREATE 3
.Nm pthread_key_create
.Nd thread-specific data key creation
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_key_create
-.Fa "pthread_key_t *key"
-.Fa "void (*destructor)(void *)"
-.Fc
+.Fn pthread_key_create "pthread_key_t *key" "void (*destructor)(void *)"
.Sh DESCRIPTION
The
.Fn pthread_key_create
-function creates a thread-specific data key
-that is visible to all threads in the process.
+function creates a thread-specific data key visible to all threads in the
+process.
Key values provided by
.Fn pthread_key_create
-are opaque objects, used to locate thread-specific data.
+are opaque objects used to locate thread-specific data.
Although the same
key value may be used by different threads, the values bound to the key
by
defined keys in the new thread.
.Pp
An optional destructor function may be associated with each key value.
-If a key value has a non-NULL destructor function pointer, and the thread has
-a non-NULL value associated with the key at the time of thread exit, then the
-key value is set to NULL and the destructor function is called with the
-previous key value as its argument.
-The order of destructor calls at thread exit is unspecified.
+At
+thread exit, if a key value has a non-NULL destructor pointer, and the
+thread has a non-NULL value associated with the key, the function pointed
+to is called with the current associated value as its sole argument.
+The
+order of destructor calls is unspecified if more than one destructor exists
+for a thread when it exits.
.Pp
If, after all the destructors have been called for all non-NULL values
with associated destructors, there are still some non-NULL values with
function will store the newly created key value at the location specified by
.Fa key
and returns zero.
-Otherwise, an error number will be returned to indicate
+Otherwise an error number will be returned to indicate
the error.
.Sh ERRORS
+The
.Fn pthread_key_create
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EAGAIN
The system lacked the necessary resources to create another thread-specific
.Xr pthread_key_delete 3 ,
.Xr pthread_setspecific 3
.Sh STANDARDS
+The
.Fn pthread_key_create
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_key_delete.3,v 1.6.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd April 4, 1996
.Dt PTHREAD_KEY_DELETE 3
.Nm pthread_key_delete
.Nd delete a thread-specific data key
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_key_delete
-.Fa "pthread_key_t key"
-.Fc
+.Fn pthread_key_delete "pthread_key_t key"
.Sh DESCRIPTION
The
.Fn pthread_key_delete
-function deletes a thread-specific data key, previously returned by
+function deletes a thread-specific data key previously returned by
.Fn pthread_key_create .
The thread-specific data values associated with
.Fa key
If successful, the
.Fn pthread_key_delete
function will return zero.
-Otherwise, an error number will be returned to
+Otherwise an error number will be returned to
indicate the error.
.Sh ERRORS
+The
.Fn pthread_key_delete
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The
.Xr pthread_key_create 3 ,
.Xr pthread_setspecific 3
.Sh STANDARDS
+The
.Fn pthread_key_delete
-conforms to
+function conforms to
.St -p1003.1-96 .
returns 0.
Otherwise, an error number is returned.
.Sh ERRORS
+The
.Fn pthread_kill
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.It Bq Er EINVAL
-.Fa sig
-is an invalid or unsupported signal number.
.It Bq Er ESRCH
.Fa thread
is an invalid thread ID.
+.It Bq Er EINVAL
+.Fa sig
+is an invalid or unsupported signal number.
.El
.Sh LEGACY SYNOPSIS
.Fd #include <pthread.h>
.Sh NAME
.Nm pthread_main_np
.Nd identify the initial thread
-.Sh LIBRARY
-.Lb libpthread
.Sh SYNOPSIS
.In pthread.h
.Ft int
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_mutex_destroy.3,v 1.5.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd July 29, 1998
.Dt PTHREAD_MUTEX_DESTROY 3
.Nm pthread_mutex_destroy
.Nd free resources allocated for a mutex
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_mutex_destroy
-.Fa "pthread_mutex_t *mutex"
-.Fc
+.Fn pthread_mutex_destroy "pthread_mutex_t *mutex"
.Sh DESCRIPTION
The
.Fn pthread_mutex_destroy
.Sh RETURN VALUES
If successful,
.Fn pthread_mutex_destroy
-will return zero.
-Otherwise, an error number will be returned to indicate the error.
+will return zero, otherwise an error number will be returned to
+indicate the error.
.Sh ERRORS
+The
.Fn pthread_mutex_destroy
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.It Bq Er EBUSY
-.Fa Mutex
-is locked by a thread.
.It Bq Er EINVAL
The value specified by
.Fa mutex
is invalid.
+.It Bq Er EBUSY
+.Fa Mutex
+is locked by another thread.
.El
.Sh SEE ALSO
.Xr pthread_mutex_init 3 ,
.Xr pthread_mutex_trylock 3 ,
.Xr pthread_mutex_unlock 3
.Sh STANDARDS
+The
.Fn pthread_mutex_destroy
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_mutex_init.3,v 1.6.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd July 29, 1998
+.Dd November 4, 2006
.Dt PTHREAD_MUTEX_INIT 3
.Os
.Sh NAME
.Nm pthread_mutex_init
.Nd create a mutex
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_mutex_init
-.Fa "pthread_mutex_t *restrict mutex"
-.Fa "const pthread_mutexattr_t *restrict attr"
-.Fc
+.Fn pthread_mutex_init "pthread_mutex_t *mutex" "const pthread_mutexattr_t *attr"
.Sh DESCRIPTION
The
.Fn pthread_mutex_init
.Fa attr .
If
.Fa attr
-is NULL, the default attributes are used.
+is NULL the default attributes are used.
.Sh RETURN VALUES
If successful,
.Fn pthread_mutex_init
will return zero and put the new mutex id into
-.Fa mutex .
-Otherwise, an error number will be returned to indicate the error.
+.Fa mutex ,
+otherwise an error number will be returned to indicate the error.
.Sh ERRORS
+The
.Fn pthread_mutex_init
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.It Bq Er EAGAIN
-The system temporarily lacks the resources to create another mutex.
.It Bq Er EINVAL
The value specified by
.Fa attr
.Xr pthread_mutex_destroy 3 ,
.Xr pthread_mutex_lock 3 ,
.Xr pthread_mutex_trylock 3 ,
-.Xr pthread_mutex_unlock 3
+.Xr pthread_mutex_unlock 3 ,
+.Xr pthread_mutexattr 3
.Sh STANDARDS
+The
.Fn pthread_mutex_init
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_mutex_lock.3,v 1.5.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
-.Dd July 30, 1998
+.Dd January 31, 2006
.Dt PTHREAD_MUTEX_LOCK 3
.Os
.Sh NAME
.Nm pthread_mutex_lock
.Nd lock a mutex
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_mutex_lock
-.Fa "pthread_mutex_t *mutex"
-.Fc
+.Fn pthread_mutex_lock "pthread_mutex_t *mutex"
.Sh DESCRIPTION
The
.Fn pthread_mutex_lock
.Sh RETURN VALUES
If successful,
.Fn pthread_mutex_lock
-will return zero.
-Otherwise, an error number will be returned to indicate the error.
+will return zero, otherwise an error number will be returned to
+indicate the error.
.Sh ERRORS
+The
.Fn pthread_mutex_lock
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.It Bq Er EDEADLK
-A deadlock would occur if the thread blocked waiting for
-.Fa mutex .
.It Bq Er EINVAL
The value specified by
.Fa mutex
is invalid.
+.It Bq Er EDEADLK
+A deadlock would occur if the thread blocked waiting for
+.Fa mutex .
.El
.Sh SEE ALSO
.Xr pthread_mutex_destroy 3 ,
.Xr pthread_mutex_trylock 3 ,
.Xr pthread_mutex_unlock 3
.Sh STANDARDS
+The
.Fn pthread_mutex_lock
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_mutex_trylock.3,v 1.5.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd July 30, 1998
.Dt PTHREAD_MUTEX_TRYLOCK 3
.Nm pthread_mutex_trylock
.Nd attempt to lock a mutex without blocking
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_mutex_trylock
-.Fa "pthread_mutex_t *mutex"
-.Fc
+.Fn pthread_mutex_trylock "pthread_mutex_t *mutex"
.Sh DESCRIPTION
The
.Fn pthread_mutex_trylock
.Sh RETURN VALUES
If successful,
.Fn pthread_mutex_trylock
-will return zero.
-Otherwise, an error number will be returned to indicate the error.
+will return zero, otherwise an error number will be returned to
+indicate the error.
.Sh ERRORS
+The
.Fn pthread_mutex_trylock
-will fail if:
+function will fail if:
.Bl -tag -width Er
-.It Bq Er EBUSY
-.Fa Mutex
-is already locked.
.It Bq Er EINVAL
The value specified by
.Fa mutex
is invalid.
+.It Bq Er EBUSY
+.Fa Mutex
+is already locked.
.El
.Sh SEE ALSO
.Xr pthread_mutex_destroy 3 ,
.Xr pthread_mutex_lock 3 ,
.Xr pthread_mutex_unlock 3
.Sh STANDARDS
+The
.Fn pthread_mutex_trylock
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_mutex_unlock.3,v 1.5.2.4 2001/08/17 15:42:51 ru Exp $
+.\" $FreeBSD$
.\"
.Dd July 30, 1998
.Dt PTHREAD_MUTEX_UNLOCK 3
.Nm pthread_mutex_unlock
.Nd unlock a mutex
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_mutex_unlock
-.Fa "pthread_mutex_t *mutex"
-.Fc
+.Fn pthread_mutex_unlock "pthread_mutex_t *mutex"
.Sh DESCRIPTION
If the current thread holds the lock on
.Fa mutex ,
.Sh RETURN VALUES
If successful,
.Fn pthread_mutex_unlock
-will return zero.
-Otherwise, an error number will be returned to indicate the error.
+will return zero, otherwise an error number will be returned to
+indicate the error.
.Sh ERRORS
+The
.Fn pthread_mutex_unlock
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The value specified by
.Xr pthread_mutex_lock 3 ,
.Xr pthread_mutex_trylock 3
.Sh STANDARDS
+The
.Fn pthread_mutex_unlock
-conforms to
+function conforms to
.St -p1003.1-96 .
-.\" $NetBSD: pthread_mutexattr.3,v 1.3 2003/07/04 08:36:06 wiz Exp $
+.\" $NetBSD: pthread_mutexattr.3,v 1.11 2010/07/08 22:46:34 jruoho Exp $
.\"
-.\" Copyright (c) 2002 The NetBSD Foundation, Inc.
+.\" Copyright (c) 2002, 2010 The NetBSD Foundation, Inc.
.\" All rights reserved.
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
-.\" 3. Neither the name of The NetBSD Foundation nor the names of its
-.\" contributors may be used to endorse or promote products derived
-.\" from this software without specific prior written permission.
.\" THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
.\" ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
.\" TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
.\" OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
.\" EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libpthread/man/pthread_mutexattr.3,v 1.8 2002/09/16 19:29:29 mini Exp $
-.Dd January 30, 2003
+.\" $FreeBSD$
+.Dd July 9, 2010
.Dt PTHREAD_MUTEXATTR 3
.Os
.Sh NAME
-.Nm pthread_mutexattr_destroy ,
-.Nm pthread_mutexattr_getprioceiling ,
-.Nm pthread_mutexattr_getprotocol ,
-.Nm pthread_mutexattr_gettype ,
.Nm pthread_mutexattr_init ,
+.Nm pthread_mutexattr_destroy ,
.Nm pthread_mutexattr_setprioceiling ,
+.Nm pthread_mutexattr_getprioceiling ,
.Nm pthread_mutexattr_setprotocol ,
-.Nm pthread_mutexattr_settype
+.Nm pthread_mutexattr_getprotocol ,
+.Nm pthread_mutexattr_settype ,
+.Nm pthread_mutexattr_gettype
.Nd mutex attribute operations
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_mutexattr_destroy
-.Fa "pthread_mutexattr_t *attr"
-.Fc
+.Fn pthread_mutexattr_init "pthread_mutexattr_t *attr"
.Ft int
-.Fo pthread_mutexattr_getprioceiling
-.Fa "const pthread_mutexattr_t *restrict attr"
-.Fa "int *restrict prioceiling"
-.Fc
-.\" To match the SUS, this should be:
-.\" .Ft int
-.\" .Fo pthread_mutexattr_getprioceiling
-.\" .Fa "pthread_mutexattr_t *restrict attr"
-.\" .Fa "int *restrict prioceiling"
-.\" .Fc
+.Fn pthread_mutexattr_destroy "pthread_mutexattr_t *attr"
.Ft int
-.Fo pthread_mutexattr_getprotocol
-.Fa "const pthread_mutexattr_t *restrict attr"
-.Fa "int *restrict protocol"
-.Fc
-.\" To match the SUS, this should be:
-.\" .Ft int
-.\" .Fo pthread_mutexattr_getprotocol
-.\" .Fa "pthread_mutexattr_t *restrict attr"
-.\" .Fa "int *restrict protocol"
-.\" .Fc
+.Fn pthread_mutexattr_setprioceiling "pthread_mutexattr_t *attr" "int prioceiling"
.Ft int
-.Fo pthread_mutexattr_gettype
-.Fa "const pthread_mutexattr_t *restrict attr"
-.Fa "int *restrict type"
-.Fc
-.\" To match the SUS, this should be:
-.\" .Ft int
-.\" .Fo pthread_mutexattr_gettype
-.\" .Fa "pthread_mutexattr_t *restrict attr"
-.\" .Fa "int *restrict type"
-.\" .Fc
+.Fn pthread_mutexattr_getprioceiling "pthread_mutexattr_t *attr" "int *prioceiling"
.Ft int
-.Fo pthread_mutexattr_init
-.Fa "pthread_mutexattr_t *attr"
-.Fc
+.Fn pthread_mutexattr_setprotocol "pthread_mutexattr_t *attr" "int protocol"
.Ft int
-.Fo pthread_mutexattr_setprioceiling
-.Fa "pthread_mutexattr_t *attr"
-.Fa "int prioceiling"
-.Fc
+.Fn pthread_mutexattr_getprotocol "pthread_mutexattr_t *attr" "int *protocol"
.Ft int
-.Fo pthread_mutexattr_setprotocol
-.Fa "pthread_mutexattr_t *attr"
-.Fa "int protocol"
-.Fc
+.Fn pthread_mutexattr_settype "pthread_mutexattr_t *attr" "int type"
.Ft int
-.Fo pthread_mutexattr_settype
-.Fa "pthread_mutexattr_t *attr"
-.Fa "int type"
-.Fc
+.Fn pthread_mutexattr_gettype "pthread_mutexattr_t *attr" "int *type"
.Sh DESCRIPTION
Mutex attributes are used to specify parameters to
.Fn pthread_mutex_init .
-One attribute object can be used in multiple calls to
-.Fn pthread_mutex_init ,
+Like with thread attributes,
+one attribute object can be used in multiple calls to
+.Xr pthread_mutex_init 3 ,
with or without modifications between calls.
.Pp
The
.Fn pthread_mutexattr_init
function initializes
.Fa attr
-with all of the default mutex attributes.
+with all the default mutex attributes.
.Pp
The
.Fn pthread_mutexattr_destroy
.Pp
The
.Fn pthread_mutexattr_settype
-function sets the mutex type value of the attribute. Valid mutex types are:
-.Dv PTHREAD_MUTEX_NORMAL ,
-.Dv PTHREAD_MUTEX_ERRORCHECK ,
-.Dv PTHREAD_MUTEX_RECURSIVE ,
-and
-.Dv PTHREAD_MUTEX_DEFAULT .
-The default mutex type for
-.Fn pthread_mutexattr_init
-is
-.Dv PTHREAD_MUTEX_DEFAULT .
-.Pp
-.Dv PTHREAD_MUTEX_NORMAL
-mutexes do not check for usage errors.
-.Dv PTHREAD_MUTEX_NORMAL
-mutexes will deadlock if reentered, and result in undefined behavior if a
-locked mutex is unlocked by another thread. Attempts to unlock an already
-unlocked
+functions set the mutex
+.Fa type
+value of the attribute.
+Valid mutex types are:
+.Bl -tag -width "XXX" -offset 2n
+.It Dv PTHREAD_MUTEX_NORMAL
+This type of mutex does not check for usage errors.
+It will deadlock if reentered, and result in undefined behavior if a
+locked mutex is unlocked by another thread.
+Attempts to unlock an already unlocked
.Dv PTHREAD_MUTEX_NORMAL
mutex will result in undefined behavior.
-.Pp
-.Dv PTHREAD_MUTEX_ERRORCHECK
-mutexes do check for usage errors.
+.It Dv PTHREAD_MUTEX_ERRORCHECK
+These mutexes do check for usage errors.
If an attempt is made to relock a
.Dv PTHREAD_MUTEX_ERRORCHECK
mutex without first dropping the lock, an error will be returned.
If a thread attempts to unlock a
.Dv PTHREAD_MUTEX_ERRORCHECK
-mutex that is locked by another thread, an error will be returned. If a
-thread attempts to unlock a
+mutex that is locked by another thread, an error will be returned.
+If a thread attempts to unlock a
.Dv PTHREAD_MUTEX_ERRORCHECK
thread that is unlocked, an error will be returned.
-.Pp
-.Dv PTHREAD_MUTEX_RECURSIVE
-mutexes allow recursive locking.
+.It Dv PTHREAD_MUTEX_RECURSIVE
+These mutexes allow recursive locking.
An attempt to relock a
.Dv PTHREAD_MUTEX_RECURSIVE
-mutex that is already locked by the same thread succeeds. An equivalent
-number of
+mutex that is already locked by the same thread succeeds.
+An equivalent number of
.Xr pthread_mutex_unlock 3
-calls are needed before the mutex will wake another thread waiting on this
-lock. If a thread attempts to unlock a
+calls are needed before the mutex will wake another thread waiting
+on this lock.
+If a thread attempts to unlock a
.Dv PTHREAD_MUTEX_RECURSIVE
-mutex that is locked by another thread, an error will be returned. If a thread attemps to unlock a
+mutex that is locked by another thread, an error will be returned.
+If a thread attempts to unlock a
.Dv PTHREAD_MUTEX_RECURSIVE
thread that is unlocked, an error will be returned.
.Pp
-.Dv PTHREAD_MUTEX_DEFAULT
-mutexes result in undefined behavior if reentered.
+It is advised that
+.Dv PTHREAD_MUTEX_RECURSIVE
+mutexes are not used with condition variables.
+This is because of the implicit unlocking done by
+.Xr pthread_cond_wait 3
+and
+.Xr pthread_cond_timedwait 3 .
+.It Dv PTHREAD_MUTEX_DEFAULT
+Also this type of mutex will cause undefined behavior if reentered.
Unlocking a
.Dv PTHREAD_MUTEX_DEFAULT
-mutex locked by another thread will result in undefined behavior. Attempts to unlock an already
-unlocked
+mutex locked by another thread will result in undefined behavior.
+Attempts to unlock an already unlocked
.Dv PTHREAD_MUTEX_DEFAULT
mutex will result in undefined behavior.
+This is the default mutex type for
+.Fn pthread_mutexattr_init .
+.El
.Pp
.Fn pthread_mutexattr_gettype
functions copy the type value of the attribute to the location pointed to by the second parameter.
If successful, these functions return 0.
Otherwise, an error number is returned to indicate the error.
.Sh ERRORS
+The
.Fn pthread_mutexattr_init
-will fail if:
+function shall fail if:
.Bl -tag -width Er
.It Bq Er ENOMEM
Out of memory.
.El
.Pp
+The
.Fn pthread_mutexattr_destroy
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
Invalid value for
.Fa attr .
.El
.Pp
+The
.Fn pthread_mutexattr_setprioceiling
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
Invalid value for
.Fa prioceiling .
.El
.Pp
+The
.Fn pthread_mutexattr_getprioceiling
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
Invalid value for
.Fa attr .
.El
.Pp
+The
.Fn pthread_mutexattr_setprotocol
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
Invalid value for
.Fa protocol .
.El
.Pp
+The
.Fn pthread_mutexattr_getprotocol
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
Invalid value for
.Fa attr .
.El
.Pp
+The
.Fn pthread_mutexattr_settype
-will fail if:
+function shall fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
-Invalid value for
-.Fa attr ,
-or invalid value for
-.Fa type .
+The value specified either by
+.Fa type
+or
+.Fa attr
+is invalid.
.El
.Pp
+The
.Fn pthread_mutexattr_gettype
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
Invalid value for
.Sh SEE ALSO
.Xr pthread_mutex_init 3
.Sh STANDARDS
+The
.Fn pthread_mutexattr_init
and
.Fn pthread_mutexattr_destroy
-conform to
+functions conform to
.St -p1003.1-96
.Pp
+The
.Fn pthread_mutexattr_setprioceiling ,
.Fn pthread_mutexattr_getprioceiling ,
.Fn pthread_mutexattr_setprotocol ,
.Fn pthread_mutexattr_settype ,
and
.Fn pthread_mutexattr_gettype
-conform to
+functions conform to
.St -susv2
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_once.3,v 1.6.2.6 2001/08/17 15:42:52 ru Exp $
+.\" $FreeBSD$
.\"
.Dd April 4, 1996
.Dt PTHREAD_ONCE 3
.Nm pthread_once
.Nd dynamic package initialization
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Pp
pthread_once_t
.Fa once_control
However, if
.Fn init_routine
is a cancellation point and is cancelled, the effect on
-.Fa once_control is as if
+.Fa once_control
+is as if
.Fn pthread_once
was never called.
.Pp
The constant
.Fa PTHREAD_ONCE_INIT
is defined by header
-.Aq Pa pthread.h .
+.In pthread.h .
.Pp
The behavior of
.Fn pthread_once
If successful, the
.Fn pthread_once
function will return zero.
-Otherwise, an error number will be returned to indicate the error.
+Otherwise an error number will be returned to
+indicate the error.
.Sh ERRORS
None.
.Sh STANDARDS
+The
.Fn pthread_once
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_rwlock_destroy.3,v 1.6 2001/10/01 16:09:09 ru Exp $
+.\" $FreeBSD$
.\"
.Dd August 4, 1998
.Dt PTHREAD_RWLOCK_DESTROY 3
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_rwlock_destroy
-.Fa "pthread_rwlock_t *rwlock"
-.Fc
+.Fn pthread_rwlock_destroy "pthread_rwlock_t *lock"
.Sh DESCRIPTION
The
.Fn pthread_rwlock_destroy
If successful, the
.Fn pthread_rwlock_destroy
function will return zero.
-Otherwise, an error number will be returned to indicate the error.
-.Sh SEE ALSO
-.Xr pthread_rwlock_init 3
-.Sh STANDARDS
-The
-.Fn pthread_rwlock_destroy
-function is expected to conform to
-.St -susv2 .
+Otherwise an error number will be returned
+to indicate the error.
.Sh ERRORS
The
.Fn pthread_rwlock_destroy
.Bl -tag -width Er
.It Bq Er EBUSY
The system has detected an attempt to destroy the object referenced by
-.Fa rwlock
+.Fa lock
while it is locked.
.It Bq Er EINVAL
The value specified by
-.Fa rwlock
+.Fa lock
is invalid.
.El
+.Sh SEE ALSO
+.Xr pthread_rwlock_init 3
+.Sh STANDARDS
+The
+.Fn pthread_rwlock_destroy
+function is expected to conform to
+.St -susv2 .
.Sh HISTORY
The
.Fn pthread_rwlock_destroy
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_rwlock_init.3,v 1.5 2001/10/01 16:09:09 ru Exp $
+.\" $FreeBSD$
.\"
.Dd August 4, 1998
.Dt PTHREAD_RWLOCK_INIT 3
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_rwlock_init
-.Fa "pthread_rwlock_t *restrict rwlock"
-.Fa "const pthread_rwlockattr_t *restrict attr"
-.Fc
+.Fn pthread_rwlock_init "pthread_rwlock_t *lock" "const pthread_rwlockattr_t *attr"
.Sh DESCRIPTION
The
.Fn pthread_rwlock_init
If successful, the
.Fn pthread_rwlock_init
function will return zero.
-Otherwise, an error number will be returned to indicate the error.
-.Sh SEE ALSO
-.Xr pthread_rwlock_destroy 3 ,
-.Xr pthread_rwlockattr_init 3 ,
-.Xr pthread_rwlockattr_setpshared 3
-.Sh STANDARDS
-The
-.Fn pthread_rwlock_init
-function is expected to conform to
-.St -susv2 .
+Otherwise an error number will be returned
+to indicate the error.
.Sh ERRORS
The
.Fn pthread_rwlock_init
.It Bq Er EBUSY
The system has detected an attempt to re-initialize the object
referenced by
-.Fa rwlock ,
+.Fa lock ,
a previously initialized but not yet destroyed read/write lock.
.It Bq Er EINVAL
The value specified by
.Fa attr
is invalid.
.El
+.Sh SEE ALSO
+.Xr pthread_rwlock_destroy 3 ,
+.Xr pthread_rwlockattr_init 3 ,
+.Xr pthread_rwlockattr_setpshared 3
+.Sh STANDARDS
+The
+.Fn pthread_rwlock_init
+function is expected to conform to
+.St -susv2 .
.Sh HISTORY
The
.Fn pthread_rwlock_init
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_rwlock_rdlock.3,v 1.4 2001/10/01 16:09:09 ru Exp $
+.\" $FreeBSD$
.\"
.Dd August 4, 1998
.Dt PTHREAD_RWLOCK_RDLOCK 3
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_rwlock_rdlock
-.Fa "pthread_rwlock_t *rwlock"
-.Fc
+.Fn pthread_rwlock_rdlock "pthread_rwlock_t *lock"
.Ft int
-.Fo pthread_rwlock_tryrdlock
-.Fa "pthread_rwlock_t *rwlock"
-.Fc
+.Fn pthread_rwlock_tryrdlock "pthread_rwlock_t *lock"
.Sh DESCRIPTION
The
.Fn pthread_rwlock_rdlock
function acquires a read lock on
-.Fa rwlock ,
+.Fa lock
provided that
-.Fa rwlock
+.Fa lock
is not presently held for writing and no writer threads are
-presently blocked on the lock. If the read lock cannot be
+presently blocked on the lock.
+If the read lock cannot be
immediately acquired, the calling thread blocks until it can
acquire the lock.
.Pp
cannot be immediately obtained (i.e., the lock is held for writing
or there are waiting writers).
.Pp
-A thread may hold multiple concurrent read locks. If so,
+A thread may hold multiple concurrent read locks.
+If so,
.Fn pthread_rwlock_unlock
must be called once for each lock obtained.
.Pp
and
.Fn pthread_rwlock_tryrdlock
functions will return zero.
-Otherwise, an error number will be returned to indicate the error.
-.Sh SEE ALSO
-.Xr pthread_rwlock_init 3 ,
-.Xr pthread_rwlock_trywrlock 3 ,
-.Xr pthread_rwlock_unlock 3 ,
-.Xr pthread_rwlock_wrlock 3
-.Sh STANDARDS
-The
-.Fn pthread_rwlock_rdlock
-and
-.Fn pthread_rwlock_tryrdlock
-functions are expected to conform to
-.St -susv2 .
+Otherwise an error number will be returned
+to indicate the error.
.Sh ERRORS
The
.Fn pthread_rwlock_tryrdlock
function will fail if:
.Bl -tag -width Er
.It Bq Er EBUSY
-The lock could not be acquired, because a writer holds the lock or
+The lock could not be acquired because a writer holds the lock or
was blocked on it.
.El
.Pp
functions may fail if:
.Bl -tag -width Er
.It Bq Er EAGAIN
-The lock could not be acquired, because the maximum number of read locks
+The lock could not be acquired because the maximum number of read locks
against
.Fa lock
has been exceeded.
.It Bq Er EDEADLK
The current thread already owns
-.Fa rwlock
+.Fa lock
for writing.
.It Bq Er EINVAL
The value specified by
-.Fa rwlock
+.Fa lock
is invalid.
.It Bq Er ENOMEM
Insufficient memory exists to initialize the lock (applies to
statically initialized locks only).
.El
+.Sh SEE ALSO
+.Xr pthread_rwlock_init 3 ,
+.Xr pthread_rwlock_trywrlock 3 ,
+.Xr pthread_rwlock_unlock 3 ,
+.Xr pthread_rwlock_wrlock 3
+.Sh STANDARDS
+The
+.Fn pthread_rwlock_rdlock
+and
+.Fn pthread_rwlock_tryrdlock
+functions are expected to conform to
+.St -susv2 .
.Sh HISTORY
The
.Fn pthread_rwlock_rdlock
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_rwlock_unlock.3,v 1.4 2001/10/01 16:09:09 ru Exp $
+.\" $FreeBSD$
.\"
.Dd August 4, 1998
.Dt PTHREAD_RWLOCK_UNLOCK 3
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_rwlock_unlock
-.Fa "pthread_rwlock_t *rwlock"
-.Fc
+.Fn pthread_rwlock_unlock "pthread_rwlock_t *lock"
.Sh DESCRIPTION
The
.Fn pthread_rwlock_unlock
If successful, the
.Fn pthread_rwlock_unlock
function will return zero.
-Otherwise, an error number will be returned to indicate the error.
+Otherwise an error number will be returned
+to indicate the error.
.Pp
The results are undefined if
-.Fa rwlock
+.Fa lock
is not held by the calling thread.
-.Sh SEE ALSO
-.Xr pthread_rwlock_rdlock 3 ,
-.Xr pthread_rwlock_wrlock 3
-.Sh STANDARDS
-The
-.Fn pthread_rwlock_unlock
-function is expected to conform to
-.St -susv2 .
.Sh ERRORS
The
.Fn pthread_rwlock_unlock
.Bl -tag -width Er
.It Bq Er EINVAL
The value specified by
-.Fa rwlock
+.Fa lock
is invalid.
.It Bq Er EPERM
The current thread does not own the read/write lock.
.El
+.Sh SEE ALSO
+.Xr pthread_rwlock_rdlock 3 ,
+.Xr pthread_rwlock_wrlock 3
+.Sh STANDARDS
+The
+.Fn pthread_rwlock_unlock
+function is expected to conform to
+.St -susv2 .
.Sh HISTORY
The
.Fn pthread_rwlock_unlock
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_rwlock_wrlock.3,v 1.4 2001/10/01 16:09:09 ru Exp $
+.\" $FreeBSD$
.\"
.Dd August 4, 1998
.Dt PTHREAD_RWLOCK_WRLOCK 3
.Os
.Sh NAME
-.Nm pthread_rwlock_trywrlock ,
-.Nm pthread_rwlock_wrlock
+.Nm pthread_rwlock_wrlock ,
+.Nm pthread_rwlock_trywrlock
.Nd acquire a read/write lock for writing
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_rwlock_trywrlock
-.Fa "pthread_rwlock_t *rwlock"
-.Fc
+.Fn pthread_rwlock_wrlock "pthread_rwlock_t *lock"
.Ft int
-.Fo pthread_rwlock_wrlock
-.Fa "pthread_rwlock_t *rwlock"
-.Fc
+.Fn pthread_rwlock_trywrlock "pthread_rwlock_t *lock"
.Sh DESCRIPTION
The
.Fn pthread_rwlock_wrlock
function blocks until a write lock can be acquired against
-.Fa rwlock .
+.Fa lock .
The
.Fn pthread_rwlock_trywrlock
function performs the same action, but does not block if the lock
and
.Fn pthread_rwlock_trywrlock
functions will return zero.
-Otherwise, an error number will be returned to indicate the error.
-.Sh SEE ALSO
-.Xr pthread_rwlock_trywrlock 3 ,
-.Xr pthread_rwlock_unlock 3 ,
-.Xr pthread_rwlock_wrlock 3
-.Sh STANDARDS
-The
-.Fn pthread_rwlock_wrlock
-and
-.Fn pthread_rwlock_trywrlock
-functions are expected to conform to
-.St -susv2 .
+Otherwise an error number will be returned
+to indicate the error.
.Sh ERRORS
The
.Fn pthread_rwlock_trywrlock
or writing).
.It Bq Er EINVAL
The value specified by
-.Fa rwlock
+.Fa lock
is invalid.
.It Bq Er ENOMEM
Insufficient memory exists to initialize the lock (applies to
statically initialized locks only).
.El
+.Sh SEE ALSO
+.Xr pthread_rwlock_init 3 ,
+.Xr pthread_rwlock_rdlock 3 ,
+.Xr pthread_rwlock_tryrdlock 3 ,
+.Xr pthread_rwlock_unlock 3
+.Sh STANDARDS
+The
+.Fn pthread_rwlock_wrlock
+and
+.Fn pthread_rwlock_trywrlock
+functions are expected to conform to
+.St -susv2 .
.Sh HISTORY
The
.Fn pthread_rwlock_wrlock
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_rwlockattr_destroy.3,v 1.6 2001/10/01 16:09:09 ru Exp $
+.\" $FreeBSD$
.\"
.Dd August 4, 1998
.Dt PTHREAD_RWLOCKATTR_DESTROY 3
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_rwlockattr_destroy
-.Fa "pthread_rwlockattr_t *attr"
-.Fc
+.Fn pthread_rwlockattr_destroy "pthread_rwlockattr_t *attr"
.Sh DESCRIPTION
The
.Fn pthread_rwlockattr_destroy
-function is used to destroy a read/write lock attribute object,
+function is used to destroy a read/write lock attribute object
previously created with
.Fn pthread_rwlockattr_init .
.Sh RETURN VALUES
If successful, the
.Fn pthread_rwlockattr_destroy
function will return zero.
-Otherwise, an error number will be returned to indicate the error.
-.Sh SEE ALSO
-.Xr pthread_rwlockattr_init 3
-.Sh STANDARDS
-The
-.Fn pthread_rwlockattr_destroy
-function is expected to conform to
-.St -susv2 .
+Otherwise an error number will be returned
+to indicate the error.
.Sh ERRORS
+The
.Fn pthread_rwlockattr_destroy
-may fail if:
+function may fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The value specified by
.Fa attr
is invalid.
.El
+.Sh SEE ALSO
+.Xr pthread_rwlockattr_init 3
+.Sh STANDARDS
+The
+.Fn pthread_rwlockattr_destroy
+function is expected to conform to
+.St -susv2 .
.Sh HISTORY
The
.Fn pthread_rwlockattr_destroy
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_rwlockattr_getpshared.3,v 1.8 2001/10/01 16:09:09 ru Exp $
+.\" $FreeBSD$
.\"
.Dd March 22, 1999
.Dt PTHREAD_RWLOCKATTR_GETPSHARED 3
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_rwlockattr_getpshared
-.Fa "const pthread_rwlockattr_t *restrict attr"
-.Fa "int *restrict pshared"
-.Fc
+.Fn pthread_rwlockattr_getpshared "const pthread_rwlockattr_t *attr" "int *pshared"
.Sh DESCRIPTION
The
.Fn pthread_rwlockattr_getpshared
-function is used to get the process-shared setting of a read/write
-lock attribute object. The setting is returned via
+function is used to get the process shared setting of a read/write
+lock attribute object.
+The setting is returned via
.Fa pshared ,
and may be one of two values:
.Bl -tag -width PTHREAD_PROCESS_PRIVATE
read/write lock resides can manipulate the lock.
.It Dv PTHREAD_PROCESS_PRIVATE
Only threads created within the same process as the thread that
-initialized the read/write lock can manipulate the lock. This is
+initialized the read/write lock can manipulate the lock.
+This is
the default value.
.El
.Sh RETURN VALUES
If successful, the
.Fn pthread_rwlockattr_getpshared
function will return zero.
-Otherwise, an error number will be returned to indicate the error.
+Otherwise an error number will be returned
+to indicate the error.
+.Sh ERRORS
+The
+.Fn pthread_rwlockattr_getpshared
+function may fail if:
+.Bl -tag -width Er
+.It Bq Er EINVAL
+The value specified by
+.Fa attr
+is invalid.
+.El
.Sh SEE ALSO
.Xr pthread_rwlock_init 3 ,
.Xr pthread_rwlockattr_init 3 ,
.Fn pthread_rwlockattr_getpshared
function is expected to conform to
.St -susv2 .
-.Sh ERRORS
-.Fn pthread_rwlockattr_getpshared
-may fail if:
-.Bl -tag -width Er
-.It Bq Er EINVAL
-The value specified by
-.Fa attr
-is invalid.
-.El
.Sh HISTORY
The
.Fn pthread_rwlockattr_getpshared
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_rwlockattr_init.3,v 1.6 2001/10/01 16:09:09 ru Exp $
+.\" $FreeBSD$
.\"
.Dd August 4, 1998
.Dt PTHREAD_RWLOCKATTR_INIT 3
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_rwlockattr_init
-.Fa "pthread_rwlockattr_t *attr"
-.Fc
+.Fn pthread_rwlockattr_init "pthread_rwlockattr_t *attr"
.Sh DESCRIPTION
The
.Fn pthread_rwlockattr_init
-function is used to initialize a read/write lock attribute object.
+function is used to initialize a read/write lock attributes object.
.Sh RETURN VALUES
If successful, the
.Fn pthread_rwlockattr_init
function will return zero.
-Otherwise, an error number will be returned to indicate the error.
+Otherwise an error number will be returned
+to indicate the error.
+.Sh ERRORS
+The
+.Fn pthread_rwlockattr_init
+function will fail if:
+.Bl -tag -width Er
+.It Bq Er ENOMEM
+Insufficient memory exists to initialize the attribute object.
+.El
.Sh SEE ALSO
.Xr pthread_rwlock_init 3 ,
.Xr pthread_rwlockattr_destroy 3 ,
.Fn pthread_rwlockattr_init
function is expected to conform to
.St -susv2 .
-.Sh ERRORS
-.Fn pthread_rwlockattr_init
-will fail if:
-.Bl -tag -width Er
-.It Bq Er ENOMEM
-Insufficient memory exists to initialize the attribute object.
-.El
.Sh HISTORY
The
.Fn pthread_rwlockattr_init
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_rwlockattr_setpshared.3,v 1.7 2001/10/01 16:09:09 ru Exp $
+.\" $FreeBSD$
.\"
.Dd August 4, 1998
.Dt PTHREAD_RWLOCKATTR_SETPSHARED 3
.Sh SYNOPSIS
.In pthread.h
.Ft int
-.Fo pthread_rwlockattr_setpshared
-.Fa "pthread_rwlockattr_t *attr"
-.Fa "int pshared"
-.Fc
+.Fn pthread_rwlockattr_setpshared "pthread_rwlockattr_t *attr" "int pshared"
.Sh DESCRIPTION
The
.Fn pthread_rwlockattr_setpshared
-function sets the process-shared attribute of
+function sets the process shared attribute of
.Fa attr
to the value referenced by
.Fa pshared .
+The
.Fa pshared
-may be one of two values:
+argument may be one of two values:
.Bl -tag -width PTHREAD_PROCESS_PRIVATE
.It Dv PTHREAD_PROCESS_SHARED
Any thread of any process that has access to the memory where the
read/write lock resides can manipulate the lock.
.It Dv PTHREAD_PROCESS_PRIVATE
Only threads created within the same process as the thread that
-initialized the read/write lock can manipulate the lock. This is
+initialized the read/write lock can manipulate the lock.
+This is
the default value.
.El
.Sh RETURN VALUES
If successful, the
.Fn pthread_rwlockattr_setpshared
function will return zero.
-Otherwise, an error number will be returned to indicate the error.
-.Sh SEE ALSO
-.Xr pthread_rwlock_init 3 ,
-.Xr pthread_rwlockattr_init 3 ,
-.Xr pthread_rwlockattr_setpshared 3
-.Sh STANDARDS
-The
-.Fn pthread_rwlockattr_setpshared
-function is expected to conform to
-.St -susv2 .
+Otherwise an error number will be returned
+to indicate the error.
.Sh ERRORS
+The
.Fn pthread_rwlockattr_setpshared
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The value specified by
.Fa pshared
is invalid.
.El
+.Sh SEE ALSO
+.Xr pthread_rwlock_init 3 ,
+.Xr pthread_rwlockattr_getpshared 3 ,
+.Xr pthread_rwlockattr_init 3
+.Sh STANDARDS
+The
+.Fn pthread_rwlockattr_setpshared
+function is expected to conform to
+.St -susv2 .
.Sh HISTORY
The
.Fn pthread_rwlockattr_setpshared
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_self.3,v 1.4.2.4 2001/08/17 15:42:52 ru Exp $
+.\" $FreeBSD$
.\"
.Dd April 4, 1996
.Dt PTHREAD_SELF 3
.Nm pthread_self
.Nd get the calling thread's ID
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft pthread_t
.Fn pthread_self "void"
.Sh DESCRIPTION
.Xr pthread_create 3 ,
.Xr pthread_equal 3
.Sh STANDARDS
+The
.Fn pthread_self
-conforms to
+function conforms to
.St -p1003.1-96 .
-.\" $FreeBSD: src/lib/libc_r/man/pthread_testcancel.3,v 1.2.2.3 2001/08/17 15:42:52 ru Exp $
-.Dd January 17, 1999
+.\" $FreeBSD$
+.Dd June 11, 2013
.Dt PTHREAD_TESTCANCEL 3
.Os
.Sh NAME
.Nm pthread_testcancel
.Nd set cancelability state
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_setcancelstate
-.Fa "int state"
-.Fa "int *oldstate"
-.Fc
+.Fn pthread_setcancelstate "int state" "int *oldstate"
.Ft int
-.Fo pthread_setcanceltype
-.Fa "int type"
-.Fa "int *oldtype"
-.Fc
+.Fn pthread_setcanceltype "int type" "int *oldtype"
.Ft void
-.Fo pthread_testcancel
-.Fa "void"
-.Fc
+.Fn pthread_testcancel "void"
.Sh DESCRIPTION
The
.Fn pthread_setcancelstate
function atomically both sets the calling thread's cancelability state
to the indicated
.Fa state
-and returns the previous cancelability state at the location referenced by
+and, if
+.Fa oldstate
+is not
+.Dv NULL ,
+returns the previous cancelability state at the location referenced by
.Fa oldstate .
Legal values for
.Fa state
function atomically both sets the calling thread's cancelability type
to the indicated
.Fa type
-and returns the previous cancelability type at the location referenced by
+and, if
+.Fa oldtype
+is not
+.Dv NULL ,
+returns the previous cancelability type at the location referenced by
.Fa oldtype .
Legal values for
.Fa type
.Pp
This follows from a modularity argument: if the client of an object (or the
client of an object that uses that object) has disabled cancelability, it is
-because the client doesn't want to have to worry about how to clean up if the
+because the client does not want to have to worry about how to clean up if the
thread is canceled while executing some sequence of actions.
If an object
is called in such a state and it enables cancelability and a cancellation
or
.Em asynchronous
upon entry to an object.
-But, as with the cancelability state, on exit from
+But as with the cancelability state, on exit from
an object that cancelability type should always be restored to its value on
entry to the object.
.Pp
.Sh SEE ALSO
.Xr pthread_cancel 3
.Sh STANDARDS
+The
.Fn pthread_testcancel
-conforms to
+function conforms to
.St -p1003.1-96 .
+The standard allows implementations to make many more functions
+cancellation points.
.Sh AUTHORS
-This man page was written by
-.An David Leonard Aq d@openbsd.org
+This manual page was written by
+.An David Leonard Aq Mt d@openbsd.org
for the
.Ox
implementation of
--- /dev/null
+.\" Copyright (c) 2003 Alexey Zelkin <phantom@FreeBSD.org>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd February 13, 2003
+.Dt PTHREAD_SETNAME_NP 3
+.Os
+.Sh NAME
+.Nm pthread_setname_np
+.Nd set the thread name
+.Sh SYNOPSIS
+.In pthread.h
+.Ft void
+.Fn pthread_setname_np "const char *name"
+.Sh DESCRIPTION
+The
+.Fn pthread_setname_np
+function sets the internal name for the calling thread to the
+string value specified by the
+.Fa name
+argument.
+.Sh AUTHORS
+This manual page was written by
+.An Alexey Zelkin Aq Mt phantom@FreeBSD.org .
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_setspecific.3,v 1.6.2.4 2001/08/17 15:42:52 ru Exp $
+.\" $FreeBSD$
.\"
.Dd April 4, 1996
.Dt PTHREAD_SETSPECIFIC 3
.Nm pthread_setspecific
.Nd set a thread-specific data value
.Sh SYNOPSIS
-.Fd #include <pthread.h>
+.In pthread.h
.Ft int
-.Fo pthread_setspecific
-.Fa "pthread_key_t key"
-.Fa "const void *value"
-.Fc
+.Fn pthread_setspecific "pthread_key_t key" "const void *value"
.Sh DESCRIPTION
The
.Fn pthread_setspecific
.Fa key
obtained via a previous call to
.Fn pthread_key_create .
-Different threads may bind different values to the same key.
+Different threads can bind different values to the same key.
These values are
typically pointers to blocks of dynamically allocated memory that have been
reserved for use by the calling thread.
The effect of calling
.Fn pthread_setspecific
with a key value not obtained from
-.Fn pthread_key_create ,
+.Fn pthread_key_create
or after
.Fa key
has been deleted with
-.Fn pthread_key_delete ,
+.Fn pthread_key_delete
is undefined.
.Pp
+The
.Fn pthread_setspecific
-may be called from a thread-specific data destructor function;
-however, this may result in lost storage or infinite loops.
+function may be called from a thread-specific data destructor function,
+however this may result in lost storage or infinite loops.
.Sh RETURN VALUES
-If successful, the
+If successful, the
.Fn pthread_setspecific
function will return zero.
-Otherwise, an error number will be returned to indicate the error.
+Otherwise an error number will be returned to
+indicate the error.
.Sh ERRORS
+The
.Fn pthread_setspecific
-will fail if:
+function will fail if:
.Bl -tag -width Er
+.It Bq Er ENOMEM
+Insufficient memory exists to associate the value with the
+.Fa key .
.It Bq Er EINVAL
The
.Fa key
value is invalid.
-.It Bq Er ENOMEM
-Insufficient memory exists to associate the value with the
-.Fa key .
.El
.Sh SEE ALSO
.Xr pthread_getspecific 3 ,
.Xr pthread_key_create 3 ,
.Xr pthread_key_delete 3
.Sh STANDARDS
+The
.Fn pthread_setspecific
-conforms to
+function conforms to
.St -p1003.1-96 .
.\" OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
.\" EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
-.\" $FreeBSD: src/lib/libc_r/man/pthread_sigmask.3,v 1.9 2001/10/01 16:09:09 ru Exp $
-.Dd April 27, 2000
-.Dt PTHREAD_SIGMASK 2
+.\" $FreeBSD$
+.Dd February 19, 2011
+.Dt PTHREAD_SIGMASK 3
.Os
.Sh NAME
.Nm pthread_sigmask
.Sh SYNOPSIS
.In signal.h
.Ft int
-.Fo pthread_sigmask
-.Fa "int how"
-.Fa "const sigset_t *restrict set"
-.Fa "sigset_t *restrict oset"
-.Fc
+.Fn pthread_sigmask "int how" "const sigset_t * restrict set" \
+ "sigset_t * restrict oset"
.Sh DESCRIPTION
The
.Fn pthread_sigmask
returns 0.
Otherwise, an error is returned.
.Sh ERRORS
+The
.Fn pthread_sigmask
-will fail if:
+function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
.Fa how
--- /dev/null
+.\" Copyright (c) 2016 Apple Inc. All rights reserved.
+.\"
+.\" @APPLE_LICENSE_HEADER_START@
+.\"
+.\" This file contains Original Code and/or Modifications of Original Code
+.\" as defined in and that are subject to the Apple Public Source License
+.\" Version 2.0 (the 'License'). You may not use this file except in
+.\" compliance with the License. Please obtain a copy of the License at
+.\" http://www.opensource.apple.com/apsl/ and read it before using this
+.\" file.
+.\"
+.\" The Original Code and all software distributed under the License are
+.\" distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+.\" EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+.\" INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+.\" FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+.\" Please see the License for the specific language governing rights and
+.\" limitations under the License.
+.\"
+.\" @APPLE_LICENSE_HEADER_END@
+.\"
+.Dd April 12, 2016
+.Dt PTHREAD_THREADID_NP 3
+.Os
+.Sh NAME
+.Nm pthread_threadid_np
+.Nd get the calling thread's unique ID
+.Sh SYNOPSIS
+.In pthread.h
+.Ft int
+.Fn pthread_threadid_np "pthread_t thread" "uint64_t *thread_id"
+.Sh DESCRIPTION
+The
+.Fn pthread_threadid_np
+function stores the system-wide unique integral ID of
+.Fa thread
+in the location specified by
+.Fa thread_id .
+If
+.Fa thread
+is
+.Dv NULL ,
+the ID of the current thread is provided.
+.Sh RETURN VALUES
+If successful, the
+.Fn pthread_threadid_np
+function will return zero.
+Otherwise an error number will be returned to
+indicate the error.
+.Sh ERRORS
+The
+.Fn pthread_threadid_np
+function will fail if:
+.Bl -tag -width Er
+.It Bq Er EINVAL
+.Dv NULL
+value for
+.Fa thread_id .
+.It Bq Er ESRCH
+Non-existent thread
+.Fa thread .
+.El
+.Sh SEE ALSO
+.Xr pthread_self 3
--- /dev/null
+.\" $OpenBSD: pthread_yield.3,v 1.3 2004/01/25 14:48:32 jmc Exp $
+.\"
+.\" PUBLIC DOMAIN: No Rights Reserved. Marco S Hyman <marc@snafu.org>
+.\"
+.\" $FreeBSD$
+.\"
+.Dd September 18, 2006
+.Dt PTHREAD_YIELD_NP 3
+.Os
+.Sh NAME
+.Nm pthread_yield_np
+.Nd yield control of the current thread
+.Sh SYNOPSIS
+.In pthread.h
+.Ft void
+.Fn pthread_yield_np "void"
+.Sh DESCRIPTION
+The
+.Fn pthread_yield_np
+function causes the running thread to relinquish the processor.
+++ /dev/null
-/*
- * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
- *
- * @APPLE_APACHE_LICENSE_HEADER_START@
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * @APPLE_APACHE_LICENSE_HEADER_END@
- */
-
-/*
- * IMPORTANT: This header file describes INTERNAL interfaces to libplatform
- * which are subject to change in future releases of Mac OS X. Any applications
- * relying on these interfaces WILL break.
- */
-
-#ifndef __OS_ATOMIC__
-#define __OS_ATOMIC__
-
-// generate error during codegen
-#define _os_atomic_unimplemented() \
- ({ __asm__(".err unimplemented"); })
-
-#pragma mark -
-#pragma mark memory_order
-
-typedef enum _os_atomic_memory_order
-{
- _os_atomic_memory_order_relaxed,
- _os_atomic_memory_order_consume,
- _os_atomic_memory_order_acquire,
- _os_atomic_memory_order_release,
- _os_atomic_memory_order_acq_rel,
- _os_atomic_memory_order_seq_cst,
-} _os_atomic_memory_order;
-
-#if !OS_ATOMIC_UP
-
-#define os_atomic_memory_order_relaxed \
- _os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_acquire \
- _os_atomic_memory_order_acquire
-#define os_atomic_memory_order_release \
- _os_atomic_memory_order_release
-#define os_atomic_memory_order_acq_rel \
- _os_atomic_memory_order_acq_rel
-#define os_atomic_memory_order_seq_cst \
- _os_atomic_memory_order_seq_cst
-
-#else // OS_ATOMIC_UP
-
-#define os_atomic_memory_order_relaxed \
- _os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_acquire \
- _os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_release \
- _os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_acq_rel \
- _os_atomic_memory_order_relaxed
-#define os_atomic_memory_order_seq_cst \
- _os_atomic_memory_order_relaxed
-
-#endif // OS_ATOMIC_UP
-
-#if __has_extension(c_generic_selections)
-#define _os_atomic_basetypeof(p) \
- typeof(*_Generic((p), \
- int*: (int*)(p), \
- volatile int*: (int*)(p), \
- unsigned int*: (unsigned int*)(p), \
- volatile unsigned int*: (unsigned int*)(p), \
- long*: (long*)(p), \
- volatile long*: (long*)(p), \
- unsigned long*: (unsigned long*)(p), \
- volatile unsigned long*: (unsigned long*)(p), \
- long long*: (long long*)(p), \
- volatile long long*: (long long*)(p), \
- unsigned long long*: (unsigned long long*)(p), \
- volatile unsigned long long*: (unsigned long long*)(p), \
- default: (void**)(p)))
-#endif
-
-#if __has_extension(c_atomic) && __has_extension(c_generic_selections)
-#pragma mark -
-#pragma mark c11
-
-#define _os_atomic_c11_atomic(p) \
- _Generic((p), \
- int*: (_Atomic(int)*)(p), \
- volatile int*: (volatile _Atomic(int)*)(p), \
- unsigned int*: (_Atomic(unsigned int)*)(p), \
- volatile unsigned int*: (volatile _Atomic(unsigned int)*)(p), \
- long*: (_Atomic(long)*)(p), \
- volatile long*: (volatile _Atomic(long)*)(p), \
- unsigned long*: (_Atomic(unsigned long)*)(p), \
- volatile unsigned long*: (volatile _Atomic(unsigned long)*)(p), \
- long long*: (_Atomic(long long)*)(p), \
- volatile long long*: (volatile _Atomic(long long)*)(p), \
- unsigned long long*: (_Atomic(unsigned long long)*)(p), \
- volatile unsigned long long*: \
- (volatile _Atomic(unsigned long long)*)(p), \
- default: (volatile _Atomic(void*)*)(p))
-
-#define _os_atomic_barrier(m) \
- ({ __c11_atomic_thread_fence(os_atomic_memory_order_##m); })
-#define os_atomic_load(p, m) \
- ({ _os_atomic_basetypeof(p) _r = \
- __c11_atomic_load(_os_atomic_c11_atomic(p), \
- os_atomic_memory_order_##m); (typeof(*(p)))_r; })
-#define os_atomic_store(p, v, m) \
- ({ _os_atomic_basetypeof(p) _v = (v); \
- __c11_atomic_store(_os_atomic_c11_atomic(p), _v, \
- os_atomic_memory_order_##m); })
-#define os_atomic_xchg(p, v, m) \
- ({ _os_atomic_basetypeof(p) _v = (v), _r = \
- __c11_atomic_exchange(_os_atomic_c11_atomic(p), _v, \
- os_atomic_memory_order_##m); (typeof(*(p)))_r; })
-#define os_atomic_cmpxchg(p, e, v, m) \
- ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); \
- __c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
- &_r, _v, os_atomic_memory_order_##m, \
- os_atomic_memory_order_relaxed); })
-#define os_atomic_cmpxchgv(p, e, v, g, m) \
- ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
- __c11_atomic_compare_exchange_strong(_os_atomic_c11_atomic(p), \
- &_r, _v, os_atomic_memory_order_##m, \
- os_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; })
-#define os_atomic_cmpxchgvw(p, e, v, g, m) \
- ({ _os_atomic_basetypeof(p) _v = (v), _r = (e); _Bool _b = \
- __c11_atomic_compare_exchange_weak(_os_atomic_c11_atomic(p), \
- &_r, _v, os_atomic_memory_order_##m, \
- os_atomic_memory_order_relaxed); *(g) = (typeof(*(p)))_r; _b; })
-#define _os_atomic_c11_op(p, v, m, o, op) \
- ({ _os_atomic_basetypeof(p) _v = (v), _r = \
- __c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), _v, \
- os_atomic_memory_order_##m); (typeof(*(p)))(_r op _v); })
-#define _os_atomic_c11_op_orig(p, v, m, o, op) \
- ({ _os_atomic_basetypeof(p) _v = (v), _r = \
- __c11_atomic_fetch_##o(_os_atomic_c11_atomic(p), _v, \
- os_atomic_memory_order_##m); (typeof(*(p)))_r; })
-
-#define os_atomic_add(p, v, m) \
- _os_atomic_c11_op((p), (v), m, add, +)
-#define os_atomic_add_orig(p, v, m) \
- _os_atomic_c11_op_orig((p), (v), m, add, +)
-#define os_atomic_sub(p, v, m) \
- _os_atomic_c11_op((p), (v), m, sub, -)
-#define os_atomic_sub_orig(p, v, m) \
- _os_atomic_c11_op_orig((p), (v), m, sub, -)
-#define os_atomic_and(p, v, m) \
- _os_atomic_c11_op((p), (v), m, and, &)
-#define os_atomic_and_orig(p, v, m) \
- _os_atomic_c11_op_orig((p), (v), m, and, &)
-#define os_atomic_or(p, v, m) \
- _os_atomic_c11_op((p), (v), m, or, |)
-#define os_atomic_or_orig(p, v, m) \
- _os_atomic_c11_op_orig((p), (v), m, or, |)
-#define os_atomic_xor(p, v, m) \
- _os_atomic_c11_op((p), (v), m, xor, ^)
-#define os_atomic_xor_orig(p, v, m) \
- _os_atomic_c11_op_orig((p), (v), m, xor, ^)
-
-#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 2)
-#pragma mark -
-#pragma mark gnu99
-
-#define _os_atomic_full_barrier() \
- __sync_synchronize()
-#define _os_atomic_barrier(m) \
- ({ switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_relaxed: \
- break; \
- default: \
- _os_atomic_full_barrier(); break; \
- } })
-// seq_cst: only emulate explicit store(seq_cst) -> load(seq_cst)
-#define os_atomic_load(p, m) \
- ({ typeof(*(p)) _r = *(p); \
- switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_seq_cst: \
- _os_atomic_barrier(m); /* fallthrough */ \
- case _os_atomic_memory_order_relaxed: \
- break; \
- default: \
- _os_atomic_unimplemented(); break; \
- } _r; })
-#define os_atomic_store(p, v, m) \
- ({ switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_release: \
- case _os_atomic_memory_order_seq_cst: \
- _os_atomic_barrier(m); /* fallthrough */ \
- case _os_atomic_memory_order_relaxed: \
- *(p) = (v); break; \
- default: \
- _os_atomic_unimplemented(); break; \
- } switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_seq_cst: \
- _os_atomic_barrier(m); break; \
- default: \
- break; \
- } })
-#if __has_builtin(__sync_swap)
-#define os_atomic_xchg(p, v, m) \
- ((typeof(*(p)))__sync_swap((p), (v)))
-#else
-#define os_atomic_xchg(p, v, m) \
- ((typeof(*(p)))__sync_lock_test_and_set((p), (v)))
-#endif
-#define os_atomic_cmpxchg(p, e, v, m) \
- __sync_bool_compare_and_swap((p), (e), (v))
-#define os_atomic_cmpxchgv(p, e, v, g, m) \
- ({ typeof(*(g)) _e = (e), _r = \
- __sync_val_compare_and_swap((p), _e, (v)); \
- bool _b = (_e == _r); *(g) = _r; _b; })
-#define os_atomic_cmpxchgvw(p, e, v, g, m) \
- os_atomic_cmpxchgv((p), (e), (v), (g), m)
-
-#define os_atomic_add(p, v, m) \
- __sync_add_and_fetch((p), (v))
-#define os_atomic_add_orig(p, v, m) \
- __sync_fetch_and_add((p), (v))
-#define os_atomic_sub(p, v, m) \
- __sync_sub_and_fetch((p), (v))
-#define os_atomic_sub_orig(p, v, m) \
- __sync_fetch_and_sub((p), (v))
-#define os_atomic_and(p, v, m) \
- __sync_and_and_fetch((p), (v))
-#define os_atomic_and_orig(p, v, m) \
- __sync_fetch_and_and((p), (v))
-#define os_atomic_or(p, v, m) \
- __sync_or_and_fetch((p), (v))
-#define os_atomic_or_orig(p, v, m) \
- __sync_fetch_and_or((p), (v))
-#define os_atomic_xor(p, v, m) \
- __sync_xor_and_fetch((p), (v))
-#define os_atomic_xor_orig(p, v, m) \
- __sync_fetch_and_xor((p), (v))
-
-#if defined(__x86_64__) || defined(__i386__)
-// GCC emits nothing for __sync_synchronize() on x86_64 & i386
-#undef _os_atomic_full_barrier
-#define _os_atomic_full_barrier() \
- ({ __asm__ __volatile__( \
- "mfence" \
- : : : "memory"); })
-#undef os_atomic_load
-#define os_atomic_load(p, m) \
- ({ switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_seq_cst: \
- case _os_atomic_memory_order_relaxed: \
- break; \
- default: \
- _os_atomic_unimplemented(); break; \
- } *(p); })
-// xchg is faster than store + mfence
-#undef os_atomic_store
-#define os_atomic_store(p, v, m) \
- ({ switch(os_atomic_memory_order_##m) { \
- case _os_atomic_memory_order_relaxed: \
- case _os_atomic_memory_order_release: \
- *(p) = (v); break; \
- case _os_atomic_memory_order_seq_cst: \
- (void)os_atomic_xchg((p), (v), m); break; \
- default:\
- _os_atomic_unimplemented(); break; \
- } })
-#endif
-
-#else
-#error "Please upgrade to GCC 4.2 or newer."
-#endif
-
-#pragma mark -
-#pragma mark generic
-
-// assume atomic builtins provide barriers
-#define os_atomic_barrier(m)
-// see comment in os_once.c
-#define os_atomic_maximally_synchronizing_barrier() \
- _os_atomic_barrier(seq_cst)
-
-#define os_atomic_load2o(p, f, m) \
- os_atomic_load(&(p)->f, m)
-#define os_atomic_store2o(p, f, v, m) \
- os_atomic_store(&(p)->f, (v), m)
-#define os_atomic_xchg2o(p, f, v, m) \
- os_atomic_xchg(&(p)->f, (v), m)
-#define os_atomic_cmpxchg2o(p, f, e, v, m) \
- os_atomic_cmpxchg(&(p)->f, (e), (v), m)
-#define os_atomic_cmpxchgv2o(p, f, e, v, g, m) \
- os_atomic_cmpxchgv(&(p)->f, (e), (v), (g), m)
-#define os_atomic_cmpxchgvw2o(p, f, e, v, g, m) \
- os_atomic_cmpxchgvw(&(p)->f, (e), (v), (g), m)
-#define os_atomic_add2o(p, f, v, m) \
- os_atomic_add(&(p)->f, (v), m)
-#define os_atomic_add_orig2o(p, f, v, m) \
- os_atomic_add_orig(&(p)->f, (v), m)
-#define os_atomic_sub2o(p, f, v, m) \
- os_atomic_sub(&(p)->f, (v), m)
-#define os_atomic_sub_orig2o(p, f, v, m) \
- os_atomic_sub_orig(&(p)->f, (v), m)
-#define os_atomic_and2o(p, f, v, m) \
- os_atomic_and(&(p)->f, (v), m)
-#define os_atomic_and_orig2o(p, f, v, m) \
- os_atomic_and_orig(&(p)->f, (v), m)
-#define os_atomic_or2o(p, f, v, m) \
- os_atomic_or(&(p)->f, (v), m)
-#define os_atomic_or_orig2o(p, f, v, m) \
- os_atomic_or_orig(&(p)->f, (v), m)
-#define os_atomic_xor2o(p, f, v, m) \
- os_atomic_xor(&(p)->f, (v), m)
-#define os_atomic_xor_orig2o(p, f, v, m) \
- os_atomic_xor_orig(&(p)->f, (v), m)
-
-#define os_atomic_inc(p, m) \
- os_atomic_add((p), 1, m)
-#define os_atomic_inc_orig(p, m) \
- os_atomic_add_orig((p), 1, m)
-#define os_atomic_inc2o(p, f, m) \
- os_atomic_add2o(p, f, 1, m)
-#define os_atomic_inc_orig2o(p, f, m) \
- os_atomic_add_orig2o(p, f, 1, m)
-#define os_atomic_dec(p, m) \
- os_atomic_sub((p), 1, m)
-#define os_atomic_dec_orig(p, m) \
- os_atomic_sub_orig((p), 1, m)
-#define os_atomic_dec2o(p, f, m) \
- os_atomic_sub2o(p, f, 1, m)
-#define os_atomic_dec_orig2o(p, f, m) \
- os_atomic_sub_orig2o(p, f, 1, m)
-
-#define os_atomic_tsx_xacq_cmpxchgv(p, e, v, g) \
- os_atomic_cmpxchgv((p), (e), (v), (g), acquire)
-#define os_atomic_tsx_xrel_store(p, v) \
- os_atomic_store(p, v, release)
-#define os_atomic_tsx_xacq_cmpxchgv2o(p, f, e, v, g) \
- os_atomic_tsx_xacq_cmpxchgv(&(p)->f, (e), (v), (g))
-#define os_atomic_tsx_xrel_store2o(p, f, v) \
- os_atomic_tsx_xrel_store(&(p)->f, (v))
-
-#if defined(__x86_64__) || defined(__i386__)
-#pragma mark -
-#pragma mark x86
-
-#undef os_atomic_maximally_synchronizing_barrier
-#ifdef __LP64__
-#define os_atomic_maximally_synchronizing_barrier() \
- ({ unsigned long _clbr; __asm__ __volatile__( \
- "cpuid" \
- : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory"); })
-#else
-#ifdef __llvm__
-#define os_atomic_maximally_synchronizing_barrier() \
- ({ unsigned long _clbr; __asm__ __volatile__( \
- "cpuid" \
- : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory"); })
-#else // gcc does not allow inline i386 asm to clobber ebx
-#define os_atomic_maximally_synchronizing_barrier() \
- ({ unsigned long _clbr; __asm__ __volatile__( \
- "pushl %%ebx\n\t" \
- "cpuid\n\t" \
- "popl %%ebx" \
- : "=a" (_clbr) : "0" (0) : "ecx", "edx", "cc", "memory"); })
-#endif
-#endif
-
-
-#endif
-
-
-#endif // __OS_ATOMIC__
+++ /dev/null
-/*
- * Copyright (c) 2013 Apple Inc. All rights reserved.
- *
- * @APPLE_APACHE_LICENSE_HEADER_START@
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * @APPLE_APACHE_LICENSE_HEADER_END@
- */
-
-/*
- * IMPORTANT: This header file describes INTERNAL interfaces to libplatform
- * which are subject to change in future releases of Mac OS X. Any applications
- * relying on these interfaces WILL break.
- */
-
-// Generated by os/atomic_gen.sh
-
-#ifndef __OS_ATOMIC_LLSC__
-#define __OS_ATOMIC_LLSC__
-
-
-#endif // __OS_ATOMIC_LLSC__
/*
- * Copyright (c) 2013 Apple Inc. All rights reserved.
+ * Copyright (c) 2013, 2016 Apple Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#ifndef __PTHREAD_INTROSPECTION_PRIVATE__
#define __PTHREAD_INTROSPECTION_PRIVATE__
-#include <sys/cdefs.h>
-#include <pthread/pthread.h>
-#include <Availability.h>
-
-/*!
- * @header
- *
- * @abstract
- * Introspection SPI for libpthread.
- */
-
-__BEGIN_DECLS
-
-/*!
- * @typedef pthread_introspection_hook_t
- *
- * @abstract
- * A function pointer called at various points in a PThread's lifetime.
- *
- * @param event
- * One of the events in pthread_introspection_event_t.
- *
- * @param thread
- * pthread_t associated with the event.
- *
- * @param addr
- * Address associated with the event.
- *
- * @param size
- * Size associated with the event.
- */
-typedef void (*pthread_introspection_hook_t)(unsigned int event,
- pthread_t thread, void *addr, size_t size);
-
-/*!
- * @enum pthread_introspection_event_t
- *
- * @constant PTHREAD_INTROSPECTION_THREAD_CREATE
- * pthread_t was created.
- *
- * @constant PTHREAD_INTROSPECTION_THREAD_START
- * Thread has started and stack was allocated.
- *
- * @constant PTHREAD_INTROSPECTION_THREAD_TERMINATE
- * Thread is about to be terminated and stack will be deallocated.
- *
- * @constant PTHREAD_INTROSPECTION_THREAD_DESTROY
- * pthread_t is about to be destroyed.
- */
-enum {
- PTHREAD_INTROSPECTION_THREAD_CREATE = 1,
- PTHREAD_INTROSPECTION_THREAD_START,
- PTHREAD_INTROSPECTION_THREAD_TERMINATE,
- PTHREAD_INTROSPECTION_THREAD_DESTROY,
-};
-
-/*!
- * @function pthread_introspection_hook_install
- *
- * @abstract
- * Install introspection hook function into libpthread.
- *
- * @discussion
- * The caller is responsible for implementing chaining to the hook that was
- * previously installed (if any).
- *
- * @param hook
- * Pointer to hook function.
- *
- * @result
- * Previously installed hook function or NULL.
- */
-
-__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
-__attribute__((__nonnull__, __warn_unused_result__))
-extern pthread_introspection_hook_t
-pthread_introspection_hook_install(pthread_introspection_hook_t hook);
-
-__END_DECLS
+#include <pthread/introspection.h>
#endif
void (*free)(void *); // added with version=2
};
+/*!
+ * @function pthread_chdir_np
+ *
+ * @abstract
+ * Sets the per-thread current working directory.
+ *
+ * @discussion
+ * This will set the per-thread current working directory to the provided path.
+ * If this is used on a workqueue (dispatch) thread, it MUST be unset with
+ * pthread_fchdir_np(-1) before returning.
+ *
+ * @param path
+ * The path of the new working directory.
+ *
+ * @result
+ * 0 upon success, -1 upon error and errno is set.
+ */
+__OSX_AVAILABLE(10.12)
+__IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0)
+__WATCHOS_AVAILABLE(3.0)
+int pthread_chdir_np(char *path);
+
+/*!
+ * @function pthread_fchdir_np
+ *
+ * @abstract
+ * Sets the per-thread current working directory.
+ *
+ * @discussion
+ * This will set the per-thread current working directory to the provided
+ * directory fd. If this is used on a workqueue (dispatch) thread, it MUST be
+ * unset with pthread_fchdir_np(-1) before returning.
+ *
+ * @param fd
+ * A file descriptor to the new working directory. Pass -1 to unset the
+ * per-thread working directory.
+ *
+ * @result
+ * 0 upon success, -1 upon error and errno is set.
+ */
+__OSX_AVAILABLE(10.12)
+__IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0)
+__WATCHOS_AVAILABLE(3.0)
+int pthread_fchdir_np(int fd);
+
+
+#ifdef _os_tsd_get_base
+
+#ifdef __LP64__
+#define _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET -8
+#else
+#define _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET -16
+#endif
+
+/* N.B. DO NOT USE UNLESS YOU ARE REBUILT AS PART OF AN OS TRAIN WORLDBUILD */
+__header_always_inline uint64_t
+_pthread_threadid_self_np_direct(void)
+{
+#ifndef __i386__
+ if (_pthread_has_direct_tsd()) {
+#ifdef OS_GS_RELATIVE
+ return *(uint64_t OS_GS_RELATIVE *)(_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET);
+#else
+ return *(uint64_t*)((char *)_os_tsd_get_base() + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET);
+#endif
+ }
+#endif
+ uint64_t threadid = 0;
+ pthread_threadid_np(NULL, &threadid);
+ return threadid;
+}
+
+#endif // _os_tsd_get_base
+
#endif // __PTHREAD_PRIVATE_H__
#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL
-// <rdar://problem/16611709>
-#define QOS_CLASS_LEGACY QOS_CLASS_DEFAULT
-
#ifdef __has_include
#if __has_include(<pthread/qos_private.h>)
#include <pthread/qos_private.h>
// masks for splitting the handling the contents of a pthread_priority_t, the mapping from
// qos_class_t to the class bits, however, is intentionally not exposed.
-#define _PTHREAD_PRIORITY_FLAGS_MASK (~0xffffff)
-#define _PTHREAD_PRIORITY_FLAGS_SHIFT (24ull)
-#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x00ffff00
-#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull)
-#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff
-#define _PTHREAD_PRIORITY_PRIORITY_SHIFT (0)
-
-#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
-#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000
-#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000
-#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000
-#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000
+#define _PTHREAD_PRIORITY_FLAGS_MASK 0xff000000
+#define _PTHREAD_PRIORITY_FLAGS_SHIFT (24ull)
+#define _PTHREAD_PRIORITY_ENCODING_MASK 0x00a00000
+#define _PTHREAD_PRIORITY_ENCODING_SHIFT (22ull)
+#define _PTHREAD_PRIORITY_ENCODING_V0 0x00000000
+#define _PTHREAD_PRIORITY_ENCODING_V1 0x00400000 /* unused */
+#define _PTHREAD_PRIORITY_ENCODING_V2 0x00800000 /* unused */
+#define _PTHREAD_PRIORITY_ENCODING_V3 0x00a00000 /* unused */
+#define _PTHREAD_PRIORITY_QOS_CLASS_MASK 0x003fff00
+#define _PTHREAD_PRIORITY_QOS_CLASS_SHIFT (8ull)
+#define _PTHREAD_PRIORITY_PRIORITY_MASK 0x000000ff
+#define _PTHREAD_PRIORITY_PRIORITY_SHIFT (0)
+
+#define _PTHREAD_PRIORITY_OVERCOMMIT_FLAG 0x80000000
+#define _PTHREAD_PRIORITY_INHERIT_FLAG 0x40000000
+#define _PTHREAD_PRIORITY_ROOTQUEUE_FLAG 0x20000000
+// Used to indicate to the pthread kext that the provided event manager thread
+// priority is actually a scheduling priority not a QoS. We can have ROOTQUEUE_FLAG
+// perform double duty because it's never provided to the kernel.
+#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
+#define _PTHREAD_PRIORITY_ENFORCE_FLAG 0x10000000
+#define _PTHREAD_PRIORITY_OVERRIDE_FLAG 0x08000000
// libdispatch defines the following, so it's not safe to use for anything we
// expect to be passed in from userspace
// manager thread. There can only ever be one event manager thread at a time and
// it is brought up at the highest of all event manager priorities passed to the
// kext.
-#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
-
-// Used to indicate to the pthread kext that the provided event manager thread
-// priority is actually a scheduling priority not a QoS. We can have ROOTQUEUE_FLAG
-// perform double duty because it's never provided to the kernel.
-#define _PTHREAD_PRIORITY_SCHED_PRI_FLAG 0x20000000
+#define _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG 0x02000000
+#define _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG 0x01000000
// redeffed here to avoid leaving __QOS_ENUM defined in the public header
#define __QOS_ENUM(name, type, ...) enum { __VA_ARGS__ }; typedef type name##_t
-#define __QOS_AVAILABLE_STARTING(x, y)
+#define __QOS_AVAILABLE_10_10
+#define __QOS_AVAILABLE_10_11
+#define __QOS_AVAILABLE_10_12
#if defined(__has_feature) && defined(__has_extension)
#if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums)
#define __QOS_ENUM(name, type, ...) typedef enum : type { __VA_ARGS__ } name##_t
#endif
#if __has_feature(enumerator_attributes)
-#undef __QOS_AVAILABLE_STARTING
-#define __QOS_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING
+#undef __QOS_AVAILABLE_10_10
+#define __QOS_AVAILABLE_10_10 __OSX_AVAILABLE(10.10) __IOS_AVAILABLE(8.0)
+#undef __QOS_AVAILABLE_10_11
+#define __QOS_AVAILABLE_10_11 __OSX_AVAILABLE(10.11) __IOS_AVAILABLE(9.0) __WATCHOS_AVAILABLE(2.0) __TVOS_AVAILABLE(9.0)
+#undef __QOS_AVAILABLE_10_12
+#define __QOS_AVAILABLE_10_12 __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0)
#endif
#endif
__QOS_ENUM(_pthread_set_flags, unsigned int,
- _PTHREAD_SET_SELF_QOS_FLAG
- __QOS_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x1,
- _PTHREAD_SET_SELF_VOUCHER_FLAG
- __QOS_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x2,
- _PTHREAD_SET_SELF_FIXEDPRIORITY_FLAG
- __QOS_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x4,
- _PTHREAD_SET_SELF_TIMESHARE_FLAG
- __QOS_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0) = 0x8,
+ _PTHREAD_SET_SELF_QOS_FLAG __QOS_AVAILABLE_10_10 = 0x1,
+ _PTHREAD_SET_SELF_VOUCHER_FLAG __QOS_AVAILABLE_10_10 = 0x2,
+ _PTHREAD_SET_SELF_FIXEDPRIORITY_FLAG __QOS_AVAILABLE_10_11 = 0x4,
+ _PTHREAD_SET_SELF_TIMESHARE_FLAG __QOS_AVAILABLE_10_11 = 0x8,
+ _PTHREAD_SET_SELF_WQ_KEVENT_UNBIND __QOS_AVAILABLE_10_12 = 0x10,
);
#undef __QOS_ENUM
-#undef __QOS_AVAILABLE_STARTING
+#undef __QOS_AVAILABLE_10_10
+#undef __QOS_AVAILABLE_10_11
+#undef __QOS_AVAILABLE_10_12
#ifndef KERNEL
#include <mach/mach.h>
#include <libkern/OSAtomic.h>
-typedef volatile OSSpinLock pthread_lock_t;
+typedef volatile OSSpinLock pthread_lock_t __deprecated_msg("Use <os/lock.h> instead");
#define LOCK_INIT(l) ((l) = OS_SPINLOCK_INIT)
#define LOCK_INITIALIZER OS_SPINLOCK_INIT
#define LOCK(v) OSSpinLockLock((volatile OSSpinLock *)&(v))
#define UNLOCK(v) OSSpinLockUnlock((volatile OSSpinLock *)&(v))
-extern void _spin_lock(pthread_lock_t *lockp) __deprecated_msg("Use OSSpinLockLock instead");
-extern int _spin_lock_try(pthread_lock_t *lockp) __deprecated_msg("Use OSSpinLockTry instead");
-extern void _spin_unlock(pthread_lock_t *lockp) __deprecated_msg("Use OSSpinLockUnlock instead");
+extern void _spin_lock(pthread_lock_t *lockp) __deprecated_msg("Use <os/lock.h> instead");
+extern int _spin_lock_try(pthread_lock_t *lockp) __deprecated_msg("Use <os/lock.h> instead");
+extern void _spin_unlock(pthread_lock_t *lockp) __deprecated_msg("Use <os/lock.h> instead");
-extern void spin_lock(pthread_lock_t *lockp) __deprecated_msg("Use OSSpinLockLock instead");
-extern int spin_lock_try(pthread_lock_t *lockp) __deprecated_msg("Use OSSpinLockTry instead");
-extern void spin_unlock(pthread_lock_t *lockp) __deprecated_msg("Use OSSpinLockUnlock instead");
+extern void spin_lock(pthread_lock_t *lockp) __deprecated_msg("Use <os/lock.h> instead");
+extern int spin_lock_try(pthread_lock_t *lockp) __deprecated_msg("Use <os/lock.h> instead");
+extern void spin_unlock(pthread_lock_t *lockp) __deprecated_msg("Use <os/lock.h> instead");
#endif /* _POSIX_PTHREAD_SPINLOCK_H */
#include <System/machine/cpu_capabilities.h>
#include <sys/cdefs.h>
#include <TargetConditionals.h>
+#include <Availability.h>
#include <os/tsd.h>
#include <pthread/spinlock_private.h>
#define _PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS __TSD_THREAD_QOS_CLASS
//#define _PTHREAD_TSD_SLOT_SEMAPHORE_CACHE__TSD_SEMAPHORE_CACHE
+/*
+ * Windows 64-bit ABI bakes %gs relative accesses into its code in the same
+ * range as our TSD keys. To allow some limited interoperability for code
+ * targeting that ABI, we leave slots 6 and 11 unused.
+ */
+//#define _PTHREAD_TSD_SLOT_RESERVED_WIN64 6
+
#define _PTHREAD_TSD_RESERVED_SLOT_COUNT _PTHREAD_TSD_RESERVED_SLOT_COUNT
/* Keys 10 - 29 are for Libc/Libsystem internal usage */
/* used as __pthread_tsd_first + Num */
#define __PTK_LIBC_LOCALE_KEY 10
-#define __PTK_LIBC_TTYNAME_KEY 11
+//#define __PTK_LIBC_RESERVED_WIN64 11
#define __PTK_LIBC_LOCALTIME_KEY 12
#define __PTK_LIBC_GMTIME_KEY 13
#define __PTK_LIBC_GDTOA_BIGINT_KEY 14
#define __PTK_LIBC_PARSEFLOAT_KEY 15
+#define __PTK_LIBC_TTYNAME_KEY 16
/* for usage by dyld */
#define __PTK_LIBC_DYLD_Unwind_SjLj_Key 18
/* Keys 95 for CoreText */
#define __PTK_FRAMEWORK_CORETEXT_KEY0 95
-/* Keys 110-119 for Garbage Collection */
-#define __PTK_FRAMEWORK_GC_KEY0 110
-#define __PTK_FRAMEWORK_GC_KEY1 111
-#define __PTK_FRAMEWORK_GC_KEY2 112
-#define __PTK_FRAMEWORK_GC_KEY3 113
-#define __PTK_FRAMEWORK_GC_KEY4 114
-#define __PTK_FRAMEWORK_GC_KEY5 115
-#define __PTK_FRAMEWORK_GC_KEY6 116
-#define __PTK_FRAMEWORK_GC_KEY7 117
-#define __PTK_FRAMEWORK_GC_KEY8 118
-#define __PTK_FRAMEWORK_GC_KEY9 119
-
/* Keys 210 - 229 are for libSystem usage within the iOS Simulator */
/* They are offset from their corresponding libSystem keys by 200 */
#define __PTK_LIBC_SIM_LOCALE_KEY 210
/* setup destructor function for static key as it is not created with pthread_key_create() */
extern int pthread_key_init_np(int, void (*)(void *));
+__OSX_AVAILABLE(10.12)
+__IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0)
+__WATCHOS_AVAILABLE(3.0)
+extern int _pthread_setspecific_static(unsigned long, void *);
+
#if PTHREAD_LAYOUT_SPI
/* SPI intended for CoreSymbolication only */
} pthread_layout_offsets;
#endif // PTHREAD_LAYOUT_SPI
-__END_DECLS
-
-#if TARGET_IPHONE_SIMULATOR
__header_always_inline int
_pthread_has_direct_tsd(void)
{
+#if TARGET_IPHONE_SIMULATOR
return 0;
-}
-
-#define _pthread_getspecific_direct(key) pthread_getspecific((key))
-#define _pthread_setspecific_direct(key, val) pthread_setspecific((key), (val))
-
-#else /* TARGET_IPHONE_SIMULATOR */
-
-__header_always_inline int
-_pthread_has_direct_tsd(void)
-{
+#else
return 1;
+#endif
}
/* To be used with static constant keys only */
__header_always_inline void *
_pthread_getspecific_direct(unsigned long slot)
{
+#if TARGET_IPHONE_SIMULATOR
+ return pthread_getspecific(slot);
+#else
return _os_tsd_get_direct(slot);
+#endif
}
-/* To be used with static constant keys only */
+/* To be used with static constant keys only, assumes destructor is
+ * already setup (with pthread_key_init_np) */
__header_always_inline int
_pthread_setspecific_direct(unsigned long slot, void * val)
{
+#if TARGET_IPHONE_SIMULATOR
+ return _pthread_setspecific_static(slot, val);
+#else
return _os_tsd_set_direct(slot, val);
+#endif
}
-#endif /* TARGET_IPHONE_SIMULATOR */
+__END_DECLS
#endif /* ! __ASSEMBLER__ */
#endif /* __PTHREAD_TSD_H__ */
#include <pthread/qos_private.h>
#endif
-#define PTHREAD_WORKQUEUE_SPI_VERSION 20150304
+#define PTHREAD_WORKQUEUE_SPI_VERSION 20160427
/* Feature checking flags, returned by _pthread_workqueue_supported()
*
#define WORKQ_FEATURE_DISPATCHFUNC 0x01 // pthread_workqueue_setdispatch_np is supported (or not)
#define WORKQ_FEATURE_FINEPRIO 0x02 // fine grained pthread workq priorities
#define WORKQ_FEATURE_MAINTENANCE 0x10 // QOS class maintenance
-#define WORKQ_FEATURE_KEVENT 0x20 // Support for direct kevent delivery
+#define WORKQ_FEATURE_KEVENT 0x40 // Support for direct kevent delivery
/* Legacy dispatch priority bands */
typedef void (*pthread_workqueue_function_t)(int queue_priority, int options, void *ctxt);
// New callback prototype, used with pthread_workqueue_init
typedef void (*pthread_workqueue_function2_t)(pthread_priority_t priority);
+
// Newer callback prototype, used in conjection with function2 when there are kevents to deliver
// both parameters are in/out parameters
+#define WORKQ_KEVENT_EVENT_BUFFER_LEN 16
typedef void (*pthread_workqueue_function_kevent_t)(void **events, int *nevents);
// Initialises the pthread workqueue subsystem, passing the new-style callback prototype,
_pthread_workqueue_set_event_manager_priority(pthread_priority_t priority);
// Apply a QoS override without allocating userspace memory
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+int
+_pthread_qos_override_start_direct(mach_port_t thread, pthread_priority_t priority, void *resource);
+
+// Drop a corresponding QoS override made above, if the resource matches
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+int
+_pthread_qos_override_end_direct(mach_port_t thread, void *resource);
+
+// Apply a QoS override without allocating userspace memory
+__OSX_DEPRECATED(10.10, 10.12, "use _pthread_qos_override_start_direct()")
+__IOS_DEPRECATED(8.0, 10.0, "use _pthread_qos_override_start_direct()")
+__TVOS_DEPRECATED(8.0, 10.0, "use _pthread_qos_override_start_direct()")
+__WATCHOS_DEPRECATED(1.0, 3.0, "use _pthread_qos_override_start_direct()")
int
_pthread_override_qos_class_start_direct(mach_port_t thread, pthread_priority_t priority);
// Drop a corresponding QoS override made above.
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__OSX_DEPRECATED(10.10, 10.12, "use _pthread_qos_override_end_direct()")
+__IOS_DEPRECATED(8.0, 10.0, "use _pthread_qos_override_end_direct()")
+__TVOS_DEPRECATED(8.0, 10.0, "use _pthread_qos_override_end_direct()")
+__WATCHOS_DEPRECATED(1.0, 3.0, "use _pthread_qos_override_end_direct()")
int
_pthread_override_qos_class_end_direct(mach_port_t thread);
int
_pthread_workqueue_override_start_direct(mach_port_t thread, pthread_priority_t priority);
+// Apply a QoS override on a given workqueue thread.
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+int
+_pthread_workqueue_override_start_direct_check_owner(mach_port_t thread, pthread_priority_t priority, mach_port_t *ulock_addr);
+
// Drop all QoS overrides on the current workqueue thread.
__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
int
--- /dev/null
+/*
+ * Copyright (c) 2013, 2016 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef __PTHREAD_INTROSPECTION__
+#define __PTHREAD_INTROSPECTION__
+
+#include <sys/cdefs.h>
+#include <pthread/pthread.h>
+#include <Availability.h>
+
+/*!
+ * @header
+ *
+ * @abstract
+ * Introspection API for libpthread.
+ *
+ * This should only be used for introspection and debugging tools. Do not rely
+ * on it in shipping code.
+ */
+
+__BEGIN_DECLS
+
+/*!
+ * @typedef pthread_introspection_hook_t
+ *
+ * @abstract
+ * A function pointer called at various points in a PThread's lifetime. The
+ * function must be able to be called in contexts with invalid thread state.
+ *
+ * @param event
+ * One of the events in pthread_introspection_event_t.
+ *
+ * @param thread
+ * pthread_t associated with the event.
+ *
+ * @param addr
+ * Address associated with the event, e.g. stack address.
+ *
+ * @param size
+ * Size associated with the event, e.g. stack size.
+ */
+typedef void (*pthread_introspection_hook_t)(unsigned int event,
+ pthread_t thread, void *addr, size_t size);
+
+/*!
+ * @enum pthread_introspection_event_t
+ *
+ * @constant PTHREAD_INTROSPECTION_THREAD_CREATE
+ * pthread_t was created.
+ *
+ * @constant PTHREAD_INTROSPECTION_THREAD_START
+ * Thread has started and stack was allocated.
+ *
+ * @constant PTHREAD_INTROSPECTION_THREAD_TERMINATE
+ * Thread is about to be terminated and stack will be deallocated.
+ *
+ * @constant PTHREAD_INTROSPECTION_THREAD_DESTROY
+ * pthread_t is about to be destroyed.
+ */
+enum {
+ PTHREAD_INTROSPECTION_THREAD_CREATE = 1,
+ PTHREAD_INTROSPECTION_THREAD_START,
+ PTHREAD_INTROSPECTION_THREAD_TERMINATE,
+ PTHREAD_INTROSPECTION_THREAD_DESTROY,
+};
+
+/*!
+ * @function pthread_introspection_hook_install
+ *
+ * @abstract
+ * Install introspection hook function into libpthread.
+ *
+ * @discussion
+ * The caller is responsible for implementing chaining to the hook that was
+ * previously installed (if any).
+ *
+ * @param hook
+ * Pointer to hook function.
+ *
+ * @result
+ * Previously installed hook function or NULL.
+ */
+
+__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
+__attribute__((__nonnull__, __warn_unused_result__))
+extern pthread_introspection_hook_t
+pthread_introspection_hook_install(pthread_introspection_hook_t hook);
+
+__END_DECLS
+
+#endif
#include <sys/cdefs.h>
#include <Availability.h>
+#if __has_feature(assume_nonnull)
+_Pragma("clang assume_nonnull begin")
+#endif
__BEGIN_DECLS
/*
* Threads
# endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE */
#endif
+/* <rdar://problem/25944576> */
+#define _PTHREAD_SWIFT_IMPORTER_NULLABILITY_COMPAT \
+ defined(SWIFT_CLASS_EXTRA) && (!defined(SWIFT_SDK_OVERLAY_PTHREAD_EPOCH) || (SWIFT_SDK_OVERLAY_PTHREAD_EPOCH < 1))
+
/*
* Condition variable attributes
*/
* Prototypes for all PTHREAD interfaces
*/
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_atfork(void (*)(void), void (*)(void), void (*)(void));
+int pthread_atfork(void (* _Nullable)(void), void (* _Nullable)(void),
+ void (* _Nullable)(void));
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_attr_destroy(pthread_attr_t *);
int pthread_attr_getscope(const pthread_attr_t * __restrict, int * __restrict);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_attr_getstack(const pthread_attr_t * __restrict, void ** __restrict,
- size_t * __restrict);
+int pthread_attr_getstack(const pthread_attr_t * __restrict,
+ void * _Nullable * _Nonnull __restrict, size_t * __restrict);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_attr_getstackaddr(const pthread_attr_t * __restrict, void ** __restrict);
+int pthread_attr_getstackaddr(const pthread_attr_t * __restrict,
+ void * _Nullable * _Nonnull __restrict);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_attr_getstacksize(const pthread_attr_t * __restrict, size_t * __restrict);
int pthread_cond_destroy(pthread_cond_t *);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_cond_init(pthread_cond_t * __restrict,
- const pthread_condattr_t * __restrict) __DARWIN_ALIAS(pthread_cond_init);
+int pthread_cond_init(
+ pthread_cond_t * __restrict,
+ const pthread_condattr_t * _Nullable __restrict)
+ __DARWIN_ALIAS(pthread_cond_init);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_cond_signal(pthread_cond_t *);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_cond_timedwait(pthread_cond_t * __restrict, pthread_mutex_t * __restrict,
- const struct timespec * __restrict) __DARWIN_ALIAS_C(pthread_cond_timedwait);
+int pthread_cond_timedwait(
+ pthread_cond_t * __restrict, pthread_mutex_t * __restrict,
+ const struct timespec * _Nullable __restrict)
+ __DARWIN_ALIAS_C(pthread_cond_timedwait);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_cond_wait(pthread_cond_t * __restrict,
int pthread_condattr_setpshared(pthread_condattr_t *, int);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_create(pthread_t * __restrict, const pthread_attr_t * __restrict,
- void *(*)(void *), void * __restrict);
+#if !_PTHREAD_SWIFT_IMPORTER_NULLABILITY_COMPAT
+int pthread_create(pthread_t _Nullable * _Nonnull __restrict,
+ const pthread_attr_t * _Nullable __restrict,
+ void * _Nullable (* _Nonnull)(void * _Nullable),
+ void * _Nullable __restrict);
+#else
+int pthread_create(pthread_t * __restrict,
+ const pthread_attr_t * _Nullable __restrict,
+ void *(* _Nonnull)(void *), void * _Nullable __restrict);
+#endif // _PTHREAD_SWIFT_IMPORTER_NULLABILITY_COMPAT
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_detach(pthread_t);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_equal(pthread_t, pthread_t);
+int pthread_equal(pthread_t _Nullable, pthread_t _Nullable);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-void pthread_exit(void *) __dead2;
+void pthread_exit(void * _Nullable) __dead2;
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_getconcurrency(void);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_getschedparam(pthread_t , int * __restrict,
- struct sched_param * __restrict);
+int pthread_getschedparam(pthread_t , int * _Nullable __restrict,
+ struct sched_param * _Nullable __restrict);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-void* pthread_getspecific(pthread_key_t);
+void* _Nullable pthread_getspecific(pthread_key_t);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_join(pthread_t , void **) __DARWIN_ALIAS_C(pthread_join);
+int pthread_join(pthread_t , void * _Nullable * _Nullable)
+ __DARWIN_ALIAS_C(pthread_join);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_key_create(pthread_key_t *, void (*)(void *));
+int pthread_key_create(pthread_key_t *, void (* _Nullable)(void *));
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_key_delete(pthread_key_t);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_mutex_init(pthread_mutex_t * __restrict,
- const pthread_mutexattr_t * __restrict);
+ const pthread_mutexattr_t * _Nullable __restrict);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_mutex_lock(pthread_mutex_t *);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_mutexattr_settype(pthread_mutexattr_t *, int);
+__SWIFT_UNAVAILABLE_MSG("Use lazily initialized globals instead")
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_once(pthread_once_t *, void (*)(void));
+int pthread_once(pthread_once_t *, void (* _Nonnull)(void));
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_rwlock_destroy(pthread_rwlock_t * ) __DARWIN_ALIAS(pthread_rwlock_destroy);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_rwlock_init(pthread_rwlock_t * __restrict,
- const pthread_rwlockattr_t * __restrict) __DARWIN_ALIAS(pthread_rwlock_init);
+ const pthread_rwlockattr_t * _Nullable __restrict)
+ __DARWIN_ALIAS(pthread_rwlock_init);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_rwlock_rdlock(pthread_rwlock_t *) __DARWIN_ALIAS(pthread_rwlock_rdlock);
pthread_t pthread_self(void);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_setcancelstate(int , int *) __DARWIN_ALIAS(pthread_setcancelstate);
+int pthread_setcancelstate(int , int * _Nullable)
+ __DARWIN_ALIAS(pthread_setcancelstate);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_setcanceltype(int , int *) __DARWIN_ALIAS(pthread_setcanceltype);
+int pthread_setcanceltype(int , int * _Nullable)
+ __DARWIN_ALIAS(pthread_setcanceltype);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_setconcurrency(int);
int pthread_setschedparam(pthread_t, int, const struct sched_param *);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_setspecific(pthread_key_t , const void *);
+int pthread_setspecific(pthread_key_t , const void * _Nullable);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
void pthread_testcancel(void) __DARWIN_ALIAS(pthread_testcancel);
int pthread_is_threaded_np(void);
__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2)
-int pthread_threadid_np(pthread_t,__uint64_t*);
+int pthread_threadid_np(pthread_t _Nullable,__uint64_t* _Nullable);
/*SPI to set and get pthread name*/
__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2)
/* Like pthread_cond_signal(), but only wake up the specified pthread */
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_cond_signal_thread_np(pthread_cond_t *, pthread_t);
+int pthread_cond_signal_thread_np(pthread_cond_t *, pthread_t _Nullable);
/* Like pthread_cond_timedwait, but use a relative timeout */
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_cond_timedwait_relative_np(pthread_cond_t *, pthread_mutex_t *,
- const struct timespec *);
+ const struct timespec * _Nullable);
/* Like pthread_create(), but leaves the thread suspended */
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_create_suspended_np(pthread_t *, const pthread_attr_t *,
- void *(*)(void *), void *);
+#if !_PTHREAD_SWIFT_IMPORTER_NULLABILITY_COMPAT
+int pthread_create_suspended_np(
+ pthread_t _Nullable * _Nonnull, const pthread_attr_t * _Nullable,
+ void * _Nullable (* _Nonnull)(void * _Nullable), void * _Nullable);
+#else
+int pthread_create_suspended_np(pthread_t *, const pthread_attr_t * _Nullable,
+ void *(* _Nonnull)(void *), void * _Nullable);
+#endif
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
int pthread_kill(pthread_t, int);
__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
-pthread_t pthread_from_mach_thread_np(mach_port_t);
+_Nullable pthread_t pthread_from_mach_thread_np(mach_port_t);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
-int pthread_sigmask(int, const sigset_t *, sigset_t *) __DARWIN_ALIAS(pthread_sigmask);
+int pthread_sigmask(int, const sigset_t * _Nullable, sigset_t * _Nullable)
+ __DARWIN_ALIAS(pthread_sigmask);
__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
void pthread_yield_np(void);
#endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE */
__END_DECLS
+#if __has_feature(assume_nonnull)
+_Pragma("clang assume_nonnull end")
+#endif
+
#endif /* _PTHREAD_H */
/* This whole header file will disappear, so don't depend on it... */
+#if __has_feature(assume_nonnull)
+_Pragma("clang assume_nonnull begin")
+#endif
+
#ifndef __POSIX_LIB__
/*
#endif /* __POSIX_LIB__ */
+#if __has_feature(assume_nonnull)
+_Pragma("clang assume_nonnull end")
+#endif
+
#endif /* _PTHREAD_IMPL_H_ */
*/
/*
- * Extension SPIs.
+ * Extension SPIs; installed to /usr/include.
*/
#ifndef _PTHREAD_SPIS_H
#include <pthread/pthread.h>
+#if __has_feature(assume_nonnull)
+_Pragma("clang assume_nonnull begin")
+#endif
__BEGIN_DECLS
#if (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)) || defined(_DARWIN_C_SOURCE)
#endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE */
+__OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_NA)
+void _pthread_mutex_enable_legacy_mode(void);
+
+/*
+ * A version of pthread_create that is safely callable from an injected mach thread.
+ *
+ * The _create introspection hook will not fire for threads created from this function.
+ *
+ * It is not safe to call this function concurrently.
+ */
+__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
+__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+#if !_PTHREAD_SWIFT_IMPORTER_NULLABILITY_COMPAT
+int pthread_create_from_mach_thread(
+ pthread_t _Nullable * _Nonnull __restrict,
+ const pthread_attr_t * _Nullable __restrict,
+ void * _Nullable (* _Nonnull)(void * _Nullable),
+ void * _Nullable __restrict);
+#else
+int pthread_create_from_mach_thread(pthread_t * __restrict,
+ const pthread_attr_t * _Nullable __restrict,
+ void *(* _Nonnull)(void *), void * _Nullable __restrict);
+#endif
+
__END_DECLS
+#if __has_feature(assume_nonnull)
+_Pragma("clang assume_nonnull end")
+#endif
#endif /* _PTHREAD_SPIS_H */
#ifndef KERNEL
+#if __has_feature(assume_nonnull)
+_Pragma("clang assume_nonnull begin")
+#endif
__BEGIN_DECLS
/*!
__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
int
pthread_attr_get_qos_class_np(pthread_attr_t * __restrict __attr,
- qos_class_t * __restrict __qos_class,
- int * __restrict __relative_priority);
+ qos_class_t * _Nullable __restrict __qos_class,
+ int * _Nullable __restrict __relative_priority);
/*!
* @function pthread_set_qos_class_self_np
__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
int
pthread_get_qos_class_np(pthread_t __pthread,
- qos_class_t * __restrict __qos_class,
- int * __restrict __relative_priority);
+ qos_class_t * _Nullable __restrict __qos_class,
+ int * _Nullable __restrict __relative_priority);
/*!
* @typedef pthread_override_t
pthread_override_qos_class_end_np(pthread_override_t __override);
__END_DECLS
+#if __has_feature(assume_nonnull)
+_Pragma("clang assume_nonnull end")
+#endif
#endif // KERNEL
#include <libkern/OSAtomic.h>
#include <mach/mach.h>
#include <mach/mach_error.h>
-#include <os/once_private.h>
#include <sys/queue.h>
+#define __OS_EXPOSE_INTERNALS__ 1
+#include <os/internal/internal_shared.h>
+#include <os/once_private.h>
+
+#define PTHREAD_INTERNAL_CRASH(c, x) do { \
+ _os_set_crash_log_cause_and_message((c), \
+ "BUG IN LIBPTHREAD: " x); \
+ __builtin_trap(); \
+ } while (0)
+
+#define PTHREAD_CLIENT_CRASH(c, x) do { \
+ _os_set_crash_log_cause_and_message((c), \
+ "BUG IN CLIENT OF LIBPTHREAD: " x); \
+ __builtin_trap(); \
+ } while (0)
+
#ifndef __POSIX_LIB__
#define __POSIX_LIB__
#endif
#include "tsd_private.h"
#include "spinlock_private.h"
+#define OS_UNFAIR_LOCK_INLINE 1
+#include <os/lock_private.h>
+typedef os_unfair_lock _pthread_lock;
+#define _PTHREAD_LOCK_INITIALIZER OS_UNFAIR_LOCK_INIT
+#define _PTHREAD_LOCK_INIT(lock) ((lock) = (_pthread_lock)_PTHREAD_LOCK_INITIALIZER)
+#define _PTHREAD_LOCK(lock) os_unfair_lock_lock_with_options_inline(&(lock), OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION)
+#define _PTHREAD_LOCK_FROM_MACH_THREAD(lock) os_unfair_lock_lock_inline_no_tsd_4libpthread(&(lock))
+#define _PTHREAD_UNLOCK(lock) os_unfair_lock_unlock_inline(&(lock))
+#define _PTHREAD_UNLOCK_FROM_MACH_THREAD(lock) os_unfair_lock_unlock_inline_no_tsd_4libpthread(&(lock))
+
#if TARGET_IPHONE_SIMULATOR
#error Unsupported target
#endif
extern struct __pthread_list __pthread_head;
// Lock protects access to above list.
-extern pthread_lock_t _pthread_list_lock;
+extern _pthread_lock _pthread_list_lock;
extern int __is_threaded;
childexit:1,
pad3:29;
- pthread_lock_t lock; // protect access to everything below
+ _pthread_lock lock; // protect access to everything below
uint32_t detached:8,
inherit:8,
policy:8,
wqkillset:1,
pad:4;
-#if __LP64__
+#if defined(__LP64__)
uint32_t pad0;
#endif
- uint64_t thread_id; // 64-bit unique thread id
void *(*fun)(void*); // thread start routine
void *arg; // thread start routine argument
int cancel_state; // whether the thread can be cancelled
int cancel_error;
-#ifdef __i386__
- // i386 err_no must be at a 68 byte offset
- // See <rdar://problem/13249323>
- uint32_t __13249323_pad[3];
-#endif
int err_no; // thread-local errno
struct _pthread *joiner;
TAILQ_ENTRY(_pthread) plist; // global thread list [aligned]
char pthread_name[MAXTHREADNAMESIZE]; // includes NUL [aligned]
-
+
void *stackaddr; // base of the stack (page aligned)
size_t stacksize; // size of stack (page multiple and >= PTHREAD_STACK_MIN)
size_t freesize; // stack/thread allocation size
size_t guardsize; // guard page size in bytes
- // thread specific data
- void *tsd[_EXTERNAL_POSIX_THREAD_KEYS_MAX + _INTERNAL_POSIX_THREAD_KEYS_MAX] __attribute__ ((aligned (16)));
+ // tsd-base relative accessed elements
+ __attribute__((aligned(8)))
+ uint64_t thread_id; // 64-bit unique thread id
+
+ /* Thread Specific Data slots
+ *
+ * The offset of this field from the start of the structure is difficult to
+ * change on OS X because of a thorny bitcompat issue: mono has hard coded
+ * the value into their source. Newer versions of mono will fall back to
+ * scanning to determine it at runtime, but there's lots of software built
+ * with older mono that won't. We will have to break them someday...
+ */
+ __attribute__ ((aligned (16)))
+ void *tsd[_EXTERNAL_POSIX_THREAD_KEYS_MAX + _INTERNAL_POSIX_THREAD_KEYS_MAX];
} *pthread_t;
struct _pthread_attr_t {
long sig;
- pthread_lock_t lock;
+ _pthread_lock lock;
uint32_t detached:8,
inherit:8,
policy:8,
typedef struct {
long sig;
- pthread_lock_t lock;
+ _pthread_lock lock;
union {
uint32_t value;
struct _pthread_mutex_options options;
typedef struct {
long sig;
- pthread_lock_t lock;
+ _pthread_lock lock;
uint32_t unused:29,
misalign:1,
pshared:2;
typedef struct {
long sig;
- pthread_lock_t lock;
+ _pthread_lock lock;
uint32_t unused:29,
misalign:1,
pshared:2;
static inline void
_pthread_set_kernel_thread(pthread_t t, mach_port_t p)
{
+ if (os_slowpath(!MACH_PORT_VALID(p))) {
+ PTHREAD_INTERNAL_CRASH(t, "Invalid thread port");
+ }
t->tsd[_PTHREAD_TSD_SLOT_MACH_THREAD_SELF] = p;
}
struct pthread_globals_s {
// atfork.c
pthread_t psaved_self;
- OSSpinLock psaved_self_global_lock;
- OSSpinLock pthread_atfork_lock;
+ _pthread_lock psaved_self_global_lock;
+ _pthread_lock pthread_atfork_lock;
size_t atfork_count;
struct pthread_atfork_entry atfork_storage[PTHREAD_ATFORK_INLINE_MAX];
#include "workqueue_private.h"
#include "introspection_private.h"
#include "qos_private.h"
+#include "tsd_private.h"
#include <stdlib.h>
#include <errno.h>
// _pthread_list_lock protects _pthread_count, access to the __pthread_head
// list, and the parentcheck, childrun and childexit flags of the pthread
// structure. Externally imported by pthread_cancelable.c.
-__private_extern__ pthread_lock_t _pthread_list_lock = LOCK_INITIALIZER;
+__private_extern__ _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
static int _pthread_count = 1;
mach_msg_trailer_t trailer;
} pthread_reap_msg_t;
-#define pthreadsize ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
+/*
+ * The pthread may be offset into a page. In that event, by contract
+ * with the kernel, the allocation will extend PTHREAD_SIZE from the
+ * start of the next page. There's also one page worth of allocation
+ * below stacksize for the guard page. <rdar://problem/19941744>
+ */
+#define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
+#define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
+#define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
+
static pthread_attr_t _pthread_attr_default = {0};
// The main thread's pthread_t
-static struct _pthread _thread __attribute__((aligned(4096))) = {0};
+static struct _pthread _thread __attribute__((aligned(64))) = {0};
static int default_priority;
static int max_priority;
const pthread_attr_t *attrs,
void *stack,
size_t stacksize,
- int kernalloc);
+ void *freeaddr,
+ size_t freesize);
extern void _pthread_set_self(pthread_t);
+static void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);
static void _pthread_dealloc_reply_port(pthread_t t);
-static inline void __pthread_add_thread(pthread_t t, bool parent);
+static inline void __pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread);
static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);
static int _pthread_find_thread(pthread_t thread);
-----------------------------------------
*/
-#define PTHREAD_START_CUSTOM 0x01000000
-#define PTHREAD_START_SETSCHED 0x02000000
-#define PTHREAD_START_DETACHED 0x04000000
-#define PTHREAD_START_QOSCLASS 0x08000000
-#define PTHREAD_START_QOSCLASS_MASK 0xffffff
+#define PTHREAD_START_CUSTOM 0x01000000
+#define PTHREAD_START_SETSCHED 0x02000000
+#define PTHREAD_START_DETACHED 0x04000000
+#define PTHREAD_START_QOSCLASS 0x08000000
+#define PTHREAD_START_TSD_BASE_SET 0x10000000
+#define PTHREAD_START_QOSCLASS_MASK 0x00ffffff
#define PTHREAD_START_POLICY_BITSHIFT 16
#define PTHREAD_START_POLICY_MASK 0xff
#define PTHREAD_START_IMPORTANCE_MASK 0xffff
#error no PTHREAD_STACK_HINT for this architecture
#endif
-#if defined(__i386__) && defined(static_assert)
-// Check for regression of <rdar://problem/13249323>
-static_assert(offsetof(struct _pthread, err_no) == 68);
-#endif
+// Check that the offsets of the _PTHREAD_STRUCT_DIRECT_*_OFFSET values haven't changed
+_Static_assert(offsetof(struct _pthread, tsd) + _PTHREAD_STRUCT_DIRECT_THREADID_OFFSET
+ == offsetof(struct _pthread, thread_id),
+ "_PTHREAD_STRUCT_DIRECT_THREADID_OFFSET is correct");
// Allocate a thread structure, stack and guard page.
//
if (attrs->stackaddr != NULL) {
PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
*stack = attrs->stackaddr;
- allocsize = pthreadsize;
+ allocsize = PTHREAD_SIZE;
} else {
guardsize = attrs->guardsize;
stacksize = attrs->stacksize;
- allocsize = stacksize + guardsize + pthreadsize;
+ allocsize = stacksize + guardsize + PTHREAD_SIZE;
}
kr = mach_vm_map(mach_task_self(),
}
if (t != NULL) {
- _pthread_struct_init(t, attrs, *stack, attrs->stacksize, 0);
- t->freeaddr = (void *)allocaddr;
- t->freesize = allocsize;
+ _pthread_struct_init(t, attrs,
+ *stack, attrs->stacksize,
+ allocaddr, allocsize);
*thread = t;
res = 0;
} else {
return 0;
}
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wreturn-stack-address"
+
+PTHREAD_NOINLINE
+static void*
+_current_stack_address(void)
+{
+ int a;
+ return &a;
+}
+
+#pragma clang diagnostic pop
+
// Terminates the thread if called from the currently running thread.
-PTHREAD_NORETURN
+PTHREAD_NORETURN PTHREAD_NOINLINE
static void
_pthread_terminate(pthread_t t)
{
PTHREAD_ASSERT(t == pthread_self());
-
+
uintptr_t freeaddr = (uintptr_t)t->freeaddr;
size_t freesize = t->freesize;
+ // the size of just the stack
+ size_t freesize_stack = t->freesize;
+
+ // We usually pass our structure+stack to bsdthread_terminate to free, but
+ // if we get told to keep the pthread_t structure around then we need to
+ // adjust the free size and addr in the pthread_t to just refer to the
+ // structure and not the stack. If we do end up deallocating the
+ // structure, this is useless work since no one can read the result, but we
+ // can't do it after the call to pthread_remove_thread because it isn't
+ // safe to dereference t after that.
+ if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
+ // Check to ensure the pthread structure itself is part of the
+ // allocation described by freeaddr/freesize, in which case we split and
+ // only deallocate the area below the pthread structure. In the event of a
+ // custom stack, the freeaddr/size will be the pthread structure itself, in
+ // which case we shouldn't free anything (the final else case).
+ freesize_stack = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);
+
+ // describe just the remainder for deallocation when the pthread_t goes away
+ t->freeaddr += freesize_stack;
+ t->freesize -= freesize_stack;
+ } else if (t == &_thread){
+ freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
+ uintptr_t stackborder = trunc_page((uintptr_t)_current_stack_address());
+ freesize_stack = stackborder - freeaddr;
+ } else {
+ freesize_stack = 0;
+ }
+
mach_port_t kport = _pthread_kernel_thread(t);
semaphore_t joinsem = t->joiner_notify;
_pthread_dealloc_reply_port(t);
- // If the pthread_t sticks around after the __bsdthread_terminate, we'll
- // need to free it later
-
- // After the call to __pthread_remove_thread, it is only safe to
- // dereference the pthread_t structure if EBUSY has been returned.
+ // After the call to __pthread_remove_thread, it is not safe to
+ // dereference the pthread_t structure.
bool destroy, should_exit;
destroy = (__pthread_remove_thread(t, true, &should_exit) != EBUSY);
- if (t == &_thread) {
- // Don't free the main thread.
- freesize = 0;
- } else if (!destroy) {
- // We were told to keep the pthread_t structure around. In the common
- // case, the pthread structure itself is part of the allocation
- // described by freeaddr/freesize, in which case we need to split and
- // only deallocate the area below the pthread structure. In the event
- // of a custom stack, the freeaddr/size will be the pthread structure
- // itself, in which case we shouldn't free anything.
- if ((void*)t > t->freeaddr && (void*)t < t->freeaddr + t->freesize){
- freesize = trunc_page((uintptr_t)t - (uintptr_t)freeaddr);
- t->freeaddr += freesize;
- t->freesize -= freesize;
- } else {
- freesize = 0;
- }
+ if (!destroy || t == &_thread) {
+ // Use the adjusted freesize of just the stack that we computed above.
+ freesize = freesize_stack;
}
+
+ // Check if there is nothing to free because the thread has a custom
+ // stack allocation and is joinable.
if (freesize == 0) {
freeaddr = 0;
}
/*
* Create and start execution of a new thread.
*/
-
+PTHREAD_NOINLINE
static void
-_pthread_body(pthread_t self)
+_pthread_body(pthread_t self, bool needs_tsd_base_set)
{
- _pthread_set_self(self);
- __pthread_add_thread(self, false);
- _pthread_exit(self, (self->fun)(self->arg));
+ _pthread_set_self_internal(self, needs_tsd_base_set);
+ __pthread_add_thread(self, false, false);
+ void *result = (self->fun)(self->arg);
+
+ _pthread_exit(self, result);
}
void
-_pthread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void *arg, size_t stacksize, unsigned int pflags)
+_pthread_start(pthread_t self,
+ mach_port_t kport,
+ void *(*fun)(void *),
+ void *arg,
+ size_t stacksize,
+ unsigned int pflags)
{
if ((pflags & PTHREAD_START_CUSTOM) == 0) {
- uintptr_t stackaddr = self;
- _pthread_struct_init(self, &_pthread_attr_default, stackaddr, stacksize, 1);
+ void *stackaddr = self;
+ _pthread_struct_init(self, &_pthread_attr_default,
+ stackaddr, stacksize,
+ PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));
if (pflags & PTHREAD_START_SETSCHED) {
self->policy = ((pflags >> PTHREAD_START_POLICY_BITSHIFT) & PTHREAD_START_POLICY_MASK);
self->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
}
+ bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);
+
_pthread_set_kernel_thread(self, kport);
self->fun = fun;
self->arg = arg;
-
- _pthread_body(self);
+
+ _pthread_body(self, !thread_tsd_bsd_set);
}
static void
const pthread_attr_t *attrs,
void *stackaddr,
size_t stacksize,
- int kernalloc)
+ void *freeaddr,
+ size_t freesize)
{
+#if DEBUG
+ PTHREAD_ASSERT(t->sig != _PTHREAD_SIG);
+#endif
+
t->sig = _PTHREAD_SIG;
t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = t;
t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = _pthread_priority_make_newest(QOS_CLASS_UNSPECIFIED, 0, 0);
- LOCK_INIT(t->lock);
+ _PTHREAD_LOCK_INIT(t->lock);
- t->stacksize = stacksize;
t->stackaddr = stackaddr;
-
- t->kernalloc = kernalloc;
- if (kernalloc){
- /*
- * The pthread may be offset into a page. In that event, by contract
- * with the kernel, the allocation will extend pthreadsize from the
- * start of the next page. There's also one page worth of allocation
- * below stacksize for the guard page. <rdar://problem/19941744>
- */
- t->freeaddr = (stackaddr - stacksize) - vm_page_size;
- t->freesize = (round_page((uintptr_t)stackaddr) + pthreadsize) - (uintptr_t)t->freeaddr;
- }
+ t->stacksize = stacksize;
+ t->freeaddr = freeaddr;
+ t->freesize = freesize;
t->guardsize = attrs->guardsize;
t->detached = attrs->detached;
struct _pthread *p = NULL;
/* No need to wait as mach port is already known */
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
TAILQ_FOREACH(p, &__pthread_head, plist) {
if (_pthread_kernel_thread(p) == kernel_thread) {
}
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return p;
}
if (t == NULL) {
return ESRCH; // XXX bug?
}
-
- // since the main thread will not get de-allocated from underneath us
+
+#if !defined(__arm__) && !defined(__arm64__)
+ // The default rlimit based allocations will be provided with a stacksize
+ // of the current limit and a freesize of the max. However, custom
+ // allocations will just have the guard page to free. If we aren't in the
+ // latter case, call into rlimit to determine the current stack size. In
+ // the event that the current limit == max limit then we'll fall down the
+ // fast path, but since it's unlikely that the limit is going to be lowered
+	// after it's been changed to the max, we should be fine.
+ //
+ // Of course, on arm rlim_cur == rlim_max and there's only the one guard
+ // page. So, we can skip all this there.
+ if (t == &_thread && t->stacksize + vm_page_size != t->freesize) {
+ // We want to call getrlimit() just once, as it's relatively expensive
+ static size_t rlimit_stack;
+
+ if (rlimit_stack == 0) {
+ struct rlimit limit;
+ int ret = getrlimit(RLIMIT_STACK, &limit);
+
+ if (ret == 0) {
+ rlimit_stack = (size_t) limit.rlim_cur;
+ }
+ }
+
+ if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
+ return t->stacksize;
+ } else {
+ return rlimit_stack;
+ }
+ }
+#endif /* !defined(__arm__) && !defined(__arm64__) */
+
if (t == pthread_self() || t == &_thread) {
return t->stacksize;
}
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
ret = _pthread_find_thread(t);
if (ret == 0) {
size = ret; // XXX bug?
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return size;
}
return t->stackaddr;
}
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
ret = _pthread_find_thread(t);
if (ret == 0) {
addr = (void *)(uintptr_t)ret; // XXX bug?
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return addr;
}
if (thread == NULL || thread == self) {
*thread_id = self->thread_id;
} else {
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
res = _pthread_find_thread(thread);
if (res == 0) {
*thread_id = thread->thread_id;
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
}
return res;
}
return ESRCH;
}
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
res = _pthread_find_thread(thread);
if (res == 0) {
strlcpy(threadname, thread->pthread_name, len);
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return res;
}
PTHREAD_ALWAYS_INLINE
static inline void
-__pthread_add_thread(pthread_t t, bool parent)
+__pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread)
{
bool should_deallocate = false;
bool should_add = true;
- LOCK(_pthread_list_lock);
+ if (from_mach_thread){
+ _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
+ } else {
+ _PTHREAD_LOCK(_pthread_list_lock);
+ }
// The parent and child threads race to add the thread to the list.
// When called by the parent:
// child got here first, don't add.
should_add = false;
}
-
+
// If the child exits before we check in then it has to keep
// the thread structure memory alive so our dereferences above
// are valid. If it's a detached thread, then no joiner will
_pthread_count++;
}
- UNLOCK(_pthread_list_lock);
+ if (from_mach_thread){
+ _PTHREAD_UNLOCK_FROM_MACH_THREAD(_pthread_list_lock);
+ } else {
+ _PTHREAD_UNLOCK(_pthread_list_lock);
+ }
if (parent) {
- _pthread_introspection_thread_create(t, should_deallocate);
+ if (!from_mach_thread) {
+ // PR-26275485: Mach threads will likely crash trying to run
+ // introspection code. Since the fall out from the introspection
+ // code not seeing the injected thread is likely less than crashing
+ // in the introspection code, just don't make the call.
+ _pthread_introspection_thread_create(t, should_deallocate);
+ }
if (should_deallocate) {
_pthread_deallocate(t);
}
bool should_remove = true;
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
// When a thread removes itself:
// - Set the childexit flag indicating that the thread has exited.
TAILQ_REMOVE(&__pthread_head, t, plist);
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return ret;
}
-int
-pthread_create(pthread_t *thread,
+static int
+_pthread_create(pthread_t *thread,
const pthread_attr_t *attr,
void *(*start_routine)(void *),
- void *arg)
-{
+ void *arg,
+ bool from_mach_thread)
+{
pthread_t t = NULL;
unsigned int flags = 0;
__is_threaded = 1;
void *stack;
-
+
if (attrs->fastpath) {
// kernel will allocate thread and stack, pass stacksize.
stack = (void *)attrs->stacksize;
t = t2;
}
- __pthread_add_thread(t, true);
-
- // XXX if a thread is created detached and exits, t will be invalid
+ __pthread_add_thread(t, true, from_mach_thread);
+
+ // n.b. if a thread is created detached and exits, t will be invalid
*thread = t;
return 0;
}
+int
+pthread_create(pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *),
+ void *arg)
+{
+ return _pthread_create(thread, attr, start_routine, arg, false);
+}
+
+int
+pthread_create_from_mach_thread(pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void *),
+ void *arg)
+{
+ return _pthread_create(thread, attr, start_routine, arg, true);
+}
+
+static void
+_pthread_suspended_body(pthread_t self)
+{
+ _pthread_set_self(self);
+ __pthread_add_thread(self, false, false);
+ _pthread_exit(self, (self->fun)(self->arg));
+}
+
int
pthread_create_suspended_np(pthread_t *thread,
const pthread_attr_t *attr,
t->arg = arg;
t->fun = start_routine;
- __pthread_add_thread(t, true);
+ __pthread_add_thread(t, true, false);
// Set up a suspended thread.
- _pthread_setup(t, _pthread_body, stack, 1, 0);
+ _pthread_setup(t, _pthread_suspended_body, stack, 1, 0);
return res;
}
return res; // Not a valid thread to detach.
}
- LOCK(thread->lock);
+ _PTHREAD_LOCK(thread->lock);
if (thread->detached & PTHREAD_CREATE_JOINABLE) {
if (thread->detached & _PTHREAD_EXITED) {
// Join the thread if it's already exited.
} else {
res = EINVAL;
}
- UNLOCK(thread->lock);
+ _PTHREAD_UNLOCK(thread->lock);
if (join) {
pthread_join(thread, NULL);
{
pthread_t self = pthread_self();
- LOCK(self->lock);
+ _PTHREAD_LOCK(self->lock);
self->wqkillset = enable ? 1 : 0;
- UNLOCK(self->lock);
+ _PTHREAD_UNLOCK(self->lock);
return 0;
}
}
_pthread_tsd_cleanup(self);
- LOCK(self->lock);
+ _PTHREAD_LOCK(self->lock);
self->detached |= _PTHREAD_EXITED;
self->exit_value = value_ptr;
self->joiner_notify == SEMAPHORE_NULL) {
self->joiner_notify = (semaphore_t)os_get_cached_semaphore();
}
- UNLOCK(self->lock);
+ _PTHREAD_UNLOCK(self->lock);
// Clear per-thread semaphore cache
os_put_cached_semaphore(SEMAPHORE_NULL);
return ESRCH;
}
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
ret = _pthread_find_thread(thread);
if (ret == 0) {
}
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
return ret;
}
if (res == 0) {
if (bypass == 0) {
// Ensure the thread is still valid.
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
res = _pthread_find_thread(t);
if (res == 0) {
t->policy = policy;
t->param = *param;
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
} else {
t->policy = policy;
t->param = *param;
return (t1 == t2);
}
-// Force LLVM not to optimise this to a call to __pthread_set_self, if it does
-// then _pthread_set_self won't be bound when secondary threads try and start up.
+/*
+ * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
+ * then _pthread_set_self won't be bound when secondary threads try and start up.
+ */
PTHREAD_NOINLINE
void
_pthread_set_self(pthread_t p)
{
- extern void __pthread_set_self(void *);
+ return _pthread_set_self_internal(p, true);
+}
+void
+_pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
+{
if (p == NULL) {
p = &_thread;
}
-
+
uint64_t tid = __thread_selfid();
if (tid == -1ull) {
PTHREAD_ABORT("failed to set thread_id");
p->tsd[_PTHREAD_TSD_SLOT_PTHREAD_SELF] = p;
p->tsd[_PTHREAD_TSD_SLOT_ERRNO] = &p->err_no;
p->thread_id = tid;
- __pthread_set_self(&p->tsd[0]);
+
+ if (needs_tsd_base_set) {
+ _thread_set_tsd_base(&p->tsd[0]);
+ }
}
struct _pthread_once_context {
{
const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
- LOCK(thread->lock);
+ _PTHREAD_LOCK(thread->lock);
bool canceled = ((thread->cancel_state & flags) == flags);
- UNLOCK(thread->lock);
+ _PTHREAD_UNLOCK(thread->lock);
if (canceled) {
pthread_exit(isconforming ? PTHREAD_CANCELED : 0);
return 0;
}
-void
-_pthread_set_pfz(uintptr_t address)
+static unsigned long
+_pthread_strtoul(const char *p, const char **endptr, int base)
{
+ uintptr_t val = 0;
+
+ // Expect hex string starting with "0x"
+ if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
+ p += 2;
+ while (1) {
+ char c = *p;
+ if ('0' <= c && c <= '9') {
+ val = (val << 4) + (c - '0');
+ } else if ('a' <= c && c <= 'f') {
+ val = (val << 4) + (c - 'a' + 10);
+ } else if ('A' <= c && c <= 'F') {
+ val = (val << 4) + (c - 'A' + 10);
+ } else {
+ break;
+ }
+ ++p;
+ }
+ }
+
+ *endptr = (char *)p;
+ return val;
+}
+
+static int
+parse_main_stack_params(const char *apple[],
+ void **stackaddr,
+ size_t *stacksize,
+ void **allocaddr,
+ size_t *allocsize)
+{
+ const char *p = _simple_getenv(apple, "main_stack");
+ if (!p) return 0;
+
+ int ret = 0;
+ const char *s = p;
+
+ *stackaddr = _pthread_strtoul(s, &s, 16);
+ if (*s != ',') goto out;
+
+ *stacksize = _pthread_strtoul(s + 1, &s, 16);
+ if (*s != ',') goto out;
+
+ *allocaddr = _pthread_strtoul(s + 1, &s, 16);
+ if (*s != ',') goto out;
+
+ *allocsize = _pthread_strtoul(s + 1, &s, 16);
+ if (*s != ',' && *s != 0) goto out;
+
+ ret = 1;
+out:
+ bzero((char *)p, strlen(p));
+ return ret;
}
-#if !defined(PTHREAD_TARGET_EOS) && !defined(VARIANT_DYLD)
+#if !defined(VARIANT_STATIC)
void *
malloc(size_t sz)
{
_pthread_free(p);
}
}
-#endif
+#endif // VARIANT_STATIC
/*
* Perform package initialization - called automatically when application starts
struct ProgramVars; /* forward reference */
int
-__pthread_init(const struct _libpthread_functions *pthread_funcs, const char *envp[] __unused,
- const char *apple[] __unused, const struct ProgramVars *vars __unused)
+__pthread_init(const struct _libpthread_functions *pthread_funcs,
+ const char *envp[] __unused,
+ const char *apple[],
+ const struct ProgramVars *vars __unused)
{
// Save our provided pushed-down functions
if (pthread_funcs) {
// Set up the main thread structure
//
- void *stackaddr;
- size_t stacksize = DFLSSIZ;
- size_t len = sizeof(stackaddr);
- int mib[] = { CTL_KERN, KERN_USRSTACK };
- if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
- stackaddr = (void *)USRSTACK;
+ // Get the address and size of the main thread's stack from the kernel.
+ void *stackaddr = 0;
+ size_t stacksize = 0;
+ void *allocaddr = 0;
+ size_t allocsize = 0;
+ if (!parse_main_stack_params(apple, &stackaddr, &stacksize, &allocaddr, &allocsize) ||
+ stackaddr == NULL || stacksize == 0) {
+ // Fall back to previous behavior.
+ size_t len = sizeof(stackaddr);
+ int mib[] = { CTL_KERN, KERN_USRSTACK };
+ if (__sysctl(mib, 2, &stackaddr, &len, NULL, 0) != 0) {
+#if defined(__LP64__)
+ stackaddr = (void *)USRSTACK64;
+#else
+ stackaddr = (void *)USRSTACK;
+#endif
+ }
+ stacksize = DFLSSIZ;
+ allocaddr = 0;
+ allocsize = 0;
}
pthread_t thread = &_thread;
pthread_attr_init(&_pthread_attr_default);
- _pthread_struct_init(thread, &_pthread_attr_default, stackaddr, stacksize, 0);
+ _pthread_struct_init(thread, &_pthread_attr_default,
+ stackaddr, stacksize,
+ allocaddr, allocsize);
thread->detached = PTHREAD_CREATE_JOINABLE;
// Finish initialization with common code that is reinvoked on the
__pthread_fork_child_internal(pthread_t p)
{
TAILQ_INIT(&__pthread_head);
- LOCK_INIT(_pthread_list_lock);
+ _PTHREAD_LOCK_INIT(_pthread_list_lock);
// Re-use the main thread's static storage if no thread was provided.
if (p == NULL) {
p = &_thread;
}
- LOCK_INIT(p->lock);
+ _PTHREAD_LOCK_INIT(p->lock);
_pthread_set_kernel_thread(p, mach_thread_self());
_pthread_set_reply_port(p, mach_reply_port());
p->__cleanup_stack = NULL;
}
self = pthread_self();
- LOCK(self->lock);
+ _PTHREAD_LOCK(self->lock);
if (oldstate) {
*oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
}
self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
self->cancel_state |= state;
- UNLOCK(self->lock);
+ _PTHREAD_UNLOCK(self->lock);
if (!conforming) {
_pthread_testcancel(self, 0); /* See if we need to 'die' now... */
}
static void
_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
{
- LOCK(self->lock);
+ _PTHREAD_LOCK(self->lock);
self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
if (value_ptr == PTHREAD_CANCELED) {
self->detached |= _PTHREAD_WASCANCEL;
// 4597450: end
}
- UNLOCK(self->lock);
+ _PTHREAD_UNLOCK(self->lock);
}
int
TAILQ_FOREACH(p, &__pthread_head, plist) {
if (p == thread) {
if (_pthread_kernel_thread(thread) == MACH_PORT_NULL) {
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
sched_yield();
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
goto loop;
}
return 0;
return ESRCH;
}
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
ret = _pthread_find_thread(thread);
if (ret == 0) {
}
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
if (portp != NULL) {
*portp = kport;
} else {
pthread_t p;
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
TAILQ_FOREACH(p, &__pthread_head, plist) {
mach_port_t kp = _pthread_kernel_thread(p);
}
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
}
}
PTHREAD_NOEXPORT void
pthread_workqueue_atfork_child(void)
{
- struct _pthread_registration_data data = {
- .dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *),
- };
+ struct _pthread_registration_data data = {};
+ data.version = sizeof(struct _pthread_registration_data);
+ data.dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
+ data.tsd_offset = offsetof(struct _pthread, tsd);
int rv = __bsdthread_register(thread_start,
- start_wqthread,
- (int)pthreadsize,
- (void*)&data,
- (uintptr_t)sizeof(data),
- data.dispatch_queue_offset);
+ start_wqthread, (int)PTHREAD_SIZE,
+ (void*)&data, (uintptr_t)sizeof(data),
+ data.dispatch_queue_offset);
if (rv > 0) {
__pthread_supported_features = rv;
}
- if (_pthread_priority_get_qos_newest(data.main_qos) != QOS_CLASS_UNSPECIFIED) {
- _pthread_set_main_qos(data.main_qos);
- _thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = data.main_qos;
+ pthread_priority_t main_qos = (pthread_priority_t)data.main_qos;
+
+ if (_pthread_priority_get_qos_newest(main_qos) != QOS_CLASS_UNSPECIFIED) {
+ _pthread_set_main_qos(main_qos);
+ _thread.tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = main_qos;
}
if (__libdispatch_workerfunction != NULL) {
priority_flags |= _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
if (flags & WQ_FLAG_THREAD_EVENT_MANAGER)
priority_flags |= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+ if (kevent)
+ priority_flags |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
priority = _pthread_priority_make_version2(thread_class, 0, priority_flags);
if (thread_reuse == 0) {
// New thread created by kernel, needs initialization.
+ void *stackaddr = self;
size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;
- _pthread_struct_init(self, &_pthread_attr_default, (void*)self, stacksize, 1);
+
+ _pthread_struct_init(self, &_pthread_attr_default,
+ stackaddr, stacksize,
+ PTHREAD_ALLOCADDR(stackaddr, stacksize), PTHREAD_ALLOCSIZE(stackaddr, stacksize));
_pthread_set_kernel_thread(self, kport);
self->wqthread = 1;
self->detached |= PTHREAD_CREATE_DETACHED;
// Update the running thread count and set childrun bit.
- // XXX this should be consolidated with pthread_body().
- _pthread_set_self(self);
+ bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
+ _pthread_set_self_internal(self, !thread_tsd_base_set);
_pthread_introspection_thread_create(self, false);
- __pthread_add_thread(self, false);
+ __pthread_add_thread(self, false, false);
}
// If we're running with fine-grained priority, we also need to
}
self->arg = (void *)(uintptr_t)thread_class;
- if (kevent && keventlist){
+ if (kevent && keventlist && nkevents > 0){
kevent_errors_retry:
(*__libdispatch_keventfunction)(&keventlist, &nkevents);
} else if (errors_out < 0){
PTHREAD_ABORT("kevent return produced an error: %d", errno);
}
- _pthread_exit(self, NULL);
+ goto thexit;
} else if (kevent){
(*__libdispatch_keventfunction)(NULL, NULL);
- __workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
- _pthread_exit(self, NULL);
+ __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
+ goto thexit;
}
if (__pthread_supported_features & PTHREAD_FEATURE_FINEPRIO) {
}
__workq_kernreturn(WQOPS_THREAD_RETURN, NULL, 0, 0);
+
+thexit:
+ {
+ // Reset QoS to something low for the cleanup process
+ pthread_priority_t priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
+ _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
+ }
+
_pthread_exit(self, NULL);
}
_pthread_introspection_hook_callout_thread_create(pthread_t t, bool destroy)
{
_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_CREATE, t, t,
- pthreadsize);
+ PTHREAD_SIZE);
if (!destroy) return;
_pthread_introspection_thread_destroy(t);
}
freesize = t->stacksize + t->guardsize;
freeaddr = t->stackaddr - freesize;
} else {
- freesize = t->freesize - pthreadsize;
+ freesize = t->freesize - PTHREAD_SIZE;
freeaddr = t->freeaddr;
}
_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_START, t,
void *freeaddr, size_t freesize, bool destroy)
{
if (destroy && freesize) {
- freesize -= pthreadsize;
+ freesize -= PTHREAD_SIZE;
}
_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_TERMINATE, t,
freeaddr, freesize);
{
if (t == &_thread) return;
_pthread_introspection_hook(PTHREAD_INTROSPECTION_THREAD_DESTROY, t, t,
- pthreadsize);
+ PTHREAD_SIZE);
}
static inline void
#include <mach/i386/syscall_sw.h>
- .text
- .align 2, 0x90
- .globl ___pthread_set_self
-___pthread_set_self:
- movl $0, %esi // 0 as the second argument
- movl $ SYSCALL_CONSTRUCT_MDEP(3), %eax // Machine-dependent syscall number 3
- MACHDEP_SYSCALL_TRAP
- ret
-
#ifndef VARIANT_DYLD
.align 2, 0x90
#include <mach/i386/syscall_sw.h>
- .text
- .align 2, 0x90
- .globl ___pthread_set_self
-___pthread_set_self:
- pushl 4(%esp)
- pushl $0
- movl $3,%eax
- MACHDEP_SYSCALL_TRAP
- addl $8,%esp
- ret
-
#ifndef VARIANT_DYLD
.align 2, 0x90
#include <mach/arm/syscall_sw.h>
- .text
- .align 2
- .globl ___pthread_set_self
-___pthread_set_self:
- /* fast trap for thread_set_cthread */
- mov r3, #2
- mov r12, #0x80000000
- swi #SWI_SYSCALL
- bx lr
-
#ifndef VARIANT_DYLD
// This routine is never called directly by user code, jumped from kernel
size_t idx;
pthread_globals_t globals = _pthread_globals();
- OSSpinLockLock(&globals->pthread_atfork_lock);
+ _PTHREAD_LOCK(globals->pthread_atfork_lock);
idx = globals->atfork_count++;
if (idx == 0) {
kern_return_t kr;
mach_vm_address_t storage = 0;
mach_vm_size_t size = PTHREAD_ATFORK_MAX * sizeof(struct pthread_atfork_entry);
- OSSpinLockUnlock(&globals->pthread_atfork_lock);
+ _PTHREAD_UNLOCK(globals->pthread_atfork_lock);
kr = mach_vm_map(mach_task_self(),
&storage,
size,
VM_PROT_DEFAULT,
VM_PROT_ALL,
VM_INHERIT_DEFAULT);
- OSSpinLockLock(&globals->pthread_atfork_lock);
+ _PTHREAD_LOCK(globals->pthread_atfork_lock);
if (kr == KERN_SUCCESS) {
if (globals->atfork == globals->atfork_storage) {
globals->atfork = storage;
bzero(globals->atfork_storage, sizeof(globals->atfork_storage));
} else {
// Another thread did vm_map first.
- OSSpinLockUnlock(&globals->pthread_atfork_lock);
+ _PTHREAD_UNLOCK(globals->pthread_atfork_lock);
mach_vm_deallocate(mach_task_self(), storage, size);
- OSSpinLockLock(&globals->pthread_atfork_lock);
+ _PTHREAD_LOCK(globals->pthread_atfork_lock);
}
} else {
res = ENOMEM;
e->parent = parent;
e->child = child;
}
- OSSpinLockUnlock(&globals->pthread_atfork_lock);
+ _PTHREAD_UNLOCK(globals->pthread_atfork_lock);
return res;
}
{
pthread_globals_t globals = _pthread_globals();
- OSSpinLockLock(&globals->pthread_atfork_lock);
+ _PTHREAD_LOCK(globals->pthread_atfork_lock);
size_t idx;
for (idx = globals->atfork_count; idx > 0; --idx) {
}
}
- OSSpinLockLock(&globals->psaved_self_global_lock);
+ _PTHREAD_LOCK(globals->psaved_self_global_lock);
globals->psaved_self = pthread_self();
- OSSpinLockLock(&globals->psaved_self->lock);
+ _PTHREAD_LOCK(globals->psaved_self->lock);
}
// Called after the fork(2) system call returns to the parent process.
{
pthread_globals_t globals = _pthread_globals();
- OSSpinLockUnlock(&globals->psaved_self->lock);
- OSSpinLockUnlock(&globals->psaved_self_global_lock);
+ _PTHREAD_UNLOCK(globals->psaved_self->lock);
+ _PTHREAD_UNLOCK(globals->psaved_self_global_lock);
size_t idx;
for (idx = 0; idx < globals->atfork_count; ++idx) {
e->parent();
}
}
- OSSpinLockUnlock(&globals->pthread_atfork_lock);
+ _PTHREAD_UNLOCK(globals->pthread_atfork_lock);
}
// Called after the fork(2) system call returns to the new child process.
_pthread_fork_child(void)
{
pthread_globals_t globals = _pthread_globals();
- globals->psaved_self_global_lock = OS_SPINLOCK_INIT;
+ _PTHREAD_LOCK_INIT(globals->psaved_self_global_lock);
__pthread_fork_child_internal(globals->psaved_self);
__is_threaded = 0;
pthread_workqueue_atfork_child();
e->child();
}
}
- globals->pthread_atfork_lock = OS_SPINLOCK_INIT;
+ _PTHREAD_LOCK_INIT(globals->pthread_atfork_lock);
}
#if __DARWIN_UNIX03
int state;
- LOCK(thread->lock);
+ _PTHREAD_LOCK(thread->lock);
state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
- UNLOCK(thread->lock);
+ _PTHREAD_UNLOCK(thread->lock);
if (state & PTHREAD_CANCEL_ENABLE)
__pthread_markcancel(_pthread_kernel_thread(thread));
#else /* __DARWIN_UNIX03 */
(type != PTHREAD_CANCEL_ASYNCHRONOUS))
return EINVAL;
self = pthread_self();
- LOCK(self->lock);
+ _PTHREAD_LOCK(self->lock);
if (oldtype)
*oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
self->cancel_state |= type;
- UNLOCK(self->lock);
+ _PTHREAD_UNLOCK(self->lock);
#if !__DARWIN_UNIX03
_pthread_testcancel(self, 0); /* See if we need to 'die' now... */
#endif /* __DARWIN_UNIX03 */
{
pthread_t thread = (pthread_t)arg;
- LOCK(thread->lock);
+ _PTHREAD_LOCK(thread->lock);
/* leave another thread to join */
thread->joiner = (struct _pthread *)NULL;
- UNLOCK(thread->lock);
+ _PTHREAD_UNLOCK(thread->lock);
}
#endif /* __DARWIN_UNIX03 */
death = (semaphore_t)os_get_cached_semaphore();
}
- LOCK(thread->lock);
+ _PTHREAD_LOCK(thread->lock);
if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
(thread->joiner == NULL)) {
PTHREAD_ASSERT(_pthread_kernel_thread(thread) == kthport);
}
joinsem = thread->joiner_notify;
thread->joiner = self;
- UNLOCK(thread->lock);
+ _PTHREAD_UNLOCK(thread->lock);
if (death != SEMAPHORE_NULL) {
os_put_cached_semaphore((os_semaphore_t)death);
os_put_cached_semaphore((os_semaphore_t)joinsem);
res = _pthread_join_cleanup(thread, value_ptr, conforming);
} else {
- UNLOCK(thread->lock);
+ _PTHREAD_UNLOCK(thread->lock);
res = EDEADLK;
}
} else {
- UNLOCK(thread->lock);
+ _PTHREAD_UNLOCK(thread->lock);
res = EINVAL;
}
if (death != SEMAPHORE_NULL) {
{
int res = EINVAL;
if (cond->sig == _PTHREAD_COND_SIG_init) {
- LOCK(cond->lock);
+ _PTHREAD_LOCK(cond->lock);
if (cond->sig == _PTHREAD_COND_SIG_init) {
res = _pthread_cond_init(cond, NULL, 0);
if (inited) {
} else if (cond->sig == _PTHREAD_COND_SIG) {
res = 0;
}
- UNLOCK(cond->lock);
+ _PTHREAD_UNLOCK(cond->lock);
} else if (cond->sig == _PTHREAD_COND_SIG) {
res = 0;
}
_pthread_cond *cond = (_pthread_cond *)ocond;
int res = EINVAL;
if (cond->sig == _PTHREAD_COND_SIG) {
- LOCK(cond->lock);
+ _PTHREAD_LOCK(cond->lock);
uint64_t oldval64, newval64;
uint32_t lcntval, ucntval, scntval;
cond->sig = _PTHREAD_NO_SIG;
res = 0;
- UNLOCK(cond->lock);
+ _PTHREAD_UNLOCK(cond->lock);
if (needclearpre) {
(void)__psynch_cvclrprepost(cond, lcntval, ucntval, scntval, 0, lcntval, flags);
pthread_t thread = pthread_self();
int thcanceled = 0;
- LOCK(thread->lock);
+ _PTHREAD_LOCK(thread->lock);
thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
- UNLOCK(thread->lock);
+ _PTHREAD_UNLOCK(thread->lock);
if (thcanceled == 0) {
return;
#endif /* __DARWIN_UNIX03 */
_pthread_cond *cond = (_pthread_cond *)ocond;
- LOCK_INIT(cond->lock);
+ _PTHREAD_LOCK_INIT(cond->lock);
return _pthread_cond_init(cond, attr, conforming);
}
--- /dev/null
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+extern int __pthread_chdir(char *path);
+int
+pthread_chdir_np(char *path)
+{
+ return __pthread_chdir(path);
+}
+
+extern int __pthread_fchdir(int fd);
+int
+pthread_fchdir_np(int fd)
+{
+ return __pthread_fchdir(fd);
+}
#include "kern/kern_trace.h"
#include <sys/syscall.h>
-#include "os/atomic.h"
-
#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
return EBUSY;
#endif
_pthread_mutex *mutex = (_pthread_mutex *)omutex;
- LOCK_INIT(mutex->lock);
+ _PTHREAD_LOCK_INIT(mutex->lock);
return (_pthread_mutex_init(mutex, attr, 0x7));
}
int res = EINVAL;
_pthread_mutex *mutex = (_pthread_mutex *)omutex;
if (_pthread_mutex_check_signature(mutex)) {
- LOCK(mutex->lock);
+ _PTHREAD_LOCK(mutex->lock);
*prioceiling = mutex->prioceiling;
res = 0;
- UNLOCK(mutex->lock);
+ _PTHREAD_UNLOCK(mutex->lock);
}
return res;
}
int res = EINVAL;
_pthread_mutex *mutex = (_pthread_mutex *)omutex;
if (_pthread_mutex_check_signature(mutex)) {
- LOCK(mutex->lock);
+ _PTHREAD_LOCK(mutex->lock);
if (prioceiling >= -999 || prioceiling <= 999) {
*old_prioceiling = mutex->prioceiling;
mutex->prioceiling = prioceiling;
res = 0;
}
- UNLOCK(mutex->lock);
+ _PTHREAD_UNLOCK(mutex->lock);
}
return res;
}
_pthread_mutex *mutex = (_pthread_mutex *)omutex;
if (_pthread_mutex_check_signature_init(mutex)) {
- LOCK(mutex->lock);
+ _PTHREAD_LOCK(mutex->lock);
if (_pthread_mutex_check_signature_init(mutex)) {
// initialize a statically initialized mutex to provide
// compatibility for misbehaving applications.
} else if (_pthread_mutex_check_signature(mutex)) {
res = 0;
}
- UNLOCK(mutex->lock);
+ _PTHREAD_UNLOCK(mutex->lock);
} else if (_pthread_mutex_check_signature(mutex)) {
res = 0;
}
int res = EINVAL;
- LOCK(mutex->lock);
+ _PTHREAD_LOCK(mutex->lock);
if (_pthread_mutex_check_signature(mutex)) {
uint32_t lgenval, ugenval;
uint64_t oldval64;
mutex->sig = _PTHREAD_NO_SIG;
res = 0;
}
- UNLOCK(mutex->lock);
+ _PTHREAD_UNLOCK(mutex->lock);
return res;
}
}
#endif
if (res == 0) {
- LOCK_INIT(rwlock->lock);
+ _PTHREAD_LOCK_INIT(rwlock->lock);
res = __pthread_rwlock_init(rwlock, attr);
}
return res;
_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- LOCK(rwlock->lock);
+ _PTHREAD_LOCK(rwlock->lock);
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
res = __pthread_rwlock_init(rwlock, NULL);
} else if (rwlock->sig == _PTHREAD_RWLOCK_SIG){
res = 0;
}
- UNLOCK(rwlock->lock);
+ _PTHREAD_UNLOCK(rwlock->lock);
} else if (rwlock->sig == _PTHREAD_RWLOCK_SIG){
res = 0;
}
uintptr_t destructor;
} _pthread_keys[_INTERNAL_POSIX_THREAD_KEYS_END];
-static pthread_lock_t tsd_lock = LOCK_INITIALIZER;
+static _pthread_lock tsd_lock = _PTHREAD_LOCK_INITIALIZER;
// The pthread_tsd destruction order can be reverted to the old (pre-10.11) order
// by setting this environment variable.
int res = EAGAIN; // Returns EAGAIN if key cannot be allocated.
pthread_key_t k;
- LOCK(tsd_lock);
+ _PTHREAD_LOCK(tsd_lock);
for (k = __pthread_tsd_start; k < __pthread_tsd_end; k++) {
if (_pthread_key_set_destructor(k, destructor)) {
*key = k;
break;
}
}
- UNLOCK(tsd_lock);
+ _PTHREAD_UNLOCK(tsd_lock);
return res;
}
{
int res = EINVAL; // Returns EINVAL if key is not allocated.
- LOCK(tsd_lock);
+ _PTHREAD_LOCK(tsd_lock);
if (key >= __pthread_tsd_start && key < __pthread_tsd_end) {
if (_pthread_key_unset_destructor(key)) {
struct _pthread *p;
- LOCK(_pthread_list_lock);
+ _PTHREAD_LOCK(_pthread_list_lock);
TAILQ_FOREACH(p, &__pthread_head, plist) {
// No lock for word-sized write.
p->tsd[key] = 0;
}
- UNLOCK(_pthread_list_lock);
+ _PTHREAD_UNLOCK(_pthread_list_lock);
res = 0;
}
}
- UNLOCK(tsd_lock);
+ _PTHREAD_UNLOCK(tsd_lock);
return res;
}
return res;
}
+int
+_pthread_setspecific_static(pthread_key_t key, void *value)
+{
+ int res = EINVAL;
+
+#if !VARIANT_DYLD
+ if (key < __pthread_tsd_start) {
+ _pthread_setspecific_direct(key, value);
+ res = 0;
+ }
+#endif // !VARIANT_DYLD
+
+ return res;
+}
+
void*
pthread_getspecific(pthread_key_t key)
{
{
int j;
- // clean up all keys except the garbage collection key
+ // clean up all keys
for (j = 0; j < PTHREAD_DESTRUCTOR_ITERATIONS; j++) {
pthread_key_t k;
for (k = __pthread_tsd_start; k <= self->max_tsd_key; k++) {
}
for (k = __pthread_tsd_first; k <= __pthread_tsd_max; k++) {
- if (k >= __PTK_FRAMEWORK_GC_KEY0 && k <= __PTK_FRAMEWORK_GC_KEY9) {
- // GC must be cleaned up last
- continue;
- }
_pthread_tsd_cleanup_key(self, k);
}
}
self->max_tsd_key = 0;
-
- // clean up all the GC keys
- for (j = 0; j < PTHREAD_DESTRUCTOR_ITERATIONS; j++) {
- pthread_key_t k;
- for (k = __PTK_FRAMEWORK_GC_KEY0; k <= __PTK_FRAMEWORK_GC_KEY9; k++) {
- _pthread_tsd_cleanup_key(self, k);
- }
- }
}
static void
{
int res = EINVAL; // Returns EINVAL if key is out of range.
if (key >= __pthread_tsd_first && key < __pthread_tsd_start) {
- LOCK(tsd_lock);
+ _PTHREAD_LOCK(tsd_lock);
_pthread_key_set_destructor(key, destructor);
if (key > __pthread_tsd_max) {
__pthread_tsd_max = key;
}
- UNLOCK(tsd_lock);
+ _PTHREAD_UNLOCK(tsd_lock);
res = 0;
}
return res;
#include <spawn.h>
#include <spawn_private.h>
#include <sys/spawn_internal.h>
+#include <sys/ulock.h>
// TODO: remove me when internal.h can include *_private.h itself
#include "workqueue_private.h"
kr = mach_vm_allocate(mach_task_self(), &vm_addr, round_page(sizeof(struct pthread_override_s)), VM_MAKE_TAG(VM_MEMORY_LIBDISPATCH) | VM_FLAGS_ANYWHERE);
if (kr != KERN_SUCCESS) {
errno = ENOMEM;
- return NULL;
+ return (_Nonnull pthread_override_t) NULL;
}
}
}
rv = NULL;
}
- return rv;
+ return (_Nonnull pthread_override_t) rv;
}
int
}
int
-_pthread_override_qos_class_start_direct(mach_port_t thread, pthread_priority_t priority)
+_pthread_qos_override_start_direct(mach_port_t thread, pthread_priority_t priority, void *resource)
{
- // use pthread_self as the default per-thread memory allocation to track the override in the kernel
- int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread, priority, (uintptr_t)pthread_self());
+ int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START, thread, priority, (uintptr_t)resource);
if (res == -1) { res = errno; }
return res;
}
int
-_pthread_override_qos_class_end_direct(mach_port_t thread)
+_pthread_qos_override_end_direct(mach_port_t thread, void *resource)
{
- // use pthread_self as the default per-thread memory allocation to track the override in the kernel
- int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread, (uintptr_t)pthread_self(), 0);
+ int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, thread, (uintptr_t)resource, 0);
if (res == -1) { res = errno; }
return res;
}
+int
+_pthread_override_qos_class_start_direct(mach_port_t thread, pthread_priority_t priority)
+{
+ // use pthread_self as the default per-thread memory allocation to track the override in the kernel
+ return _pthread_qos_override_start_direct(thread, priority, pthread_self());
+}
+
+int
+_pthread_override_qos_class_end_direct(mach_port_t thread)
+{
+ // use pthread_self as the default per-thread memory allocation to track the override in the kernel
+ return _pthread_qos_override_end_direct(thread, pthread_self());
+}
+
int
_pthread_workqueue_override_start_direct(mach_port_t thread, pthread_priority_t priority)
{
return res;
}
+int
+_pthread_workqueue_override_start_direct_check_owner(mach_port_t thread, pthread_priority_t priority, mach_port_t *ulock_addr)
+{
+#if !TARGET_OS_IPHONE
+ static boolean_t kernel_supports_owner_check = TRUE;
+ if (!kernel_supports_owner_check) {
+ ulock_addr = NULL;
+ }
+#endif
+
+ for (;;) {
+ int res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread, priority, ulock_addr);
+ if (res == -1) { res = errno; }
+#if !TARGET_OS_IPHONE
+ if (ulock_addr && res == EINVAL) {
+ if ((uintptr_t)ulock_addr % _Alignof(_Atomic uint32_t)) {
+ // do not mask errors related to bad ulock addresses
+ return EINVAL;
+ }
+ // backward compatibility for the XBS chroot
+ // BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH used to return EINVAL if
+ // arg3 was non-NULL.
+ kernel_supports_owner_check = FALSE;
+ ulock_addr = NULL;
+ continue;
+ }
+#endif
+ if (ulock_addr && res == EFAULT) {
+ // kernel wants us to redrive the call, so while we refault the
+ // memory, also revalidate the owner
+ uint32_t uval = os_atomic_load(ulock_addr, relaxed);
+ if (ulock_owner_value_to_port_name(uval) != thread) {
+ return ESTALE;
+ }
+ continue;
+ }
+
+ return res;
+ }
+}
+
int
_pthread_workqueue_override_reset(void)
{
#include <arm/arch.h>
#endif
-#if !defined(PTHREAD_TARGET_EOS) && !defined(VARIANT_DYLD) && \
+// XXX <rdar://problem/24290376> eOS version of libpthread doesn't have UP optimizations
+#if !defined(VARIANT_STATIC) && \
defined(_ARM_ARCH_7) && !defined(__ARM_ARCH_7S__)
#if OS_ATOMIC_UP
#define OS_VARIANT_SELECTOR mp
#endif
-#endif // !PTHREAD_TARGET_EOS && !VARIANT_DYLD && _ARM_ARCH_7 && !__ARM_ARCH_7S__
+#endif // !VARIANT_STATIC && _ARM_ARCH_7 && !__ARM_ARCH_7S__
#define OS_VARIANT(f, v) OS_CONCAT(f, OS_CONCAT($VARIANT$, v))
* results.
* @discussion Such work is requested to run at a priority below critical user-
* interactive work, but relatively higher than other work on the system. This
- * is not an energy-efficient QOS class to use for large tasks and the use of
- * this QOS class should be limited to operations where the user is immediately
- * waiting for the results.
+ * is not an energy-efficient QOS class to use for large tasks. Its use
+ * should be limited to operations of short enough duration that the user is
+ * unlikely to switch tasks while waiting for the results. Typical
+ * user-initiated work will have progress indicated by the display of
+ * placeholder content or modal user interface.
*
* @constant QOS_CLASS_DEFAULT
* @abstract A default QOS class used by the system in cases where more specific
* immediately waiting for the results.
* @discussion Such work is requested to run at a priority below critical user-
* interactive and user-initiated work, but relatively higher than low-level
- * system maintenance tasks. The use of this QOS class indicates the work should
- * be run in an energy and thermally-efficient manner.
+ * system maintenance tasks. The use of this QOS class indicates the work
+ * should be run in an energy and thermally-efficient manner. The progress of
+ * utility work may or may not be indicated to the user, but the effect of such
+ * work is user-visible.
*
* @constant QOS_CLASS_BACKGROUND
* @abstract A QOS class which indicates work performed by this thread was not
-SOURCES := $(wildcard *.c)
-TARGETS := $(patsubst %.c,%,$(SOURCES))
-TESTS := $(patsubst %,test-%,$(TARGETS))
+PROJECT := libpthread
+TEST_DIR := tests/
-include Makefile.common
+ifeq ($(DEVELOPER_DIR),)
+ DEVELOPER_DIR := $(shell xcode-select -p)
+endif
-CFLAGS += -I$(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders
+include $(DEVELOPER_DIR)/AppleInternal/Makefiles/darwintest/Makefile.common
-%: %.c
- $(CC) -o $(BUILDDIR)/$@ $< $(CFLAGS)
+TARGETS :=
+TARGETS += atfork
+TARGETS += bsdthread_set_self
+TARGETS += cond
+#TARGETS += cond_hang3
+#TARGETS += cond_stress
+TARGETS += cond_timed
+TARGETS += custom_stack
+TARGETS += stack_aslr
+TARGETS += join
+TARGETS += main_stack
+TARGETS += main_stack_custom
+#TARGETS += maxwidth
+TARGETS += mutex
+TARGETS += mutex_try
+TARGETS += once_cancel
+TARGETS += pthread_attr_setstacksize
+TARGETS += pthread_bulk_create
+TARGETS += pthread_cancel
+TARGETS += pthread_cwd
+TARGETS += pthread_exit
+TARGETS += pthread_introspection
+TARGETS += pthread_setspecific
+TARGETS += pthread_threadid_np
+#TARGETS += qos
+#TARGETS += rwlock-22244050
+#TARGETS += rwlock-signal
+#TARGETS += rwlock
+TARGETS += tsd
+#TARGETS += wq_block_handoff
+#TARGETS += wq_event_manager
+#TARGETS += wq_kevent
+#TARGETS += wq_kevent_stress
+TARGETS += add_timer_termination
-all: $(TARGETS)
+OTHER_TARGETS :=
-test check: $(TESTS)
+OTHER_CFLAGS := -DDARWINTEST -Weverything \
+ -Wno-vla -Wno-bad-function-cast -Wno-missing-noreturn \
+ -Wno-missing-field-initializers -Wno-format-pedantic \
+ -Wno-gnu-folding-constant
+OTHER_LDFLAGS := -ldarwintest_utils
-$(TESTS): test-%: %
- @echo "[TEST] $<"
- @$(TEST_ENV) ./$<
- @echo "[END] $<"
- @echo
+#TARGETS += main_stack_legacy // Disabled by default due to linker warnings
+#main_stack_legacy: OTHER_LDFLAGS += -Wl,-stack_addr,0xc1000000 -Wl,-stack_size,0x0f00000
+#main_stack_legacy: OTHER_CFLAGS += -DSTACKSIZE=0x0f00000
+#main_stack_legacy: ARCH_FLAGS = -arch i386
+#main_stack_legacy: DEPLOYMENT_TARGET_FLAGS = -mmacosx-version-min=10.7
-clean:
- rm -f $(TARGETS)
+main_stack_custom: OTHER_LDFLAGS += -Wl,-stack_size,0x14000
+main_stack_custom: OTHER_CFLAGS += -DSTACKSIZE=0x14000
-setup:
- mkdir -p $(BUILDDIR)
+bsdthread_set_self: OTHER_CFLAGS += -D_DARWIN_FEATURE_CLOCK_GETTIME
-
-# B&I Targets
-
-installhdrs:
-
-install: setup all
-
-
-.PHONY: all test check clean $(TESTS)
+include $(DEVELOPER_DIR)/AppleInternal/Makefiles/darwintest/Makefile.targets
+++ /dev/null
-# Code 'borrowed' from xnu/tools/tests Makefile structure.
-#
-# This provides a somewhat flexible framework (albeit, not perfect)
-# for building tests for multiple platforms using the correct toolset
-#
-# Please contact: nwertman@apple.com with any questions
-
-
-ifneq ($(SRCROOT),)
-SRCDIR=$(SRCROOT)
-else
-SRCDIR?=$(shell /bin/pwd)
-endif
-
-ifneq ($(OBJROOT),)
-OBJDIR?=$(OBJROOT)
-else
-OBJDIR?=$(SRCDIR)/build/obj
-endif
-
-ifneq ($(DSTROOT),)
-BUILDDIR?=$(DSTROOT)/AppleInternal/CoreOS/tests/$(PROJECT)
-else
-BUILDDIR?=$(SRCDIR)/build/dst
-endif
-
-#
-# Common definitions for test directories
-#
-
-XCRUN := /usr/bin/xcrun
-SDKROOT ?= macosx.internal
-
-# SDKROOT may be passed as a shorthand like "iphoneos.internal". We
-# must resolve these to a full path and override SDKROOT.
-
-SDKROOT_RESOLVED := $(shell $(XCRUN) -sdk $(SDKROOT) -show-sdk-path)
-ifeq ($(strip $(SDKROOT)_$(SDKROOT_RESOLVED)),/_)
-SDKROOT_RESOLVED := /
-endif
-override SDKROOT = $(SDKROOT_RESOLVED)
-
-SDKVERSION := $(shell $(XCRUN) -sdk $(SDKROOT) -show-sdk-version)
-
-PLATFORMPATH := $(shell $(XCRUN) -sdk $(SDKROOT) -show-sdk-platform-path)
-PLATFORM := $(shell echo $(PLATFORMPATH) | sed 's,^.*/\([^/]*\)\.platform$$,\1,')
-
-ifeq ($(PLATFORM),watchOS)
-PLATFORM := WatchOS
-endif
-
-SUPPORTED_EMBEDDED_PLATFORMS := iPhoneOS iPhoneOSNano tvOS AppleTVOS WatchOS
-Embedded = $(if $(filter $(SUPPORTED_EMBEDDED_PLATFORMS),$(PLATFORM)),YES,NO)
-
-#
-# Deployment target flag
-#
-ifeq ($(PLATFORM),MacOSX)
-DEPLOYMENT_TARGET_FLAGS = -mmacosx-version-min=$(SDKVERSION)
-else ifeq ($(PLATFORM),WatchOS)
-DEPLOYMENT_TARGET_FLAGS = -mwatchos-version-min=$(SDKVERSION)
-else ifeq ($(PLATFORM),tvOS)
-DEPLOYMENT_TARGET_FLAGS = -mtvos-version-min=$(SDKVERSION)
-else ifeq ($(PLATFORM),AppleTVOS)
-DEPLOYMENT_TARGET_FLAGS = -mtvos-version-min=$(SDKVERSION)
-else ifneq ($(filter $(SUPPORTED_EMBEDDED_PLATFORMS),$(PLATFORM)),)
-DEPLOYMENT_TARGET_FLAGS = -miphoneos-version-min=$(SDKVERSION)
-else ifneq ($(filter $(SUPPORTED_SIMULATOR_PLATFORMS),$(PLATFORM)),)
-DEPLOYMENT_TARGET_FLAGS =
-else
-DEPLOYMENT_TARGET_FLAGS =
-endif
-
-DEPLOYMENT_TARGET_DEFINES = -DPLATFORM_$(PLATFORM)
-
-
-
-# setup the TARGETSDK and SDKROOT variables
-TARGETSDK:=$(SDKROOT)
-SDKROOTPATH:=$(SDKROOT)
-
-# make sure we have a build directory
-$(shell [ -d "$(BUILDDIR)" ] || mkdir -p $(BUILDDIR))
-
-#arch configs if not provided
-ifdef RC_ARCHS
-ARCH_CONFIGS:=$(RC_ARCHS)
-endif
-
-ifeq ($(ARCH_CONFIGS),)
-ARCH_CONFIGS:=
-ifeq ($(Embedded),YES)
-ARCH_CONFIGS:=$(shell $(XCRUN) -sdk $(TARGETSDK) otool -f -v $(SDKROOT)/usr/lib/system/libsystem_kernel.dylib | grep architecture | cut -d' ' -f 2 | tr '\n' ' ')
-else
-ARCH_CONFIGS:=x86_64 i386
-endif
-endif
-
-ARCH_CONFIGS_32:=$(filter-out %64,$(ARCH_CONFIGS))
-ARCH_CONFIGS_64:=$(filter %64,$(ARCH_CONFIGS))
-
-ARCH_FLAGS:=$(foreach argarch,$(ARCH_CONFIGS),-arch $(argarch) )
-
-
-#setup the compiler flags.
-CC:=$(shell $(XCRUN) -sdk "$(TARGETSDK)" -find clang)
-MIG:=$(shell $(XCRUN) -sdk "$(TARGETSDK)" -find mig)
-CODESIGN:=$(shell $(XCRUN) -sdk "$(TARGETSDK)" -find codesign)
-CODESIGN_ALLOCATE:=$(shell $(XCRUN) -sdk "$(TARGETSDK)" -find codesign_allocate)
-PLUTIL:=$(shell $(XCRUN) -sdk "$(TARGETSDK)" -find plutil)
-
-CFLAGS=-I$(BUILDDIR) -I. -isysroot $(SDKROOTPATH) $(ARCH_FLAGS)
-
-ifeq ($(Embedded),YES)
-TARGET_NAME=ios
-CONFIG_EMBED_DEFINE:= -DCONFIG_EMBEDDED=1
-else
-TARGET_NAME=osx
-CONFIG_EMBED_DEFINE:=
-endif
-
-MORECFLAGS=$(CONFIG_EMBED_DEFINE)
\ No newline at end of file
--- /dev/null
+#include <assert.h>
+#include <errno.h>
+#include <stdatomic.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/qos.h>
+#include <mach/mach_time.h>
+
+#include <pthread.h>
+#include <pthread/tsd_private.h>
+#include <pthread/qos_private.h>
+#include <pthread/workqueue_private.h>
+
+#include <dispatch/dispatch.h>
+
+#include <darwintest.h>
+#include <darwintest_utils.h>
+
+extern void __exit(int) __attribute__((noreturn));
+
+static void __attribute__((noreturn))
+run_add_timer_termination(void)
+{
+ const int SOURCES = 32;
+ static unsigned int time_to_sleep; time_to_sleep = (unsigned int)(arc4random() % 5000 + 500);
+
+ static int pipes[SOURCES][2];
+ static dispatch_source_t s[SOURCES];
+ for (int i = 0; i < SOURCES; i++) {
+ pipe(pipes[i]);
+ s[i] = dispatch_source_create(DISPATCH_SOURCE_TYPE_READ, (uintptr_t)pipes[i][0], 0, NULL);
+ dispatch_source_set_event_handler(s[i], ^{
+ while(1) pause();
+ });
+ dispatch_resume(s[i]);
+ }
+
+ dispatch_async(dispatch_get_global_queue(0,0), ^{
+ for (int i = 1; i < SOURCES; i++){
+ write(pipes[i][1], &SOURCES, 1);
+ usleep(1);
+ }
+ while(1) pause();
+ });
+
+ usleep(time_to_sleep);
+ __exit(0);
+}
+
+T_DECL(add_timer_termination, "termination during add timer",
+ T_META_CHECK_LEAKS(NO))
+{
+ const int ROUNDS = 128;
+ const int TIMEOUT = 5;
+ for (int i = 0; i < ROUNDS; i++){
+ pid_t pid = fork();
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "fork");
+ if (pid == 0) { // child
+ run_add_timer_termination();
+ } else { // parent
+ bool success = dt_waitpid(pid, NULL, NULL, TIMEOUT);
+ T_ASSERT_TRUE(success, "Child %d exits successfully", i);
+ }
+ }
+}
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
-#include <os/assumes.h>
#include <sys/wait.h>
+#include <stdlib.h>
-#define DECL_ATFORK(x) \
-static void prepare_##x(void) { \
- printf("%d: %s\n", getpid(), __FUNCTION__); \
-} \
-static void parent_##x(void) { \
- printf("%d: %s\n", getpid(), __FUNCTION__); \
-} \
-static void child_##x(void) { \
- printf("%d: %s\n", getpid(), __FUNCTION__); \
+#include <darwintest.h>
+
+static const char ATFORK_PREPARE[] = "prepare";
+static const char ATFORK_PARENT[] = "parent";
+static const char ATFORK_CHILD[] = "child";
+
+// One recorded pthread_atfork callback invocation: which registration fired
+// and which phase string (ATFORK_PREPARE/PARENT/CHILD) it was.
+struct callback_event {
+ size_t registration_idx;
+ const char *type;
+};
+
+#define NUM_REGISTRATIONS ((size_t) 20)
+// room for 5 full passes of all registrations (fork in parent + fork in child)
+static struct callback_event events[NUM_REGISTRATIONS * 5];
+static size_t recorded_events = 0;
+
+// Append an event to the global log; silently drops events once the array is
+// full. NOTE(review): no locking — assumes callbacks never run concurrently.
+static void
+record_callback(size_t registration_idx, const char *type)
+{
+ if (recorded_events == (sizeof(events) / sizeof(events[0]))) {
+ return; // events array is full
+ }
+ struct callback_event *evt = &events[recorded_events++];
+ evt->registration_idx = registration_idx;
+ evt->type = type;
+ T_LOG("[%d] callback: #%lu %s", getpid(), registration_idx, type);
}
-#define ATFORK(x) \
-os_assumes_zero(pthread_atfork(prepare_##x, parent_##x, child_##x));
-
-DECL_ATFORK(1);
-DECL_ATFORK(2);
-DECL_ATFORK(3);
-DECL_ATFORK(4);
-DECL_ATFORK(5);
-DECL_ATFORK(6);
-DECL_ATFORK(7);
-DECL_ATFORK(8);
-DECL_ATFORK(9);
-DECL_ATFORK(10);
-DECL_ATFORK(11);
-DECL_ATFORK(12);
-DECL_ATFORK(13);
-DECL_ATFORK(14);
-DECL_ATFORK(15);
-DECL_ATFORK(16);
-DECL_ATFORK(17);
-DECL_ATFORK(18);
-DECL_ATFORK(19);
-
-int main(int argc, char *argv[]) {
- ATFORK(1);
- ATFORK(2);
- ATFORK(3);
- ATFORK(4);
- ATFORK(5);
- ATFORK(6);
- ATFORK(7);
- ATFORK(8);
- ATFORK(9);
- ATFORK(10);
- ATFORK(11);
- ATFORK(12);
- ATFORK(13);
- ATFORK(14);
- ATFORK(15);
- ATFORK(16);
- ATFORK(17);
- ATFORK(18);
- ATFORK(19);
-
- pid_t pid = fork();
- if (pid == 0) {
- pid = fork();
+// Apply macro X to the indices 0..19 (must match NUM_REGISTRATIONS).
+#define TWENTY(X) X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7) X(8) X(9) X(10) \
+ X(11) X(12) X(13) X(14) X(15) X(16) X(17) X(18) X(19)
+
+// Generate one prepare/parent/child callback triple per registration index;
+// each callback just records itself in the event log.
+#define DECLARE_CB(idx) \
+static void cb_prepare_##idx() { record_callback(idx, ATFORK_PREPARE); } \
+static void cb_parent_##idx() { record_callback(idx, ATFORK_PARENT); } \
+static void cb_child_##idx() { record_callback(idx, ATFORK_CHILD); }
+
+TWENTY(DECLARE_CB)
+
+// Table of the generated callbacks, indexed [registration][phase].
+typedef void (*atfork_cb_t)(void);
+static const atfork_cb_t callbacks[NUM_REGISTRATIONS][3] = {
+ #define CB_ELEM(idx) { cb_prepare_##idx, cb_parent_##idx, cb_child_##idx },
+ TWENTY(CB_ELEM)
+};
+
+// Assert that `sequence` holds events all of `expected_type` whose
+// registration indices run from start_idx to end_idx inclusive, stepping up
+// or down as needed. Parent-process checker (T_ASSERT is safe here).
+static void assert_event_sequence(struct callback_event *sequence,
+ const char *expected_type, size_t start_idx, size_t end_idx)
+{
+ while (true) {
+ struct callback_event *evt = &sequence[0];
+ T_QUIET; T_ASSERT_EQ(evt->type, expected_type, NULL);
+ T_QUIET; T_ASSERT_EQ(evt->registration_idx, start_idx, NULL);
+
+ if (start_idx == end_idx) {
+ break;
+ }
+ if (start_idx < end_idx) {
+ start_idx++;
+ } else {
+ start_idx--;
+ }
+ sequence++;
}
- if (pid == -1) {
- posix_assumes_zero(pid);
- } else if (pid > 0) {
- int status;
- posix_assumes_zero(waitpid(pid, &status, 0));
- posix_assumes_zero(WEXITSTATUS(status));
+}
+
+// Same check as assert_event_sequence, but for child processes: instead of
+// asserting (which would confuse darwintest in a child), log mismatches and
+// return the number of failures so the child can exit non-zero.
+static size_t inspect_event_sequence(struct callback_event *sequence,
+ const char *expected_type, size_t start_idx, size_t end_idx)
+{
+ size_t failures = 0;
+ while (true) {
+ struct callback_event *evt = &sequence[0];
+ if (evt->type != expected_type || evt->registration_idx != start_idx) {
+ T_LOG("FAIL: expected {idx, type}: {%lu, %s}. got {%lu, %s}",
+ start_idx, expected_type, evt->registration_idx, evt->type);
+ failures++;
+ }
+ if (start_idx == end_idx) {
+ break;
+ }
+ if (start_idx < end_idx) {
+ start_idx++;
+ } else {
+ start_idx--;
+ }
+ sequence++;
+ }
+ return failures;
+}
+
+// Register NUM_REGISTRATIONS atfork callback triples, then fork twice
+// (parent->child->grandchild) and verify the POSIX ordering: prepare
+// handlers run in reverse registration order, parent/child handlers in
+// registration order. Children report failures via their exit status.
+T_DECL(atfork, "pthread_atfork")
+{
+ pid_t pid;
+ int status;
+ size_t failures = 0;
+
+ for (size_t i = 0; i < NUM_REGISTRATIONS; i++) {
+ T_QUIET; T_ASSERT_POSIX_ZERO(pthread_atfork(
+ callbacks[i][0], callbacks[i][1], callbacks[i][2]),
+ "registering callbacks with pthread_atfork()");
+ }
+
+ pid = fork(); // first level fork
+
+ if (pid == 0) {
+ // don't use ASSERTs/EXPECTs in child processes so not to confuse
+ // darwintest
+
+ pid = fork(); // second level fork
+
+ if (pid < 0) {
+ T_LOG("FAIL: second fork() failed");
+ exit(1);
+ }
+ // child inherited 2N events from the first fork; the second fork
+ // appends prepare (reversed) + parent-or-child (in order) = 2N more
+ if (recorded_events != NUM_REGISTRATIONS * 4) {
+ T_LOG("FAIL: unexpected # of events: %lu instead of %lu",
+ recorded_events, NUM_REGISTRATIONS * 4);
+ exit(1);
+ }
+ failures += inspect_event_sequence(&events[2 * NUM_REGISTRATIONS],
+ ATFORK_PREPARE, NUM_REGISTRATIONS - 1, 0);
+ failures += inspect_event_sequence(&events[3 * NUM_REGISTRATIONS],
+ (pid ? ATFORK_PARENT : ATFORK_CHILD), 0, NUM_REGISTRATIONS - 1);
+ if (failures) {
+ exit((int) failures);
+ }
+
+ if (pid > 0) {
+ if (waitpid(pid, &status, 0) != pid) {
+ T_LOG("FAIL: grandchild waitpid failed");
+ exit(1);
+ }
+ if (WEXITSTATUS(status) != 0) {
+ T_LOG("FAIL: grandchild exited with status %d",
+ WEXITSTATUS(status));
+ exit(1);
+ }
+ }
+ exit(0); // don't run leaks in the child and the grandchild
+ } else {
+ T_ASSERT_GE(pid, 0, "first fork()");
+
+ T_ASSERT_EQ(recorded_events, NUM_REGISTRATIONS * 2, "# of events");
+ assert_event_sequence(events, ATFORK_PREPARE, NUM_REGISTRATIONS - 1, 0);
+ assert_event_sequence(&events[NUM_REGISTRATIONS],
+ ATFORK_PARENT, 0, NUM_REGISTRATIONS - 1);
+
+ T_ASSERT_EQ(pid, waitpid(pid, &status, 0), "child waitpid");
+ T_ASSERT_POSIX_ZERO(WEXITSTATUS(status), "child exit status");
}
- return 0;
}
--- /dev/null
+#include <assert.h>
+#include <errno.h>
+#include <stdatomic.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/qos.h>
+
+#include <pthread.h>
+#include <pthread/tsd_private.h>
+#include <pthread/qos_private.h>
+#include <pthread/workqueue_private.h>
+
+#include <dispatch/dispatch.h>
+
+#include <darwintest.h>
+
+// On a workqueue thread, toggle the thread's priority between overcommit and
+// constrained several times via _pthread_set_properties_self and require each
+// transition to succeed.
+T_DECL(bsdthread_set_self_constrained_transition, "bsdthread_ctl(SET_SELF) with overcommit change",
+ T_META_ALL_VALID_ARCHS(YES))
+{
+ dispatch_async(dispatch_get_global_queue(0, 0), ^{
+ // current QoS from the TSD slot, with the overcommit bit forced on
+ pthread_priority_t overcommit = (pthread_priority_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS) |
+ _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
+ pthread_priority_t constrained = overcommit & (~_PTHREAD_PRIORITY_OVERCOMMIT_FLAG);
+
+ T_ASSERT_POSIX_ZERO(_pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, overcommit, 0), NULL);
+ T_ASSERT_POSIX_ZERO(_pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, constrained, 0), NULL);
+ T_ASSERT_POSIX_ZERO(_pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, overcommit, 0), NULL);
+ T_ASSERT_POSIX_ZERO(_pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, constrained, 0), NULL);
+ T_ASSERT_POSIX_ZERO(_pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, overcommit, 0), NULL);
+ T_ASSERT_POSIX_ZERO(_pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, constrained, 0), NULL);
+
+ T_END;
+ });
+
+ dispatch_main();
+}
+
+// Spawn many blocks that mark themselves overcommit and then camp for a long
+// time; the test passes once all THREADS blocks have started. If thread 1
+// survives its shorter (30s vs 60s) camp-out, not enough workqueue threads
+// were created and the test fails.
+T_DECL(bsdthread_set_self_constrained_threads, "bsdthread_ctl(SET_SELF) with overcommit change",
+ T_META_CHECK_LEAKS(NO), T_META_ALL_VALID_ARCHS(YES))
+{
+ static const int THREADS = 128;
+ static atomic_int threads_started;
+ dispatch_queue_t q = dispatch_queue_create("my queue", DISPATCH_QUEUE_CONCURRENT);
+ dispatch_set_target_queue(q, dispatch_get_global_queue(0, 0));
+ for (int i = 0; i < THREADS; i++) {
+ dispatch_async(q, ^{
+ int thread_id = ++threads_started;
+ T_PASS("Thread %d started successfully", thread_id);
+ if (thread_id == THREADS){
+ T_PASS("All threads started successfully");
+ T_END;
+ }
+
+ // become overcommit so this blocked thread doesn't starve the pool
+ pthread_priority_t overcommit = (pthread_priority_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS) |
+ _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
+ T_ASSERT_POSIX_ZERO(_pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, overcommit, 0), NULL);
+
+ // sleep-loop until 30s (thread 1) / 60s (others) have elapsed
+ uint64_t t = clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
+ while (t > clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW) - (thread_id == 1 ? 30 : 60) * NSEC_PER_SEC) {
+ sleep(1);
+ }
+ if (thread_id == 1) {
+ T_FAIL("Where are my threads?");
+ T_END;
+ }
+ });
+ }
+
+ dispatch_main();
+}
+
+// A workqueue thread asks to unbind itself from its kevent; success is the
+// whole test.
+T_DECL(bsdthread_set_self_unbind, "bsdthread_ctl(SET_SELF) with kevent unbind",
+ T_META_ALL_VALID_ARCHS(YES))
+{
+ dispatch_async(dispatch_get_global_queue(0, 0), ^{
+ T_ASSERT_POSIX_ZERO(_pthread_set_properties_self(_PTHREAD_SET_SELF_WQ_KEVENT_UNBIND, 0, 0), NULL);
+
+ T_END;
+ });
+
+ dispatch_main();
+}
#include <errno.h>
#include <libkern/OSAtomic.h>
+#include <darwintest.h>
+#include <darwintest_utils.h>
+
struct context {
pthread_cond_t cond;
pthread_mutex_t mutex;
long count;
};
-void *wait_thread(void *ptr) {
+// Worker: repeatedly takes the mutex and, while work remains, waits on the
+// condition variable; each wakeup consumes one unit of context->count.
+static void *wait_thread(void *ptr) {
 int res;
 struct context *context = ptr;
- int i = 0;
- char *str;
-
 bool loop = true;
 while (loop) {
 res = pthread_mutex_lock(&context->mutex);
 if (res) {
- fprintf(stderr, "[%ld] pthread_mutex_lock: %s\n", context->count, strerror(res));
- abort();
+ T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_mutex_lock", context->count);
 }
 if (context->count > 0) {
 ++context->waiters;
 res = pthread_cond_wait(&context->cond, &context->mutex);
 if (res) {
- fprintf(stderr, "[%ld] pthread_rwlock_unlock: %s\n", context->count, strerror(res));
- abort();
+ // fixed: the failing call here is pthread_cond_wait, not
+ // pthread_rwlock_unlock as the old message claimed
+ T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_cond_wait", context->count);
 }
 --context->waiters;
 --context->count;
 res = pthread_mutex_unlock(&context->mutex);
 if (res) {
- fprintf(stderr, "[%ld] pthread_mutex_unlock: %s\n", context->count, strerror(res));
- abort();
+ T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_mutex_unlock", context->count);
 }
 }
 return NULL;
}
-int main(int argc, char *argv[])
+// Stress pthread_cond signal/wait: two waiters drain a large count while the
+// main thread signals whenever anyone is waiting.
+T_DECL(cond, "pthread_cond",
+ T_META_ALL_VALID_ARCHS(YES))
{
 struct context context = {
 .cond = PTHREAD_COND_INITIALIZER,
 .mutex = PTHREAD_MUTEX_INITIALIZER,
 .waiters = 0,
- .count = 500000,
+ .count = 100000 * dt_ncpu(),
 };
 int i;
 int res;
 int threads = 2;
 pthread_t p[threads];
 for (i = 0; i < threads; ++i) {
- res = pthread_create(&p[i], NULL, wait_thread, &context);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(pthread_create(&p[i], NULL, wait_thread, &context), NULL);
 }
 long half = context.count / 2;
 while (loop) {
 res = pthread_mutex_lock(&context.mutex);
 if (res) {
- fprintf(stderr, "[%ld] pthread_mutex_lock: %s\n", context.count, strerror(res));
- abort();
+ T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_mutex_lock", context.count);
 }
 if (context.waiters) {
 char *str;
 res = pthread_cond_signal(&context.cond);
 }
 if (res != 0) {
- fprintf(stderr, "[%ld] %s: %s\n", context.count, str, strerror(res));
- abort();
+ T_ASSERT_POSIX_ZERO(res, "[%ld] %s", context.count, str);
 }
 }
 if (context.count <= 0) {
 loop = false;
+ // fixed typo in user-visible message: "stres" -> "stress"
+ T_PASS("Completed stress test successfully.");
 }
 res = pthread_mutex_unlock(&context.mutex);
 if (res) {
- fprintf(stderr, "[%ld] pthread_mutex_unlock: %s\n", context.count, strerror(res));
- abort();
+ T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_mutex_unlock", context.count);
 }
 }
-
 for (i = 0; i < threads; ++i) {
- res = pthread_join(p[i], NULL);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(pthread_join(p[i], NULL), NULL);
 }
-
- return 0;
}
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <string.h>
+#include <errno.h>
+#include <mach/thread_switch.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach/sync.h>
+#include <mach/sync_policy.h>
+
+// Compile-time switch: when defined, lock operations are recorded in a
+// spinlock-protected in-memory trace ring for post-mortem debugging.
+#define LOG
+
+typedef long pthread_lock_t;
+
+// NSConditionLock-style lock built on a pthread mutex + condition variable.
+typedef struct _ConditionLock {
+ pthread_mutex_t _mutex;
+ pthread_cond_t _condition;
+ int _owner;
+ int _last_owner;
+ volatile int _isLocked;
+ volatile int _state;
+} ConditionLock;
+
+// One trace-ring entry: acting thread id, operation label, timestamp.
+typedef struct _log {
+ int thread;
+ const char * op;
+ int tbr;
+} log_t;
+
+// Spinlock helpers over libc's private _spin_lock_try/_spin_unlock;
+// yields via thread_switch while contended.
+#define LOCK_INIT(l) ((l) = 0)
+#define LOCK(v) \
+ if (__is_threaded) { \
+ while (!_spin_lock_try((pthread_lock_t *)&v)) { \
+ syscall_thread_switch(THREAD_NULL, SWITCH_OPTION_WAIT, 1); \
+ } \
+ }
+#define UNLOCK(v) \
+ if (__is_threaded) \
+ _spin_unlock((pthread_lock_t *)&v)
+#ifndef ESUCCESS
+#define ESUCCESS 0
+#endif
+
+// The my_pthread_* layer maps straight to pthreads here; the "ptself"
+// argument is ignored (a semaphore-based variant lives in the #if 0 block).
+#define my_pthread_mutex_init(m) pthread_mutex_init((m), NULL)
+#define my_pthread_mutex_lock(m, ptself) pthread_mutex_lock(m)
+#define my_pthread_mutex_unlock(m) pthread_mutex_unlock(m)
+#define my_pthread_cond_init(c) pthread_cond_init((c), NULL)
+#define my_pthread_cond_wait(c, m, ptself) pthread_cond_wait((c), (m))
+#define my_pthread_cond_broadcast(c) pthread_cond_broadcast(c)
+
+static int initConditionLockWithCondition(ConditionLock *, int);
+static int lockConditionLock(ConditionLock *, int);
+static int lockConditionLockWhenCondition(ConditionLock *, int, int);
+static int unlockConditionLockWithCondition(ConditionLock *, int, int);
+static void * testThread(void *);
+
+extern int __is_threaded;
+extern int _spin_lock_try(pthread_lock_t * lockp);
+extern void _spin_unlock(pthread_lock_t * lockp);
+extern kern_return_t syscall_thread_switch(thread_t, int, int);
+
+static ConditionLock * lock;
+static volatile int count = 0;
+#if defined(LOG)
+static volatile int logcount = 0;
+static log_t * tracelog;
+static const size_t logsize = 4 * 1024 * 1024;
+pthread_lock_t loglock;
+extern int getTBR(void);
+
+// Read a raw 64-bit CPU timestamp for trace ordering; falls back to
+// mach_absolute_time() on modern architectures.
+static __inline__ unsigned long long
+ReadTBR()
+{
+ union {
+ unsigned long long time64;
+ unsigned long word[2];
+ } now;
+#if defined(__i386__)
+ /* Read from Pentium and Pentium Pro 64-bit timestamp counter.
+ * The counter is set to 0 at processor reset and increments on
+ * every clock cycle. */
+ /* NOTE(review): splitting rdtsc and the movl's into separate asm
+ * statements relies on eax/edx surviving between them — fragile,
+ * but historical code kept as-is. */
+ __asm__ volatile("rdtsc" : : : "eax", "edx");
+ __asm__ volatile("movl %%eax,%0" : "=m"(now.word[0]) : : "eax");
+ __asm__ volatile("movl %%edx,%0" : "=m"(now.word[1]) : : "edx");
+#elif defined(__ppc__)
+ /* Read from PowerPC 64-bit time base register. The increment
+ * rate of the time base is implementation-dependent, but is
+ * 1/4th the bus clock cycle on 603/604 processors. */
+ unsigned long t3;
+ do {
+ __asm__ volatile("mftbu %0" : "=r"(now.word[0]));
+ __asm__ volatile("mftb %0" : "=r"(now.word[1]));
+ __asm__ volatile("mftbu %0" : "=r"(t3));
+ } while (now.word[0] != t3);
+#else
+ now.time64 = mach_absolute_time();
+#endif
+ return now.time64;
+}
+
+// Append an entry to the trace ring under the trace spinlock; wraps at
+// logsize. The timestamp is truncated to int (tbr field is int); `cl` is
+// currently unused.
+static void
+log(int self, const char * op, ConditionLock * cl)
+{
+ LOCK(loglock);
+ if (logcount >= logsize)
+ logcount = 0;
+ tracelog[logcount].thread = self;
+ tracelog[logcount].op = op;
+ tracelog[logcount++].tbr = ReadTBR();
+ UNLOCK(loglock);
+}
+#else
+// Logging disabled: compile log() calls away.
+#define log(a, b, c)
+#endif
+
+// Producer/consumer hang test: main repeatedly takes the lock, bumps count
+// and releases with condition 1; four worker threads drain it. Success is
+// simply completing all iterations without deadlocking.
+int
+main(int argc, char * argv[])
+{
+ pthread_t thread[4];
+ long iterations = 100000L;
+
+ lock = (ConditionLock *)calloc(sizeof(ConditionLock), 1);
+ if (initConditionLockWithCondition(lock, 0))
+ abort();
+#if defined(LOG)
+ tracelog = (log_t *)calloc(logsize, sizeof(log_t));
+ LOCK_INIT(loglock);
+#endif
+
+ // thread "ids" 1..4 are smuggled through the void* argument
+ pthread_create(&thread[0], NULL, testThread, (void *)1);
+ pthread_create(&thread[1], NULL, testThread, (void *)2);
+ pthread_create(&thread[2], NULL, testThread, (void *)3);
+ pthread_create(&thread[3], NULL, testThread, (void *)4);
+
+ while (iterations-- > 0) {
+ if (lockConditionLock(lock, 0))
+ abort();
+ count++;
+ if (unlockConditionLockWithCondition(lock, 1, 0))
+ abort();
+ }
+ // workers loop forever; process exit tears them down
+ printf("completed numerous iterations without hanging. Exiting with return 0\n");
+ return 0;
+}
+
+// Worker: wait until the lock's condition becomes 1, consume one unit of
+// count, and release with condition 0 once count is drained (else keep it 1
+// so a sibling can run). Loops forever; killed by process exit.
+static void *
+testThread(void * arg)
+{
+ int self = (int)arg;
+ while (1) {
+ if (lockConditionLockWhenCondition(lock, 1, self))
+ abort();
+ count--;
+ if (!count) {
+ if (unlockConditionLockWithCondition(lock, 0, self))
+ abort();
+ } else {
+ if (unlockConditionLockWithCondition(lock, 1, self))
+ abort();
+ }
+ }
+ return arg;
+}
+
+// Initialize the mutex/condvar pair and seed the lock state.
+// Returns 0 on success, 1 (after logging to stderr) on any pthread error.
+static int
+initConditionLockWithCondition(ConditionLock * cl, int condition)
+{
+ int rc;
+
+ if ((rc = my_pthread_mutex_init(&cl->_mutex))) {
+ fprintf(stderr, "pthread_mutex_init returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+
+ if ((rc = my_pthread_cond_init(&cl->_condition))) {
+ fprintf(stderr, "pthread_cond_init returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+
+ cl->_isLocked = 0;
+ cl->_state = condition;
+
+ return 0;
+}
+
+// Acquire the ConditionLock regardless of its state value: wait until
+// _isLocked clears, then claim ownership. EINVAL from cond_wait is logged
+// but tolerated (the loop re-checks); other errors abort the operation.
+// Returns 0 on success, 1 on error.
+static int
+lockConditionLock(ConditionLock * cl, int self)
+{
+ int rc;
+
+ if ((rc = my_pthread_mutex_lock(&cl->_mutex, self))) {
+ fprintf(stderr, "pthread_mutex_lock() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ log(self, "Waiting for lock", cl);
+ while (cl->_isLocked) {
+ if ((rc = my_pthread_cond_wait(&cl->_condition, &cl->_mutex, self))) {
+ fprintf(stderr, "pthread_cond_wait() returned %d, %s\n", rc, strerror(rc));
+ if (rc != EINVAL) {
+ return 1;
+ }
+ }
+ if (cl->_isLocked) {
+ // spurious/broadcast wakeup while someone still holds the lock
+ log(self, "lock wakeup with lock held", cl);
+ }
+ }
+ cl->_isLocked = 1;
+ cl->_owner = self;
+ log(self, "Got lock", cl);
+ if ((rc = my_pthread_mutex_unlock(&cl->_mutex))) {
+ fprintf(stderr, "pthread_mutex_unlock() %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ return 0;
+}
+
+// Acquire the ConditionLock only once its state equals `condition`:
+// waits until it is both unlocked and in the requested state. Tolerates
+// EINVAL from cond_wait like lockConditionLock(); logs diagnostic wakeups.
+// Returns 0 on success, 1 on error.
+static int
+lockConditionLockWhenCondition(ConditionLock * cl, int condition, int self)
+{
+ int rc;
+
+ if ((rc = my_pthread_mutex_lock(&cl->_mutex, self))) {
+ fprintf(stderr, "pthread_mutex_lock() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ log(self, "Waiting for condition", cl);
+ while (cl->_isLocked || cl->_state != condition) {
+ if ((rc = my_pthread_cond_wait(&cl->_condition, &cl->_mutex, self))) {
+ fprintf(stderr, "pthread_cond_wait() returned %d, %s\n", rc, strerror(rc));
+ if (rc != EINVAL) {
+ return 1;
+ }
+ }
+ if (cl->_isLocked) {
+ log(self, "condition lock wakeup with lock held", cl);
+ }
+ if (cl->_state != condition) {
+ log(self, "condition lock wakeup with wrong condition", cl);
+ }
+ }
+ cl->_isLocked = 1;
+ cl->_owner = self;
+ log(self, "Got condition", cl);
+ if ((rc = my_pthread_mutex_unlock(&cl->_mutex))) {
+ fprintf(stderr, "pthread_mutex_unlock() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ return 0;
+}
+
+// Release the ConditionLock, set its state to `condition`, and broadcast so
+// every waiter re-evaluates. Aborts if the caller is not the recorded owner.
+// Returns 0 on success, 1 on error.
+static int
+unlockConditionLockWithCondition(ConditionLock * cl, int condition, int self)
+{
+ int rc;
+
+ if ((rc = my_pthread_mutex_lock(&cl->_mutex, self))) {
+ fprintf(stderr, "pthread_mutex_lock() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ if (cl->_owner != self) {
+ fprintf(stderr, "%d: trying to unlock a lock owned by %d\n", self, cl->_owner);
+ abort();
+ }
+ log(self, condition ? "Unlocking with condition set" : "Unlocking with condition cleared", cl);
+ cl->_isLocked = 0;
+ cl->_last_owner = cl->_owner;
+ cl->_owner = 0;
+ cl->_state = condition;
+ if ((rc = my_pthread_cond_broadcast(&cl->_condition))) {
+ fprintf(stderr, "pthread_cond_broadcast() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ log(self, "Sent broadcast", cl);
+ if ((rc = my_pthread_mutex_unlock(&cl->_mutex))) {
+ fprintf(stderr, "pthread_mutex_unlock() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ return 0;
+}
+
+#if 0
+static int
+my_pthread_mutex_init(my_pthread_mutex_t *mutex)
+{
+ kern_return_t kern_res;
+ LOCK_INIT(mutex->lock);
+ mutex->owner = (pthread_t)NULL;
+ mutex->waiters = 0;
+ mutex->cond_lock = 0;
+ kern_res = semaphore_create(mach_task_self(),
+ &mutex->sem,
+ SYNC_POLICY_FIFO,
+ 0);
+ if (kern_res != KERN_SUCCESS)
+ {
+ return (ENOMEM);
+ } else
+ {
+ return (ESUCCESS);
+ }
+}
+
+static int
+my_pthread_mutex_lock(my_pthread_mutex_t *mutex, int self)
+{
+ kern_return_t kern_res;
+
+ LOCK(mutex->lock);
+#if 0
+ if (mutex->waiters || mutex->owner != (pthread_t)NULL)
+#else
+ while (mutex->owner != (pthread_t)NULL)
+#endif
+ {
+ mutex->waiters++;
+ log(self, "going in to sem_wait", 0);
+ UNLOCK(mutex->lock);
+ kern_res = semaphore_wait(mutex->sem);
+ LOCK(mutex->lock);
+ mutex->waiters--;
+ log(self, "woke up from sem_wait", 0);
+ if (mutex->cond_lock) {
+ log(self, "clearing cond_lock", 0);
+ mutex->cond_lock = 0;
+#if 0
+#else
+ break;
+#endif
+ }
+ }
+ mutex->owner = (pthread_t)0x12141968;
+ UNLOCK(mutex->lock);
+ return (ESUCCESS);
+}
+
+static int
+my_pthread_mutex_unlock(my_pthread_mutex_t *mutex)
+{
+ kern_return_t kern_res;
+ int waiters;
+
+ LOCK(mutex->lock);
+ mutex->owner = (pthread_t)NULL;
+ waiters = mutex->waiters;
+ UNLOCK(mutex->lock);
+ if (waiters)
+ {
+ kern_res = semaphore_signal(mutex->sem);
+ }
+ return (ESUCCESS);
+}
+
+/*
+ * Initialize a condition variable. Note: 'attr' is ignored.
+ */
+static int
+my_pthread_cond_init(my_pthread_cond_t *cond)
+{
+ kern_return_t kern_res;
+ LOCK_INIT(cond->lock);
+ cond->waiters = 0;
+ kern_res = semaphore_create(mach_task_self(),
+ &cond->sem,
+ SYNC_POLICY_FIFO,
+ 0);
+ if (kern_res != KERN_SUCCESS)
+ {
+ return (ENOMEM);
+ }
+ return (ESUCCESS);
+}
+
+/*
+ * Signal a condition variable, waking up all threads waiting for it.
+ */
+static int
+my_pthread_cond_broadcast(my_pthread_cond_t *cond)
+{
+ kern_return_t kern_res;
+ int waiters;
+
+ LOCK(cond->lock);
+ waiters = cond->waiters;
+ if (cond->waiters == 0)
+ { /* Avoid kernel call since there are no waiters... */
+ UNLOCK(cond->lock);
+ return (ESUCCESS);
+ }
+ UNLOCK(cond->lock);
+#if 0
+ kern_res = semaphore_signal(cond->sem);
+#endif
+ kern_res = semaphore_signal_all(cond->sem);
+ if (kern_res == KERN_SUCCESS)
+ {
+ return (ESUCCESS);
+ } else
+ {
+ return (EINVAL);
+ }
+}
+
+static int
+my_pthread_cond_wait(my_pthread_cond_t *cond, my_pthread_mutex_t *mutex, int self)
+{
+ int res;
+ kern_return_t kern_res;
+
+ LOCK(cond->lock);
+ cond->waiters++;
+ UNLOCK(cond->lock);
+ LOCK(mutex->lock);
+ mutex->cond_lock = 1;
+ log(self, "going in to sem_wait_signal", 0);
+ UNLOCK(mutex->lock);
+ kern_res = semaphore_wait_signal(cond->sem, mutex->sem);
+ LOCK(cond->lock);
+ cond->waiters--;
+ log(self, "woke up from sem_wait_signal", 0);
+ UNLOCK(cond->lock);
+ if ((res = my_pthread_mutex_lock(mutex, self)) != ESUCCESS)
+ {
+ return (res);
+ }
+ if (kern_res == KERN_SUCCESS)
+ {
+ return (ESUCCESS);
+ } else
+ {
+ return (EINVAL);
+ }
+}
+
+#endif
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <string.h>
+
+// Simplified ConditionLock: unlike the sibling test there is no _isLocked
+// flag — the pthread mutex itself is held between lock*() and unlock*().
+typedef struct _ConditionLock {
+ pthread_mutex_t _mutex;
+ pthread_cond_t _condition;
+ int _owner;
+ int _last_owner;
+ volatile int _state;
+} ConditionLock;
+
+// Trace-ring entry: acting thread id and operation label.
+typedef struct _log {
+ int thread;
+ const char * op;
+} log_t;
+
+static int initConditionLockWithCondition(ConditionLock *, int);
+static int lockConditionLock(ConditionLock *, int);
+static int lockConditionLockWhenCondition(ConditionLock *, int, int);
+static int unlockConditionLockWithCondition(ConditionLock *, int, int);
+static int destroyConditionLock(ConditionLock *);
+static void * testThread(void *);
+static void log(int, const char *, ConditionLock *);
+
+static ConditionLock * lock;
+static volatile int count = 0;
+static volatile int logcount = 0;
+static log_t * tracelog;
+#define TRACE_MAX_COUNT (4 * 1024 * 1024)
+// shared loop counter: main decrements it, workers read it as a stop flag
+long iterations = 999000L;
+
+// Append an entry to the trace ring, wrapping at TRACE_MAX_COUNT.
+// NOTE(review): no locking around logcount — callers hold the lock's mutex
+// in this test, presumably relying on that for exclusion; `cl` is unused.
+static void
+log(int self, const char * op, ConditionLock * cl)
+{
+ tracelog[logcount].thread = self;
+ tracelog[logcount++].op = op;
+ if (logcount == TRACE_MAX_COUNT)
+ logcount = 0;
+}
+
+// Ping-pong hang test: main sets condition 1, workers reset it to 0. Runs
+// 100 extra rounds past zero so workers parked in cond-wait when iterations
+// hits 0 still get signaled and can observe the stop condition.
+// NOTE(review): workers are never joined; lock is destroyed/freed while they
+// may still reference it — pre-existing teardown race, left as-is.
+int
+main(int argc, char * argv[])
+{
+ pthread_t thread[4];
+
+ lock = (ConditionLock *)calloc(sizeof(ConditionLock), 1);
+ if (initConditionLockWithCondition(lock, 0))
+ abort();
+ tracelog = (log_t *)calloc(sizeof(log_t), TRACE_MAX_COUNT);
+
+ // thread "ids" 1..4 smuggled through the void* argument
+ pthread_create(&thread[0], NULL, testThread, (void *)1);
+ pthread_create(&thread[1], NULL, testThread, (void *)2);
+ pthread_create(&thread[2], NULL, testThread, (void *)3);
+ pthread_create(&thread[3], NULL, testThread, (void *)4);
+
+ while (iterations > -100) {
+ if (lockConditionLock(lock, 0))
+ abort();
+ count = 1;
+ iterations--;
+ if (unlockConditionLockWithCondition(lock, 1, 0))
+ abort();
+ }
+
+ destroyConditionLock(lock);
+ free(lock);
+ free(tracelog);
+
+ return 0;
+}
+
+// Worker: while rounds remain, wait for condition 1, clear count, and hand
+// the lock back with condition 0 so main can produce the next round.
+static void *
+testThread(void * arg)
+{
+ int self = (int)arg;
+ while (iterations > 0) {
+ if (lockConditionLockWhenCondition(lock, 1, self))
+ abort();
+ count = 0;
+ if (unlockConditionLockWithCondition(lock, 0, self))
+ abort();
+ }
+ return arg;
+}
+
+// Initialize the mutex/condvar pair and seed the state value.
+// Returns 0 on success, 1 (after logging to stderr) on any pthread error.
+static int
+initConditionLockWithCondition(ConditionLock * cl, int condition)
+{
+ int rc;
+
+ if ((rc = pthread_mutex_init(&cl->_mutex, NULL))) {
+ fprintf(stderr, "pthread_mutex_init returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+
+ if ((rc = pthread_cond_init(&cl->_condition, NULL))) {
+ fprintf(stderr, "pthread_cond_init returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+
+ cl->_state = condition;
+
+ return 0;
+}
+
+// Tear down the mutex and condvar. Returns 0 on success, 1 on error.
+static int
+destroyConditionLock(ConditionLock * cl)
+{
+ int rc;
+
+ if ((rc = pthread_mutex_destroy(&cl->_mutex))) {
+ fprintf(stderr, "pthread_mutex_destroy returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ if ((rc = pthread_cond_destroy(&cl->_condition))) {
+ fprintf(stderr, "pthread_cond_destroy returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ return 0;
+}
+
+// Acquire the lock unconditionally. On success the pthread mutex is LEFT
+// HELD — unlockConditionLockWithCondition() is what releases it.
+// Returns 0 on success, 1 on error.
+static int
+lockConditionLock(ConditionLock * cl, int self)
+{
+ int rc;
+
+ if ((rc = pthread_mutex_lock(&cl->_mutex))) {
+ fprintf(stderr, "pthread_mutex_lock() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ cl->_owner = self;
+ log(self, "Got lock", cl);
+ return 0;
+}
+
+// Acquire the lock once its state equals `condition`; standard predicate
+// re-check loop around pthread_cond_wait. On success the mutex is LEFT HELD
+// (released later by unlockConditionLockWithCondition). Returns 0/1.
+static int
+lockConditionLockWhenCondition(ConditionLock * cl, int condition, int self)
+{
+ int rc;
+
+ if ((rc = pthread_mutex_lock(&cl->_mutex))) {
+ fprintf(stderr, "pthread_mutex_lock() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ log(self, "Waiting for condition", cl);
+ while (cl->_state != condition) {
+ if ((rc = pthread_cond_wait(&cl->_condition, &cl->_mutex))) {
+ fprintf(stderr, "pthread_cond_wait() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ if (cl->_state != condition) {
+ // spurious or not-for-us wakeup; recorded for diagnosis
+ log(self, "condition lock wakeup with wrong condition", cl);
+ }
+ }
+ cl->_owner = self;
+ log(self, "Got condition", cl);
+ return 0;
+}
+
+// Release the lock (mutex already held by the matching lock*() call), set
+// the state to `condition`, and wake one waiter. Aborts if the caller is
+// not the recorded owner. Returns 0 on success, 1 on error.
+static int
+unlockConditionLockWithCondition(ConditionLock * cl, int condition, int self)
+{
+ int rc;
+
+ if (cl->_owner != self) {
+ fprintf(stderr, "%d: trying to unlock a lock owned by %d\n", self, cl->_owner);
+ abort();
+ }
+ log(self, condition ? "Unlocking with condition set" : "Unlocking with condition cleared", cl);
+ cl->_last_owner = cl->_owner;
+ cl->_owner = 0;
+ cl->_state = condition;
+ if ((rc = pthread_cond_signal(&cl->_condition))) {
+ // fixed: the failing call is pthread_cond_signal, not
+ // pthread_cond_broadcast as the old message claimed
+ fprintf(stderr, "pthread_cond_signal() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ log(self, "Sent signal", cl);
+ if ((rc = pthread_mutex_unlock(&cl->_mutex))) {
+ fprintf(stderr, "pthread_mutex_unlock() returned %d, %s\n", rc, strerror(rc));
+ return 1;
+ }
+ return 0;
+}
+
#include <libkern/OSAtomic.h>
#include <dispatch/dispatch.h>
+#include <darwintest.h>
+
+#define NUM_THREADS 8
+
struct context {
pthread_cond_t cond;
pthread_mutex_t mutex;
long count;
};
-void *wait_thread(void *ptr) {
+// Worker: in a loop, compute an absolute deadline `udelay` microseconds from
+// now and (below, outside this hunk) timed-wait on the shared condvar.
+static void *wait_thread(void *ptr) {
 int res;
 struct context *context = ptr;
- int i = 0;
- char *str;
-
 bool loop = true;
 while (loop) {
 struct timespec ts;
 struct timeval tv;
 gettimeofday(&tv, NULL);
- uint64_t ns = tv.tv_usec * NSEC_PER_USEC + context->udelay * NSEC_PER_USEC;
- ts.tv_nsec = ns >= NSEC_PER_SEC ? ns % NSEC_PER_SEC : ns;
- ts.tv_sec = tv.tv_sec + (ns >= NSEC_PER_SEC ? ns / NSEC_PER_SEC : 0);
+ // carry seconds first, then normalize usec; both use the original
+ // tv_usec so the two lines are order-dependent but consistent
+ tv.tv_sec += (tv.tv_usec + context->udelay) / (__typeof(tv.tv_sec)) USEC_PER_SEC;
+ tv.tv_usec = (tv.tv_usec + context->udelay) % (__typeof(tv.tv_usec)) USEC_PER_SEC;
+ TIMEVAL_TO_TIMESPEC(&tv, &ts);
 res = pthread_mutex_lock(&context->mutex);
 if (res) {
 return NULL;
 }
-int main(int argc, char *argv[])
+T_DECL(cond_timedwait_timeout, "pthread_cond_timedwait() timeout")
{
- const int threads = 8;
+ // This testcase launches 8 threads that all perform a timed wait on the
+ // same condition variable, which is never signaled, in a loop. After the
+ // total of 8000 timeouts all threads finish and the testcase logs the
+ // expected time (5[ms]*8000[timeouts]/8[threads]=5s) vs elapsed time.
 struct context context = {
 .cond = PTHREAD_COND_INITIALIZER,
 .mutex = PTHREAD_MUTEX_INITIALIZER,
 .udelay = 5000,
 .count = 8000,
 };
- int i;
- int res;
- uint64_t uexpected = (context.udelay * context.count) / threads;
- printf("waittime expected: %llu us\n", uexpected);
+ long uexpected = (context.udelay * context.count) / NUM_THREADS;
+ T_LOG("waittime expected: %ld us", uexpected);
 struct timeval start, end;
 gettimeofday(&start, NULL);
- pthread_t p[threads];
- for (i = 0; i < threads; ++i) {
- res = pthread_create(&p[i], NULL, wait_thread, &context);
- assert(res == 0);
+ pthread_t p[NUM_THREADS];
+ for (int i = 0; i < NUM_THREADS; ++i) {
+ T_ASSERT_POSIX_ZERO(pthread_create(&p[i], NULL, wait_thread, &context),
+ "pthread_create");
 }
- usleep(uexpected);
+ // sleep through the expected total, then poll count under the mutex
+ usleep((useconds_t) uexpected);
 bool loop = true;
 while (loop) {
- res = pthread_mutex_lock(&context.mutex);
- if (res) {
- fprintf(stderr, "[%ld] pthread_mutex_lock: %s\n", context.count, strerror(res));
- abort();
- }
+ T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context.mutex),
+ "pthread_mutex_lock");
 if (context.count <= 0) {
 loop = false;
 }
- res = pthread_mutex_unlock(&context.mutex);
- if (res) {
- fprintf(stderr, "[%ld] pthread_mutex_unlock: %s\n", context.count, strerror(res));
- abort();
- }
+ T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context.mutex),
+ "pthread_mutex_unlock");
 }
- for (i = 0; i < threads; ++i) {
- res = pthread_join(p[i], NULL);
- assert(res == 0);
+ for (int i = 0; i < NUM_THREADS; ++i) {
+ T_ASSERT_POSIX_ZERO(pthread_join(p[i], NULL), "pthread_join");
 }
 gettimeofday(&end, NULL);
- uint64_t uelapsed = (end.tv_sec * USEC_PER_SEC + end.tv_usec) -
- (start.tv_sec * USEC_PER_SEC + start.tv_usec);
- printf("waittime actual: %llu us\n", uelapsed);
-
- return 0;
+ uint64_t uelapsed =
+ ((uint64_t) end.tv_sec * USEC_PER_SEC + (uint64_t) end.tv_usec) -
+ ((uint64_t) start.tv_sec * USEC_PER_SEC + (uint64_t) start.tv_usec);
+ T_LOG("waittime actual: %llu us", uelapsed);
}
#include <unistd.h>
#include <os/assumes.h>
-void *function(void *arg) {
+#include <darwintest.h>
+
+// Shared with the test body so the thread can verify its frame lies inside
+// the caller-provided stack range [stackaddr - stacksize, stackaddr).
+static uintptr_t stackaddr;
+static const size_t stacksize = 4096 * 8;
+
+// Thread body: touch the custom stack, assert the current frame sits within
+// the provided region, and return 2*strlen(arg) for the joiner to check.
+static void *function(void *arg) {
 // Use the stack...
 char buffer[BUFSIZ];
 strlcpy(buffer, arg, sizeof(buffer));
 strlcat(buffer, arg, sizeof(buffer));
- printf("%s", buffer);
- sleep(30);
+ T_ASSERT_LT((uintptr_t)__builtin_frame_address(0), stackaddr, NULL);
+ T_ASSERT_GT((uintptr_t)__builtin_frame_address(0), stackaddr - stacksize, NULL);
+
 return (void *)(uintptr_t)strlen(buffer);
 }
-int main(int argc, char *argv[]) {
+// Create a thread on a caller-allocated stack (pthread_attr_setstackaddr
+// takes the TOP of the region) and verify it runs there and returns the
+// expected value.
+T_DECL(custom_stack, "creating a pthread with a custom stack",
+ T_META_ALL_VALID_ARCHS(YES)){
 char *arg = "This is a test and only a test of the pthread stackaddr system.\n";
- size_t stacksize = 4096 * 5;
- uintptr_t stackaddr = (uintptr_t)valloc(stacksize);
+ stackaddr = (uintptr_t)valloc(stacksize);
 stackaddr += stacksize; // address starts at top of stack.
 pthread_t thread;
 pthread_attr_t attr;
- os_assumes_zero(pthread_attr_init(&attr));
- os_assumes_zero(pthread_attr_setstacksize(&attr, stacksize));
- os_assumes_zero(pthread_attr_setstackaddr(&attr, (void *)stackaddr));
+ T_ASSERT_POSIX_ZERO(pthread_attr_init(&attr), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_attr_setstacksize(&attr, stacksize), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_attr_setstackaddr(&attr, (void *)stackaddr), NULL);
- os_assumes_zero(pthread_create(&thread, &attr, function, arg));
+ T_ASSERT_POSIX_ZERO(pthread_create(&thread, &attr, function, arg), NULL);
 void *result;
- os_assumes_zero(pthread_join(thread, &result));
- os_assumes((uintptr_t)result == (uintptr_t)strlen(arg)*2);
+ T_ASSERT_POSIX_ZERO(pthread_join(thread, &result), NULL);
+ T_ASSERT_EQ((uintptr_t)result, (uintptr_t)strlen(arg)*2, "thread should return correct value");
- return 0;
+ // valloc'd base was stackaddr before the += above
+ free((void*)(stackaddr - stacksize));
}
#include <unistd.h>
#include <mach/mach.h>
+#include <darwintest.h>
+
#define WAITTIME (100 * 1000)
static inline void*
test(void)
{
static uintptr_t idx;
- printf("Join %lu\n", ++idx);
+ // keep the increment the removed printf used to perform, so each call
+ // still yields a distinct non-NULL join value
+ ++idx;
return (void*)idx;
}
return param;
}
+/*
static void *
thread1(void *param)
{
usleep(WAITTIME);
res = pthread_join(p, NULL);
assert(res == 0);
- printf("Done\n");
return 0;
}
+*/
-__attribute((noreturn))
-int
-main(void)
+T_DECL(join, "pthread_join",
+ T_META_ALL_VALID_ARCHS(YES))
{
int res;
kern_return_t kr;
param = test();
res = pthread_create(&p, NULL, thread, param);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(res, "pthread_create");
value = NULL;
res = pthread_join(p, &value);
- assert(res == 0);
- assert(param == value);
+ T_ASSERT_POSIX_ZERO(res, "pthread_join");
+ T_ASSERT_EQ_PTR(param, value, "early join value");
param = test();
res = pthread_create(&p, NULL, thread, param);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(res, "pthread_create");
usleep(3 * WAITTIME);
value = NULL;
res = pthread_join(p, &value);
- assert(res == 0);
- assert(param == value);
+ T_ASSERT_POSIX_ZERO(res, "pthread_join");
+ T_ASSERT_EQ_PTR(param, value, "late join value");
param = test();
res = pthread_create_suspended_np(&p, NULL, thread, param);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(res, "pthread_create_suspended_np");
kr = thread_resume(pthread_mach_thread_np(p));
- assert(kr == 0);
+ T_ASSERT_EQ_INT(kr, 0, "thread_resume");
value = NULL;
res = pthread_join(p, &value);
- assert(res == 0);
- assert(param == value);
+ T_ASSERT_POSIX_ZERO(res, "pthread_join");
+ T_ASSERT_EQ_PTR(param, value, "suspended early join value");
param = test();
res = pthread_create_suspended_np(&p, NULL, thread, param);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(res, "pthread_create_suspended_np");
kr = thread_resume(pthread_mach_thread_np(p));
- assert(kr == 0);
+ T_ASSERT_EQ_INT(kr, 0, "thread_resume");
usleep(3 * WAITTIME);
value = NULL;
res = pthread_join(p, &value);
- assert(res == 0);
- assert(param == value);
+ T_ASSERT_POSIX_ZERO(res, "pthread_join");
+ T_ASSERT_EQ_PTR(param, value, "suspended late join value");
+ // This test is supposed to test joining on the main thread. It's not
+ // clear how to express this with libdarwintest for now.
+ /*
test();
param = pthread_self();
res = pthread_create_suspended_np(&p, NULL, thread1, param);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(res, "pthread_create_suspended_np");
res = pthread_detach(p);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(res, "pthread_detach");
kr = thread_resume(pthread_mach_thread_np(p));
- assert(kr == 0);
+ T_ASSERT_EQ_INT(kr, 0, "thread_resume");
pthread_exit(0);
+ */
+}
+
+static void *
+thread_stub(__unused void *arg)
+{
+ return NULL;
}
+T_DECL(pthread_join_stress, "pthread_join in a loop")
+{
+ for (int i = 0; i < 1000; i++) {
+ pthread_t th[16];
+ for (int j = 0; j < i%16; j++){
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&th[j], NULL, thread_stub, NULL), NULL);
+ }
+ for (int j = i%16; j >= 0; j--){
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_join(th[j], NULL), NULL);
+ }
+ }
+ T_PASS("Success!");
+}
--- /dev/null
+#include <stdlib.h>
+#include <pthread.h>
+#include <darwintest.h>
+#include <machine/vmparam.h>
+
+T_DECL(main_stack, "tests the reported values for the main thread stack",
+ T_META_CHECK_LEAKS(NO), T_META_ALL_VALID_ARCHS(YES)){
+ const uintptr_t stackaddr = (uintptr_t)pthread_get_stackaddr_np(pthread_self());
+ const size_t stacksize = pthread_get_stacksize_np(pthread_self());
+ T_LOG("stack: %zx -> %zx (+%zx)", stackaddr - stacksize, stackaddr, stacksize);
+ T_EXPECT_LT((uintptr_t)__builtin_frame_address(0), stackaddr, NULL);
+ T_EXPECT_GT((uintptr_t)__builtin_frame_address(0), stackaddr - stacksize, NULL);
+
+ struct rlimit lim;
+ T_ASSERT_POSIX_SUCCESS(getrlimit(RLIMIT_STACK, &lim), NULL);
+ T_EXPECT_EQ((size_t)lim.rlim_cur, pthread_get_stacksize_np(pthread_self()), "reported rlimit should match stacksize");
+
+ lim.rlim_cur = lim.rlim_cur / 8;
+ T_ASSERT_POSIX_SUCCESS(setrlimit(RLIMIT_STACK, &lim), NULL);
+
+ T_EXPECTFAIL;
+	T_EXPECT_EQ((size_t)lim.rlim_cur, pthread_get_stacksize_np(pthread_self()), "new rlimit should match stacksize");
+}
--- /dev/null
+#include <stdlib.h>
+#include <pthread.h>
+#include <darwintest.h>
+#include <machine/vmparam.h>
+
+T_DECL(main_stack_custom, "tests the reported values for a custom main thread stack"){
+ T_EXPECT_EQ((size_t)STACKSIZE, pthread_get_stacksize_np(pthread_self()), NULL);
+
+ const uintptr_t stackaddr = (uintptr_t)pthread_get_stackaddr_np(pthread_self());
+ size_t stacksize = pthread_get_stacksize_np(pthread_self());
+ T_LOG("stack: %zx -> %zx (+%zx)", stackaddr - stacksize, stackaddr, stacksize);
+ T_EXPECT_LT((uintptr_t)__builtin_frame_address(0), stackaddr, NULL);
+ T_EXPECT_GT((uintptr_t)__builtin_frame_address(0), stackaddr - stacksize, NULL);
+
+ struct rlimit lim;
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(getrlimit(RLIMIT_STACK, &lim), NULL);
+ lim.rlim_cur = lim.rlim_cur / 8;
+ T_EXPECT_EQ(setrlimit(RLIMIT_STACK, &lim), -1, "setrlimit for stack should fail with custom stack");
+ T_EXPECT_EQ((size_t)STACKSIZE, pthread_get_stacksize_np(pthread_self()), "reported stacksize shouldn't change");
+}
--- /dev/null
+#include <stdlib.h>
+#include <pthread.h>
+#include <darwintest.h>
+#include <machine/vmparam.h>
+
+T_DECL(main_stack_legacy, "tests the reported values for a custom main thread stack",
+ T_META_CHECK_LEAKS(NO))
+{
+ T_EXPECT_LT((uintptr_t)0, pthread_get_stacksize_np(pthread_self()), NULL);
+
+ const uintptr_t stackaddr = (uintptr_t)pthread_get_stackaddr_np(pthread_self());
+ size_t stacksize = pthread_get_stacksize_np(pthread_self());
+ T_LOG("stack: %zx -> %zx (+%zx)", stackaddr - stacksize, stackaddr, stacksize);
+ T_EXPECT_LT((uintptr_t)__builtin_frame_address(0), stackaddr, NULL);
+ T_EXPECT_GT((uintptr_t)__builtin_frame_address(0), stackaddr - stacksize, NULL);
+}
-#include <assert.h>
#include <pthread.h>
-#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stdbool.h>
#include <errno.h>
+#include <darwintest.h>
+
struct context {
pthread_mutex_t mutex;
long value;
long count;
};
-void *test_thread(void *ptr) {
+static void *test_thread(void *ptr) {
int res;
long old;
struct context *context = ptr;
char *str;
do {
- bool try = i & 1;
+ bool try = i++ & 1;
- switch (i++ & 1) {
- case 0:
- str = "pthread_mutex_lock";
- res = pthread_mutex_lock(&context->mutex);
- break;
- case 1:
- str = "pthread_mutex_trylock";
- res = pthread_mutex_trylock(&context->mutex);
- break;
+ if (!try){
+ str = "pthread_mutex_lock";
+ res = pthread_mutex_lock(&context->mutex);
+ } else {
+ str = "pthread_mutex_trylock";
+ res = pthread_mutex_trylock(&context->mutex);
}
if (res != 0) {
if (try && res == EBUSY) {
continue;
}
- fprintf(stderr, "[%ld] %s: %s\n", context->count, str, strerror(res));
- abort();
+ T_ASSERT_POSIX_ZERO(res, "[%ld] %s", context->count, str);
}
old = __sync_fetch_and_or(&context->value, 1);
if ((old & 1) != 0) {
- fprintf(stderr, "[%ld] OR %lx\n", context->count, old);
- abort();
+ T_FAIL("[%ld] OR %lx\n", context->count, old);
}
old = __sync_fetch_and_and(&context->value, 0);
if ((old & 1) == 0) {
- fprintf(stderr, "[%ld] AND %lx\n", context->count, old);
- abort();
+ T_FAIL("[%ld] AND %lx\n", context->count, old);
}
res = pthread_mutex_unlock(&context->mutex);
if (res) {
- fprintf(stderr, "[%ld] pthread_mutex_lock: %s\n", context->count, strerror(res));
- abort();
+ T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_mutex_lock", context->count);
}
} while (__sync_fetch_and_sub(&context->count, 1) > 0);
- exit(0);
+
+ T_PASS("thread completed successfully");
+
+ return NULL;
}
-int main(int argc, char *argv[])
+T_DECL(mutex, "pthread_mutex",
+ T_META_ALL_VALID_ARCHS(YES))
{
struct context context = {
.mutex = PTHREAD_MUTEX_INITIALIZER,
.value = 0,
- .count = 5000000,
+ .count = 1000000,
};
int i;
int res;
- int threads = 16;
+ int threads = 8;
pthread_t p[threads];
for (i = 0; i < threads; ++i) {
res = pthread_create(&p[i], NULL, test_thread, &context);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(res, "pthread_create()");
}
for (i = 0; i < threads; ++i) {
res = pthread_join(p[i], NULL);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(res, "pthread_join()");
}
-
- return 0;
}
--- /dev/null
+#include <assert.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <pthread.h>
+#include <stdlib.h>
+
+#include <darwintest.h>
+
+struct ctx {
+ volatile int last_holder;
+ volatile int quit;
+ int iterations[2];
+ pthread_mutex_t l;
+};
+
+static void *
+job(struct ctx *ctx, int idx)
+{
+ int ret;
+ while (!ctx->quit) {
+ ret = pthread_mutex_trylock(&ctx->l);
+ T_QUIET; T_ASSERT_TRUE(ret == EBUSY || ret == 0, "trylock");
+
+ if (ret == EBUSY) {
+ T_QUIET; T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&ctx->l),
+ "pthread_mutex_lock");
+ // we know that the other thread was just holding the lock
+ T_QUIET; T_ASSERT_EQ(ctx->last_holder, !idx,
+				"expecting opposite last holder after failed trylock");
+ }
+
+ ctx->last_holder = idx;
+ ctx->iterations[idx]++;
+
+ T_QUIET; T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&ctx->l),
+ "pthread_mutex_unlock");
+ }
+ return NULL;
+}
+
+static void *
+job1(void *ctx)
+{
+ return job((struct ctx *)ctx, 0);
+}
+
+static void *
+job2(void *ctx)
+{
+ return job((struct ctx *)ctx, 1);
+}
+
+T_DECL(mutex_trylock, "pthread_mutex_trylock",
+ T_META_ALL_VALID_ARCHS(YES))
+{
+ // This testcase spins up two threads with identical jobs. They try-lock
+ // the same mutex. If that fails, they check that the last holder of the
+ // lock is the other thread.
+ const int test_duration = 10; // sec
+ struct ctx ctx = {0};
+
+ pthread_t t1, t2;
+
+ T_ASSERT_POSIX_ZERO(pthread_mutex_init(&ctx.l, NULL),
+ "pthread_mutex_init");
+ T_ASSERT_POSIX_ZERO(pthread_create(&t1, NULL, job1, &ctx),
+ "pthread_create 1");
+ T_ASSERT_POSIX_ZERO(pthread_create(&t2, NULL, job2, &ctx),
+ "pthread_create 2");
+
+ sleep(test_duration);
+
+ ctx.quit = 1;
+ T_ASSERT_POSIX_ZERO(pthread_join(t1, NULL), "pthread join 1");
+ T_ASSERT_POSIX_ZERO(pthread_join(t2, NULL), "pthread join 2");
+
+ T_LOG("after %d seconds iterations 0: %d, 1: %d. Exiting\n",
+ test_duration, ctx.iterations[0], ctx.iterations[1]);
+}
+++ /dev/null
-#define __DARWIN_NON_CANCELABLE 0
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-static pthread_once_t once = PTHREAD_ONCE_INIT;
-static int x = 0;
-
-void cancelled(void)
-{
- printf("thread cancelled.\n");
-}
-
-void oncef(void)
-{
- printf("in once handler: %p\n", pthread_self());
- sleep(5);
- x = 1;
-}
-
-void* a(void *ctx)
-{
- printf("a started: %p\n", pthread_self());
- pthread_cleanup_push((void*)cancelled, NULL);
- pthread_once(&once, oncef);
- pthread_cleanup_pop(0);
- printf("a finished\n");
- return NULL;
-}
-
-void* b(void *ctx)
-{
- sleep(1); // give enough time for a() to get into pthread_once
- printf("b started: %p\n", pthread_self());
- pthread_once(&once, oncef);
- printf("b finished\n");
- return NULL;
-}
-
-int main(void)
-{
- pthread_t t1;
- if (pthread_create(&t1, NULL, a, NULL) != 0) {
- fprintf(stderr, "failed to create thread a.");
- exit(1);
- }
-
- pthread_t t2;
- if (pthread_create(&t2, NULL, b, NULL) != 0) {
- fprintf(stderr, "failed to create thread b.");
- exit(1);
- }
-
- sleep(2);
- pthread_cancel(t1);
-
- pthread_join(t1, NULL);
- pthread_join(t2, NULL);
- exit(0);
-}
\ No newline at end of file
--- /dev/null
+#include <pthread.h>
+
+#include <darwintest.h>
+
+static volatile int once_invoked = 0;
+
+static void
+cancelation_handler(void * __unused arg)
+{
+ T_LOG("cancelled");
+}
+
+__attribute__((noreturn))
+static void
+await_cancelation(void)
+{
+ pthread_cleanup_push(cancelation_handler, NULL);
+ T_LOG("waiting for cancellation");
+
+ // can't use darwintest once cancellation is enabled
+ pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
+
+ while (true) {
+ pthread_testcancel();
+ sched_yield();
+ }
+
+ pthread_cleanup_pop(0);
+}
+
+static void *
+await_cancelation_in_once(void *arg)
+{
+ // disable cancellation until pthread_once to protect darwintest
+ pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
+
+ T_LOG("starting the thread");
+ pthread_once_t *once = (pthread_once_t *)arg;
+ pthread_once(once, await_cancelation);
+ return NULL;
+}
+
+static void
+oncef(void)
+{
+ T_LOG("once invoked");
+ once_invoked++;
+}
+
+T_DECL(once_cancel, "pthread_once is re-executed if cancelled")
+{
+ pthread_once_t once = PTHREAD_ONCE_INIT;
+ pthread_t t;
+ void *join_result = NULL;
+
+ T_ASSERT_POSIX_ZERO(
+ pthread_create(&t, NULL, await_cancelation_in_once, &once), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_cancel(t), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_join(t, &join_result), NULL);
+ T_ASSERT_EQ(join_result, PTHREAD_CANCELED, NULL);
+
+ T_ASSERT_POSIX_ZERO(pthread_once(&once, oncef), NULL);
+ T_ASSERT_EQ(once_invoked, 1, NULL);
+}
--- /dev/null
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <limits.h>
+#include <pthread.h>
+
+#include <darwintest.h>
+
+#define STACK_ALLOWANCE (1024ULL * 6)
+
+static void *
+pthread_attr_setstacksize_func(void *arg)
+{
+#if defined(__arm64__)
+ // Because of <rdar://problem/19941744>, the kext adds additional size to the stack on arm64.
+ T_EXPECTFAIL;
+#endif
+ T_EXPECT_EQ((size_t)arg, pthread_get_stacksize_np(pthread_self()), "[stacksize=%zu] pthread_self stack size matches", (size_t)arg);
+
+ size_t stacksize = (size_t)arg - STACK_ALLOWANCE;
+ char *buf = alloca(stacksize);
+
+ memset_s(buf, sizeof(buf), 0, sizeof(buf) - 1);
+
+ return (void*)pthread_attr_setstacksize_func;
+}
+
+T_DECL(pthread_attr_setstacksize, "pthread_attr_setstacksize")
+{
+ size_t stacksizes[] = {PTHREAD_STACK_MIN, 1024ULL * 16, 1024ULL * 32, 1024ULL * 1024};
+ for (int i = 0; (size_t)i < sizeof(stacksizes)/sizeof(stacksizes[0]); i++){
+ pthread_t t = NULL;
+ pthread_attr_t attr;
+ size_t stacksize = stacksizes[i];
+
+ T_ASSERT_POSIX_ZERO(pthread_attr_init(&attr), "[stacksize=%zu] pthread_attr_init", stacksize);
+ T_ASSERT_POSIX_ZERO(pthread_attr_setstacksize(&attr, stacksize), "[stacksize=%zu] pthread_attr_stacksize", stacksize);
+
+ T_ASSERT_POSIX_ZERO(pthread_create(&t, &attr, pthread_attr_setstacksize_func, (void*)stacksize), "[stacksize=%zu] pthread_create", stacksize);
+ T_ASSERT_NOTNULL(t, "[stacksize=%zu] pthread pointer not null", stacksize);
+
+ T_EXPECT_POSIX_ZERO(pthread_attr_destroy(&attr), "[stacksize=%zu] pthread_attr_destroy", stacksize);
+
+#if defined(__arm64__)
+ // Because of <rdar://problem/19941744>, the kext adds additional size to the stack on arm64.
+ T_EXPECTFAIL;
+#endif
+ T_EXPECT_EQ(stacksize, pthread_get_stacksize_np(t), "[stacksize=%zu] pthread stack size matches", stacksize);
+
+ void *out = NULL;
+ T_ASSERT_POSIX_ZERO(pthread_join(t, &out), "[stacksize=%zu] pthread_join", stacksize);
+ T_EXPECT_EQ_PTR(out, (void*)pthread_attr_setstacksize_func, "[stacksize=%zu] pthread_join returns correct value", stacksize);
+ }
+}
--- /dev/null
+#include <pthread.h>
+
+#include <darwintest.h>
+
+#define MAX_THREADS 512
+#define THREAD_DEPTH 32
+
+static void *
+thread(void * arg)
+{
+ T_LOG("thread %lx here: %d", (uintptr_t)pthread_self(), (int)arg);
+ return (arg);
+}
+
+T_DECL(pthread_bulk_create, "pthread_bulk_create")
+{
+ void *thread_res;
+ pthread_t t[THREAD_DEPTH];
+
+ for (int i = 0; i < MAX_THREADS; i += THREAD_DEPTH) {
+ T_LOG("Creating threads %d..%d\n", i, i + THREAD_DEPTH - 1);
+ for (int j = 0; j < THREAD_DEPTH; j++) {
+ void *arg = (void *)(intptr_t)(i + j);
+ T_QUIET; T_ASSERT_POSIX_ZERO(
+ pthread_create(&t[j], NULL, thread, arg), NULL);
+ }
+ T_LOG("Waiting for threads");
+ for (int j = 0; j < THREAD_DEPTH; j++) {
+ T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(t[j], &thread_res), NULL);
+ T_QUIET; T_ASSERT_EQ(i + j, (int)thread_res, "thread return value");
+ }
+ }
+}
--- /dev/null
+#include <pthread.h>
+
+#include <darwintest.h>
+
+static void *
+run(void * __unused arg)
+{
+ while (true) {
+ pthread_testcancel();
+ sched_yield();
+ }
+}
+
+T_DECL(pthread_cancel, "pthread_cancel",
+ T_META_ALL_VALID_ARCHS(YES))
+{
+ pthread_t thread;
+ void *join_result = NULL;
+ T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, run, NULL), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_cancel(thread), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_join(thread, &join_result), NULL);
+ T_ASSERT_EQ(join_result, PTHREAD_CANCELED, NULL);
+}
--- /dev/null
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <sys/errno.h>
+#include <sys/stat.h>
+#include <sys/param.h>
+#include <pthread/private.h>
+
+#include <darwintest.h>
+
+#include "../src/pthread_cwd.c"
+
+// /tmp is a symlink, so use full path for strict compare
+#define WORKDIR "/private/var/tmp/ptwork"
+#define WORKDIR1 WORKDIR "/one"
+#define WORKDIR2 WORKDIR "/two"
+
+/*
+ * This is a slow routine, just like getcwd(); people should remember that
+ * they set something, instead of asking us what they told us.
+ */
+static char *
+pthread_getcwd_np(char *buf, size_t size)
+{
+ int fd_cwd;
+
+ if (buf == NULL)
+ return (NULL);
+
+ /*
+ * Open the "current working directory"; if we are running on a per
+ * thread working directory, that's the one we will get.
+ */
+ if ((fd_cwd = open(".", O_RDONLY)) == -1)
+ return (NULL);
+
+ /*
+ * Switch off the per thread current working directory, in case we
+ * were on one; this fails if we aren't running with one.
+ */
+ if (pthread_fchdir_np( -1) == -1) {
+		/* We aren't running with one... all done. */
+ close (fd_cwd);
+ return (NULL);
+ }
+
+ /*
+ * If we successfully switched off, then we switch back...
+ * this may fail catastrophically, if we no longer have rights;
+ * this should never happen, but threads may clobber our fd out
+ * from under us, etc..
+ */
+ if (pthread_fchdir_np(fd_cwd) == -1) {
+ close(fd_cwd);
+ errno = EBADF; /* sigil for catastrophic failure */
+ return (NULL);
+ }
+
+ /* Close our directory handle */
+ close(fd_cwd);
+
+ /*
+ * And call the regular getcwd(), which will return the per thread
+ * current working directory instead of the process one.
+ */
+ return getcwd(buf, size);
+}
+
+T_DECL(pthread_cwd, "per-thread working directory")
+{
+ char buf[MAXPATHLEN];
+
+ T_SETUPBEGIN;
+
+ T_ASSERT_EQ(pthread_fchdir_np(-1), -1, "test should not start with per-thread cwd");
+
+ /* Blow the umask to avoid shooting our foot */
+ umask(0); /* "always successful" */
+
+ /* Now set us up the test directories... */
+ T_WITH_ERRNO; T_ASSERT_TRUE(mkdir(WORKDIR, 0777) != -1 || errno != EEXIST, NULL);
+ T_WITH_ERRNO; T_ASSERT_TRUE(mkdir(WORKDIR1, 0777) != -1 || errno != EEXIST, NULL);
+ T_WITH_ERRNO; T_ASSERT_TRUE(mkdir(WORKDIR2, 0777) != -1 || errno != EEXIST, NULL);
+
+ T_SETUPEND;
+
+ T_LOG("start in " WORKDIR1);
+ T_ASSERT_POSIX_SUCCESS(chdir(WORKDIR1), NULL);
+ T_ASSERT_EQ_STR(WORKDIR1, getcwd(buf, MAXPATHLEN), NULL);
+ T_ASSERT_NULL(pthread_getcwd_np(buf, MAXPATHLEN), "pthread_getcwd_np should return NULL without per-thread cwd, got: %s", buf);
+
+ T_LOG("move per-thread CWD to " WORKDIR2);
+ T_ASSERT_POSIX_SUCCESS(pthread_chdir_np(WORKDIR2), NULL);
+ T_ASSERT_EQ_STR(WORKDIR2, getcwd(buf, MAXPATHLEN), NULL);
+ T_ASSERT_EQ_STR(WORKDIR2, pthread_getcwd_np(buf, MAXPATHLEN), NULL);
+
+ T_LOG("unset per-thread CWD and confirm things go back");
+ T_ASSERT_POSIX_SUCCESS(pthread_fchdir_np(-1), NULL);
+	T_ASSERT_NULL(pthread_getcwd_np(buf, MAXPATHLEN), "pthread_getcwd_np should return NULL after resetting per-thread cwd, got: %s", buf);
+ T_ASSERT_EQ_STR(WORKDIR1, getcwd(buf, MAXPATHLEN), NULL);
+}
--- /dev/null
+#include <stdio.h>
+#include <pthread.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <darwintest.h>
+
+#define STACK_SIZE 32768
+#define THREAD_DEPTH 2000
+
+static unsigned int glob = 0;
+static unsigned int i;
+
+static pthread_mutex_t count_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void *
+thread_exit(__unused void *arg)
+{
+ unsigned int count;
+
+ sleep(5);
+ pthread_mutex_lock(&count_lock);
+ count = ++glob;
+ pthread_mutex_unlock(&count_lock);
+
+ T_QUIET; T_EXPECT_NE(pthread_mach_thread_np(pthread_self()), (mach_port_t)0, NULL);
+
+ if (count == THREAD_DEPTH){
+ T_PASS("all the threads survived main thread exit");
+ T_END;
+ }
+ return NULL;
+}
+
+T_DECL(pthread_exit, "pthread_exit")
+{
+ int j;
+ pthread_t th[THREAD_DEPTH];
+
+ T_LOG("Creating threads %d..%d", i, i+THREAD_DEPTH-1);
+ for (j = 0; j < THREAD_DEPTH; j++) {
+ pthread_attr_t attr;
+
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&th[j], &attr, thread_exit, (void *)&glob), NULL);
+ pthread_attr_destroy(&attr);
+ }
+ pthread_exit(pthread_self());
+ T_FAIL("Zombie walks");
+}
+
+static void *
+thread_stub(__unused void *arg)
+{
+ return NULL;
+}
+
+T_DECL(pthread_exit_private_stacks, "pthread_exit with private stacks", T_META_CHECK_LEAKS(NO))
+{
+ int j;
+ pthread_t th[THREAD_DEPTH];
+ void *stacks[THREAD_DEPTH];
+
+ for (j = 0; j < THREAD_DEPTH; j++) {
+ T_QUIET; T_ASSERT_NOTNULL((stacks[j] = malloc(STACK_SIZE)), NULL);
+ }
+
+ for (i=0;i < 20; i++) {
+ for (j = 0; j < THREAD_DEPTH; j++) {
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setstack(&attr, stacks[j], STACK_SIZE);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&th[j], &attr, thread_stub, (void *)&glob), NULL);
+ pthread_attr_destroy(&attr);
+ }
+ for (j = 0; j < THREAD_DEPTH; j++) {
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_join(th[j], NULL), NULL);
+ }
+ T_PASS("Created threads %d..%d", i*THREAD_DEPTH, (i+1)*THREAD_DEPTH-1);
+ }
+
+}
+
+T_DECL(pthread_exit_detached, "pthread_exit with detached threads")
+{
+ int j;
+ pthread_t th[THREAD_DEPTH];
+
+ for (i=0;i < 20; i++) {
+ for (j = 0; j < THREAD_DEPTH; j++) {
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&th[j], &attr, thread_stub, (void *)&glob), NULL);
+ pthread_attr_destroy(&attr);
+ }
+ sleep(1);
+ T_PASS("Created threads %d..%d", i*THREAD_DEPTH, (i+1)*THREAD_DEPTH-1);
+ }
+ T_PASS("Success!");
+}
--- /dev/null
+#include <stdatomic.h>
+#include <pthread.h>
+#include <pthread/introspection_private.h>
+#include <dispatch/dispatch.h>
+
+#include <darwintest.h>
+
+static pthread_introspection_hook_t prev_pthread_introspection_hook;
+
+#define THREAD_COUNT 3
+
+static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
+ void *addr, size_t size)
+{
+ static atomic_int create_count;
+ static atomic_int terminate_count;
+
+ uint64_t tid;
+ pthread_threadid_np(NULL, &tid);
+
+ if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
+ T_LOG("event = PTHREAD_INTROSPECTION_THREAD_CREATE, thread = %p:%lld, addr = %p, size = 0x%zx", thread, tid, addr, size);
+ create_count++;
+ } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
+ T_LOG("event = PTHREAD_INTROSPECTION_THREAD_TERMINATE, thread = %p:%lld, addr = %p, size = 0x%zx", thread, tid, addr, size);
+ terminate_count++;
+ T_ASSERT_GE(create_count, THREAD_COUNT, NULL);
+ T_PASS("Got termination events");
+ T_END;
+ }
+
+ if (prev_pthread_introspection_hook != NULL){
+ prev_pthread_introspection_hook(event, thread, addr, size);
+ }
+}
+
+T_DECL(PR_25679871, "PR-25679871",
+ T_META_TIMEOUT(30), T_META_ALL_VALID_ARCHS(YES))
+{
+ prev_pthread_introspection_hook = pthread_introspection_hook_install(&my_pthread_introspection_hook);
+
+ // minus two that come up in dispatch internally, one that comes after this block
+ for (int i = 0; i < THREAD_COUNT - 3; i++) {
+ dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+ sleep(3);
+ });
+ }
+ dispatch_queue_t serial_queue = dispatch_queue_create("test queue", NULL);
+ __block dispatch_block_t looping_block = ^{
+ static int count;
+ if (count < 20) {
+ dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 50 * NSEC_PER_MSEC), serial_queue, looping_block);
+ }
+ };
+ dispatch_async(serial_queue, looping_block);
+
+ sleep(30);
+
+ T_FAIL("Why are we still alive?");
+}
--- /dev/null
+/*
+ * @OSF_COPYRIGHT@
+ *
+ */
+/*
+ * HISTORY
+ * $Log: pthread_test3.c,v $
+ * Revision 1.1.4.2 1996/10/03 17:53:38 emcmanus
+ * Changed fprintf(stderr...) to printf(...) to allow building with
+ * PURE_MACH includes.
+ * [1996/10/03 16:17:34 emcmanus]
+ *
+ * Revision 1.1.4.1 1996/10/01 07:36:02 emcmanus
+ * Copied from rt3_merge.
+ * Include <stdlib.h> for malloc() prototype.
+ * [1996/10/01 07:35:53 emcmanus]
+ *
+ * Revision 1.1.2.1 1996/09/27 13:12:15 gdt
+ * Add support for thread specific data
+ * [1996/09/27 13:11:17 gdt]
+ *
+ * $EndLog$
+ */
+
+/*
+ * Test POSIX Thread Specific Data
+ */
+
+#include <stdio.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include <darwintest.h>
+
+static pthread_key_t key;
+
+static void *
+thread(void * arg)
+{
+ char * msg;
+ T_LOG("thread %lx here: %s\n", (uintptr_t)pthread_self(), (char *)arg);
+ msg = malloc(256);
+ sprintf(msg, "This is thread specific data for %lx\n", (uintptr_t)pthread_self());
+ T_ASSERT_POSIX_ZERO(pthread_setspecific(key, msg), NULL);
+ return (arg);
+}
+
+static void
+grim_reaper(void * param)
+{
+ T_LOG("grim_reaper - self: %lx, param: %lx value: %s", (uintptr_t)pthread_self(), (uintptr_t)param, (char *)param);
+ free(param);
+}
+
+T_DECL(pthread_setspecific, "pthread_setspecific",
+ T_META_ALL_VALID_ARCHS(YES))
+{
+ void * thread_res;
+ pthread_t t1, t2;
+ T_ASSERT_POSIX_ZERO(pthread_key_create(&key, grim_reaper), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_create(&t1, (pthread_attr_t *)NULL, thread, "thread #1 arg"), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_create(&t2, (pthread_attr_t *)NULL, thread, "thread #2 arg"), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_join(t1, &thread_res), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_join(t2, &thread_res), NULL);
+}
--- /dev/null
+#include <pthread.h>
+#include <pthread/private.h>
+#include <dispatch/dispatch.h>
+
+#include <darwintest.h>
+
+extern __uint64_t __thread_selfid( void );
+
+static void *do_test(void * __unused arg)
+{
+ uint64_t threadid = __thread_selfid();
+ T_ASSERT_NOTNULL(threadid, NULL);
+
+ uint64_t pth_threadid = 0;
+ T_ASSERT_POSIX_ZERO(pthread_threadid_np(NULL, &pth_threadid), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_threadid_np(pthread_self(), &pth_threadid), NULL);
+ T_EXPECT_EQ(threadid, pth_threadid, "pthread_threadid_np()");
+
+ pth_threadid = _pthread_threadid_self_np_direct();
+ T_EXPECT_EQ(threadid, pth_threadid, "pthread_threadid_np_direct()");
+
+ return NULL;
+}
+
+T_DECL(pthread_threadid_np, "pthread_threadid_np",
+ T_META_ALL_VALID_ARCHS(YES))
+{
+ T_LOG("Main Thread");
+ do_test(NULL);
+
+ T_LOG("Pthread");
+ pthread_t pth;
+ T_ASSERT_POSIX_ZERO(pthread_create(&pth, NULL, do_test, NULL), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_join(pth, NULL), NULL);
+
+ T_LOG("Workqueue Thread");
+ dispatch_queue_t dq = dispatch_queue_create("myqueue", NULL);
+ dispatch_async(dq, ^{ do_test(NULL); });
+ dispatch_sync(dq, ^{});
+
+ T_LOG("Workqueue Thread Reuse");
+ dispatch_async(dq, ^{ do_test(NULL); });
+ dispatch_sync(dq, ^{});
+}
(((i) == THREAD_QOS_UNSPECIFIED) ? QOS_CLASS_UNSPECIFIED : \
((i) == THREAD_QOS_USER_INTERACTIVE) ? QOS_CLASS_USER_INTERACTIVE : \
((i) == THREAD_QOS_USER_INITIATED) ? QOS_CLASS_USER_INITIATED : \
- ((i) == THREAD_QOS_LEGACY) ? QOS_CLASS_LEGACY : \
+ ((i) == THREAD_QOS_LEGACY) ? QOS_CLASS_DEFAULT : \
((i) == THREAD_QOS_UTILITY) ? QOS_CLASS_UTILITY : \
((i) == THREAD_QOS_BACKGROUND) ? QOS_CLASS_BACKGROUND : \
((i) == THREAD_QOS_MAINTENANCE) ? QOS_CLASS_MAINTENANCE : \
--- /dev/null
+#include <assert.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <dispatch/dispatch.h>
+#include <sys/mman.h>
+
+#include <darwintest.h>
+
+#define T_LOG_VERBOSE(...)
+
+#ifdef __LP64__
+#define STACK_LOCATIONS 16
+#else
+#define STACK_LOCATIONS 8
+#endif
+
+static void*
+thread_routine(void *loc)
+{
+ int foo;
+ *(uintptr_t*)loc = (uintptr_t)&foo;
+ return NULL;
+}
+
+static int
+pointer_compare(const void *ap, const void *bp)
+{
+ uintptr_t a = *(const uintptr_t*)ap;
+ uintptr_t b = *(const uintptr_t*)bp;
+ return a > b ? 1 : a < b ? -1 : 0;
+}
+
+static void
+test_stack_aslr(bool workqueue_thread)
+{
+ const int attempts = 128;
+ int attempt_round = 0;
+
+ uintptr_t *addr_array = mmap(NULL, sizeof(uintptr_t) * attempts,
+ PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANON, -1, 0);
+ T_QUIET; T_ASSERT_NOTNULL(addr_array, NULL);
+
+again:
+ bzero(addr_array, sizeof(uintptr_t) * attempts);
+
+ for (int i = 0; i < attempts; i++) {
+ pid_t pid = fork();
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "[%d] fork()", i);
+
+ if (pid) { // parent
+ pid = waitpid(pid, NULL, 0);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "[%d] waitpid()", i);
+ } else if (workqueue_thread) { // child
+ dispatch_async(dispatch_get_global_queue(0,0), ^{
+ int foo;
+ addr_array[i] = (uintptr_t)&foo;
+ exit(0);
+ });
+ while (true) sleep(1);
+ } else { // child
+ pthread_t th;
+ int ret = pthread_create(&th, NULL, thread_routine, &addr_array[i]);
+ assert(ret == 0);
+ ret = pthread_join(th, NULL);
+ assert(ret == 0);
+ exit(0);
+ }
+ }
+
+ qsort(addr_array, attempts, sizeof(uintptr_t), pointer_compare);
+
+ T_LOG("Stack address range: %p - %p (+%lx)", (void*)addr_array[0], (void*)addr_array[attempts-1],
+ addr_array[attempts-1] - addr_array[0]);
+
+ int unique_values = 0;
+ T_LOG_VERBOSE("[%p]", (void*)addr_array[0]);
+ for (int i = 1; i < attempts; i++) {
+ T_LOG_VERBOSE("[%p]", (void*)addr_array[i]);
+ if (addr_array[i-1] != addr_array[i]) {
+ unique_values++;
+ }
+ }
+
+ if (attempt_round < 3) T_MAYFAIL;
+ T_EXPECT_GE(unique_values, STACK_LOCATIONS, "Should have more than %d unique stack locations", STACK_LOCATIONS);
+ if (attempt_round++ < 3 && unique_values < STACK_LOCATIONS) goto again;
+}
+
+T_DECL(pthread_stack_aslr, "Confirm that stacks are ASLRed", T_META_CHECK_LEAKS(NO),
+ T_META_ALL_VALID_ARCHS(YES))
+{
+ test_stack_aslr(false);
+}
+
+T_DECL(wq_stack_aslr, "Confirm that workqueue stacks are ASLRed", T_META_CHECK_LEAKS(NO),
+ T_META_ALL_VALID_ARCHS(YES))
+{
+ test_stack_aslr(true);
+}
-#include <assert.h>
#include <pthread.h>
#include <stdio.h>
-void *ptr = NULL;
+#include <darwintest.h>
-void destructor(void *value)
+static void *ptr = NULL;
+
+static void destructor(void *value)
{
ptr = value;
}
-void *thread(void *param)
+static void *thread(void *param)
{
- int res;
-
pthread_key_t key = *(pthread_key_t *)param;
- res = pthread_setspecific(key, (void *)0x12345678);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(pthread_setspecific(key, (void *)0x12345678), NULL);
void *value = pthread_getspecific(key);
- pthread_key_t key2;
- res = pthread_key_create(&key, NULL);
- assert(res == 0);
- res = pthread_setspecific(key, (void *)0x55555555);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(pthread_key_create(&key, NULL), NULL);
+ T_ASSERT_POSIX_ZERO(pthread_setspecific(key, (void *)0x55555555), NULL);
return value;
}
-int main(int argc, char *argv[])
+T_DECL(tsd, "tsd",
+ T_META_ALL_VALID_ARCHS(YES))
{
- int res;
pthread_key_t key;
- res = pthread_key_create(&key, destructor);
- assert(res == 0);
- printf("key = %ld\n", key);
+ T_ASSERT_POSIX_ZERO(pthread_key_create(&key, destructor), NULL);
+ T_LOG("key = %ld", key);
pthread_t p = NULL;
- res = pthread_create(&p, NULL, thread, &key);
- assert(res == 0);
+ T_ASSERT_POSIX_ZERO(pthread_create(&p, NULL, thread, &key), NULL);
void *value = NULL;
- res = pthread_join(p, &value);
- printf("value = %p\n", value);
- printf("ptr = %p\n", ptr);
+ T_ASSERT_POSIX_ZERO(pthread_join(p, &value), NULL);
+ T_LOG("value = %p; ptr = %p\n", value, ptr);
- assert(ptr == value);
+ T_EXPECT_EQ(ptr, value, NULL);
- res = pthread_key_delete(key);
- assert(res == 0);
-
- return 0;
+ T_ASSERT_POSIX_ZERO(pthread_key_delete(key), NULL);
}
-
return 1;
}
}
- fprintf(stderr, "\tsuccessfully signaled by %d threads.\n", threads);
+ fprintf(stderr, "\tsuccessfully signaled by %d threads.\n\n", threads);
return 0;
}
dispatch_semaphore_signal(sema);
}
-void (^cb)(void) = NULL;
+void (^cb)(pthread_priority_t p) = NULL;
static void workqueue_func_kevent(void **buf, int *count){
pthread_priority_t p = (pthread_priority_t)pthread_getspecific(4);
- fprintf(stderr, "\tthread with qos %s spawned.\n", describe_pri(p));
+ fprintf(stderr, "\tthread with qos %s spawned. (buf: %p, count: %d)\n", describe_pri(p), buf ? *buf : NULL, count ? *count : 0);
if (cb){
- cb();
+ cb(p);
}
dispatch_semaphore_signal(sema);
if ((ret = do_req()) < 0) return ret;
if ((ret = do_wait(1)) < 0) return ret;
- // whole bunch of constrained threads
+ // whole bunch of constrained threads (must be last)
+
+ dispatch_semaphore_t mgr_sema = dispatch_semaphore_create(0);
+ assert(mgr_sema != NULL);
requests[0].priority = _pthread_qos_class_encode(QOS_CLASS_USER_INTERACTIVE, 0, 0);
requests[0].count = 1;
- cb = ^{
+ cb = ^(pthread_priority_t p){
+ if (p & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG){
+ dispatch_semaphore_signal(mgr_sema);
+ }
+
// burn some CPU
for (int i = 0; i < 1000000; i++){
char c[32];
};
for (int i = 0; i < 8; i++)
if ((ret = do_req()) < 0) return ret;
- if ((ret = do_wait(8)) < 0) return ret;
+ ret = dispatch_semaphore_wait(mgr_sema, timeout);
+ if (ret) {
+ fprintf(stderr, "timeout waiting for a manager thread");
+ return 1;
+ }
+ fprintf(stderr, "\tsuccessfully signaled by a manager thread.\n");
return 0;
}
local debugid = trace.debugid(codename)
if debugid ~= 0 then
trace.single(debugid,callback)
+ else
+ printf("WARNING: Cannot locate debugid for '%s'\n", codename)
end
end
initial_timestamp = 0
+workqueue_ptr_map = {};
get_prefix = function(buf)
if initial_timestamp == 0 then
initial_timestamp = buf.timestamp
end
+ local secs = (buf.timestamp - initial_timestamp) / 1000000000
+
local prefix
if trace.debugid_is_start(buf.debugid) then
prefix = "→"
else
prefix = "↔"
end
- local secs = (buf.timestamp - initial_timestamp) / 1000 / 1000000
- local usecs = (buf.timestamp - initial_timestamp) / 1000 % 1000000
- return string.format("%s %6d.%06d %-16s[%06x] %-24s",
- prefix, secs, usecs, buf.command, buf.threadid, buf.debugname)
+
+ local proc
+ if buf.command ~= "kernel_task" then
+ proc = buf.command
+ workqueue_ptr_map[buf[1]] = buf.command
+ elseif workqueue_ptr_map[buf[1]] ~= nil then
+ proc = workqueue_ptr_map[buf[1]]
+ else
+ proc = "UNKNOWN"
+ end
+
+ return string.format("%s %6.9f %-17s [%05d.%06x] %-24s",
+ prefix, secs, proc, buf.pid, buf.threadid, buf.debugname)
end
parse_pthread_priority = function(pri)
- local qos = bit32.rshift(bit32.band(pri, 0x00ffff00), 8)
+ pri = pri & 0xffffffff
+ if (pri & 0x02000000) == 0x02000000 then
+ return "Manager"
+ end
+ local qos = (pri & 0x00ffff00) >> 8
if qos == 0x20 then
- return "UInter"
+ return string.format("UInter[%x]", pri);
elseif qos == 0x10 then
- return "UInit"
+ return string.format("UInit[%x]", pri);
elseif qos == 0x08 then
- return "Dflt"
+ return string.format("Dflt[%x]", pri);
elseif qos == 0x04 then
- return "Util"
+ return string.format("Util[%x]", pri);
elseif qos == 0x02 then
- return "BG"
+ return string.format("BG[%x]", pri);
elseif qos == 0x01 then
- return "Maint"
+ return string.format("Maint[%x]", pri);
elseif qos == 0x00 then
- return "Unsp"
+ return string.format("Unsp[%x]", pri);
else
- return "Unkn"
+ return string.format("Unkn[%x]", pri);
end
end
end)
trace_codename("wq_req_event_manager", function(buf)
local prefix = get_prefix(buf)
- printf("%s\trecording event manager request at %s, existing at %d, %d running\n",
- prefix, parse_pthread_priority(buf.arg2), buf.arg3, buf.arg4)
+ if buf.arg2 == 1 then
+ printf("%s\tstarting event manager thread, existing at %d, %d added\n",
+ prefix, buf.arg3, buf.arg4)
+ else
+ printf("%s\trecording event manager request, existing at %d, %d added\n",
+ prefix, buf.arg3, buf.arg4)
+ end
end)
trace_codename("wq_start_add_timer", function(buf)
trace_codename("wq_overcommitted", function(buf)
local prefix = get_prefix(buf)
- if bit32.band(buf.arg2, 0x80) then
- printf("%s\tworkqueue overcimmitted @ %s, starting timer (thactive_count: %d, busycount; %d)",
+ if buf.arg2 & 0x1000000 ~= 0 then
+ printf("%s\tworkqueue overcommitted @ %s, starting timer (thactive_count: %d, busycount; %d)\n",
prefix, parse_pthread_priority(buf.arg2), buf.arg3, buf.arg4)
else
- printf("%s\tworkqueue overcimmitted @ %s (thactive_count: %d, busycount; %d)",
+ printf("%s\tworkqueue overcommitted @ %s (thactive_count: %d, busycount; %d)\n",
prefix, parse_pthread_priority(buf.arg2), buf.arg3, buf.arg4)
end
end)
local prefix = get_prefix(buf)
if trace.debugid_is_start(buf.debugid) then
if buf.arg2 == 0 then
- printf("%s\tthread %d looking for next request (idlecount: %d, reqcount: %d)\n",
- prefix, buf.threadid, buf.arg3, buf.arg4)
- else
printf("%s\ttrying to run a request on an idle thread (idlecount: %d, reqcount: %d)\n",
prefix, buf.arg3, buf.arg4)
+ else
+ printf("%s\tthread %x looking for next request (idlecount: %d, reqcount: %d)\n",
+ prefix, buf.threadid, buf.arg3, buf.arg4)
end
else
if buf.arg4 == 1 then
- printf("%s\tkicked off work on thread %d (overcommit: %d)\n", prefix, buf.arg2, buf.arg3)
- elseif buf.arg4 == 2 then
- printf("%s\tno work/threads (start_timer: %d)\n", prefix, buf.arg3)
+ printf("%s\tkicked off work on thread %x (overcommit: %d)\n", prefix, buf.arg2, buf.arg3)
elseif buf.arg4 == 3 then
- printf("%s\tthread parked\n", prefix)
+ printf("%s\tno work %x can currently do (start_timer: %d)\n", prefix, buf.arg2, buf.arg3)
elseif buf.arg4 == 4 then
- printf("%s\treturning with new request\n", prefix)
+ printf("%s\treturning to run next item\n", prefix)
else
printf("%s\tWARNING: UNKNOWN END CODE:%d\n", prefix, buf.arg4)
end
trace_codename("wq_runitem", function(buf)
local prefix = get_prefix(buf)
if trace.debugid_is_start(buf.debugid) then
- printf("%s\trunning an item at %s (flags: %x)\n", prefix, parse_pthread_priority(buf.arg3), buf.arg2)
+ printf("%s\tSTART running item\n", prefix)
else
- printf("%s\tthread returned\n", prefix)
+ printf("%s\tDONE running item; thread returned to kernel\n", prefix)
end
end)
printf("%s\tthread_yielded called (yielded_count: %d, reqcount: %d)\n",
prefix, buf.arg2, buf.arg3)
else
- if (buf.arg4 == 1) then
+ if buf.arg4 == 1 then
printf("%s\tthread_yielded completed kicking thread (yielded_count: %d, reqcount: %d)\n",
prefix, buf.arg2, buf.arg3)
- elseif (buf.arg4 == 2) then
+ elseif buf.arg4 == 2 then
printf("%s\tthread_yielded completed (yielded_count: %d, reqcount: %d)\n",
prefix, buf.arg2, buf.arg3)
else
end
end)
+trace_codename("wq_thread_create", function(buf)
+ printf("%s\tcreated new workqueue thread\n", get_prefix(buf))
+end)
+
+trace_codename("wq_manager_request", function(buf)
+ local prefix = get_prefix(buf)
+ printf("%s\tthread in bucket %d\n", prefix, buf.arg3)
+end)
+
-- The trace codes we need aren't enabled by default
darwin.sysctlbyname("kern.pthread_debug_tracing", 1)
darwin.sysctlbyname("kern.pthread_debug_tracing", 0)
end
trace.set_completion_handler(completion_handler)
-
INSTALL_PATH = /usr/local/lib/eOS
EXECUTABLE_PREFIX = lib
PRODUCT_NAME = pthread_eOS
-GCC_PREPROCESSOR_DEFINITIONS = $(BASE_PREPROCESSOR_MACROS) PTHREAD_TARGET_EOS=1
+GCC_PREPROCESSOR_DEFINITIONS = $(BASE_PREPROCESSOR_MACROS) PTHREAD_TARGET_EOS=1 VARIANT_STATIC=1
OTHER_LDFLAGS =
if [ "$ACTION" = installhdrs ]; then exit 0; fi
if [ "${RC_ProjectName%_Sim}" != "${RC_ProjectName}" ]; then exit 0; fi
+set -x
+set -e
+
mkdir -p "$DSTROOT"/usr/share/man/man2 || true
mkdir -p "$DSTROOT"/usr/share/man/man3 || true
mkdir -p "$DSTROOT"/usr/local/share/man/man2 || true
pthread_atfork.3 \
pthread_attr.3 \
pthread_attr_init_destroy.3 \
- pthread_attr_set_getdetachstate.3 \
- pthread_attr_set_getinheritsched.3 \
- pthread_attr_set_getschedparam.3 \
- pthread_attr_set_getschedpolicy.3 \
- pthread_attr_set_getscope.3 pthread_attr_set_getstackaddr.3 \
- pthread_attr_set_getstacksize.3 \
pthread_cancel.3 \
pthread_cleanup_pop.3 \
pthread_cleanup_push.3 \
pthread_rwlockattr_setpshared.3 \
pthread_self.3 \
pthread_setcancelstate.3 \
- pthread_setspecific.3"
+ pthread_setname_np.3 \
+ pthread_setspecific.3 \
+ pthread_threadid_np.3 \
+ pthread_yield_np.3"
cp $BASE_PAGES "$DSTROOT"/usr/share/man/man3
+for ATTR in \
+ detachstate \
+ inheritsched \
+ schedparam \
+ schedpolicy \
+ scope \
+ stackaddr \
+ stacksize \
+ ; do
+ cp pthread_attr_set_get$ATTR.3 "$DSTROOT"/usr/share/man/man3/pthread_attr_set$ATTR.3
+ cp pthread_attr_set_get$ATTR.3 "$DSTROOT"/usr/share/man/man3/pthread_attr_get$ATTR.3
+done
+
# Make hard links
cd "$DSTROOT"/usr/share/man/man3
for M in \
pthread_attr_destroy.3 \
- pthread_attr_getdetachstate.3 \
- pthread_attr_getinheritsched.3 \
- pthread_attr_getschedparam.3 \
- pthread_attr_getschedpolicy.3 \
- pthread_attr_getscope.3 \
- pthread_attr_getstackaddr.3 \
- pthread_attr_getstacksize.3 \
pthread_attr_init.3 \
- pthread_attr_setdetachstate.3 \
- pthread_attr_setinheritsched.3 \
- pthread_attr_setschedparam.3 \
- pthread_attr_setschedpolicy.3 \
- pthread_attr_setscope.3 \
- pthread_attr_setstackaddr.3 \
- pthread_attr_setstacksize.3 \
+ pthread_attr_setstack.3 \
+ pthread_attr_getstack.3 \
+ pthread_attr_setguardsize.3 \
+ pthread_attr_getguardsize.3 \
; do
ln -fh pthread_attr.3 $M
done
// pthread kext build options
+BUILD_VARIANTS = normal development
+BUILD_VARIANTS[sdk=macosx*] = normal
+
+PTHREAD_VARIANT_ = $(CURRENT_VARIANT)
+PTHREAD_VARIANT_YES = development
+PTHREAD_VARIANT = $(PTHREAD_VARIANT_$(DEBUG))
+
+VALID_ARCHS[sdk=macosx*] = $(NATIVE_ARCH_ACTUAL)
ARCHS = $(ARCHS_STANDARD_32_64_BIT)
SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos
DYLIB_CURRENT_VERSION = $(RC_ProjectSourceVersion)
CLANG_CXX_LANGUAGE_STANDARD = gnu++0x
CLANG_CXX_LIBRARY = libc++
GCC_PRECOMPILE_PREFIX_HEADER = YES
+
+GCC_OPTIMIZATION_LEVEL_normal = s
+GCC_OPTIMIZATION_LEVEL_development = 0
+GCC_OPTIMIZATION_LEVEL = $(GCC_OPTIMIZATION_LEVEL_$(PTHREAD_VARIANT))
+
+LLVM_LTO_normal = YES
+LLVM_LTO_development = NO
+LLVM_LTO = $(LLVM_LTO_$(PTHREAD_VARIANT))
+
GCC_PREPROCESSOR_DEFINITIONS_kext = XNU_KERNEL_PRIVATE MACH_KERNEL_PRIVATE ABSOLUTETIME_SCALAR_TYPE NEEDS_SCHED_CALL_T
-GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS_kext)
+GCC_PREPROCESSOR_DEFINITIONS_kext_development = MACH_ASSERT DEBUG
+GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS_kext) $(GCC_PREPROCESSOR_DEFINITIONS_kext_$(PTHREAD_VARIANT))
GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS = YES
GCC_TREAT_INCOMPATIBLE_POINTER_TYPE_WARNINGS_AS_ERRORS = YES
+++ /dev/null
-#include "kext.xcconfig"
-
-GCC_OPTIMIZATION_LEVEL = 0
-GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS_kext) MACH_ASSERT DEBUG
\ No newline at end of file
--- /dev/null
+DEBUG = YES
+
+#include "kext.xcconfig"
BASE_PREPROCESSOR_MACROS = __LIBC__ __DARWIN_UNIX03=1 __DARWIN_64_BIT_INO_T=1 __DARWIN_NON_CANCELABLE=1 __DARWIN_VERS_1050=1 _FORTIFY_SOURCE=0 __PTHREAD_BUILDING_PTHREAD__=1 $(SIM_PREPROCESSOR_MACROS)
GCC_PREPROCESSOR_DEFINITIONS = $(BASE_PREPROCESSOR_MACROS)
-OTHER_CFLAGS = -fno-stack-protector -fdollars-in-identifiers -fno-common -fno-builtin -momit-leaf-frame-pointer $($(PRODUCT_NAME)_CFLAGS)
-OTHER_CFLAGS_debug = -fno-stack-protector -fno-inline -O0 -DDEBUG=1
+OTHER_CFLAGS = $(OTHER_CFLAGS_$(CURRENT_VARIANT))
+OTHER_CFLAGS_normal = -fno-stack-protector -fdollars-in-identifiers -fno-common -fno-builtin -momit-leaf-frame-pointer $($(PRODUCT_NAME)_CFLAGS)
+OTHER_CFLAGS_debug = -fno-stack-protector -fno-inline -O0 -DDEBUG=1 $($(PRODUCT_NAME)_CFLAGS)
+
OTHER_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodescripts/pthread.aliases -Wl,-umbrella,System -L/usr/lib/system -lsystem_kernel -lsystem_platform -ldyld -lcompiler_rt $(UPLINK_LDFLAGS) $(CR_LDFLAGS)
// CrashReporter
+++ /dev/null
-#include "pthread.xcconfig"
-
-BUILD_VARIANTS = normal debug
-OTHER_CFLAGS = $(OTHER_CFLAGS_debug)
--- /dev/null
+#!/bin/bash
+
+if [[ "x${ACTION}" == "xinstall" && "x${SKIP_INSTALL}" == "xNO" ]]; then
+ $@
+else
+ exit 0
+fi
--- /dev/null
+#include "pthread.xcconfig"
+INSTALL_PATH = /usr/local/lib/system
+EXECUTABLE_PREFIX = lib
+PRODUCT_NAME = pthread
+GCC_PREPROCESSOR_DEFINITIONS = $(BASE_PREPROCESSOR_MACROS) VARIANT_STATIC=1
+OTHER_LDFLAGS =