git.saurik.com Git - apple/libpthread.git/commitdiff
snapshot: libpthread-301.1.6.tar.gz    tags: macos-1013, v301.1.6
author    Apple <opensource@apple.com>
          Tue, 26 Sep 2017 16:29:05 +0000 (16:29 +0000)
committer Apple <opensource@apple.com>
          Tue, 26 Sep 2017 16:29:05 +0000 (16:29 +0000)
88 files changed:
kern/kern_init.c
kern/kern_internal.h
kern/kern_support.c
kern/kern_trace.h
kern/synch_internal.h
kern/workqueue_internal.h
libpthread.xcodeproj/project.pbxproj
lldbmacros/pthread.py
man/pthread_attr_set_getinheritsched.3
man/pthread_attr_set_getschedparam.3
man/pthread_kill.2
man/pthread_mutex_destroy.3
private/private.h
private/qos_private.h
private/tsd_private.h
private/workqueue_private.h
pthread/introspection.h
pthread/pthread.h
pthread/pthread_spis.h
pthread/qos.h
pthread/spawn.h
src/internal.h
src/pthread.c
src/pthread_atfork.c
src/pthread_cancelable.c
src/pthread_cond.c
src/pthread_mutex.c
src/pthread_mutex_up.c [deleted file]
src/pthread_rwlock.c
src/pthread_support.c
src/pthread_tsd.c
src/qos.c
src/resolver.c [deleted file]
src/resolver.h [deleted file]
src/resolver/resolver.c [new file with mode: 0644]
src/resolver/resolver.h [new file with mode: 0644]
src/resolver/resolver_internal.h [new file with mode: 0644]
src/resolver_internal.h [deleted file]
src/thread_setup.c
src/variants/pthread_rwlock_legacy.c
sys/_pthread/_pthread_attr_t.h
sys/_pthread/_pthread_cond_t.h
sys/_pthread/_pthread_condattr_t.h
sys/_pthread/_pthread_key_t.h
sys/_pthread/_pthread_mutex_t.h
sys/_pthread/_pthread_mutexattr_t.h
sys/_pthread/_pthread_once_t.h
sys/_pthread/_pthread_rwlock_t.h
sys/_pthread/_pthread_rwlockattr_t.h
sys/_pthread/_pthread_t.h
sys/qos.h
tests/Makefile
tests/add_timer_termination.c
tests/atfork.c
tests/bsdthread_set_self.c
tests/cond.c
tests/cond_timed.c
tests/custom_stack.c
tests/darwintest_defaults.h [new file with mode: 0644]
tests/detach.c [new file with mode: 0644]
tests/join.c
tests/main_stack.c
tests/main_stack_custom.c
tests/main_stack_legacy.c
tests/mutex.c
tests/mutex_try.c
tests/once_cancel.c
tests/pthread_attr_setstacksize.c
tests/pthread_bulk_create.c
tests/pthread_cancel.c
tests/pthread_cwd.c
tests/pthread_exit.c
tests/pthread_get_qos_class_np.c [new file with mode: 0644]
tests/pthread_introspection.c
tests/pthread_setspecific.c
tests/pthread_threadid_np.c
tests/rdar_32848402.c [new file with mode: 0644]
tests/stack_aslr.c
tests/tsd.c
tests/wq_limits.c [new file with mode: 0644]
tools/pthtrace.lua [new file with mode: 0755]
tools/wqtrace.lua
xcodescripts/eos.xcconfig
xcodescripts/kext.xcconfig
xcodescripts/pthread.xcconfig
xcodescripts/resolved.xcconfig [new file with mode: 0644]
xcodescripts/resolver.xcconfig [new file with mode: 0644]
xcodescripts/static.xcconfig

kern/kern_init.c
index b45d277ae7ea600330afadca344c7002888bb984..3de9b5d034ed71a41164c8f94de37d1534db8b10 100644 (file)
@@ -17,7 +17,7 @@ pthread_callbacks_t pthread_kern;
 
 const struct pthread_functions_s pthread_internal_functions = {
        .pthread_init = _pthread_init,
-       .fill_procworkqueue = _fill_procworkqueue,
+       .fill_procworkqueue = (int(*)(proc_t, void*))_fill_procworkqueue,
        .get_pwq_state_kdp = _get_pwq_state_kdp,
        .workqueue_exit = _workqueue_exit,
        .workqueue_mark_exiting = _workqueue_mark_exiting,
@@ -51,6 +51,9 @@ const struct pthread_functions_s pthread_internal_functions = {
        .workq_reqthreads = _workq_reqthreads,
        .thread_qos_from_pthread_priority = _thread_qos_from_pthread_priority,
        .pthread_priority_canonicalize2 = _pthread_priority_canonicalize,
+       .workq_thread_has_been_unbound = _workq_thread_has_been_unbound,
+       .workq_threadreq = workq_kern_threadreq,
+       .workq_threadreq_modify = workq_kern_threadreq_modify,
 };
 
 kern_return_t pthread_start(__unused kmod_info_t * ki, __unused void *d)
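
The cast added to .fill_procworkqueue above reconciles two prototypes: the xnu
shims table presumably declares the slot with a generic void * argument, while
the kext implementation takes a concrete info-struct pointer. A minimal
user-space sketch of the same pattern; every type below is an illustrative
stand-in, not the real xnu definition, and the call through the converted
pointer relies on the two pointer types being ABI-compatible, exactly as the
kernel does here.

    #include <stdio.h>

    typedef struct proc *proc_t;
    struct proc_workqueueinfo { int pwq_nthreads; };   /* stand-in type */

    /* hypothetical concrete implementation */
    static int
    fill_procworkqueue(proc_t p, struct proc_workqueueinfo *pwqinfo)
    {
            (void)p;
            pwqinfo->pwq_nthreads = 4;
            return 0;
    }

    /* hypothetical function table matching the generic slot type */
    struct pthread_functions_s {
            int (*fill_procworkqueue)(proc_t, void *);
    };

    static const struct pthread_functions_s fns = {
            /* the cast bridges the specific prototype to the generic slot */
            .fill_procworkqueue = (int (*)(proc_t, void *))fill_procworkqueue,
    };

    int
    main(void)
    {
            struct proc_workqueueinfo info;
            fns.fill_procworkqueue(NULL, &info);
            printf("nthreads = %d\n", info.pwq_nthreads);
            return 0;
    }
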
kern/kern_internal.h
index 3f3da0a7761510c246d77e91cf6144a7f5ed5691..80533c35bceb8383975b18de184f3e9b98186bdd 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #define _SYS_PTHREAD_INTERNAL_H_
 
 #ifdef KERNEL
+#include <stdatomic.h>
 #include <kern/thread_call.h>
+#include <kern/kcdata.h>
 #include <sys/pthread_shims.h>
 #include <sys/queue.h>
-#include <kern/kcdata.h>
+#include <sys/proc_info.h>
+
+#ifdef __arm64__
+#define PTHREAD_INLINE_RMW_ATOMICS 0
+#else
+#define PTHREAD_INLINE_RMW_ATOMICS 1
 #endif
+#endif // KERNEL
 
 #include "kern/synch_internal.h"
 #include "kern/workqueue_internal.h"
@@ -53,6 +61,7 @@
 #define PTHREAD_FEATURE_QOS_MAINTENANCE        0x10            /* is QOS_CLASS_MAINTENANCE available */
 #define PTHREAD_FEATURE_RESERVED               0x20            /* burnt, shipped in OSX 10.11 & iOS 9 with partial kevent delivery support */
 #define PTHREAD_FEATURE_KEVENT          0x40           /* supports direct kevent delivery */
+#define PTHREAD_FEATURE_WORKLOOP          0x80         /* supports workloops */
 #define PTHREAD_FEATURE_QOS_DEFAULT            0x40000000      /* the kernel supports QOS_CLASS_DEFAULT */
 
 /* pthread bsdthread_ctl sysctl commands */
@@ -65,6 +74,7 @@
 #define BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH    0x400   /* bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH, thread_port, priority, 0) */
 #define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD                           0x401   /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD, thread_port, priority, resource) */
 #define BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET                         0x402   /* bsdthread_ctl(BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET, 0|1 (?reset_all), resource, 0) */
+#define BSDTHREAD_CTL_QOS_MAX_PARALLELISM      0x800   /* bsdthread_ctl(BSDTHREAD_CTL_QOS_MAX_PARALLELISM, priority, flags, 0) */
 
 /* qos_class_t is mapped into one of these bits in the bitfield, this mapping now exists here because
  * libdispatch requires the QoS class mask of the pthread_priority_t to be a bitfield.
 #define __PTHREAD_PRIORITY_CBIT_MAINTENANCE 0x1
 #define __PTHREAD_PRIORITY_CBIT_UNSPECIFIED 0x0
 
-/* Added support for QOS_CLASS_MAINTENANCE */
-static inline pthread_priority_t
-_pthread_priority_make_newest(qos_class_t qc, int rel, unsigned long flags)
+static inline int
+_pthread_qos_class_to_thread_qos(qos_class_t qos)
 {
-       pthread_priority_t cls;
-       switch (qc) {
-               case QOS_CLASS_USER_INTERACTIVE: cls = __PTHREAD_PRIORITY_CBIT_USER_INTERACTIVE; break;
-               case QOS_CLASS_USER_INITIATED: cls = __PTHREAD_PRIORITY_CBIT_USER_INITIATED; break;
-               case QOS_CLASS_DEFAULT: cls = __PTHREAD_PRIORITY_CBIT_DEFAULT; break;
-               case QOS_CLASS_UTILITY: cls = __PTHREAD_PRIORITY_CBIT_UTILITY; break;
-               case QOS_CLASS_BACKGROUND: cls = __PTHREAD_PRIORITY_CBIT_BACKGROUND; break;
-               case QOS_CLASS_MAINTENANCE: cls = __PTHREAD_PRIORITY_CBIT_MAINTENANCE; break;
-               case QOS_CLASS_UNSPECIFIED:
-               default:
-                       cls = __PTHREAD_PRIORITY_CBIT_UNSPECIFIED;
-                       rel = 1; // results in priority bits == 0 <rdar://problem/16184900>
-                       break;
+       switch (qos) {
+       case QOS_CLASS_USER_INTERACTIVE: return THREAD_QOS_USER_INTERACTIVE;
+       case QOS_CLASS_USER_INITIATED: return THREAD_QOS_USER_INITIATED;
+       case QOS_CLASS_DEFAULT: return THREAD_QOS_LEGACY;
+       case QOS_CLASS_UTILITY: return THREAD_QOS_UTILITY;
+       case QOS_CLASS_BACKGROUND: return THREAD_QOS_BACKGROUND;
+       case QOS_CLASS_MAINTENANCE: return THREAD_QOS_MAINTENANCE;
+       default: return THREAD_QOS_UNSPECIFIED;
        }
-
-       pthread_priority_t p =
-               (flags & _PTHREAD_PRIORITY_FLAGS_MASK) |
-               ((cls << _PTHREAD_PRIORITY_QOS_CLASS_SHIFT) & _PTHREAD_PRIORITY_QOS_CLASS_MASK) |
-               (((uint8_t)rel - 1) & _PTHREAD_PRIORITY_PRIORITY_MASK);
-
-       return p;
 }
 
-/* Added support for QOS_CLASS_LEGACY and QOS_CLASS_INHERIT */
 static inline pthread_priority_t
-_pthread_priority_make_version2(qos_class_t qc, int rel, unsigned long flags)
+_pthread_priority_make_newest(qos_class_t qc, int rel, unsigned long flags)
 {
        pthread_priority_t cls;
        switch (qc) {
@@ -115,6 +111,7 @@ _pthread_priority_make_version2(qos_class_t qc, int rel, unsigned long flags)
                case QOS_CLASS_DEFAULT: cls = __PTHREAD_PRIORITY_CBIT_DEFAULT; break;
                case QOS_CLASS_UTILITY: cls = __PTHREAD_PRIORITY_CBIT_UTILITY; break;
                case QOS_CLASS_BACKGROUND: cls = __PTHREAD_PRIORITY_CBIT_BACKGROUND; break;
+               case QOS_CLASS_MAINTENANCE: cls = __PTHREAD_PRIORITY_CBIT_MAINTENANCE; break;
                case QOS_CLASS_UNSPECIFIED:
                default:
                        cls = __PTHREAD_PRIORITY_CBIT_UNSPECIFIED;
@@ -122,14 +119,6 @@ _pthread_priority_make_version2(qos_class_t qc, int rel, unsigned long flags)
                        break;
        }
 
-       /*
-        * __PTHREAD_PRIORITY_CBIT_MAINTENANCE was defined as the 0th bit by shifting all the
-        * existing bits to the left by one.  So for backward compatiblity for kernels that does
-        * not support QOS_CLASS_MAINTENANCE, we have to make it up by shifting the cls bit to
-        * right by one.
-        */
-       cls >>= 1;
-
        pthread_priority_t p =
                (flags & _PTHREAD_PRIORITY_FLAGS_MASK) |
                ((cls << _PTHREAD_PRIORITY_QOS_CLASS_SHIFT) & _PTHREAD_PRIORITY_QOS_CLASS_MASK) |
@@ -138,7 +127,6 @@ _pthread_priority_make_version2(qos_class_t qc, int rel, unsigned long flags)
        return p;
 }
 
-/* QOS_CLASS_MAINTENANCE is supported */
 static inline qos_class_t
 _pthread_priority_get_qos_newest(pthread_priority_t priority)
 {
@@ -156,35 +144,6 @@ _pthread_priority_get_qos_newest(pthread_priority_t priority)
        return qc;
 }
 
-/* QOS_CLASS_MAINTENANCE is not supported */
-static inline qos_class_t
-_pthread_priority_get_qos_version2(pthread_priority_t priority)
-{
-       qos_class_t qc;
-       pthread_priority_t cls;
-
-       cls = (priority & _PTHREAD_PRIORITY_QOS_CLASS_MASK) >> _PTHREAD_PRIORITY_QOS_CLASS_SHIFT;
-
-       /*
-        * __PTHREAD_PRIORITY_CBIT_MAINTENANCE was defined as the 0th bit by shifting all the
-        * existing bits to the left by one.  So for backward compatiblity for kernels that does
-        * not support QOS_CLASS_MAINTENANCE, pthread_priority_make() shifted the cls bit to the
-        * right by one.  Therefore we have to shift it back during decoding the priority bit.
-        */
-       cls <<= 1;
-
-       switch (cls) {
-               case __PTHREAD_PRIORITY_CBIT_USER_INTERACTIVE: qc = QOS_CLASS_USER_INTERACTIVE; break;
-               case __PTHREAD_PRIORITY_CBIT_USER_INITIATED: qc = QOS_CLASS_USER_INITIATED; break;
-               case __PTHREAD_PRIORITY_CBIT_DEFAULT: qc = QOS_CLASS_DEFAULT; break;
-               case __PTHREAD_PRIORITY_CBIT_UTILITY: qc = QOS_CLASS_UTILITY; break;
-               case __PTHREAD_PRIORITY_CBIT_BACKGROUND: qc = QOS_CLASS_BACKGROUND; break;
-               case __PTHREAD_PRIORITY_CBIT_UNSPECIFIED:
-               default: qc = QOS_CLASS_UNSPECIFIED; break;
-       }
-       return qc;
-}
-
 #define _pthread_priority_get_relpri(priority) \
        ((int8_t)((priority & _PTHREAD_PRIORITY_PRIORITY_MASK) >> _PTHREAD_PRIORITY_PRIORITY_SHIFT) + 1)
 
@@ -197,17 +156,8 @@ _pthread_priority_get_qos_version2(pthread_priority_t priority)
                   _pthread_priority_get_relpri(priority); \
        })
 
-#define _pthread_priority_split_version2(priority, qos, relpri) \
-       ({ qos = _pthread_priority_get_qos_version2(priority); \
-          relpri = (qos == QOS_CLASS_UNSPECIFIED) ? 0 : \
-                  _pthread_priority_get_relpri(priority); \
-       })
-
-/* <rdar://problem/15969976> Required for backward compatibility on older kernels. */
-#define _pthread_priority_make_version1(qos, relpri, flags) \
-       (((flags >> 15) & 0xffff0000) | \
-       ((qos << 8) & 0x0000ff00) | \
-       (((uint8_t)relpri - 1) & 0x000000ff))
+#define _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL 0x1
+#define _PTHREAD_QOS_PARALLELISM_REALTIME 0x2
 
 /* userspace <-> kernel registration struct, for passing data to/from the kext during main thread init. */
 struct _pthread_registration_data {
@@ -225,6 +175,8 @@ struct _pthread_registration_data {
        uint64_t dispatch_queue_offset; /* copy-in */
        uint64_t /* pthread_priority_t */ main_qos; /* copy-out */
        uint32_t tsd_offset; /* copy-in */
+       uint32_t return_to_kernel_offset; /* copy-in */
+       uint32_t mach_thread_self_offset; /* copy-in */
 } __attribute__ ((packed));
 
 #ifdef KERNEL
@@ -237,7 +189,8 @@ struct _pthread_registration_data {
        PTHREAD_FEATURE_SETSELF | \
        PTHREAD_FEATURE_QOS_MAINTENANCE | \
        PTHREAD_FEATURE_QOS_DEFAULT | \
-       PTHREAD_FEATURE_KEVENT )
+       PTHREAD_FEATURE_KEVENT | \
+       PTHREAD_FEATURE_WORKLOOP )
 
 extern pthread_callbacks_t pthread_kern;
 
@@ -322,6 +275,9 @@ extern thread_call_t psynch_thcall;
 
 struct uthread* current_uthread(void);
 
+#define WORKQ_REQTHREADS_THREADREQ   0x1
+#define WORKQ_REQTHREADS_NOEMERGENCY 0x2
+
 // Call for the kernel's kevent system to request threads.  A list of QoS/event
 // counts should be provided, sorted by flags and then QoS class.  If the
 // identity of the thread to handle the request is known, it will be returned.
@@ -334,6 +290,15 @@ integer_t _thread_qos_from_pthread_priority(unsigned long pri, unsigned long *fl
 // Clear out extraneous flags/pri info for putting in voucher
 pthread_priority_t _pthread_priority_canonicalize(pthread_priority_t pri, boolean_t for_propagation);
 
+boolean_t _workq_thread_has_been_unbound(thread_t th, int qos_class);
+
+int workq_kern_threadreq(struct proc *p, workq_threadreq_t req,
+               enum workq_threadreq_type, unsigned long priority, int flags);
+
+int workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req,
+               enum workq_threadreq_op operation,
+               unsigned long arg1, unsigned long arg2);
+
 #endif // KERNEL
 
 #endif /* _SYS_PTHREAD_INTERNAL_H_ */
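
For reference, the pthread_priority_t packing implemented by
_pthread_priority_make_newest() and decoded by _pthread_priority_get_relpri()
can be exercised in isolation. In this stand-alone sketch the shift and mask
values are assumptions standing in for the _PTHREAD_PRIORITY_* definitions,
which this diff does not show; only the "(uint8_t)rel - 1" relative-priority
encoding is taken directly from the code above.

    #include <stdint.h>
    #include <stdio.h>

    #define QOS_CLASS_SHIFT   8           /* assumed */
    #define QOS_CLASS_MASK    0x00ffff00  /* assumed */
    #define PRIORITY_MASK     0x000000ff  /* assumed */
    #define FLAGS_MASK        0xff000000  /* assumed */

    typedef unsigned long pthread_priority_t;

    static pthread_priority_t
    priority_make(unsigned long cbit, int rel, unsigned long flags)
    {
            return (flags & FLAGS_MASK) |
                   ((cbit << QOS_CLASS_SHIFT) & QOS_CLASS_MASK) |
                   (((uint8_t)rel - 1) & PRIORITY_MASK); /* rel in [-15, 0] */
    }

    static int
    priority_get_relpri(pthread_priority_t p)
    {
            /* inverse of the "(uint8_t)rel - 1" encoding above */
            return (int8_t)(p & PRIORITY_MASK) + 1;
    }

    int
    main(void)
    {
            pthread_priority_t p = priority_make(0x4 /* assumed UTILITY cbit */, -5, 0);
            printf("relpri = %d\n", priority_get_relpri(p)); /* prints -5 */
            return 0;
    }
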
kern/kern_support.c
index f63a7818e4e94b5abae8db7e16021e681c3ae0bc..0d269e2277cf16c3abbb65ae890198c9573b695d 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /* Copyright (c) 1995-2005 Apple Computer, Inc. All Rights Reserved */
@@ -84,8 +84,7 @@ extern void panic(const char *string, ...) __printflike(1,2) __dead2;
 #include <kern/clock.h>
 #include <mach/kern_return.h>
 #include <kern/thread.h>
-#include <kern/sched_prim.h>
-#include <kern/kalloc.h>
+#include <kern/zalloc.h>
 #include <kern/sched_prim.h>   /* for thread_exception_return */
 #include <kern/processor.h>
 #include <kern/assert.h>
@@ -120,44 +119,41 @@ lck_grp_attr_t   *pthread_lck_grp_attr;
 lck_grp_t    *pthread_lck_grp;
 lck_attr_t   *pthread_lck_attr;
 
+zone_t pthread_zone_workqueue;
+zone_t pthread_zone_threadlist;
+zone_t pthread_zone_threadreq;
+
 extern void thread_set_cthreadself(thread_t thread, uint64_t pself, int isLP64);
 extern void workqueue_thread_yielded(void);
 
-enum run_nextreq_mode {
-       RUN_NEXTREQ_DEFAULT,
-       RUN_NEXTREQ_DEFAULT_KEVENT,
-       RUN_NEXTREQ_OVERCOMMIT,
-       RUN_NEXTREQ_OVERCOMMIT_KEVENT,
-       RUN_NEXTREQ_DEFERRED_OVERCOMMIT,
-       RUN_NEXTREQ_UNCONSTRAINED,
-       RUN_NEXTREQ_EVENT_MANAGER,
-       RUN_NEXTREQ_ADD_TIMER
-};
-static thread_t workqueue_run_nextreq(proc_t p, struct workqueue *wq, thread_t th,
-               enum run_nextreq_mode mode, pthread_priority_t prio,
-               bool kevent_bind_via_return);
-
-static boolean_t workqueue_run_one(proc_t p, struct workqueue *wq, boolean_t overcommit, pthread_priority_t priority);
-
-static void wq_runreq(proc_t p, thread_t th, struct workqueue *wq,
-               struct threadlist *tl, boolean_t return_directly, boolean_t deferred_kevent);
-
-static void _setup_wqthread(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *tl, bool first_use);
+#define WQ_SETUP_FIRST_USE  1
+#define WQ_SETUP_CLEAR_VOUCHER  2
+static void _setup_wqthread(proc_t p, thread_t th, struct workqueue *wq,
+               struct threadlist *tl, int flags);
 
 static void reset_priority(struct threadlist *tl, pthread_priority_t pri);
 static pthread_priority_t pthread_priority_from_wq_class_index(struct workqueue *wq, int index);
 
 static void wq_unpark_continue(void* ptr, wait_result_t wait_result) __dead2;
 
-static boolean_t workqueue_addnewthread(struct workqueue *wq, boolean_t ignore_constrained_thread_limit);
-
+static bool workqueue_addnewthread(proc_t p, struct workqueue *wq);
 static void workqueue_removethread(struct threadlist *tl, bool fromexit, bool first_use);
 static void workqueue_lock_spin(struct workqueue *);
 static void workqueue_unlock(struct workqueue *);
 
-static boolean_t may_start_constrained_thread(struct workqueue *wq, uint32_t at_priclass, uint32_t my_priclass, boolean_t *start_timer);
+#define WQ_RUN_TR_THROTTLED 0
+#define WQ_RUN_TR_THREAD_NEEDED 1
+#define WQ_RUN_TR_THREAD_STARTED 2
+#define WQ_RUN_TR_EXITING 3
+static int workqueue_run_threadreq_and_unlock(proc_t p, struct workqueue *wq,
+               struct threadlist *tl, struct threadreq *req, bool may_add_new_thread);
+
+static bool may_start_constrained_thread(struct workqueue *wq,
+               uint32_t at_priclass, struct threadlist *tl, bool may_start_timer);
 
 static mach_vm_offset_t stack_addr_hint(proc_t p, vm_map_t vmap);
+static boolean_t wq_thread_is_busy(uint64_t cur_ts,
+               _Atomic uint64_t *lastblocked_tsp);
 
 int proc_settargetconc(pid_t pid, int queuenum, int32_t targetconc);
 int proc_setalltargetconc(pid_t pid, int32_t * targetconcp);
@@ -173,7 +169,7 @@ int proc_setalltargetconc(pid_t pid, int32_t * targetconcp);
 #define PTHREAD_T_OFFSET 0
 
 /*
- * Flags filed passed to bsdthread_create and back in pthread_start 
+ * Flags filed passed to bsdthread_create and back in pthread_start
 31  <---------------------------------> 0
 _________________________________________
 | flags(8) | policy(8) | importance(16) |
@@ -198,20 +194,12 @@ _________________________________________
 
 #pragma mark sysctls
 
-uint32_t wq_yielded_threshold          = WQ_YIELDED_THRESHOLD;
-uint32_t wq_yielded_window_usecs       = WQ_YIELDED_WINDOW_USECS;
-uint32_t wq_stalled_window_usecs       = WQ_STALLED_WINDOW_USECS;
-uint32_t wq_reduce_pool_window_usecs   = WQ_REDUCE_POOL_WINDOW_USECS;
-uint32_t wq_max_timer_interval_usecs   = WQ_MAX_TIMER_INTERVAL_USECS;
-uint32_t wq_max_threads                        = WORKQUEUE_MAXTHREADS;
-uint32_t wq_max_constrained_threads    = WORKQUEUE_MAXTHREADS / 8;
-uint32_t wq_max_concurrency = 1; // set to ncpus on load
-
-SYSCTL_INT(_kern, OID_AUTO, wq_yielded_threshold, CTLFLAG_RW | CTLFLAG_LOCKED,
-          &wq_yielded_threshold, 0, "");
-
-SYSCTL_INT(_kern, OID_AUTO, wq_yielded_window_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
-          &wq_yielded_window_usecs, 0, "");
+static uint32_t wq_stalled_window_usecs        = WQ_STALLED_WINDOW_USECS;
+static uint32_t wq_reduce_pool_window_usecs    = WQ_REDUCE_POOL_WINDOW_USECS;
+static uint32_t wq_max_timer_interval_usecs    = WQ_MAX_TIMER_INTERVAL_USECS;
+static uint32_t wq_max_threads                 = WORKQUEUE_MAXTHREADS;
+static uint32_t wq_max_constrained_threads     = WORKQUEUE_MAXTHREADS / 8;
+static uint32_t wq_max_concurrency[WORKQUEUE_NUM_BUCKETS + 1]; // set to ncpus on load
 
 SYSCTL_INT(_kern, OID_AUTO, wq_stalled_window_usecs, CTLFLAG_RW | CTLFLAG_LOCKED,
           &wq_stalled_window_usecs, 0, "");
@@ -229,9 +217,6 @@ SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOC
           &wq_max_constrained_threads, 0, "");
 
 #ifdef DEBUG
-SYSCTL_INT(_kern, OID_AUTO, wq_max_concurrency, CTLFLAG_RW | CTLFLAG_LOCKED,
-                  &wq_max_concurrency, 0, "");
-
 static int wq_kevent_test SYSCTL_HANDLER_ARGS;
 SYSCTL_PROC(_debug, OID_AUTO, wq_kevent_test, CTLFLAG_MASKED | CTLFLAG_RW | CTLFLAG_LOCKED | CTLFLAG_ANYBODY | CTLTYPE_OPAQUE, NULL, 0, wq_kevent_test, 0, "-");
 #endif
@@ -243,6 +228,157 @@ uint32_t pthread_debug_tracing = 1;
 SYSCTL_INT(_kern, OID_AUTO, pthread_debug_tracing, CTLFLAG_RW | CTLFLAG_LOCKED,
                   &pthread_debug_tracing, 0, "")
 
+/*
+ *       +-----+-----+-----+-----+-----+-----+-----+
+ *       | MT  | BG  | UT  | DE  | IN  | UN  | mgr |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | pri |  5  |  4  |  3  |  2  |  1  |  0  |  6  |
+ * | qos |  1  |  2  |  3  |  4  |  5  |  6  |  7  |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+static inline uint32_t
+_wq_bucket_to_thread_qos(int pri)
+{
+       if (pri == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+               return WORKQUEUE_EVENT_MANAGER_BUCKET + 1;
+       }
+       return WORKQUEUE_EVENT_MANAGER_BUCKET - pri;
+}
+
+#pragma mark wq_thactive
+
+#if defined(__LP64__)
+// Layout is:
+//   7 * 16 bits for each QoS bucket request count (including manager)
+//   3 bits of best QoS among all pending constrained requests
+//   13 bits of zeroes
+#define WQ_THACTIVE_BUCKET_WIDTH 16
+#define WQ_THACTIVE_QOS_SHIFT    (7 * WQ_THACTIVE_BUCKET_WIDTH)
+#else
+// Layout is:
+//   6 * 10 bits for each QoS bucket request count (except manager)
+//   1 bit for the manager bucket
+//   3 bits of best QoS among all pending constrained requests
+#define WQ_THACTIVE_BUCKET_WIDTH 10
+#define WQ_THACTIVE_QOS_SHIFT    (6 * WQ_THACTIVE_BUCKET_WIDTH + 1)
+#endif
+#define WQ_THACTIVE_BUCKET_MASK  ((1U << WQ_THACTIVE_BUCKET_WIDTH) - 1)
+#define WQ_THACTIVE_BUCKET_HALF  (1U << (WQ_THACTIVE_BUCKET_WIDTH - 1))
+#define WQ_THACTIVE_NO_PENDING_REQUEST 6
+
+_Static_assert(sizeof(wq_thactive_t) * CHAR_BIT - WQ_THACTIVE_QOS_SHIFT >= 3,
+               "Make sure we have space to encode a QoS");
+
+static inline wq_thactive_t
+_wq_thactive_fetch_and_add(struct workqueue *wq, wq_thactive_t offset)
+{
+#if PTHREAD_INLINE_RMW_ATOMICS || !defined(__LP64__)
+       return atomic_fetch_add_explicit(&wq->wq_thactive, offset,
+                       memory_order_relaxed);
+#else
+       return pthread_kern->atomic_fetch_add_128_relaxed(&wq->wq_thactive, offset);
+#endif
+}
+
+static inline wq_thactive_t
+_wq_thactive(struct workqueue *wq)
+{
+#if PTHREAD_INLINE_RMW_ATOMICS || !defined(__LP64__)
+       return atomic_load_explicit(&wq->wq_thactive, memory_order_relaxed);
+#else
+       return pthread_kern->atomic_load_128_relaxed(&wq->wq_thactive);
+#endif
+}
+
+#define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \
+               ((tha) >> WQ_THACTIVE_QOS_SHIFT)
+
+static inline uint32_t
+_wq_thactive_best_constrained_req_qos(struct workqueue *wq)
+{
+       // Avoid expensive atomic operations: the three bits we're loading are in
+       // a single byte, and always updated under the workqueue lock
+       wq_thactive_t v = *(wq_thactive_t *)&wq->wq_thactive;
+       return WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(v);
+}
+
+static inline wq_thactive_t
+_wq_thactive_set_best_constrained_req_qos(struct workqueue *wq,
+               uint32_t orig_qos, uint32_t new_qos)
+{
+       wq_thactive_t v;
+       v = (wq_thactive_t)(new_qos - orig_qos) << WQ_THACTIVE_QOS_SHIFT;
+       /*
+        * We can do an atomic add relative to the initial load because updates
+        * to this qos are always serialized under the workqueue lock.
+        */
+       return _wq_thactive_fetch_and_add(wq, v) + v;
+}
+
+static inline wq_thactive_t
+_wq_thactive_offset_for_qos(int qos)
+{
+       return (wq_thactive_t)1 << (qos * WQ_THACTIVE_BUCKET_WIDTH);
+}
+
+static inline wq_thactive_t
+_wq_thactive_inc(struct workqueue *wq, int qos)
+{
+       return _wq_thactive_fetch_and_add(wq, _wq_thactive_offset_for_qos(qos));
+}
+
+static inline wq_thactive_t
+_wq_thactive_dec(struct workqueue *wq, int qos)
+{
+       return _wq_thactive_fetch_and_add(wq, -_wq_thactive_offset_for_qos(qos));
+}
+
+static inline wq_thactive_t
+_wq_thactive_move(struct workqueue *wq, int oldqos, int newqos)
+{
+       return _wq_thactive_fetch_and_add(wq, _wq_thactive_offset_for_qos(newqos) -
+                       _wq_thactive_offset_for_qos(oldqos));
+}
+
+static inline uint32_t
+_wq_thactive_aggregate_downto_qos(struct workqueue *wq, wq_thactive_t v,
+               int qos, uint32_t *busycount, uint32_t *max_busycount)
+{
+       uint32_t count = 0, active;
+       uint64_t curtime;
+
+#ifndef __LP64__
+       /*
+        * on 32bits the manager bucket is a single bit and the best constrained
+        * request QoS 3 bits are where the 10 bits of a regular QoS bucket count
+        * would be. Mask them out.
+        */
+       v &= ~(~0ull << WQ_THACTIVE_QOS_SHIFT);
+#endif
+       if (busycount) {
+               curtime = mach_absolute_time();
+               *busycount = 0;
+       }
+       if (max_busycount) {
+               *max_busycount = qos + 1;
+       }
+       for (int i = 0; i <= qos; i++, v >>= WQ_THACTIVE_BUCKET_WIDTH) {
+               active = v & WQ_THACTIVE_BUCKET_MASK;
+               count += active;
+               if (busycount && wq->wq_thscheduled_count[i] > active) {
+                       if (wq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i])) {
+                               /*
+                                * We only consider the last blocked thread for a given bucket
+                                * as busy because we don't want to take the list lock in each
+                                * sched callback. However this is an approximation that could
+                                * contribute to thread creation storms.
+                                */
+                               (*busycount)++;
+                       }
+               }
+       }
+       return count;
+}
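
The wq_thactive scheme above packs one per-QoS-bucket count plus the best
pending constrained QoS into a single atomic word, so activity bookkeeping is
a relaxed fetch-add away. Below is a user-space sketch using the 32-bit kernel
layout (10-bit buckets, one manager bit, best-QoS bits on top) so everything
fits in a uint64_t with C11 atomics; the constants mirror the diff, but the
scenario in main() is made up.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BUCKET_WIDTH 10
    #define BUCKET_MASK  ((1u << BUCKET_WIDTH) - 1)
    #define QOS_SHIFT    (6 * BUCKET_WIDTH + 1) /* 6 buckets + 1 manager bit */

    typedef uint64_t wq_thactive_t;
    static _Atomic wq_thactive_t thactive;

    static wq_thactive_t
    offset_for_qos(int qos)
    {
            return (wq_thactive_t)1 << (qos * BUCKET_WIDTH);
    }

    static void
    inc(int qos)
    {
            atomic_fetch_add_explicit(&thactive, offset_for_qos(qos),
                            memory_order_relaxed);
    }

    static void
    dec(int qos)
    {
            atomic_fetch_sub_explicit(&thactive, offset_for_qos(qos),
                            memory_order_relaxed);
    }

    /* sum of active counts in buckets 0..qos */
    static uint32_t
    aggregate_downto_qos(int qos)
    {
            wq_thactive_t v = atomic_load_explicit(&thactive,
                            memory_order_relaxed);
            v &= ~(~0ull << QOS_SHIFT); /* strip best-constrained-QoS bits */
            uint32_t count = 0;
            for (int i = 0; i <= qos; i++, v >>= BUCKET_WIDTH)
                    count += v & BUCKET_MASK;
            return count;
    }

    int
    main(void)
    {
            inc(1); inc(1); inc(3); dec(1);
            printf("buckets 0..3: %u\n", aggregate_downto_qos(3)); /* 2 */
            return 0;
    }

Relaxed ordering suffices here for the same reason it does in the kernel code:
the counters are advisory inputs to scheduling heuristics, not synchronization.
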
 
 #pragma mark - Process/Thread Setup/Teardown syscalls
 
@@ -272,15 +408,30 @@ stack_addr_hint(proc_t p, vm_map_t vmap)
                stackaddr = SHARED_REGION_BASE_I386 + SHARED_REGION_SIZE_I386 + aslr_offset;
        }
 #elif defined(__arm__) || defined(__arm64__)
-       // vm_map_get_max_aslr_slide_pages ensures 1MB of slide, we do better
-       aslr_offset = random() % ((proc64bit ? 4 : 2) * PTH_DEFAULT_STACKSIZE);
-       aslr_offset = vm_map_trunc_page_mask((vm_map_offset_t)aslr_offset, vm_map_page_mask(vmap));
-       if (proc64bit) {
-               // 64 stacks below nanomalloc (see NANOZONE_SIGNATURE)
-               stackaddr = 0x170000000 - 64 * PTH_DEFAULT_STACKSIZE - aslr_offset;
+       user_addr_t main_thread_stack_top = 0;
+       if (pthread_kern->proc_get_user_stack) {
+               main_thread_stack_top = pthread_kern->proc_get_user_stack(p);
+       }
+       if (proc64bit && main_thread_stack_top) {
+               // The main thread stack position is randomly slid by xnu (c.f.
+               // load_main() in mach_loader.c), so basing pthread stack allocations
+               // where the main thread stack ends is already ASLRd and doing so
+               // avoids creating a gap in the process address space that may cause
+               // extra PTE memory usage. rdar://problem/33328206
+               stackaddr = vm_map_trunc_page_mask((vm_map_offset_t)main_thread_stack_top,
+                               vm_map_page_mask(vmap));
        } else {
-               // If you try to slide down from this point, you risk ending up in memory consumed by malloc
-               stackaddr = SHARED_REGION_BASE_ARM - 32 * PTH_DEFAULT_STACKSIZE + aslr_offset;
+               // vm_map_get_max_aslr_slide_pages ensures 1MB of slide, we do better
+               aslr_offset = random() % ((proc64bit ? 4 : 2) * PTH_DEFAULT_STACKSIZE);
+               aslr_offset = vm_map_trunc_page_mask((vm_map_offset_t)aslr_offset,
+                               vm_map_page_mask(vmap));
+               if (proc64bit) {
+                       // 64 stacks below shared region
+                       stackaddr = SHARED_REGION_BASE_ARM64 - 64 * PTH_DEFAULT_STACKSIZE - aslr_offset;
+               } else {
+                       // If you try to slide down from this point, you risk ending up in memory consumed by malloc
+                       stackaddr = SHARED_REGION_BASE_ARM - 32 * PTH_DEFAULT_STACKSIZE + aslr_offset;
+               }
        }
 #else
 #error Need to define a stack address hint for this architecture
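
The fallback arm path above slides the hint down from a fixed base by a
page-truncated random offset. The arithmetic in isolation, as a sketch: the
page size, PTH_DEFAULT_STACKSIZE, and the base address below are assumed
values, and random() stands in for the kernel's RNG.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_MASK             (4096u - 1)         /* assumed page size */
    #define PTH_DEFAULT_STACKSIZE (512u * 1024)       /* assumed */
    #define STACK_HINT_BASE       0x170000000ULL      /* assumed base */

    int
    main(void)
    {
            uint64_t aslr_offset = (uint64_t)random() % (4 * PTH_DEFAULT_STACKSIZE);
            aslr_offset &= ~(uint64_t)PAGE_MASK; /* page-truncate the slide */
            /* 64 default-sized stacks below the base, then slide down */
            uint64_t stackaddr = STACK_HINT_BASE
                            - 64 * PTH_DEFAULT_STACKSIZE - aslr_offset;
            printf("stack hint: 0x%llx\n", (unsigned long long)stackaddr);
            return 0;
    }
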
@@ -332,6 +483,10 @@ _bsdthread_create(struct proc *p, user_addr_t user_func, user_addr_t user_funcar
 
        sright = (void *)pthread_kern->convert_thread_to_port(th);
        th_thport = pthread_kern->ipc_port_copyout_send(sright, pthread_kern->task_get_ipcspace(ctask));
+       if (!MACH_PORT_VALID(th_thport)) {
+               error = EMFILE; // userland will convert this into a crash
+               goto out;
+       }
 
        if ((flags & PTHREAD_START_CUSTOM) == 0) {
                mach_vm_size_t pthread_size =
@@ -364,31 +519,31 @@ _bsdthread_create(struct proc *p, user_addr_t user_func, user_addr_t user_funcar
                 */
                kret = mach_vm_protect(vmap,  stackaddr, th_guardsize, FALSE, VM_PROT_NONE);
 
-               if (kret != KERN_SUCCESS) { 
+               if (kret != KERN_SUCCESS) {
                        error = ENOMEM;
                        goto out1;
                }
 
                th_pthread = stackaddr + th_guardsize + user_stack;
                th_stack = th_pthread;
-               
+
                /*
                * Pre-fault the first page of the new thread's stack and the page that will
                * contain the pthread_t structure.
-               */      
-               if (vm_map_trunc_page_mask((vm_map_offset_t)(th_stack - C_64_REDZONE_LEN), vm_map_page_mask(vmap)) != 
+               */
+               if (vm_map_trunc_page_mask((vm_map_offset_t)(th_stack - C_64_REDZONE_LEN), vm_map_page_mask(vmap)) !=
                                vm_map_trunc_page_mask((vm_map_offset_t)th_pthread, vm_map_page_mask(vmap))){
                        vm_fault( vmap,
                                        vm_map_trunc_page_mask((vm_map_offset_t)(th_stack - C_64_REDZONE_LEN), vm_map_page_mask(vmap)),
                                        VM_PROT_READ | VM_PROT_WRITE,
-                                       FALSE, 
+                                       FALSE,
                                        THREAD_UNINT, NULL, 0);
                }
-               
+
                vm_fault( vmap,
                                vm_map_trunc_page_mask((vm_map_offset_t)th_pthread, vm_map_page_mask(vmap)),
                                VM_PROT_READ | VM_PROT_WRITE,
-                               FALSE, 
+                               FALSE,
                                THREAD_UNINT, NULL, 0);
 
        } else {
@@ -451,7 +606,7 @@ _bsdthread_create(struct proc *p, user_addr_t user_func, user_addr_t user_funcar
                        error = EINVAL;
                        goto out;
                }
-               
+
        }
 #elif defined(__arm__)
        arm_thread_state_t state = {
@@ -467,7 +622,7 @@ _bsdthread_create(struct proc *p, user_addr_t user_func, user_addr_t user_funcar
                .r[7] = 0,
                .lr = 0,
 
-               /*      
+               /*
                 * set stack pointer
                 */
                .sp = (int)((vm_offset_t)(th_stack-C_32_STK_ALIGN))
@@ -509,6 +664,26 @@ _bsdthread_create(struct proc *p, user_addr_t user_func, user_addr_t user_funcar
                pthread_kern->thread_policy_set_internal(th, THREAD_QOS_POLICY, (thread_policy_t)&qos, THREAD_QOS_POLICY_COUNT);
        }
 
+       if (pthread_kern->proc_get_mach_thread_self_tsd_offset) {
+               uint64_t mach_thread_self_offset =
+                               pthread_kern->proc_get_mach_thread_self_tsd_offset(p);
+               if (mach_thread_self_offset && tsd_offset) {
+                       bool proc64bit = proc_is64bit(p);
+                       if (proc64bit) {
+                               uint64_t th_thport_tsd = (uint64_t)th_thport;
+                               error = copyout(&th_thport_tsd, th_pthread + tsd_offset +
+                                               mach_thread_self_offset, sizeof(th_thport_tsd));
+                       } else {
+                               uint32_t th_thport_tsd = (uint32_t)th_thport;
+                               error = copyout(&th_thport_tsd, th_pthread + tsd_offset +
+                                               mach_thread_self_offset, sizeof(th_thport_tsd));
+                       }
+                       if (error) {
+                               goto out1;
+                       }
+               }
+       }
+
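
The block above pre-seeds the new thread's mach_thread_self TSD slot with its
own port name, using an 8-byte store for 64-bit targets and a 4-byte store
otherwise. A sketch of that width selection, with memcpy standing in for
copyout() and made-up offsets in main():

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int
    store_thread_port(void *pthread_base, uint64_t tsd_offset,
                    uint64_t slot_offset, uint32_t port, int proc64bit)
    {
            char *slot = (char *)pthread_base + tsd_offset + slot_offset;
            if (proc64bit) {
                    uint64_t v = port;           /* widen to the 64-bit slot */
                    memcpy(slot, &v, sizeof(v)); /* stands in for copyout() */
            } else {
                    uint32_t v = port;
                    memcpy(slot, &v, sizeof(v));
            }
            return 0;
    }

    int
    main(void)
    {
            char tsd[64] = { 0 };
            store_thread_port(tsd, 8, 16, 0x1234, 1);
            uint64_t v;
            memcpy(&v, tsd + 24, sizeof(v));
            printf("slot = 0x%llx\n", (unsigned long long)v);
            return 0;
    }
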
        kret = pthread_kern->thread_resume(th);
        if (kret != KERN_SUCCESS) {
                error = EINVAL;
@@ -529,6 +704,9 @@ out1:
        }
 out:
        (void)pthread_kern->mach_port_deallocate(pthread_kern->task_get_ipcspace(ctask), th_thport);
+       if (pthread_kern->thread_will_park_or_terminate) {
+               pthread_kern->thread_will_park_or_terminate(th);
+       }
        (void)thread_terminate(th);
        (void)thread_deallocate(th);
        return(error);
@@ -571,8 +749,11 @@ _bsdthread_terminate(__unused struct proc *p,
                        }
                }
        }
-       
-       (void) thread_terminate(th);
+
+       if (pthread_kern->thread_will_park_or_terminate) {
+               pthread_kern->thread_will_park_or_terminate(th);
+       }
+       (void)thread_terminate(th);
        if (sem != MACH_PORT_NULL) {
                 kret = pthread_kern->semaphore_signal_internal_trap(sem);
                if (kret != KERN_SUCCESS) {
@@ -580,7 +761,7 @@ _bsdthread_terminate(__unused struct proc *p,
                        return(EINVAL);
                }
        }
-       
+
        if (kthport != MACH_PORT_NULL) {
                pthread_kern->mach_port_deallocate(pthread_kern->task_get_ipcspace(current_task()), kthport);
        }
@@ -609,45 +790,86 @@ _bsdthread_register(struct proc *p,
                    uint64_t dispatchqueue_offset,
                    int32_t *retval)
 {
-       /* We have to do this first so that it resets after fork */
-       pthread_kern->proc_set_stack_addr_hint(p, (user_addr_t)stack_addr_hint(p, pthread_kern->current_map()));
+       struct _pthread_registration_data data = {};
+       uint32_t max_tsd_offset;
+       kern_return_t kr;
+       size_t pthread_init_sz = 0;
 
-       /* prevent multiple registrations */
-       if (pthread_kern->proc_get_register(p) != 0) {
-               return(EINVAL);
-       }
        /* syscall randomizer test can pass bogus values */
        if (pthsize < 0 || pthsize > MAX_PTHREAD_SIZE) {
                return(EINVAL);
        }
+       /*
+        * if we have pthread_init_data, then we use that and target_concptr
+        * (which is an offset) get data.
+        */
+       if (pthread_init_data != 0) {
+               if (pthread_init_data_size < sizeof(data.version)) {
+                       return EINVAL;
+               }
+               pthread_init_sz = MIN(sizeof(data), (size_t)pthread_init_data_size);
+               int ret = copyin(pthread_init_data, &data, pthread_init_sz);
+               if (ret) {
+                       return ret;
+               }
+               if (data.version != (size_t)pthread_init_data_size) {
+                       return EINVAL;
+               }
+       } else {
+               data.dispatch_queue_offset = dispatchqueue_offset;
+       }
+
+       /* We have to do this before proc_get_register so that it resets after fork */
+       mach_vm_offset_t stackaddr = stack_addr_hint(p, pthread_kern->current_map());
+       pthread_kern->proc_set_stack_addr_hint(p, (user_addr_t)stackaddr);
+
+       /* prevent multiple registrations */
+       if (pthread_kern->proc_get_register(p) != 0) {
+               return(EINVAL);
+       }
+
        pthread_kern->proc_set_threadstart(p, threadstart);
        pthread_kern->proc_set_wqthread(p, wqthread);
        pthread_kern->proc_set_pthsize(p, pthsize);
        pthread_kern->proc_set_register(p);
 
-       /* if we have pthread_init_data, then we use that and target_concptr (which is an offset) get data. */
-       if (pthread_init_data != 0) {
-               thread_qos_policy_data_t qos;
+       uint32_t tsd_slot_sz = proc_is64bit(p) ? sizeof(uint64_t) : sizeof(uint32_t);
+       if ((uint32_t)pthsize >= tsd_slot_sz &&
+                       data.tsd_offset <= (uint32_t)(pthsize - tsd_slot_sz)) {
+               max_tsd_offset = ((uint32_t)pthsize - data.tsd_offset - tsd_slot_sz);
+       } else {
+               data.tsd_offset = 0;
+               max_tsd_offset = 0;
+       }
+       pthread_kern->proc_set_pthread_tsd_offset(p, data.tsd_offset);
 
-               struct _pthread_registration_data data = {};
-               size_t pthread_init_sz = MIN(sizeof(struct _pthread_registration_data), (size_t)pthread_init_data_size);
+       if (data.dispatch_queue_offset > max_tsd_offset) {
+               data.dispatch_queue_offset = 0;
+       }
+       pthread_kern->proc_set_dispatchqueue_offset(p, data.dispatch_queue_offset);
 
-               kern_return_t kr = copyin(pthread_init_data, &data, pthread_init_sz);
-               if (kr != KERN_SUCCESS) {
-                       return EINVAL;
+       if (pthread_kern->proc_set_return_to_kernel_offset) {
+               if (data.return_to_kernel_offset > max_tsd_offset) {
+                       data.return_to_kernel_offset = 0;
                }
+               pthread_kern->proc_set_return_to_kernel_offset(p,
+                               data.return_to_kernel_offset);
+       }
 
-               /* Incoming data from the data structure */
-               pthread_kern->proc_set_dispatchqueue_offset(p, data.dispatch_queue_offset);
-               if (data.version > offsetof(struct _pthread_registration_data, tsd_offset)
-                       && data.tsd_offset < (uint32_t)pthsize) {
-                       pthread_kern->proc_set_pthread_tsd_offset(p, data.tsd_offset);
+       if (pthread_kern->proc_set_mach_thread_self_tsd_offset) {
+               if (data.mach_thread_self_offset > max_tsd_offset) {
+                       data.mach_thread_self_offset = 0;
                }
+               pthread_kern->proc_set_mach_thread_self_tsd_offset(p,
+                               data.mach_thread_self_offset);
+       }
 
+       if (pthread_init_data != 0) {
                /* Outgoing data that userspace expects as a reply */
                data.version = sizeof(struct _pthread_registration_data);
                if (pthread_kern->qos_main_thread_active()) {
                        mach_msg_type_number_t nqos = THREAD_QOS_POLICY_COUNT;
+                       thread_qos_policy_data_t qos;
                        boolean_t gd = FALSE;
 
                        kr = pthread_kern->thread_policy_get(current_thread(), THREAD_QOS_POLICY, (thread_policy_t)&qos, &nqos, &gd);
@@ -672,8 +894,6 @@ _bsdthread_register(struct proc *p,
                if (kr != KERN_SUCCESS) {
                        return EINVAL;
                }
-       } else {
-               pthread_kern->proc_set_dispatchqueue_offset(p, dispatchqueue_offset);
        }
 
        /* return the supported feature set as the return value. */
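
The reworked registration path copies in at most
sizeof(struct _pthread_registration_data) bytes and then requires data.version
to equal the size userspace claimed to pass, which is what lets an older
kernel tolerate a newer libpthread and vice versa. A trimmed user-space sketch
of that handshake; memcpy stands in for copyin(), the struct keeps only the
fields visible in this diff, and EINVAL is hard-coded.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct registration_data {
            uint64_t version;  /* userspace sets this to sizeof(its struct) */
            uint64_t dispatch_queue_offset;
            uint32_t tsd_offset;
    };

    static int
    register_copyin(const void *user_buf, size_t user_size,
                    struct registration_data *data)
    {
            if (user_size < sizeof(data->version))
                    return 22; /* EINVAL */
            /* copy no more than this side knows about (newer caller) */
            size_t sz = user_size < sizeof(*data) ? user_size : sizeof(*data);
            memset(data, 0, sizeof(*data));
            memcpy(data, user_buf, sz); /* stands in for copyin() */
            /* version must match what the caller claimed to pass */
            if (data->version != user_size)
                    return 22; /* EINVAL */
            return 0;
    }

    int
    main(void)
    {
            struct registration_data user = {
                    .version = sizeof(user),
                    .dispatch_queue_offset = 0xa8,
            };
            struct registration_data kern;
            int rc = register_copyin(&user, sizeof(user), &kern);
            printf("rc = %d, dqo = 0x%llx\n", rc,
                            (unsigned long long)kern.dispatch_queue_offset);
            return 0;
    }
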
@@ -687,7 +907,7 @@ _bsdthread_register(struct proc *p,
 int
 _bsdthread_ctl_set_qos(struct proc *p, user_addr_t __unused cmd, mach_port_name_t kport, user_addr_t tsd_priority_addr, user_addr_t arg3, int *retval)
 {
-       kern_return_t kr;
+       int rv;
        thread_t th;
 
        pthread_priority_t priority;
@@ -700,17 +920,13 @@ _bsdthread_ctl_set_qos(struct proc *p, user_addr_t __unused cmd, mach_port_name_
        /* QoS is stored in a given slot in the pthread TSD. We need to copy that in and set our QoS based on it. */
        if (proc_is64bit(p)) {
                uint64_t v;
-               kr = copyin(tsd_priority_addr, &v, sizeof(v));
-               if (kr != KERN_SUCCESS) {
-                       return kr;
-               }
+               rv = copyin(tsd_priority_addr, &v, sizeof(v));
+               if (rv) goto out;
                priority = (int)(v & 0xffffffff);
        } else {
                uint32_t v;
-               kr = copyin(tsd_priority_addr, &v, sizeof(v));
-               if (kr != KERN_SUCCESS) {
-                       return kr;
-               }
+               rv = copyin(tsd_priority_addr, &v, sizeof(v));
+               if (rv) goto out;
                priority = v;
        }
 
@@ -724,13 +940,14 @@ _bsdthread_ctl_set_qos(struct proc *p, user_addr_t __unused cmd, mach_port_name_
                return EPERM;
        }
 
-       int rv = _bsdthread_ctl_set_self(p, 0, priority, 0, _PTHREAD_SET_SELF_QOS_FLAG, retval);
+       rv = _bsdthread_ctl_set_self(p, 0, priority, 0, _PTHREAD_SET_SELF_QOS_FLAG, retval);
 
        /* Static param the thread, we just set QoS on it, so its stuck in QoS land now. */
        /* pthread_kern->thread_static_param(th, TRUE); */ // see <rdar://problem/16433744>, for details
 
        thread_deallocate(th);
 
+out:
        return rv;
 }
 
@@ -745,13 +962,42 @@ util_get_thread_threadlist_entry(thread_t th)
        return NULL;
 }
 
+boolean_t
+_workq_thread_has_been_unbound(thread_t th, int qos_class)
+{
+       struct threadlist *tl = util_get_thread_threadlist_entry(th);
+       if (!tl) {
+               return FALSE;
+       }
+
+       struct workqueue *wq = tl->th_workq;
+       workqueue_lock_spin(wq);
+
+       if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+               goto failure;
+       } else if (qos_class != class_index_get_thread_qos(tl->th_priority)) {
+               goto failure;
+       }
+
+       if ((tl->th_flags & TH_LIST_KEVENT_BOUND)){
+               goto failure;
+       }
+       tl->th_flags &= ~TH_LIST_KEVENT_BOUND;
+
+       workqueue_unlock(wq);
+       return TRUE;
+
+failure:
+       workqueue_unlock(wq);
+       return FALSE;
+}
+
 int
 _bsdthread_ctl_set_self(struct proc *p, user_addr_t __unused cmd, pthread_priority_t priority, mach_port_name_t voucher, _pthread_set_flags_t flags, int __unused *retval)
 {
        thread_qos_policy_data_t qos;
        mach_msg_type_number_t nqos = THREAD_QOS_POLICY_COUNT;
        boolean_t gd = FALSE;
-       bool was_manager_thread = false;
        thread_t th = current_thread();
        struct workqueue *wq = NULL;
        struct threadlist *tl = NULL;
@@ -770,13 +1016,14 @@ _bsdthread_ctl_set_self(struct proc *p, user_addr_t __unused cmd, pthread_priori
                workqueue_lock_spin(wq);
                if (tl->th_flags & TH_LIST_KEVENT_BOUND) {
                        tl->th_flags &= ~TH_LIST_KEVENT_BOUND;
-                       unsigned int kevent_flags = KEVENT_FLAG_WORKQ;
+                       unsigned int kevent_flags = KEVENT_FLAG_WORKQ | KEVENT_FLAG_UNBIND_CHECK_FLAGS;
                        if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
                                kevent_flags |= KEVENT_FLAG_WORKQ_MANAGER;
                        }
 
                        workqueue_unlock(wq);
-                       kevent_qos_internal_unbind(p, class_index_get_thread_qos(tl->th_priority), th, kevent_flags);
+                       __assert_only int ret = kevent_qos_internal_unbind(p, class_index_get_thread_qos(tl->th_priority), th, kevent_flags);
+                       assert(ret == 0);
                } else {
                        workqueue_unlock(wq);
                }
@@ -790,13 +1037,16 @@ qos:
                        goto voucher;
                }
 
-               /* If we have main-thread QoS then we don't allow a thread to come out of QOS_CLASS_UNSPECIFIED. */
-               if (pthread_kern->qos_main_thread_active() && qos.qos_tier == THREAD_QOS_UNSPECIFIED) {
+               /*
+                * If we have main-thread QoS then we don't allow a thread to come out
+                * of QOS_CLASS_UNSPECIFIED.
+                */
+               if (pthread_kern->qos_main_thread_active() && qos.qos_tier ==
+                               THREAD_QOS_UNSPECIFIED) {
                        qos_rv = EPERM;
                        goto voucher;
                }
 
-               /* Get the work queue for tracing, also the threadlist for bucket manipluation. */
                if (!tl) {
                        tl = util_get_thread_threadlist_entry(th);
                        if (tl) wq = tl->th_workq;
@@ -807,55 +1057,58 @@ qos:
                qos.qos_tier = pthread_priority_get_thread_qos(priority);
                qos.tier_importance = (qos.qos_tier == QOS_CLASS_UNSPECIFIED) ? 0 : _pthread_priority_get_relpri(priority);
 
-               if (qos.qos_tier == QOS_CLASS_UNSPECIFIED) {
+               if (qos.qos_tier == QOS_CLASS_UNSPECIFIED ||
+                               qos.tier_importance > 0 || qos.tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
                        qos_rv = EINVAL;
                        goto voucher;
                }
 
-               /* If we're a workqueue, the threadlist item priority needs adjusting, along with the bucket we were running in. */
+               /*
+                * If we're a workqueue, the threadlist item priority needs adjusting,
+                * along with the bucket we were running in.
+                */
                if (tl) {
-                       workqueue_lock_spin(wq);
-                       bool now_under_constrained_limit = false;
-
-                       assert(!(tl->th_flags & TH_LIST_KEVENT_BOUND));
+                       bool try_run_threadreq = false;
 
+                       workqueue_lock_spin(wq);
                        kr = pthread_kern->thread_set_workq_qos(th, qos.qos_tier, qos.tier_importance);
                        assert(kr == KERN_SUCCESS || kr == KERN_TERMINATED);
 
                        /* Fix up counters. */
                        uint8_t old_bucket = tl->th_priority;
                        uint8_t new_bucket = pthread_priority_get_class_index(priority);
-                       if (old_bucket == WORKQUEUE_EVENT_MANAGER_BUCKET) {
-                               was_manager_thread = true;
-                       }
-
-                       uint32_t old_active = OSAddAtomic(-1, &wq->wq_thactive_count[old_bucket]);
-                       OSAddAtomic(1, &wq->wq_thactive_count[new_bucket]);
 
-                       wq->wq_thscheduled_count[old_bucket]--;
-                       wq->wq_thscheduled_count[new_bucket]++;
+                       if (old_bucket != new_bucket) {
+                               _wq_thactive_move(wq, old_bucket, new_bucket);
+                               wq->wq_thscheduled_count[old_bucket]--;
+                               wq->wq_thscheduled_count[new_bucket]++;
+                               if (old_bucket == WORKQUEUE_EVENT_MANAGER_BUCKET ||
+                                               old_bucket < new_bucket) {
+                                       /*
+                                        * if the QoS of the thread was lowered, then this could
+                                        * allow for a higher QoS thread request to run, so we need
+                                        * to reevaluate.
+                                        */
+                                       try_run_threadreq = true;
+                               }
+                               tl->th_priority = new_bucket;
+                       }
 
                        bool old_overcommit = !(tl->th_flags & TH_LIST_CONSTRAINED);
                        bool new_overcommit = priority & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
                        if (!old_overcommit && new_overcommit) {
-                               wq->wq_constrained_threads_scheduled--;
-                               tl->th_flags &= ~TH_LIST_CONSTRAINED;
-                               if (wq->wq_constrained_threads_scheduled == wq_max_constrained_threads - 1) {
-                                       now_under_constrained_limit = true;
+                               if (wq->wq_constrained_threads_scheduled-- ==
+                                               wq_max_constrained_threads) {
+                                       try_run_threadreq = true;
                                }
+                               tl->th_flags &= ~TH_LIST_CONSTRAINED;
                        } else if (old_overcommit && !new_overcommit) {
                                wq->wq_constrained_threads_scheduled++;
                                tl->th_flags |= TH_LIST_CONSTRAINED;
                        }
 
-                       tl->th_priority = new_bucket;
-
-                       /* If we were at the ceiling of threads for a given bucket, we have
-                        * to reevaluate whether we should start more work.
-                        */
-                       if (old_active == wq->wq_reqconc[old_bucket] || now_under_constrained_limit) {
-                               /* workqueue_run_nextreq will drop the workqueue lock in all exit paths. */
-                               (void)workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_DEFAULT, 0, false);
+                       if (try_run_threadreq) {
+                               workqueue_run_threadreq_and_unlock(p, wq, NULL, NULL, true);
                        } else {
                                workqueue_unlock(wq);
                        }
@@ -929,7 +1182,7 @@ done:
        if (fixedpri_rv) {
                return fixedpri_rv;
        }
-       
+
        return 0;
 }
 
@@ -1056,6 +1309,31 @@ _bsdthread_ctl_qos_dispatch_asynchronous_override_reset(struct proc __unused *p,
        return 0;
 }
 
+static int
+_bsdthread_ctl_max_parallelism(struct proc __unused *p, user_addr_t __unused cmd,
+               int qos, unsigned long flags, int *retval)
+{
+       _Static_assert(QOS_PARALLELISM_COUNT_LOGICAL ==
+                       _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical");
+       _Static_assert(QOS_PARALLELISM_REALTIME ==
+                       _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime");
+
+       if (flags & ~(QOS_PARALLELISM_REALTIME | QOS_PARALLELISM_COUNT_LOGICAL)) {
+               return EINVAL;
+       }
+
+       if (flags & QOS_PARALLELISM_REALTIME) {
+               if (qos) {
+                       return EINVAL;
+               }
+       } else if (qos == THREAD_QOS_UNSPECIFIED || qos >= THREAD_QOS_LAST) {
+               return EINVAL;
+       }
+
+       *retval = pthread_kern->qos_max_parallelism(qos, flags);
+       return 0;
+}
+
 int
 _bsdthread_ctl(struct proc *p, user_addr_t cmd, user_addr_t arg1, user_addr_t arg2, user_addr_t arg3, int *retval)
 {
@@ -1076,175 +1354,491 @@ _bsdthread_ctl(struct proc *p, user_addr_t cmd, user_addr_t arg1, user_addr_t ar
                return _bsdthread_ctl_qos_dispatch_asynchronous_override_reset(p, cmd, (int)arg1, arg2, arg3, retval);
        case BSDTHREAD_CTL_SET_SELF:
                return _bsdthread_ctl_set_self(p, cmd, (pthread_priority_t)arg1, (mach_port_name_t)arg2, (_pthread_set_flags_t)arg3, retval);
+       case BSDTHREAD_CTL_QOS_MAX_PARALLELISM:
+               return _bsdthread_ctl_max_parallelism(p, cmd, (int)arg1, (unsigned long)arg2, retval);
        default:
                return EINVAL;
        }
 }
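
The new BSDTHREAD_CTL_QOS_MAX_PARALLELISM command routes to
_bsdthread_ctl_max_parallelism() above, which takes either a real QoS tier or
the REALTIME flag with qos == 0. A stand-alone recreation of that validation
so the accepted (qos, flags) combinations can be exercised; the THREAD_QOS_*
values (tiers 1..6, LAST == 7) are the usual xnu ones but should be read as
assumptions here.

    #include <stdio.h>

    #define QOS_PARALLELISM_COUNT_LOGICAL 0x1
    #define QOS_PARALLELISM_REALTIME      0x2
    #define THREAD_QOS_UNSPECIFIED        0   /* assumed */
    #define THREAD_QOS_LAST               7   /* assumed */
    #define EINVAL                        22

    static int
    validate_max_parallelism(int qos, unsigned long flags)
    {
            if (flags & ~(QOS_PARALLELISM_REALTIME | QOS_PARALLELISM_COUNT_LOGICAL))
                    return EINVAL;          /* unknown flag bits */
            if (flags & QOS_PARALLELISM_REALTIME) {
                    if (qos) return EINVAL; /* realtime queries take no QoS */
            } else if (qos == THREAD_QOS_UNSPECIFIED || qos >= THREAD_QOS_LAST) {
                    return EINVAL;          /* QoS must be a real tier */
            }
            return 0;
    }

    int
    main(void)
    {
            printf("%d %d %d\n",
                    validate_max_parallelism(3, QOS_PARALLELISM_COUNT_LOGICAL), /* 0 */
                    validate_max_parallelism(0, QOS_PARALLELISM_REALTIME),      /* 0 */
                    validate_max_parallelism(9, 0));                            /* 22 */
            return 0;
    }
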
 
 #pragma mark - Workqueue Implementation
-#pragma mark workqueue lock
 
-static boolean_t workqueue_lock_spin_is_acquired_kdp(struct workqueue *wq) {
-  return kdp_lck_spin_is_acquired(&wq->wq_lock);
-}
+#pragma mark wq_flags
 
-static void
-workqueue_lock_spin(struct workqueue *wq)
+static inline uint32_t
+_wq_flags(struct workqueue *wq)
 {
-       boolean_t interrupt_state = ml_set_interrupts_enabled(FALSE);
-       lck_spin_lock(&wq->wq_lock);
-       wq->wq_interrupt_state = interrupt_state;
+       return atomic_load_explicit(&wq->wq_flags, memory_order_relaxed);
 }
 
-static void
-workqueue_unlock(struct workqueue *wq)
+static inline bool
+_wq_exiting(struct workqueue *wq)
 {
-       boolean_t interrupt_state = wq->wq_interrupt_state;
-       lck_spin_unlock(&wq->wq_lock);
-       ml_set_interrupts_enabled(interrupt_state);
+       return _wq_flags(wq) & WQ_EXITING;
 }
 
-#pragma mark workqueue add timer
-
-/**
- * Sets up the timer which will call out to workqueue_add_timer
- */
-static void
-workqueue_interval_timer_start(struct workqueue *wq)
+static inline uint32_t
+_wq_flags_or_orig(struct workqueue *wq, uint32_t v)
 {
-       uint64_t deadline;
+#if PTHREAD_INLINE_RMW_ATOMICS
+       uint32_t state;
+       do {
+               state = _wq_flags(wq);
+       } while (!OSCompareAndSwap(state, state | v, &wq->wq_flags));
+       return state;
+#else
+       return atomic_fetch_or_explicit(&wq->wq_flags, v, memory_order_relaxed);
+#endif
+}
 
-       /* n.b. wq_timer_interval is reset to 0 in workqueue_add_timer if the
-        ATIMER_RUNNING flag is not present.  The net effect here is that if a
-        sequence of threads is required, we'll double the time before we give out
-        the next one. */
-       if (wq->wq_timer_interval == 0) {
-               wq->wq_timer_interval = wq_stalled_window_usecs;
+static inline uint32_t
+_wq_flags_and_orig(struct workqueue *wq, uint32_t v)
+{
+#if PTHREAD_INLINE_RMW_ATOMICS
+       uint32_t state;
+       do {
+               state = _wq_flags(wq);
+       } while (!OSCompareAndSwap(state, state & v, &wq->wq_flags));
+       return state;
+#else
+       return atomic_fetch_and_explicit(&wq->wq_flags, v, memory_order_relaxed);
+#endif
+}
 
-       } else {
-               wq->wq_timer_interval = wq->wq_timer_interval * 2;
+static inline bool
+WQ_TIMER_DELAYED_NEEDED(struct workqueue *wq)
+{
+       uint32_t oldflags, newflags;
+       do {
+               oldflags = _wq_flags(wq);
+               if (oldflags & (WQ_EXITING | WQ_ATIMER_DELAYED_RUNNING)) {
+                       return false;
+               }
+               newflags = oldflags | WQ_ATIMER_DELAYED_RUNNING;
+       } while (!OSCompareAndSwap(oldflags, newflags, &wq->wq_flags));
+       return true;
+}
 
-               if (wq->wq_timer_interval > wq_max_timer_interval_usecs) {
-                       wq->wq_timer_interval = wq_max_timer_interval_usecs;
+static inline bool
+WQ_TIMER_IMMEDIATE_NEEDED(struct workqueue *wq)
+{
+       uint32_t oldflags, newflags;
+       do {
+               oldflags = _wq_flags(wq);
+               if (oldflags & (WQ_EXITING | WQ_ATIMER_IMMEDIATE_RUNNING)) {
+                       return false;
                }
-       }
-       clock_interval_to_deadline(wq->wq_timer_interval, 1000, &deadline);
+               newflags = oldflags | WQ_ATIMER_IMMEDIATE_RUNNING;
+       } while (!OSCompareAndSwap(oldflags, newflags, &wq->wq_flags));
+       return true;
+}
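
/*
 * Hedged sketch of the WQ_TIMER_*_NEEDED pattern above: atomically set a
 * RUNNING bit only if neither RUNNING nor EXITING is already present, and
 * report whether this caller is the one that must actually arm the timer.
 * The flag names and values are illustrative, not the kernel's.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define FLAG_EXITING 0x1u
#define FLAG_RUNNING 0x2u

static bool
try_arm_once(_Atomic uint32_t *flags)
{
        uint32_t old = atomic_load_explicit(flags, memory_order_relaxed);
        do {
                if (old & (FLAG_EXITING | FLAG_RUNNING)) {
                        return false;   /* already armed, or shutting down */
                }
        } while (!atomic_compare_exchange_weak_explicit(flags, &old,
                        old | FLAG_RUNNING, memory_order_relaxed,
                        memory_order_relaxed));
        return true;    /* this caller won the race and starts the timer */
}
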
 
-       PTHREAD_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount, wq->wq_flags, wq->wq_timer_interval, 0);
+#pragma mark thread requests pacing
 
-       boolean_t ret = thread_call_enter1_delayed(wq->wq_atimer_delayed_call, wq->wq_atimer_delayed_call, deadline);
-       if (ret) {
-               panic("delayed_call was already enqueued");
-       }
+static inline uint32_t
+_wq_pacing_shift_for_pri(int pri)
+{
+       return _wq_bucket_to_thread_qos(pri) - 1;
 }
 
-/**
- * Immediately trigger the workqueue_add_timer
- */
-static void
-workqueue_interval_timer_trigger(struct workqueue *wq)
+static inline int
+_wq_highest_paced_priority(struct workqueue *wq)
 {
-       PTHREAD_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount, wq->wq_flags, 0, 0);
+       uint8_t paced = wq->wq_paced;
+       int msb = paced ? 32 - __builtin_clz(paced) : 0; // fls(paced) == bit + 1
+       return WORKQUEUE_EVENT_MANAGER_BUCKET - msb;
+}
 
-       boolean_t ret = thread_call_enter1(wq->wq_atimer_immediate_call, wq->wq_atimer_immediate_call);
-       if (ret) {
-               panic("immediate_call was already enqueued");
-       }
+static inline uint8_t
+_wq_pacing_bit_for_pri(int pri)
+{
+       return 1u << _wq_pacing_shift_for_pri(pri);
 }
 
-/**
- * returns whether lastblocked_tsp is within wq_stalled_window_usecs of cur_ts
- */
-static boolean_t
-wq_thread_is_busy(uint64_t cur_ts, uint64_t *lastblocked_tsp)
+static inline bool
+_wq_should_pace_priority(struct workqueue *wq, int pri)
 {
-       clock_sec_t     secs;
-       clock_usec_t    usecs;
-       uint64_t lastblocked_ts;
-       uint64_t elapsed;
+       return wq->wq_paced >= _wq_pacing_bit_for_pri(pri);
+}
 
-       /*
-        * the timestamp is updated atomically w/o holding the workqueue lock
-        * so we need to do an atomic read of the 64 bits so that we don't see
-        * a mismatched pair of 32 bit reads... we accomplish this in an architecturally
-        * independent fashion by using OSCompareAndSwap64 to write back the
-        * value we grabbed... if it succeeds, then we have a good timestamp to
-        * evaluate... if it fails, we straddled grabbing the timestamp while it
-        * was being updated... treat a failed update as a busy thread since
-        * it implies we are about to see a really fresh timestamp anyway
-        */
-       lastblocked_ts = *lastblocked_tsp;
+static inline void
+_wq_pacing_start(struct workqueue *wq, struct threadlist *tl)
+{
+       uint8_t bit = _wq_pacing_bit_for_pri(tl->th_priority);
+       assert((tl->th_flags & TH_LIST_PACING) == 0);
+       assert((wq->wq_paced & bit) == 0);
+       wq->wq_paced |= bit;
+       tl->th_flags |= TH_LIST_PACING;
+}
 
-       if ( !OSCompareAndSwap64((UInt64)lastblocked_ts, (UInt64)lastblocked_ts, lastblocked_tsp))
-               return (TRUE);
+static inline bool
+_wq_pacing_end(struct workqueue *wq, struct threadlist *tl)
+{
+       if (tl->th_flags & TH_LIST_PACING) {
+               uint8_t bit = _wq_pacing_bit_for_pri(tl->th_priority);
+               assert((wq->wq_paced & bit) != 0);
+               wq->wq_paced ^= bit;
+               tl->th_flags &= ~TH_LIST_PACING;
+               return wq->wq_paced < bit; // !_wq_should_pace_priority
+       }
+       return false;
+}
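
/*
 * Worked example (user space, with assumed QoS shift values) of the
 * pacing bit arithmetic above: one bit per QoS bucket, where a
 * numerically higher bit means a higher QoS.  A request at a given
 * priority is paced whenever a thread at the same or higher QoS is
 * still paced, which is exactly the unsigned test wq_paced >= bit.
 * __builtin_clz is the GCC/Clang builtin used by the code above.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint8_t paced  = 1u << 2;   /* a thread at QoS shift 2 is pacing */
        uint8_t lower  = 1u << 1;   /* incoming request one level below */
        uint8_t higher = 1u << 3;   /* incoming request one level above */

        printf("pace lower?  %d\n", paced >= lower);    /* 1: must wait */
        printf("pace higher? %d\n", paced >= higher);   /* 0: admitted */

        /* fls()-style "highest paced bucket", as in _wq_highest_paced_priority */
        int msb = paced ? 32 - __builtin_clz(paced) : 0;
        printf("highest paced shift: %d\n", msb - 1);   /* 2 */
        return 0;
}
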
 
-       if (lastblocked_ts >= cur_ts) {
+#pragma mark thread requests
+
+static void
+_threadreq_init_alloced(struct threadreq *req, int priority, int flags)
+{
+       assert((flags & TR_FLAG_ONSTACK) == 0);
+       req->tr_state = TR_STATE_NEW;
+       req->tr_priority = priority;
+       req->tr_flags = flags;
+}
+
+static void
+_threadreq_init_stack(struct threadreq *req, int priority, int flags)
+{
+       req->tr_state = TR_STATE_NEW;
+       req->tr_priority = priority;
+       req->tr_flags = flags | TR_FLAG_ONSTACK;
+}
+
+static void
+_threadreq_copy_prepare(struct workqueue *wq)
+{
+again:
+       if (wq->wq_cached_threadreq) {
+               return;
+       }
+
+       workqueue_unlock(wq);
+       struct threadreq *req = zalloc(pthread_zone_threadreq);
+       workqueue_lock_spin(wq);
+
+       if (wq->wq_cached_threadreq) {
                /*
-                * because the update of the timestamp when a thread blocks isn't
-                * serialized against us looking at it (i.e. we don't hold the workq lock)
-                * it's possible to have a timestamp that matches the current time or
-                * that even looks to be in the future relative to when we grabbed the current
-                * time... just treat this as a busy thread since it must have just blocked.
+                * We lost the race and someone left behind an extra threadreq for us
+                * to use.  Throw away our request and retry.
                 */
-               return (TRUE);
+               workqueue_unlock(wq);
+               zfree(pthread_zone_threadreq, req);
+               workqueue_lock_spin(wq);
+               goto again;
+       } else {
+               wq->wq_cached_threadreq = req;
        }
-       elapsed = cur_ts - lastblocked_ts;
 
-       pthread_kern->absolutetime_to_microtime(elapsed, &secs, &usecs);
+       assert(wq->wq_cached_threadreq);
+}
 
-       if (secs == 0 && usecs < wq_stalled_window_usecs)
-               return (TRUE);
-       return (FALSE);
+static bool
+_threadreq_copy_prepare_noblock(struct workqueue *wq)
+{
+       if (wq->wq_cached_threadreq) {
+               return true;
+       }
+
+       wq->wq_cached_threadreq = zalloc_noblock(pthread_zone_threadreq);
+
+       return wq->wq_cached_threadreq != NULL;
 }
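
/*
 * A user-space sketch of the idiom _threadreq_copy_prepare uses above:
 * drop the lock around an allocation that may block, retake it, and if
 * another thread stocked the one-element cache in the meantime, discard
 * our allocation and recheck.  The cache type and sizes are illustrative.
 */
#include <pthread.h>
#include <stdlib.h>

struct cache {
        pthread_mutex_t lock;
        void *cached;           /* at most one spare item */
};

static void
cache_prepare(struct cache *c)  /* called and returns with c->lock held */
{
        while (c->cached == NULL) {
                pthread_mutex_unlock(&c->lock);
                void *item = malloc(64);        /* may block: do it unlocked */
                pthread_mutex_lock(&c->lock);
                if (c->cached == NULL) {
                        c->cached = item;       /* we won the race */
                } else {
                        pthread_mutex_unlock(&c->lock);
                        free(item);             /* lost the race: discard ours */
                        pthread_mutex_lock(&c->lock);
                }
        }
}
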
 
-static inline bool
-WQ_TIMER_DELAYED_NEEDED(struct workqueue *wq)
+static inline struct threadreq_head *
+_threadreq_list_for_req(struct workqueue *wq, const struct threadreq *req)
 {
-       int oldflags;
-retry:
-       oldflags = wq->wq_flags;
-       if ( !(oldflags & (WQ_EXITING | WQ_ATIMER_DELAYED_RUNNING))) {
-               if (OSCompareAndSwap(oldflags, oldflags | WQ_ATIMER_DELAYED_RUNNING, (UInt32 *)&wq->wq_flags)) {
-                       return true;
-               } else {
-                       goto retry;
+       if (req->tr_flags & TR_FLAG_OVERCOMMIT) {
+               return &wq->wq_overcommit_reqlist[req->tr_priority];
+       } else {
+               return &wq->wq_reqlist[req->tr_priority];
+       }
+}
+
+static void
+_threadreq_enqueue(struct workqueue *wq, struct threadreq *req)
+{
+       assert(req && req->tr_state == TR_STATE_NEW);
+       if (req->tr_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+               assert(wq->wq_event_manager_threadreq.tr_state != TR_STATE_WAITING);
+               memcpy(&wq->wq_event_manager_threadreq, req, sizeof(struct threadreq));
+               req = &wq->wq_event_manager_threadreq;
+               req->tr_flags &= ~(TR_FLAG_ONSTACK | TR_FLAG_NO_PACING);
+       } else {
+               if (req->tr_flags & TR_FLAG_ONSTACK) {
+                       assert(wq->wq_cached_threadreq);
+                       struct threadreq *newreq = wq->wq_cached_threadreq;
+                       wq->wq_cached_threadreq = NULL;
+
+                       memcpy(newreq, req, sizeof(struct threadreq));
+                       newreq->tr_flags &= ~(TR_FLAG_ONSTACK | TR_FLAG_NO_PACING);
+                       req->tr_state = TR_STATE_DEAD;
+                       req = newreq;
                }
+               TAILQ_INSERT_TAIL(_threadreq_list_for_req(wq, req), req, tr_entry);
        }
-       return false;
+       req->tr_state = TR_STATE_WAITING;
+       wq->wq_reqcount++;
 }
 
-static inline bool
-WQ_TIMER_IMMEDIATE_NEEDED(struct workqueue *wq)
+static void
+_threadreq_dequeue(struct workqueue *wq, struct threadreq *req)
+{
+       if (req->tr_priority != WORKQUEUE_EVENT_MANAGER_BUCKET) {
+               struct threadreq_head *req_list = _threadreq_list_for_req(wq, req);
+#if DEBUG
+               struct threadreq *cursor = NULL;
+               TAILQ_FOREACH(cursor, req_list, tr_entry) {
+                       if (cursor == req) break;
+               }
+               assert(cursor == req);
+#endif
+               TAILQ_REMOVE(req_list, req, tr_entry);
+       }
+       wq->wq_reqcount--;
+}
+
+/*
+ * Mark a thread request as complete.  At this point, it is treated as owned by
+ * the submitting subsystem and you should assume it could be freed.
+ *
+ * Called with the workqueue lock held.
+ */
+static int
+_threadreq_complete_and_unlock(proc_t p, struct workqueue *wq,
+               struct threadreq *req, struct threadlist *tl)
 {
-       int oldflags;
-retry:
-       oldflags = wq->wq_flags;
-       if ( !(oldflags & (WQ_EXITING | WQ_ATIMER_IMMEDIATE_RUNNING))) {
-               if (OSCompareAndSwap(oldflags, oldflags | WQ_ATIMER_IMMEDIATE_RUNNING, (UInt32 *)&wq->wq_flags)) {
-                       return true;
+       struct threadreq *req_tofree = NULL;
+       bool sync = (req->tr_state == TR_STATE_NEW);
+       bool workloop = req->tr_flags & TR_FLAG_WORKLOOP;
+       bool onstack = req->tr_flags & TR_FLAG_ONSTACK;
+       bool kevent = req->tr_flags & TR_FLAG_KEVENT;
+       bool unbinding = tl->th_flags & TH_LIST_UNBINDING;
+       bool locked = true;
+       bool waking_parked_thread = (tl->th_flags & TH_LIST_BUSY);
+       int ret;
+
+       req->tr_state = TR_STATE_COMPLETE;
+
+       if (!workloop && !onstack && req != &wq->wq_event_manager_threadreq) {
+               if (wq->wq_cached_threadreq) {
+                       req_tofree = req;
                } else {
-                       goto retry;
+                       wq->wq_cached_threadreq = req;
                }
        }
-       return false;
+
+       if (tl->th_flags & TH_LIST_UNBINDING) {
+               tl->th_flags &= ~TH_LIST_UNBINDING;
+               assert((tl->th_flags & TH_LIST_KEVENT_BOUND));
+       } else if (workloop || kevent) {
+               assert((tl->th_flags & TH_LIST_KEVENT_BOUND) == 0);
+               tl->th_flags |= TH_LIST_KEVENT_BOUND;
+       }
+
+       if (workloop) {
+               workqueue_unlock(wq);
+               ret = pthread_kern->workloop_fulfill_threadreq(wq->wq_proc, (void*)req,
+                               tl->th_thread, sync ? WORKLOOP_FULFILL_THREADREQ_SYNC : 0);
+               assert(ret == 0);
+               locked = false;
+       } else if (kevent) {
+               unsigned int kevent_flags = KEVENT_FLAG_WORKQ;
+               if (sync) {
+                       kevent_flags |= KEVENT_FLAG_SYNCHRONOUS_BIND;
+               }
+               if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+                       kevent_flags |= KEVENT_FLAG_WORKQ_MANAGER;
+               }
+               workqueue_unlock(wq);
+               ret = kevent_qos_internal_bind(wq->wq_proc,
+                               class_index_get_thread_qos(tl->th_priority), tl->th_thread,
+                               kevent_flags);
+               if (ret != 0) {
+                       workqueue_lock_spin(wq);
+                       tl->th_flags &= ~TH_LIST_KEVENT_BOUND;
+                       locked = true;
+               } else {
+                       locked = false;
+               }
+       }
+
+       /*
+        * Run Thread, Run!
+        */
+       PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq | DBG_FUNC_END, wq, 0, 0, 0, 0);
+       PTHREAD_TRACE_WQ_REQ(TRACE_wq_runitem | DBG_FUNC_START, wq, req, tl->th_priority,
+                       thread_tid(current_thread()), thread_tid(tl->th_thread));
+
+       if (waking_parked_thread) {
+               if (!locked) {
+                       workqueue_lock_spin(wq);
+               }
+               tl->th_flags &= ~(TH_LIST_BUSY);
+               if ((tl->th_flags & TH_LIST_REMOVING_VOUCHER) == 0) {
+                       /*
+                        * If the thread is in the process of removing its voucher, then it
+                        * isn't actually in the wait event yet and we don't need to wake
+                        * it up.  Save the trouble (and the potential
+                        * lock-ordering issues; see 30617015).
+                        */
+                       thread_wakeup_thread(tl, tl->th_thread);
+               }
+               workqueue_unlock(wq);
+
+               if (req_tofree) zfree(pthread_zone_threadreq, req_tofree);
+               return WQ_RUN_TR_THREAD_STARTED;
+       }
+
+       assert((tl->th_flags & TH_LIST_PACING) == 0);
+       if (locked) {
+               workqueue_unlock(wq);
+       }
+       if (req_tofree) zfree(pthread_zone_threadreq, req_tofree);
+       if (unbinding) {
+               return WQ_RUN_TR_THREAD_STARTED;
+       }
+       _setup_wqthread(p, tl->th_thread, wq, tl, WQ_SETUP_CLEAR_VOUCHER);
+       pthread_kern->unix_syscall_return(EJUSTRETURN);
+       __builtin_unreachable();
 }
 
+/*
+ * Mark a thread request as cancelled.  Has similar ownership semantics to the
+ * complete call above.
+ */
+static void
+_threadreq_cancel(struct workqueue *wq, struct threadreq *req)
+{
+       assert(req->tr_state == TR_STATE_WAITING);
+       req->tr_state = TR_STATE_DEAD;
+
+       assert((req->tr_flags & TR_FLAG_ONSTACK) == 0);
+       if (req->tr_flags & TR_FLAG_WORKLOOP) {
+               __assert_only int ret;
+               ret = pthread_kern->workloop_fulfill_threadreq(wq->wq_proc, (void*)req,
+                               THREAD_NULL, WORKLOOP_FULFILL_THREADREQ_CANCEL);
+               assert(ret == 0 || ret == ECANCELED);
+       } else if (req != &wq->wq_event_manager_threadreq) {
+               zfree(pthread_zone_threadreq, req);
+       }
+}
+
+#pragma mark workqueue lock
+
+static boolean_t
+workqueue_lock_spin_is_acquired_kdp(struct workqueue *wq)
+{
+       return kdp_lck_spin_is_acquired(&wq->wq_lock);
+}
+
+static void
+workqueue_lock_spin(struct workqueue *wq)
+{
+       assert(ml_get_interrupts_enabled() == TRUE);
+       lck_spin_lock(&wq->wq_lock);
+}
+
+static bool
+workqueue_lock_try(struct workqueue *wq)
+{
+       return lck_spin_try_lock(&wq->wq_lock);
+}
+
+static void
+workqueue_unlock(struct workqueue *wq)
+{
+       lck_spin_unlock(&wq->wq_lock);
+}
+
+#pragma mark workqueue add timer
+
 /**
- * handler function for the timer
+ * Sets up the timer which will call out to workqueue_add_timer
  */
 static void
-workqueue_add_timer(struct workqueue *wq, thread_call_t thread_call_self)
+workqueue_interval_timer_start(struct workqueue *wq)
 {
-       proc_t          p;
-       boolean_t       start_timer = FALSE;
-       boolean_t       retval;
+       uint64_t deadline;
+
+       /*
+        * n.b. wq_timer_interval is reset to 0 in workqueue_add_timer if the
+        * ATIMER_RUNNING flag is not present.  The net effect here is that if
+        * a sequence of threads is required, we'll double the time before we
+        * give out the next one.
+        */
+       if (wq->wq_timer_interval == 0) {
+               wq->wq_timer_interval = wq_stalled_window_usecs;
+
+       } else {
+               wq->wq_timer_interval = wq->wq_timer_interval * 2;
+
+               if (wq->wq_timer_interval > wq_max_timer_interval_usecs) {
+                       wq->wq_timer_interval = wq_max_timer_interval_usecs;
+               }
+       }
+       clock_interval_to_deadline(wq->wq_timer_interval, 1000, &deadline);
 
-       PTHREAD_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, wq->wq_flags, wq->wq_nthreads, wq->wq_thidlecount, 0);
+       PTHREAD_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
+                       _wq_flags(wq), wq->wq_timer_interval, 0);
 
-       p = wq->wq_proc;
+       thread_call_t call = wq->wq_atimer_delayed_call;
+       if (thread_call_enter1_delayed(call, call, deadline)) {
+               panic("delayed_call was already enqueued");
+       }
+}
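
/*
 * Worked example of the backoff above, with assumed tunables
 * wq_stalled_window_usecs = 200 and wq_max_timer_interval_usecs = 10000:
 * successive re-arms yield 200, 400, 800, ... and then clamp at 10000.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        const uint32_t stalled_window = 200, max_interval = 10000;
        uint32_t interval = 0;

        for (int arm = 1; arm <= 8; arm++) {
                interval = (interval == 0) ? stalled_window : interval * 2;
                if (interval > max_interval) {
                        interval = max_interval;
                }
                printf("arm #%d: %u usecs\n", arm, interval);
        }
        return 0;
}
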
+
+/**
+ * Immediately trigger the workqueue_add_timer
+ */
+static void
+workqueue_interval_timer_trigger(struct workqueue *wq)
+{
+       PTHREAD_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
+                       _wq_flags(wq), 0, 0);
+
+       thread_call_t call = wq->wq_atimer_immediate_call;
+       if (thread_call_enter1(call, call)) {
+               panic("immediate_call was already enqueued");
+       }
+}
+
+/**
+ * Returns whether lastblocked_tsp is within wq_stalled_window_usecs of cur_ts
+ */
+static boolean_t
+wq_thread_is_busy(uint64_t cur_ts, _Atomic uint64_t *lastblocked_tsp)
+{
+       clock_sec_t     secs;
+       clock_usec_t    usecs;
+       uint64_t lastblocked_ts;
+       uint64_t elapsed;
+
+       lastblocked_ts = atomic_load_explicit(lastblocked_tsp, memory_order_relaxed);
+       if (lastblocked_ts >= cur_ts) {
+               /*
+                * because the update of the timestamp when a thread blocks isn't
+                * serialized against us looking at it (i.e. we don't hold the workq lock)
+                * it's possible to have a timestamp that matches the current time or
+                * that even looks to be in the future relative to when we grabbed the current
+                * time... just treat this as a busy thread since it must have just blocked.
+                */
+               return (TRUE);
+       }
+       elapsed = cur_ts - lastblocked_ts;
+
+       pthread_kern->absolutetime_to_microtime(elapsed, &secs, &usecs);
+
+       return (secs == 0 && usecs < wq_stalled_window_usecs);
+}
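
/*
 * Sketch of the staleness check above in portable C11.  The _Atomic
 * qualifier on the timestamp is what guarantees an untorn 64-bit load
 * even on 32-bit targets, replacing the old OSCompareAndSwap64
 * read-back trick.  The time source and window value are stand-ins.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool
is_busy(uint64_t cur, _Atomic uint64_t *lastblocked, uint64_t window)
{
        uint64_t last = atomic_load_explicit(lastblocked, memory_order_relaxed);
        if (last >= cur) {
                return true;    /* raced with an updater: it just blocked */
        }
        return (cur - last) < window;
}
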
+
+/**
+ * Handler function for the timer
+ */
+static void
+workqueue_add_timer(struct workqueue *wq, thread_call_t thread_call_self)
+{
+       proc_t p = wq->wq_proc;
 
        workqueue_lock_spin(wq);
 
+       PTHREAD_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq,
+                       _wq_flags(wq), wq->wq_nthreads, wq->wq_thidlecount, 0);
+
        /*
         * There are two tricky issues here.
         *
@@ -1297,140 +1891,61 @@ workqueue_add_timer(struct workqueue *wq, thread_call_t thread_call_self)
 
                workqueue_lock_spin(wq);
        }
+       /*
+        * Make _workqueue_mark_exiting() wait for this timer to get out of
+        * the way before tearing the workqueue down
+        */
        wq->wq_lflags |= WQL_ATIMER_BUSY;
 
        /*
         * Decide which timer we are and remove the RUNNING flag.
         */
        if (thread_call_self == wq->wq_atimer_delayed_call) {
-               if ((wq->wq_flags & WQ_ATIMER_DELAYED_RUNNING) == 0) {
-                       panic("workqueue_add_timer is the delayed timer but the delayed running flag isn't set");
+               uint64_t wq_flags = _wq_flags_and_orig(wq, ~WQ_ATIMER_DELAYED_RUNNING);
+               if ((wq_flags & WQ_ATIMER_DELAYED_RUNNING) == 0) {
+                       panic("workqueue_add_timer(delayed) w/o WQ_ATIMER_DELAYED_RUNNING");
                }
-               WQ_UNSETFLAG(wq, WQ_ATIMER_DELAYED_RUNNING);
        } else if (thread_call_self == wq->wq_atimer_immediate_call) {
-               if ((wq->wq_flags & WQ_ATIMER_IMMEDIATE_RUNNING) == 0) {
-                       panic("workqueue_add_timer is the immediate timer but the immediate running flag isn't set");
+               uint64_t wq_flags = _wq_flags_and_orig(wq, ~WQ_ATIMER_IMMEDIATE_RUNNING);
+               if ((wq_flags & WQ_ATIMER_IMMEDIATE_RUNNING) == 0) {
+                       panic("workqueue_add_timer(immediate) w/o WQ_ATIMER_IMMEDIATE_RUNNING");
                }
-               WQ_UNSETFLAG(wq, WQ_ATIMER_IMMEDIATE_RUNNING);
        } else {
                panic("workqueue_add_timer can't figure out which timer it is");
        }
 
-again:
-       retval = TRUE;
-       if ( !(wq->wq_flags & WQ_EXITING)) {
-               boolean_t add_thread = FALSE;
-               /*
-                * check to see if the stall frequency was beyond our tolerance
-                * or we have work on the queue, but haven't scheduled any
-                * new work within our acceptable time interval because
-                * there were no idle threads left to schedule
-                */
-               if (wq->wq_reqcount) {
-                       uint32_t        priclass = 0;
-                       uint32_t        thactive_count = 0;
-                       uint64_t        curtime = mach_absolute_time();
-                       uint64_t        busycount = 0;
-
-                       if (wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] &&
-                               wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0){
-                               priclass = WORKQUEUE_EVENT_MANAGER_BUCKET;
-                       } else {
-                               for (priclass = 0; priclass < WORKQUEUE_NUM_BUCKETS; priclass++) {
-                                       if (wq->wq_requests[priclass])
-                                               break;
-                               }
-                       }
+       int ret = WQ_RUN_TR_THREAD_STARTED;
+       while (ret == WQ_RUN_TR_THREAD_STARTED && wq->wq_reqcount) {
+               ret = workqueue_run_threadreq_and_unlock(p, wq, NULL, NULL, true);
 
-                       if (priclass < WORKQUEUE_EVENT_MANAGER_BUCKET){
-                               /*
-                                * Compute a metric for many how many threads are active.  We
-                                * find the highest priority request outstanding and then add up
-                                * the number of active threads in that and all higher-priority
-                                * buckets.  We'll also add any "busy" threads which are not
-                                * active but blocked recently enough that we can't be sure
-                                * they've gone idle yet.  We'll then compare this metric to our
-                                * max concurrency to decide whether to add a new thread.
-                                */
-                               for (uint32_t i = 0; i <= priclass; i++) {
-                                       thactive_count += wq->wq_thactive_count[i];
-
-                                       if (wq->wq_thscheduled_count[i] < wq->wq_thactive_count[i]) {
-                                               if (wq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i]))
-                                                       busycount++;
-                                       }
-                               }
-                       }
-
-                       if (thactive_count + busycount < wq->wq_max_concurrency ||
-                               priclass == WORKQUEUE_EVENT_MANAGER_BUCKET) {
-
-                               if (wq->wq_thidlecount == 0) {
-                                       /*
-                                        * if we have no idle threads, try to add one
-                                        */
-                                       retval = workqueue_addnewthread(wq, priclass == WORKQUEUE_EVENT_MANAGER_BUCKET);
-                               }
-                               add_thread = TRUE;
-                       }
-
-                       if (wq->wq_reqcount) {
-                               /*
-                                * as long as we have threads to schedule, and we successfully
-                                * scheduled new work, keep trying
-                                */
-                               while (wq->wq_thidlecount && !(wq->wq_flags & WQ_EXITING)) {
-                                       /*
-                                        * workqueue_run_nextreq is responsible for
-                                        * dropping the workqueue lock in all cases
-                                        */
-                                       retval = (workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_ADD_TIMER, 0, false) != THREAD_NULL);
-                                       workqueue_lock_spin(wq);
-
-                                       if (retval == FALSE)
-                                               break;
-                               }
-                               if ( !(wq->wq_flags & WQ_EXITING) && wq->wq_reqcount) {
-
-                                       if (wq->wq_thidlecount == 0 && retval == TRUE && add_thread == TRUE)
-                                               goto again;
-
-                                       if (wq->wq_thidlecount == 0 || busycount) {
-                                               start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
-                                       }
-
-                                       PTHREAD_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_NONE, wq, wq->wq_reqcount, wq->wq_thidlecount, busycount, 0);
-                               }
-                       }
-               }
+               workqueue_lock_spin(wq);
        }
+       _threadreq_copy_prepare(wq);
 
        /*
         * If we called WQ_TIMER_DELAYED_NEEDED above, then this flag will be
         * set if that call marked the timer running.  If so, we let the timer
         * interval grow.  Otherwise, we reset it back to 0.
         */
-       if (!(wq->wq_flags & WQ_ATIMER_DELAYED_RUNNING)) {
+       uint32_t wq_flags = _wq_flags(wq);
+       if (!(wq_flags & WQ_ATIMER_DELAYED_RUNNING)) {
                wq->wq_timer_interval = 0;
        }
 
        wq->wq_lflags &= ~WQL_ATIMER_BUSY;
 
-       if ((wq->wq_flags & WQ_EXITING) || (wq->wq_lflags & WQL_ATIMER_WAITING)) {
+       if ((wq_flags & WQ_EXITING) || (wq->wq_lflags & WQL_ATIMER_WAITING)) {
                /*
-                * wakeup the thread hung up in _workqueue_mark_exiting or workqueue_add_timer waiting for this timer
-                * to finish getting out of the way
+                * wakeup the thread hung up in _workqueue_mark_exiting or
+                * workqueue_add_timer waiting for this timer to finish getting out of
+                * the way
                 */
                wq->wq_lflags &= ~WQL_ATIMER_WAITING;
                wakeup(wq);
        }
 
-       PTHREAD_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, start_timer, wq->wq_nthreads, wq->wq_thidlecount, 0);
+       PTHREAD_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, 0, wq->wq_nthreads, wq->wq_thidlecount, 0);
 
        workqueue_unlock(wq);
-
-       if (start_timer == TRUE)
-               workqueue_interval_timer_start(wq);
 }
 
 #pragma mark thread state tracking
@@ -1439,132 +1954,76 @@ again:
 void
 _workqueue_thread_yielded(void)
 {
-       struct workqueue *wq;
-       proc_t p;
-
-       p = current_proc();
-
-       if ((wq = pthread_kern->proc_get_wqptr(p)) == NULL || wq->wq_reqcount == 0)
-               return;
-
-       workqueue_lock_spin(wq);
-
-       if (wq->wq_reqcount) {
-               uint64_t        curtime;
-               uint64_t        elapsed;
-               clock_sec_t     secs;
-               clock_usec_t    usecs;
-
-               if (wq->wq_thread_yielded_count++ == 0)
-                       wq->wq_thread_yielded_timestamp = mach_absolute_time();
-
-               if (wq->wq_thread_yielded_count < wq_yielded_threshold) {
-                       workqueue_unlock(wq);
-                       return;
-               }
-
-               PTHREAD_TRACE_WQ(TRACE_wq_thread_yielded | DBG_FUNC_START, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 0, 0);
-
-               wq->wq_thread_yielded_count = 0;
-
-               curtime = mach_absolute_time();
-               elapsed = curtime - wq->wq_thread_yielded_timestamp;
-               pthread_kern->absolutetime_to_microtime(elapsed, &secs, &usecs);
-
-               if (secs == 0 && usecs < wq_yielded_window_usecs) {
-
-                       if (wq->wq_thidlecount == 0) {
-                               workqueue_addnewthread(wq, TRUE);
-                               /*
-                                * 'workqueue_addnewthread' drops the workqueue lock
-                                * when creating the new thread and then retakes it before
-                                * returning... this window allows other threads to process
-                                * requests, so we need to recheck for available work
-                                * if none found, we just return...  the newly created thread
-                                * will eventually get used (if it hasn't already)...
-                                */
-                               if (wq->wq_reqcount == 0) {
-                                       workqueue_unlock(wq);
-                                       return;
-                               }
-                       }
-                       if (wq->wq_thidlecount) {
-                               (void)workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_UNCONSTRAINED, 0, false);
-                               /*
-                                * workqueue_run_nextreq is responsible for
-                                * dropping the workqueue lock in all cases
-                                */
-                               PTHREAD_TRACE_WQ(TRACE_wq_thread_yielded | DBG_FUNC_END, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 1, 0);
-
-                               return;
-                       }
-               }
-               PTHREAD_TRACE_WQ(TRACE_wq_thread_yielded | DBG_FUNC_END, wq, wq->wq_thread_yielded_count, wq->wq_reqcount, 2, 0);
-       }
-       workqueue_unlock(wq);
 }
 
 static void
 workqueue_callback(int type, thread_t thread)
 {
-       struct uthread    *uth;
-       struct threadlist *tl;
-       struct workqueue  *wq;
-
-       uth = pthread_kern->get_bsdthread_info(thread);
-       tl = pthread_kern->uthread_get_threadlist(uth);
-       wq = tl->th_workq;
+       struct uthread *uth = pthread_kern->get_bsdthread_info(thread);
+       struct threadlist *tl = pthread_kern->uthread_get_threadlist(uth);
+       struct workqueue *wq = tl->th_workq;
+       uint32_t old_count, req_qos, qos = tl->th_priority;
+       wq_thactive_t old_thactive;
 
        switch (type) {
        case SCHED_CALL_BLOCK: {
-               uint32_t        old_activecount;
-               boolean_t       start_timer = FALSE;
-
-               old_activecount = OSAddAtomic(-1, &wq->wq_thactive_count[tl->th_priority]);
+               bool start_timer = false;
 
-               /*
-                * If we blocked and were at the requested concurrency previously, we may
-                * need to spin up a new thread.  Of course, if it's the event manager
-                * then that's moot, so ignore that case.
-                */
-               if (old_activecount == wq->wq_reqconc[tl->th_priority] &&
-                       tl->th_priority != WORKQUEUE_EVENT_MANAGER_BUCKET) {
-                       uint64_t        curtime;
-                       UInt64          *lastblocked_ptr;
+               old_thactive = _wq_thactive_dec(wq, tl->th_priority);
+               req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
+               old_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
+                               qos, NULL, NULL);
 
+               if (old_count == wq_max_concurrency[tl->th_priority]) {
                        /*
-                        * the number of active threads at this priority
-                        * has fallen below the maximum number of concurrent
-                        * threads that we're allowed to run
+                        * The number of active threads at this priority has
+                        * fallen below the maximum number of concurrent threads
+                        * that are allowed to run.
+                        *
+                        * If we collide with another thread trying to update the
+                        * last_blocked timestamp (really unlikely, since another
+                        * thread would have to get scheduled and then block after
+                        * we start down this path), it's not a problem.  Either
+                        * timestamp is adequate, so there is no need to retry.
                         */
-                       lastblocked_ptr = (UInt64 *)&wq->wq_lastblocked_ts[tl->th_priority];
-                       curtime = mach_absolute_time();
+                       atomic_store_explicit(&wq->wq_lastblocked_ts[qos],
+                                       mach_absolute_time(), memory_order_relaxed);
+               }
 
+               if (req_qos == WORKQUEUE_EVENT_MANAGER_BUCKET || qos > req_qos) {
                        /*
-                        * if we collide with another thread trying to update the last_blocked (really unlikely
-                        * since another thread would have to get scheduled and then block after we start down
-                        * this path), it's not a problem.  Either timestamp is adequate, so no need to retry
+                        * The blocking thread is at a lower QoS than the highest currently
+                        * pending constrained request, nothing has to be redriven
                         */
-
-                       OSCompareAndSwap64(*lastblocked_ptr, (UInt64)curtime, lastblocked_ptr);
-
-                       if (wq->wq_reqcount) {
-                               /*
-                                * We have work to do so start up the timer if it's not
-                                * running; it'll sort out whether we need to start another
-                                * thread
-                                */
-                               start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
-                       }
-
-                       if (start_timer == TRUE) {
-                               workqueue_interval_timer_start(wq);
+               } else {
+                       uint32_t max_busycount, old_req_count;
+                       old_req_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
+                                       req_qos, NULL, &max_busycount);
+                       /*
+                        * If it is possible that may_start_constrained_thread
+                        * refused admission due to being over the max
+                        * concurrency, we may need to spin up a new thread.
+                        *
+                        * We take into account the maximum number of busy threads
+                        * that can affect may_start_constrained_thread, since
+                        * reading the actual number may_start_constrained_thread
+                        * will see is racy from here.
+                        *
+                        * In other words, at NCPU = 4, for IN (req_qos = 1), if
+                        * the old req count is between NCPU (4) and NCPU - 2 (2),
+                        * we need to redrive.
+                        */
+                       if (wq_max_concurrency[req_qos] <= old_req_count + max_busycount &&
+                                       old_req_count <= wq_max_concurrency[req_qos]) {
+                               if (WQ_TIMER_DELAYED_NEEDED(wq)) {
+                                       start_timer = true;
+                                       workqueue_interval_timer_start(wq);
+                               }
                        }
                }
-               PTHREAD_TRACE1_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq, old_activecount, tl->th_priority, start_timer, thread_tid(thread));
+
+               PTHREAD_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq,
+                               old_count - 1, qos | (req_qos << 8),
+                               wq->wq_reqcount << 1 | start_timer, 0);
                break;
        }
-       case SCHED_CALL_UNBLOCK:
+       case SCHED_CALL_UNBLOCK: {
                /*
                 * we cannot take the workqueue_lock here...
                 * an UNBLOCK can occur from a timer event which
@@ -1573,12 +2032,18 @@ workqueue_callback(int type, thread_t thread)
                 * the thread lock for the thread being UNBLOCKED
                 * is also held
                 */
-               OSAddAtomic(1, &wq->wq_thactive_count[tl->th_priority]);
-
-               PTHREAD_TRACE1_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq, wq->wq_threads_scheduled, tl->th_priority, 0, thread_tid(thread));
-
+               old_thactive = _wq_thactive_inc(wq, qos);
+               if (pthread_debug_tracing) {
+                       req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
+                       old_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
+                                       qos, NULL, NULL);
+                       PTHREAD_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq,
+                                       old_count + 1, qos | (req_qos << 8),
+                                       wq->wq_threads_scheduled, 0);
+               }
                break;
        }
+       }
 }
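
/*
 * Worked check of the redrive window described above, with assumed
 * values NCPU = 4 (the max concurrency for req_qos) and max_busycount = 2:
 * redrive when old_req_count falls in [NCPU - max_busycount, NCPU] = [2, 4].
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        const uint32_t max_conc = 4, max_busycount = 2;

        for (uint32_t old_req_count = 0; old_req_count <= 6; old_req_count++) {
                bool redrive = max_conc <= old_req_count + max_busycount &&
                                old_req_count <= max_conc;
                printf("old_req_count=%u -> redrive=%d\n", old_req_count, redrive);
        }
        return 0;
}
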
 
 sched_call_t
@@ -1652,17 +2117,13 @@ workqueue_removethread(struct threadlist *tl, bool fromexit, bool first_use)
                        (void)mach_vm_deallocate(wq->wq_map, tl->th_stackaddr, _workqueue_allocsize(wq));
                }
                (void)pthread_kern->mach_port_deallocate(pthread_kern->task_get_ipcspace(wq->wq_task), tl->th_thport);
-
-       } else {
-
-               PTHREAD_TRACE1_WQ(TRACE_wq_thread_park | DBG_FUNC_END, wq, (uintptr_t)thread_tid(current_thread()), wq->wq_nthreads, 0xdead, thread_tid(tl->th_thread));
        }
        /*
         * drop our ref on the thread
         */
        thread_deallocate(tl->th_thread);
 
-       kfree(tl, sizeof(struct threadlist));
+       zfree(pthread_zone_threadlist, tl);
 }
 
 
@@ -1673,55 +2134,26 @@ workqueue_removethread(struct threadlist *tl, bool fromexit, bool first_use)
  * - dropped and retaken around thread creation
  * - return with workq lock held
  */
-static boolean_t
-workqueue_addnewthread(struct workqueue *wq, boolean_t ignore_constrained_thread_limit)
+static bool
+workqueue_addnewthread(proc_t p, struct workqueue *wq)
 {
-       struct threadlist *tl;
-       struct uthread  *uth;
-       kern_return_t   kret;
-       thread_t        th;
-       proc_t          p;
-       void            *sright;
-       mach_vm_offset_t stackaddr;
-
-       if ((wq->wq_flags & WQ_EXITING) == WQ_EXITING) {
-               PTHREAD_TRACE_WQ(TRACE_wq_thread_add_during_exit | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
-               return (FALSE);
-       }
-
-       if (wq->wq_nthreads >= wq_max_threads) {
-               PTHREAD_TRACE_WQ(TRACE_wq_thread_limit_exceeded | DBG_FUNC_NONE, wq, wq->wq_nthreads, wq_max_threads, 0, 0);
-               return (FALSE);
-       }
-
-       if (ignore_constrained_thread_limit == FALSE &&
-               wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
-               /*
-                * If we're not creating this thread to service an overcommit or
-                * event manager request, then we check to see if we are over our
-                * constrained thread limit, in which case we error out.
-                */
-               PTHREAD_TRACE_WQ(TRACE_wq_thread_constrained_maxed | DBG_FUNC_NONE, wq, wq->wq_constrained_threads_scheduled,
-                               wq_max_constrained_threads, 0, 0);
-               return (FALSE);
-       }
+       kern_return_t kret;
 
        wq->wq_nthreads++;
 
-       p = wq->wq_proc;
        workqueue_unlock(wq);
 
-       tl = kalloc(sizeof(struct threadlist));
+       struct threadlist *tl = zalloc(pthread_zone_threadlist);
        bzero(tl, sizeof(struct threadlist));
 
+       thread_t th;
        kret = pthread_kern->thread_create_workq_waiting(wq->wq_task, wq_unpark_continue, tl, &th);
        if (kret != KERN_SUCCESS) {
                PTHREAD_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 0, 0, 0);
-               kfree(tl, sizeof(struct threadlist));
-               goto failed;
+               goto fail_free;
        }
 
-       stackaddr = pthread_kern->proc_get_stack_addr_hint(p);
+       mach_vm_offset_t stackaddr = pthread_kern->proc_get_stack_addr_hint(p);
 
        mach_vm_size_t guardsize = vm_map_page_size(wq->wq_map);
        mach_vm_size_t pthread_size =
@@ -1735,61 +2167,86 @@ workqueue_addnewthread(struct workqueue *wq, boolean_t ignore_constrained_thread
                        VM_INHERIT_DEFAULT);
 
        if (kret != KERN_SUCCESS) {
-               PTHREAD_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 1, 0, 0);
-
                kret = mach_vm_allocate(wq->wq_map,
                                &stackaddr, th_allocsize,
                                VM_MAKE_TAG(VM_MEMORY_STACK) | VM_FLAGS_ANYWHERE);
        }
-       if (kret == KERN_SUCCESS) {
-               /*
-                * The guard page is at the lowest address
-                * The stack base is the highest address
-                */
-               kret = mach_vm_protect(wq->wq_map, stackaddr, guardsize, FALSE, VM_PROT_NONE);
 
-               if (kret != KERN_SUCCESS) {
-                       (void) mach_vm_deallocate(wq->wq_map, stackaddr, th_allocsize);
-                       PTHREAD_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 2, 0, 0);
-               }
-       }
        if (kret != KERN_SUCCESS) {
-               (void) thread_terminate(th);
-               thread_deallocate(th);
-
-               kfree(tl, sizeof(struct threadlist));
-               goto failed;
+               PTHREAD_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 1, 0, 0);
+               goto fail_terminate;
        }
-       thread_reference(th);
 
-       pthread_kern->thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
+       /*
+        * The guard page is at the lowest address
+        * The stack base is the highest address
+        */
+       kret = mach_vm_protect(wq->wq_map, stackaddr, guardsize, FALSE, VM_PROT_NONE);
+       if (kret != KERN_SUCCESS) {
+               PTHREAD_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 2, 0, 0);
+               goto fail_vm_deallocate;
+       }
 
-       sright = (void *)pthread_kern->convert_thread_to_port(th);
-       tl->th_thport = pthread_kern->ipc_port_copyout_send(sright, pthread_kern->task_get_ipcspace(wq->wq_task));
 
+       pthread_kern->thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
        pthread_kern->thread_static_param(th, TRUE);
 
-       tl->th_flags = TH_LIST_INITED | TH_LIST_NEW;
+       /*
+        * convert_thread_to_port() consumes a reference
+        */
+       thread_reference(th);
+       void *sright = (void *)pthread_kern->convert_thread_to_port(th);
+       tl->th_thport = pthread_kern->ipc_port_copyout_send(sright,
+                       pthread_kern->task_get_ipcspace(wq->wq_task));
 
+       tl->th_flags = TH_LIST_INITED | TH_LIST_NEW;
        tl->th_thread = th;
        tl->th_workq = wq;
        tl->th_stackaddr = stackaddr;
        tl->th_priority = WORKQUEUE_NUM_BUCKETS;
 
-       uth = pthread_kern->get_bsdthread_info(tl->th_thread);
+       struct uthread *uth;
+       uth = pthread_kern->get_bsdthread_info(tl->th_thread);
+
+       workqueue_lock_spin(wq);
+
+       void *current_tl = pthread_kern->uthread_get_threadlist(uth);
+       if (current_tl == NULL) {
+               pthread_kern->uthread_set_threadlist(uth, tl);
+               TAILQ_INSERT_TAIL(&wq->wq_thidlelist, tl, th_entry);
+               wq->wq_thidlecount++;
+       } else if (current_tl == WQ_THREADLIST_EXITING_POISON) {
+               /*
+                * Failed thread creation race: The thread already woke up and has exited.
+                */
+               PTHREAD_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq, kret, 3, 0, 0);
+               goto fail_unlock;
+       } else {
+               panic("Unexpected initial threadlist value");
+       }
+
+       PTHREAD_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
 
-       workqueue_lock_spin(wq);
+       return (TRUE);
 
-       pthread_kern->uthread_set_threadlist(uth, tl);
-       TAILQ_INSERT_TAIL(&wq->wq_thidlelist, tl, th_entry);
+fail_unlock:
+       workqueue_unlock(wq);
+       (void)pthread_kern->mach_port_deallocate(pthread_kern->task_get_ipcspace(wq->wq_task),
+                       tl->th_thport);
 
-       wq->wq_thidlecount++;
+fail_vm_deallocate:
+       (void) mach_vm_deallocate(wq->wq_map, stackaddr, th_allocsize);
 
-       PTHREAD_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0, 0);
+fail_terminate:
+       if (pthread_kern->thread_will_park_or_terminate) {
+               pthread_kern->thread_will_park_or_terminate(th);
+       }
+       (void)thread_terminate(th);
+       thread_deallocate(th);
 
-       return (TRUE);
+fail_free:
+       zfree(pthread_zone_threadlist, tl);
 
-failed:
        workqueue_lock_spin(wq);
        wq->wq_nthreads--;
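
/*
 * Sketch of the staged goto-unwind idiom used above: each failure label
 * releases exactly what was acquired before the jump, in reverse order.
 * The plain allocations here stand in for the thread, stack, and port
 * that workqueue_addnewthread sets up.
 */
#include <stdbool.h>
#include <stdlib.h>

static bool
create_staged(void)
{
        char *stack = NULL, *port = NULL;

        char *thread = malloc(16);
        if (thread == NULL)
                goto fail;

        stack = malloc(16);
        if (stack == NULL)
                goto fail_free_thread;

        port = malloc(16);
        if (port == NULL)
                goto fail_free_stack;

        /* success: hand the resources off to their owner here */
        free(port);
        free(stack);
        free(thread);
        return true;

fail_free_stack:
        free(stack);
fail_free_thread:
        free(thread);
fail:
        return false;
}
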
 
@@ -1803,9 +2260,7 @@ int
 _workq_open(struct proc *p, __unused int32_t *retval)
 {
        struct workqueue * wq;
-       int wq_size;
        char * ptr;
-       uint32_t i;
        uint32_t num_cpus;
        int error = 0;
 
@@ -1829,6 +2284,9 @@ _workq_open(struct proc *p, __unused int32_t *retval)
 
                wq_init_constrained_limit = 0;
 
+               if (wq_max_threads > WQ_THACTIVE_BUCKET_HALF) {
+                       wq_max_threads = WQ_THACTIVE_BUCKET_HALF;
+               }
                if (wq_max_threads > pthread_kern->config_thread_max - 20) {
                        wq_max_threads = pthread_kern->config_thread_max - 20;
                }
@@ -1840,31 +2298,24 @@ _workq_open(struct proc *p, __unused int32_t *retval)
                        goto out;
                }
 
-               wq_size = sizeof(struct workqueue);
-
-               ptr = (char *)kalloc(wq_size);
-               bzero(ptr, wq_size);
+               ptr = (char *)zalloc(pthread_zone_workqueue);
+               bzero(ptr, sizeof(struct workqueue));
 
                wq = (struct workqueue *)ptr;
-               wq->wq_flags = WQ_LIST_INITED;
                wq->wq_proc = p;
-               wq->wq_max_concurrency = wq_max_concurrency;
                wq->wq_task = current_task();
                wq->wq_map  = pthread_kern->current_map();
 
-               for (i = 0; i < WORKQUEUE_NUM_BUCKETS; i++)
-                       wq->wq_reqconc[i] = (uint16_t)wq->wq_max_concurrency;
-
-               // The event manager bucket is special, so its gets a concurrency of 1
-               // though we shouldn't ever read this value for that bucket
-               wq->wq_reqconc[WORKQUEUE_EVENT_MANAGER_BUCKET] = 1;
-
                // Start the event manager at the priority hinted at by the policy engine
                int mgr_priority_hint = pthread_kern->task_get_default_manager_qos(current_task());
                wq->wq_event_manager_priority = (uint32_t)thread_qos_get_pthread_priority(mgr_priority_hint) | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
 
                TAILQ_INIT(&wq->wq_thrunlist);
                TAILQ_INIT(&wq->wq_thidlelist);
+               for (int i = 0; i < WORKQUEUE_EVENT_MANAGER_BUCKET; i++) {
+                       TAILQ_INIT(&wq->wq_overcommit_reqlist[i]);
+                       TAILQ_INIT(&wq->wq_reqlist[i]);
+               }
 
                wq->wq_atimer_delayed_call =
                                thread_call_allocate_with_priority((thread_call_func_t)workqueue_add_timer,
@@ -1875,6 +2326,11 @@ _workq_open(struct proc *p, __unused int32_t *retval)
 
                lck_spin_init(&wq->wq_lock, pthread_lck_grp, pthread_lck_attr);
 
+               wq->wq_cached_threadreq = zalloc(pthread_zone_threadreq);
+               *(wq_thactive_t *)&wq->wq_thactive =
+                               (wq_thactive_t)WQ_THACTIVE_NO_PENDING_REQUEST <<
+                               WQ_THACTIVE_QOS_SHIFT;
+
                pthread_kern->proc_set_wqptr(p, wq);
 
        }
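
/*
 * Illustrative packing of per-QoS active-thread counts into one atomic
 * word, in the spirit of the wq_thactive initialization above.  The
 * 8-bit field width and layout are assumptions for the sketch, not the
 * kernel's actual encoding.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define BUCKET_BITS     8
#define BUCKET_SHIFT(q) ((q) * BUCKET_BITS)

static uint64_t
thactive_inc(_Atomic uint64_t *thactive, int qos)
{
        /* one atomic add bumps exactly one bucket's counter */
        return atomic_fetch_add_explicit(thactive,
                        UINT64_C(1) << BUCKET_SHIFT(qos), memory_order_relaxed);
}

static uint32_t
thactive_count(uint64_t v, int qos)
{
        return (v >> BUCKET_SHIFT(qos)) & 0xff;
}

int
main(void)
{
        _Atomic uint64_t thactive = 0;
        thactive_inc(&thactive, 2);
        thactive_inc(&thactive, 2);
        printf("qos2 active: %u\n",
                        thactive_count(atomic_load(&thactive), 2));
        return 0;
}
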
@@ -1895,51 +2351,70 @@ void
 _workqueue_mark_exiting(struct proc *p)
 {
        struct workqueue *wq = pthread_kern->proc_get_wqptr(p);
+       if (!wq) return;
 
-       if (wq != NULL) {
-
-               PTHREAD_TRACE_WQ(TRACE_wq_pthread_exit|DBG_FUNC_START, wq, 0, 0, 0, 0);
+       PTHREAD_TRACE_WQ(TRACE_wq_pthread_exit|DBG_FUNC_START, wq, 0, 0, 0, 0);
 
-               workqueue_lock_spin(wq);
+       workqueue_lock_spin(wq);
 
-               /*
-                * We arm the add timer without holding the workqueue lock so we need
-                * to synchronize with any running or soon to be running timers.
-                *
-                * Threads that intend to arm the timer atomically OR
-                * WQ_ATIMER_{DELAYED,IMMEDIATE}_RUNNING into the wq_flags, only if
-                * WQ_EXITING is not present.  So, once we have set WQ_EXITING, we can
-                * be sure that no new RUNNING flags will be set, but still need to
-                * wait for the already running timers to complete.
-                *
-                * We always hold the workq lock when dropping WQ_ATIMER_RUNNING, so
-                * the check for and sleep until clear is protected.
-                */
-               WQ_SETFLAG(wq, WQ_EXITING);
+       /*
+        * We arm the add timer without holding the workqueue lock so we need
+        * to synchronize with any running or soon to be running timers.
+        *
+        * Threads that intend to arm the timer atomically OR
+        * WQ_ATIMER_{DELAYED,IMMEDIATE}_RUNNING into the wq_flags, only if
+        * WQ_EXITING is not present.  So, once we have set WQ_EXITING, we can
+        * be sure that no new RUNNING flags will be set, but still need to
+        * wait for the already running timers to complete.
+        *
+        * We always hold the workq lock when dropping WQ_ATIMER_RUNNING, so
+        * the check for and sleep until clear is protected.
+        */
+       uint64_t wq_flags = _wq_flags_or_orig(wq, WQ_EXITING);
 
-               if (wq->wq_flags & WQ_ATIMER_DELAYED_RUNNING) {
-                       if (thread_call_cancel(wq->wq_atimer_delayed_call) == TRUE) {
-                               WQ_UNSETFLAG(wq, WQ_ATIMER_DELAYED_RUNNING);
-                       }
+       if (wq_flags & WQ_ATIMER_DELAYED_RUNNING) {
+               if (thread_call_cancel(wq->wq_atimer_delayed_call) == TRUE) {
+                       wq_flags = _wq_flags_and_orig(wq, ~WQ_ATIMER_DELAYED_RUNNING);
                }
-               if (wq->wq_flags & WQ_ATIMER_IMMEDIATE_RUNNING) {
-                       if (thread_call_cancel(wq->wq_atimer_immediate_call) == TRUE) {
-                               WQ_UNSETFLAG(wq, WQ_ATIMER_IMMEDIATE_RUNNING);
-                       }
+       }
+       if (wq_flags & WQ_ATIMER_IMMEDIATE_RUNNING) {
+               if (thread_call_cancel(wq->wq_atimer_immediate_call) == TRUE) {
+                       wq_flags = _wq_flags_and_orig(wq, ~WQ_ATIMER_IMMEDIATE_RUNNING);
                }
-               while (wq->wq_flags & (WQ_ATIMER_DELAYED_RUNNING | WQ_ATIMER_IMMEDIATE_RUNNING) ||
-                               (wq->wq_lflags & WQL_ATIMER_BUSY)) {
-                       assert_wait((caddr_t)wq, (THREAD_UNINT));
-                       workqueue_unlock(wq);
+       }
+       while ((_wq_flags(wq) & (WQ_ATIMER_DELAYED_RUNNING | WQ_ATIMER_IMMEDIATE_RUNNING)) ||
+                       (wq->wq_lflags & WQL_ATIMER_BUSY)) {
+               assert_wait((caddr_t)wq, (THREAD_UNINT));
+               workqueue_unlock(wq);
 
-                       thread_block(THREAD_CONTINUE_NULL);
+               thread_block(THREAD_CONTINUE_NULL);
 
-                       workqueue_lock_spin(wq);
-               }
-               workqueue_unlock(wq);
+               workqueue_lock_spin(wq);
+       }
+
+       /*
+        * Save off pending requests, will complete/free them below after unlocking
+        */
+       TAILQ_HEAD(, threadreq) local_list = TAILQ_HEAD_INITIALIZER(local_list);
+
+       for (int i = 0; i < WORKQUEUE_EVENT_MANAGER_BUCKET; i++) {
+               TAILQ_CONCAT(&local_list, &wq->wq_overcommit_reqlist[i], tr_entry);
+               TAILQ_CONCAT(&local_list, &wq->wq_reqlist[i], tr_entry);
+       }
+
+       /*
+        * XXX: We can't do a deferred cancel of the event manager request,
+        * so just smash it.
+        */
+       assert((wq->wq_event_manager_threadreq.tr_flags & TR_FLAG_WORKLOOP) == 0);
+       wq->wq_event_manager_threadreq.tr_state = TR_STATE_DEAD;
+
+       workqueue_unlock(wq);
 
-               PTHREAD_TRACE(TRACE_wq_pthread_exit|DBG_FUNC_END, 0, 0, 0, 0, 0);
+       struct threadreq *tr, *tr_temp;
+       TAILQ_FOREACH_SAFE(tr, &local_list, tr_entry, tr_temp) {
+               _threadreq_cancel(wq, tr);
        }
+       PTHREAD_TRACE(TRACE_wq_pthread_exit|DBG_FUNC_END, 0, 0, 0, 0, 0);
 }
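
/*
 * Sketch of the "splice under the lock, cancel after unlocking" pattern
 * above, assuming a BSD-style <sys/queue.h> that provides TAILQ_CONCAT
 * and TAILQ_FOREACH_SAFE.  Moving the pending requests onto a local list
 * is O(1), so the heavier per-request cleanup runs without the lock held.
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdlib.h>

struct req { TAILQ_ENTRY(req) entries; };
TAILQ_HEAD(req_head, req);

static void
drain(struct req_head *pending, pthread_mutex_t *lock)
{
        struct req_head local = TAILQ_HEAD_INITIALIZER(local);

        pthread_mutex_lock(lock);
        TAILQ_CONCAT(&local, pending, entries); /* pending is now empty */
        pthread_mutex_unlock(lock);

        struct req *r, *tmp;
        TAILQ_FOREACH_SAFE(r, &local, entries, tmp) {
                free(r);                        /* cleanup outside the lock */
        }
}
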
 
 /*
@@ -1957,7 +2432,6 @@ _workqueue_exit(struct proc *p)
        struct workqueue  * wq;
        struct threadlist  * tl, *tlist;
        struct uthread  *uth;
-       size_t wq_size = sizeof(struct workqueue);
 
        wq = pthread_kern->proc_get_wqptr(p);
        if (wq != NULL) {
@@ -1986,7 +2460,7 @@ _workqueue_exit(struct proc *p)
                         */
                        thread_deallocate(tl->th_thread);
 
-                       kfree(tl, sizeof(struct threadlist));
+                       zfree(pthread_zone_threadlist, tl);
                }
                TAILQ_FOREACH_SAFE(tl, &wq->wq_thidlelist, th_entry, tlist) {
                        assert((tl->th_flags & TH_LIST_RUNNING) == 0);
@@ -1998,11 +2472,19 @@ _workqueue_exit(struct proc *p)
                        assert(tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET);
                        workqueue_removethread(tl, true, false);
                }
+               if (wq->wq_cached_threadreq) {
+                       zfree(pthread_zone_threadreq, wq->wq_cached_threadreq);
+               }
                thread_call_free(wq->wq_atimer_delayed_call);
                thread_call_free(wq->wq_atimer_immediate_call);
                lck_spin_destroy(&wq->wq_lock, pthread_lck_grp);
 
-               kfree(wq, wq_size);
+               for (int i = 0; i < WORKQUEUE_EVENT_MANAGER_BUCKET; i++) {
+                       assert(TAILQ_EMPTY(&wq->wq_overcommit_reqlist[i]));
+                       assert(TAILQ_EMPTY(&wq->wq_reqlist[i]));
+               }
+
+               zfree(pthread_zone_workqueue, wq);
 
                PTHREAD_TRACE(TRACE_wq_workqueue_exit|DBG_FUNC_END, 0, 0, 0, 0, 0);
        }
@@ -2011,91 +2493,53 @@ _workqueue_exit(struct proc *p)
 
 #pragma mark workqueue thread manipulation
 
+
 /**
  * Entry point for libdispatch to ask for threads
  */
-static int wqops_queue_reqthreads(struct proc *p, int reqcount, pthread_priority_t priority){
-       struct workqueue *wq;
-       boolean_t start_timer = FALSE;
-
-       boolean_t overcommit = (_pthread_priority_get_flags(priority) & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) != 0;
-       int class = pthread_priority_get_class_index(priority);
-
-       boolean_t event_manager = (_pthread_priority_get_flags(priority) & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) != 0;
-       if (event_manager){
-               class = WORKQUEUE_EVENT_MANAGER_BUCKET;
-       }
+static int
+wqops_queue_reqthreads(struct proc *p, int reqcount,
+               pthread_priority_t priority)
+{
+       bool overcommit = _pthread_priority_get_flags(priority) & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
+       bool event_manager = _pthread_priority_get_flags(priority) & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
+       int class = event_manager ? WORKQUEUE_EVENT_MANAGER_BUCKET :
+                       pthread_priority_get_class_index(priority);
 
-       if ((reqcount <= 0) || (class < 0) || (class >= WORKQUEUE_NUM_BUCKETS) || (overcommit && event_manager)) {
+       if ((reqcount <= 0) || (class < 0) || (class >= WORKQUEUE_NUM_BUCKETS) ||
+                       (overcommit && event_manager)) {
                return EINVAL;
        }
-       
 
+       struct workqueue *wq;
        if ((wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p)) == NULL) {
                return EINVAL;
        }
 
        workqueue_lock_spin(wq);
-       
-       if (overcommit == 0 && event_manager == 0) {
-               wq->wq_reqcount += reqcount;
-               wq->wq_requests[class] += reqcount;
-               
-               PTHREAD_TRACE_WQ(TRACE_wq_req_threads | DBG_FUNC_NONE, wq, priority, wq->wq_requests[class], reqcount, 0);
-               
-               while (wq->wq_reqcount) {
-                       if (!workqueue_run_one(p, wq, overcommit, 0))
-                               break;
-               }
-       } else if (overcommit) {
-               PTHREAD_TRACE_WQ(TRACE_wq_req_octhreads | DBG_FUNC_NONE, wq, priority, wq->wq_ocrequests[class], reqcount, 0);
-               
-               while (reqcount) {
-                       if (!workqueue_run_one(p, wq, overcommit, priority))
-                               break;
-                       reqcount--;
-               }
-               if (reqcount) {
-                       /*
-                        * We need to delay starting some of the overcommit requests.
-                        * We'll record the request here and as existing threads return to
-                        * the kernel, we'll notice the ocrequests and spin them back to
-                        * user space as the overcommit variety.
-                        */
-                       wq->wq_reqcount += reqcount;
-                       wq->wq_requests[class] += reqcount;
-                       wq->wq_ocrequests[class] += reqcount;
-                       
-                       PTHREAD_TRACE_WQ(TRACE_wq_delay_octhreads | DBG_FUNC_NONE, wq, priority, wq->wq_ocrequests[class], reqcount, 0);
-
-                       /*
-                        * If we delayed this thread coming up but we're not constrained
-                        * or at max threads then we need to start the timer so we don't
-                        * risk dropping this request on the floor.
-                        */
-                       if ((wq->wq_constrained_threads_scheduled < wq_max_constrained_threads) &&
-                                       (wq->wq_nthreads < wq_max_threads)){
-                               start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
-                       }
-               }
-       } else if (event_manager) {
-               PTHREAD_TRACE_WQ(TRACE_wq_req_event_manager | DBG_FUNC_NONE, wq, wq->wq_event_manager_priority, wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET], wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET], 0);
+       _threadreq_copy_prepare(wq);
 
-               if (wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0){
-                       wq->wq_reqcount += 1;
-                       wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] = 1;
-               }
+       PTHREAD_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE, wq, reqcount, priority, 0, 0);
 
-               // We've recorded the request for an event manager thread above.  We'll
-               // let the timer pick it up as we would for a kernel callout.  We can
-               // do a direct add/wakeup when that support is added for the kevent path.
-               if (wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0){
-                       start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
-               }
+       int tr_flags = 0;
+       if (overcommit) tr_flags |= TR_FLAG_OVERCOMMIT;
+       if (reqcount > 1) {
+               /*
+                * When libdispatch asks for more than one thread, it wants to
+                * achieve parallelism. Pacing would be detrimental to that goal,
+                * so treat these requests specially and skip the pacing
+                * admission check.
+                */
+               tr_flags |= TR_FLAG_NO_PACING;
        }
 
-       if (start_timer) {
-               workqueue_interval_timer_start(wq);
+       while (reqcount-- && !_wq_exiting(wq)) {
+               struct threadreq req;
+               _threadreq_init_stack(&req, class, tr_flags);
+
+               workqueue_run_threadreq_and_unlock(p, wq, NULL, &req, true);
+
+               workqueue_lock_spin(wq); /* reacquire */
+               _threadreq_copy_prepare(wq);
        }
 
        workqueue_unlock(wq);
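For orientation, this entry point is reached from userspace through the workq_kernreturn trap. A caller-side sketch, assuming the libpthread-internal wrapper __workq_kernreturn() and the WQOPS_QUEUE_REQTHREADS operation code (neither is shown in this diff):

    /* Sketch: userspace (e.g. libdispatch via libpthread) asking the kernel
     * for worker threads.  Signature and constant are assumptions here. */
    extern int __workq_kernreturn(int options, void *item, int arg2, int arg3);

    static int
    request_workers(pthread_priority_t pp, int nthreads)
    {
            /* arg2 carries the thread count, arg3 the packed priority */
            return __workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, nthreads, (int)pp);
    }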
@@ -2108,142 +2552,234 @@ static int wqops_queue_reqthreads(struct proc *p, int reqcount, pthread_priority
  *
  * Currently count is ignored and we always return one thread per invocation.
  */
-thread_t _workq_reqthreads(struct proc *p, int requests_count, workq_reqthreads_req_t requests){
-       thread_t th = THREAD_NULL;
-       boolean_t do_thread_call = FALSE;
-       boolean_t emergency_thread = FALSE;
-       assert(requests_count > 0);
+static thread_t
+_workq_kevent_reqthreads(struct proc *p, pthread_priority_t priority,
+               bool no_emergency)
+{
+       int wq_run_tr = WQ_RUN_TR_THROTTLED;
+       bool emergency_thread = false;
+       struct threadreq req;
 
-#if DEBUG
-       // Make sure that the requests array is sorted, highest priority first
-       if (requests_count > 1){
-               __assert_only qos_class_t priority = _pthread_priority_get_qos_newest(requests[0].priority);
-               __assert_only unsigned long flags = ((_pthread_priority_get_flags(requests[0].priority) & (_PTHREAD_PRIORITY_OVERCOMMIT_FLAG|_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) != 0);
-               for (int i = 1; i < requests_count; i++){
-                       if (requests[i].count == 0) continue;
-                       __assert_only qos_class_t next_priority = _pthread_priority_get_qos_newest(requests[i].priority);
-                       __assert_only unsigned long next_flags = ((_pthread_priority_get_flags(requests[i].priority) & (_PTHREAD_PRIORITY_OVERCOMMIT_FLAG|_PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)) != 0);
-                       if (next_flags != flags){
-                               flags = next_flags;
-                               priority = next_priority;
-                       } else {
-                               assert(next_priority <= priority);
-                       }
-               }
-       }
-#endif // DEBUG
 
        struct workqueue *wq;
        if ((wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p)) == NULL) {
                return THREAD_NULL;
        }
 
+       int class = pthread_priority_get_class_index(priority);
+
        workqueue_lock_spin(wq);
+       bool has_threadreq = _threadreq_copy_prepare_noblock(wq);
 
-       PTHREAD_TRACE_WQ(TRACE_wq_kevent_req_threads | DBG_FUNC_START, wq, requests_count, 0, 0, 0);
+       PTHREAD_TRACE_WQ_REQ(TRACE_wq_kevent_reqthreads | DBG_FUNC_NONE, wq, NULL, priority, 0, 0);
 
-       // Look for overcommit or event-manager-only requests.
-       boolean_t have_overcommit = FALSE;
-       pthread_priority_t priority = 0;
-       for (int i = 0; i < requests_count; i++){
-               if (requests[i].count == 0)
-                       continue;
-               priority = requests[i].priority;
-               if (_pthread_priority_get_qos_newest(priority) == QOS_CLASS_UNSPECIFIED){
-                       priority |= _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
-               }
-               if ((_pthread_priority_get_flags(priority) & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) != 0){
-                       goto event_manager;
-               }
-               if ((_pthread_priority_get_flags(priority) & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) != 0){
-                       have_overcommit = TRUE;
-                       break;
-               }
+       /*
+        * Skip straight to event manager if that's what was requested
+        */
+       if ((_pthread_priority_get_qos_newest(priority) == QOS_CLASS_UNSPECIFIED) ||
+                       (_pthread_priority_get_flags(priority) & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG)){
+               goto event_manager;
        }
 
-       if (have_overcommit){
-               if (wq->wq_thidlecount){
-                       th = workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_OVERCOMMIT_KEVENT, priority, true);
-                       if (th != THREAD_NULL){
-                               goto out;
-                       } else {
-                               workqueue_lock_spin(wq); // reacquire lock
-                       }
-               }
+       bool will_pace = _wq_should_pace_priority(wq, class);
+       if ((wq->wq_thidlecount == 0 || will_pace) && has_threadreq == false) {
+               /*
+                * We'll need to persist the request and can't, so return the emergency
+                * thread instead, which has a persistent request object.
+                */
+               emergency_thread = true;
+               goto event_manager;
+       }
 
-               int class = pthread_priority_get_class_index(priority);
-               wq->wq_reqcount += 1;
-               wq->wq_requests[class] += 1;
-               wq->wq_kevent_ocrequests[class] += 1;
+       /*
+        * Handle overcommit requests
+        */
+       if ((_pthread_priority_get_flags(priority) & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) != 0){
+               _threadreq_init_stack(&req, class, TR_FLAG_KEVENT | TR_FLAG_OVERCOMMIT);
+               wq_run_tr = workqueue_run_threadreq_and_unlock(p, wq, NULL, &req, false);
+               goto done;
+       }
+
+       /*
+        * Handle constrained requests
+        */
+       boolean_t may_start = may_start_constrained_thread(wq, class, NULL, false);
+       if (may_start || no_emergency) {
+               _threadreq_init_stack(&req, class, TR_FLAG_KEVENT);
+               wq_run_tr = workqueue_run_threadreq_and_unlock(p, wq, NULL, &req, false);
+               goto done;
+       } else {
+               emergency_thread = true;
+       }
+
+
+event_manager:
+       _threadreq_init_stack(&req, WORKQUEUE_EVENT_MANAGER_BUCKET, TR_FLAG_KEVENT);
+       wq_run_tr = workqueue_run_threadreq_and_unlock(p, wq, NULL, &req, false);
 
-               do_thread_call = WQ_TIMER_IMMEDIATE_NEEDED(wq);
-               goto deferred;
+done:
+       if (wq_run_tr == WQ_RUN_TR_THREAD_NEEDED && WQ_TIMER_IMMEDIATE_NEEDED(wq)) {
+               workqueue_interval_timer_trigger(wq);
        }
+       return emergency_thread ? (void*)-1 : 0;
+}
 
-       // Having no overcommit requests, try to find any request that can start
-       // There's no TOCTTOU since we hold the workqueue lock
-       for (int i = 0; i < requests_count; i++){
-               workq_reqthreads_req_t req = requests + i;
-               priority = req->priority;
-               int class = pthread_priority_get_class_index(priority);
+thread_t
+_workq_reqthreads(struct proc *p, __assert_only int requests_count,
+               workq_reqthreads_req_t request)
+{
+       assert(requests_count == 1);
 
-               if (req->count == 0)
-                       continue;
+       pthread_priority_t priority = request->priority;
+       bool no_emergency = request->count & WORKQ_REQTHREADS_NOEMERGENCY;
 
-               if (!may_start_constrained_thread(wq, class, WORKQUEUE_NUM_BUCKETS, NULL))
-                       continue;
+       return _workq_kevent_reqthreads(p, priority, no_emergency);
+}
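A short sketch of the single request this shim now accepts, using the two fields the code above decodes (.priority, and .count doubling as a flag word for WORKQ_REQTHREADS_NOEMERGENCY); the struct tag is an assumption:

    /* Sketch: a kevent-side caller building the one-element request array. */
    struct workq_reqthreads_req_s req = {
            .priority = pp,                               /* pthread_priority_t */
            .count    = 1 | WORKQ_REQTHREADS_NOEMERGENCY, /* opt out of the fallback */
    };
    thread_t th = _workq_reqthreads(p, 1, &req);
    if (th == (thread_t)-1) {
            /* the emergency/event-manager thread was returned instead */
    }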
 
-               wq->wq_reqcount += 1;
-               wq->wq_requests[class] += 1;
-               wq->wq_kevent_requests[class] += 1;
 
-               PTHREAD_TRACE_WQ(TRACE_wq_req_kevent_threads | DBG_FUNC_NONE, wq, priority, wq->wq_kevent_requests[class], 1, 0);
+int
+workq_kern_threadreq(struct proc *p, workq_threadreq_t _req,
+               enum workq_threadreq_type type, unsigned long priority, int flags)
+{
+       struct workqueue *wq;
+       int ret;
 
-               if (wq->wq_thidlecount){
-                       th = workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_DEFAULT_KEVENT, priority, true);
-                       goto out;
+       if ((wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p)) == NULL) {
+               return EINVAL;
+       }
+
+       switch (type) {
+       case WORKQ_THREADREQ_KEVENT: {
+               bool no_emergency = flags & WORKQ_THREADREQ_FLAG_NOEMERGENCY;
+               (void)_workq_kevent_reqthreads(p, priority, no_emergency);
+               return 0;
+       }
+       case WORKQ_THREADREQ_WORKLOOP:
+       case WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL: {
+               struct threadreq *req = (struct threadreq *)_req;
+               int req_class = pthread_priority_get_class_index(priority);
+               int req_flags = TR_FLAG_WORKLOOP;
+               if ((_pthread_priority_get_flags(priority) &
+                               _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) != 0){
+                       req_flags |= TR_FLAG_OVERCOMMIT;
+               }
+
+               thread_t thread = current_thread();
+               struct threadlist *tl = util_get_thread_threadlist_entry(thread);
+
+               if (tl && tl != WQ_THREADLIST_EXITING_POISON &&
+                               (tl->th_flags & TH_LIST_UNBINDING)) {
+                       /*
+                        * We're called back synchronously from the context of
+                        * kevent_qos_internal_unbind(), from within
+                        * wqops_thread_return(), so we can try to match up this
+                        * thread with this request!
+                        */
                } else {
-                       do_thread_call = WQ_TIMER_IMMEDIATE_NEEDED(wq);
-                       goto deferred;
+                       tl = NULL;
+               }
+
+               _threadreq_init_alloced(req, req_class, req_flags);
+               workqueue_lock_spin(wq);
+               PTHREAD_TRACE_WQ_REQ(TRACE_wq_kevent_reqthreads | DBG_FUNC_NONE, wq, req, priority, 1, 0);
+               ret = workqueue_run_threadreq_and_unlock(p, wq, tl, req, false);
+               if (ret == WQ_RUN_TR_EXITING) {
+                       return ECANCELED;
+               }
+               if (ret == WQ_RUN_TR_THREAD_NEEDED) {
+                       if (type == WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL) {
+                               return EAGAIN;
+                       }
+                       if (WQ_TIMER_IMMEDIATE_NEEDED(wq)) {
+                               workqueue_interval_timer_trigger(wq);
+                       }
+               }
+               return 0;
+       }
+       case WORKQ_THREADREQ_REDRIVE:
+               PTHREAD_TRACE_WQ_REQ(TRACE_wq_kevent_reqthreads | DBG_FUNC_NONE, wq, 0, 0, 4, 0);
+               workqueue_lock_spin(wq);
+               ret = workqueue_run_threadreq_and_unlock(p, wq, NULL, NULL, true);
+               if (ret == WQ_RUN_TR_EXITING) {
+                       return ECANCELED;
                }
+               return 0;
+       default:
+               return ENOTSUP;
        }
+}
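The return-code contract above (0 on success, EAGAIN when a new thread is needed but thread calls were disallowed, ECANCELED when the workqueue is exiting, ENOTSUP otherwise) is what kernel clients must handle. A hedged sketch of a workloop caller; how the workq_threadreq_t is allocated and retried is an assumption:

    /* Sketch: driving a workloop thread request from a kernel client. */
    int err = workq_kern_threadreq(p, req,
                    WORKQ_THREADREQ_WORKLOOP_NO_THREAD_CALL, pp, 0);
    if (err == EAGAIN) {
            /* a new thread is needed; retry allowing the thread call */
            err = workq_kern_threadreq(p, req, WORKQ_THREADREQ_WORKLOOP, pp, 0);
    }
    if (err == ECANCELED) {
            /* the workqueue is exiting: the request will never run */
    }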
 
-       // Okay, here's the fun case: we can't spin up any of the non-overcommit threads
-       // that we've seen a request for, so we kick this over to the event manager thread
-       emergency_thread = TRUE;
+int
+workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t _req,
+               enum workq_threadreq_op operation, unsigned long arg1,
+               unsigned long __unused arg2)
+{
+       struct threadreq *req = (struct threadreq *)_req;
+       struct workqueue *wq;
+       int priclass, ret = 0, wq_tr_rc = WQ_RUN_TR_THROTTLED;
 
-event_manager:
-       if (wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0){
-               wq->wq_reqcount += 1;
-               wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] = 1;
-               PTHREAD_TRACE_WQ(TRACE_wq_req_event_manager | DBG_FUNC_NONE, wq, 0, wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET], 1, 0);
-       } else {
-               PTHREAD_TRACE_WQ(TRACE_wq_req_event_manager | DBG_FUNC_NONE, wq, 0, wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET], 0, 0);
+       if (req == NULL || (wq = pthread_kern->proc_get_wqptr(p)) == NULL) {
+               return EINVAL;
        }
-       wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] = 1;
 
-       if (wq->wq_thidlecount && wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0){
-               th = workqueue_run_nextreq(p, wq, THREAD_NULL, RUN_NEXTREQ_EVENT_MANAGER, 0, true);
-               assert(th != THREAD_NULL);
-               goto out;
+       workqueue_lock_spin(wq);
+
+       if (_wq_exiting(wq)) {
+               ret = ECANCELED;
+               goto out_unlock;
        }
-       do_thread_call = WQ_TIMER_IMMEDIATE_NEEDED(wq);
 
-deferred:
-       workqueue_unlock(wq);
+       /*
+        * Find/validate the referenced request structure
+        */
+       if (req->tr_state != TR_STATE_WAITING) {
+               ret = EINVAL;
+               goto out_unlock;
+       }
+       assert(req->tr_priority < WORKQUEUE_EVENT_MANAGER_BUCKET);
+       assert(req->tr_flags & TR_FLAG_WORKLOOP);
+
+       switch (operation) {
+       case WORKQ_THREADREQ_CHANGE_PRI:
+       case WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL:
+               priclass = pthread_priority_get_class_index(arg1);
+               PTHREAD_TRACE_WQ_REQ(TRACE_wq_kevent_reqthreads | DBG_FUNC_NONE, wq, req, arg1, 2, 0);
+               if (req->tr_priority == priclass) {
+                       goto out_unlock;
+               }
+               _threadreq_dequeue(wq, req);
+               req->tr_priority = priclass;
+               req->tr_state = TR_STATE_NEW; // what was old is new again
+               wq_tr_rc = workqueue_run_threadreq_and_unlock(p, wq, NULL, req, false);
+               goto out;
 
-       if (do_thread_call == TRUE){
-               workqueue_interval_timer_trigger(wq);
+       case WORKQ_THREADREQ_CANCEL:
+               PTHREAD_TRACE_WQ_REQ(TRACE_wq_kevent_reqthreads | DBG_FUNC_NONE, wq, req, 0, 3, 0);
+               _threadreq_dequeue(wq, req);
+               req->tr_state = TR_STATE_DEAD;
+               break;
+
+       default:
+               ret = ENOTSUP;
+               break;
        }
 
+out_unlock:
+       workqueue_unlock(wq);
 out:
-       PTHREAD_TRACE_WQ(TRACE_wq_kevent_req_threads | DBG_FUNC_END, wq, do_thread_call, 0, 0, 0);
-
-       return emergency_thread ? (void*)-1 : th;
+       if (wq_tr_rc == WQ_RUN_TR_THREAD_NEEDED) {
+               if (operation == WORKQ_THREADREQ_CHANGE_PRI_NO_THREAD_CALL) {
+                       ret = EAGAIN;
+               } else if (WQ_TIMER_IMMEDIATE_NEEDED(wq)) {
+                       workqueue_interval_timer_trigger(wq);
+               }
+       }
+       return ret;
 }
 
 
-static int wqops_thread_return(struct proc *p){
+static int
+wqops_thread_return(struct proc *p, struct workqueue *wq)
+{
        thread_t th = current_thread();
        struct uthread *uth = pthread_kern->get_bsdthread_info(th);
        struct threadlist *tl = pthread_kern->uthread_get_threadlist(uth);
@@ -2255,7 +2791,6 @@ static int wqops_thread_return(struct proc *p){
                pthread_kern->proc_unlock(p);
        }
 
-       struct workqueue *wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p);
        if (wq == NULL || !tl) {
                return EINVAL;
        }
@@ -2269,49 +2804,54 @@ static int wqops_thread_return(struct proc *p){
         * lowered.  Of course, now our understanding of the thread's QoS is wrong,
         * so we'll adjust below.
         */
-       int new_qos =
-       pthread_kern->proc_usynch_thread_qos_squash_override_for_resource(th,
-                       THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD,
-                       THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE);
+       bool was_manager = (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET);
+       int new_qos;
+
+       if (!was_manager) {
+               new_qos = pthread_kern->proc_usynch_thread_qos_squash_override_for_resource(th,
+                               THREAD_QOS_OVERRIDE_RESOURCE_WILDCARD,
+                               THREAD_QOS_OVERRIDE_TYPE_DISPATCH_ASYNCHRONOUS_OVERRIDE);
+       }
+
+       PTHREAD_TRACE_WQ(TRACE_wq_runitem | DBG_FUNC_END, wq, tl->th_priority, 0, 0, 0);
 
        workqueue_lock_spin(wq);
 
        if (tl->th_flags & TH_LIST_KEVENT_BOUND) {
                unsigned int flags = KEVENT_FLAG_WORKQ;
-               if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+               if (was_manager) {
                        flags |= KEVENT_FLAG_WORKQ_MANAGER;
                }
 
+               tl->th_flags |= TH_LIST_UNBINDING;
                workqueue_unlock(wq);
                kevent_qos_internal_unbind(p, class_index_get_thread_qos(tl->th_priority), th, flags);
+               if (!(tl->th_flags & TH_LIST_UNBINDING)) {
+                       _setup_wqthread(p, th, wq, tl, WQ_SETUP_CLEAR_VOUCHER);
+                       pthread_kern->unix_syscall_return(EJUSTRETURN);
+                       __builtin_unreachable();
+               }
                workqueue_lock_spin(wq);
-
-               tl->th_flags &= ~TH_LIST_KEVENT_BOUND;
+               tl->th_flags &= ~(TH_LIST_KEVENT_BOUND | TH_LIST_UNBINDING);
        }
 
-       /* Fix up counters from the squash operation. */
-       uint8_t old_bucket = tl->th_priority;
-       uint8_t new_bucket = thread_qos_get_class_index(new_qos);
-
-       if (old_bucket != new_bucket) {
-               OSAddAtomic(-1, &wq->wq_thactive_count[old_bucket]);
-               OSAddAtomic(1, &wq->wq_thactive_count[new_bucket]);
+       if (!was_manager) {
+               /* Fix up counters from the squash operation. */
+               uint8_t old_bucket = tl->th_priority;
+               uint8_t new_bucket = thread_qos_get_class_index(new_qos);
 
-               wq->wq_thscheduled_count[old_bucket]--;
-               wq->wq_thscheduled_count[new_bucket]++;
+               if (old_bucket != new_bucket) {
+                       _wq_thactive_move(wq, old_bucket, new_bucket);
+                       wq->wq_thscheduled_count[old_bucket]--;
+                       wq->wq_thscheduled_count[new_bucket]++;
 
-               tl->th_priority = new_bucket;
+                       PTHREAD_TRACE_WQ(TRACE_wq_thread_squash | DBG_FUNC_NONE, wq, tl->th_priority, new_bucket, 0, 0);
+                       tl->th_priority = new_bucket;
+                       PTHREAD_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_END, tl->th_workq, new_qos, 0, 0, 0);
+               }
        }
 
-       PTHREAD_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_END, tl->th_workq, new_qos, 0, 0, 0);
-
-       PTHREAD_TRACE_WQ(TRACE_wq_runitem | DBG_FUNC_END, wq, 0, 0, 0, 0);
-
-       (void)workqueue_run_nextreq(p, wq, th, RUN_NEXTREQ_DEFAULT, 0, false);
-       /*
-        * workqueue_run_nextreq is responsible for
-        * dropping the workqueue lock in all cases
-        */
+       workqueue_run_threadreq_and_unlock(p, wq, tl, NULL, false);
        return 0;
 }
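The TH_LIST_UNBINDING handshake above is worth restating. The flag is the channel through which workq_kern_threadreq(), when it is called back synchronously from inside kevent_qos_internal_unbind(), can claim this very thread for a new request; a successful match clears the flag, which is exactly what the check after the unbind call tests. A condensed sketch of the protocol:

    /* Sketch of the unbind/rebind handshake (names as in the code above). */
    tl->th_flags |= TH_LIST_UNBINDING;        /* advertise: rebind me if you can */
    workqueue_unlock(wq);
    kevent_qos_internal_unbind(p, qos, th, flags); /* may re-enter
                    workq_kern_threadreq(), whose matching path clears the flag */
    if (!(tl->th_flags & TH_LIST_UNBINDING)) {
            /* matched: return to userspace directly with the new work item */
            _setup_wqthread(p, th, wq, tl, WQ_SETUP_CLEAR_VOUCHER);
            pthread_kern->unix_syscall_return(EJUSTRETURN);
            /* not reached */
    }
    workqueue_lock_spin(wq);                  /* no match: park normally */
    tl->th_flags &= ~(TH_LIST_KEVENT_BOUND | TH_LIST_UNBINDING);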
 
@@ -2326,6 +2866,7 @@ _workq_kernreturn(struct proc *p,
                  int arg3,
                  int32_t *retval)
 {
+       struct workqueue *wq;
        int error = 0;
 
        if (pthread_kern->proc_get_register(p) == 0) {
@@ -2364,19 +2905,21 @@ _workq_kernreturn(struct proc *p,
                 */
                pthread_priority_t pri = arg2;
 
-               struct workqueue *wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p);
+               wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p);
                if (wq == NULL) {
                        error = EINVAL;
                        break;
                }
                workqueue_lock_spin(wq);
                if (pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG){
-                       // If userspace passes a scheduling priority, that takes precidence
-                       // over any QoS.  (So, userspace should take care not to accidenatally
-                       // lower the priority this way.)
-                       uint32_t sched_pri = pri & (~_PTHREAD_PRIORITY_FLAGS_MASK);
+                       /*
+                        * If userspace passes a scheduling priority, that takes precedence
+                        * over any QoS.  (So, userspace should take care not to accidentally
+                        * lower the priority this way.)
+                        */
+                       uint32_t sched_pri = pri & _PTHREAD_PRIORITY_SCHED_PRI_MASK;
                        if (wq->wq_event_manager_priority & _PTHREAD_PRIORITY_SCHED_PRI_FLAG){
-                               wq->wq_event_manager_priority = MAX(sched_pri, wq->wq_event_manager_priority & (~_PTHREAD_PRIORITY_FLAGS_MASK))
+                               wq->wq_event_manager_priority = MAX(sched_pri, wq->wq_event_manager_priority & _PTHREAD_PRIORITY_SCHED_PRI_MASK)
                                                | _PTHREAD_PRIORITY_SCHED_PRI_FLAG | _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG;
                        } else {
                                wq->wq_event_manager_priority = sched_pri
@@ -2391,13 +2934,29 @@ _workq_kernreturn(struct proc *p,
                break;
        }
        case WQOPS_THREAD_KEVENT_RETURN:
-               if (item != 0) {
+       case WQOPS_THREAD_WORKLOOP_RETURN:
+               wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p);
+               PTHREAD_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_END, wq, options, 0, 0, 0);
+               if (item != 0 && arg2 != 0) {
                        int32_t kevent_retval;
-                       int ret = kevent_qos_internal(p, -1, item, arg2, item, arg2, NULL, NULL, KEVENT_FLAG_WORKQ | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS, &kevent_retval);
-                       // We shouldn't be getting more errors out than events we put in, so
-                       // reusing the input buffer should always provide enough space.  But,
-                       // the assert is commented out since we get errors in edge cases in the
-                       // process lifecycle.
+                       int ret;
+                       if (options == WQOPS_THREAD_KEVENT_RETURN) {
+                               ret = kevent_qos_internal(p, -1, item, arg2, item, arg2, NULL, NULL,
+                                               KEVENT_FLAG_WORKQ | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS,
+                                               &kevent_retval);
+                       } else /* options == WQOPS_THREAD_WORKLOOP_RETURN */ {
+                               kqueue_id_t kevent_id = -1;
+                               ret = kevent_id_internal(p, &kevent_id, item, arg2, item, arg2,
+                                               NULL, NULL,
+                                               KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_IMMEDIATE | KEVENT_FLAG_ERROR_EVENTS,
+                                               &kevent_retval);
+                       }
+                       /*
+                        * We shouldn't be getting more errors out than events we put in, so
+                        * reusing the input buffer should always provide enough space.  But,
+                        * the assert is commented out since we get errors in edge cases in the
+                        * process lifecycle.
+                        */
                        //assert(ret == KERN_SUCCESS && kevent_retval >= 0);
                        if (ret != KERN_SUCCESS){
                                error = ret;
@@ -2409,44 +2968,53 @@ _workq_kernreturn(struct proc *p,
                                break;
                        }
                }
-               // FALLTHRU
+               goto thread_return;
+
        case WQOPS_THREAD_RETURN:
-               error = wqops_thread_return(p);
+               wq = (struct workqueue *)pthread_kern->proc_get_wqptr(p);
+               PTHREAD_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_END, wq, options, 0, 0, 0);
+       thread_return:
+               error = wqops_thread_return(p, wq);
                // NOT REACHED except in case of error
                assert(error);
                break;
-       default:
-               error = EINVAL;
-               break;
-       }
-       return (error);
-}
 
+       case WQOPS_SHOULD_NARROW: {
+               /*
+                * arg2 = priority to test
+                * arg3 = unused
+                */
+               pthread_priority_t priority = arg2;
+               thread_t th = current_thread();
+               struct threadlist *tl = util_get_thread_threadlist_entry(th);
 
-static boolean_t
-workqueue_run_one(proc_t p, struct workqueue *wq, boolean_t overcommit, pthread_priority_t priority)
-{
-       boolean_t       ran_one;
+               if (tl == NULL || (tl->th_flags & TH_LIST_CONSTRAINED) == 0) {
+                       error = EINVAL;
+                       break;
+               }
 
-       if (wq->wq_thidlecount == 0) {
-               if (overcommit == FALSE) {
-                       if (wq->wq_constrained_threads_scheduled < wq->wq_max_concurrency)
-                               workqueue_addnewthread(wq, overcommit);
-               } else {
-                       workqueue_addnewthread(wq, overcommit);
+               int class = pthread_priority_get_class_index(priority);
+               wq = tl->th_workq;
+               workqueue_lock_spin(wq);
+               bool should_narrow = !may_start_constrained_thread(wq, class, tl, false);
+               workqueue_unlock(wq);
 
-                       if (wq->wq_thidlecount == 0)
-                               return (FALSE);
-               }
+               *retval = should_narrow;
+               break;
+       }
+       default:
+               error = EINVAL;
+               break;
        }
-       ran_one = (workqueue_run_nextreq(p, wq, THREAD_NULL, overcommit ? RUN_NEXTREQ_OVERCOMMIT : RUN_NEXTREQ_DEFAULT, priority, false) != THREAD_NULL);
-       /*
-        * workqueue_run_nextreq is responsible for
-        * dropping the workqueue lock in all cases
-        */
-       workqueue_lock_spin(wq);
 
-       return (ran_one);
+       switch (options) {
+       case WQOPS_THREAD_KEVENT_RETURN:
+       case WQOPS_THREAD_WORKLOOP_RETURN:
+       case WQOPS_THREAD_RETURN:
+               PTHREAD_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START, wq, options, 0, 0, 0);
+               break;
+       }
+       return (error);
 }
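WQOPS_SHOULD_NARROW lets a constrained thread ask whether it would still pass the constrained-thread admission check at a given priority. A caller-side sketch, again assuming the __workq_kernreturn() wrapper; per the handler above, the answer comes back through *retval (nonzero means "narrow"), and non-constrained threads get EINVAL:

    /* Sketch: would this constrained thread be over budget at priority pp? */
    int should_narrow = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pp, 0);
    if (should_narrow > 0) {
            /* stop picking up more work at this priority */
    }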
 
 /*
@@ -2460,6 +3028,8 @@ parkit(struct workqueue *wq, struct threadlist *tl, thread_t thread)
        assert(thread == tl->th_thread);
        assert(thread == current_thread());
 
+       PTHREAD_TRACE_WQ(TRACE_wq_thread_park | DBG_FUNC_START, wq, 0, 0, 0, 0);
+
        uint32_t us_to_wait = 0;
 
        TAILQ_REMOVE(&wq->wq_thrunlist, tl, th_entry);
@@ -2473,7 +3043,7 @@ parkit(struct workqueue *wq, struct threadlist *tl, thread_t thread)
                tl->th_flags &= ~TH_LIST_CONSTRAINED;
        }
 
-       OSAddAtomic(-1, &wq->wq_thactive_count[tl->th_priority]);
+       _wq_thactive_dec(wq, tl->th_priority);
        wq->wq_thscheduled_count[tl->th_priority]--;
        wq->wq_threads_scheduled--;
        uint32_t thidlecount = ++wq->wq_thidlecount;
@@ -2492,6 +3062,9 @@ parkit(struct workqueue *wq, struct threadlist *tl, thread_t thread)
         */
        if (TAILQ_EMPTY(&wq->wq_thidlemgrlist) &&
                        tl->th_priority != WORKQUEUE_EVENT_MANAGER_BUCKET){
+               PTHREAD_TRACE_WQ(TRACE_wq_thread_reset_priority | DBG_FUNC_NONE,
+                                       wq, thread_tid(thread),
+                                       (tl->th_priority << 16) | WORKQUEUE_EVENT_MANAGER_BUCKET, 2, 0);
                reset_priority(tl, pthread_priority_from_wq_class_index(wq, WORKQUEUE_EVENT_MANAGER_BUCKET));
                tl->th_priority = WORKQUEUE_EVENT_MANAGER_BUCKET;
        }
@@ -2502,9 +3075,6 @@ parkit(struct workqueue *wq, struct threadlist *tl, thread_t thread)
                TAILQ_INSERT_HEAD(&wq->wq_thidlelist, tl, th_entry);
        }
 
-       PTHREAD_TRACE_WQ(TRACE_wq_thread_park | DBG_FUNC_START, wq,
-                       wq->wq_threads_scheduled, wq->wq_thidlecount, us_to_wait, 0);
-
        /*
         * When we remove the voucher from the thread, we may lose our importance
         * causing us to get preempted, so we do this after putting the thread on
@@ -2512,10 +3082,16 @@ parkit(struct workqueue *wq, struct threadlist *tl, thread_t thread)
         * to use this thread from e.g. the kevent call out to deliver a boosting
         * message.
         */
+       tl->th_flags |= TH_LIST_REMOVING_VOUCHER;
        workqueue_unlock(wq);
-       kern_return_t kr = pthread_kern->thread_set_voucher_name(MACH_PORT_NULL);
+       if (pthread_kern->thread_will_park_or_terminate) {
+               pthread_kern->thread_will_park_or_terminate(tl->th_thread);
+       }
+       __assert_only kern_return_t kr;
+       kr = pthread_kern->thread_set_voucher_name(MACH_PORT_NULL);
        assert(kr == KERN_SUCCESS);
        workqueue_lock_spin(wq);
+       tl->th_flags &= ~(TH_LIST_REMOVING_VOUCHER);
 
        if ((tl->th_flags & TH_LIST_RUNNING) == 0) {
                if (thidlecount < 101) {
@@ -2546,52 +3122,92 @@ parkit(struct workqueue *wq, struct threadlist *tl, thread_t thread)
        }
 }
 
-static boolean_t may_start_constrained_thread(struct workqueue *wq, uint32_t at_priclass, uint32_t my_priclass, boolean_t *start_timer){
-       if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
+static bool
+may_start_constrained_thread(struct workqueue *wq, uint32_t at_priclass,
+               struct threadlist *tl, bool may_start_timer)
+{
+       uint32_t req_qos = _wq_thactive_best_constrained_req_qos(wq);
+       wq_thactive_t thactive;
+
+       if (may_start_timer && at_priclass < req_qos) {
+               /*
+                * When called from workqueue_run_threadreq_and_unlock() pre-post newest
+                * When called from workqueue_run_threadreq_and_unlock(), pre-post
+                * the newer, higher priority into the thactive state so that
+                * workqueue_callback() makes the right decision.
+                * If the admission check passes, workqueue_run_threadreq_and_unlock
+                * will reset this value before running the request.
+                */
+               thactive = _wq_thactive_set_best_constrained_req_qos(wq, req_qos,
+                               at_priclass);
+#ifdef __LP64__
+               PTHREAD_TRACE_WQ(TRACE_wq_thactive_update, 1, (uint64_t)thactive,
+                               (uint64_t)(thactive >> 64), 0, 0);
+#endif
+       } else {
+               thactive = _wq_thactive(wq);
+       }
+
+       uint32_t constrained_threads = wq->wq_constrained_threads_scheduled;
+       if (tl && (tl->th_flags & TH_LIST_CONSTRAINED)) {
+               /*
+                * don't count the current thread as scheduled
+                */
+               constrained_threads--;
+       }
+       if (constrained_threads >= wq_max_constrained_threads) {
+               PTHREAD_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1,
+                               wq->wq_constrained_threads_scheduled,
+                               wq_max_constrained_threads, 0);
                /*
                 * we need 1 or more constrained threads to return to the kernel before
                 * we can dispatch additional work
                 */
-               return FALSE;
+               return false;
        }
 
-       uint32_t busycount = 0;
-       uint32_t thactive_count = wq->wq_thactive_count[at_priclass];
+       /*
+        * Compute a metric for how many threads are active.  We find the
+        * highest priority request outstanding and then add up the number of
+        * active threads in that and all higher-priority buckets.  We'll also add
+        * any "busy" threads which are not active but blocked recently enough that
+        * we can't be sure they've gone idle yet.  We'll then compare this metric
+        * to our max concurrency to decide whether to add a new thread.
+        */
 
-       // Has our most recently blocked thread blocked recently enough that we
-       // should still consider it busy?
-       if (wq->wq_thscheduled_count[at_priclass] > wq->wq_thactive_count[at_priclass]) {
-               if (wq_thread_is_busy(mach_absolute_time(), &wq->wq_lastblocked_ts[at_priclass])) {
-                       busycount++;
-               }
-       }
+       uint32_t busycount, thactive_count;
 
-       if (my_priclass < WORKQUEUE_NUM_BUCKETS && my_priclass == at_priclass){
+       thactive_count = _wq_thactive_aggregate_downto_qos(wq, thactive,
+                       at_priclass, &busycount, NULL);
+
+       if (tl && tl->th_priority <= at_priclass) {
                /*
                 * don't count this thread as currently active
                 */
+               assert(thactive_count > 0);
                thactive_count--;
        }
 
-       if (thactive_count + busycount >= wq->wq_max_concurrency) {
-               if (busycount && start_timer) {
-                               /*
-                                * we found at least 1 thread in the
-                                * 'busy' state... make sure we start
-                                * the timer because if they are the only
-                                * threads keeping us from scheduling
-                                * this work request, we won't get a callback
-                                * to kick off the timer... we need to
-                                * start it now...
-                                */
-                               *start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
+       if (thactive_count + busycount < wq_max_concurrency[at_priclass]) {
+               PTHREAD_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 2,
+                               thactive_count, busycount, 0);
+               return true;
+       } else {
+               PTHREAD_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3,
+                               thactive_count, busycount, 0);
+       }
+
+       if (busycount && may_start_timer) {
+               /*
+                * If this is called from the add timer, we won't have another timer
+                * fire when the thread exits the "busy" state, so rearm the timer.
+                */
+               if (WQ_TIMER_DELAYED_NEEDED(wq)) {
+                       workqueue_interval_timer_start(wq);
                }
-
-               PTHREAD_TRACE_WQ(TRACE_wq_overcommitted|DBG_FUNC_NONE, wq, ((start_timer && *start_timer) ? 1 << _PTHREAD_PRIORITY_FLAGS_SHIFT : 0) | class_index_get_pthread_priority(at_priclass), thactive_count, busycount, 0);
-
-               return FALSE;
        }
-       return TRUE;
+
+       return false;
 }
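Concretely, the admission test reduces to thactive_count + busycount < wq_max_concurrency[at_priclass]. A worked example with hypothetical numbers:

    /* Hypothetical: wq_max_concurrency[at_priclass] == 4 (say, 4 cores). */
    uint32_t thactive_count = 3;  /* active threads at this QoS and above     */
    uint32_t busycount      = 1;  /* blocked too recently to be presumed idle */
    /* 3 + 1 < 4 is false: the constrained request is denied, and when called
     * from the delayed-timer path the timer is rearmed so the request is
     * retried once the busy thread has provably gone idle. */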
 
 static struct threadlist *
@@ -2601,7 +3217,7 @@ pop_from_thidlelist(struct workqueue *wq, uint32_t priclass)
 
        struct threadlist *tl = NULL;
 
-       if (!TAILQ_EMPTY(&wq->wq_thidlemgrlist) && 
+       if (!TAILQ_EMPTY(&wq->wq_thidlemgrlist) &&
                        (priclass == WORKQUEUE_EVENT_MANAGER_BUCKET || TAILQ_EMPTY(&wq->wq_thidlelist))){
                tl = TAILQ_FIRST(&wq->wq_thidlemgrlist);
                TAILQ_REMOVE(&wq->wq_thidlemgrlist, tl, th_entry);
@@ -2625,13 +3241,13 @@ pop_from_thidlelist(struct workqueue *wq, uint32_t priclass)
 
        wq->wq_threads_scheduled++;
        wq->wq_thscheduled_count[priclass]++;
-       OSAddAtomic(1, &wq->wq_thactive_count[priclass]);
-
+       _wq_thactive_inc(wq, priclass);
        return tl;
 }
 
 static pthread_priority_t
-pthread_priority_from_wq_class_index(struct workqueue *wq, int index){
+pthread_priority_from_wq_class_index(struct workqueue *wq, int index)
+{
        if (index == WORKQUEUE_EVENT_MANAGER_BUCKET){
                return wq->wq_event_manager_priority;
        } else {
@@ -2640,7 +3256,8 @@ pthread_priority_from_wq_class_index(struct workqueue *wq, int index){
 }
 
 static void
-reset_priority(struct threadlist *tl, pthread_priority_t pri){
+reset_priority(struct threadlist *tl, pthread_priority_t pri)
+{
        kern_return_t ret;
        thread_t th = tl->th_thread;
 
@@ -2667,299 +3284,321 @@ reset_priority(struct threadlist *tl, pthread_priority_t pri){
        }
 }
 
+/*
+ * Picks the best request to run, and returns the best overcommit fallback
+ * if the best pick is non overcommit and risks failing its admission check.
+ */
+static struct threadreq *
+workqueue_best_threadreqs(struct workqueue *wq, struct threadlist *tl,
+               struct threadreq **fallback)
+{
+       struct threadreq *req, *best_req = NULL;
+       int priclass, prilimit;
+
+       if ((wq->wq_event_manager_threadreq.tr_state == TR_STATE_WAITING) &&
+                       ((wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0) ||
+                       (tl && tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET))) {
+               /*
+                * There's an event manager request and either:
+                *   - no event manager currently running
+                *   - we are re-using the event manager
+                */
+               req = &wq->wq_event_manager_threadreq;
+               PTHREAD_TRACE_WQ_REQ(TRACE_wq_run_threadreq_req_select | DBG_FUNC_NONE, wq, req, 1, 0, 0);
+               return req;
+       }
+
+       if (tl) {
+               prilimit = WORKQUEUE_EVENT_MANAGER_BUCKET;
+       } else {
+               prilimit = _wq_highest_paced_priority(wq);
+       }
+       for (priclass = 0; priclass < prilimit; priclass++) {
+               req = TAILQ_FIRST(&wq->wq_overcommit_reqlist[priclass]);
+               if (req) {
+                       PTHREAD_TRACE_WQ_REQ(TRACE_wq_run_threadreq_req_select | DBG_FUNC_NONE, wq, req, 2, 0, 0);
+                       if (best_req) {
+                               *fallback = req;
+                       } else {
+                               best_req = req;
+                       }
+                       break;
+               }
+               if (!best_req) {
+                       best_req = TAILQ_FIRST(&wq->wq_reqlist[priclass]);
+                       if (best_req) {
+                               PTHREAD_TRACE_WQ_REQ(TRACE_wq_run_threadreq_req_select | DBG_FUNC_NONE, wq, best_req, 3, 0, 0);
+                       }
+               }
+       }
+       return best_req;
+}
+
 /**
- * grabs a thread for a request
+ * Runs a thread request on a thread
  *
- *  - called with the workqueue lock held...
- *  - responsible for dropping it in all cases
- *  - if provided mode is for overcommit, doesn't consume a reqcount
+ * - if thread is THREAD_NULL, will find a thread and run the request there.
+ *   Otherwise, the thread must be the current thread.
  *
+ * - if req is NULL, will find the highest priority request and run that.  If
+ *   it is not NULL, it must be a threadreq object in state NEW.  If it cannot
+ *   be run immediately, it will be enqueued and moved to state WAITING.
+ *
+ *   Either way, the thread request object serviced will be moved to state
+ *   PENDING and attached to the threadlist.
+ *
+ *   Should be called with the workqueue lock held.  Will drop it.
+ *
+ *   WARNING: _workq_kevent_reqthreads needs to be able to preflight any
+ *   admission checks in this function.  If you are changing this function,
+ *   keep that one up-to-date.
+ *
+ * - if parking_tl is non-NULL, then the current thread is parking. This will
+ *   try to reuse this thread for a request. If no match is found, it will be
+ *   parked.
  */
-static thread_t
-workqueue_run_nextreq(proc_t p, struct workqueue *wq, thread_t thread,
-               enum run_nextreq_mode mode, pthread_priority_t prio,
-               bool kevent_bind_via_return)
+static int
+workqueue_run_threadreq_and_unlock(proc_t p, struct workqueue *wq,
+               struct threadlist *parking_tl, struct threadreq *req,
+               bool may_add_new_thread)
 {
-       thread_t th_to_run = THREAD_NULL;
-       uint32_t upcall_flags = 0;
-       uint32_t priclass;
-       struct threadlist *tl = NULL;
-       struct uthread *uth = NULL;
-       boolean_t start_timer = FALSE;
+       struct threadreq *incoming_req = req;
 
-       if (mode == RUN_NEXTREQ_ADD_TIMER) {
-               mode = RUN_NEXTREQ_DEFAULT;
-       }
+       struct threadlist *tl = parking_tl;
+       int rc = WQ_RUN_TR_THROTTLED;
 
-       // valid modes to call this function with
-       assert(mode == RUN_NEXTREQ_DEFAULT || mode == RUN_NEXTREQ_DEFAULT_KEVENT ||
-                       mode == RUN_NEXTREQ_OVERCOMMIT || mode == RUN_NEXTREQ_UNCONSTRAINED ||
-                       mode == RUN_NEXTREQ_EVENT_MANAGER || mode == RUN_NEXTREQ_OVERCOMMIT_KEVENT);
-       // may only have a priority if in OVERCOMMIT or DEFAULT_KEVENT mode
-       assert(mode == RUN_NEXTREQ_OVERCOMMIT || mode == RUN_NEXTREQ_OVERCOMMIT_KEVENT ||
-                       mode == RUN_NEXTREQ_DEFAULT_KEVENT || prio == 0);
-       // thread == thread_null means "please spin up a new workqueue thread, we can't reuse this"
-       // thread != thread_null is thread reuse, and must be the current thread
-       assert(thread == THREAD_NULL || thread == current_thread());
+       assert(tl == NULL || tl->th_thread == current_thread());
+       assert(req == NULL || req->tr_state == TR_STATE_NEW);
+       assert(!may_add_new_thread || !tl);
 
-       PTHREAD_TRACE_WQ(TRACE_wq_run_nextitem|DBG_FUNC_START, wq, thread_tid(thread), wq->wq_thidlecount, wq->wq_reqcount, 0);
+       PTHREAD_TRACE_WQ_REQ(TRACE_wq_run_threadreq | DBG_FUNC_START, wq, req,
+                       tl ? thread_tid(tl->th_thread) : 0,
+                       req ? (req->tr_priority << 16 | req->tr_flags) : 0, 0);
+
+       /*
+        * Special cases when provided an event manager request
+        */
+       if (req && req->tr_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
+               // Clients must not rely on identity of event manager requests
+               assert(req->tr_flags & TR_FLAG_ONSTACK);
+               // You can't be both overcommit and event manager
+               assert((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0);
+
+               /*
+                * We can only ever have one event manager request, so coalesce them if
+                * there's already one outstanding.
+                */
+               if (wq->wq_event_manager_threadreq.tr_state == TR_STATE_WAITING) {
+                       PTHREAD_TRACE_WQ_REQ(TRACE_wq_run_threadreq_mgr_merge | DBG_FUNC_NONE, wq, req, 0, 0, 0);
+
+                       struct threadreq *existing_req = &wq->wq_event_manager_threadreq;
+                       if (req->tr_flags & TR_FLAG_KEVENT) {
+                               existing_req->tr_flags |= TR_FLAG_KEVENT;
+                       }
 
-       if (thread != THREAD_NULL) {
-               uth = pthread_kern->get_bsdthread_info(thread);
+                       req = existing_req;
+                       incoming_req = NULL;
+               }
 
-               if ((tl = pthread_kern->uthread_get_threadlist(uth)) == NULL) {
-                       panic("wq thread with no threadlist");
+               if (wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] &&
+                               (!tl || tl->th_priority != WORKQUEUE_EVENT_MANAGER_BUCKET)){
+                       /*
+                        * There can only be one event manager running at a time.
+                        */
+                       PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq | DBG_FUNC_END, wq, 1, 0, 0, 0);
+                       goto done;
                }
        }
 
-       /*
-        * from here until we drop the workq lock we can't be pre-empted since we
-        * hold the lock in spin mode... this is important since we have to
-        * independently update the priority that the thread is associated with and
-        * the priorty based counters that "workqueue_callback" also changes and
-        * bases decisions on.
-        */
+again: // Start again after creating a thread
+
+       if (_wq_exiting(wq)) {
+               rc = WQ_RUN_TR_EXITING;
+               goto exiting;
+       }
 
        /*
-        * This giant monstrosity does three things:
-        *
-        *   - adjusts the mode, if required
-        *   - selects the priclass that we'll be servicing
-        *   - sets any mode-specific upcall flags
-        *
-        * When possible special-cases should be handled here and converted into
-        * non-special cases.
+        * Thread request selection and admission control
         */
-       if (mode == RUN_NEXTREQ_OVERCOMMIT) {
-               priclass = pthread_priority_get_class_index(prio);
-               upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
-       } else if (mode == RUN_NEXTREQ_OVERCOMMIT_KEVENT){
-               priclass = pthread_priority_get_class_index(prio);
-               upcall_flags |= WQ_FLAG_THREAD_KEVENT;
-       } else if (mode == RUN_NEXTREQ_EVENT_MANAGER){
-               assert(wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0);
-               priclass = WORKQUEUE_EVENT_MANAGER_BUCKET;
-               upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
-               if (wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET]){
-                       upcall_flags |= WQ_FLAG_THREAD_KEVENT;
-               }
-       } else if (wq->wq_reqcount == 0){
-               // no work to do.  we'll check again when new work arrives.
-               goto done;
-       } else if (mode == RUN_NEXTREQ_DEFAULT_KEVENT) {
-               assert(kevent_bind_via_return);
-
-               priclass = pthread_priority_get_class_index(prio);
-               assert(priclass < WORKQUEUE_EVENT_MANAGER_BUCKET);
-               assert(wq->wq_kevent_requests[priclass] > 0);
-
-               upcall_flags |= WQ_FLAG_THREAD_KEVENT;
-               mode = RUN_NEXTREQ_DEFAULT;
-       } else if (wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] &&
-                          ((wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 0) ||
-                               (thread != THREAD_NULL && tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET))){
-               // There's an event manager request and either:
-               //   - no event manager currently running
-               //   - we are re-using the event manager
-               mode = RUN_NEXTREQ_EVENT_MANAGER;
-               priclass = WORKQUEUE_EVENT_MANAGER_BUCKET;
-               upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
-               if (wq->wq_kevent_requests[WORKQUEUE_EVENT_MANAGER_BUCKET]){
-                       upcall_flags |= WQ_FLAG_THREAD_KEVENT;
-               }
-       } else {
-               // Find highest priority and check for special request types
-               for (priclass = 0; priclass < WORKQUEUE_EVENT_MANAGER_BUCKET; priclass++) {
-                       if (wq->wq_requests[priclass])
-                               break;
-               }
-               if (priclass == WORKQUEUE_EVENT_MANAGER_BUCKET){
-                       // only request should have been event manager since it's not in a bucket,
-                       // but we weren't able to handle it since there's already an event manager running,
-                       // so we fell into this case
-                       assert(wq->wq_requests[WORKQUEUE_EVENT_MANAGER_BUCKET] == 1 &&
-                                  wq->wq_thscheduled_count[WORKQUEUE_EVENT_MANAGER_BUCKET] == 1 &&
-                                  wq->wq_reqcount == 1);
+       struct threadreq *fallback = NULL;
+       if (req) {
+               if ((req->tr_flags & TR_FLAG_NO_PACING) == 0 &&
+                               _wq_should_pace_priority(wq, req->tr_priority)) {
+                       /*
+                        * If a request fails the pacing admission check, then thread
+                        * requests are redriven when the pacing thread is finally scheduled
+                        * when it calls _wq_pacing_end() in wq_unpark_continue().
+                        */
                        goto done;
                }
-
-               if (wq->wq_kevent_ocrequests[priclass]){
-                       mode = RUN_NEXTREQ_DEFERRED_OVERCOMMIT;
-                       upcall_flags |= WQ_FLAG_THREAD_KEVENT;
-                       upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
-               } else if (wq->wq_ocrequests[priclass]){
-                       mode = RUN_NEXTREQ_DEFERRED_OVERCOMMIT;
-                       upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
-               } else if (wq->wq_kevent_requests[priclass]){
-                       upcall_flags |= WQ_FLAG_THREAD_KEVENT;
-               }
+       } else if (wq->wq_reqcount == 0) {
+               PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq | DBG_FUNC_END, wq, 2, 0, 0, 0);
+               goto done;
+       } else if ((req = workqueue_best_threadreqs(wq, tl, &fallback)) == NULL) {
+               PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq | DBG_FUNC_END, wq, 3, 0, 0, 0);
+               goto done;
        }
 
-       assert(mode != RUN_NEXTREQ_EVENT_MANAGER || priclass == WORKQUEUE_EVENT_MANAGER_BUCKET);
-       assert(mode == RUN_NEXTREQ_EVENT_MANAGER || priclass != WORKQUEUE_EVENT_MANAGER_BUCKET);
-
-       if (mode == RUN_NEXTREQ_DEFAULT /* non-overcommit */){
-               uint32_t my_priclass = (thread != THREAD_NULL) ? tl->th_priority : WORKQUEUE_NUM_BUCKETS;
-               if (may_start_constrained_thread(wq, priclass, my_priclass, &start_timer) == FALSE){
-                       // per policy, we won't start another constrained thread
-                       goto done;
+       if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0 &&
+                       (req->tr_priority < WORKQUEUE_EVENT_MANAGER_BUCKET)) {
+               if (!may_start_constrained_thread(wq, req->tr_priority, parking_tl, true)) {
+                       if (!fallback) {
+                               PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq | DBG_FUNC_END, wq, 4, 0, 0, 0);
+                               goto done;
+                       }
+                       assert(req->tr_state == TR_STATE_WAITING);
+                       req = fallback;
                }
        }
 
-       if (thread != THREAD_NULL) {
-               /*
-                * thread is non-NULL here when we return from userspace
-                * in workq_kernreturn, rather than trying to find a thread
-                * we pick up new work for this specific thread.
-                */
-               th_to_run = thread;
-               upcall_flags |= WQ_FLAG_THREAD_REUSE;
-       } else if (wq->wq_thidlecount == 0) {
+       /*
+        * Thread selection.
+        */
+       if (parking_tl) {
+               if (tl->th_priority != req->tr_priority) {
+                       _wq_thactive_move(wq, tl->th_priority, req->tr_priority);
+                       wq->wq_thscheduled_count[tl->th_priority]--;
+                       wq->wq_thscheduled_count[req->tr_priority]++;
+               }
+               PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq_thread_select | DBG_FUNC_NONE,
+                               wq, 1, thread_tid(tl->th_thread), 0, 0);
+       } else if (wq->wq_thidlecount) {
+               tl = pop_from_thidlelist(wq, req->tr_priority);
                /*
-                * we have no additional threads waiting to pick up
-                * work, however, there is additional work to do.
+                * This call will update wq_thscheduled_count and wq_thactive_count for
+                * the provided priority.  It will not set the returned thread to that
+                * priority.  This matches the behavior of the parking_tl clause above.
                 */
-               start_timer = WQ_TIMER_DELAYED_NEEDED(wq);
-
-               PTHREAD_TRACE_WQ(TRACE_wq_stalled, wq, wq->wq_nthreads, start_timer, 0, 0);
-
-               goto done;
-       } else {
-               // there is both work available and an idle thread, so activate a thread
-               tl = pop_from_thidlelist(wq, priclass);
-               th_to_run = tl->th_thread;
-       }
-
-       // Adjust counters and thread flags AKA consume the request
-       // TODO: It would be lovely if OVERCOMMIT consumed reqcount
-       switch (mode) {
-               case RUN_NEXTREQ_DEFAULT:
-               case RUN_NEXTREQ_DEFAULT_KEVENT: /* actually mapped to DEFAULT above */
-               case RUN_NEXTREQ_ADD_TIMER: /* actually mapped to DEFAULT above */
-               case RUN_NEXTREQ_UNCONSTRAINED:
-                       wq->wq_reqcount--;
-                       wq->wq_requests[priclass]--;
-
-                       if (mode == RUN_NEXTREQ_DEFAULT){
-                               if (!(tl->th_flags & TH_LIST_CONSTRAINED)) {
-                                       wq->wq_constrained_threads_scheduled++;
-                                       tl->th_flags |= TH_LIST_CONSTRAINED;
-                               }
-                       } else if (mode == RUN_NEXTREQ_UNCONSTRAINED){
-                               if (tl->th_flags & TH_LIST_CONSTRAINED) {
-                                       wq->wq_constrained_threads_scheduled--;
-                                       tl->th_flags &= ~TH_LIST_CONSTRAINED;
-                               }
-                       }
-                       if (upcall_flags & WQ_FLAG_THREAD_KEVENT){
-                               wq->wq_kevent_requests[priclass]--;
+               PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq_thread_select | DBG_FUNC_NONE,
+                               wq, 2, thread_tid(tl->th_thread), 0, 0);
+       } else /* no idle threads */ {
+               if (!may_add_new_thread || wq->wq_nthreads >= wq_max_threads) {
+                       PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq | DBG_FUNC_END, wq, 5,
+                                       may_add_new_thread, wq->wq_nthreads, 0);
+                       if (wq->wq_nthreads < wq_max_threads) {
+                               rc = WQ_RUN_TR_THREAD_NEEDED;
                        }
-                       break;
-
-               case RUN_NEXTREQ_EVENT_MANAGER:
-                       wq->wq_reqcount--;
-                       wq->wq_requests[priclass]--;
+                       goto done;
+               }
 
-                       if (tl->th_flags & TH_LIST_CONSTRAINED) {
-                               wq->wq_constrained_threads_scheduled--;
-                               tl->th_flags &= ~TH_LIST_CONSTRAINED;
-                       }
-                       if (upcall_flags & WQ_FLAG_THREAD_KEVENT){
-                               wq->wq_kevent_requests[priclass]--;
-                       }
-                       break;
+               bool added_thread = workqueue_addnewthread(p, wq);
+               /*
+                * workqueue_addnewthread will drop and re-take the lock, so we
+                * need to ensure we still have a cached request.
+                *
+                * It also means we have to pick a new request, since our old pick may
+                * not be valid anymore.
+                */
+               req = incoming_req;
+               if (req && (req->tr_flags & TR_FLAG_ONSTACK)) {
+                       _threadreq_copy_prepare(wq);
+               }
 
-               case RUN_NEXTREQ_DEFERRED_OVERCOMMIT:
-                       wq->wq_reqcount--;
-                       wq->wq_requests[priclass]--;
-                       if (upcall_flags & WQ_FLAG_THREAD_KEVENT){
-                               wq->wq_kevent_ocrequests[priclass]--;
-                       } else {
-                       wq->wq_ocrequests[priclass]--;
-                       }
-                       /* FALLTHROUGH */
-               case RUN_NEXTREQ_OVERCOMMIT:
-               case RUN_NEXTREQ_OVERCOMMIT_KEVENT:
-                       if (tl->th_flags & TH_LIST_CONSTRAINED) {
-                               wq->wq_constrained_threads_scheduled--;
-                               tl->th_flags &= ~TH_LIST_CONSTRAINED;
+               if (added_thread) {
+                       PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq_thread_select | DBG_FUNC_NONE,
+                                       wq, 3, 0, 0, 0);
+                       goto again;
+               } else if (_wq_exiting(wq)) {
+                       rc = WQ_RUN_TR_EXITING;
+                       goto exiting;
+               } else {
+                       PTHREAD_TRACE_WQ(TRACE_wq_run_threadreq | DBG_FUNC_END, wq, 6, 0, 0, 0);
+                       /*
+                        * Something caused thread creation to fail.  Kick off the timer in
+                        * the hope that it'll succeed next time.
+                        */
+                       if (WQ_TIMER_DELAYED_NEEDED(wq)) {
+                               workqueue_interval_timer_start(wq);
                        }
-                       break;
+                       goto done;
+               }
        }
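
The dance above, re-reading incoming_req and re-preparing the cached copy after workqueue_addnewthread(), is the classic drop-and-revalidate pattern: any pick made before a blocking call that releases the lock must be re-derived afterwards. A minimal self-contained sketch of the same pattern, with invented pool/request names rather than the kext's types:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct request;                         /* opaque work request */

struct pool {
	pthread_mutex_t lock;
	int nthreads;
	struct request *pending;        /* request list head, protected by lock */
};

/* Drops the pool lock around a blocking allocation, so anything derived
 * from pool state beforehand is stale once the lock is retaken. */
static bool pool_add_thread(struct pool *p)
{
	pthread_mutex_unlock(&p->lock);
	void *stack = malloc(1 << 16);  /* stands in for real thread creation */
	pthread_mutex_lock(&p->lock);
	if (stack == NULL)
		return false;
	free(stack);                    /* sketch only: no thread is started */
	p->nthreads++;
	return true;
}

void pool_service(struct pool *p)
{
	pthread_mutex_lock(&p->lock);
	struct request *req = p->pending;       /* initial pick */
	if (p->nthreads == 0 && pool_add_thread(p)) {
		req = p->pending;               /* re-pick: old pick may be gone */
	}
	(void)req;                              /* ... hand req to a thread ... */
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct pool p = { PTHREAD_MUTEX_INITIALIZER, 0, NULL };
	pool_service(&p);
	return p.nthreads == 1 ? 0 : 1;
}
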
 
-       // Confirm we've maintained our counter invariants
-       assert(wq->wq_requests[priclass] < UINT16_MAX);
-       assert(wq->wq_ocrequests[priclass] < UINT16_MAX);
-       assert(wq->wq_kevent_requests[priclass] < UINT16_MAX);
-       assert(wq->wq_kevent_ocrequests[priclass] < UINT16_MAX);
-       assert(wq->wq_ocrequests[priclass] + wq->wq_kevent_requests[priclass] +
-                       wq->wq_kevent_ocrequests[priclass] <=
-                       wq->wq_requests[priclass]);
-
-       assert((tl->th_flags & TH_LIST_KEVENT_BOUND) == 0);
-       if (upcall_flags & WQ_FLAG_THREAD_KEVENT) {
-               tl->th_flags |= TH_LIST_KEVENT;
+       /*
+        * Setup thread, mark request as complete and run with it.
+        */
+       if (req->tr_state == TR_STATE_WAITING) {
+               _threadreq_dequeue(wq, req);
+       }
+       if (tl->th_priority != req->tr_priority) {
+               PTHREAD_TRACE_WQ(TRACE_wq_thread_reset_priority | DBG_FUNC_NONE,
+                                       wq, thread_tid(tl->th_thread),
+                                       (tl->th_priority << 16) | req->tr_priority, 1, 0);
+               reset_priority(tl, pthread_priority_from_wq_class_index(wq, req->tr_priority));
+               tl->th_priority = (uint8_t)req->tr_priority;
+       }
+       if (req->tr_flags & TR_FLAG_OVERCOMMIT) {
+               if ((tl->th_flags & TH_LIST_CONSTRAINED) != 0) {
+                       tl->th_flags &= ~TH_LIST_CONSTRAINED;
+                       wq->wq_constrained_threads_scheduled--;
+               }
        } else {
-               tl->th_flags &= ~TH_LIST_KEVENT;
+               if ((tl->th_flags & TH_LIST_CONSTRAINED) == 0) {
+                       tl->th_flags |= TH_LIST_CONSTRAINED;
+                       wq->wq_constrained_threads_scheduled++;
+               }
        }
 
-       uint32_t orig_class = tl->th_priority;
-       tl->th_priority = (uint8_t)priclass;
+       if (!parking_tl && !(req->tr_flags & TR_FLAG_NO_PACING)) {
+               _wq_pacing_start(wq, tl);
+       }
+       if ((req->tr_flags & TR_FLAG_OVERCOMMIT) == 0) {
+               uint32_t old_qos, new_qos;
 
-       if ((thread != THREAD_NULL) && (orig_class != priclass)) {
                /*
-                * we need to adjust these counters based on this
-                * thread's new disposition w/r to priority
+                * If we are scheduling a constrained thread request, we may need to
+                * update the best constrained qos in the thactive atomic state.
                 */
-               OSAddAtomic(-1, &wq->wq_thactive_count[orig_class]);
-               OSAddAtomic(1, &wq->wq_thactive_count[priclass]);
-
-               wq->wq_thscheduled_count[orig_class]--;
-               wq->wq_thscheduled_count[priclass]++;
+               for (new_qos = 0; new_qos < WQ_THACTIVE_NO_PENDING_REQUEST; new_qos++) {
+                       if (TAILQ_FIRST(&wq->wq_reqlist[new_qos]))
+                               break;
+               }
+               old_qos = _wq_thactive_best_constrained_req_qos(wq);
+               if (old_qos != new_qos) {
+                       wq_thactive_t v = _wq_thactive_set_best_constrained_req_qos(wq,
+                                       old_qos, new_qos);
+#ifdef __LP64__
+                       PTHREAD_TRACE_WQ(TRACE_wq_thactive_update, 2, (uint64_t)v,
+                                       (uint64_t)(v >> 64), 0, 0);
+#else
+                       PTHREAD_TRACE_WQ(TRACE_wq_thactive_update, 2, v, 0, 0, 0);
+#endif
+               }
        }
-       wq->wq_thread_yielded_count = 0;
-
-       pthread_priority_t outgoing_priority = pthread_priority_from_wq_class_index(wq, tl->th_priority);
-       PTHREAD_TRACE_WQ(TRACE_wq_reset_priority | DBG_FUNC_START, wq, thread_tid(tl->th_thread), outgoing_priority, 0, 0);
-       reset_priority(tl, outgoing_priority);
-       PTHREAD_TRACE_WQ(TRACE_wq_reset_priority | DBG_FUNC_END, wq, thread_tid(tl->th_thread), outgoing_priority, 0, 0);
-
-       /*
-        * persist upcall_flags so that it can be retrieved in setup_wqthread
-        */
-       tl->th_upcall_flags = upcall_flags >> WQ_FLAG_THREAD_PRIOSHIFT;
-
-       /*
-        * if the current thread is reused for the work request, it does not return via unix_syscall
-        */
-       wq_runreq(p, th_to_run, wq, tl, (thread == th_to_run),
-                       (upcall_flags & WQ_FLAG_THREAD_KEVENT) && !kevent_bind_via_return);
-
-       PTHREAD_TRACE_WQ(TRACE_wq_run_nextitem|DBG_FUNC_END, wq, thread_tid(th_to_run), mode == RUN_NEXTREQ_OVERCOMMIT, 1, 0);
-
-       assert(!kevent_bind_via_return || (upcall_flags & WQ_FLAG_THREAD_KEVENT));
-       if (kevent_bind_via_return && (upcall_flags & WQ_FLAG_THREAD_KEVENT)) {
-               tl->th_flags |= TH_LIST_KEVENT_BOUND;
+       {
+               uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
+               if (req->tr_flags & TR_FLAG_OVERCOMMIT)
+                       upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
+               if (req->tr_flags & TR_FLAG_KEVENT)
+                       upcall_flags |= WQ_FLAG_THREAD_KEVENT;
+               if (req->tr_flags & TR_FLAG_WORKLOOP)
+                       upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
+               if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET)
+                       upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
+               tl->th_upcall_flags = upcall_flags >> WQ_FLAG_THREAD_PRIOSHIFT;
        }
-
-       workqueue_unlock(wq);
-
-       return th_to_run;
+       if (req->tr_flags & TR_FLAG_KEVENT) {
+               tl->th_flags |= TH_LIST_KEVENT;
+       } else {
+               tl->th_flags &= ~TH_LIST_KEVENT;
+       }
+       return _threadreq_complete_and_unlock(p, wq, req, tl);
 
 done:
-       if (start_timer)
-               workqueue_interval_timer_start(wq);
+       if (incoming_req) {
+               _threadreq_enqueue(wq, incoming_req);
+       }
 
-       PTHREAD_TRACE_WQ(TRACE_wq_run_nextitem | DBG_FUNC_END, wq, thread_tid(thread), start_timer, 3, 0);
+exiting:
 
-       if (thread != THREAD_NULL){
-               parkit(wq, tl, thread);
-               /* NOT REACHED */
+       if (parking_tl && !(parking_tl->th_flags & TH_LIST_UNBINDING)) {
+               parkit(wq, parking_tl, parking_tl->th_thread);
+               __builtin_unreachable();
        }
 
        workqueue_unlock(wq);
 
-       return THREAD_NULL;
+       return rc;
 }
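
The wq_reqlist scan above recomputes the QoS of the best waiting constrained request and publishes it into the packed wq_thactive atomic so the admission path can read it without taking the lock. A rough stand-alone model of that publish step; the field position and width below are assumed for illustration, since the real 128-bit layout is private to kern_support.c:

#include <stdatomic.h>
#include <stdint.h>

#define BEST_QOS_SHIFT 48                                /* position assumed */
#define BEST_QOS_MASK  ((uint64_t)0x7 << BEST_QOS_SHIFT)

/* Publish the QoS of the best waiting constrained request in the high bits
 * of the packed state word, leaving the low (count) bits untouched. */
static uint64_t set_best_constrained_qos(_Atomic uint64_t *state, uint64_t qos)
{
	uint64_t oldv = atomic_load_explicit(state, memory_order_relaxed);
	uint64_t newv;
	do {
		newv = (oldv & ~BEST_QOS_MASK) | (qos << BEST_QOS_SHIFT);
	} while (!atomic_compare_exchange_weak(state, &oldv, newv));
	return newv;
}

int main(void)
{
	_Atomic uint64_t state = 42;             /* low bits: fake active counts */
	set_best_constrained_qos(&state, 3);
	return (atomic_load(&state) & ~BEST_QOS_MASK) == 42 ? 0 : 1;
}
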
 
 /**
@@ -2975,13 +3614,24 @@ wq_unpark_continue(void* __unused ptr, wait_result_t wait_result)
        struct uthread *uth = pthread_kern->get_bsdthread_info(th);
        if (uth == NULL) goto done;
 
-       struct threadlist *tl = pthread_kern->uthread_get_threadlist(uth);
-       if (tl == NULL) goto done;
-
-       struct workqueue *wq = tl->th_workq;
+       struct workqueue *wq = pthread_kern->proc_get_wqptr(p);
+       if (wq == NULL) goto done;
 
        workqueue_lock_spin(wq);
 
+       struct threadlist *tl = pthread_kern->uthread_get_threadlist(uth);
+       assert(tl != WQ_THREADLIST_EXITING_POISON);
+       if (tl == NULL) {
+               /*
+                * We woke up before addnewthread() was finished setting us up.  Go
+                * ahead and exit, but before we do, poison the threadlist variable so
+                * that addnewthread() doesn't still think we are valid.
+                */
+               pthread_kern->uthread_set_threadlist(uth, WQ_THREADLIST_EXITING_POISON);
+               workqueue_unlock(wq);
+               goto done;
+       }
+
        assert(tl->th_flags & TH_LIST_INITED);
 
        if ((tl->th_flags & TH_LIST_NEW)){
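
WQ_THREADLIST_EXITING_POISON resolves a wakeup/initialization race: a thread that gives up early leaves a sentinel in its uthread slot so workqueue_addnewthread() can tell "never finished setup" apart from "valid threadlist". A stand-alone sketch of the idiom, with the sentinel value and helper names invented here:

#include <stdio.h>

#define EXITING_POISON ((void *)~0ul)

struct uthread { void *threadlist; };

/* Early-exit path: mark the slot so the initializer won't treat us as live. */
static void bail_out(struct uthread *ut)
{
	ut->threadlist = EXITING_POISON;
}

/* Initializer path: skip threads that raced ahead and already gave up. */
static int finish_setup(struct uthread *ut, void *tl)
{
	if (ut->threadlist == EXITING_POISON)
		return -1;              /* thread exited before setup finished */
	ut->threadlist = tl;
	return 0;
}

int main(void)
{
	struct uthread ut = { 0 };
	bail_out(&ut);
	printf("setup %s\n", finish_setup(&ut, &ut) ? "skipped" : "done");
	return 0;
}
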
@@ -3029,12 +3679,15 @@ wq_unpark_continue(void* __unused ptr, wait_result_t wait_result)
                        workqueue_unlock(wq);
 
                        thread_block(wq_unpark_continue);
-                       /* NOT REACHED */
+                       __builtin_unreachable();
                }
        }
 
        if ((tl->th_flags & TH_LIST_RUNNING) == 0) {
                assert((tl->th_flags & TH_LIST_BUSY) == 0);
+               if (!first_use) {
+                       PTHREAD_TRACE_WQ(TRACE_wq_thread_park | DBG_FUNC_END, wq, 0, 0, 0, 0);
+               }
                /*
                 * We were set running, but not for the purposes of actually running.
                 * This could be because the timer elapsed.  Or it could be because the
@@ -3047,6 +3700,9 @@ wq_unpark_continue(void* __unused ptr, wait_result_t wait_result)
                                (tl->th_priority < qos_class_get_class_index(WQ_THREAD_CLEANUP_QOS) ||
                                (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET))) {
                        // Reset the QoS to something low for the pthread cleanup
+                       PTHREAD_TRACE_WQ(TRACE_wq_thread_reset_priority | DBG_FUNC_NONE,
+                                               wq, thread_tid(th),
+                                               (tl->th_priority << 16) | qos_class_get_class_index(WQ_THREAD_CLEANUP_QOS), 3, 0);
                        pthread_priority_t cleanup_pri = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
                        reset_priority(tl, cleanup_pri);
                }
@@ -3058,7 +3714,7 @@ wq_unpark_continue(void* __unused ptr, wait_result_t wait_result)
                } else {
                        pthread_kern->unix_syscall_return(0);
                }
-               /* NOT REACHED */
+               __builtin_unreachable();
        }
 
        /*
@@ -3077,8 +3733,15 @@ wq_unpark_continue(void* __unused ptr, wait_result_t wait_result)
        }
 
 return_to_user:
-       workqueue_unlock(wq);
-       _setup_wqthread(p, th, wq, tl, first_use);
+       if (!first_use) {
+               PTHREAD_TRACE_WQ(TRACE_wq_thread_park | DBG_FUNC_END, wq, 0, 0, 0, 0);
+       }
+       if (_wq_pacing_end(wq, tl) && wq->wq_reqcount) {
+               workqueue_run_threadreq_and_unlock(p, wq, NULL, NULL, true);
+       } else {
+               workqueue_unlock(wq);
+       }
+       _setup_wqthread(p, th, wq, tl, first_use ? WQ_SETUP_FIRST_USE : 0);
        pthread_kern->thread_sched_call(th, workqueue_callback);
 done:
        if (first_use){
@@ -3089,66 +3752,6 @@ done:
        panic("Our attempt to return to userspace failed...");
 }
 
-/* called with workqueue lock held */
-static void
-wq_runreq(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *tl,
-                 boolean_t return_directly, boolean_t needs_kevent_bind)
-{
-       PTHREAD_TRACE1_WQ(TRACE_wq_runitem | DBG_FUNC_START, tl->th_workq, 0, 0, thread_tid(current_thread()), thread_tid(th));
-
-       unsigned int kevent_flags = KEVENT_FLAG_WORKQ;
-       if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
-               kevent_flags |= KEVENT_FLAG_WORKQ_MANAGER;
-       }
-
-       if (return_directly) {
-               if (needs_kevent_bind) {
-                       assert((tl->th_flags & TH_LIST_KEVENT_BOUND) == 0);
-                       tl->th_flags |= TH_LIST_KEVENT_BOUND;
-               }
-
-               workqueue_unlock(wq);
-
-               if (needs_kevent_bind) {
-                       kevent_qos_internal_bind(p, class_index_get_thread_qos(tl->th_priority), th, kevent_flags);
-               }
-
-               /*
-                * For preemption reasons, we want to reset the voucher as late as
-                * possible, so we do it in two places:
-                *   - Just before parking (i.e. in parkit())
-                *   - Prior to doing the setup for the next workitem (i.e. here)
-                *
-                * Those two places are sufficient to ensure we always reset it before
-                * it goes back out to user space, but be careful to not break that
-                * guarantee.
-                */
-               kern_return_t kr = pthread_kern->thread_set_voucher_name(MACH_PORT_NULL);
-               assert(kr == KERN_SUCCESS);
-
-               _setup_wqthread(p, th, wq, tl, false);
-
-               PTHREAD_TRACE_WQ(TRACE_wq_run_nextitem|DBG_FUNC_END, tl->th_workq, 0, 0, 4, 0);
-
-               pthread_kern->unix_syscall_return(EJUSTRETURN);
-               /* NOT REACHED */
-       }
-
-       if (needs_kevent_bind) {
-               // Leave TH_LIST_BUSY set so that the thread can't beat us to calling kevent
-               workqueue_unlock(wq);
-               assert((tl->th_flags & TH_LIST_KEVENT_BOUND) == 0);
-               kevent_qos_internal_bind(p, class_index_get_thread_qos(tl->th_priority), th, kevent_flags);
-               tl->th_flags |= TH_LIST_KEVENT_BOUND;
-               workqueue_lock_spin(wq);
-       }
-       tl->th_flags &= ~(TH_LIST_BUSY);
-       thread_wakeup_thread(tl,th);
-}
-
-#define KEVENT_LIST_LEN 16 // WORKQ_KEVENT_EVENT_BUFFER_LEN
-#define KEVENT_DATA_SIZE (32 * 1024)
-
 /**
  * configures initial thread stack/registers to jump into:
  * _pthread_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *keventlist, int upcall_flags, int nkevents);
@@ -3164,8 +3767,8 @@ wq_runreq(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *tl,
  * When we are done the stack will look like:
  * |-----------| th_stackaddr + th_allocsize
  * |pthread_t  | th_stackaddr + DEFAULT_STACKSIZE + guardsize + PTHREAD_STACK_OFFSET
- * |kevent list| optionally - at most KEVENT_LIST_LEN events
- * |kevent data| optionally - at most KEVENT_DATA_SIZE bytes
+ * |kevent list| optionally - at most WQ_KEVENT_LIST_LEN events
+ * |kevent data| optionally - at most WQ_KEVENT_DATA_SIZE bytes
  * |stack gap  | bottom aligned to 16 bytes, and at least as big as stack_gap_min
  * |   STACK   |
  * |     ⇓     |
@@ -3174,13 +3777,37 @@ wq_runreq(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *tl,
  * |-----------| th_stackaddr
  */
  */
 void
-_setup_wqthread(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *tl,
-               bool first_use)
+_setup_wqthread(proc_t p, thread_t th, struct workqueue *wq,
+               struct threadlist *tl, int setup_flags)
 {
        int error;
-       uint32_t upcall_flags;
+       if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
+               /*
+                * For preemption reasons, we want to reset the voucher as late as
+                * possible, so we do it in two places:
+                *   - Just before parking (i.e. in parkit())
+                *   - Prior to doing the setup for the next workitem (i.e. here)
+                *
+                * Those two places are sufficient to ensure we always reset it before
+                * it goes back out to user space, but be careful to not break that
+                * guarantee.
+                */
+               __assert_only kern_return_t kr;
+               kr = pthread_kern->thread_set_voucher_name(MACH_PORT_NULL);
+               assert(kr == KERN_SUCCESS);
+       }
+
+       uint32_t upcall_flags = tl->th_upcall_flags << WQ_FLAG_THREAD_PRIOSHIFT;
+       if (!(setup_flags & WQ_SETUP_FIRST_USE)) {
+               upcall_flags |= WQ_FLAG_THREAD_REUSE;
+       }
 
+       /*
+        * Put the QoS class value into the lower bits of the reuse_thread register;
+        * this is where the thread priority used to be stored anyway.
+        */
        pthread_priority_t priority = pthread_priority_from_wq_class_index(wq, tl->th_priority);
+       upcall_flags |= (_pthread_priority_get_qos_newest(priority) & WQ_FLAG_THREAD_PRIOMASK);
 
        const vm_size_t guardsize = vm_map_page_size(tl->th_workq->wq_map);
        const vm_size_t stack_gap_min = (proc_is64bit(p) == 0) ? C_32_STK_ALIGN : C_64_REDZONE_LEN;
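
As the comment above notes, the low WQ_FLAG_THREAD_PRIOMASK bits of upcall_flags carry the thread's priority while the real flags live above WQ_FLAG_THREAD_PRIOSHIFT. A hedged sketch of that packing arithmetic; the flag values below are assumed for the example, and only the mask name comes from this diff:

#include <stdint.h>
#include <assert.h>

#define WQ_FLAG_THREAD_PRIOMASK   0x0000ffff
#define WQ_FLAG_THREAD_PRIOSHIFT  16          /* value assumed for the sketch */
#define WQ_FLAG_THREAD_REUSE      0x00020000  /* value assumed for the sketch */

static uint32_t pack_upcall(uint32_t flags, uint32_t qos_bits)
{
	/* flags live above the shift; the QoS/priority rides in the low bits */
	return (flags & ~WQ_FLAG_THREAD_PRIOMASK) |
	       (qos_bits & WQ_FLAG_THREAD_PRIOMASK);
}

int main(void)
{
	uint32_t up = pack_upcall(WQ_FLAG_THREAD_REUSE, 0x15);
	assert((up & WQ_FLAG_THREAD_PRIOMASK) == 0x15);   /* priority intact */
	assert(up & WQ_FLAG_THREAD_REUSE);                /* flag intact */
	return 0;
}
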
@@ -3195,24 +3822,16 @@ _setup_wqthread(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *
                panic("workqueue thread start function pointer is NULL");
        }
 
-       /* Put the QoS class value into the lower bits of the reuse_thread register, this is where
-        * the thread priority used to be stored anyway.
-        */
-       upcall_flags  = tl->th_upcall_flags << WQ_FLAG_THREAD_PRIOSHIFT;
-       upcall_flags |= (_pthread_priority_get_qos_newest(priority) & WQ_FLAG_THREAD_PRIOMASK);
-
-       upcall_flags |= WQ_FLAG_THREAD_NEWSPI;
-
-       uint32_t tsd_offset = pthread_kern->proc_get_pthread_tsd_offset(p);
-       if (tsd_offset) {
-               mach_vm_offset_t th_tsd_base = (mach_vm_offset_t)pthread_self_addr + tsd_offset;
-               kern_return_t kret = pthread_kern->thread_set_tsd_base(th, th_tsd_base);
-               if (kret == KERN_SUCCESS) {
-                       upcall_flags |= WQ_FLAG_THREAD_TSD_BASE_SET;
+       if (setup_flags & WQ_SETUP_FIRST_USE) {
+               uint32_t tsd_offset = pthread_kern->proc_get_pthread_tsd_offset(p);
+               if (tsd_offset) {
+                       mach_vm_offset_t th_tsd_base = (mach_vm_offset_t)pthread_self_addr + tsd_offset;
+                       kern_return_t kret = pthread_kern->thread_set_tsd_base(th, th_tsd_base);
+                       if (kret == KERN_SUCCESS) {
+                               upcall_flags |= WQ_FLAG_THREAD_TSD_BASE_SET;
+                       }
                }
-       }
 
-       if (first_use) {
                /*
                * Pre-fault the first page of the new thread's stack and the page that will
                * contain the pthread_t structure.
@@ -3231,45 +3850,66 @@ _setup_wqthread(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *
                                VM_PROT_READ | VM_PROT_WRITE,
                                FALSE,
                                THREAD_UNINT, NULL, 0);
-       } else {
-               upcall_flags |= WQ_FLAG_THREAD_REUSE;
        }
 
        user_addr_t kevent_list = NULL;
        int kevent_count = 0;
        if (upcall_flags & WQ_FLAG_THREAD_KEVENT){
-               kevent_list = pthread_self_addr - KEVENT_LIST_LEN * sizeof(struct kevent_qos_s);
-               kevent_count = KEVENT_LIST_LEN;
+               bool workloop = upcall_flags & WQ_FLAG_THREAD_WORKLOOP;
+
+               kevent_list = pthread_self_addr - WQ_KEVENT_LIST_LEN * sizeof(struct kevent_qos_s);
+               kevent_count = WQ_KEVENT_LIST_LEN;
+
+               user_addr_t kevent_id_addr = kevent_list;
+               if (workloop) {
+                       /*
+                        * The kevent ID goes just below the kevent list.  Sufficiently new
+                        * userspace will know to look there.  Old userspace will just
+                        * ignore it.
+                        */
+                       kevent_id_addr -= sizeof(kqueue_id_t);
+               }
 
-               user_addr_t kevent_data_buf = kevent_list - KEVENT_DATA_SIZE;
-               user_size_t kevent_data_available = KEVENT_DATA_SIZE;
+               user_addr_t kevent_data_buf = kevent_id_addr - WQ_KEVENT_DATA_SIZE;
+               user_size_t kevent_data_available = WQ_KEVENT_DATA_SIZE;
 
                int32_t events_out = 0;
 
                assert(tl->th_flags & TH_LIST_KEVENT_BOUND);
-               unsigned int flags = KEVENT_FLAG_WORKQ | KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE;
+               unsigned int flags = KEVENT_FLAG_STACK_DATA | KEVENT_FLAG_IMMEDIATE;
                if (tl->th_priority == WORKQUEUE_EVENT_MANAGER_BUCKET) {
                        flags |= KEVENT_FLAG_WORKQ_MANAGER;
                }
-               int ret = kevent_qos_internal(p, class_index_get_thread_qos(tl->th_priority), NULL, 0, kevent_list, kevent_count,
-                                                                         kevent_data_buf, &kevent_data_available,
-                                                                         flags, &events_out);
-
-               // turns out there are a lot of edge cases where this will fail, so not enabled by default
-               //assert((ret == KERN_SUCCESS && events_out != -1) || ret == KERN_ABORTED);
+               int ret = 0;
+               if (workloop) {
+                       flags |= KEVENT_FLAG_WORKLOOP;
+                       kqueue_id_t kevent_id = -1;
+                       ret = kevent_id_internal(p, &kevent_id,
+                                       NULL, 0, kevent_list, kevent_count,
+                                       kevent_data_buf, &kevent_data_available,
+                                       flags, &events_out);
+                       copyout(&kevent_id, kevent_id_addr, sizeof(kevent_id));
+               } else {
+                       flags |= KEVENT_FLAG_WORKQ;
+                       ret = kevent_qos_internal(p,
+                                       class_index_get_thread_qos(tl->th_priority),
+                                       NULL, 0, kevent_list, kevent_count,
+                                       kevent_data_buf, &kevent_data_available,
+                                       flags, &events_out);
+               }
 
-               // squash any errors into just empty output on
+               // squash any errors into just empty output
                if (ret != KERN_SUCCESS || events_out == -1){
                        events_out = 0;
-                       kevent_data_available = KEVENT_DATA_SIZE;
+                       kevent_data_available = WQ_KEVENT_DATA_SIZE;
                }
 
                // We shouldn't get data out if there aren't events available
-               assert(events_out != 0 || kevent_data_available == KEVENT_DATA_SIZE);
+               assert(events_out != 0 || kevent_data_available == WQ_KEVENT_DATA_SIZE);
 
                if (events_out > 0){
-                       if (kevent_data_available == KEVENT_DATA_SIZE){
-                               stack_top_addr = (kevent_list - stack_gap_min) & -stack_align_min;
+                       if (kevent_data_available == WQ_KEVENT_DATA_SIZE){
+                               stack_top_addr = (kevent_id_addr - stack_gap_min) & -stack_align_min;
                        } else {
                                stack_top_addr = (kevent_data_buf + kevent_data_available - stack_gap_min) & -stack_align_min;
                        }
@@ -3281,6 +3921,8 @@ _setup_wqthread(proc_t p, thread_t th, struct workqueue *wq, struct threadlist *
                }
        }
 
+       PTHREAD_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START, wq, 0, 0, 0, 0);
+
 #if defined(__i386__) || defined(__x86_64__)
        if (proc_is64bit(p) == 0) {
                x86_thread_state32_t state = {
@@ -3337,31 +3979,38 @@ static int wq_kevent_test SYSCTL_HANDLER_ARGS {
        if (error) return error;
 
        _workq_reqthreads(req->p, (int)(req->newlen / sizeof(struct workq_reqthreads_req_s)), requests);
-       
+
        return 0;
 }
 #endif // DEBUG
-       
+
 #pragma mark - Misc
 
-int 
+int
 _fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo)
 {
        struct workqueue * wq;
        int error = 0;
        int     activecount;
-       uint32_t pri;
 
        if ((wq = pthread_kern->proc_get_wqptr(p)) == NULL) {
                return EINVAL;
        }
 
-       workqueue_lock_spin(wq);
-       activecount = 0;
-
-       for (pri = 0; pri < WORKQUEUE_NUM_BUCKETS; pri++) {
-               activecount += wq->wq_thactive_count[pri];
+       /*
+        * This is sometimes called from interrupt context by the kperf sampler.
+        * In that case, it's not safe to spin trying to take the lock since we
+        * might already hold it.  So, we just try-lock it and error out if it's
+        * already held.  Since this is just a debugging aid, and all our callers
+        * are able to handle an error, that's fine.
+        */
+       bool locked = workqueue_lock_try(wq);
+       if (!locked) {
+               return EBUSY;
        }
+
+       activecount = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq),
+                       WORKQUEUE_NUM_BUCKETS - 1, NULL, NULL);
        pwqinfo->pwq_nthreads = wq->wq_nthreads;
        pwqinfo->pwq_runthreads = activecount;
        pwqinfo->pwq_blockedthreads = wq->wq_threads_scheduled - activecount;
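
A minimal sketch of the try-lock-or-bail pattern described in the comment above, using a pthread mutex as a stand-in for the kext's spin lock; the struct and function names are invented:

#include <pthread.h>
#include <errno.h>

struct wq_stats { pthread_mutex_t lock; int nthreads; };

/* Safe even where blocking on the lock could deadlock (e.g. the caller
 * might already hold it): fail fast instead of spinning. */
static int fill_stats(struct wq_stats *wq, int *out_nthreads)
{
	if (pthread_mutex_trylock(&wq->lock) != 0)
		return EBUSY;               /* callers are expected to tolerate this */
	*out_nthreads = wq->nthreads;
	pthread_mutex_unlock(&wq->lock);
	return 0;
}

int main(void)
{
	struct wq_stats wq = { PTHREAD_MUTEX_INITIALIZER, 4 };
	int n = 0;
	return fill_stats(&wq, &n);   /* 0 on success, EBUSY if contended */
}
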
@@ -3405,7 +4054,7 @@ _get_pwq_state_kdp(proc_t p)
        return pwq_state;
 }
 
-int 
+int
 _thread_selfid(__unused struct proc *p, uint64_t *retval)
 {
        thread_t thread = current_thread();
@@ -3418,23 +4067,28 @@ _pthread_init(void)
 {
        pthread_lck_grp_attr = lck_grp_attr_alloc_init();
        pthread_lck_grp = lck_grp_alloc_init("pthread", pthread_lck_grp_attr);
-       
+
        /*
         * allocate the lock attribute for pthread synchronizers
         */
        pthread_lck_attr = lck_attr_alloc_init();
 
        pthread_list_mlock = lck_mtx_alloc_init(pthread_lck_grp, pthread_lck_attr);
-       
+
        pth_global_hashinit();
        psynch_thcall = thread_call_allocate(psynch_wq_cleanup, NULL);
        psynch_zoneinit();
 
+       pthread_zone_workqueue = zinit(sizeof(struct workqueue),
+                       1024 * sizeof(struct workqueue), 8192, "pthread.workqueue");
+       pthread_zone_threadlist = zinit(sizeof(struct threadlist),
+                       1024 * sizeof(struct threadlist), 8192, "pthread.threadlist");
+       pthread_zone_threadreq = zinit(sizeof(struct threadreq),
+                       1024 * sizeof(struct threadreq), 8192, "pthread.threadreq");
+
        /*
         * register sysctls
         */
-       sysctl_register_oid(&sysctl__kern_wq_yielded_threshold);
-       sysctl_register_oid(&sysctl__kern_wq_yielded_window_usecs);
        sysctl_register_oid(&sysctl__kern_wq_stalled_window_usecs);
        sysctl_register_oid(&sysctl__kern_wq_reduce_pool_window_usecs);
        sysctl_register_oid(&sysctl__kern_wq_max_timer_interval_usecs);
@@ -3443,10 +4097,13 @@ _pthread_init(void)
        sysctl_register_oid(&sysctl__kern_pthread_debug_tracing);
 
 #if DEBUG
-       sysctl_register_oid(&sysctl__kern_wq_max_concurrency);
        sysctl_register_oid(&sysctl__debug_wq_kevent_test);
 #endif
 
-       wq_max_concurrency = pthread_kern->ml_get_max_cpus();
-
+       for (int i = 0; i < WORKQUEUE_NUM_BUCKETS; i++) {
+               uint32_t thread_qos = _wq_bucket_to_thread_qos(i);
+               wq_max_concurrency[i] = pthread_kern->qos_max_parallelism(thread_qos,
+                               QOS_PARALLELISM_COUNT_LOGICAL);
+       }
+       wq_max_concurrency[WORKQUEUE_EVENT_MANAGER_BUCKET] = 1;
 }
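
With this hunk, each QoS bucket gets its own concurrency ceiling from qos_max_parallelism() instead of a single ml_get_max_cpus() value. A stand-alone sketch of how such a per-bucket table might gate constrained admission; the helper below is illustrative, not the kext's actual check:

#include <stdbool.h>
#include <stdint.h>

#define NUM_BUCKETS 6                    /* QoS buckets, manager excluded */

static uint32_t wq_max_concurrency[NUM_BUCKETS];

/* Stand-in for qos_max_parallelism(): pretend every class may use 8 CPUs. */
static uint32_t fake_max_parallelism(uint32_t bucket) { (void)bucket; return 8; }

/* Admit a constrained request only while its bucket is below its ceiling. */
static bool may_start_constrained(uint32_t bucket, uint32_t active_in_bucket)
{
	return active_in_bucket < wq_max_concurrency[bucket];
}

int main(void)
{
	for (uint32_t i = 0; i < NUM_BUCKETS; i++)
		wq_max_concurrency[i] = fake_max_parallelism(i);
	return may_start_constrained(0, 7) && !may_start_constrained(0, 8) ? 0 : 1;
}
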
index 0762358912bc0e31dd6322d3e5ed1cc9027439bf..57ebb4c98b46446eeab5ab0b9d8c6e2dc4f42329 100644 (file)
@@ -62,15 +62,13 @@ VM_UNSLIDE(void* ptr)
 # define PTHREAD_TRACE(x,a,b,c,d,e) \
        { if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT(x, a, b, c, d, e); } }
 
-# define PTHREAD_TRACE1(x,a,b,c,d,e) \
-       { if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT1(x, a, b, c, d, e); } }
-#endif
-
 # define PTHREAD_TRACE_WQ(x,a,b,c,d,e) \
        { if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT(x, VM_UNSLIDE(a), b, c, d, e); } }
 
-# define PTHREAD_TRACE1_WQ(x,a,b,c,d,e) \
-       { if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT1(x, VM_UNSLIDE(a), b, c, d, e); } }
+# define PTHREAD_TRACE_WQ_REQ(x,a,b,c,d,e) \
+       { if (pthread_debug_tracing) { KERNEL_DEBUG_CONSTANT(x, VM_UNSLIDE(a), VM_UNSLIDE(b), c, d, e); } }
+
+#endif
 
 # define TRACE_CODE(name, subclass, code) \
        static const int TRACE_##name = KDBG_CODE(DBG_PTHREAD, subclass, code)
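
TRACE_CODE is a thin wrapper over kdebug debugid construction: class, subclass, and per-point code are packed into one 32-bit constant, with the low two bits left free for DBG_FUNC_START/END. A simplified stand-alone expansion; the class and subclass values below are illustrative only:

#include <stdint.h>
#include <stdio.h>

#define DBG_PTHREAD 9          /* illustrative class value */

/* kdebug packs class/subclass/code into one 32-bit debugid, shifting the
 * code up by 2 so DBG_FUNC_START/END can ride in the low bits */
#define KDBG_CODE(class, subclass, code) \
	((uint32_t)(((class) << 24) | ((subclass) << 16) | ((code) << 2)))

#define TRACE_CODE(name, subclass, code) \
	static const uint32_t TRACE_##name = KDBG_CODE(DBG_PTHREAD, subclass, code)

TRACE_CODE(wq_run_threadreq, 3, 0x20);   /* subclass value illustrative */

int main(void)
{
	printf("TRACE_wq_run_threadreq = %#x\n", TRACE_wq_run_threadreq);
	return 0;
}
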
@@ -98,35 +96,28 @@ TRACE_CODE(pthread_set_qos_self, _TRACE_SUB_DEFAULT, 0x30);
 // workqueue trace points
 TRACE_CODE(wq_pthread_exit, _TRACE_SUB_WORKQUEUE, 0x01);
 TRACE_CODE(wq_workqueue_exit, _TRACE_SUB_WORKQUEUE, 0x02);
-TRACE_CODE(wq_run_nextitem, _TRACE_SUB_WORKQUEUE, 0x03);
+TRACE_CODE(wq_runthread, _TRACE_SUB_WORKQUEUE, 0x03);
 TRACE_CODE(wq_runitem, _TRACE_SUB_WORKQUEUE, 0x04);
-TRACE_CODE(wq_req_threads, _TRACE_SUB_WORKQUEUE, 0x05);
-TRACE_CODE(wq_req_octhreads, _TRACE_SUB_WORKQUEUE, 0x06);
-TRACE_CODE(wq_thread_suspend, _TRACE_SUB_WORKQUEUE, 0x07);
-TRACE_CODE(wq_thread_park, _TRACE_SUB_WORKQUEUE, 0x08);
 TRACE_CODE(wq_thread_block, _TRACE_SUB_WORKQUEUE, 0x9);
-TRACE_CODE(wq_new_max_scheduled, _TRACE_SUB_WORKQUEUE, 0xa);
+TRACE_CODE(wq_thactive_update, _TRACE_SUB_WORKQUEUE, 0xa);
 TRACE_CODE(wq_add_timer, _TRACE_SUB_WORKQUEUE, 0xb);
 TRACE_CODE(wq_start_add_timer, _TRACE_SUB_WORKQUEUE, 0x0c);
-TRACE_CODE(wq_stalled, _TRACE_SUB_WORKQUEUE, 0x0d);
-TRACE_CODE(wq_reset_priority, _TRACE_SUB_WORKQUEUE, 0x0e);
-TRACE_CODE(wq_thread_yielded, _TRACE_SUB_WORKQUEUE, 0x0f);
-TRACE_CODE(wq_delay_octhreads, _TRACE_SUB_WORKQUEUE, 0x10);
-TRACE_CODE(wq_overcommitted, _TRACE_SUB_WORKQUEUE, 0x11);
 TRACE_CODE(wq_override_start, _TRACE_SUB_WORKQUEUE, 0x12);
 TRACE_CODE(wq_override_end, _TRACE_SUB_WORKQUEUE, 0x13);
 TRACE_CODE(wq_override_dispatch, _TRACE_SUB_WORKQUEUE, 0x14);
 TRACE_CODE(wq_override_reset, _TRACE_SUB_WORKQUEUE, 0x15);
-TRACE_CODE(wq_req_event_manager, _TRACE_SUB_WORKQUEUE, 0x16);
-TRACE_CODE(wq_kevent_req_threads, _TRACE_SUB_WORKQUEUE, 0x17);
-TRACE_CODE(wq_req_kevent_threads, _TRACE_SUB_WORKQUEUE, 0x18);
-TRACE_CODE(wq_req_kevent_octhreads, _TRACE_SUB_WORKQUEUE, 0x19);
-TRACE_CODE(wq_thread_limit_exceeded, _TRACE_SUB_WORKQUEUE, 0x1a);
-TRACE_CODE(wq_thread_constrained_maxed, _TRACE_SUB_WORKQUEUE, 0x1b);
-TRACE_CODE(wq_thread_add_during_exit, _TRACE_SUB_WORKQUEUE, 0x1c);
 TRACE_CODE(wq_thread_create_failed, _TRACE_SUB_WORKQUEUE, 0x1d);
-TRACE_CODE(wq_manager_request, _TRACE_SUB_WORKQUEUE, 0x1e);
 TRACE_CODE(wq_thread_create, _TRACE_SUB_WORKQUEUE, 0x1f);
+TRACE_CODE(wq_run_threadreq, _TRACE_SUB_WORKQUEUE, 0x20);
+TRACE_CODE(wq_run_threadreq_mgr_merge, _TRACE_SUB_WORKQUEUE, 0x21);
+TRACE_CODE(wq_run_threadreq_req_select, _TRACE_SUB_WORKQUEUE, 0x22);
+TRACE_CODE(wq_run_threadreq_thread_select, _TRACE_SUB_WORKQUEUE, 0x23);
+TRACE_CODE(wq_thread_reset_priority, _TRACE_SUB_WORKQUEUE, 0x24);
+TRACE_CODE(wq_constrained_admission, _TRACE_SUB_WORKQUEUE, 0x25);
+TRACE_CODE(wq_wqops_reqthreads, _TRACE_SUB_WORKQUEUE, 0x26);
+TRACE_CODE(wq_kevent_reqthreads, _TRACE_SUB_WORKQUEUE, 0x27);
+TRACE_CODE(wq_thread_park, _TRACE_SUB_WORKQUEUE, 0x28);
+TRACE_CODE(wq_thread_squash, _TRACE_SUB_WORKQUEUE, 0x29);
 
 // synch trace points
 TRACE_CODE(psynch_mutex_ulock, _TRACE_SUB_MUTEX, 0x0);
index a3e5594d723947c34611fadc92d4b55ca9c7361a..6b22c412b7396dd035b46cfbd8997ff499710103 100644 (file)
 #define PTH_RWL_PBIT           0x04    // prepost (cv) pending in kernel
 
 #define PTH_RWL_MTX_WAIT       0x20    // in cvar in mutex wait
-#define PTH_RWL_RBIT           0x40    // reader pending in kernel (not used)
-#define PTH_RWL_MBIT           0x40    // overlapping grants from kernel
+#define PTH_RWL_UBIT           0x40    // lock is unlocked (no readers or writers)
+#define PTH_RWL_MBIT           0x40    // overlapping grants from kernel (only in updateval)
 #define PTH_RWL_IBIT           0x80    // lock reset, held until first successful unlock
 
 #define PTHRW_RWL_INIT         PTH_RWL_IBIT    // reset on the lock bits (U)
-#define PTHRW_RWLOCK_INIT      (PTH_RWL_IBIT | PTH_RWL_RBIT)   // reset on the lock bits (U)
-#define PTH_RWLOCK_RESET_RBIT  ((uint32_t)~PTH_RWL_RBIT)
+#define PTHRW_RWLOCK_INIT      (PTH_RWL_IBIT | PTH_RWL_UBIT)   // reset on the lock bits (U)
 
 // S word
 #define PTH_RWS_SBIT           0x01    // kernel transition seq not set yet
@@ -66,7 +65,6 @@
 #define PTHRW_RWS_SAVEMASK     (PTH_RWS_WSVBIT)        // save bits mask
 
 #define PTHRW_RWS_INIT         PTH_RWS_SBIT    // reset on the lock bits (U)
-#define PTHRW_SW_Reset_BIT_MASK (PTHRW_BIT_MASK & ~PTH_RWS_SBIT)       // All bits except the S bit
 
 // rw_flags
 #define PTHRW_KERN_PROCESS_SHARED      0x10
@@ -75,7 +73,6 @@
 #define PTHREAD_MTX_TID_SWITCHING (uint64_t)-1
 
 // L word tests
-#define can_rwl_readinuser(x) (((x) & (PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0)
 #define is_rwl_ebit_set(x) (((x) & PTH_RWL_EBIT) != 0)
 #define is_rwl_wbit_set(x) (((x) & PTH_RWL_WBIT) != 0)
 #define is_rwl_ebit_clear(x) (((x) & PTH_RWL_EBIT) == 0)
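
These is_rwl_* predicates are plain mask tests over the rwlock's L word. A tiny illustration using the U and I bits defined above; the is_rwl_ubit_set() helper is invented here by analogy with the macros shown:

#include <assert.h>
#include <stdint.h>

#define PTH_RWL_UBIT  0x40   /* lock is unlocked (no readers or writers) */
#define PTH_RWL_IBIT  0x80   /* lock reset, held until first successful unlock */

#define PTHRW_RWLOCK_INIT  (PTH_RWL_IBIT | PTH_RWL_UBIT)

#define is_rwl_ubit_set(x)  (((x) & PTH_RWL_UBIT) != 0)

int main(void)
{
	uint32_t lword = PTHRW_RWLOCK_INIT;
	assert(is_rwl_ubit_set(lword));          /* freshly reset lock is unlocked */
	lword &= ~PTH_RWL_UBIT;                  /* a taker clears the U bit */
	assert(!is_rwl_ubit_set(lword));
	return 0;
}
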
index 7e61a10658727875842268fda2bd2ecd174f41e7..28d870e529aba81f885c27274791e25de43fa346 100644 (file)
@@ -40,6 +40,8 @@
 #define WQOPS_QUEUE_REQTHREADS2    0x30        /* request a number of threads in a given priority bucket */
 #define WQOPS_THREAD_KEVENT_RETURN 0x40        /* parks the thread after delivering the passed kevent array */
 #define WQOPS_SET_EVENT_MANAGER_PRIORITY 0x80  /* max() the provided priority into the priority of the event manager */
+#define WQOPS_THREAD_WORKLOOP_RETURN 0x100     /* parks the thread after delivering the passed kevent array */
+#define WQOPS_SHOULD_NARROW 0x200      /* checks whether we should narrow our concurrency */
 
 /* flag values for upcall flags field, only 8 bits per struct threadlist */
 #define        WQ_FLAG_THREAD_PRIOMASK                 0x0000ffff
 #define WQ_FLAG_THREAD_KEVENT                  0x00080000  /* thread is response to kevent req */
 #define WQ_FLAG_THREAD_EVENT_MANAGER   0x00100000  /* event manager thread */
 #define WQ_FLAG_THREAD_TSD_BASE_SET            0x00200000  /* tsd base has already been set */
+#define WQ_FLAG_THREAD_WORKLOOP                        0x00400000  /* workloop thread */
 
 #define WQ_THREAD_CLEANUP_QOS QOS_CLASS_DEFAULT
 
+#define WQ_KEVENT_LIST_LEN  16 // WORKQ_KEVENT_EVENT_BUFFER_LEN
+#define WQ_KEVENT_DATA_SIZE (32 * 1024)
+
 /* These definitions are only available to the kext, to avoid bleeding constants and types across the boundary to
  * the userspace library.
  */
@@ -109,80 +115,93 @@ struct threadlist {
        uint8_t th_priority;
 };
 
-#define TH_LIST_INITED                 0x01 /* Set at thread creation. */
-#define TH_LIST_RUNNING        0x02 /* On thrunlist, not parked. */
-#define TH_LIST_KEVENT         0x04 /* Thread requested by kevent */
-#define TH_LIST_NEW                    0x08 /* First return to userspace */
-#define TH_LIST_BUSY           0x10 /* Removed from idle list but not ready yet. */
-#define TH_LIST_KEVENT_BOUND   0x20 /* Thread bound to kqueues */
-#define TH_LIST_CONSTRAINED    0x40 /* Non-overcommit thread. */
-#define TH_LIST_EVENT_MGR_SCHED_PRI    0x80 /* Non-QoS Event Manager */
+#define TH_LIST_INITED         0x0001 /* Set at thread creation. */
+#define TH_LIST_RUNNING                0x0002 /* On thrunlist, not parked. */
+#define TH_LIST_KEVENT         0x0004 /* Thread requested by kevent */
+#define TH_LIST_NEW            0x0008 /* First return to userspace */
+#define TH_LIST_BUSY           0x0010 /* Removed from idle list but not ready yet. */
+#define TH_LIST_KEVENT_BOUND   0x0020 /* Thread bound to kqueues */
+#define TH_LIST_CONSTRAINED    0x0040 /* Non-overcommit thread. */
+#define TH_LIST_EVENT_MGR_SCHED_PRI    0x0080 /* Non-QoS Event Manager */
+#define TH_LIST_UNBINDING      0x0100 /* Thread is unbinding during park */
+#define TH_LIST_REMOVING_VOUCHER       0x0200 /* Thread is removing its voucher */
+#define TH_LIST_PACING         0x0400 /* Thread is participating in pacing */
+
+struct threadreq {
+       TAILQ_ENTRY(threadreq) tr_entry;
+       uint16_t tr_flags;
+       uint8_t tr_state;
+       uint8_t tr_priority;
+};
+TAILQ_HEAD(threadreq_head, threadreq);
+
+#define TR_STATE_NEW           0 /* Not yet enqueued */
+#define TR_STATE_WAITING       1 /* Waiting to be serviced - on reqlist */
+#define TR_STATE_COMPLETE      2 /* Request handled - for caller to free */
+#define TR_STATE_DEAD          3
+
+#define TR_FLAG_KEVENT         0x01
+#define TR_FLAG_OVERCOMMIT     0x02
+#define TR_FLAG_ONSTACK                0x04
+#define TR_FLAG_WORKLOOP       0x08
+#define TR_FLAG_NO_PACING      0x10
+
+#if defined(__LP64__)
+typedef unsigned __int128 wq_thactive_t;
+#else
+typedef uint64_t wq_thactive_t;
+#endif
 
 struct workqueue {
        proc_t          wq_proc;
        vm_map_t        wq_map;
        task_t          wq_task;
 
-       _Atomic uint32_t        wq_flags;  // updated atomically
-       uint32_t        wq_lflags; // protected by wqueue lock
-
        lck_spin_t      wq_lock;
-       boolean_t       wq_interrupt_state;
 
        thread_call_t   wq_atimer_delayed_call;
        thread_call_t   wq_atimer_immediate_call;
 
-       uint64_t        wq_thread_yielded_timestamp;
-       uint32_t        wq_thread_yielded_count;
+       uint32_t _Atomic wq_flags;
        uint32_t        wq_timer_interval;
-       uint32_t        wq_max_concurrency;
        uint32_t        wq_threads_scheduled;
        uint32_t        wq_constrained_threads_scheduled;
        uint32_t        wq_nthreads;
        uint32_t        wq_thidlecount;
+       uint32_t        wq_event_manager_priority;
+       uint8_t         wq_lflags; // protected by wqueue lock
+       uint8_t         wq_paced; // protected by wqueue lock
+       uint16_t    __wq_unused;
 
        TAILQ_HEAD(, threadlist) wq_thrunlist;
        TAILQ_HEAD(, threadlist) wq_thidlelist;
        TAILQ_HEAD(, threadlist) wq_thidlemgrlist;
 
-       /* Counters for how many requests we have outstanding.  The invariants here:
-        *   - reqcount == SUM(requests) + (event manager ? 1 : 0)
-        *   - SUM(ocrequests) + SUM(kevent_requests) + SUM(kevent_ocrequests) <= SUM(requests)
-        *   - # of constrained requests is difference between quantities above
-        * i.e. a kevent+overcommit request will increment reqcount, requests and
-        * kevent_ocrequests only.
-        */
-       uint32_t        wq_reqcount;
-       uint16_t        wq_requests[WORKQUEUE_NUM_BUCKETS];
-       uint16_t        wq_ocrequests[WORKQUEUE_NUM_BUCKETS];
-       uint16_t        wq_kevent_requests[WORKQUEUE_NUM_BUCKETS];
-       uint16_t        wq_kevent_ocrequests[WORKQUEUE_NUM_BUCKETS];
-
-       uint16_t        wq_reqconc[WORKQUEUE_NUM_BUCKETS];                      /* requested concurrency for each priority level */
-       uint16_t        wq_thscheduled_count[WORKQUEUE_NUM_BUCKETS];
-       uint32_t        wq_thactive_count[WORKQUEUE_NUM_BUCKETS] __attribute__((aligned(4))); /* must be uint32_t since we OSAddAtomic on these */
-       uint64_t        wq_lastblocked_ts[WORKQUEUE_NUM_BUCKETS] __attribute__((aligned(8))); /* XXX: why per bucket? */
+       uint32_t        wq_reqcount;    /* number of elements on the following lists */
+       struct threadreq_head wq_overcommit_reqlist[WORKQUEUE_EVENT_MANAGER_BUCKET];
+       struct threadreq_head wq_reqlist[WORKQUEUE_EVENT_MANAGER_BUCKET];
+       struct threadreq wq_event_manager_threadreq;
 
-       uint32_t        wq_event_manager_priority;
-};
-#define WQ_LIST_INITED         0x01
-#define WQ_EXITING             0x02
-#define WQ_ATIMER_DELAYED_RUNNING      0x04
-#define WQ_ATIMER_IMMEDIATE_RUNNING    0x08
+       struct threadreq *wq_cached_threadreq;
 
-#define WQ_SETFLAG(wq, flag) __c11_atomic_fetch_or(&wq->wq_flags, flag, __ATOMIC_SEQ_CST)
-#define WQ_UNSETFLAG(wq, flag) __c11_atomic_fetch_and(&wq->wq_flags, ~flag, __ATOMIC_SEQ_CST)
+       uint16_t        wq_thscheduled_count[WORKQUEUE_NUM_BUCKETS];
+       _Atomic wq_thactive_t wq_thactive;
+       _Atomic uint64_t wq_lastblocked_ts[WORKQUEUE_NUM_BUCKETS];
+};
+#define WQ_EXITING             0x01
+#define WQ_ATIMER_DELAYED_RUNNING      0x02
+#define WQ_ATIMER_IMMEDIATE_RUNNING    0x04
 
 #define WQL_ATIMER_BUSY                0x01
 #define WQL_ATIMER_WAITING     0x02
 
 #define WORKQUEUE_MAXTHREADS           512
-#define WQ_YIELDED_THRESHOLD           2000
-#define WQ_YIELDED_WINDOW_USECS                30000
 #define WQ_STALLED_WINDOW_USECS                200
 #define WQ_REDUCE_POOL_WINDOW_USECS    5000000
 #define        WQ_MAX_TIMER_INTERVAL_USECS     50000
 
+#define WQ_THREADLIST_EXITING_POISON (void *)~0ul
+
 #endif // KERNEL
 
 #endif // _WORKQUEUE_INTERNAL_H_
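
The TR_STATE_* values above trace a request's lifecycle: NEW on creation, WAITING once enqueued on a reqlist, COMPLETE when a thread has been bound to it and the caller may free it. A hedged sketch of that forward-only state machine; the transition helper is invented, and in the kext the transitions happen under the workqueue lock:

#include <assert.h>
#include <stdint.h>

#define TR_STATE_NEW       0   /* Not yet enqueued */
#define TR_STATE_WAITING   1   /* Waiting to be serviced - on reqlist */
#define TR_STATE_COMPLETE  2   /* Request handled - for caller to free */

struct threadreq { uint8_t tr_state; uint8_t tr_priority; };

/* Only forward transitions are legal; enforce that in one place.
 * (A request serviced immediately may skip WAITING, hence ">".) */
static void threadreq_transition(struct threadreq *req, uint8_t next)
{
	assert(next > req->tr_state);
	req->tr_state = next;
}

int main(void)
{
	struct threadreq req = { TR_STATE_NEW, 2 };
	threadreq_transition(&req, TR_STATE_WAITING);   /* enqueued on reqlist */
	threadreq_transition(&req, TR_STATE_COMPLETE);  /* thread bound, caller frees */
	return 0;
}
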
index 050329393038e0de02d6ccefc620109f79be23ab..33df53770ed9fd472894ec34b84a3cd3e4aaaba0 100644 (file)
@@ -68,7 +68,6 @@
                6E8C16571B14F08A00C8987C /* plockstat.d in Sources */ = {isa = PBXBuildFile; fileRef = C9A325EF15B7513200270056 /* plockstat.d */; };
                6E8C16581B14F08A00C8987C /* pthread_cond.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F215B7513200270056 /* pthread_cond.c */; };
                6E8C16591B14F08A00C8987C /* pthread_mutex.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F515B7513200270056 /* pthread_mutex.c */; };
-               6E8C165A1B14F08A00C8987C /* pthread_mutex_up.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */; };
                6E8C165B1B14F08A00C8987C /* qos.c in Sources */ = {isa = PBXBuildFile; fileRef = C9244C1C1860D8EF00075748 /* qos.c */; };
                6E8C165C1B14F08A00C8987C /* pthread_rwlock.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F615B7513200270056 /* pthread_rwlock.c */; };
                6E8C165D1B14F08A00C8987C /* pthread_tsd.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F815B7513200270056 /* pthread_tsd.c */; };
                6E8C16761B14F08A00C8987C /* spinlock_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F715B7513200270056 /* spinlock_private.h */; };
                6E8C16771B14F08A00C8987C /* workqueue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F915B7513200270056 /* workqueue_private.h */; };
                6E8C16781B14F08A00C8987C /* private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9153095167ACC22006BB094 /* private.h */; };
-               6EB232CB1B0EB2E2005915CE /* pthread_mutex_up.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */; };
                6EB232CC1B0EB2F0005915CE /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C91B0EB29D005915CE /* resolver.c */; };
-               6EB232CD1B0EB318005915CE /* pthread_mutex_up.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */; };
                6EB232CE1B0EB31B005915CE /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C91B0EB29D005915CE /* resolver.c */; };
-               6EB232CF1B0EB321005915CE /* pthread_mutex_up.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */; };
                6EB232D01B0EB325005915CE /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C91B0EB29D005915CE /* resolver.c */; };
                74E594931613AAF4006C417B /* pthread.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FA15B7513200270056 /* pthread.c */; };
                74E594941613AAF4006C417B /* pthread_cancelable.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F115B7513200270056 /* pthread_cancelable.c */; };
                74E5949C1613AAF4006C417B /* pthread_atfork.c in Sources */ = {isa = PBXBuildFile; fileRef = C90E7AB415DC40D900A06D48 /* pthread_atfork.c */; };
                74E5949E1613AAF4006C417B /* pthread_asm.s in Sources */ = {isa = PBXBuildFile; fileRef = C99AD87D15DF04D10009A6F8 /* pthread_asm.s */; };
                74E594A61613AB10006C417B /* pthread_cancelable_cancel.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A1BF5215C9A9F5006BB313 /* pthread_cancelable_cancel.c */; };
-               9202B2311D1A5B3F00945880 /* introspection.h in Headers */ = {isa = PBXBuildFile; fileRef = 9202B2301D1A5B3F00945880 /* introspection.h */; };
                9202B2321D1A5B6200945880 /* introspection.h in Headers */ = {isa = PBXBuildFile; fileRef = 9202B2301D1A5B3F00945880 /* introspection.h */; settings = {ATTRIBUTES = (Public, ); }; };
                924D8EDF1C11833D002AC2BC /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
                924D8EE01C11833D002AC2BC /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
                C04545A71C584F4A006A53B3 /* pthread_cancelable_cancel.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A1BF5215C9A9F5006BB313 /* pthread_cancelable_cancel.c */; };
                C04545A81C584F4A006A53B3 /* pthread_cond.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F215B7513200270056 /* pthread_cond.c */; };
                C04545A91C584F4A006A53B3 /* pthread_mutex.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F515B7513200270056 /* pthread_mutex.c */; };
-               C04545AA1C584F4A006A53B3 /* pthread_mutex_up.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */; };
                C04545AB1C584F4A006A53B3 /* pthread_rwlock.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F615B7513200270056 /* pthread_rwlock.c */; };
                C04545AC1C584F4A006A53B3 /* pthread_support.c in Sources */ = {isa = PBXBuildFile; fileRef = C975D5DC15C9D16B0098ECD8 /* pthread_support.c */; };
                C04545AD1C584F4A006A53B3 /* pthread_tsd.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F815B7513200270056 /* pthread_tsd.c */; };
                C9CCFB9D18B6D0910060CAAE /* qos_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C99B17DA189C2E1B00991D38 /* qos_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
                C9D75E4216127B3900C2FB26 /* kern_synch.c in Sources */ = {isa = PBXBuildFile; fileRef = C9169DDB1603DE84005A2F8C /* kern_synch.c */; };
                E4063CF31906B75A000202F9 /* qos.h in Headers */ = {isa = PBXBuildFile; fileRef = E4063CF21906B4FB000202F9 /* qos.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               E41505D21E818BEB00F243FB /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C91B0EB29D005915CE /* resolver.c */; };
+               E41505D31E818BEB00F243FB /* pthread.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FA15B7513200270056 /* pthread.c */; };
+               E41505D41E818BEB00F243FB /* pthread_cancelable.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F115B7513200270056 /* pthread_cancelable.c */; };
+               E41505D51E818BEB00F243FB /* pthread_cancelable_cancel.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A1BF5215C9A9F5006BB313 /* pthread_cancelable_cancel.c */; };
+               E41505D61E818BEB00F243FB /* pthread_cond.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F215B7513200270056 /* pthread_cond.c */; };
+               E41505D71E818BEB00F243FB /* pthread_mutex.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F515B7513200270056 /* pthread_mutex.c */; };
+               E41505D91E818BEB00F243FB /* pthread_rwlock.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F615B7513200270056 /* pthread_rwlock.c */; };
+               E41505DA1E818BEB00F243FB /* pthread_support.c in Sources */ = {isa = PBXBuildFile; fileRef = C975D5DC15C9D16B0098ECD8 /* pthread_support.c */; };
+               E41505DB1E818BEB00F243FB /* pthread_tsd.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F815B7513200270056 /* pthread_tsd.c */; };
+               E41505DC1E818BEB00F243FB /* thread_setup.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FC15B7513200270056 /* thread_setup.c */; };
+               E41505DD1E818BEB00F243FB /* qos.c in Sources */ = {isa = PBXBuildFile; fileRef = C9244C1C1860D8EF00075748 /* qos.c */; };
+               E41505DE1E818BEB00F243FB /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
+               E41505DF1E818BEB00F243FB /* pthread_atfork.c in Sources */ = {isa = PBXBuildFile; fileRef = C90E7AB415DC40D900A06D48 /* pthread_atfork.c */; };
+               E41505E01E818BEB00F243FB /* pthread_asm.s in Sources */ = {isa = PBXBuildFile; fileRef = C99AD87D15DF04D10009A6F8 /* pthread_asm.s */; };
+               E41A64AE1E83C470009479A9 /* introspection.h in Headers */ = {isa = PBXBuildFile; fileRef = 9202B2301D1A5B3F00945880 /* introspection.h */; };
                E4657D4117284F7B007D1847 /* introspection_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4657D4017284F7B007D1847 /* introspection_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               E4F4498E1E82C1F000A7FB9A /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C91B0EB29D005915CE /* resolver.c */; };
+               E4F4498F1E82C1F000A7FB9A /* pthread.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FA15B7513200270056 /* pthread.c */; };
+               E4F449901E82C1F000A7FB9A /* pthread_cancelable.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F115B7513200270056 /* pthread_cancelable.c */; };
+               E4F449911E82C1F000A7FB9A /* pthread_cancelable_cancel.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A1BF5215C9A9F5006BB313 /* pthread_cancelable_cancel.c */; };
+               E4F449921E82C1F000A7FB9A /* pthread_cond.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F215B7513200270056 /* pthread_cond.c */; };
+               E4F449931E82C1F000A7FB9A /* pthread_mutex.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F515B7513200270056 /* pthread_mutex.c */; };
+               E4F449941E82C1F000A7FB9A /* pthread_rwlock.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F615B7513200270056 /* pthread_rwlock.c */; };
+               E4F449951E82C1F000A7FB9A /* pthread_support.c in Sources */ = {isa = PBXBuildFile; fileRef = C975D5DC15C9D16B0098ECD8 /* pthread_support.c */; };
+               E4F449961E82C1F000A7FB9A /* pthread_tsd.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F815B7513200270056 /* pthread_tsd.c */; };
+               E4F449971E82C1F000A7FB9A /* thread_setup.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FC15B7513200270056 /* thread_setup.c */; };
+               E4F449981E82C1F000A7FB9A /* qos.c in Sources */ = {isa = PBXBuildFile; fileRef = C9244C1C1860D8EF00075748 /* qos.c */; };
+               E4F449991E82C1F000A7FB9A /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
+               E4F4499A1E82C1F000A7FB9A /* pthread_atfork.c in Sources */ = {isa = PBXBuildFile; fileRef = C90E7AB415DC40D900A06D48 /* pthread_atfork.c */; };
+               E4F4499B1E82C1F000A7FB9A /* pthread_asm.s in Sources */ = {isa = PBXBuildFile; fileRef = C99AD87D15DF04D10009A6F8 /* pthread_asm.s */; };
+               E4F449AA1E82D03500A7FB9A /* resolver.c in Sources */ = {isa = PBXBuildFile; fileRef = 6EB232C91B0EB29D005915CE /* resolver.c */; };
+               E4F449AB1E82D03500A7FB9A /* pthread.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FA15B7513200270056 /* pthread.c */; };
+               E4F449AC1E82D03500A7FB9A /* pthread_cancelable.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F115B7513200270056 /* pthread_cancelable.c */; };
+               E4F449AD1E82D03500A7FB9A /* pthread_cond.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F215B7513200270056 /* pthread_cond.c */; };
+               E4F449AE1E82D03500A7FB9A /* pthread_mutex.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F515B7513200270056 /* pthread_mutex.c */; };
+               E4F449AF1E82D03500A7FB9A /* qos.c in Sources */ = {isa = PBXBuildFile; fileRef = C9244C1C1860D8EF00075748 /* qos.c */; };
+               E4F449B01E82D03500A7FB9A /* pthread_rwlock.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F615B7513200270056 /* pthread_rwlock.c */; };
+               E4F449B11E82D03500A7FB9A /* pthread_cwd.c in Sources */ = {isa = PBXBuildFile; fileRef = 924D8EDE1C11832A002AC2BC /* pthread_cwd.c */; };
+               E4F449B21E82D03500A7FB9A /* pthread_tsd.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325F815B7513200270056 /* pthread_tsd.c */; };
+               E4F449B31E82D03500A7FB9A /* pthread_cancelable_cancel.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A1BF5215C9A9F5006BB313 /* pthread_cancelable_cancel.c */; };
+               E4F449B41E82D03500A7FB9A /* pthread_cancelable_legacy.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A1BF5415C9CB9D006BB313 /* pthread_cancelable_legacy.c */; };
+               E4F449B51E82D03500A7FB9A /* pthread_cond_legacy.c in Sources */ = {isa = PBXBuildFile; fileRef = C975D5D615C9CECA0098ECD8 /* pthread_cond_legacy.c */; };
+               E4F449B61E82D03500A7FB9A /* pthread_mutex_legacy.c in Sources */ = {isa = PBXBuildFile; fileRef = C975D5D815C9CEEA0098ECD8 /* pthread_mutex_legacy.c */; };
+               E4F449B71E82D03500A7FB9A /* pthread_rwlock_legacy.c in Sources */ = {isa = PBXBuildFile; fileRef = C975D5DA15C9CEFA0098ECD8 /* pthread_rwlock_legacy.c */; };
+               E4F449B81E82D03500A7FB9A /* pthread_support.c in Sources */ = {isa = PBXBuildFile; fileRef = C975D5DC15C9D16B0098ECD8 /* pthread_support.c */; };
+               E4F449B91E82D03500A7FB9A /* thread_setup.c in Sources */ = {isa = PBXBuildFile; fileRef = C9A325FC15B7513200270056 /* thread_setup.c */; };
+               E4F449BA1E82D03500A7FB9A /* pthread_atfork.c in Sources */ = {isa = PBXBuildFile; fileRef = C90E7AB415DC40D900A06D48 /* pthread_atfork.c */; };
+               E4F449BB1E82D03500A7FB9A /* pthread_asm.s in Sources */ = {isa = PBXBuildFile; fileRef = C99AD87D15DF04D10009A6F8 /* pthread_asm.s */; };
+               E4F449BE1E82D03500A7FB9A /* qos.h in Headers */ = {isa = PBXBuildFile; fileRef = C9244C1A185FCFED00075748 /* qos.h */; settings = {ATTRIBUTES = (Public, ); }; };
+               E4F449BF1E82D03500A7FB9A /* pthread.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325FE15B7513700270056 /* pthread.h */; settings = {ATTRIBUTES = (Public, ); }; };
+               E4F449C01E82D03500A7FB9A /* pthread_impl.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325FF15B7513700270056 /* pthread_impl.h */; settings = {ATTRIBUTES = (Public, ); }; };
+               E4F449C11E82D03500A7FB9A /* qos.h in Headers */ = {isa = PBXBuildFile; fileRef = E4063CF21906B4FB000202F9 /* qos.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               E4F449C21E82D03500A7FB9A /* pthread_spis.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A3260015B7513700270056 /* pthread_spis.h */; settings = {ATTRIBUTES = (Public, ); }; };
+               E4F449C31E82D03500A7FB9A /* introspection.h in Headers */ = {isa = PBXBuildFile; fileRef = 9202B2301D1A5B3F00945880 /* introspection.h */; settings = {ATTRIBUTES = (Public, ); }; };
+               E4F449C41E82D03500A7FB9A /* sched.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A3260115B7513700270056 /* sched.h */; settings = {ATTRIBUTES = (Public, ); }; };
+               E4F449C51E82D03500A7FB9A /* introspection_private.h in Headers */ = {isa = PBXBuildFile; fileRef = E4657D4017284F7B007D1847 /* introspection_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               E4F449C61E82D03500A7FB9A /* tsd_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F415B7513200270056 /* tsd_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               E4F449C71E82D03500A7FB9A /* posix_sched.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F015B7513200270056 /* posix_sched.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               E4F449C81E82D03500A7FB9A /* qos_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C99B17DA189C2E1B00991D38 /* qos_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               E4F449C91E82D03500A7FB9A /* spawn.h in Headers */ = {isa = PBXBuildFile; fileRef = C98C95D818FF1F4E005654FB /* spawn.h */; settings = {ATTRIBUTES = (Public, ); }; };
+               E4F449CA1E82D03500A7FB9A /* spinlock_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F715B7513200270056 /* spinlock_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               E4F449CB1E82D03500A7FB9A /* workqueue_private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9A325F915B7513200270056 /* workqueue_private.h */; settings = {ATTRIBUTES = (Private, ); }; };
+               E4F449CC1E82D03500A7FB9A /* private.h in Headers */ = {isa = PBXBuildFile; fileRef = C9153095167ACC22006BB094 /* private.h */; settings = {ATTRIBUTES = (Private, ); }; };
 /* End PBXBuildFile section */
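Each PBXBuildFile entry above wires an existing PBXFileReference into one build phase of the new noresolver target defined later in this diff; the ATTRIBUTES setting decides whether a header installs as Public or Private. As a quick, hedged check (plain grep, no project-specific tooling assumed), the Private headers attached to that target's Headers phase can be listed with:

    # List the Private headers wired into the new target's Headers phase.
    grep 'in Headers' libpthread.xcodeproj/project.pbxproj | grep E4F449C | grep Private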
 
 /* Begin PBXContainerItemProxy section */
                        remoteGlobalIDString = C90E7A9E15DC3C3800A06D48;
                        remoteInfo = libpthread.a;
                };
+               E4F4498A1E825D2B00A7FB9A /* PBXContainerItemProxy */ = {
+                       isa = PBXContainerItemProxy;
+                       containerPortal = C9A325D915B7347000270056 /* Project object */;
+                       proxyType = 1;
+                       remoteGlobalIDString = E41505D01E818BEB00F243FB;
+                       remoteInfo = "libpthread mp resolved";
+               };
+               E4F449A11E82C5A400A7FB9A /* PBXContainerItemProxy */ = {
+                       isa = PBXContainerItemProxy;
+                       containerPortal = C9A325D915B7347000270056 /* Project object */;
+                       proxyType = 1;
+                       remoteGlobalIDString = E4F4498C1E82C1F000A7FB9A;
+                       remoteInfo = "libpthread alt resolved";
+               };
 /* End PBXContainerItemProxy section */
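The two PBXContainerItemProxy objects above are the plumbing for the new target dependencies: proxyType = 1 marks an in-project target dependency, and remoteGlobalIDString carries the object ID of the target being depended upon. A minimal sketch for resolving such an ID back to its target by hand, assuming the project file sits at its in-tree path:

    # E41505D01E818BEB00F243FB should resolve to "libpthread mp resolved".
    grep -n 'E41505D01E818BEB00F243FB /\*' libpthread.xcodeproj/project.pbxproj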
 
 /* Begin PBXFileReference section */
                6E8C16801B14F08A00C8987C /* libsystem_pthread.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libsystem_pthread.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
                6E8C16851B14F14000C8987C /* pthread_introspection.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = pthread_introspection.xcconfig; sourceTree = "<group>"; };
-               6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pthread_mutex_up.c; sourceTree = "<group>"; };
                6EB232C91B0EB29D005915CE /* resolver.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = resolver.c; sourceTree = "<group>"; };
                6EB232CA1B0EB29D005915CE /* resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = "<group>"; };
                74E594A41613AAF4006C417B /* libpthread_eOS.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libpthread_eOS.a; sourceTree = BUILT_PRODUCTS_DIR; };
                C9A1BF5215C9A9F5006BB313 /* pthread_cancelable_cancel.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pthread_cancelable_cancel.c; sourceTree = "<group>"; };
                C9A1BF5415C9CB9D006BB313 /* pthread_cancelable_legacy.c */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.c; path = pthread_cancelable_legacy.c; sourceTree = "<group>"; };
                C9A325E215B7347000270056 /* libsystem_pthread.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libsystem_pthread.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
-               C9A325EE15B7513200270056 /* mk_pthread_impl.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = mk_pthread_impl.c; sourceTree = "<group>"; };
                C9A325EF15B7513200270056 /* plockstat.d */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.dtrace; path = plockstat.d; sourceTree = "<group>"; };
                C9A325F015B7513200270056 /* posix_sched.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = posix_sched.h; sourceTree = "<group>"; };
                C9A325F115B7513200270056 /* pthread_cancelable.c */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.c; path = pthread_cancelable.c; sourceTree = "<group>"; };
                C9D9E8FE1626248800448CED /* pthread-Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = "pthread-Info.plist"; sourceTree = "<group>"; };
                C9DCA2A115DC4F2000D057E2 /* install-manpages.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-manpages.sh"; sourceTree = "<group>"; };
                E4063CF21906B4FB000202F9 /* qos.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = qos.h; sourceTree = "<group>"; };
+               E41505E71E818BEB00F243FB /* libpthread_mp.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libpthread_mp.a; sourceTree = BUILT_PRODUCTS_DIR; };
+               E41505E81E818D4D00F243FB /* resolved.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = resolved.xcconfig; sourceTree = "<group>"; };
                E4657D4017284F7B007D1847 /* introspection_private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = introspection_private.h; sourceTree = "<group>"; };
+               E4943AA71E80BD8400D2A961 /* resolver_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver_internal.h; sourceTree = "<group>"; };
                E4D962F919086AD600E8A9F2 /* qos.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = qos.h; sourceTree = "<group>"; };
                E4D962FC19086C5700E8A9F2 /* install-sys-headers.sh */ = {isa = PBXFileReference; lastKnownFileType = text.script.sh; path = "install-sys-headers.sh"; sourceTree = "<group>"; };
+               E4F449A01E82C1F000A7FB9A /* libpthread_alt.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libpthread_alt.a; sourceTree = BUILT_PRODUCTS_DIR; };
+               E4F449A31E82CF0100A7FB9A /* resolver.xcconfig */ = {isa = PBXFileReference; lastKnownFileType = text.xcconfig; path = resolver.xcconfig; sourceTree = "<group>"; };
+               E4F449D41E82D03500A7FB9A /* libsystem_pthread.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libsystem_pthread.dylib; sourceTree = BUILT_PRODUCTS_DIR; };
                FC30E28D16A747AD00A25B5F /* synch_internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = synch_internal.h; sourceTree = "<group>"; };
                FC5A372417CEB3D6008C323E /* _pthread_attr_t.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = _pthread_attr_t.h; sourceTree = "<group>"; };
                FC5A372517CEB3D6008C323E /* _pthread_cond_t.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = _pthread_cond_t.h; sourceTree = "<group>"; };
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
+               E4F449BC1E82D03500A7FB9A /* Frameworks */ = {
+                       isa = PBXFrameworksBuildPhase;
+                       buildActionMask = 2147483647;
+                       files = (
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+               };
 /* End PBXFrameworksBuildPhase section */
 
 /* Begin PBXGroup section */
                                C9CA27D91602813000259F78 /* pthread.kext */,
                                6E8C16801B14F08A00C8987C /* libsystem_pthread.dylib */,
                                C04545B81C584F4A006A53B3 /* libpthread.a */,
+                               E41505E71E818BEB00F243FB /* libpthread_mp.a */,
+                               E4F449A01E82C1F000A7FB9A /* libpthread_alt.a */,
+                               E4F449D41E82D03500A7FB9A /* libsystem_pthread.dylib */,
                        );
                        name = Products;
                        sourceTree = "<group>";
                        isa = PBXGroup;
                        children = (
                                C9A325F315B7513200270056 /* internal.h */,
-                               6EB232CA1B0EB29D005915CE /* resolver.h */,
-                               6EB232C91B0EB29D005915CE /* resolver.c */,
-                               C9A325EE15B7513200270056 /* mk_pthread_impl.c */,
                                C9A325EF15B7513200270056 /* plockstat.d */,
                                C9A325FA15B7513200270056 /* pthread.c */,
                                C99AD87D15DF04D10009A6F8 /* pthread_asm.s */,
                                C9A325F215B7513200270056 /* pthread_cond.c */,
                                924D8EDE1C11832A002AC2BC /* pthread_cwd.c */,
                                C9A325F515B7513200270056 /* pthread_mutex.c */,
-                               6EB232C81B0EB29D005915CE /* pthread_mutex_up.c */,
                                C9A325F615B7513200270056 /* pthread_rwlock.c */,
                                C975D5DC15C9D16B0098ECD8 /* pthread_support.c */,
                                C9A325F815B7513200270056 /* pthread_tsd.c */,
                                C9244C1C1860D8EF00075748 /* qos.c */,
                                C9A325FC15B7513200270056 /* thread_setup.c */,
+                               E4943AAA1E80BE1F00D2A961 /* resolver */,
                                C9A1BF5115C9A8B7006BB313 /* variants */,
                        );
                        path = src;
                                9235CA551CA48D010015C92B /* kext_development.xcconfig */,
                                C91D01BA162893CD0002E29A /* kext.xcconfig */,
                                C9A3260C15B759B600270056 /* pthread.xcconfig */,
+                               E4F449A31E82CF0100A7FB9A /* resolver.xcconfig */,
+                               E41505E81E818D4D00F243FB /* resolved.xcconfig */,
                                6E8C16851B14F14000C8987C /* pthread_introspection.xcconfig */,
                                C99EA612161F8288003EBC56 /* eos.xcconfig */,
                                C04545B91C584F8B006A53B3 /* static.xcconfig */,
                        path = private;
                        sourceTree = "<group>";
                };
+               E4943AAA1E80BE1F00D2A961 /* resolver */ = {
+                       isa = PBXGroup;
+                       children = (
+                               6EB232CA1B0EB29D005915CE /* resolver.h */,
+                               E4943AA71E80BD8400D2A961 /* resolver_internal.h */,
+                               6EB232C91B0EB29D005915CE /* resolver.c */,
+                       );
+                       path = resolver;
+                       sourceTree = "<group>";
+               };
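The group edits above move resolver.c and resolver.h out of the top-level src group and into a dedicated group with path = resolver, alongside the new resolver_internal.h; the PBXFileReference IDs are reused, so only the containing group and the on-disk directory change. A minimal check against a checkout of this tree:

    # Confirm the resolver sources now live under src/resolver/.
    ls src/resolver/resolver.c src/resolver/resolver.h src/resolver/resolver_internal.h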
                FC5A372217CEB3D6008C323E /* sys */ = {
                        isa = PBXGroup;
                        children = (
                        files = (
                                6E8C16711B14F08A00C8987C /* posix_sched.h in Headers */,
                                6E8C166F1B14F08A00C8987C /* introspection_private.h in Headers */,
+                               E41A64AE1E83C470009479A9 /* introspection.h in Headers */,
                                6E8C166C1B14F08A00C8987C /* qos.h in Headers */,
                                6E8C16701B14F08A00C8987C /* tsd_private.h in Headers */,
                                6E8C16731B14F08A00C8987C /* qos_private.h in Headers */,
                        isa = PBXHeadersBuildPhase;
                        buildActionMask = 2147483647;
                        files = (
-                               9202B2311D1A5B3F00945880 /* introspection.h in Headers */,
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+               };
+               E4F449BD1E82D03500A7FB9A /* Headers */ = {
+                       isa = PBXHeadersBuildPhase;
+                       buildActionMask = 2147483647;
+                       files = (
+                               E4F449BE1E82D03500A7FB9A /* qos.h in Headers */,
+                               E4F449BF1E82D03500A7FB9A /* pthread.h in Headers */,
+                               E4F449C01E82D03500A7FB9A /* pthread_impl.h in Headers */,
+                               E4F449C11E82D03500A7FB9A /* qos.h in Headers */,
+                               E4F449C21E82D03500A7FB9A /* pthread_spis.h in Headers */,
+                               E4F449C31E82D03500A7FB9A /* introspection.h in Headers */,
+                               E4F449C41E82D03500A7FB9A /* sched.h in Headers */,
+                               E4F449C51E82D03500A7FB9A /* introspection_private.h in Headers */,
+                               E4F449C61E82D03500A7FB9A /* tsd_private.h in Headers */,
+                               E4F449C71E82D03500A7FB9A /* posix_sched.h in Headers */,
+                               E4F449C81E82D03500A7FB9A /* qos_private.h in Headers */,
+                               E4F449C91E82D03500A7FB9A /* spawn.h in Headers */,
+                               E4F449CA1E82D03500A7FB9A /* spinlock_private.h in Headers */,
+                               E4F449CB1E82D03500A7FB9A /* workqueue_private.h in Headers */,
+                               E4F449CC1E82D03500A7FB9A /* private.h in Headers */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
 /* End PBXLegacyTarget section */
 
 /* Begin PBXNativeTarget section */
-               6E8C16511B14F08A00C8987C /* libsystem_pthread.dylib introspection */ = {
+               6E8C16511B14F08A00C8987C /* libsystem_pthread introspection */ = {
                        isa = PBXNativeTarget;
-                       buildConfigurationList = 6E8C167D1B14F08A00C8987C /* Build configuration list for PBXNativeTarget "libsystem_pthread.dylib introspection" */;
+                       buildConfigurationList = 6E8C167D1B14F08A00C8987C /* Build configuration list for PBXNativeTarget "libsystem_pthread introspection" */;
                        buildPhases = (
                                6E8C16521B14F08A00C8987C /* Generate dtrace header */,
                                6E8C16531B14F08A00C8987C /* Sources */,
                        );
                        dependencies = (
                        );
-                       name = "libsystem_pthread.dylib introspection";
-                       productName = Libpthread;
+                       name = "libsystem_pthread introspection";
+                       productName = libsystem_pthread.dylib;
                        productReference = 6E8C16801B14F08A00C8987C /* libsystem_pthread.dylib */;
                        productType = "com.apple.product-type.library.dynamic";
                };
-               74E594911613AAF4006C417B /* libpthread.a eOS */ = {
+               74E594911613AAF4006C417B /* libpthread eOS */ = {
                        isa = PBXNativeTarget;
-                       buildConfigurationList = 74E594A21613AAF4006C417B /* Build configuration list for PBXNativeTarget "libpthread.a eOS" */;
+                       buildConfigurationList = 74E594A21613AAF4006C417B /* Build configuration list for PBXNativeTarget "libpthread eOS" */;
                        buildPhases = (
                                74E594921613AAF4006C417B /* Sources */,
                                74E594A01613AAF4006C417B /* Frameworks */,
                        );
                        dependencies = (
                        );
-                       name = "libpthread.a eOS";
-                       productName = libpthread.a;
+                       name = "libpthread eOS";
+                       productName = libpthread_eOS.a;
                        productReference = 74E594A41613AAF4006C417B /* libpthread_eOS.a */;
                        productType = "com.apple.product-type.library.static";
                };
-               C04545A21C584F4A006A53B3 /* libpthread.a generic */ = {
+               C04545A21C584F4A006A53B3 /* libpthread generic */ = {
                        isa = PBXNativeTarget;
-                       buildConfigurationList = C04545B51C584F4A006A53B3 /* Build configuration list for PBXNativeTarget "libpthread.a generic" */;
+                       buildConfigurationList = C04545B51C584F4A006A53B3 /* Build configuration list for PBXNativeTarget "libpthread generic" */;
                        buildPhases = (
                                C04545A31C584F4A006A53B3 /* Sources */,
                                C04545B31C584F4A006A53B3 /* Frameworks */,
                        );
                        dependencies = (
                        );
-                       name = "libpthread.a generic";
+                       name = "libpthread generic";
                        productName = libpthread.a;
                        productReference = C04545B81C584F4A006A53B3 /* libpthread.a */;
                        productType = "com.apple.product-type.library.static";
                };
-               C90E7A9E15DC3C3800A06D48 /* libpthread.a dyld */ = {
+               C90E7A9E15DC3C3800A06D48 /* libpthread dyld */ = {
                        isa = PBXNativeTarget;
-                       buildConfigurationList = C90E7AA115DC3C3800A06D48 /* Build configuration list for PBXNativeTarget "libpthread.a dyld" */;
+                       buildConfigurationList = C90E7AA115DC3C3800A06D48 /* Build configuration list for PBXNativeTarget "libpthread dyld" */;
                        buildPhases = (
                                C90E7A9B15DC3C3800A06D48 /* Sources */,
                                C90E7A9C15DC3C3800A06D48 /* Frameworks */,
                        );
                        dependencies = (
                        );
-                       name = "libpthread.a dyld";
-                       productName = libpthread.a;
+                       name = "libpthread dyld";
+                       productName = libpthread_dyld.a;
                        productReference = C90E7A9F15DC3C3800A06D48 /* libpthread_dyld.a */;
                        productType = "com.apple.product-type.library.static";
                };
-               C9A325E115B7347000270056 /* libsystem_pthread.dylib */ = {
+               C9A325E115B7347000270056 /* libsystem_pthread */ = {
                        isa = PBXNativeTarget;
-                       buildConfigurationList = C9A325E615B7347000270056 /* Build configuration list for PBXNativeTarget "libsystem_pthread.dylib" */;
+                       buildConfigurationList = C9A325E615B7347000270056 /* Build configuration list for PBXNativeTarget "libsystem_pthread" */;
                        buildPhases = (
                                C9A325DE15B7347000270056 /* Sources */,
                                C9A325DF15B7347000270056 /* Frameworks */,
                        buildRules = (
                        );
                        dependencies = (
+                               E4F4498B1E825D2B00A7FB9A /* PBXTargetDependency */,
+                               E4F449A21E82C5A400A7FB9A /* PBXTargetDependency */,
                        );
-                       name = libsystem_pthread.dylib;
-                       productName = Libpthread;
+                       name = libsystem_pthread;
+                       productName = libsystem_pthread.dylib;
                        productReference = C9A325E215B7347000270056 /* libsystem_pthread.dylib */;
                        productType = "com.apple.product-type.library.dynamic";
                };
-               C9CA27D81602813000259F78 /* pthread */ = {
+               C9CA27D81602813000259F78 /* pthread kext */ = {
                        isa = PBXNativeTarget;
-                       buildConfigurationList = C9CA27E71602813000259F78 /* Build configuration list for PBXNativeTarget "pthread" */;
+                       buildConfigurationList = C9CA27E71602813000259F78 /* Build configuration list for PBXNativeTarget "pthread kext" */;
                        buildPhases = (
                                C9CA27D31602813000259F78 /* Sources */,
                                C9CA27D41602813000259F78 /* Frameworks */,
                                C9CA27D51602813000259F78 /* Headers */,
                                C9CA27D61602813000259F78 /* Resources */,
-                               C9CA27D71602813000259F78 /* Rez */,
                                C9A960B518452C1800AE10C8 /* Install lldbmacros */,
                        );
                        buildRules = (
                        );
                        dependencies = (
                        );
-                       name = pthread;
-                       productName = pthread;
+                       name = "pthread kext";
+                       productName = pthread.kext;
                        productReference = C9CA27D91602813000259F78 /* pthread.kext */;
                        productType = "com.apple.product-type.kernel-extension";
                };
+               E41505D01E818BEB00F243FB /* libpthread mp resolved */ = {
+                       isa = PBXNativeTarget;
+                       buildConfigurationList = E41505E41E818BEB00F243FB /* Build configuration list for PBXNativeTarget "libpthread mp resolved" */;
+                       buildPhases = (
+                               E41505D11E818BEB00F243FB /* Sources */,
+                               E41505E31E818BEB00F243FB /* Symlink normal variant */,
+                       );
+                       buildRules = (
+                       );
+                       dependencies = (
+                       );
+                       name = "libpthread mp resolved";
+                       productName = libpthread_mp.a;
+                       productReference = E41505E71E818BEB00F243FB /* libpthread_mp.a */;
+                       productType = "com.apple.product-type.library.static";
+               };
+               E4F4498C1E82C1F000A7FB9A /* libpthread alt resolved */ = {
+                       isa = PBXNativeTarget;
+                       buildConfigurationList = E4F4499D1E82C1F000A7FB9A /* Build configuration list for PBXNativeTarget "libpthread alt resolved" */;
+                       buildPhases = (
+                               E4F4498D1E82C1F000A7FB9A /* Sources */,
+                               E4F4499C1E82C1F000A7FB9A /* Symlink normal variant */,
+                       );
+                       buildRules = (
+                       );
+                       dependencies = (
+                       );
+                       name = "libpthread alt resolved";
+                       productName = libpthread_alt.a;
+                       productReference = E4F449A01E82C1F000A7FB9A /* libpthread_alt.a */;
+                       productType = "com.apple.product-type.library.static";
+               };
+               E4F449A41E82D03500A7FB9A /* libsystem_pthread noresolver */ = {
+                       isa = PBXNativeTarget;
+                       buildConfigurationList = E4F449D11E82D03500A7FB9A /* Build configuration list for PBXNativeTarget "libsystem_pthread noresolver" */;
+                       buildPhases = (
+                               E4F449A91E82D03500A7FB9A /* Sources */,
+                               E4F449BC1E82D03500A7FB9A /* Frameworks */,
+                               E4F449BD1E82D03500A7FB9A /* Headers */,
+                               E4F449CD1E82D03500A7FB9A /* Symlink Old Header Location */,
+                               E4F449CE1E82D03500A7FB9A /* Install Manpages */,
+                               E4F449CF1E82D03500A7FB9A /* Install sys headers */,
+                               E4F449D01E82D03500A7FB9A /* Install Codes file */,
+                       );
+                       buildRules = (
+                       );
+                       dependencies = (
+                       );
+                       name = "libsystem_pthread noresolver";
+                       productName = libsystem_pthread.dylib;
+                       productReference = E4F449D41E82D03500A7FB9A /* libsystem_pthread.dylib */;
+                       productType = "com.apple.product-type.library.dynamic";
+               };
 /* End PBXNativeTarget section */
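Three of the native targets above are new: "libpthread mp resolved" and "libpthread alt resolved" each build a static archive (libpthread_mp.a, libpthread_alt.a) and then symlink a _normal-named copy of it, while "libsystem_pthread noresolver" builds the dylib directly from the per-lock legacy sources instead of depending on the resolved archives. A sketch for enumerating the resulting target list with stock tooling:

    xcodebuild -list -project libpthread.xcodeproj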
 
 /* Begin PBXProject section */
                C9A325D915B7347000270056 /* Project object */ = {
                        isa = PBXProject;
                        attributes = {
-                               LastUpgradeCheck = 0800;
+                               LastUpgradeCheck = 0900;
                                ORGANIZATIONNAME = "";
                                TargetAttributes = {
                                        92799B441B96A5FD00861404 = {
                                C91D01B5162892FF0002E29A /* Kext */,
                                C98832C115DEB44000B3308E /* Embedded */,
                                92799B441B96A5FD00861404 /* Tests */,
-                               6E8C16511B14F08A00C8987C /* libsystem_pthread.dylib introspection */,
-                               C9A325E115B7347000270056 /* libsystem_pthread.dylib */,
-                               C04545A21C584F4A006A53B3 /* libpthread.a generic */,
-                               C90E7A9E15DC3C3800A06D48 /* libpthread.a dyld */,
-                               74E594911613AAF4006C417B /* libpthread.a eOS */,
-                               C9CA27D81602813000259F78 /* pthread */,
+                               C9A325E115B7347000270056 /* libsystem_pthread */,
+                               E4F449A41E82D03500A7FB9A /* libsystem_pthread noresolver */,
+                               6E8C16511B14F08A00C8987C /* libsystem_pthread introspection */,
+                               E41505D01E818BEB00F243FB /* libpthread mp resolved */,
+                               E4F4498C1E82C1F000A7FB9A /* libpthread alt resolved */,
+                               C04545A21C584F4A006A53B3 /* libpthread generic */,
+                               C90E7A9E15DC3C3800A06D48 /* libpthread dyld */,
+                               74E594911613AAF4006C417B /* libpthread eOS */,
+                               C9CA27D81602813000259F78 /* pthread kext */,
                                92B275F01BCE4C5E007D06D7 /* darwintests */,
                        );
                };
                };
 /* End PBXResourcesBuildPhase section */
 
-/* Begin PBXRezBuildPhase section */
-               C9CA27D71602813000259F78 /* Rez */ = {
-                       isa = PBXRezBuildPhase;
-                       buildActionMask = 2147483647;
-                       files = (
-                       );
-                       runOnlyForDeploymentPostprocessing = 0;
-               };
-/* End PBXRezBuildPhase section */
-
 /* Begin PBXShellScriptBuildPhase section */
                6E8C16521B14F08A00C8987C /* Generate dtrace header */ = {
                        isa = PBXShellScriptBuildPhase;
                                "$(DERIVED_FILE_DIR)/dtrace/plockstat.h",
                        );
                        runOnlyForDeploymentPostprocessing = 0;
-                       shellPath = /bin/sh;
+                       shellPath = "/bin/bash -e -x";
                        shellScript = "dtrace -h -C -s \"${SCRIPT_INPUT_FILE_0}\" -o \"${SCRIPT_OUTPUT_FILE_0}\"";
+                       showEnvVarsInLog = 0;
                };
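Throughout this diff the script phases switch shellPath from /bin/sh to "/bin/bash -e -x", so each script now aborts on the first failing command and traces every command into the build log, while showEnvVarsInLog = 0 keeps Xcode from dumping the full environment there. Run standalone, the phase above amounts to roughly the following sketch; the input file is assumed to be src/plockstat.d, which is not spelled out in this hunk:

    set -e -x  # flags implied by shellPath = "/bin/bash -e -x"
    dtrace -h -C -s src/plockstat.d -o "${DERIVED_FILE_DIR}/dtrace/plockstat.h"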
                C04545891C5844F8006A53B3 /* Symlink libpthread_dyld.a to libpthread.a */ = {
                        isa = PBXShellScriptBuildPhase;
                        outputPaths = (
                        );
                        runOnlyForDeploymentPostprocessing = 1;
-                       shellPath = /bin/sh;
-                       shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf libpthread_dyld.a ${DSTROOT}${INSTALL_PATH}/libpthread.a";
+                       shellPath = "/bin/bash -e -x";
+                       shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf libpthread_dyld.a \"${DSTROOT}${INSTALL_PATH}/libpthread.a\"";
+                       showEnvVarsInLog = 0;
                };
                C04545BA1C585034006A53B3 /* Symlink libpthread.a to the loaderd path */ = {
                        isa = PBXShellScriptBuildPhase;
                                "${DSTROOT}/usr/local/lib/loaderd/libpthread.a",
                        );
                        runOnlyForDeploymentPostprocessing = 1;
-                       shellPath = /bin/sh;
-                       shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf ../../../..${INSTALL_PATH}/libpthread.a ${DSTROOT}/usr/local/lib/loaderd/libpthread.a";
+                       shellPath = "/bin/bash -e -x";
+                       shellScript = ". \"${SCRIPT_INPUT_FILE_0}\" /bin/ln -sf \"../../../..${INSTALL_PATH}/libpthread.a\" \"${DSTROOT}/usr/local/lib/loaderd/libpthread.a\"";
+                       showEnvVarsInLog = 0;
                };
                C979E9FD18A2BF3D000951E5 /* Install Codes file */ = {
                        isa = PBXShellScriptBuildPhase;
                                "$(DSTROOT)/usr/local/share/misc/pthread.codes",
                        );
                        runOnlyForDeploymentPostprocessing = 1;
-                       shellPath = "/bin/bash -e";
+                       shellPath = "/bin/bash -e -x";
                        shellScript = ". \"$PROJECT_DIR\"/xcodescripts/install-codes.sh";
+                       showEnvVarsInLog = 0;
                };
                C9A960B518452C1800AE10C8 /* Install lldbmacros */ = {
                        isa = PBXShellScriptBuildPhase;
                        outputPaths = (
                        );
                        runOnlyForDeploymentPostprocessing = 0;
-                       shellPath = /bin/sh;
+                       shellPath = "/bin/bash -e -x";
                        shellScript = ". \"$PROJECT_DIR\"/xcodescripts/install-lldbmacros.sh";
+                       showEnvVarsInLog = 0;
                };
                C9D70EBD167AC76700D52713 /* Symlink Old Header Location */ = {
                        isa = PBXShellScriptBuildPhase;
                        outputPaths = (
                        );
                        runOnlyForDeploymentPostprocessing = 1;
-                       shellPath = "/bin/bash -e";
+                       shellPath = "/bin/bash -e -x";
+                       shellScript = ". \"$PROJECT_DIR\"/xcodescripts/install-manpages.sh";
+                       showEnvVarsInLog = 0;
+               };
+               E41505E31E818BEB00F243FB /* Symlink normal variant */ = {
+                       isa = PBXShellScriptBuildPhase;
+                       buildActionMask = 12;
+                       files = (
+                       );
+                       inputPaths = (
+                       );
+                       name = "Symlink normal variant";
+                       outputPaths = (
+                               "$(CONFIGURATION_BUILD_DIR)/$(EXECUTABLE_PREFIX)$(PRODUCT_NAME)_normal$(EXECUTABLE_SUFFIX)",
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+                       shellPath = "/bin/bash -e -x";
+                       shellScript = "ln -fs \"${EXECUTABLE_PREFIX}${PRODUCT_NAME}${EXECUTABLE_SUFFIX}\" \"${SCRIPT_OUTPUT_FILE_0}\"";
+                       showEnvVarsInLog = 0;
+               };
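The "Symlink normal variant" phase above (repeated verbatim for the alt target below) plants a relative symlink named like the _normal build variant next to the real archive, presumably so that tooling expecting variant-suffixed static libraries still finds one. With plausible setting values substituted in (EXECUTABLE_PREFIX=lib and EXECUTABLE_SUFFIX=.a are assumptions, not values visible in this diff), the mp target's phase reduces to:

    ln -fs libpthread_mp.a "${CONFIGURATION_BUILD_DIR}/libpthread_mp_normal.a"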
+               E4F4499C1E82C1F000A7FB9A /* Symlink normal variant */ = {
+                       isa = PBXShellScriptBuildPhase;
+                       buildActionMask = 12;
+                       files = (
+                       );
+                       inputPaths = (
+                       );
+                       name = "Symlink normal variant";
+                       outputPaths = (
+                               "$(CONFIGURATION_BUILD_DIR)/$(EXECUTABLE_PREFIX)$(PRODUCT_NAME)_normal$(EXECUTABLE_SUFFIX)",
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+                       shellPath = "/bin/bash -e -x";
+                       shellScript = "ln -fs \"${EXECUTABLE_PREFIX}${PRODUCT_NAME}${EXECUTABLE_SUFFIX}\" \"${SCRIPT_OUTPUT_FILE_0}\"";
+                       showEnvVarsInLog = 0;
+               };
+               E4F449CD1E82D03500A7FB9A /* Symlink Old Header Location */ = {
+                       isa = PBXShellScriptBuildPhase;
+                       buildActionMask = 8;
+                       files = (
+                       );
+                       inputPaths = (
+                               "",
+                       );
+                       name = "Symlink Old Header Location";
+                       outputPaths = (
+                       );
+                       runOnlyForDeploymentPostprocessing = 1;
+                       shellPath = "/bin/bash -e -x";
+                       shellScript = ". \"$PROJECT_DIR\"/xcodescripts/install-symlinks.sh";
+                       showEnvVarsInLog = 0;
+               };
+               E4F449CE1E82D03500A7FB9A /* Install Manpages */ = {
+                       isa = PBXShellScriptBuildPhase;
+                       buildActionMask = 8;
+                       files = (
+                       );
+                       inputPaths = (
+                               "",
+                       );
+                       name = "Install Manpages";
+                       outputPaths = (
+                       );
+                       runOnlyForDeploymentPostprocessing = 1;
+                       shellPath = "/bin/bash -e -x";
                        shellScript = ". \"$PROJECT_DIR\"/xcodescripts/install-manpages.sh";
                        showEnvVarsInLog = 0;
                };
+               E4F449CF1E82D03500A7FB9A /* Install sys headers */ = {
+                       isa = PBXShellScriptBuildPhase;
+                       buildActionMask = 8;
+                       files = (
+                       );
+                       inputPaths = (
+                       );
+                       name = "Install sys headers";
+                       outputPaths = (
+                       );
+                       runOnlyForDeploymentPostprocessing = 1;
+                       shellPath = "/bin/bash -e -x";
+                       shellScript = ". \"$PROJECT_DIR\"/xcodescripts/install-sys-headers.sh";
+                       showEnvVarsInLog = 0;
+               };
+               E4F449D01E82D03500A7FB9A /* Install Codes file */ = {
+                       isa = PBXShellScriptBuildPhase;
+                       buildActionMask = 8;
+                       files = (
+                       );
+                       inputPaths = (
+                               "$(SRCROOT)/kern/kern_trace.h",
+                       );
+                       name = "Install Codes file";
+                       outputPaths = (
+                               "$(DSTROOT)/usr/local/share/misc/pthread.codes",
+                       );
+                       runOnlyForDeploymentPostprocessing = 1;
+                       shellPath = "/bin/bash -e -x";
+                       shellScript = ". \"$PROJECT_DIR\"/xcodescripts/install-codes.sh";
+                       showEnvVarsInLog = 0;
+               };
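All four install phases of the noresolver target use buildActionMask = 8 with runOnlyForDeploymentPostprocessing = 1, meaning they run only during the install action and are skipped by an ordinary build. A sketch for exercising them, with the target name taken from this diff and SDK/scheme details left to the environment:

    xcodebuild install -project libpthread.xcodeproj -target "libsystem_pthread noresolver"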
                FC5A372F17CEB60D008C323E /* Install sys headers */ = {
                        isa = PBXShellScriptBuildPhase;
                        buildActionMask = 8;
                        outputPaths = (
                        );
                        runOnlyForDeploymentPostprocessing = 1;
-                       shellPath = /bin/sh;
+                       shellPath = "/bin/bash -e -x";
                        shellScript = ". \"$PROJECT_DIR\"/xcodescripts/install-sys-headers.sh";
                        showEnvVarsInLog = 0;
                };
                                6E8C16571B14F08A00C8987C /* plockstat.d in Sources */,
                                6E8C16581B14F08A00C8987C /* pthread_cond.c in Sources */,
                                6E8C16591B14F08A00C8987C /* pthread_mutex.c in Sources */,
-                               6E8C165A1B14F08A00C8987C /* pthread_mutex_up.c in Sources */,
                                924D8EE21C11833E002AC2BC /* pthread_cwd.c in Sources */,
                                6E8C165B1B14F08A00C8987C /* qos.c in Sources */,
                                6E8C165C1B14F08A00C8987C /* pthread_rwlock.c in Sources */,
                                74E594A61613AB10006C417B /* pthread_cancelable_cancel.c in Sources */,
                                74E594951613AAF4006C417B /* pthread_cond.c in Sources */,
                                74E594961613AAF4006C417B /* pthread_mutex.c in Sources */,
-                               6EB232CF1B0EB321005915CE /* pthread_mutex_up.c in Sources */,
                                74E594971613AAF4006C417B /* pthread_rwlock.c in Sources */,
                                74E594981613AAF4006C417B /* pthread_support.c in Sources */,
                                74E594991613AAF4006C417B /* pthread_tsd.c in Sources */,
                                C04545A71C584F4A006A53B3 /* pthread_cancelable_cancel.c in Sources */,
                                C04545A81C584F4A006A53B3 /* pthread_cond.c in Sources */,
                                C04545A91C584F4A006A53B3 /* pthread_mutex.c in Sources */,
-                               C04545AA1C584F4A006A53B3 /* pthread_mutex_up.c in Sources */,
                                C04545AB1C584F4A006A53B3 /* pthread_rwlock.c in Sources */,
                                C04545AC1C584F4A006A53B3 /* pthread_support.c in Sources */,
                                C04545AD1C584F4A006A53B3 /* pthread_tsd.c in Sources */,
                                C90E7AA515DC3C9D00A06D48 /* pthread_cancelable.c in Sources */,
                                C90E7AA615DC3C9D00A06D48 /* pthread_cond.c in Sources */,
                                C90E7AA715DC3C9D00A06D48 /* pthread_mutex.c in Sources */,
-                               6EB232CD1B0EB318005915CE /* pthread_mutex_up.c in Sources */,
                                C90E7AA815DC3C9D00A06D48 /* pthread_rwlock.c in Sources */,
                                C90E7AA915DC3C9D00A06D48 /* pthread_support.c in Sources */,
                                C90E7AAA15DC3C9D00A06D48 /* pthread_tsd.c in Sources */,
                                C9A1BF4815C9A578006BB313 /* pthread_cancelable.c in Sources */,
                                C9A1BF4915C9A578006BB313 /* pthread_cond.c in Sources */,
                                C9A1BF4A15C9A578006BB313 /* pthread_mutex.c in Sources */,
-                               6EB232CB1B0EB2E2005915CE /* pthread_mutex_up.c in Sources */,
                                C9244C1D1860D8EF00075748 /* qos.c in Sources */,
                                C9A1BF4B15C9A578006BB313 /* pthread_rwlock.c in Sources */,
                                924D8EE11C11833E002AC2BC /* pthread_cwd.c in Sources */,
                        );
                        runOnlyForDeploymentPostprocessing = 0;
                };
+               E41505D11E818BEB00F243FB /* Sources */ = {
+                       isa = PBXSourcesBuildPhase;
+                       buildActionMask = 2147483647;
+                       files = (
+                               E41505D21E818BEB00F243FB /* resolver.c in Sources */,
+                               E41505D31E818BEB00F243FB /* pthread.c in Sources */,
+                               E41505D41E818BEB00F243FB /* pthread_cancelable.c in Sources */,
+                               E41505D51E818BEB00F243FB /* pthread_cancelable_cancel.c in Sources */,
+                               E41505D61E818BEB00F243FB /* pthread_cond.c in Sources */,
+                               E41505D71E818BEB00F243FB /* pthread_mutex.c in Sources */,
+                               E41505D91E818BEB00F243FB /* pthread_rwlock.c in Sources */,
+                               E41505DA1E818BEB00F243FB /* pthread_support.c in Sources */,
+                               E41505DB1E818BEB00F243FB /* pthread_tsd.c in Sources */,
+                               E41505DC1E818BEB00F243FB /* thread_setup.c in Sources */,
+                               E41505DD1E818BEB00F243FB /* qos.c in Sources */,
+                               E41505DE1E818BEB00F243FB /* pthread_cwd.c in Sources */,
+                               E41505DF1E818BEB00F243FB /* pthread_atfork.c in Sources */,
+                               E41505E01E818BEB00F243FB /* pthread_asm.s in Sources */,
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+               };
+               E4F4498D1E82C1F000A7FB9A /* Sources */ = {
+                       isa = PBXSourcesBuildPhase;
+                       buildActionMask = 2147483647;
+                       files = (
+                               E4F4498E1E82C1F000A7FB9A /* resolver.c in Sources */,
+                               E4F4498F1E82C1F000A7FB9A /* pthread.c in Sources */,
+                               E4F449901E82C1F000A7FB9A /* pthread_cancelable.c in Sources */,
+                               E4F449911E82C1F000A7FB9A /* pthread_cancelable_cancel.c in Sources */,
+                               E4F449921E82C1F000A7FB9A /* pthread_cond.c in Sources */,
+                               E4F449931E82C1F000A7FB9A /* pthread_mutex.c in Sources */,
+                               E4F449941E82C1F000A7FB9A /* pthread_rwlock.c in Sources */,
+                               E4F449951E82C1F000A7FB9A /* pthread_support.c in Sources */,
+                               E4F449961E82C1F000A7FB9A /* pthread_tsd.c in Sources */,
+                               E4F449971E82C1F000A7FB9A /* thread_setup.c in Sources */,
+                               E4F449981E82C1F000A7FB9A /* qos.c in Sources */,
+                               E4F449991E82C1F000A7FB9A /* pthread_cwd.c in Sources */,
+                               E4F4499A1E82C1F000A7FB9A /* pthread_atfork.c in Sources */,
+                               E4F4499B1E82C1F000A7FB9A /* pthread_asm.s in Sources */,
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+               };
+               E4F449A91E82D03500A7FB9A /* Sources */ = {
+                       isa = PBXSourcesBuildPhase;
+                       buildActionMask = 2147483647;
+                       files = (
+                               E4F449AA1E82D03500A7FB9A /* resolver.c in Sources */,
+                               E4F449AB1E82D03500A7FB9A /* pthread.c in Sources */,
+                               E4F449AC1E82D03500A7FB9A /* pthread_cancelable.c in Sources */,
+                               E4F449AD1E82D03500A7FB9A /* pthread_cond.c in Sources */,
+                               E4F449AE1E82D03500A7FB9A /* pthread_mutex.c in Sources */,
+                               E4F449AF1E82D03500A7FB9A /* qos.c in Sources */,
+                               E4F449B01E82D03500A7FB9A /* pthread_rwlock.c in Sources */,
+                               E4F449B11E82D03500A7FB9A /* pthread_cwd.c in Sources */,
+                               E4F449B21E82D03500A7FB9A /* pthread_tsd.c in Sources */,
+                               E4F449B31E82D03500A7FB9A /* pthread_cancelable_cancel.c in Sources */,
+                               E4F449B41E82D03500A7FB9A /* pthread_cancelable_legacy.c in Sources */,
+                               E4F449B51E82D03500A7FB9A /* pthread_cond_legacy.c in Sources */,
+                               E4F449B61E82D03500A7FB9A /* pthread_mutex_legacy.c in Sources */,
+                               E4F449B71E82D03500A7FB9A /* pthread_rwlock_legacy.c in Sources */,
+                               E4F449B81E82D03500A7FB9A /* pthread_support.c in Sources */,
+                               E4F449B91E82D03500A7FB9A /* thread_setup.c in Sources */,
+                               E4F449BA1E82D03500A7FB9A /* pthread_atfork.c in Sources */,
+                               E4F449BB1E82D03500A7FB9A /* pthread_asm.s in Sources */,
+                       );
+                       runOnlyForDeploymentPostprocessing = 0;
+               };
 /* End PBXSourcesBuildPhase section */
 
 /* Begin PBXTargetDependency section */
                6E8C16821B14F11800C8987C /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = 6E8C16511B14F08A00C8987C /* libsystem_pthread.dylib introspection */;
+                       target = 6E8C16511B14F08A00C8987C /* libsystem_pthread introspection */;
                        targetProxy = 6E8C16811B14F11800C8987C /* PBXContainerItemProxy */;
                };
                6E8C16841B14F11B00C8987C /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = 6E8C16511B14F08A00C8987C /* libsystem_pthread.dylib introspection */;
+                       target = 6E8C16511B14F08A00C8987C /* libsystem_pthread introspection */;
                        targetProxy = 6E8C16831B14F11B00C8987C /* PBXContainerItemProxy */;
                };
                74E594AB1613AD7F006C417B /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = 74E594911613AAF4006C417B /* libpthread.a eOS */;
+                       target = 74E594911613AAF4006C417B /* libpthread eOS */;
                        targetProxy = 74E594AA1613AD7F006C417B /* PBXContainerItemProxy */;
                };
                925383BB1BD01EED00F745DB /* PBXTargetDependency */ = {
                };
                C04545BC1C58510F006A53B3 /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = C04545A21C584F4A006A53B3 /* libpthread.a generic */;
+                       target = C04545A21C584F4A006A53B3 /* libpthread generic */;
                        targetProxy = C04545BB1C58510F006A53B3 /* PBXContainerItemProxy */;
                };
                C04545BE1C585487006A53B3 /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = C04545A21C584F4A006A53B3 /* libpthread.a generic */;
+                       target = C04545A21C584F4A006A53B3 /* libpthread generic */;
                        targetProxy = C04545BD1C585487006A53B3 /* PBXContainerItemProxy */;
                };
                C90E7AB015DC3D3D00A06D48 /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = C9A325E115B7347000270056 /* libsystem_pthread.dylib */;
+                       target = C9A325E115B7347000270056 /* libsystem_pthread */;
                        targetProxy = C90E7AAF15DC3D3D00A06D48 /* PBXContainerItemProxy */;
                };
                C90E7AB215DC3D3D00A06D48 /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = C90E7A9E15DC3C3800A06D48 /* libpthread.a dyld */;
+                       target = C90E7A9E15DC3C3800A06D48 /* libpthread dyld */;
                        targetProxy = C90E7AB115DC3D3D00A06D48 /* PBXContainerItemProxy */;
                };
                C91D01B9162893070002E29A /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = C9CA27D81602813000259F78 /* pthread */;
+                       target = C9CA27D81602813000259F78 /* pthread kext */;
                        targetProxy = C91D01B8162893070002E29A /* PBXContainerItemProxy */;
                };
                C91D01BC162CA80F0002E29A /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = C9CA27D81602813000259F78 /* pthread */;
+                       target = C9CA27D81602813000259F78 /* pthread kext */;
                        targetProxy = C91D01BB162CA80F0002E29A /* PBXContainerItemProxy */;
                };
                C98832C615DEB44B00B3308E /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = C9A325E115B7347000270056 /* libsystem_pthread.dylib */;
+                       target = C9A325E115B7347000270056 /* libsystem_pthread */;
                        targetProxy = C98832C515DEB44B00B3308E /* PBXContainerItemProxy */;
                };
                C98832C815DEB44B00B3308E /* PBXTargetDependency */ = {
                        isa = PBXTargetDependency;
-                       target = C90E7A9E15DC3C3800A06D48 /* libpthread.a dyld */;
+                       target = C90E7A9E15DC3C3800A06D48 /* libpthread dyld */;
                        targetProxy = C98832C715DEB44B00B3308E /* PBXContainerItemProxy */;
                };
+               E4F4498B1E825D2B00A7FB9A /* PBXTargetDependency */ = {
+                       isa = PBXTargetDependency;
+                       target = E41505D01E818BEB00F243FB /* libpthread mp resolved */;
+                       targetProxy = E4F4498A1E825D2B00A7FB9A /* PBXContainerItemProxy */;
+               };
+               E4F449A21E82C5A400A7FB9A /* PBXTargetDependency */ = {
+                       isa = PBXTargetDependency;
+                       target = E4F4498C1E82C1F000A7FB9A /* libpthread alt resolved */;
+                       targetProxy = E4F449A11E82C5A400A7FB9A /* PBXContainerItemProxy */;
+               };
 /* End PBXTargetDependency section */
 
 /* Begin XCBuildConfiguration section */
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = 6E8C16851B14F14000C8987C /* pthread_introspection.xcconfig */;
                        buildSettings = {
-                               EXECUTABLE_PREFIX = lib;
-                               PRODUCT_NAME = system_pthread;
                        };
                        name = Release;
                };
                9235CA491CA48CEA0015C92B /* Debug */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               COPY_PHASE_STRIP = YES;
+                               PRODUCT_NAME = "$(TARGET_NAME)";
                        };
                        name = Debug;
                };
                9235CA4A1CA48CEA0015C92B /* Debug */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = "$(TARGET_NAME)";
                        };
                        name = Debug;
                };
                9235CA4B1CA48CEA0015C92B /* Debug */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = "$(TARGET_NAME)";
                        };
                        name = Debug;
                };
                9235CA4C1CA48CEA0015C92B /* Debug */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = "$(TARGET_NAME)";
                        };
                        name = Debug;
                };
                9235CA4D1CA48CEA0015C92B /* Debug */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = "$(TARGET_NAME)";
                                SUPPORTED_PLATFORMS = "macosx iphoneos appletvos watchos";
                        };
                        name = Debug;
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = 6E8C16851B14F14000C8987C /* pthread_introspection.xcconfig */;
                        buildSettings = {
-                               EXECUTABLE_PREFIX = lib;
-                               PRODUCT_NAME = system_pthread;
                        };
                        name = Debug;
                };
                9235CA4F1CA48CEA0015C92B /* Debug */ = {
                        isa = XCBuildConfiguration;
-                       baseConfigurationReference = C9A3260C15B759B600270056 /* pthread.xcconfig */;
+                       baseConfigurationReference = E4F449A31E82CF0100A7FB9A /* resolver.xcconfig */;
                        buildSettings = {
-                               EXECUTABLE_PREFIX = lib;
-                               PRODUCT_NAME = system_pthread;
                        };
                        name = Debug;
                };
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = C04545B91C584F8B006A53B3 /* static.xcconfig */;
                        buildSettings = {
-                               PRODUCT_NAME = "$(PRODUCT_NAME)";
                        };
                        name = Debug;
                };
                9235CA511CA48CEA0015C92B /* Debug */ = {
                        isa = XCBuildConfiguration;
-                       baseConfigurationReference = C9A3260C15B759B600270056 /* pthread.xcconfig */;
+                       baseConfigurationReference = C04545B91C584F8B006A53B3 /* static.xcconfig */;
                        buildSettings = {
-                               ALWAYS_SEARCH_USER_PATHS = NO;
                                BUILD_VARIANTS = normal;
-                               EXECUTABLE_PREFIX = lib;
                                GCC_PREPROCESSOR_DEFINITIONS = (
-                                       "$(BASE_PREPROCESSOR_MACROS)",
+                                       "$(inherited)",
                                        "VARIANT_DYLD=1",
-                                       "VARIANT_STATIC=1",
                                );
                                INSTALL_PATH = /usr/local/lib/dyld;
-                               OTHER_LDFLAGS = "";
                                PRODUCT_NAME = pthread_dyld;
                        };
                        name = Debug;
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = 9235CA551CA48D010015C92B /* kext_development.xcconfig */;
                        buildSettings = {
-                               DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-                               PRODUCT_BUNDLE_IDENTIFIER = "${MODULE_NAME}";
-                               PRODUCT_NAME = "$(TARGET_NAME)";
-                               SDKROOT = macosx.internal;
                        };
                        name = Debug;
                };
                9235CA541CA48CEA0015C92B /* Debug */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = darwintests;
                                SDKROOT = macosx.internal;
                                SUPPORTED_PLATFORMS = "iphoneos macosx watchos appletvos";
                        };
                92799B461B96A5FE00861404 /* Release */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = "$(TARGET_NAME)";
                                SUPPORTED_PLATFORMS = "macosx iphoneos appletvos watchos";
                        };
                        name = Release;
                92B275F21BCE4C5E007D06D7 /* Release */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = darwintests;
                                SDKROOT = macosx.internal;
                                SUPPORTED_PLATFORMS = "iphoneos macosx watchos appletvos";
                        };
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = C04545B91C584F8B006A53B3 /* static.xcconfig */;
                        buildSettings = {
-                               PRODUCT_NAME = "$(PRODUCT_NAME)";
                        };
                        name = Release;
                };
                C90E7AA015DC3C3800A06D48 /* Release */ = {
                        isa = XCBuildConfiguration;
-                       baseConfigurationReference = C9A3260C15B759B600270056 /* pthread.xcconfig */;
+                       baseConfigurationReference = C04545B91C584F8B006A53B3 /* static.xcconfig */;
                        buildSettings = {
-                               ALWAYS_SEARCH_USER_PATHS = NO;
                                BUILD_VARIANTS = normal;
-                               EXECUTABLE_PREFIX = lib;
                                GCC_PREPROCESSOR_DEFINITIONS = (
-                                       "$(BASE_PREPROCESSOR_MACROS)",
+                                       "$(inherited)",
                                        "VARIANT_DYLD=1",
-                                       "VARIANT_STATIC=1",
                                );
                                INSTALL_PATH = /usr/local/lib/dyld;
-                               OTHER_LDFLAGS = "";
                                PRODUCT_NAME = pthread_dyld;
                        };
                        name = Release;
                C90E7AAE15DC3D3300A06D48 /* Release */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = "$(TARGET_NAME)";
                        };
                        name = Release;
                };
                C91D01B7162892FF0002E29A /* Release */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = "$(TARGET_NAME)";
                        };
                        name = Release;
                };
                C98832C215DEB44000B3308E /* Release */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
-                               PRODUCT_NAME = "$(TARGET_NAME)";
                        };
                        name = Release;
                };
                C9A325E515B7347000270056 /* Release */ = {
                        isa = XCBuildConfiguration;
                        buildSettings = {
+                               PRODUCT_NAME = "$(TARGET_NAME)";
                        };
                        name = Release;
                };
                C9A325E815B7347000270056 /* Release */ = {
                        isa = XCBuildConfiguration;
-                       baseConfigurationReference = C9A3260C15B759B600270056 /* pthread.xcconfig */;
+                       baseConfigurationReference = E4F449A31E82CF0100A7FB9A /* resolver.xcconfig */;
                        buildSettings = {
-                               EXECUTABLE_PREFIX = lib;
-                               PRODUCT_NAME = system_pthread;
                        };
                        name = Release;
                };
                        isa = XCBuildConfiguration;
                        baseConfigurationReference = C91D01BA162893CD0002E29A /* kext.xcconfig */;
                        buildSettings = {
-                               DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
-                               PRODUCT_BUNDLE_IDENTIFIER = "${MODULE_NAME}";
-                               PRODUCT_NAME = "$(TARGET_NAME)";
-                               SDKROOT = macosx.internal;
                        };
                        name = Release;
                };
+               E41505E51E818BEB00F243FB /* Release */ = {
+                       isa = XCBuildConfiguration;
+                       baseConfigurationReference = E41505E81E818D4D00F243FB /* resolved.xcconfig */;
+                       buildSettings = {
+                               RESOLVED_VARIANT = mp;
+                       };
+                       name = Release;
+               };
+               E41505E61E818BEB00F243FB /* Debug */ = {
+                       isa = XCBuildConfiguration;
+                       baseConfigurationReference = E41505E81E818D4D00F243FB /* resolved.xcconfig */;
+                       buildSettings = {
+                               RESOLVED_VARIANT = mp;
+                       };
+                       name = Debug;
+               };
+               E4F4499E1E82C1F000A7FB9A /* Release */ = {
+                       isa = XCBuildConfiguration;
+                       baseConfigurationReference = E41505E81E818D4D00F243FB /* resolved.xcconfig */;
+                       buildSettings = {
+                               RESOLVED_VARIANT = alt;
+                       };
+                       name = Release;
+               };
+               E4F4499F1E82C1F000A7FB9A /* Debug */ = {
+                       isa = XCBuildConfiguration;
+                       baseConfigurationReference = E41505E81E818D4D00F243FB /* resolved.xcconfig */;
+                       buildSettings = {
+                               RESOLVED_VARIANT = alt;
+                       };
+                       name = Debug;
+               };
+               E4F449D21E82D03500A7FB9A /* Release */ = {
+                       isa = XCBuildConfiguration;
+                       baseConfigurationReference = C9A3260C15B759B600270056 /* pthread.xcconfig */;
+                       buildSettings = {
+                               WARNING_CFLAGS = (
+                                       "-Weverything",
+                                       "$(inherited)",
+                               );
+                       };
+                       name = Release;
+               };
+               E4F449D31E82D03500A7FB9A /* Debug */ = {
+                       isa = XCBuildConfiguration;
+                       baseConfigurationReference = C9A3260C15B759B600270056 /* pthread.xcconfig */;
+                       buildSettings = {
+                               ONLY_ACTIVE_ARCH = YES;
+                               WARNING_CFLAGS = (
+                                       "-Weverything",
+                                       "$(inherited)",
+                               );
+                       };
+                       name = Debug;
+               };
 /* End XCBuildConfiguration section */
 
 /* Begin XCConfigurationList section */
-               6E8C167D1B14F08A00C8987C /* Build configuration list for PBXNativeTarget "libsystem_pthread.dylib introspection" */ = {
+               6E8C167D1B14F08A00C8987C /* Build configuration list for PBXNativeTarget "libsystem_pthread introspection" */ = {
                        isa = XCConfigurationList;
                        buildConfigurations = (
                                6E8C167E1B14F08A00C8987C /* Release */,
                        defaultConfigurationIsVisible = 0;
                        defaultConfigurationName = Release;
                };
-               74E594A21613AAF4006C417B /* Build configuration list for PBXNativeTarget "libpthread.a eOS" */ = {
+               74E594A21613AAF4006C417B /* Build configuration list for PBXNativeTarget "libpthread eOS" */ = {
                        isa = XCConfigurationList;
                        buildConfigurations = (
                                74E594A31613AAF4006C417B /* Release */,
                        defaultConfigurationIsVisible = 0;
                        defaultConfigurationName = Release;
                };
-               C04545B51C584F4A006A53B3 /* Build configuration list for PBXNativeTarget "libpthread.a generic" */ = {
+               C04545B51C584F4A006A53B3 /* Build configuration list for PBXNativeTarget "libpthread generic" */ = {
                        isa = XCConfigurationList;
                        buildConfigurations = (
                                C04545B61C584F4A006A53B3 /* Release */,
                        defaultConfigurationIsVisible = 0;
                        defaultConfigurationName = Release;
                };
-               C90E7AA115DC3C3800A06D48 /* Build configuration list for PBXNativeTarget "libpthread.a dyld" */ = {
+               C90E7AA115DC3C3800A06D48 /* Build configuration list for PBXNativeTarget "libpthread dyld" */ = {
                        isa = XCConfigurationList;
                        buildConfigurations = (
                                C90E7AA015DC3C3800A06D48 /* Release */,
                        defaultConfigurationIsVisible = 0;
                        defaultConfigurationName = Release;
                };
-               C9A325E615B7347000270056 /* Build configuration list for PBXNativeTarget "libsystem_pthread.dylib" */ = {
+               C9A325E615B7347000270056 /* Build configuration list for PBXNativeTarget "libsystem_pthread" */ = {
                        isa = XCConfigurationList;
                        buildConfigurations = (
                                C9A325E815B7347000270056 /* Release */,
                        defaultConfigurationIsVisible = 0;
                        defaultConfigurationName = Release;
                };
-               C9CA27E71602813000259F78 /* Build configuration list for PBXNativeTarget "pthread" */ = {
+               C9CA27E71602813000259F78 /* Build configuration list for PBXNativeTarget "pthread kext" */ = {
                        isa = XCConfigurationList;
                        buildConfigurations = (
                                C9CA27E61602813000259F78 /* Release */,
                        defaultConfigurationIsVisible = 0;
                        defaultConfigurationName = Release;
                };
+               E41505E41E818BEB00F243FB /* Build configuration list for PBXNativeTarget "libpthread mp resolved" */ = {
+                       isa = XCConfigurationList;
+                       buildConfigurations = (
+                               E41505E51E818BEB00F243FB /* Release */,
+                               E41505E61E818BEB00F243FB /* Debug */,
+                       );
+                       defaultConfigurationIsVisible = 0;
+                       defaultConfigurationName = Release;
+               };
+               E4F4499D1E82C1F000A7FB9A /* Build configuration list for PBXNativeTarget "libpthread alt resolved" */ = {
+                       isa = XCConfigurationList;
+                       buildConfigurations = (
+                               E4F4499E1E82C1F000A7FB9A /* Release */,
+                               E4F4499F1E82C1F000A7FB9A /* Debug */,
+                       );
+                       defaultConfigurationIsVisible = 0;
+                       defaultConfigurationName = Release;
+               };
+               E4F449D11E82D03500A7FB9A /* Build configuration list for PBXNativeTarget "libsystem_pthread noresolver" */ = {
+                       isa = XCConfigurationList;
+                       buildConfigurations = (
+                               E4F449D21E82D03500A7FB9A /* Release */,
+                               E4F449D31E82D03500A7FB9A /* Debug */,
+                       );
+                       defaultConfigurationIsVisible = 0;
+                       defaultConfigurationName = Release;
+               };
 /* End XCConfigurationList section */
        };
        rootObject = C9A325D915B7347000270056 /* Project object */;
index d3d75c7af3237003df1a16bc2a5fbba9b7e49a61..a24779cf6875deba8936ea9a4d43af3e294054ed 100644 (file)
@@ -107,13 +107,9 @@ def GetPthreadWorkqueueSummary(wq):
 def GetPthreadWorkqueueDetail(wq):
        format = "  {0: <22s} {1: <5d} {2: <5d} {3: <5d} {4: <5d} {5: <5d} {6: <5d} {7: <5d}"
        # requests
-       reqstr = format.format('requests', wq.wq_requests[0], wq.wq_requests[1], wq.wq_requests[2], wq.wq_requests[3], wq.wq_requests[4], wq.wq_requests[5], wq.wq_requests[6])
-       ocstr = format.format('ocreqs', wq.wq_ocrequests[0], wq.wq_ocrequests[1], wq.wq_ocrequests[2], wq.wq_ocrequests[3], wq.wq_ocrequests[4], wq.wq_ocrequests[5], wq.wq_ocrequests[6])
-       keventstr = format.format('kevent_reqs', wq.wq_kevent_requests[0], wq.wq_kevent_requests[1], wq.wq_kevent_requests[2], wq.wq_kevent_requests[3], wq.wq_kevent_requests[4], wq.wq_kevent_requests[5], wq.wq_kevent_requests[6])
-       ockeventstr = format.format('kevent_ocreqs', wq.wq_kevent_ocrequests[0], wq.wq_kevent_ocrequests[1], wq.wq_kevent_ocrequests[2], wq.wq_kevent_ocrequests[3], wq.wq_kevent_ocrequests[4], wq.wq_kevent_ocrequests[5], wq.wq_kevent_ocrequests[6])
        schedstr = format.format('scheduled', wq.wq_thscheduled_count[0], wq.wq_thscheduled_count[1], wq.wq_thscheduled_count[2], wq.wq_thscheduled_count[3], wq.wq_thscheduled_count[4], wq.wq_thscheduled_count[5], wq.wq_thscheduled_count[6])
        activestr = format.format('active', wq.wq_thactive_count[0], wq.wq_thactive_count[1], wq.wq_thactive_count[2], wq.wq_thactive_count[3], wq.wq_thactive_count[4], wq.wq_thactive_count[5], wq.wq_thactive_count[6])
-       return "\n".join([reqstr, ocstr, keventstr, ockeventstr, schedstr, activestr])
+       return "\n".join([schedstr, activestr])
 
 @lldb_command('showpthreadstate')
 def PthreadCurrentMutex(cmd_args=None):
index b5e52cc6f0c1adaf9cf88fb27d997353ffa98018..0e4d9cf84453533ef7d9a115335bbd5abdb41c39 100644 (file)
@@ -4,7 +4,7 @@
 .Os
 .Sh NAME
 .Nm pthread_attr_getinheritsched ,
-.Nm pthread_attr_setinheritsched 
+.Nm pthread_attr_setinheritsched
 .Nd thread attribute operations
 .Sh SYNOPSIS
 .Fd #include <pthread.h>
@@ -30,7 +30,7 @@ controls the scheduling policy and related attributes of the newly created threa
 "inheritsched" attribute can be either PTHREAD_INHERIT_SCHED or PTHREAD_EXPLICIT_SCHED.
 .Pp
 PTHREAD_INHERIT_SCHED
-.Pp    
+.Pp
        Indicates that the newly created thread should inherit all its scheduling-related attributes from its creating
 thread. It ignores the values of the relevant attributes within the
 .Fa attr
@@ -73,6 +73,9 @@ will fail if:
 Invalid value for
 .Fa attr .
 .El
+.Sh BUGS
+.Pp
+The "inheritsched" attribute has no effect on Darwin.  It is provided for compatibility only.
 .Sh SEE ALSO
 .Xr pthread_create 3 ,
 .Xr pthread_attr_init 3 ,
index dff13e45f24dd4a687df3fbdaaf60d1a5907f212..d7160d7235ec76a704f2cd9eb8c3babea42a3e25 100644 (file)
@@ -38,7 +38,11 @@ The
 .Fa sched_priority
 field of
 .Fa struct sched_param
-can be set to SCHED_OTHER, SCHED_FIFO and SCHED_RR.
+must be within the range returned by the
+.Xr sched_get_priority_min 2
+and
+.Xr sched_get_priority_max 2
+system calls.
 .Sh RETURN VALUES
 If successful, these functions return 0.
 Otherwise, an error number is returned to indicate the error.
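For illustration, a sketch of a caller honoring the range the rewritten text above now documents (SCHED_FIFO is just an example policy; error handling elided):

    #include <pthread.h>
    #include <sched.h>

    /* Clamp a requested priority into the policy's valid range before
     * storing it in the attribute object. */
    int set_fifo_priority(pthread_attr_t *attr, int wanted)
    {
        int lo = sched_get_priority_min(SCHED_FIFO);
        int hi = sched_get_priority_max(SCHED_FIFO);
        struct sched_param sp;

        if (wanted < lo) wanted = lo;
        if (wanted > hi) wanted = hi;
        sp.sched_priority = wanted;

        (void)pthread_attr_setschedpolicy(attr, SCHED_FIFO);
        return pthread_attr_setschedparam(attr, &sp);
    }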
index 5934a2e1056904e84c58d6f7fdedf415f4efae74..a3b3332f7128688d8457be6538e3ddb659126812 100644 (file)
@@ -64,6 +64,12 @@ is an invalid thread ID.
 .It Bq Er EINVAL
 .Fa sig
 is an invalid or unsupported signal number.
+.It Bq Er ENOTSUP
+.Fa thread
+was not created by
+.Fn pthread_create
+and does not support being killed with
+.Fn pthread_kill .
 .El
 .Sh LEGACY SYNOPSIS
 .Fd #include <pthread.h>
index 4a560c213482c734a7e8d05d26f7fce9f407aa7a..2276cdaaa10553f1ddcce9d3c0a95ff1ef8841a1 100644 (file)
@@ -58,7 +58,7 @@ The value specified by
 is invalid.
 .It Bq Er EBUSY
 .Fa Mutex
-is locked by another thread.
+is locked.
 .El
 .Sh SEE ALSO
 .Xr pthread_mutex_init 3 ,
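A quick check of the revised EBUSY wording above, assuming the documented behavior that a held lock refuses destruction even for its owner:

    #include <assert.h>
    #include <errno.h>
    #include <pthread.h>

    int main(void)
    {
        pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

        pthread_mutex_lock(&m);
        assert(pthread_mutex_destroy(&m) == EBUSY); /* locked: refused */
        pthread_mutex_unlock(&m);
        assert(pthread_mutex_destroy(&m) == 0);     /* unlocked: ok */
        return 0;
    }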
index d196c55114bb03ff5a313927636f47a3b510bd02..b98a350e0f28f0aa3f2894242634c5202cec5c99 100644 (file)
@@ -42,7 +42,7 @@ _pthread_set_errno_direct(int value)
        *((int*)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_ERRNO)) = value;
 }
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
+__API_AVAILABLE(macos(10.9), ios(7.0))
 pthread_t pthread_main_thread_np(void);
 
 struct _libpthread_functions {
@@ -69,10 +69,7 @@ struct _libpthread_functions {
  * @result
  * 0 upon success, -1 upon error and errno is set.
  */
-__OSX_AVAILABLE(10.12)
-__IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0)
-__WATCHOS_AVAILABLE(3.0)
+__API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 int pthread_chdir_np(char *path);
 
 /*!
@@ -93,10 +90,7 @@ int pthread_chdir_np(char *path);
  * @result
  * 0 upon success, -1 upon error and errno is set.
  */
-__OSX_AVAILABLE(10.12)
-__IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0)
-__WATCHOS_AVAILABLE(3.0)
+__API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 int pthread_fchdir_np(int fd);
 
 
index 469bf79ed7244dc3ac1425252f6f855b25354e8a..50f273a0de43877198a686bbb2c5d20002003020 100644 (file)
@@ -25,6 +25,7 @@
 #define _QOS_PRIVATE_H
 
 #include <pthread/qos.h>
+#include <sys/qos.h> /* qos_class_t */
 #include <sys/qos_private.h>
 
 #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL
@@ -59,12 +60,13 @@ typedef unsigned long pthread_priority_t;
 // priority is actually a scheduling priority not a QoS.  We can have ROOTQUEUE_FLAG
 // perform double duty because it's never provided to the kernel.
 #define _PTHREAD_PRIORITY_SCHED_PRI_FLAG               0x20000000
+#define _PTHREAD_PRIORITY_SCHED_PRI_MASK               0x0000ffff
 #define _PTHREAD_PRIORITY_ENFORCE_FLAG                 0x10000000
 #define _PTHREAD_PRIORITY_OVERRIDE_FLAG                        0x08000000
 
 // libdispatch defines the following, so it's not safe to use for anything we
 // expect to be passed in from userspace
-//#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG 0x04000000
+#define _PTHREAD_PRIORITY_DEFAULTQUEUE_FLAG            0x04000000
 
 // The event manager flag indicates that this thread/request is for a event
 // manager thread.  There can only ever be one event manager thread at a time and
@@ -86,11 +88,11 @@ typedef unsigned long pthread_priority_t;
 #endif
 #if __has_feature(enumerator_attributes)
 #undef __QOS_AVAILABLE_10_10
-#define __QOS_AVAILABLE_10_10 __OSX_AVAILABLE(10.10) __IOS_AVAILABLE(8.0)
+#define __QOS_AVAILABLE_10_10 __API_AVAILABLE(macos(10.10), ios(8.0))
 #undef __QOS_AVAILABLE_10_11
-#define __QOS_AVAILABLE_10_11 __OSX_AVAILABLE(10.11) __IOS_AVAILABLE(9.0)  __WATCHOS_AVAILABLE(2.0) __TVOS_AVAILABLE(9.0)
+#define __QOS_AVAILABLE_10_11 __API_AVAILABLE(macos(10.11), ios(9.0), tvos(9.0), watchos(2.0))
 #undef __QOS_AVAILABLE_10_12
-#define __QOS_AVAILABLE_10_12 __OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0) __TVOS_AVAILABLE(10.0)
+#define __QOS_AVAILABLE_10_12 __API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 #endif
 #endif
 
@@ -151,8 +153,7 @@ __BEGIN_DECLS
  * @return
 * Zero if successful, otherwise an errno value.
  */
-__OSX_AVAILABLE_BUT_DEPRECATED_MSG(__MAC_10_10, __MAC_10_10, __IPHONE_8_0, __IPHONE_8_0, \
-               "Use pthread_set_qos_class_self_np() instead")
+__API_DEPRECATED_WITH_REPLACEMENT("pthread_set_qos_class_self_np", macos(10.10, 10.10), ios(8.0, 8.0))
 int
 pthread_set_qos_class_np(pthread_t __pthread,
                                                 qos_class_t __qos_class,
@@ -161,37 +162,86 @@ pthread_set_qos_class_np(pthread_t __pthread,
 /* Private interfaces for libdispatch to encode/decode specific values of pthread_priority_t. */
 
 // Encode a class+priority pair into a pthread_priority_t,
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 pthread_priority_t
 _pthread_qos_class_encode(qos_class_t qos_class, int relative_priority, unsigned long flags);
 
 // Decode a pthread_priority_t into a class+priority pair.
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 qos_class_t
 _pthread_qos_class_decode(pthread_priority_t priority, int *relative_priority, unsigned long *flags);
 
-// Encode a legacy workqueue API priority into a pthread_priority_t
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+// Encode a legacy workqueue API priority into a pthread_priority_t. This API
+// is deprecated and can be removed when the simulator no longer uses it.
+__API_DEPRECATED("no longer used", macos(10.10, 10.13), ios(8.0, 11.0))
 pthread_priority_t
 _pthread_qos_class_encode_workqueue(int queue_priority, unsigned long flags);
 
 #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL
+
 // Set QoS or voucher, or both, on pthread_self()
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 _pthread_set_properties_self(_pthread_set_flags_t flags, pthread_priority_t priority, mach_port_t voucher);
 
 // Set self to fixed priority without disturbing QoS or priority
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 pthread_set_fixedpriority_self(void);
 
 // Inverse of pthread_set_fixedpriority_self()
-__OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 pthread_set_timeshare_self(void);
 
-#endif
+/*!
+ * @const PTHREAD_MAX_PARALLELISM_PHYSICAL
+ * Flag that can be used with pthread_qos_max_parallelism() and
+ * pthread_time_constraint_max_parallelism() to ask for a count of physical
+ * compute units available for parallelism (default is logical).
+ */
+#define PTHREAD_MAX_PARALLELISM_PHYSICAL 0x1
+
+/*!
+ * @function pthread_qos_max_parallelism
+ *
+ * @abstract
+ * Returns the number of compute units available for parallel computation at
+ * a specified QoS class.
+ *
+ * @param qos
+ * The specified QoS class.
+ *
+ * @param flags
+ * 0 or PTHREAD_MAX_PARALLELISM_PHYSICAL.
+ *
+ * @return
+ * The number of compute units available for parallel computation for the
+ * specified QoS, or -1 on failure (with errno set accordingly).
+ */
+__API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0))
+int
+pthread_qos_max_parallelism(qos_class_t qos, unsigned long flags);
+
+/*!
+ * @function pthread_time_constraint_max_parallelism()
+ *
+ * @abstract
+ * Returns the number of compute units available for parallel computation on
+ * realtime threads.
+ *
+ * @param flags
+ * 0 or PTHREAD_MAX_PARALLELISM_PHYSICAL.
+ *
+ * @return
+ * The number of compute units available for parallel computation on realtime
+ * threads, or -1 on failure (with errno set accordingly).
+ */
+__API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0))
+int
+pthread_time_constraint_max_parallelism(unsigned long flags);
+
+#endif // __DARWIN_C_LEVEL >= __DARWIN_C_FULL
 
 __END_DECLS
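A minimal sketch of the two new parallelism queries declared above; the include path and the choice of QOS_CLASS_USER_INITIATED are assumptions for illustration:

    #include <pthread/qos_private.h>  /* assumed SPI install path */
    #include <stdio.h>

    int main(void)
    {
        /* Logical vs. physical compute units at user-initiated QoS. */
        int logical  = pthread_qos_max_parallelism(QOS_CLASS_USER_INITIATED, 0);
        int physical = pthread_qos_max_parallelism(QOS_CLASS_USER_INITIATED,
                PTHREAD_MAX_PARALLELISM_PHYSICAL);
        /* Compute units usable by realtime (time-constraint) threads. */
        int rt = pthread_time_constraint_max_parallelism(0);

        if (logical < 0 || physical < 0 || rt < 0) {
            perror("max_parallelism");
            return 1;
        }
        printf("logical %d, physical %d, realtime %d\n", logical, physical, rt);
        return 0;
    }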
 
index 4a9ef6e2290e634e0480d10ab29efa21cb0ac8f7..f91c1f6cf206875ceffa419853a2f82c2490c05e 100644 (file)
 #define __TSD_THREAD_QOS_CLASS 4
 #endif
 
+#ifndef __TSD_RETURN_TO_KERNEL
+#define __TSD_RETURN_TO_KERNEL 5
+#endif
+
+#ifndef __TSD_MACH_SPECIAL_REPLY
+#define __TSD_MACH_SPECIAL_REPLY 8
+#endif
+
 /* Constant TSD slots for inline pthread_getspecific() usage. */
 
 /* Keys 0 - 9 are for Libsyscall/libplatform usage */
@@ -72,7 +80,9 @@
 #define _PTHREAD_TSD_SLOT_MIG_REPLY __TSD_MIG_REPLY
 #define _PTHREAD_TSD_SLOT_MACH_THREAD_SELF __TSD_MACH_THREAD_SELF
 #define _PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS    __TSD_THREAD_QOS_CLASS
-//#define _PTHREAD_TSD_SLOT_SEMAPHORE_CACHE__TSD_SEMAPHORE_CACHE
+#define _PTHREAD_TSD_SLOT_RETURN_TO_KERNEL __TSD_RETURN_TO_KERNEL
+#define _PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY __TSD_MACH_SPECIAL_REPLY
+//#define _PTHREAD_TSD_SLOT_SEMAPHORE_CACHE __TSD_SEMAPHORE_CACHE
 
 /*
  * Windows 64-bit ABI bakes %gs relative accesses into its code in the same
@@ -208,17 +218,14 @@ extern int pthread_setspecific(unsigned long, const void *);
 /* setup destructor function for static key as it is not created with pthread_key_create() */
 extern int pthread_key_init_np(int, void (*)(void *));
 
-__OSX_AVAILABLE(10.12)
-__IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0)
-__WATCHOS_AVAILABLE(3.0)
+__API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 extern int _pthread_setspecific_static(unsigned long, void *);
 
 #if PTHREAD_LAYOUT_SPI
 
 /* SPI intended for CoreSymbolication only */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_10,__IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 extern const struct pthread_layout_offsets_s {
        // always add new fields at the end
        const uint16_t plo_version;
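For context, the reserved slots defined above are what the inline accessors in this header read; a sketch of pulling the cached mach thread port out of its slot (the include path is an assumption):

    #include <stdint.h>
    #include <mach/mach.h>
    #include <pthread/tsd_private.h>  /* assumed SPI install path */

    /* Read the cached thread port from its reserved TSD slot, avoiding a
     * mach_thread_self() trap. */
    static mach_port_t cached_thread_self(void)
    {
        return (mach_port_t)(uintptr_t)_pthread_getspecific_direct(
                _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
    }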
index 9bbec51ac682ad1de47ea0034d6be80dc359027b..0b0a001e9bbeb51de9f45492b4f0ecc984f943a6 100644 (file)
@@ -24,6 +24,7 @@
 #ifndef __PTHREAD_WORKQUEUE_H__
 #define __PTHREAD_WORKQUEUE_H__
 
+#include <stdbool.h>
 #include <sys/cdefs.h>
 #include <sys/event.h>
 #include <Availability.h>
@@ -33,7 +34,7 @@
 #include <pthread/qos_private.h>
 #endif
 
-#define PTHREAD_WORKQUEUE_SPI_VERSION 20160427
+#define PTHREAD_WORKQUEUE_SPI_VERSION 20170201
 
 /* Feature checking flags, returned by _pthread_workqueue_supported()
  *
@@ -44,6 +45,7 @@
 #define WORKQ_FEATURE_FINEPRIO         0x02    // fine grained pthread workq priorities
 #define WORKQ_FEATURE_MAINTENANCE      0x10    // QOS class maintenance
 #define WORKQ_FEATURE_KEVENT        0x40    // Support for direct kevent delivery
+#define WORKQ_FEATURE_WORKLOOP      0x80    // Support for direct workloop requests
 
 /* Legacy dispatch priority bands */
 
@@ -70,106 +72,110 @@ typedef void (*pthread_workqueue_function2_t)(pthread_priority_t priority);
 #define WORKQ_KEVENT_EVENT_BUFFER_LEN 16
 typedef void (*pthread_workqueue_function_kevent_t)(void **events, int *nevents);
 
+typedef void (*pthread_workqueue_function_workloop_t)(uint64_t *workloop_id, void **events, int *nevents);
+
 // Initialises the pthread workqueue subsystem, passing the new-style callback prototype,
 // the dispatch offset and an unused flags field.
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 _pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0)
+__API_AVAILABLE(macos(10.11), ios(9.0))
 int
 _pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func, int offset, int flags);
 
+__API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0))
+int
+_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func, pthread_workqueue_function_workloop_t workloop_func, int offset, int flags);
+
 // Non-zero enables kill on current thread, zero disables it.
-__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2)
+__API_AVAILABLE(macos(10.6), ios(3.2))
 int
 __pthread_workqueue_setkill(int);
 
 // Dispatch function to be called when new worker threads are created.
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+__API_AVAILABLE(macos(10.8), ios(6.0))
 int
 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func);
 
 // Dispatch offset to be set in the kernel.
-__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
+__API_AVAILABLE(macos(10.9), ios(7.0))
 void
 pthread_workqueue_setdispatchoffset_np(int offset);
 
 // Request additional worker threads.
-__OSX_AVAILABLE_STARTING(__MAC_10_8, __IPHONE_6_0)
+__API_AVAILABLE(macos(10.8), ios(6.0))
 int
 pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads);
 
 // Retrieve the supported pthread feature set
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 _pthread_workqueue_supported(void);
 
 // Request worker threads (fine grained priority)
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 _pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_9_0)
+// Should this thread return to the kernel?
+__API_AVAILABLE(macos(10.13), ios(11.0), tvos(11.0), watchos(4.0))
+bool
+_pthread_workqueue_should_narrow(pthread_priority_t priority);
+
+__API_AVAILABLE(macos(10.11), ios(9.0))
 int
 _pthread_workqueue_set_event_manager_priority(pthread_priority_t priority);
 
 // Apply a QoS override without allocating userspace memory
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+__API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 int
 _pthread_qos_override_start_direct(mach_port_t thread, pthread_priority_t priority, void *resource);
 
 // Drop a corresponding QoS override made above, if the resource matches
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+__API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 int
 _pthread_qos_override_end_direct(mach_port_t thread, void *resource);
 
 // Apply a QoS override without allocating userspace memory
-__OSX_DEPRECATED(10.10, 10.12, "use _pthread_qos_override_start_direct()")
-__IOS_DEPRECATED(8.0, 10.0, "use _pthread_qos_override_start_direct()")
-__TVOS_DEPRECATED(8.0, 10.0, "use _pthread_qos_override_start_direct()")
-__WATCHOS_DEPRECATED(1.0, 3.0, "use _pthread_qos_override_start_direct()")
+__API_DEPRECATED_WITH_REPLACEMENT("_pthread_qos_override_start_direct",
+               macos(10.10, 10.12), ios(8.0, 10.0), tvos(8.0, 10.0), watchos(1.0, 3.0))
 int
 _pthread_override_qos_class_start_direct(mach_port_t thread, pthread_priority_t priority);
 
 // Drop a corresponding QoS override made above.
-__OSX_DEPRECATED(10.10, 10.12, "use _pthread_qos_override_end_direct()")
-__IOS_DEPRECATED(8.0, 10.0, "use _pthread_qos_override_end_direct()")
-__TVOS_DEPRECATED(8.0, 10.0, "use _pthread_qos_override_end_direct()")
-__WATCHOS_DEPRECATED(1.0, 3.0, "use _pthread_qos_override_end_direct()")
+__API_DEPRECATED_WITH_REPLACEMENT("_pthread_qos_override_end_direct",
+               macos(10.10, 10.12), ios(8.0, 10.0), tvos(8.0, 10.0), watchos(1.0, 3.0))
 int
 _pthread_override_qos_class_end_direct(mach_port_t thread);
 
 // Apply a QoS override on a given workqueue thread.
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 _pthread_workqueue_override_start_direct(mach_port_t thread, pthread_priority_t priority);
 
 // Apply a QoS override on a given workqueue thread.
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+__API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 int
 _pthread_workqueue_override_start_direct_check_owner(mach_port_t thread, pthread_priority_t priority, mach_port_t *ulock_addr);
 
 // Drop all QoS overrides on the current workqueue thread.
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 _pthread_workqueue_override_reset(void);
 
 // Apply a QoS override on a given thread (can be non-workqueue as well) with a resource/queue token
-__OSX_AVAILABLE_STARTING(__MAC_10_10_2, __IPHONE_NA)
+__API_AVAILABLE(macos(10.10.2))
 int
 _pthread_workqueue_asynchronous_override_add(mach_port_t thread, pthread_priority_t priority, void *resource);
 
 // Reset overrides for the given resource for the current thread
-__OSX_AVAILABLE_STARTING(__MAC_10_10_2, __IPHONE_NA)
+__API_AVAILABLE(macos(10.10.2))
 int
 _pthread_workqueue_asynchronous_override_reset_self(void *resource);
 
 // Reset overrides for all resources for the current thread
-__OSX_AVAILABLE_STARTING(__MAC_10_10_2, __IPHONE_NA)
+__API_AVAILABLE(macos(10.10.2))
 int
 _pthread_workqueue_asynchronous_override_reset_all_self(void);
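The bumped SPI version (20170201) pairs with the workloop additions above; a registration sketch with placeholder callback bodies (in practice only libdispatch calls this, and the include path is an assumption):

    #include <stdint.h>
    #include <pthread/workqueue_private.h>  /* assumed SPI install path */

    static void queue_cb(pthread_priority_t priority)
    {
        /* run queued work at the given priority */
    }

    static void kevent_cb(void **events, int *nevents)
    {
        /* drain directly delivered kevents */
    }

    static void workloop_cb(uint64_t *workloop_id, void **events, int *nevents)
    {
        /* service the identified workloop's events */
    }

    int register_workqueue_callbacks(void)
    {
        /* offset/flags of 0: no dispatch offset, no special behavior. */
        return _pthread_workqueue_init_with_workloop(queue_cb, kevent_cb,
                workloop_cb, 0, 0);
    }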
 
index 366c7baad5045da0b78184e6a5d3ec4c18e651b5..18292085d5c1ce4c7a5611c338eeffd7ec86acbf 100644 (file)
@@ -101,7 +101,7 @@ enum {
  * Previously installed hook function or NULL.
  */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
+__API_AVAILABLE(macos(10.9), ios(7.0))
 __attribute__((__nonnull__, __warn_unused_result__))
 extern pthread_introspection_hook_t
 pthread_introspection_hook_install(pthread_introspection_hook_t hook);
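A sketch of installing and chaining a hook with the function above; the hook typedef and event constant come from the same header, and logging from inside a hook is for illustration only:

    #include <pthread/introspection.h>
    #include <stdio.h>

    static pthread_introspection_hook_t prev_hook;

    static void my_hook(unsigned int event, pthread_t thread,
            void *addr, size_t size)
    {
        if (event == PTHREAD_INTROSPECTION_THREAD_CREATE)
            fprintf(stderr, "thread %p created\n", (void *)thread);
        /* Chain to the previously installed hook, if any. */
        if (prev_hook)
            prev_hook(event, thread, addr, size);
    }

    void install_introspection_hook(void)
    {
        prev_hook = pthread_introspection_hook_install(my_hook);
    }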
index b0b9534be99efa6f6deeda799d56333867a0c0e3..0e2ecb73b9a4baf217a471803cd9ef93f6e27c9a 100644 (file)
 
 #include <pthread/qos.h>
 
-#if (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)) || defined(_DARWIN_C_SOURCE)
+#if (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)) || defined(_DARWIN_C_SOURCE) || defined(__cplusplus)
 
 #include <sys/_types/_mach_port_t.h>
 #include <sys/_types/_sigset_t.h>
 
-#endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE */
+#endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE || __cplusplus */
 
 /*
  * These symbols indicate which [optional] features are available
@@ -212,116 +212,116 @@ __BEGIN_DECLS
 /*
  * Prototypes for all PTHREAD interfaces
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_atfork(void (* _Nullable)(void), void (* _Nullable)(void),
                void (* _Nullable)(void));
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_destroy(pthread_attr_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_getdetachstate(const pthread_attr_t *, int *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_getguardsize(const pthread_attr_t * __restrict, size_t * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_getinheritsched(const pthread_attr_t * __restrict, int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_getschedparam(const pthread_attr_t * __restrict,
                struct sched_param * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_getschedpolicy(const pthread_attr_t * __restrict, int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_getscope(const pthread_attr_t * __restrict, int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_getstack(const pthread_attr_t * __restrict,
                void * _Nullable * _Nonnull __restrict, size_t * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_getstackaddr(const pthread_attr_t * __restrict,
                void * _Nullable * _Nonnull __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_getstacksize(const pthread_attr_t * __restrict, size_t * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_init(pthread_attr_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_setdetachstate(pthread_attr_t *, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_setguardsize(pthread_attr_t *, size_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_setinheritsched(pthread_attr_t *, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_setschedparam(pthread_attr_t * __restrict,
                const struct sched_param * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_setschedpolicy(pthread_attr_t *, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_setscope(pthread_attr_t *, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_setstack(pthread_attr_t *, void *, size_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_setstackaddr(pthread_attr_t *, void *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_attr_setstacksize(pthread_attr_t *, size_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_cancel(pthread_t) __DARWIN_ALIAS(pthread_cancel);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_cond_broadcast(pthread_cond_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_cond_destroy(pthread_cond_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_cond_init(
                pthread_cond_t * __restrict,
                const pthread_condattr_t * _Nullable __restrict)
                __DARWIN_ALIAS(pthread_cond_init);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_cond_signal(pthread_cond_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_cond_timedwait(
                pthread_cond_t * __restrict, pthread_mutex_t * __restrict,
                const struct timespec * _Nullable __restrict)
                __DARWIN_ALIAS_C(pthread_cond_timedwait);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_cond_wait(pthread_cond_t * __restrict,
                pthread_mutex_t * __restrict) __DARWIN_ALIAS_C(pthread_cond_wait);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_condattr_destroy(pthread_condattr_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_condattr_init(pthread_condattr_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_condattr_getpshared(const pthread_condattr_t * __restrict,
                int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_condattr_setpshared(pthread_condattr_t *, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 #if !_PTHREAD_SWIFT_IMPORTER_NULLABILITY_COMPAT
 int pthread_create(pthread_t _Nullable * _Nonnull __restrict,
                const pthread_attr_t * _Nullable __restrict,
@@ -333,197 +333,197 @@ int pthread_create(pthread_t * __restrict,
                void *(* _Nonnull)(void *), void * _Nullable __restrict);
 #endif // _PTHREAD_SWIFT_IMPORTER_NULLABILITY_COMPAT
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_detach(pthread_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_equal(pthread_t _Nullable, pthread_t _Nullable);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 void pthread_exit(void * _Nullable) __dead2;
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_getconcurrency(void);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_getschedparam(pthread_t , int * _Nullable __restrict,
                struct sched_param * _Nullable __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 void* _Nullable pthread_getspecific(pthread_key_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_join(pthread_t , void * _Nullable * _Nullable)
                __DARWIN_ALIAS_C(pthread_join);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_key_create(pthread_key_t *, void (* _Nullable)(void *));
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_key_delete(pthread_key_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutex_destroy(pthread_mutex_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict,
                int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutex_init(pthread_mutex_t * __restrict,
                const pthread_mutexattr_t * _Nullable __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutex_lock(pthread_mutex_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutex_setprioceiling(pthread_mutex_t * __restrict, int,
                int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutex_trylock(pthread_mutex_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutex_unlock(pthread_mutex_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_destroy(pthread_mutexattr_t *) __DARWIN_ALIAS(pthread_mutexattr_destroy);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t * __restrict,
                int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_getprotocol(const pthread_mutexattr_t * __restrict,
                int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_getpshared(const pthread_mutexattr_t * __restrict,
                int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_gettype(const pthread_mutexattr_t * __restrict,
                int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_init(pthread_mutexattr_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_setprioceiling(pthread_mutexattr_t *, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_setprotocol(pthread_mutexattr_t *, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_setpshared(pthread_mutexattr_t *, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_mutexattr_settype(pthread_mutexattr_t *, int);
 
 __SWIFT_UNAVAILABLE_MSG("Use lazily initialized globals instead")
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_once(pthread_once_t *, void (* _Nonnull)(void));
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlock_destroy(pthread_rwlock_t * ) __DARWIN_ALIAS(pthread_rwlock_destroy);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlock_init(pthread_rwlock_t * __restrict,
                const pthread_rwlockattr_t * _Nullable __restrict)
                __DARWIN_ALIAS(pthread_rwlock_init);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlock_rdlock(pthread_rwlock_t *) __DARWIN_ALIAS(pthread_rwlock_rdlock);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlock_tryrdlock(pthread_rwlock_t *) __DARWIN_ALIAS(pthread_rwlock_tryrdlock);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlock_trywrlock(pthread_rwlock_t *) __DARWIN_ALIAS(pthread_rwlock_trywrlock);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlock_wrlock(pthread_rwlock_t *) __DARWIN_ALIAS(pthread_rwlock_wrlock);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlock_unlock(pthread_rwlock_t *) __DARWIN_ALIAS(pthread_rwlock_unlock);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlockattr_destroy(pthread_rwlockattr_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t * __restrict,
                int * __restrict);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlockattr_init(pthread_rwlockattr_t *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 pthread_t pthread_self(void);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_setcancelstate(int , int * _Nullable)
                __DARWIN_ALIAS(pthread_setcancelstate);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_setcanceltype(int , int * _Nullable)
                __DARWIN_ALIAS(pthread_setcanceltype);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_setconcurrency(int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_setschedparam(pthread_t, int, const struct sched_param *);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_setspecific(pthread_key_t , const void * _Nullable);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 void pthread_testcancel(void) __DARWIN_ALIAS(pthread_testcancel);
 
-#if (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)) || defined(_DARWIN_C_SOURCE)
+#if (!defined(_POSIX_C_SOURCE) && !defined(_XOPEN_SOURCE)) || defined(_DARWIN_C_SOURCE) || defined(__cplusplus)
 
 /* returns non-zero if pthread_create or cthread_fork have been called */
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_is_threaded_np(void);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2)
+__API_AVAILABLE(macos(10.6), ios(3.2))
 int pthread_threadid_np(pthread_t _Nullable,__uint64_t* _Nullable);
 
 /*SPI to set and get pthread name*/
-__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2)
+__API_AVAILABLE(macos(10.6), ios(3.2))
 int    pthread_getname_np(pthread_t,char*,size_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2)
+__API_AVAILABLE(macos(10.6), ios(3.2))
 int    pthread_setname_np(const char*);
 
 /* returns non-zero if the current thread is the main thread */
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int    pthread_main_np(void);
 
 /* return the mach thread bound to the pthread */
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 mach_port_t pthread_mach_thread_np(pthread_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 size_t pthread_get_stacksize_np(pthread_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 void* pthread_get_stackaddr_np(pthread_t);
 
 /* Like pthread_cond_signal(), but only wake up the specified pthread */
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_cond_signal_thread_np(pthread_cond_t *, pthread_t _Nullable);
 
 /* Like pthread_cond_timedwait, but use a relative timeout */
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_cond_timedwait_relative_np(pthread_cond_t *, pthread_mutex_t *,
                const struct timespec * _Nullable);
 
 /* Like pthread_create(), but leaves the thread suspended */
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 #if !_PTHREAD_SWIFT_IMPORTER_NULLABILITY_COMPAT
 int pthread_create_suspended_np(
                pthread_t _Nullable * _Nonnull, const pthread_attr_t * _Nullable,
@@ -533,20 +533,20 @@ int pthread_create_suspended_np(pthread_t *, const pthread_attr_t * _Nullable,
                void *(* _Nonnull)(void *), void * _Nullable);
 #endif
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_kill(pthread_t, int);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_5, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.5), ios(2.0))
 _Nullable pthread_t pthread_from_mach_thread_np(mach_port_t);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 int pthread_sigmask(int, const sigset_t * _Nullable, sigset_t * _Nullable)
                __DARWIN_ALIAS(pthread_sigmask);
 
-__OSX_AVAILABLE_STARTING(__MAC_10_4, __IPHONE_2_0)
+__API_AVAILABLE(macos(10.4), ios(2.0))
 void pthread_yield_np(void);
 
-#endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE */
+#endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE || __cplusplus */
 __END_DECLS
 #if __has_feature(assume_nonnull)
 _Pragma("clang assume_nonnull end")
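With the __cplusplus guard change above, the Darwin _np extensions become visible to strict C++ translation units without defining _DARWIN_C_SOURCE; a small C sketch exercising a few of the declarations converted above:

    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
        __uint64_t tid = 0;

        pthread_threadid_np(NULL, &tid);      /* NULL means the calling thread */
        pthread_setname_np("example-worker"); /* names the calling thread */
        printf("main thread: %d, tid: %llu, stack: %zu bytes\n",
                pthread_main_np(), (unsigned long long)tid,
                pthread_get_stacksize_np(pthread_self()));
        return 0;
    }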
index 95362753aa44b97d71f9323790d3f4b1ab5edf9a..c1f464d67e3af14b211859a6116656cc824bb816 100644 (file)
@@ -71,12 +71,12 @@ __BEGIN_DECLS
 #define _PTHREAD_MUTEX_POLICY_FIRSTFIT         2
 
 /* sets the mutex policy attributes */
-__OSX_AVAILABLE_STARTING(__MAC_10_7, __IPHONE_5_0)
+__API_AVAILABLE(macos(10.7), ios(5.0))
 int pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *, int );
 
 #endif /* (!_POSIX_C_SOURCE && !_XOPEN_SOURCE) || _DARWIN_C_SOURCE */
 
-__OSX_AVAILABLE_STARTING(__MAC_10_11, __IPHONE_NA)
+__API_AVAILABLE(macos(10.11))
 void _pthread_mutex_enable_legacy_mode(void);
 
 /*
@@ -86,8 +86,7 @@ void _pthread_mutex_enable_legacy_mode(void);
  *
  * It is not safe to call this function concurrently.
  */
-__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
-__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
+__API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))
 #if !_PTHREAD_SWIFT_IMPORTER_NULLABILITY_COMPAT
 int pthread_create_from_mach_thread(
                pthread_t _Nullable * _Nonnull __restrict,
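For the mutex-policy SPI above, a sketch opting one mutex into first-fit rather than the default fairshare policy (the include path is an assumption):

    #include <pthread.h>
    #include <pthread/pthread_spis.h>  /* assumed SPI install path */

    int init_firstfit_mutex(pthread_mutex_t *m)
    {
        pthread_mutexattr_t attr;
        int rc;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpolicy_np(&attr, _PTHREAD_MUTEX_POLICY_FIRSTFIT);
        rc = pthread_mutex_init(m, &attr);
        pthread_mutexattr_destroy(&attr);
        return rc;
    }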
index 04e6b6f3f2a0cf37099737792a1392573d1237fb..9c1bfd8a2bbf45994ad1bed87ac4f5b55713cb8f 100644 (file)
@@ -25,6 +25,8 @@
 #define _PTHREAD_QOS_H
 
 #include <sys/cdefs.h>
+#include <sys/_pthread/_pthread_attr_t.h> /* pthread_attr_t */
+#include <sys/_pthread/_pthread_t.h>      /* pthread_t */
 #include <Availability.h>
 
 #if __DARWIN_C_LEVEL >= __DARWIN_C_FULL
@@ -76,7 +78,7 @@ __BEGIN_DECLS
  * @return
  * Zero if successful, otherwise an errno value.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 pthread_attr_set_qos_class_np(pthread_attr_t *__attr,
                qos_class_t __qos_class, int __relative_priority);
@@ -107,7 +109,7 @@ pthread_attr_set_qos_class_np(pthread_attr_t *__attr,
  * @return
  * Zero if successful, otherwise an errno value.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 pthread_attr_get_qos_class_np(pthread_attr_t * __restrict __attr,
                qos_class_t * _Nullable __restrict __qos_class,
@@ -148,7 +150,7 @@ pthread_attr_get_qos_class_np(pthread_attr_t * __restrict __attr,
  * @return
  * Zero if successful, otherwise an errno value.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 pthread_set_qos_class_self_np(qos_class_t __qos_class,
                int __relative_priority);
@@ -179,7 +181,7 @@ pthread_set_qos_class_self_np(qos_class_t __qos_class,
  * @return
  * Zero if successful, otherwise an errno value.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 pthread_get_qos_class_np(pthread_t __pthread,
                qos_class_t * _Nullable __restrict __qos_class,
@@ -258,7 +260,7 @@ typedef struct pthread_override_s* pthread_override_t;
  * A newly allocated override object if successful, or NULL if the override
  * could not be started.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 pthread_override_t
 pthread_override_qos_class_start_np(pthread_t __pthread,
                qos_class_t __qos_class, int __relative_priority);
@@ -286,7 +288,7 @@ pthread_override_qos_class_start_np(pthread_t __pthread,
  * @return
  * Zero if successful, otherwise an errno value.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 pthread_override_qos_class_end_np(pthread_override_t __override);
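A sketch of the attribute-based QoS API documented above: create a thread in the utility band with relative priority 0 (function names here are illustrative):

    #include <pthread.h>
    #include <pthread/qos.h>

    static void *utility_work(void *arg)
    {
        return arg;  /* runs at QOS_CLASS_UTILITY */
    }

    int spawn_utility_thread(pthread_t *out)
    {
        pthread_attr_t attr;
        int rc;

        pthread_attr_init(&attr);
        pthread_attr_set_qos_class_np(&attr, QOS_CLASS_UTILITY, 0);
        rc = pthread_create(out, &attr, utility_work, NULL);
        pthread_attr_destroy(&attr);
        return rc;
    }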
 
index 11af0fe15a22b7ffb1cd8d3a67ed8614651e29ef..f38783808f6ca98d907596af57f05622fde27dc2 100644 (file)
@@ -59,7 +59,7 @@ __BEGIN_DECLS
  * @return
  * Zero if successful, otherwise an errno value.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 posix_spawnattr_set_qos_class_np(posix_spawnattr_t * __restrict __attr,
                                  qos_class_t __qos_class);
@@ -82,7 +82,7 @@ posix_spawnattr_set_qos_class_np(posix_spawnattr_t * __restrict __attr,
  * @return
  * Zero if successful, otherwise an errno value.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 int
 posix_spawnattr_get_qos_class_np(const posix_spawnattr_t *__restrict __attr,
                                  qos_class_t * __restrict __qos_class);
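And the spawn-attribute counterpart above, sketched for a child launched at background QoS (the <pthread/spawn.h> include path is an assumption):

    #include <spawn.h>
    #include <sys/qos.h>
    #include <pthread/spawn.h>  /* assumed SPI install path */

    extern char **environ;

    int spawn_background(pid_t *pid, char *const argv[])
    {
        posix_spawnattr_t attr;
        int rc;

        posix_spawnattr_init(&attr);
        posix_spawnattr_set_qos_class_np(&attr, QOS_CLASS_BACKGROUND);
        rc = posix_spawn(pid, argv[0], NULL, &attr, argv, environ);
        posix_spawnattr_destroy(&attr);
        return rc;
    }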
index 173a7fdde6a9a2ee126a49b93c86de9ef10845af..b50e608593776a6decce19f61571560d0ac4c696 100644 (file)
@@ -2,14 +2,14 @@
  * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * compliance with the License. Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this
  * file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_LICENSE_HEADER_END@
  */
 /*
- * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991  
- *              All Rights Reserved 
- *  
- * Permission to use, copy, modify, and distribute this software and 
- * its documentation for any purpose and without fee is hereby granted, 
- * provided that the above copyright notice appears in all copies and 
- * that both the copyright notice and this permission notice appear in 
- * supporting documentation. 
- *  
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE 
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
- * FOR A PARTICULAR PURPOSE. 
- *  
- * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR 
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, 
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION 
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * 
+ * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
+ *              All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
  */
 /*
  * MkLinux
@@ -67,7 +67,6 @@ typedef struct _pthread_attr_t pthread_attr_t;
 #include <limits.h>
 #include <errno.h>
 #include <TargetConditionals.h>
-#include <libkern/OSAtomic.h>
 #include <mach/mach.h>
 #include <mach/mach_error.h>
 #include <sys/queue.h>
@@ -76,6 +75,10 @@ typedef struct _pthread_attr_t pthread_attr_t;
 #include <os/internal/internal_shared.h>
 #include <os/once_private.h>
 
+#if TARGET_IPHONE_SIMULATOR
+#error Unsupported target
+#endif
+
 #define PTHREAD_INTERNAL_CRASH(c, x) do { \
                _os_set_crash_log_cause_and_message((c), \
                                "BUG IN LIBPTHREAD: " x); \
@@ -100,6 +103,18 @@ typedef struct _pthread_attr_t pthread_attr_t;
 #include "tsd_private.h"
 #include "spinlock_private.h"
 
+#define PTHREAD_EXPORT extern __attribute__((visibility("default")))
+#define PTHREAD_EXTERN extern
+#define PTHREAD_NOEXPORT __attribute__((visibility("hidden")))
+#define PTHREAD_NOEXPORT_VARIANT
+#define PTHREAD_NORETURN __attribute__((__noreturn__))
+#define PTHREAD_ALWAYS_INLINE __attribute__((always_inline))
+#define PTHREAD_NOINLINE __attribute__((noinline))
+#define PTHREAD_WEAK __attribute__((weak))
+#define PTHREAD_USED __attribute__((used))
+#define PTHREAD_NOT_TAIL_CALLED __attribute__((__not_tail_called__))
+
+
 #define OS_UNFAIR_LOCK_INLINE 1
 #include <os/lock_private.h>
 typedef os_unfair_lock _pthread_lock;
@@ -110,10 +125,6 @@ typedef os_unfair_lock _pthread_lock;
 #define _PTHREAD_UNLOCK(lock) os_unfair_lock_unlock_inline(&(lock))
 #define _PTHREAD_UNLOCK_FROM_MACH_THREAD(lock) os_unfair_lock_unlock_inline_no_tsd_4libpthread(&(lock))
 
-#if TARGET_IPHONE_SIMULATOR
-#error Unsupported target
-#endif
-
 // List of all pthreads in the process.
 TAILQ_HEAD(__pthread_list, _pthread);
 extern struct __pthread_list __pthread_head;
@@ -123,6 +134,12 @@ extern _pthread_lock _pthread_list_lock;
 
 extern int __is_threaded;
 
+#if PTHREAD_DEBUG_LOG
+#include <mach/mach_time.h>
+extern int _pthread_debuglog;
+extern uint64_t _pthread_debugstart;
+#endif
+
 /*
  * Compiled-in limits
  */
@@ -153,7 +170,7 @@ typedef struct _pthread {
                        parentcheck:1,
                        childexit:1,
                        pad3:29;
-       
+
        _pthread_lock lock; // protect access to everything below
        uint32_t detached:8,
                        inherit:8,
@@ -337,21 +354,19 @@ typedef struct {
        long sig;
        _pthread_lock lock;
        uint32_t unused:29,
-               misalign:1,
-               pshared:2;
+                       misalign:1,
+                       pshared:2;
        uint32_t rw_flags;
 #if defined(__LP64__)
        uint32_t _pad;
 #endif
-       volatile uint32_t rw_seq[4];
-       struct _pthread *rw_owner;
-       volatile uint32_t *rw_lcntaddr;
-       volatile uint32_t *rw_seqaddr;
-       volatile uint32_t *rw_ucntaddr;
+       uint32_t rw_tid[2]; // thread id of thread that has exclusive (write) lock
+       uint32_t rw_seq[4]; // rw sequence id (at 128-bit aligned boundary)
+       uint32_t rw_mis[4]; // for misaligned locks rw_seq will span into here
 #if defined(__LP64__)
-       uint32_t _reserved[31];
+       uint32_t _reserved[34];
 #else
-       uint32_t _reserved[19];
+       uint32_t _reserved[18];
 #endif
 } _pthread_rwlock;
 
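The comment on rw_seq is the key to the new layout: the four sequence words are placed so that a 128-bit aligned window always exists, spilling into rw_mis when the lock itself is misaligned. One plausible way to consume that layout (hypothetical helper, not code from this commit):

/* Round rw_seq up to the next 16-byte boundary; for a misaligned lock the
 * window slides forward into rw_mis, which is why rw_mis directly follows. */
static inline volatile uint32_t *
rwlock_aligned_seqaddr(_pthread_rwlock *rwlock)
{
	return (volatile uint32_t *)(((uintptr_t)rwlock->rw_seq + 0xful) & ~0xful);
}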
@@ -361,6 +376,9 @@ typedef struct {
 _Static_assert(sizeof(_pthread_mutex) == sizeof(pthread_mutex_t),
                "Incorrect _pthread_mutex structure size");
 
+_Static_assert(sizeof(_pthread_rwlock) == sizeof(pthread_rwlock_t),
+               "Incorrect _pthread_rwlock structure size");
+
 // Internal references to pthread_self() use TSD slot 0 directly.
 inline static pthread_t __attribute__((__pure__))
 _pthread_self_direct(void)
@@ -369,7 +387,8 @@ _pthread_self_direct(void)
 }
 #define pthread_self() _pthread_self_direct()
 
-inline static pthread_t __attribute__((__pure__))
+PTHREAD_ALWAYS_INLINE
+inline static uint64_t __attribute__((__pure__))
 _pthread_selfid_direct(void)
 {
        return (_pthread_self_direct())->thread_id;
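Both fast paths are single TSD loads; illustratively (a fragment, not code from this commit):

/* pthread_self() reduces to reading slot 0 of the TSD array; no mach call. */
pthread_t self = (pthread_t)_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF);
uint64_t tid = _pthread_selfid_direct(); /* now typed uint64_t, fixing the old pthread_t return */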
@@ -426,18 +445,10 @@ _pthread_selfid_direct(void)
 #define _PTHREAD_CANCEL_STATE_MASK   0x01
 #define _PTHREAD_CANCEL_TYPE_MASK    0x02
 #define _PTHREAD_CANCEL_PENDING             0x10  /* pthread_cancel() has been called for this thread */
+#define _PTHREAD_CANCEL_INITIALIZED  0x20  /* the thread in the list is properly initialized */
 
 extern boolean_t swtch_pri(int);
 
-#define PTHREAD_EXPORT extern __attribute__((visibility("default")))
-#define PTHREAD_EXTERN extern
-#define PTHREAD_NOEXPORT __attribute__((visibility("hidden")))
-#define PTHREAD_NORETURN __attribute__((__noreturn__))
-#define PTHREAD_ALWAYS_INLINE __attribute__((always_inline))
-#define PTHREAD_NOINLINE __attribute__((noinline))
-#define PTHREAD_WEAK __attribute__((weak))
-#define PTHREAD_USED __attribute__((used))
-
 #include "kern/kern_internal.h"
 
 /* Prototypes. */
@@ -450,7 +461,7 @@ PTHREAD_NOEXPORT void _pthread_setup(pthread_t th, void (*f)(pthread_t), void *s
 
 PTHREAD_NOEXPORT void _pthread_tsd_cleanup(pthread_t self);
 
-PTHREAD_NOEXPORT int __mtx_droplock(_pthread_mutex *mutex, uint32_t * flagp, uint32_t ** pmtxp, uint32_t * mgenp, uint32_t * ugenp);
+PTHREAD_NOEXPORT int _pthread_mutex_droplock(_pthread_mutex *mutex, uint32_t * flagp, uint32_t ** pmtxp, uint32_t * mgenp, uint32_t * ugenp);
 
 /* internally redirected upcalls. */
 PTHREAD_NOEXPORT void* malloc(size_t);
@@ -478,7 +489,6 @@ PTHREAD_EXTERN
 int
 __proc_info(int callnum, int pid, int flavor, uint64_t arg, void * buffer, int buffersize);
 
-PTHREAD_NOEXPORT int _pthread_lookup_thread(pthread_t thread, mach_port_t * port, int only_joinable);
 PTHREAD_NOEXPORT int _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming);
 
 PTHREAD_NORETURN PTHREAD_NOEXPORT
@@ -487,7 +497,7 @@ __pthread_abort(void);
 
 PTHREAD_NORETURN PTHREAD_NOEXPORT
 void
-__pthread_abort_reason(const char *fmt, ...);
+__pthread_abort_reason(const char *fmt, ...) __printflike(1,2);
 
 PTHREAD_NOEXPORT
 void
@@ -507,13 +517,17 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *keve
 
 PTHREAD_NOEXPORT
 void
-__pthread_fork_child_internal(pthread_t p);
+_pthread_main_thread_init(pthread_t p);
 
-PTHREAD_EXPORT
+PTHREAD_NOEXPORT
+void
+_pthread_bsdthread_init(void);
+
+PTHREAD_NOEXPORT_VARIANT
 void
 _pthread_clear_qos_tsd(mach_port_t thread_port);
 
-PTHREAD_EXPORT
+PTHREAD_NOEXPORT_VARIANT
 void
 _pthread_testcancel(pthread_t thread, int isconforming);
 
@@ -521,6 +535,18 @@ PTHREAD_EXPORT
 void
 _pthread_exit_if_canceled(int error);
 
+PTHREAD_NOEXPORT
+void
+_pthread_markcancel_if_canceled(pthread_t thread, mach_port_t kport);
+
+PTHREAD_NOEXPORT
+void
+_pthread_setcancelstate_exit(pthread_t self, void *value_ptr, int conforming);
+
+PTHREAD_NOEXPORT
+void *
+_pthread_get_exit_value(pthread_t t, int conforming);
+
 PTHREAD_ALWAYS_INLINE
 static inline mach_port_t
 _pthread_kernel_thread(pthread_t t)
@@ -532,15 +558,14 @@ PTHREAD_ALWAYS_INLINE
 static inline void
 _pthread_set_kernel_thread(pthread_t t, mach_port_t p)
 {
-       if (os_slowpath(!MACH_PORT_VALID(p))) {
-               PTHREAD_INTERNAL_CRASH(t, "Invalid thread port");
-       }
        t->tsd[_PTHREAD_TSD_SLOT_MACH_THREAD_SELF] = p;
 }
 
-#define PTHREAD_ABORT(f,...) __pthread_abort_reason("%s:%s:%u: " f, __FILE__, __func__, __LINE__, ## __VA_ARGS__)
+#define PTHREAD_ABORT(f,...) __pthread_abort_reason( \
+               "%s:%s:%u: " f, __FILE__, __func__, __LINE__, ## __VA_ARGS__)
 
-#define PTHREAD_ASSERT(b) do { if (!(b)) PTHREAD_ABORT("failed assertion `%s'", #b); } while (0)
+#define PTHREAD_ASSERT(b) \
+               do { if (!(b)) PTHREAD_ABORT("failed assertion `%s'", #b); } while (0)
 
 #include <os/semaphore_private.h>
 #include <os/alloc_once_private.h>
@@ -563,6 +588,9 @@ struct pthread_globals_s {
        size_t atfork_count;
        struct pthread_atfork_entry atfork_storage[PTHREAD_ATFORK_INLINE_MAX];
        struct pthread_atfork_entry *atfork;
+       uint16_t qmp_logical[THREAD_QOS_LAST];
+       uint16_t qmp_physical[THREAD_QOS_LAST];
+
 };
 typedef struct pthread_globals_s *pthread_globals_t;
 
@@ -581,21 +609,120 @@ PTHREAD_ALWAYS_INLINE
 static inline bool
 _pthread_mutex_check_signature_fast(_pthread_mutex *mutex)
 {
-       return os_fastpath(mutex->sig == _PTHREAD_MUTEX_SIG_fast);
+       return (mutex->sig == _PTHREAD_MUTEX_SIG_fast);
 }
 
 PTHREAD_ALWAYS_INLINE
 static inline bool
 _pthread_mutex_check_signature(_pthread_mutex *mutex)
 {
-       return os_fastpath((mutex->sig & _PTHREAD_MUTEX_SIG_MASK) == _PTHREAD_MUTEX_SIG_CMP);
+       return ((mutex->sig & _PTHREAD_MUTEX_SIG_MASK) == _PTHREAD_MUTEX_SIG_CMP);
 }
 
 PTHREAD_ALWAYS_INLINE
 static inline bool
 _pthread_mutex_check_signature_init(_pthread_mutex *mutex)
 {
-       return os_fastpath((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_init_CMP);
+       return ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) ==
+                       _PTHREAD_MUTEX_SIG_init_CMP);
+}
+
+#pragma mark _pthread_rwlock_check_signature
+
+PTHREAD_ALWAYS_INLINE
+static inline bool
+_pthread_rwlock_check_signature(_pthread_rwlock *rwlock)
+{
+       return (rwlock->sig == _PTHREAD_RWLOCK_SIG);
+}
+
+PTHREAD_ALWAYS_INLINE
+static inline bool
+_pthread_rwlock_check_signature_init(_pthread_rwlock *rwlock)
+{
+       return (rwlock->sig == _PTHREAD_RWLOCK_SIG_init);
+}
+
+/* ALWAYS called with list lock and return with list lock */
+PTHREAD_ALWAYS_INLINE
+static inline bool
+_pthread_is_valid_locked(pthread_t thread)
+{
+       pthread_t p;
+loop:
+       TAILQ_FOREACH(p, &__pthread_head, plist) {
+               if (p == thread) {
+                       int state = os_atomic_load(&p->cancel_state, relaxed);
+                       if (state & _PTHREAD_CANCEL_INITIALIZED) {
+                               return true;
+                       }
+                       _PTHREAD_UNLOCK(_pthread_list_lock);
+                       thread_switch(_pthread_kernel_thread(p),
+                                       SWITCH_OPTION_OSLOCK_DEPRESS, 1);
+                       _PTHREAD_LOCK(_pthread_list_lock);
+                       goto loop;
+               }
+       }
+
+       return false;
+}
+
+#define PTHREAD_IS_VALID_LOCK_THREAD 0x1
+
+PTHREAD_ALWAYS_INLINE
+static inline bool
+_pthread_is_valid(pthread_t thread, int flags, mach_port_t *portp)
+{
+       mach_port_t kport = MACH_PORT_NULL;
+       bool valid;
+
+       if (thread == NULL) {
+               return false;
+       }
+
+       if (thread == pthread_self()) {
+               valid = true;
+               kport = _pthread_kernel_thread(thread);
+               if (flags & PTHREAD_IS_VALID_LOCK_THREAD) {
+                       _PTHREAD_LOCK(thread->lock);
+               }
+       } else {
+               _PTHREAD_LOCK(_pthread_list_lock);
+               if (_pthread_is_valid_locked(thread)) {
+                       kport = _pthread_kernel_thread(thread);
+                       valid = true;
+                       if (flags & PTHREAD_IS_VALID_LOCK_THREAD) {
+                               _PTHREAD_LOCK(thread->lock);
+                       }
+               } else {
+                       valid = false;
+               }
+               _PTHREAD_UNLOCK(_pthread_list_lock);
+       }
+
+       if (portp != NULL) {
+               *portp = kport;
+       }
+       return valid;
+}
+
+PTHREAD_ALWAYS_INLINE
+static inline void*
+_pthread_atomic_xchg_ptr_inline(void **p, void *v)
+{
+       return os_atomic_xchg(p, v, seq_cst);
+}
+
+PTHREAD_ALWAYS_INLINE
+static inline uint32_t
+_pthread_atomic_xchg_uint32_relaxed_inline(uint32_t *p, uint32_t v)
+{
+       return os_atomic_xchg(p, v, relaxed);
 }
 
+#define _pthread_atomic_xchg_ptr(p, v) \
+               _pthread_atomic_xchg_ptr_inline(p, v)
+#define _pthread_atomic_xchg_uint32_relaxed(p, v) \
+               _pthread_atomic_xchg_uint32_relaxed_inline(p, v)
+
 #endif /* _POSIX_PTHREAD_INTERNALS_H */
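Typical call shape for the new validator, mirroring the pthread_detach rewrite later in this commit (sketch):

	mach_port_t kport;
	if (!_pthread_is_valid(thread, PTHREAD_IS_VALID_LOCK_THREAD, &kport)) {
		return ESRCH; /* not a live pthread_t */
	}
	/* thread->lock is now held; inspect or mutate the thread, then: */
	_PTHREAD_UNLOCK(thread->lock);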
index 4380326f6be9a8fcbdbe3fe3f0aa9be14866597f..4922a1c5b32bd0cccf88b3215764c691a5ad054d 100644 (file)
@@ -2,14 +2,14 @@
  * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * compliance with the License. Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this
  * file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_LICENSE_HEADER_END@
  */
 /*
- * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991  
- *              All Rights Reserved 
- *  
- * Permission to use, copy, modify, and distribute this software and 
- * its documentation for any purpose and without fee is hereby granted, 
- * provided that the above copyright notice appears in all copies and 
- * that both the copyright notice and this permission notice appear in 
- * supporting documentation. 
- *  
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE 
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
- * FOR A PARTICULAR PURPOSE. 
- *  
+ * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
+ *              All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
  * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, 
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION 
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * 
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
  */
 /*
  * MkLinux
@@ -49,6 +49,7 @@
  * POSIX Pthread Library
  */
 
+#include "resolver.h"
 #include "internal.h"
 #include "private.h"
 #include "workqueue_private.h"
@@ -70,7 +71,6 @@
 #include <machine/vmparam.h>
 #define        __APPLE_API_PRIVATE
 #include <machine/cpu_capabilities.h>
-#include <libkern/OSAtomic.h>
 
 #include <_simple.h>
 #include <platform/string.h>
 extern int __sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp,
                     void *newp, size_t newlen);
 extern void __exit(int) __attribute__((noreturn));
+extern int __pthread_kill(mach_port_t, int);
+
+extern struct _pthread _thread;
+extern int default_priority;
 
-static void (*exitf)(int) = __exit;
-__private_extern__ void* (*_pthread_malloc)(size_t) = NULL;
-__private_extern__ void (*_pthread_free)(void *) = NULL;
 
 //
 // Global variables
 //
 
+static void (*exitf)(int) = __exit;
+PTHREAD_NOEXPORT void* (*_pthread_malloc)(size_t) = NULL;
+PTHREAD_NOEXPORT void (*_pthread_free)(void *) = NULL;
+
+#if PTHREAD_DEBUG_LOG
+#include <fcntl.h>
+int _pthread_debuglog;
+uint64_t _pthread_debugstart;
+#endif
+
 // This global should be used (carefully) by anyone needing to know if a
 // pthread (other than the main thread) has been created.
 int __is_threaded = 0;
@@ -97,8 +108,8 @@ int __unix_conforming = 0;
 // _pthread_list_lock protects _pthread_count, access to the __pthread_head
 // list, and the parentcheck, childrun and childexit flags of the pthread
 // structure. Externally imported by pthread_cancelable.c.
-__private_extern__ _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
-__private_extern__ struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
+PTHREAD_NOEXPORT _pthread_lock _pthread_list_lock = _PTHREAD_LOCK_INITIALIZER;
+PTHREAD_NOEXPORT struct __pthread_list __pthread_head = TAILQ_HEAD_INITIALIZER(__pthread_head);
 static int _pthread_count = 1;
 
 #if PTHREAD_LAYOUT_SPI
@@ -123,22 +134,22 @@ typedef struct _pthread_reap_msg_t {
        mach_msg_trailer_t trailer;
 } pthread_reap_msg_t;
 
-/* 
+/*
  * The pthread may be offset into a page.  In that event, by contract
  * with the kernel, the allocation will extend PTHREAD_SIZE from the
  * start of the next page.  There's also one page worth of allocation
- * below stacksize for the guard page. <rdar://problem/19941744> 
+ * below stacksize for the guard page. <rdar://problem/19941744>
  */
 #define PTHREAD_SIZE ((size_t)mach_vm_round_page(sizeof(struct _pthread)))
 #define PTHREAD_ALLOCADDR(stackaddr, stacksize) ((stackaddr - stacksize) - vm_page_size)
 #define PTHREAD_ALLOCSIZE(stackaddr, stacksize) ((round_page((uintptr_t)stackaddr) + PTHREAD_SIZE) - (uintptr_t)PTHREAD_ALLOCADDR(stackaddr, stacksize))
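Worked example of the layout described above (hypothetical numbers: 4K pages, page-aligned stackaddr):

/*   stackaddr = 0x300000, stacksize = 0x80000
 *   PTHREAD_ALLOCADDR = (0x300000 - 0x80000) - 0x1000 = 0x27f000   (guard page below the stack)
 *   PTHREAD_ALLOCSIZE = (0x300000 + PTHREAD_SIZE) - 0x27f000
 *                     = 0x81000 + PTHREAD_SIZE                     (stack + guard + struct pages)
 */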
 
-static pthread_attr_t _pthread_attr_default = {0};
+static pthread_attr_t _pthread_attr_default = { };
 
 // The main thread's pthread_t
-static struct _pthread _thread __attribute__((aligned(64))) = {0};
+PTHREAD_NOEXPORT struct _pthread _thread __attribute__((aligned(64))) = { };
 
-static int default_priority;
+PTHREAD_NOEXPORT int default_priority;
 static int max_priority;
 static int min_priority;
 static int pthread_concurrency;
@@ -146,10 +157,12 @@ static int pthread_concurrency;
 // work queue support data
 static void (*__libdispatch_workerfunction)(pthread_priority_t) = NULL;
 static void (*__libdispatch_keventfunction)(void **events, int *nevents) = NULL;
+static void (*__libdispatch_workloopfunction)(uint64_t *workloop_id, void **events, int *nevents) = NULL;
 static int __libdispatch_offset;
 
 // supported feature set
 int __pthread_supported_features;
+static bool __workq_newapi;
 
 //
 // Function prototypes
@@ -159,40 +172,34 @@ int __pthread_supported_features;
 static int _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack);
 static int _pthread_deallocate(pthread_t t);
 
-static void _pthread_terminate(pthread_t t);
+static void _pthread_terminate_invoke(pthread_t t);
 
-static void _pthread_struct_init(pthread_t t,
+static inline void _pthread_struct_init(pthread_t t,
        const pthread_attr_t *attrs,
        void *stack,
        size_t stacksize,
        void *freeaddr,
        size_t freesize);
 
-extern void _pthread_set_self(pthread_t);
-static void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);
+static inline void _pthread_set_self_internal(pthread_t, bool needs_tsd_base_set);
 
 static void _pthread_dealloc_reply_port(pthread_t t);
+static void _pthread_dealloc_special_reply_port(pthread_t t);
 
-static inline void __pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread);
+static inline void __pthread_add_thread(pthread_t t, const pthread_attr_t *attr, bool parent, bool from_mach_thread);
 static inline int __pthread_remove_thread(pthread_t t, bool child, bool *should_exit);
 
-static int _pthread_find_thread(pthread_t thread);
-
 static void _pthread_exit(pthread_t self, void *value_ptr) __dead2;
-static void _pthread_setcancelstate_exit(pthread_t self, void  *value_ptr, int conforming);
 
 static inline void _pthread_introspection_thread_create(pthread_t t, bool destroy);
 static inline void _pthread_introspection_thread_start(pthread_t t);
 static inline void _pthread_introspection_thread_terminate(pthread_t t, void *freeaddr, size_t freesize, bool destroy);
 static inline void _pthread_introspection_thread_destroy(pthread_t t);
 
+extern void _pthread_set_self(pthread_t);
 extern void start_wqthread(pthread_t self, mach_port_t kport, void *stackaddr, void *unused, int reuse); // trampoline into _pthread_wqthread
 extern void thread_start(pthread_t self, mach_port_t kport, void *(*fun)(void *), void * funarg, size_t stacksize, unsigned int flags); // trampoline into _pthread_start
 
-void pthread_workqueue_atfork_child(void);
-
-static bool __workq_newapi;
-
 /* Compatibility: previous pthread API used WORKQUEUE_OVERCOMMIT to request overcommit threads from
 * the kernel. This definition is kept here, in userspace only, to perform the compatibility shim
  * from old API requests to the new kext conventions.
@@ -200,7 +207,7 @@ static bool __workq_newapi;
 #define WORKQUEUE_OVERCOMMIT 0x10000
 
 /*
- * Flags filed passed to bsdthread_create and back in pthread_start 
+ * Flags field passed to bsdthread_create and back in pthread_start
 31  <---------------------------------> 0
 _________________________________________
 | flags(8) | policy(8) | importance(16) |
@@ -222,8 +229,6 @@ extern pthread_t __bsdthread_create(void *(*func)(void *), void * func_arg, void
 extern int __bsdthread_register(void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), void (*)(pthread_t, mach_port_t, void *, void *, int), int,void (*)(pthread_t, mach_port_t, void *(*)(void *), void *, size_t, unsigned int), int32_t *,__uint64_t);
 extern int __bsdthread_terminate(void * freeaddr, size_t freesize, mach_port_t kport, mach_port_t joinsem);
 extern __uint64_t __thread_selfid( void );
-extern int __pthread_canceled(int);
-extern int __pthread_kill(mach_port_t, int);
 
 extern int __workq_open(void);
 extern int __workq_kernreturn(int, void *, int, int);
@@ -261,14 +266,14 @@ _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack)
        size_t allocsize = 0;
        size_t guardsize = 0;
        size_t stacksize = 0;
-       
+
        PTHREAD_ASSERT(attrs->stacksize >= PTHREAD_STACK_MIN);
 
        *thread = NULL;
        *stack = NULL;
-       
+
        // Allocate a pthread structure if necessary
-       
+
        if (attrs->stackaddr != NULL) {
                PTHREAD_ASSERT(((uintptr_t)attrs->stackaddr % vm_page_size) == 0);
                *stack = attrs->stackaddr;
@@ -278,7 +283,7 @@ _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack)
                stacksize = attrs->stacksize;
                allocsize = stacksize + guardsize + PTHREAD_SIZE;
        }
-       
+
        kr = mach_vm_map(mach_task_self(),
                         &allocaddr,
                         allocsize,
@@ -314,7 +319,7 @@ _pthread_allocate(pthread_t *thread, const pthread_attr_t *attrs, void **stack)
                        *stack = t;
                }
        }
-       
+
        if (t != NULL) {
                _pthread_struct_init(t, attrs,
                                     *stack, attrs->stacksize,
@@ -344,7 +349,7 @@ _pthread_deallocate(pthread_t t)
 
 PTHREAD_NOINLINE
 static void*
-_current_stack_address(void)
+_pthread_current_stack_address(void)
 {
        int a;
        return &a;
@@ -353,7 +358,7 @@ _current_stack_address(void)
 #pragma clang diagnostic pop
 
 // Terminates the thread if called from the currently running thread.
-PTHREAD_NORETURN PTHREAD_NOINLINE
+PTHREAD_NORETURN PTHREAD_NOINLINE PTHREAD_NOT_TAIL_CALLED
 static void
 _pthread_terminate(pthread_t t)
 {
@@ -385,7 +390,7 @@ _pthread_terminate(pthread_t t)
                t->freesize -= freesize_stack;
        } else if (t == &_thread){
                freeaddr = t->stackaddr - pthread_get_stacksize_np(t);
-               uintptr_t stackborder = trunc_page((uintptr_t)_current_stack_address());
+               uintptr_t stackborder = trunc_page((uintptr_t)_pthread_current_stack_address());
                freesize_stack = stackborder - freeaddr;
        } else {
                freesize_stack = 0;
@@ -394,6 +399,7 @@ _pthread_terminate(pthread_t t)
        mach_port_t kport = _pthread_kernel_thread(t);
        semaphore_t joinsem = t->joiner_notify;
 
+       _pthread_dealloc_special_reply_port(t);
        _pthread_dealloc_reply_port(t);
 
        // After the call to __pthread_remove_thread, it is not safe to
@@ -421,7 +427,14 @@ _pthread_terminate(pthread_t t)
        PTHREAD_ABORT("thread %p didn't terminate", t);
 }
 
-int       
+PTHREAD_NORETURN
+static void
+_pthread_terminate_invoke(pthread_t t)
+{
+       _pthread_terminate(t);
+}
+
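A plausible reading of this wrapper, not spelled out in the commit: _pthread_terminate unmaps the thread's own stack, so a caller that tail-called it could have its frame reclaimed while the terminate path still needs it; PTHREAD_NOT_TAIL_CALLED plus the _pthread_terminate_invoke shim guarantee a real call frame survives until the __bsdthread_terminate trap.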
+int
 pthread_attr_destroy(pthread_attr_t *attr)
 {
        int ret = EINVAL;
@@ -432,7 +445,7 @@ pthread_attr_destroy(pthread_attr_t *attr)
        return ret;
 }
 
-int       
+int
 pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
 {
        int ret = EINVAL;
@@ -443,7 +456,7 @@ pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
        return ret;
 }
 
-int       
+int
 pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
 {
        int ret = EINVAL;
@@ -454,7 +467,7 @@ pthread_attr_getinheritsched(const pthread_attr_t *attr, int *inheritsched)
        return ret;
 }
 
-int       
+int
 pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
 {
        int ret = EINVAL;
@@ -465,7 +478,7 @@ pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param
        return ret;
 }
 
-int       
+int
 pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
 {
        int ret = EINVAL;
@@ -497,7 +510,7 @@ pthread_attr_init(pthread_attr_t *attr)
        return 0;
 }
 
-int       
+int
 pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
 {
        int ret = EINVAL;
@@ -510,7 +523,7 @@ pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
        return ret;
 }
 
-int       
+int
 pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
 {
        int ret = EINVAL;
@@ -523,7 +536,7 @@ pthread_attr_setinheritsched(pthread_attr_t *attr, int inheritsched)
        return ret;
 }
 
-int       
+int
 pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
 {
        int ret = EINVAL;
@@ -536,7 +549,7 @@ pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param
        return ret;
 }
 
-int       
+int
 pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
 {
        int ret = EINVAL;
@@ -686,17 +699,18 @@ pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
 /*
  * Create and start execution of a new thread.
  */
-PTHREAD_NOINLINE
+PTHREAD_NOINLINE PTHREAD_NORETURN
 static void
 _pthread_body(pthread_t self, bool needs_tsd_base_set)
 {
        _pthread_set_self_internal(self, needs_tsd_base_set);
-       __pthread_add_thread(self, false, false);
+       __pthread_add_thread(self, NULL, false, false);
        void *result = (self->fun)(self->arg);
 
        _pthread_exit(self, result);
 }
 
+PTHREAD_NORETURN
 void
 _pthread_start(pthread_t self,
               mach_port_t kport,
@@ -735,14 +749,21 @@ _pthread_start(pthread_t self,
 
        bool thread_tsd_bsd_set = (bool)(pflags & PTHREAD_START_TSD_BASE_SET);
 
-       _pthread_set_kernel_thread(self, kport);
+#if DEBUG
+       PTHREAD_ASSERT(MACH_PORT_VALID(kport));
+       PTHREAD_ASSERT(_pthread_kernel_thread(self) == kport);
+#endif
+       // will mark the thread initialized
+       _pthread_markcancel_if_canceled(self, kport);
+
        self->fun = fun;
        self->arg = arg;
 
        _pthread_body(self, !thread_tsd_bsd_set);
 }
 
-static void
+PTHREAD_ALWAYS_INLINE
+static inline void
 _pthread_struct_init(pthread_t t,
                     const pthread_attr_t *attrs,
                     void *stackaddr,
@@ -780,7 +801,7 @@ _pthread_is_threaded(void)
        return __is_threaded;
 }
 
-/* Non portable public api to know whether this process has(had) atleast one thread 
+/* Non portable public api to know whether this process has (had) at least one thread
  * apart from the main thread. There could be a race if a thread is being created at the
  * time of the call. It does not tell whether there is more than one thread at this
  * point in time.
@@ -791,25 +812,17 @@ pthread_is_threaded_np(void)
        return __is_threaded;
 }
 
+
+PTHREAD_NOEXPORT_VARIANT
 mach_port_t
 pthread_mach_thread_np(pthread_t t)
 {
        mach_port_t kport = MACH_PORT_NULL;
-
-       if (t == pthread_self()) {
-               /*
-                * If the call is on self, return the kernel port. We cannot
-                * add this bypass for main thread as it might have exited,
-                * and we should not return stale port info.
-                */
-               kport = _pthread_kernel_thread(t);
-       } else {
-               (void)_pthread_lookup_thread(t, &kport, 0);
-       }
-
+       (void)_pthread_is_valid(t, 0, &kport);
        return kport;
 }
 
+PTHREAD_NOEXPORT_VARIANT
 pthread_t
 pthread_from_mach_thread_np(mach_port_t kernel_thread)
 {
@@ -829,10 +842,10 @@ pthread_from_mach_thread_np(mach_port_t kernel_thread)
        return p;
 }
 
+PTHREAD_NOEXPORT_VARIANT
 size_t
 pthread_get_stacksize_np(pthread_t t)
 {
-       int ret;
        size_t size = 0;
 
        if (t == NULL) {
@@ -853,16 +866,16 @@ pthread_get_stacksize_np(pthread_t t)
        if (t == &_thread && t->stacksize + vm_page_size != t->freesize) {
                // We want to call getrlimit() just once, as it's relatively expensive
                static size_t rlimit_stack;
-               
+
                if (rlimit_stack == 0) {
                        struct rlimit limit;
                        int ret = getrlimit(RLIMIT_STACK, &limit);
-                       
+
                        if (ret == 0) {
                                rlimit_stack = (size_t) limit.rlim_cur;
                        }
                }
-               
+
                if (rlimit_stack == 0 || rlimit_stack > t->freesize) {
                        return t->stacksize;
                } else {
@@ -877,11 +890,10 @@ pthread_get_stacksize_np(pthread_t t)
 
        _PTHREAD_LOCK(_pthread_list_lock);
 
-       ret = _pthread_find_thread(t);
-       if (ret == 0) {
+       if (_pthread_is_valid_locked(t)) {
                size = t->stacksize;
        } else {
-               size = ret; // XXX bug?
+               size = ESRCH; // XXX bug?
        }
 
        _PTHREAD_UNLOCK(_pthread_list_lock);
@@ -889,16 +901,16 @@ pthread_get_stacksize_np(pthread_t t)
        return size;
 }
 
+PTHREAD_NOEXPORT_VARIANT
 void *
 pthread_get_stackaddr_np(pthread_t t)
 {
-       int ret;
        void *addr = NULL;
 
        if (t == NULL) {
                return (void *)(uintptr_t)ESRCH; // XXX bug?
        }
-       
+
        // since the main thread will not get de-allocated from underneath us
        if (t == pthread_self() || t == &_thread) {
                return t->stackaddr;
@@ -906,11 +918,10 @@ pthread_get_stackaddr_np(pthread_t t)
 
        _PTHREAD_LOCK(_pthread_list_lock);
 
-       ret = _pthread_find_thread(t);
-       if (ret == 0) {
+       if (_pthread_is_valid_locked(t)) {
                addr = t->stackaddr;
        } else {
-               addr = (void *)(uintptr_t)ret; // XXX bug?
+               addr = (void *)(uintptr_t)ESRCH; // XXX bug?
        }
 
        _PTHREAD_UNLOCK(_pthread_list_lock);
@@ -918,6 +929,7 @@ pthread_get_stackaddr_np(pthread_t t)
        return addr;
 }
 
+
 static mach_port_t
 _pthread_reply_port(pthread_t t)
 {
@@ -950,6 +962,28 @@ _pthread_dealloc_reply_port(pthread_t t)
        }
 }
 
+static mach_port_t
+_pthread_special_reply_port(pthread_t t)
+{
+       void *p;
+       if (t == NULL) {
+               p = _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY);
+       } else {
+               p = t->tsd[_PTHREAD_TSD_SLOT_MACH_SPECIAL_REPLY];
+       }
+       return (mach_port_t)(uintptr_t)p;
+}
+
+static void
+_pthread_dealloc_special_reply_port(pthread_t t)
+{
+       mach_port_t special_reply_port = _pthread_special_reply_port(t);
+       if (special_reply_port != MACH_PORT_NULL) {
+               mach_port_mod_refs(mach_task_self(), special_reply_port,
+                               MACH_PORT_RIGHT_RECEIVE, -1);
+       }
+}
+
 pthread_t
 pthread_main_thread_np(void)
 {
@@ -968,9 +1002,10 @@ pthread_main_np(void)
 
 /* if we are passed in a pthread_t that is NULL, then we return
    the current thread's thread_id. So folks don't have to call
-   pthread_self, in addition to us doing it, if they just want 
+   pthread_self, in addition to us doing it, if they just want
    their thread_id.
 */
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
 {
@@ -985,8 +1020,11 @@ pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
                *thread_id = self->thread_id;
        } else {
                _PTHREAD_LOCK(_pthread_list_lock);
-               res = _pthread_find_thread(thread);
-               if (res == 0) {
+               if (!_pthread_is_valid_locked(thread)) {
+                       res = ESRCH;
+               } else if (thread->thread_id == 0) {
+                       res = EINVAL;
+               } else {
                        *thread_id = thread->thread_id;
                }
                _PTHREAD_UNLOCK(_pthread_list_lock);
@@ -994,24 +1032,27 @@ pthread_threadid_np(pthread_t thread, uint64_t *thread_id)
        return res;
 }
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_getname_np(pthread_t thread, char *threadname, size_t len)
 {
-       int res;
+       int res = 0;
 
        if (thread == NULL) {
                return ESRCH;
        }
 
        _PTHREAD_LOCK(_pthread_list_lock);
-       res = _pthread_find_thread(thread);
-       if (res == 0) {
+       if (_pthread_is_valid_locked(thread)) {
                strlcpy(threadname, thread->pthread_name, len);
+       } else {
+               res = ESRCH;
        }
        _PTHREAD_UNLOCK(_pthread_list_lock);
        return res;
 }
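Usage sketch covering both accessors rewritten above (passing NULL takes the fast path that avoids the list lock):

#include <stdio.h>
#include <pthread.h>

static void
print_self(void)
{
	uint64_t tid = 0;
	char name[64] = "";
	if (pthread_threadid_np(NULL, &tid) == 0) {
		(void)pthread_getname_np(pthread_self(), name, sizeof(name));
		printf("thread %llu (%s)\n", (unsigned long long)tid, name);
	}
}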
 
+
 int
 pthread_setname_np(const char *name)
 {
@@ -1038,12 +1079,19 @@ pthread_setname_np(const char *name)
 
 PTHREAD_ALWAYS_INLINE
 static inline void
-__pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread)
+__pthread_add_thread(pthread_t t, const pthread_attr_t *attrs,
+               bool parent, bool from_mach_thread)
 {
        bool should_deallocate = false;
        bool should_add = true;
 
-       if (from_mach_thread){
+       mach_port_t kport = _pthread_kernel_thread(t);
+       if (os_slowpath(!MACH_PORT_VALID(kport))) {
+               PTHREAD_CLIENT_CRASH(kport,
+                               "Unable to allocate thread port, possible port leak");
+       }
+
+       if (from_mach_thread) {
                _PTHREAD_LOCK_FROM_MACH_THREAD(_pthread_list_lock);
        } else {
                _PTHREAD_LOCK(_pthread_list_lock);
@@ -1086,6 +1134,14 @@ __pthread_add_thread(pthread_t t, bool parent, bool from_mach_thread)
        if (should_add) {
                TAILQ_INSERT_TAIL(&__pthread_head, t, plist);
                _pthread_count++;
+
+               /*
+                * Set some initial values in the pthread structure that we already
+                * know, in case anyone tries to read them before the thread itself
+                * has set them.
+                */
+               if (parent && attrs && attrs->schedset == 0) {
+                       t->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = attrs->qosclass;
+               }
        }
 
        if (from_mach_thread){
@@ -1118,7 +1174,7 @@ static inline int
 __pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
 {
        int ret = 0;
-       
+
        bool should_remove = true;
 
        _PTHREAD_LOCK(_pthread_list_lock);
@@ -1132,7 +1188,7 @@ __pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
        //  - Update the running thread count.
        // When another thread removes a joinable thread:
        //  - CAREFUL not to dereference the thread before verifying that the
-       //    reference is still valid using _pthread_find_thread().
+       //    reference is still valid using _pthread_is_valid_locked().
        //  - Remove the thread from the list.
 
        if (child) {
@@ -1145,22 +1201,25 @@ __pthread_remove_thread(pthread_t t, bool child, bool *should_exit)
                        should_remove = false;
                }
                *should_exit = (--_pthread_count <= 0);
-       } else {
-               ret = _pthread_find_thread(t);
-               if (ret == 0) {
-                       // If we found a thread but it's not joinable, bail.
-                       if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
-                               should_remove = false;
-                               ret = ESRCH;
-                       }
-               }
+       } else if (!_pthread_is_valid_locked(t)) {
+               ret = ESRCH;
+               should_remove = false;
+       } else if ((t->detached & PTHREAD_CREATE_JOINABLE) == 0) {
+               // If we found a thread but it's not joinable, bail.
+               ret = ESRCH;
+               should_remove = false;
+       } else if (t->parentcheck == 0) {
+               // If we're not the child thread *and* the parent has not finished
+               // creating the thread yet, then we are another thread that's joining
+               // and we cannot deallocate the pthread.
+               ret = EBUSY;
        }
        if (should_remove) {
                TAILQ_REMOVE(&__pthread_head, t, plist);
        }
 
        _PTHREAD_UNLOCK(_pthread_list_lock);
-       
+
        return ret;
 }
 
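Net effect of the new EBUSY branch: a joiner that races the creating parent (parentcheck still zero) unlinks the thread from the list but must leave the pthread_t allocated, since the parent will still touch it after __bsdthread_create returns; _pthread_join_cleanup below honors this by skipping _pthread_deallocate when it sees EBUSY.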
@@ -1218,6 +1277,10 @@ _pthread_create(pthread_t *thread,
        pthread_t t2;
        t2 = __bsdthread_create(start_routine, arg, stack, t, flags);
        if (t2 == (pthread_t)-1) {
+               if (errno == EMFILE) {
+                       PTHREAD_CLIENT_CRASH(0,
+                                       "Unable to allocate thread port, possible port leak");
+               }
                if (flags & PTHREAD_START_CUSTOM) {
                        // free the thread and stack if we allocated it
                        _pthread_deallocate(t);
@@ -1228,7 +1291,7 @@ _pthread_create(pthread_t *thread,
                t = t2;
        }
 
-       __pthread_add_thread(t, true, from_mach_thread);
+       __pthread_add_thread(t, attrs, true, from_mach_thread);
 
        // n.b. if a thread is created detached and exits, t will be invalid
        *thread = t;
@@ -1253,11 +1316,12 @@ pthread_create_from_mach_thread(pthread_t *thread,
        return _pthread_create(thread, attr, start_routine, arg, true);
 }
 
+PTHREAD_NORETURN
 static void
 _pthread_suspended_body(pthread_t self)
 {
        _pthread_set_self(self);
-       __pthread_add_thread(self, false, false);
+       __pthread_add_thread(self, NULL, false, false);
        _pthread_exit(self, (self->fun)(self->arg));
 }
 
@@ -1283,7 +1347,7 @@ pthread_create_suspended_np(pthread_t *thread,
        if (res) {
                return res;
        }
-               
+
        *thread = t;
 
        kern_return_t kr;
@@ -1295,44 +1359,45 @@ pthread_create_suspended_np(pthread_t *thread,
 
        _pthread_set_kernel_thread(t, kernel_thread);
        (void)pthread_setschedparam_internal(t, kernel_thread, t->policy, &t->param);
-               
+
        __is_threaded = 1;
 
        t->arg = arg;
        t->fun = start_routine;
 
-       __pthread_add_thread(t, true, false);
+       t->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
+       __pthread_add_thread(t, NULL, true, false);
 
        // Set up a suspended thread.
        _pthread_setup(t, _pthread_suspended_body, stack, 1, 0);
        return res;
 }
 
-int       
+
+PTHREAD_NOEXPORT_VARIANT
+int
 pthread_detach(pthread_t thread)
 {
-       int res;
+       int res = 0;
        bool join = false;
        semaphore_t sema = SEMAPHORE_NULL;
 
-       res = _pthread_lookup_thread(thread, NULL, 1);
-       if (res) {
-               return res; // Not a valid thread to detach.
+       if (!_pthread_is_valid(thread, PTHREAD_IS_VALID_LOCK_THREAD, NULL)) {
+               return ESRCH; // Not a valid thread to detach.
        }
 
-       _PTHREAD_LOCK(thread->lock);
-       if (thread->detached & PTHREAD_CREATE_JOINABLE) {
-               if (thread->detached & _PTHREAD_EXITED) {
-                       // Join the thread if it's already exited.
-                       join = true;
-               } else {
-                       thread->detached &= ~PTHREAD_CREATE_JOINABLE;
-                       thread->detached |= PTHREAD_CREATE_DETACHED;
-                       sema = thread->joiner_notify;
-               }
-       } else {
+       if ((thread->detached & PTHREAD_CREATE_DETACHED) ||
+                       !(thread->detached & PTHREAD_CREATE_JOINABLE)) {
                res = EINVAL;
+       } else if (thread->detached & _PTHREAD_EXITED) {
+               // Join the thread if it's already exited.
+               join = true;
+       } else {
+               thread->detached &= ~PTHREAD_CREATE_JOINABLE;
+               thread->detached |= PTHREAD_CREATE_DETACHED;
+               sema = thread->joiner_notify;
        }
+
        _PTHREAD_UNLOCK(thread->lock);
 
        if (join) {
@@ -1344,15 +1409,16 @@ pthread_detach(pthread_t thread)
        return res;
 }
 
-int   
+PTHREAD_NOEXPORT_VARIANT
+int
 pthread_kill(pthread_t th, int sig)
-{      
+{
        if (sig < 0 || sig > NSIG) {
                return EINVAL;
        }
 
        mach_port_t kport = MACH_PORT_NULL;
-       if (_pthread_lookup_thread(th, &kport, 0) != 0) {
+       if (!_pthread_is_valid(th, 0, &kport)) {
                return ESRCH; // Not a valid thread.
        }
 
@@ -1369,7 +1435,8 @@ pthread_kill(pthread_t th, int sig)
        return ret;
 }
 
-int 
+PTHREAD_NOEXPORT_VARIANT
+int
 __pthread_workqueue_setkill(int enable)
 {
        pthread_t self = pthread_self();
@@ -1381,18 +1448,6 @@ __pthread_workqueue_setkill(int enable)
        return 0;
 }
 
-static void *
-__pthread_get_exit_value(pthread_t t, int conforming)
-{
-       const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
-       void *value = t->exit_value;
-       if (conforming) {
-               if ((t->cancel_state & flags) == flags) {
-                       value = PTHREAD_CANCELED;
-               }
-       }
-       return value;
-}
 
 /* For compatibility... */
 
@@ -1407,7 +1462,7 @@ _pthread_self(void) {
 int __disable_threadsignal(int);
 
 PTHREAD_NORETURN
-static void 
+static void
 _pthread_exit(pthread_t self, void *value_ptr)
 {
        struct __darwin_pthread_handler_rec *handler;
@@ -1437,7 +1492,7 @@ _pthread_exit(pthread_t self, void *value_ptr)
        // Clear per-thread semaphore cache
        os_put_cached_semaphore(SEMAPHORE_NULL);
 
-       _pthread_terminate(self);
+       _pthread_terminate_invoke(self);
 }
 
 void
@@ -1451,27 +1506,30 @@ pthread_exit(void *value_ptr)
        }
 }
 
-int       
-pthread_getschedparam(pthread_t thread, 
+
+PTHREAD_NOEXPORT_VARIANT
+int
+pthread_getschedparam(pthread_t thread,
                      int *policy,
                      struct sched_param *param)
 {
-       int ret;
+       int ret = 0;
 
        if (thread == NULL) {
                return ESRCH;
        }
-       
+
        _PTHREAD_LOCK(_pthread_list_lock);
 
-       ret = _pthread_find_thread(thread);
-       if (ret == 0) {
+       if (_pthread_is_valid_locked(thread)) {
                if (policy) {
                        *policy = thread->policy;
                }
                if (param) {
                        *param = thread->param;
                }
+       } else {
+               ret = ESRCH;
        }
 
        _PTHREAD_UNLOCK(_pthread_list_lock);
@@ -1479,8 +1537,10 @@ pthread_getschedparam(pthread_t thread,
        return ret;
 }
 
-static int       
-pthread_setschedparam_internal(pthread_t thread, 
+
+PTHREAD_ALWAYS_INLINE
+static inline int
+pthread_setschedparam_internal(pthread_t thread,
                      mach_port_t kport,
                      int policy,
                      const struct sched_param *param)
@@ -1515,7 +1575,9 @@ pthread_setschedparam_internal(pthread_t thread,
        return (ret != KERN_SUCCESS) ? EINVAL : 0;
 }
 
-int       
+
+PTHREAD_NOEXPORT_VARIANT
+int
 pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
 {
        mach_port_t kport = MACH_PORT_NULL;
@@ -1523,22 +1585,23 @@ pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
        int bypass = 1;
 
        // since the main thread will not get de-allocated from underneath us
-       if (t == pthread_self() || t == &_thread ) {
+       if (t == pthread_self() || t == &_thread) {
                kport = _pthread_kernel_thread(t);
        } else {
                bypass = 0;
-               (void)_pthread_lookup_thread(t, &kport, 0);
+               (void)_pthread_is_valid(t, 0, &kport);
        }
-       
+
        res = pthread_setschedparam_internal(t, kport, policy, param);
        if (res == 0) {
                if (bypass == 0) {
                        // Ensure the thread is still valid.
                        _PTHREAD_LOCK(_pthread_list_lock);
-                       res = _pthread_find_thread(t);
-                       if (res == 0) {
+                       if (_pthread_is_valid_locked(t)) {
                                t->policy = policy;
                                t->param = *param;
+                       } else {
+                               res = ESRCH;
                        }
                        _PTHREAD_UNLOCK(_pthread_list_lock);
                }  else {
@@ -1549,6 +1612,7 @@ pthread_setschedparam(pthread_t t, int policy, const struct sched_param *param)
        return res;
 }
 
+
 int
 sched_get_priority_min(int policy)
 {
@@ -1561,13 +1625,13 @@ sched_get_priority_max(int policy)
        return default_priority + 16;
 }
 
-int       
+int
 pthread_equal(pthread_t t1, pthread_t t2)
 {
        return (t1 == t2);
 }
 
-/* 
+/*
  * Force LLVM not to optimise this to a call to __pthread_set_self, if it does
  * then _pthread_set_self won't be bound when secondary threads try and start up.
  */
@@ -1578,7 +1642,8 @@ _pthread_set_self(pthread_t p)
        return _pthread_set_self_internal(p, true);
 }
 
-void
+PTHREAD_ALWAYS_INLINE
+static inline void
 _pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
 {
        if (p == NULL) {
@@ -1599,6 +1664,18 @@ _pthread_set_self_internal(pthread_t p, bool needs_tsd_base_set)
        }
 }
 
+
+// <rdar://problem/28984807> pthread_once should have an acquire barrier
+PTHREAD_ALWAYS_INLINE
+static inline void
+_os_once_acquire(os_once_t *predicate, void *context, os_function_t function)
+{
+       if (OS_EXPECT(os_atomic_load(predicate, acquire), ~0l) != ~0l) {
+               _os_once(predicate, context, function);
+               OS_COMPILER_CAN_ASSUME(*predicate == ~0l);
+       }
+}
+
 struct _pthread_once_context {
        pthread_once_t *pthread_once;
        void (*routine)(void);
@@ -1614,41 +1691,17 @@ __pthread_once_handler(void *context)
        ctx->pthread_once->sig = _PTHREAD_ONCE_SIG;
 }
 
-int       
+PTHREAD_NOEXPORT_VARIANT
+int
 pthread_once(pthread_once_t *once_control, void (*init_routine)(void))
 {
        struct _pthread_once_context ctx = { once_control, init_routine };
        do {
-               os_once(&once_control->once, &ctx, __pthread_once_handler);
+               _os_once_acquire(&once_control->once, &ctx, __pthread_once_handler);
        } while (once_control->sig == _PTHREAD_ONCE_SIG_init);
        return 0;
 }
 
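Why the acquire load matters: without it, a thread that sees the predicate already set could read the guarded data before the initializer's writes become visible to it. Standard usage that now gets this guarantee (sketch):

#include <pthread.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;
static int table[4];

static void
init_table(void)
{
	for (int i = 0; i < 4; i++) table[i] = i * i;
}

static int
lookup(int i)
{
	/* The acquire barrier orders these reads after init_table()'s writes. */
	pthread_once(&once, init_table);
	return table[i];
}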
-void
-_pthread_testcancel(pthread_t thread, int isconforming)
-{
-       const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
-
-       _PTHREAD_LOCK(thread->lock);
-       bool canceled = ((thread->cancel_state & flags) == flags);
-       _PTHREAD_UNLOCK(thread->lock);
-       
-       if (canceled) {
-               pthread_exit(isconforming ? PTHREAD_CANCELED : 0);
-       }
-}
-
-void
-_pthread_exit_if_canceled(int error)
-{
-       if (__unix_conforming && ((error & 0xff) == EINTR) && (__pthread_canceled(0) == 0)) {
-               pthread_t self = pthread_self();
-               if (self != NULL) {
-                       self->cancel_error = error;
-               }
-               pthread_exit(PTHREAD_CANCELED);
-       }
-}
 
 int
 pthread_getconcurrency(void)
@@ -1670,7 +1723,7 @@ static unsigned long
 _pthread_strtoul(const char *p, const char **endptr, int base)
 {
        uintptr_t val = 0;
-       
+
        // Expect hex string starting with "0x"
        if ((base == 16 || base == 0) && p && p[0] == '0' && p[1] == 'x') {
                p += 2;
@@ -1823,26 +1876,28 @@ __pthread_init(const struct _libpthread_functions *pthread_funcs,
        // Finishes initialization of main thread attributes.
        // Initializes the thread list and add the main thread.
        // Calls _pthread_set_self() to prepare the main thread for execution.
-       __pthread_fork_child_internal(thread);
-       
+       _pthread_main_thread_init(thread);
+
        // Set up kernel entry points with __bsdthread_register.
-       pthread_workqueue_atfork_child();
+       _pthread_bsdthread_init();
 
        // Have pthread_key do its init envvar checks.
        _pthread_key_global_init(envp);
 
-       return 0;
-}
+#if PTHREAD_DEBUG_LOG
+       _SIMPLE_STRING path = _simple_salloc();
+       _simple_sprintf(path, "/var/tmp/libpthread.%d.log", getpid());
+       _pthread_debuglog = open(_simple_string(path),
+                       O_WRONLY | O_APPEND | O_CREAT | O_NOFOLLOW | O_CLOEXEC, 0666);
+       _simple_sfree(path);
+       _pthread_debugstart = mach_absolute_time();
+#endif
 
-int
-sched_yield(void)
-{
-    swtch_pri(0);
-    return 0;
+       return 0;
 }
 
 PTHREAD_NOEXPORT void
-__pthread_fork_child_internal(pthread_t p)
+_pthread_main_thread_init(pthread_t p)
 {
        TAILQ_INIT(&__pthread_head);
        _PTHREAD_LOCK_INIT(_pthread_list_lock);
@@ -1862,7 +1917,8 @@ __pthread_fork_child_internal(pthread_t p)
        p->joiner_notify = SEMAPHORE_NULL;
        p->joiner = MACH_PORT_NULL;
        p->detached |= _PTHREAD_CREATE_PARENT;
-       p->tsd[__TSD_SEMAPHORE_CACHE] = SEMAPHORE_NULL;
+       p->tsd[__TSD_SEMAPHORE_CACHE] = (void*)SEMAPHORE_NULL;
+       p->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
 
        // Initialize the list of threads with the new main thread.
        TAILQ_INSERT_HEAD(&__pthread_head, p, plist);
@@ -1872,129 +1928,51 @@ __pthread_fork_child_internal(pthread_t p)
        _pthread_introspection_thread_start(p);
 }
 
-/*
- * Query/update the cancelability 'state' of a thread
- */
-PTHREAD_NOEXPORT int
-_pthread_setcancelstate_internal(int state, int *oldstate, int conforming)
-{
-       pthread_t self;
-
-       switch (state) {
-               case PTHREAD_CANCEL_ENABLE:
-                       if (conforming) {
-                               __pthread_canceled(1);
-                       }
-                       break;
-               case PTHREAD_CANCEL_DISABLE:
-                       if (conforming) {
-                               __pthread_canceled(2);
-                       }
-                       break;
-               default:
-                       return EINVAL;
-       }
-
-       self = pthread_self();
-       _PTHREAD_LOCK(self->lock);
-       if (oldstate) {
-               *oldstate = self->cancel_state & _PTHREAD_CANCEL_STATE_MASK;
-       }
-       self->cancel_state &= ~_PTHREAD_CANCEL_STATE_MASK;
-       self->cancel_state |= state;
-       _PTHREAD_UNLOCK(self->lock);
-       if (!conforming) {
-               _pthread_testcancel(self, 0);  /* See if we need to 'die' now... */
-       }
-       return 0;
-}
-
-/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
-static void
-_pthread_setcancelstate_exit(pthread_t self, void * value_ptr, int conforming)
-{
-       _PTHREAD_LOCK(self->lock);
-       self->cancel_state &= ~(_PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK);
-       self->cancel_state |= (PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
-       if (value_ptr == PTHREAD_CANCELED) {
-// 4597450: begin
-               self->detached |= _PTHREAD_WASCANCEL;
-// 4597450: end
-       }
-       _PTHREAD_UNLOCK(self->lock);
-}
-
 int
 _pthread_join_cleanup(pthread_t thread, void ** value_ptr, int conforming)
 {
-       // Returns ESRCH if the thread was not created joinable.
        int ret = __pthread_remove_thread(thread, false, NULL);
-       if (ret != 0) {
+       if (ret != 0 && ret != EBUSY) {
+               // Returns ESRCH if the thread was not created joinable.
                return ret;
        }
-       
+
        if (value_ptr) {
-               *value_ptr = __pthread_get_exit_value(thread, conforming);
+               *value_ptr = _pthread_get_exit_value(thread, conforming);
        }
        _pthread_introspection_thread_destroy(thread);
-       _pthread_deallocate(thread);
+       if (ret != EBUSY) {
+               // __pthread_remove_thread returns EBUSY if the parent has not
+               // finished creating the thread (and is still expecting the pthread_t
+               // to be alive).
+               _pthread_deallocate(thread);
+       }
        return 0;
 }
 
-/* ALWAYS called with list lock and return with list lock */
 int
-_pthread_find_thread(pthread_t thread)
+sched_yield(void)
 {
-       if (thread != NULL) {
-               pthread_t p;
-loop:
-               TAILQ_FOREACH(p, &__pthread_head, plist) {
-                       if (p == thread) {
-                               if (_pthread_kernel_thread(thread) == MACH_PORT_NULL) {
-                                       _PTHREAD_UNLOCK(_pthread_list_lock);
-                                       sched_yield();
-                                       _PTHREAD_LOCK(_pthread_list_lock);
-                                       goto loop;
-                               } 
-                               return 0;
-                       }
-               }
-       }
-       return ESRCH;
+    swtch_pri(0);
+    return 0;
 }
 
-int
-_pthread_lookup_thread(pthread_t thread, mach_port_t *portp, int only_joinable)
+// XXX remove
+void
+cthread_yield(void)
 {
-       mach_port_t kport = MACH_PORT_NULL;
-       int ret;
-
-       if (thread == NULL) {
-               return ESRCH;
-       }
-       
-       _PTHREAD_LOCK(_pthread_list_lock);
-       
-       ret = _pthread_find_thread(thread);
-       if (ret == 0) {
-               // Fail if we only want joinable threads and the thread found is
-               // not in the detached state.
-               if (only_joinable != 0 && (thread->detached & PTHREAD_CREATE_DETACHED) != 0) {
-                       ret = EINVAL;
-               } else {
-                       kport = _pthread_kernel_thread(thread);
-               }
-       }
-       
-       _PTHREAD_UNLOCK(_pthread_list_lock);
-       
-       if (portp != NULL) {
-               *portp = kport;
-       }
+       sched_yield();
+}
 
-       return ret;
+void
+pthread_yield_np(void)
+{
+       sched_yield();
 }
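
All three entry points now funnel into sched_yield(), which issues the Mach swtch_pri(0) trap to give up the remainder of the current quantum. A sketch of the classic use case, backing off inside a userspace spin loop; the atomic flag and the spin bound are illustrative assumptions:

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic bool ready;

    static void
    wait_until_ready(void)
    {
            int spins = 0;
            while (!atomic_load_explicit(&ready, memory_order_acquire)) {
                    if (++spins > 1000) {   /* arbitrary backoff threshold */
                            sched_yield();  /* donate the rest of the quantum */
                            spins = 0;
                    }
            }
    }
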
 
+
+
+PTHREAD_NOEXPORT_VARIANT
 void
 _pthread_clear_qos_tsd(mach_port_t thread_port)
 {
@@ -2018,15 +1996,18 @@ _pthread_clear_qos_tsd(mach_port_t thread_port)
        }
 }
 
+
 /***** pthread workqueue support routines *****/
 
 PTHREAD_NOEXPORT void
-pthread_workqueue_atfork_child(void)
+_pthread_bsdthread_init(void)
 {
        struct _pthread_registration_data data = {};
        data.version = sizeof(struct _pthread_registration_data);
        data.dispatch_queue_offset = __PTK_LIBDISPATCH_KEY0 * sizeof(void *);
+       data.return_to_kernel_offset = __TSD_RETURN_TO_KERNEL * sizeof(void *);
        data.tsd_offset = offsetof(struct _pthread, tsd);
+       data.mach_thread_self_offset = __TSD_MACH_THREAD_SELF * sizeof(void *);
 
        int rv = __bsdthread_register(thread_start,
                        start_wqthread, (int)PTHREAD_SIZE,
@@ -2034,6 +2015,14 @@ pthread_workqueue_atfork_child(void)
                        data.dispatch_queue_offset);
 
        if (rv > 0) {
+               if ((rv & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
+                       PTHREAD_INTERNAL_CRASH(rv,
+                                       "Missing required support for QOS_CLASS_DEFAULT");
+               }
+               if ((rv & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
+                       PTHREAD_INTERNAL_CRASH(rv,
+                                       "Missing required support for QOS_CLASS_MAINTENANCE");
+               }
                __pthread_supported_features = rv;
        }
 
@@ -2051,16 +2040,19 @@ pthread_workqueue_atfork_child(void)
 }
 
 // workqueue entry point from kernel
+PTHREAD_NORETURN
 void
 _pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *keventlist, int flags, int nkevents)
 {
        PTHREAD_ASSERT(flags & WQ_FLAG_THREAD_NEWSPI);
 
-       int thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
-       int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
-       int overcommit = (flags & WQ_FLAG_THREAD_OVERCOMMIT) != 0;
-       int kevent = flags & WQ_FLAG_THREAD_KEVENT;
+       bool thread_reuse = flags & WQ_FLAG_THREAD_REUSE;
+       bool overcommit = flags & WQ_FLAG_THREAD_OVERCOMMIT;
+       bool kevent = flags & WQ_FLAG_THREAD_KEVENT;
+       bool workloop = (flags & WQ_FLAG_THREAD_WORKLOOP) &&
+                       __libdispatch_workloopfunction != NULL;
        PTHREAD_ASSERT((!kevent) || (__libdispatch_keventfunction != NULL));
+       PTHREAD_ASSERT(!workloop || kevent);
 
        pthread_priority_t priority = 0;
        unsigned long priority_flags = 0;
@@ -2072,13 +2064,10 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *k
        if (kevent)
                priority_flags |= _PTHREAD_PRIORITY_NEEDS_UNBIND_FLAG;
 
-       if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
-               priority = _pthread_priority_make_version2(thread_class, 0, priority_flags);
-       } else {
-               priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);
-       }
+       int thread_class = flags & WQ_FLAG_THREAD_PRIOMASK;
+       priority = _pthread_priority_make_newest(thread_class, 0, priority_flags);
 
-       if (thread_reuse == 0) {
+       if (!thread_reuse) {
                // New thread created by kernel, needs initialization.
                void *stackaddr = self;
                size_t stacksize = (uintptr_t)self - (uintptr_t)stacklowaddr;
@@ -2090,6 +2079,7 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *k
                _pthread_set_kernel_thread(self, kport);
                self->wqthread = 1;
                self->wqkillset = 0;
+               self->cancel_state |= _PTHREAD_CANCEL_INITIALIZED;
 
                // Not a joinable thread.
                self->detached &= ~PTHREAD_CREATE_JOINABLE;
@@ -2099,7 +2089,7 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *k
                bool thread_tsd_base_set = (bool)(flags & WQ_FLAG_THREAD_TSD_BASE_SET);
                _pthread_set_self_internal(self, !thread_tsd_base_set);
                _pthread_introspection_thread_create(self, false);
-               __pthread_add_thread(self, false, false);
+               __pthread_add_thread(self, NULL, false, false);
        }
 
        // If we're running with fine-grained priority, we also need to
@@ -2113,18 +2103,30 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *k
        PTHREAD_ASSERT(self == pthread_self());
 #endif // WQ_DEBUG
 
-       if (kevent){
+       if (workloop) {
+               self->fun = (void *(*)(void*))__libdispatch_workloopfunction;
+       } else if (kevent){
                self->fun = (void *(*)(void*))__libdispatch_keventfunction;
        } else {
-               self->fun = (void *(*)(void *))__libdispatch_workerfunction;
+               self->fun = (void *(*)(void*))__libdispatch_workerfunction;
        }
        self->arg = (void *)(uintptr_t)thread_class;
 
        if (kevent && keventlist && nkevents > 0){
+               int errors_out;
        kevent_errors_retry:
-               (*__libdispatch_keventfunction)(&keventlist, &nkevents);
 
-               int errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
+               if (workloop) {
+                       kqueue_id_t kevent_id = *(kqueue_id_t*)((char*)keventlist - sizeof(kqueue_id_t));
+                       kqueue_id_t kevent_id_in = kevent_id;
+                       (__libdispatch_workloopfunction)(&kevent_id, &keventlist, &nkevents);
+                       PTHREAD_ASSERT(kevent_id == kevent_id_in || nkevents == 0);
+                       errors_out = __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, keventlist, nkevents, 0);
+               } else {
+                       (__libdispatch_keventfunction)(&keventlist, &nkevents);
+                       errors_out = __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, keventlist, nkevents, 0);
+               }
+
                if (errors_out > 0){
                        nkevents = errors_out;
                        goto kevent_errors_retry;
@@ -2133,9 +2135,14 @@ _pthread_wqthread(pthread_t self, mach_port_t kport, void *stacklowaddr, void *k
                }
                goto thexit;
        } else if (kevent){
-               (*__libdispatch_keventfunction)(NULL, NULL);
+               if (workloop) {
+                       (__libdispatch_workloopfunction)(0, NULL, NULL);
+                       __workq_kernreturn(WQOPS_THREAD_WORKLOOP_RETURN, NULL, 0, -1);
+               } else {
+                       (__libdispatch_keventfunction)(NULL, NULL);
+                       __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
+               }
 
-               __workq_kernreturn(WQOPS_THREAD_KEVENT_RETURN, NULL, 0, 0);
                goto thexit;
        }
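
The workloop branch threads a kqueue identifier through the callback: the ID is stashed in memory immediately before keventlist, handed to __libdispatch_workloopfunction, and must come back unchanged unless the callback reports zero events. A sketch of the callback shape these call sites imply; the function name is hypothetical and the local kqueue_id_t typedef merely stands in for the kernel's definition:

    #include <stdint.h>

    typedef uint64_t kqueue_id_t;   /* stand-in for the kernel's type */

    /* Hypothetical callback matching the calls above; real registration
     * would go through _pthread_workqueue_init_with_workloop(). */
    static void
    my_workloop_function(kqueue_id_t *id, void **events, int *nevents)
    {
            if (events == NULL) {
                    return;         /* no event list: plain run request */
            }
            /* Handle *nevents kevents for workloop *id, then report back
             * any that could not be processed. *id must stay unchanged
             * unless *nevents is set to 0, matching the assertion above. */
            *nevents = 0;
    }
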
 
@@ -2208,7 +2215,7 @@ thexit:
                if ((current_priority & _PTHREAD_PRIORITY_EVENT_MANAGER_FLAG) ||
                        (_pthread_priority_get_qos_newest(current_priority) > WQ_THREAD_CLEANUP_QOS)) {
                        // Reset QoS to something low for the cleanup process
-                       pthread_priority_t priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
+                       priority = _pthread_priority_make_newest(WQ_THREAD_CLEANUP_QOS, 0, 0);
                        _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
                }
        }
@@ -2218,14 +2225,19 @@ thexit:
 
 /***** pthread workqueue API for libdispatch *****/
 
+_Static_assert(WORKQ_KEVENT_EVENT_BUFFER_LEN == WQ_KEVENT_LIST_LEN,
+               "Kernel and userland should agree on the event list size");
+
 void
 pthread_workqueue_setdispatchoffset_np(int offset)
 {
        __libdispatch_offset = offset;
 }
 
-int
-pthread_workqueue_setdispatch_with_kevent_np(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func)
+static int
+pthread_workqueue_setdispatch_with_workloop_np(pthread_workqueue_function2_t queue_func,
+               pthread_workqueue_function_kevent_t kevent_func,
+               pthread_workqueue_function_workloop_t workloop_func)
 {
        int res = EBUSY;
        if (__libdispatch_workerfunction == NULL) {
@@ -2236,6 +2248,7 @@ pthread_workqueue_setdispatch_with_kevent_np(pthread_workqueue_function2_t queue
                } else {
                        __libdispatch_workerfunction = queue_func;
                        __libdispatch_keventfunction = kevent_func;
+                       __libdispatch_workloopfunction = workloop_func;
 
                        // Prepare the kernel for workq action
                        (void)__workq_open();
@@ -2248,19 +2261,30 @@ pthread_workqueue_setdispatch_with_kevent_np(pthread_workqueue_function2_t queue
 }
 
 int
-_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func, pthread_workqueue_function_kevent_t kevent_func, int offset, int flags)
+_pthread_workqueue_init_with_workloop(pthread_workqueue_function2_t queue_func,
+               pthread_workqueue_function_kevent_t kevent_func,
+               pthread_workqueue_function_workloop_t workloop_func,
+               int offset, int flags)
 {
        if (flags != 0) {
                return ENOTSUP;
        }
-       
+
        __workq_newapi = true;
        __libdispatch_offset = offset;
-       
-       int rv = pthread_workqueue_setdispatch_with_kevent_np(queue_func, kevent_func);
+
+       int rv = pthread_workqueue_setdispatch_with_workloop_np(queue_func, kevent_func, workloop_func);
        return rv;
 }
 
+int
+_pthread_workqueue_init_with_kevent(pthread_workqueue_function2_t queue_func,
+               pthread_workqueue_function_kevent_t kevent_func,
+               int offset, int flags)
+{
+       return _pthread_workqueue_init_with_workloop(queue_func, kevent_func, NULL, offset, flags);
+}
+
 int
 _pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flags)
 {
@@ -2270,12 +2294,16 @@ _pthread_workqueue_init(pthread_workqueue_function2_t func, int offset, int flag
 int
 pthread_workqueue_setdispatch_np(pthread_workqueue_function_t worker_func)
 {
-       return pthread_workqueue_setdispatch_with_kevent_np((pthread_workqueue_function2_t)worker_func, NULL);
+       return pthread_workqueue_setdispatch_with_workloop_np((pthread_workqueue_function2_t)worker_func, NULL, NULL);
 }
 
 int
 _pthread_workqueue_supported(void)
 {
+       if (os_unlikely(!__pthread_supported_features)) {
+               PTHREAD_INTERNAL_CRASH(0, "libpthread has not been initialized");
+       }
+
        return __pthread_supported_features;
 }
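
With the new guard, calling _pthread_workqueue_supported() before registration has happened is a deliberate crash rather than a silent zero. A sketch of how a caller could probe the returned feature mask, using the two flags this version makes mandatory; illustrative only, since on a registered process both checks now always pass:

    #include <stdbool.h>

    extern int _pthread_workqueue_supported(void);  /* private interface */

    static bool
    have_required_qos_support(void)
    {
            int features = _pthread_workqueue_supported();
            /* Flag names come from the private workqueue header. */
            return (features & PTHREAD_FEATURE_QOS_DEFAULT) &&
                            (features & PTHREAD_FEATURE_QOS_MAINTENANCE);
    }
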
 
@@ -2310,7 +2338,10 @@ pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
                        flags = _PTHREAD_PRIORITY_OVERCOMMIT_FLAG;
                }
 
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
                kp = _pthread_qos_class_encode_workqueue(compat_priority, flags);
+#pragma clang diagnostic pop
 
        } else {
                /* Running on the old kernel, queue_priority is what we pass directly to
@@ -2330,6 +2361,16 @@ pthread_workqueue_addthreads_np(int queue_priority, int options, int numthreads)
        return res;
 }
 
+bool
+_pthread_workqueue_should_narrow(pthread_priority_t pri)
+{
+       int res = __workq_kernreturn(WQOPS_SHOULD_NARROW, NULL, (int)pri, 0);
+       if (res == -1) {
+               return false;
+       }
+       return res;
+}
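
_pthread_workqueue_should_narrow() lets a caller ask the kernel whether the thread pool at a given priority is over-provisioned; a -1 from the syscall is mapped to "no". A sketch of the intended polling pattern; the drain loop and its two helpers are assumptions, not libdispatch internals:

    #include <stdbool.h>

    /* pthread_priority_t comes from the private QoS headers. */
    extern bool _pthread_workqueue_should_narrow(pthread_priority_t pri);
    extern bool have_pending_work(void);    /* assumed helper */
    extern void run_one_work_item(void);    /* assumed helper */

    static void
    drain_queue(pthread_priority_t pri)
    {
            while (have_pending_work()) {
                    if (_pthread_workqueue_should_narrow(pri)) {
                            break;  /* pool is too wide: park this thread */
                    }
                    run_one_work_item();
            }
    }
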
+
 int
 _pthread_workqueue_addthreads(int numthreads, pthread_priority_t priority)
 {
@@ -2369,11 +2410,8 @@ static pthread_introspection_hook_t _pthread_introspection_hook;
 pthread_introspection_hook_t
 pthread_introspection_hook_install(pthread_introspection_hook_t hook)
 {
-       if (os_slowpath(!hook)) {
-               PTHREAD_ABORT("pthread_introspection_hook_install was passed NULL");
-       }
        pthread_introspection_hook_t prev;
-       prev = __sync_swap(&_pthread_introspection_hook, hook);
+       prev = _pthread_atomic_xchg_ptr((void**)&_pthread_introspection_hook, hook);
        return prev;
 }
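
Dropping the NULL check means a hook can now be uninstalled by passing NULL, and installation is a plain atomic exchange. Hooks are expected to chain to whatever was installed before them; a minimal sketch against the public introspection interface:

    #include <pthread/introspection.h>

    static pthread_introspection_hook_t prev_hook;

    static void
    my_hook(unsigned int event, pthread_t thread, void *addr, size_t size)
    {
            /* observe thread create/start/terminate/destroy events */
            if (prev_hook) {
                    prev_hook(event, thread, addr, size);  /* keep the chain */
            }
    }

    static void
    install_hook(void)
    {
            prev_hook = pthread_introspection_hook_install(my_hook);
    }
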
 
diff --git a/src/pthread_atfork.c b/src/pthread_atfork.c
index 8c04ef6f192b5997cfff1cdffb7f4e41b8724b94..add7e48c8152d2b0637296fca91a11556913ccc3 100644 (file)
@@ -2,14 +2,14 @@
  * Copyright (c) 1999, 2012 Apple Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * compliance with the License. Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this
  * file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_LICENSE_HEADER_END@
  */
 
 #include "internal.h"
 
-#include <libkern/OSAtomic.h>
 #include <mach/mach_init.h>
 #include <mach/mach_vm.h>
 #include <platform/compat.h>
 
-PTHREAD_NOEXPORT void pthread_workqueue_atfork_child(void);
-PTHREAD_NOEXPORT void __pthread_fork_child_internal(pthread_t);
-
 int
 pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
 {
        int res = 0;
        size_t idx;
        pthread_globals_t globals = _pthread_globals();
-       
+
        _PTHREAD_LOCK(globals->pthread_atfork_lock);
        idx = globals->atfork_count++;
 
@@ -79,7 +75,7 @@ pthread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void))
        } else if (idx >= PTHREAD_ATFORK_MAX) {
                res = ENOMEM;
        }
+
        if (res == 0) {
                struct pthread_atfork_entry *e = &globals->atfork[idx];
                e->prepare = prepare;
@@ -159,9 +155,9 @@ _pthread_atfork_child(void)
 {
        pthread_globals_t globals = _pthread_globals();
        _PTHREAD_LOCK_INIT(globals->psaved_self_global_lock);
-       __pthread_fork_child_internal(globals->psaved_self);
        __is_threaded = 0;
-       pthread_workqueue_atfork_child();
+       _pthread_main_thread_init(globals->psaved_self);
+       _pthread_bsdthread_init();
 }
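
The child path now re-initializes the main thread and re-registers with the kernel in two explicit steps. At the API level this is the machinery behind pthread_atfork(); the standard usage pattern looks like this (the lock is illustrative):

    #include <pthread.h>

    static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

    static void before_fork(void)       { pthread_mutex_lock(&g_lock); }
    static void after_fork_parent(void) { pthread_mutex_unlock(&g_lock); }
    static void after_fork_child(void)
    {
            /* Only the forking thread survives fork(); release the lock it
             * holds so the child starts from a consistent state. */
            pthread_mutex_unlock(&g_lock);
    }

    static void
    register_handlers(void)
    {
            pthread_atfork(before_fork, after_fork_parent, after_fork_child);
    }
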
 
 // Iterate pthread_atfork child handlers.
diff --git a/src/pthread_cancelable.c b/src/pthread_cancelable.c
index bfe8eed5b851b96ff93fc9d11a329b00ff55b168..894178c810e0ef8ce6336efc16165fe3d94cf5bd 100644 (file)
@@ -2,14 +2,14 @@
  * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * compliance with the License. Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this
  * file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_LICENSE_HEADER_END@
  */
 /*
- * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991  
- *              All Rights Reserved 
- *  
- * Permission to use, copy, modify, and distribute this software and 
- * its documentation for any purpose and without fee is hereby granted, 
- * provided that the above copyright notice appears in all copies and 
- * that both the copyright notice and this permission notice appear in 
- * supporting documentation. 
- *  
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE 
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
- * FOR A PARTICULAR PURPOSE. 
- *  
- * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR 
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, 
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION 
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * 
+ * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
+ *              All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
  */
 /*
  * MkLinux
@@ -49,6 +49,7 @@
  * POSIX Pthread Library
  */
 
+#include "resolver.h"
 #include "internal.h"
 
 #include <stdio.h>     /* For printf(). */
@@ -63,8 +64,6 @@
 #include <mach/vm_statistics.h>
 
 extern int __unix_conforming;
-extern int _pthread_setcancelstate_internal(int state, int *oldstate, int conforming);
-extern void _pthread_testcancel(pthread_t thread, int isconforming);
 extern int _pthread_cond_wait(pthread_cond_t *cond,
                        pthread_mutex_t *mutex,
                        const struct timespec *abstime,
@@ -72,7 +71,8 @@ extern int _pthread_cond_wait(pthread_cond_t *cond,
                        int isconforming);
 extern int __sigwait(const sigset_t *set, int *sig);
 extern int __pthread_sigmask(int, const sigset_t *, sigset_t *);
-extern int __pthread_markcancel(int);
+extern int __pthread_markcancel(mach_port_t);
+extern int __pthread_canceled(int);
 
 #ifdef VARIANT_CANCELABLE
 extern int __semwait_signal(int cond_sem, int mutex_sem, int timeout, int relative, __int64_t tv_sec, __int32_t tv_nsec);
@@ -80,11 +80,29 @@ extern int __semwait_signal(int cond_sem, int mutex_sem, int timeout, int relati
 extern int __semwait_signal(int cond_sem, int mutex_sem, int timeout, int relative, __int64_t tv_sec, __int32_t tv_nsec)  __asm__("___semwait_signal_nocancel");
 #endif
 
+PTHREAD_NOEXPORT
+int _pthread_join(pthread_t thread, void **value_ptr, int conforming,
+               int (*_semwait_signal)(int, int, int, int, __int64_t, __int32_t));
+
 #ifndef VARIANT_CANCELABLE
 
+PTHREAD_ALWAYS_INLINE
+static inline int
+_pthread_update_cancel_state(pthread_t thread, int mask, int state)
+{
+       int oldstate, newstate;
+       os_atomic_rmw_loop2o(thread, cancel_state, oldstate, newstate, seq_cst, {
+               newstate = oldstate;
+               newstate &= ~mask;
+               newstate |= state;
+       });
+       return oldstate;
+}
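
os_atomic_rmw_loop2o expands to a compare-and-swap loop over the cancel_state field, and the helper hands back the pre-update value so callers can report the old state. The same semantics spelled out with C11 atomics; this is a sketch of the behavior, not the actual macro expansion:

    #include <stdatomic.h>

    static int
    update_state(_Atomic int *state, int mask, int value)
    {
            int oldstate = atomic_load_explicit(state, memory_order_relaxed);
            int newstate;
            do {
                    newstate = (oldstate & ~mask) | value;
            } while (!atomic_compare_exchange_weak_explicit(state, &oldstate,
                            newstate, memory_order_seq_cst, memory_order_seq_cst));
            return oldstate;  /* pre-update value, like the helper above */
    }
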
+
 /*
  * Cancel a thread
  */
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_cancel(pthread_t thread)
 {
@@ -93,27 +111,27 @@ pthread_cancel(pthread_t thread)
                __unix_conforming = 1;
 #endif /* __DARWIN_UNIX03 */
 
-       if (_pthread_lookup_thread(thread, NULL, 0) != 0)
+       if (!_pthread_is_valid(thread, 0, NULL)) {
                return(ESRCH);
+       }
 
        /* if the thread is a workqueue thread, then return error */
        if (thread->wqthread != 0) {
                return(ENOTSUP);
        }
 #if __DARWIN_UNIX03
-       int state;
-
-       _PTHREAD_LOCK(thread->lock);
-       state = thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
-       _PTHREAD_UNLOCK(thread->lock);
-       if (state & PTHREAD_CANCEL_ENABLE)
-               __pthread_markcancel(_pthread_kernel_thread(thread));
+       int state = os_atomic_or2o(thread, cancel_state, _PTHREAD_CANCEL_PENDING, relaxed);
+       if (state & PTHREAD_CANCEL_ENABLE) {
+               mach_port_t kport = _pthread_kernel_thread(thread);
+               if (kport) __pthread_markcancel(kport);
+       }
 #else /* __DARWIN_UNIX03 */
        thread->cancel_state |= _PTHREAD_CANCEL_PENDING;
 #endif /* __DARWIN_UNIX03 */
        return (0);
 }
 
+
 void
 pthread_testcancel(void)
 {
@@ -128,9 +146,116 @@ pthread_testcancel(void)
 #endif /* __DARWIN_UNIX03 */
 }
 
+#ifndef BUILDING_VARIANT /* [ */
+
+PTHREAD_NOEXPORT_VARIANT
+void
+_pthread_exit_if_canceled(int error)
+{
+       if (((error & 0xff) == EINTR) && __unix_conforming && (__pthread_canceled(0) == 0)) {
+               pthread_t self = pthread_self();
+               if (self != NULL) {
+                       self->cancel_error = error;
+               }
+               pthread_exit(PTHREAD_CANCELED);
+       }
+}
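
This helper encodes the UNIX2003 convention that a cancelable syscall interrupted by pthread_cancel() reports EINTR, after which the thread must exit with PTHREAD_CANCELED. A hedged sketch of the pattern a cancelable wrapper would follow; the wrapper itself is hypothetical, only the helper's signature comes from the code above:

    #include <errno.h>
    #include <unistd.h>

    extern void _pthread_exit_if_canceled(int error);

    static ssize_t
    cancelable_read(int fd, void *buf, size_t n)
    {
            ssize_t rc = read(fd, buf, n);
            if (rc < 0) {
                    /* Does not return if the EINTR was a cancellation. */
                    _pthread_exit_if_canceled(errno);
            }
            return rc;
    }
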
+
+
+PTHREAD_NOEXPORT_VARIANT
+void
+_pthread_testcancel(pthread_t thread, int isconforming)
+{
+       const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
+
+       int state = os_atomic_load2o(thread, cancel_state, seq_cst);
+       if ((state & flags) == flags) {
+               pthread_exit(isconforming ? PTHREAD_CANCELED : 0);
+       }
+}
+
+PTHREAD_NOEXPORT
+void
+_pthread_markcancel_if_canceled(pthread_t thread, mach_port_t kport)
+{
+       const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
+
+       int state = os_atomic_or2o(thread, cancel_state,
+                       _PTHREAD_CANCEL_INITIALIZED, relaxed);
+       if ((state & flags) == flags && __unix_conforming) {
+               __pthread_markcancel(kport);
+       }
+}
+
+PTHREAD_NOEXPORT
+void *
+_pthread_get_exit_value(pthread_t thread, int conforming)
+{
+       const int flags = (PTHREAD_CANCEL_ENABLE|_PTHREAD_CANCEL_PENDING);
+       void *value = thread->exit_value;
+
+       if (conforming) {
+               int state = os_atomic_load2o(thread, cancel_state, seq_cst);
+               if ((state & flags) == flags) {
+                       value = PTHREAD_CANCELED;
+               }
+       }
+       return value;
+}
+
+/* When a thread exits set the cancellation state to DISABLE and DEFERRED */
+PTHREAD_NOEXPORT
+void
+_pthread_setcancelstate_exit(pthread_t thread, void *value_ptr, int conforming)
+{
+       _pthread_update_cancel_state(thread,
+                       _PTHREAD_CANCEL_STATE_MASK | _PTHREAD_CANCEL_TYPE_MASK,
+                       PTHREAD_CANCEL_DISABLE | PTHREAD_CANCEL_DEFERRED);
+       if (value_ptr == PTHREAD_CANCELED) {
+               _PTHREAD_LOCK(thread->lock);
+               thread->detached |= _PTHREAD_WASCANCEL; // 4597450
+               _PTHREAD_UNLOCK(thread->lock);
+       }
+}
+
+#endif /* !BUILDING_VARIANT ] */
+
 /*
  * Query/update the cancelability 'state' of a thread
  */
+PTHREAD_ALWAYS_INLINE
+static inline int
+_pthread_setcancelstate_internal(int state, int *oldstateptr, int conforming)
+{
+       pthread_t self;
+
+       switch (state) {
+               case PTHREAD_CANCEL_ENABLE:
+                       if (conforming) {
+                               __pthread_canceled(1);
+                       }
+                       break;
+               case PTHREAD_CANCEL_DISABLE:
+                       if (conforming) {
+                               __pthread_canceled(2);
+                       }
+                       break;
+               default:
+                       return EINVAL;
+       }
+
+       self = pthread_self();
+       int oldstate = _pthread_update_cancel_state(self, _PTHREAD_CANCEL_STATE_MASK, state);
+       if (oldstateptr) {
+               *oldstateptr = oldstate & _PTHREAD_CANCEL_STATE_MASK;
+       }
+       if (!conforming) {
+               _pthread_testcancel(self, 0);  /* See if we need to 'die' now... */
+       }
+       return 0;
+}
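
The conforming paths additionally notify the kernel via __pthread_canceled(1|2) so that in-flight cancelable syscalls observe the change. At the public API level this backs the usual disable/restore bracket around work that must not be interrupted:

    #include <pthread.h>

    static void
    do_uncancelable_work(void)
    {
            int old;
            pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
            /* ... no cancellation point may fire in here ... */
            pthread_setcancelstate(old, NULL);
            pthread_testcancel();  /* honor any cancel that arrived meanwhile */
    }
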
+
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_setcancelstate(int state, int *oldstate)
 {
@@ -147,6 +272,7 @@ pthread_setcancelstate(int state, int *oldstate)
 /*
  * Query/update the cancelability 'type' of a thread
  */
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_setcanceltype(int type, int *oldtype)
 {
@@ -161,18 +287,17 @@ pthread_setcanceltype(int type, int *oldtype)
            (type != PTHREAD_CANCEL_ASYNCHRONOUS))
                return EINVAL;
        self = pthread_self();
-       _PTHREAD_LOCK(self->lock);
-       if (oldtype)
-               *oldtype = self->cancel_state & _PTHREAD_CANCEL_TYPE_MASK;
-       self->cancel_state &= ~_PTHREAD_CANCEL_TYPE_MASK;
-       self->cancel_state |= type;
-       _PTHREAD_UNLOCK(self->lock);
+       int oldstate = _pthread_update_cancel_state(self, _PTHREAD_CANCEL_TYPE_MASK, type);
+       if (oldtype) {
+               *oldtype = oldstate & _PTHREAD_CANCEL_TYPE_MASK;
+       }
 #if !__DARWIN_UNIX03
        _pthread_testcancel(self, 0);  /* See if we need to 'die' now... */
 #endif /* __DARWIN_UNIX03 */
        return (0);
 }
 
+
 int
 pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
 {
@@ -188,9 +313,7 @@ pthread_sigmask(int how, const sigset_t * set, sigset_t * oset)
 #endif /* __DARWIN_UNIX03 */
 }
 
-#endif /* VARIANT_CANCELABLE */
-
-#if __DARWIN_UNIX03
+#ifndef BUILDING_VARIANT /* [ */
 
 static void
 __posix_join_cleanup(void *arg)
@@ -203,94 +326,94 @@ __posix_join_cleanup(void *arg)
        _PTHREAD_UNLOCK(thread->lock);
 }
 
-#endif /* __DARWIN_UNIX03 */
-
-/*
- * Wait for a thread to terminate and obtain its exit value.
- */
-int       
-pthread_join(pthread_t thread, 
-            void **value_ptr)
+PTHREAD_NOEXPORT PTHREAD_NOINLINE
+int
+_pthread_join(pthread_t thread, void **value_ptr, int conforming,
+               int (*_semwait_signal)(int, int, int, int, __int64_t, __int32_t))
 {
        int res = 0;
        pthread_t self = pthread_self();
-       mach_port_t kthport;
-       int conforming = 0;
-#if !__DARWIN_UNIX03
        kern_return_t kern_res;
-#endif
+       semaphore_t joinsem, death = (semaphore_t)os_get_cached_semaphore();
 
-#if __DARWIN_UNIX03
-       if (__unix_conforming == 0)
-               __unix_conforming = 1;
+       if (!_pthread_is_valid(thread, PTHREAD_IS_VALID_LOCK_THREAD, NULL)) {
+               res = ESRCH;
+               goto out;
+       }
 
-#ifdef VARIANT_CANCELABLE
-       _pthread_testcancel(self, 1);
-#endif /* VARIANT_CANCELABLE */
-#endif /* __DARWIN_UNIX03 */
+       if (thread->sig != _PTHREAD_SIG) {
+               res = ESRCH;
+       } else if ((thread->detached & PTHREAD_CREATE_DETACHED) ||
+                       !(thread->detached & PTHREAD_CREATE_JOINABLE) ||
+                       (thread->joiner != NULL)) {
+               res = EINVAL;
+       } else if (thread == self || (self != NULL && self->joiner == thread)) {
+               res = EDEADLK;
+       }
+       if (res != 0) {
+               _PTHREAD_UNLOCK(thread->lock);
+               goto out;
+       }
+
+       joinsem = thread->joiner_notify;
+       if (joinsem == SEMAPHORE_NULL) {
+               thread->joiner_notify = joinsem = death;
+               death = MACH_PORT_NULL;
+       }
+       thread->joiner = self;
+       _PTHREAD_UNLOCK(thread->lock);
 
-       if ((res = _pthread_lookup_thread(thread, &kthport, 1)) != 0)
-               return(res);
+       if (conforming) {
+               /* Wait for it to signal... */
+               pthread_cleanup_push(__posix_join_cleanup, (void *)thread);
+               do {
+                       res = _semwait_signal(joinsem, 0, 0, 0, 0, 0);
+               } while ((res < 0) && (errno == EINTR));
+               pthread_cleanup_pop(0);
+       } else {
+               /* Wait for it to signal... */
+               kern_return_t (*_semaphore_wait)(semaphore_t) =
+                               (void*)_semwait_signal;
+               do {
+                       kern_res = _semaphore_wait(joinsem);
+               } while (kern_res != KERN_SUCCESS);
+       }
 
-       if (thread->sig == _PTHREAD_SIG) {
-               semaphore_t death = SEMAPHORE_NULL; /* in case we need it */
-               semaphore_t joinsem = SEMAPHORE_NULL;
+       os_put_cached_semaphore((os_semaphore_t)joinsem);
+       res = _pthread_join_cleanup(thread, value_ptr, conforming);
 
-               if (thread->joiner_notify == SEMAPHORE_NULL) {
-                       death = (semaphore_t)os_get_cached_semaphore();
-               }
+out:
+       if (death) {
+               os_put_cached_semaphore(death);
+       }
+       return res;
+}
 
-               _PTHREAD_LOCK(thread->lock);
-               if ((thread->detached & PTHREAD_CREATE_JOINABLE) &&
-                               (thread->joiner == NULL)) {
-                       PTHREAD_ASSERT(_pthread_kernel_thread(thread) == kthport);
-                       if (thread != self && (self == NULL || self->joiner != thread)) {
-                               if (thread->joiner_notify == SEMAPHORE_NULL) {
-                                       thread->joiner_notify = death;
-                                       death = SEMAPHORE_NULL;
-                               } 
-                               joinsem = thread->joiner_notify;
-                               thread->joiner = self;
-                               _PTHREAD_UNLOCK(thread->lock);
-
-                               if (death != SEMAPHORE_NULL) {
-                                       os_put_cached_semaphore((os_semaphore_t)death);
-                                       death = SEMAPHORE_NULL;
-                               }
+#endif /* !BUILDING_VARIANT ] */
+#endif /* VARIANT_CANCELABLE */
+
+/*
+ * Wait for a thread to terminate and obtain its exit value.
+ */
+int
+pthread_join(pthread_t thread, void **value_ptr)
+{
 #if __DARWIN_UNIX03
-                               /* Wait for it to signal... */ 
-                               pthread_cleanup_push(__posix_join_cleanup, (void *)thread);
-                               do {
-                                       res = __semwait_signal(joinsem, 0, 0, 0, (int64_t)0, (int32_t)0);
-                               } while ((res < 0) && (errno == EINTR));
-                               pthread_cleanup_pop(0);
-#else /* __DARWIN_UNIX03 */
-                               /* Wait for it to signal... */ 
-                               do {
-                                       kern_res = semaphore_wait(joinsem);
-                               } while (kern_res != KERN_SUCCESS);
+       if (__unix_conforming == 0)
+               __unix_conforming = 1;
+
+#ifdef VARIANT_CANCELABLE
+       _pthread_testcancel(pthread_self(), 1);
+#endif /* VARIANT_CANCELABLE */
+       return _pthread_join(thread, value_ptr, 1, __semwait_signal);
+#else
+       return _pthread_join(thread, value_ptr, 0, (void*)semaphore_wait);
 #endif /* __DARWIN_UNIX03 */
 
-                               os_put_cached_semaphore((os_semaphore_t)joinsem);
-                               res = _pthread_join_cleanup(thread, value_ptr, conforming);
-                       } else {
-                               _PTHREAD_UNLOCK(thread->lock);
-                               res = EDEADLK;
-                       }
-               } else {
-                       _PTHREAD_UNLOCK(thread->lock);
-                       res = EINVAL;
-               }
-               if (death != SEMAPHORE_NULL) {
-                       os_put_cached_semaphore((os_semaphore_t)death);
-               }
-               return res;
-       }
-       return ESRCH;
 }
 
-int       
-pthread_cond_wait(pthread_cond_t *cond, 
+int
+pthread_cond_wait(pthread_cond_t *cond,
                  pthread_mutex_t *mutex)
 {
        int conforming;
@@ -310,8 +433,8 @@ pthread_cond_wait(pthread_cond_t *cond,
        return (_pthread_cond_wait(cond, mutex, (struct timespec *)NULL, 0, conforming));
 }
 
-int       
-pthread_cond_timedwait(pthread_cond_t *cond, 
+int
+pthread_cond_timedwait(pthread_cond_t *cond,
                       pthread_mutex_t *mutex,
                       const struct timespec *abstime)
 {
@@ -351,8 +474,8 @@ sigwait(const sigset_t * set, int * sig)
 #ifdef VARIANT_CANCELABLE
                _pthread_testcancel(pthread_self(), 1);
 #endif /* VARIANT_CANCELABLE */
-               
-               /* 
+
+               /*
                 * EINTR that isn't a result of pthread_cancel()
                 * is translated to 0.
                 */
@@ -363,7 +486,7 @@ sigwait(const sigset_t * set, int * sig)
        return(err);
 #else /* __DARWIN_UNIX03 */
        if (__sigwait(set, sig) == -1) {
-               /* 
+               /*
                 * EINTR that isn't a result of pthread_cancel()
                 * is translated to 0.
                 */
@@ -375,3 +498,4 @@ sigwait(const sigset_t * set, int * sig)
        return 0;
 #endif /* __DARWIN_UNIX03 */
 }
+
diff --git a/src/pthread_cond.c b/src/pthread_cond.c
index cc8e7d8b473bb962e02c353ddcfcc0553997c226..be55e1d164942c7ffad560259747fb93e206c999 100644 (file)
@@ -2,14 +2,14 @@
  * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * compliance with the License. Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this
  * file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_LICENSE_HEADER_END@
  */
 /*
- * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991  
- *              All Rights Reserved 
- *  
- * Permission to use, copy, modify, and distribute this software and 
+ * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
+ *              All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
  * its documentation for any purpose and without fee is hereby granted,
- * provided that the above copyright notice appears in all copies and 
- * that both the copyright notice and this permission notice appear in 
- * supporting documentation. 
- *  
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE 
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
- * FOR A PARTICULAR PURPOSE. 
- *  
- * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR 
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, 
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION 
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 /*
  * MkLinux
@@ -48,9 +48,9 @@
  * POSIX Pthread Library
  */
 
+#include "resolver.h"
 #include "internal.h"
 #include <sys/time.h>        /* For struct timespec and getclock(). */
-#include <stdio.h>
 
 #ifdef PLOCKSTAT
 #include "plockstat.h"
 #define PLOCKSTAT_MUTEX_RELEASE(x, y)
 #endif /* PLOCKSTAT */
 
-__private_extern__ int _pthread_cond_init(_pthread_cond *, const pthread_condattr_t *, int);
-__private_extern__ int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming);
-
 extern int __gettimeofday(struct timeval *, struct timezone *);
+extern void _pthread_testcancel(pthread_t thread, int isconforming);
 
-#ifndef BUILDING_VARIANT
-static void _pthread_cond_cleanup(void *arg);
-static void _pthread_cond_updateval(_pthread_cond * cond, int error, uint32_t updateval);
-#endif
+PTHREAD_NOEXPORT
+int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+               const struct timespec *abstime, int isRelative, int isconforming);
 
-static void
+PTHREAD_ALWAYS_INLINE
+static inline void
 COND_GETSEQ_ADDR(_pthread_cond *cond,
-                volatile uint32_t **c_lseqcnt,
-                volatile uint32_t **c_useqcnt,
-                volatile uint32_t **c_sseqcnt)
+               volatile uint64_t **c_lsseqaddr,
+               volatile uint32_t **c_lseqcnt,
+               volatile uint32_t **c_useqcnt,
+               volatile uint32_t **c_sseqcnt)
 {
        if (cond->misalign) {
                *c_lseqcnt = &cond->c_seq[1];
@@ -83,10 +82,16 @@ COND_GETSEQ_ADDR(_pthread_cond *cond,
                *c_sseqcnt = &cond->c_seq[1];
                *c_useqcnt = &cond->c_seq[2];
        }
+       *c_lsseqaddr = (volatile uint64_t *)*c_lseqcnt;
 }
 
 #ifndef BUILDING_VARIANT /* [ */
 
+static void _pthread_cond_cleanup(void *arg);
+static void _pthread_cond_updateval(_pthread_cond * cond, int error,
+               uint32_t updateval);
+
+
 int
 pthread_condattr_init(pthread_condattr_t *attr)
 {
@@ -95,7 +100,7 @@ pthread_condattr_init(pthread_condattr_t *attr)
        return 0;
 }
 
-int       
+int
 pthread_condattr_destroy(pthread_condattr_t *attr)
 {
        attr->sig = _PTHREAD_NO_SIG;
@@ -131,9 +136,20 @@ pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
        return res;
 }
 
-__private_extern__ int       
+int
+pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex,
+               const struct timespec *abstime)
+{
+       return _pthread_cond_wait(cond, mutex, abstime, 1, 0);
+}
+
+#endif /* !BUILDING_VARIANT ] */
+
+PTHREAD_ALWAYS_INLINE
+static inline int
 _pthread_cond_init(_pthread_cond *cond, const pthread_condattr_t *attr, int conforming)
 {
+       volatile uint64_t *c_lsseqaddr;
        volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
 
        cond->busy = NULL;
@@ -143,9 +159,9 @@ _pthread_cond_init(_pthread_cond *cond, const pthread_condattr_t *attr, int conf
        cond->unused = 0;
 
        cond->misalign = (((uintptr_t)&cond->c_seq[0]) & 0x7) != 0;
-       COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
+       COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
        *c_sseqcnt = PTH_RWS_CV_CBIT; // set Sword to 0 with the C bit set
-       
+
        if (conforming) {
                if (attr) {
                        cond->pshared = attr->pshared;
@@ -155,14 +171,26 @@ _pthread_cond_init(_pthread_cond *cond, const pthread_condattr_t *attr, int conf
        } else {
                cond->pshared = _PTHREAD_DEFAULT_PSHARED;
        }
-       
+
+       long sig = _PTHREAD_COND_SIG;
+
        // Ensure all contents are properly set before setting signature.
-       OSMemoryBarrier();
-       cond->sig = _PTHREAD_COND_SIG;
-       
+#if defined(__LP64__)
+       // For binary compatibility reasons we cannot require natural alignment of
+       // the 64bit 'sig' long value in the struct. rdar://problem/21610439
+       uint32_t *sig32_ptr = (uint32_t*)&cond->sig;
+       uint32_t *sig32_val = (uint32_t*)&sig;
+       *(sig32_ptr + 1) = *(sig32_val + 1);
+       os_atomic_store(sig32_ptr, *sig32_val, release);
+#else
+       os_atomic_store2o(cond, sig, sig, release);
+#endif
+
        return 0;
 }
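
The signature store is the publication point: everything written before it must become visible to any thread that later observes sig == _PTHREAD_COND_SIG. Because clients may hand in condvars whose 64-bit sig field is only 4-byte aligned (rdar://21610439), the store is split, high word first, then the low word with release ordering. The same little-endian publish pattern in miniature, with an illustrative struct:

    #include <stdatomic.h>
    #include <stdint.h>

    struct obj {
            long sig;       /* may be only 4-byte aligned in legacy layouts */
            int  payload;
    };

    static void
    publish(struct obj *o, long sig, int payload)
    {
            o->payload = payload;                       /* contents first */
            uint32_t *sig32 = (uint32_t *)&o->sig;
            sig32[1] = (uint32_t)((uint64_t)sig >> 32); /* high word */
            atomic_store_explicit((_Atomic uint32_t *)&sig32[0],
                            (uint32_t)sig, memory_order_release);
    }
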
 
+#ifndef BUILDING_VARIANT /* [ */
+
 PTHREAD_NOINLINE
 static int
 _pthread_cond_check_init_slow(_pthread_cond *cond, bool *inited)
@@ -185,6 +213,7 @@ _pthread_cond_check_init_slow(_pthread_cond *cond, bool *inited)
        return res;
 }
 
+PTHREAD_ALWAYS_INLINE
 static inline int
 _pthread_cond_check_init(_pthread_cond *cond, bool *inited)
 {
@@ -195,6 +224,7 @@ _pthread_cond_check_init(_pthread_cond *cond, bool *inited)
        return res;
 }
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_cond_destroy(pthread_cond_t *ocond)
 {
@@ -205,15 +235,16 @@ pthread_cond_destroy(pthread_cond_t *ocond)
 
                uint64_t oldval64, newval64;
                uint32_t lcntval, ucntval, scntval;
+               volatile uint64_t *c_lsseqaddr;
                volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
 
-               COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
+               COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
 
                do {
                        lcntval = *c_lseqcnt;
                        ucntval = *c_useqcnt;
                        scntval = *c_sseqcnt;
-                       
+
                        // validate it is not busy
                        if ((lcntval & PTHRW_COUNT_MASK) != (scntval & PTHRW_COUNT_MASK)) {
                                //res = EBUSY;
@@ -222,7 +253,7 @@ pthread_cond_destroy(pthread_cond_t *ocond)
                        oldval64 = (((uint64_t)scntval) << 32);
                        oldval64 |= lcntval;
                        newval64 = oldval64;
-               } while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE);
+               } while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));
 
                // <rdar://problem/13782056> Need to clear preposts.
                uint32_t flags = 0;
@@ -233,7 +264,7 @@ pthread_cond_destroy(pthread_cond_t *ocond)
 
                cond->sig = _PTHREAD_NO_SIG;
                res = 0;
-               
+
                _PTHREAD_UNLOCK(cond->lock);
 
                if (needclearpre) {
@@ -248,7 +279,8 @@ pthread_cond_destroy(pthread_cond_t *ocond)
        return res;
 }
 
-static int
+PTHREAD_ALWAYS_INLINE
+static inline int
 _pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
 {
        int res;
@@ -260,6 +292,7 @@ _pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
 
        uint64_t oldval64, newval64;
        uint32_t lcntval, ucntval, scntval;
+       volatile uint64_t *c_lsseqaddr;
        volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
 
        int retry_count = 0, uretry_count = 0;
@@ -271,7 +304,7 @@ _pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
                return res;
        }
 
-       COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
+       COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
 
        bool retry;
        do {
@@ -280,6 +313,8 @@ _pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
                lcntval = *c_lseqcnt;
                ucntval = *c_useqcnt;
                scntval = *c_sseqcnt;
+               diffgen = 0;
+               ulval = 0;
 
                if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
                    (thread == MACH_PORT_NULL && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
@@ -287,8 +322,8 @@ _pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
                        oldval64 = (((uint64_t)scntval) << 32);
                        oldval64 |= lcntval;
                        newval64 = oldval64;
-                       
-                       if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE) {
+
+                       if (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst)) {
                                retry = true;
                                continue;
                        } else {
@@ -321,7 +356,7 @@ _pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
                                 */
                                if (ucountreset != 0) {
                                        return EAGAIN;
-                               } else if (OSAtomicCompareAndSwap32Barrier(ucntval, (scntval & PTHRW_COUNT_MASK), (volatile int32_t *)c_useqcnt) == TRUE) {
+                               } else if (os_atomic_cmpxchg(c_useqcnt, ucntval, (scntval & PTHRW_COUNT_MASK), seq_cst)) {
                                        /* now the U is reset to S value */
                                        ucountreset = 1;
                                        uretry_count = 0;
@@ -348,8 +383,8 @@ _pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
                        ulval += PTHRW_INC;
                }
 
-       } while (retry || OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE);
-       
+       } while (retry || !os_atomic_cmpxchg(c_useqcnt, ucntval, ulval, seq_cst));
+
        uint32_t flags = 0;
        if (cond->pshared == PTHREAD_PROCESS_SHARED) {
                flags |= _PTHREAD_MTX_OPT_PSHARED;
@@ -372,10 +407,10 @@ _pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
        return 0;
 }
 
-
 /*
  * Signal a condition variable, waking up all threads waiting for it.
  */
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_cond_broadcast(pthread_cond_t *ocond)
 {
@@ -385,12 +420,13 @@ pthread_cond_broadcast(pthread_cond_t *ocond)
 /*
  * Signal a condition variable, waking a specified thread.
  */
-int       
+PTHREAD_NOEXPORT_VARIANT
+int
 pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
 {
        mach_port_t mp = MACH_PORT_NULL;
        if (thread) {
-               mp = pthread_mach_thread_np(thread);
+               mp = pthread_mach_thread_np((_Nonnull pthread_t)thread);
        }
        return _pthread_cond_signal(ocond, false, mp);
 }
@@ -398,17 +434,17 @@ pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
 /*
  * Signal a condition variable, waking only one thread.
  */
+PTHREAD_NOEXPORT_VARIANT
 int
-pthread_cond_signal(pthread_cond_t *cond)
+pthread_cond_signal(pthread_cond_t *ocond)
 {
-       return pthread_cond_signal_thread_np(cond, NULL);
+       return _pthread_cond_signal(ocond, false, MACH_PORT_NULL);
 }
 
 /*
  * Manage a list of condition variables associated with a mutex
  */
 
-
 /*
  * Suspend waiting for a condition variable.
  * Note: we have to keep a list of condition variables which are using
  * this same mutex variable so we can detect invalid 'destroy' sequences.
  * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
  * remaining conforming behavior.
  * remaining conforming behavior..
  */
-__private_extern__ int
-_pthread_cond_wait(pthread_cond_t *ocond, 
+PTHREAD_NOEXPORT PTHREAD_NOINLINE
+int
+_pthread_cond_wait(pthread_cond_t *ocond,
                        pthread_mutex_t *omutex,
                        const struct timespec *abstime,
                        int isRelative,
@@ -430,12 +467,11 @@ _pthread_cond_wait(pthread_cond_t *ocond,
        uint32_t mtxgen, mtxugen, flags=0, updateval;
        uint32_t lcntval, ucntval, scntval;
        uint32_t nlval, ulval, savebits;
+       volatile uint64_t *c_lsseqaddr;
        volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
        uint64_t oldval64, newval64, mugen, cvlsgen;
        uint32_t *npmtx = NULL;
 
-extern void _pthread_testcancel(pthread_t thread, int isconforming);
-
        res = _pthread_cond_check_init(cond, NULL);
        if (res != 0) {
                return res;
@@ -494,7 +530,7 @@ extern void _pthread_testcancel(pthread_t thread, int isconforming);
                return EINVAL;
        }
 
-       COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
+       COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
 
        do {
                lcntval = *c_lseqcnt;
@@ -510,11 +546,11 @@ extern void _pthread_testcancel(pthread_t thread, int isconforming);
                nlval = lcntval + PTHRW_INC;
                newval64 = (((uint64_t)ulval) << 32);
                newval64 |= nlval;
-       } while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE);
+       } while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));
 
        cond->busy = mutex;
 
-       res = __mtx_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);
+       res = _pthread_mutex_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);
 
        /* TBD: cases are for normal (non owner for recursive mutex; error checking)*/
        if (res != 0) {
@@ -569,7 +605,7 @@ extern void _pthread_testcancel(pthread_t thread, int isconforming);
        return res;
 }
 
-static void 
+static void
 _pthread_cond_cleanup(void *arg)
 {
        _pthread_cond *cond = (_pthread_cond *)arg;
@@ -589,7 +625,7 @@ _pthread_cond_cleanup(void *arg)
 
 // 4597450: end
        mutex = (pthread_mutex_t *)cond->busy;
-       
+
        // add unlock ref to show one less waiter
        _pthread_cond_updateval(cond, thread->cancel_error, 0);
 
@@ -608,10 +644,11 @@ static void
 _pthread_cond_updateval(_pthread_cond *cond, int error, uint32_t updateval)
 {
        int needclearpre;
-       
+
        uint32_t diffgen, nsval;
        uint64_t oldval64, newval64;
        uint32_t lcntval, ucntval, scntval;
+       volatile uint64_t *c_lsseqaddr;
        volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
 
        if (error != 0) {
@@ -624,12 +661,14 @@ _pthread_cond_updateval(_pthread_cond *cond, int error, uint32_t updateval)
                }
        }
 
-       COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
+       COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
 
        do {
                lcntval = *c_lseqcnt;
                ucntval = *c_useqcnt;
                scntval = *c_sseqcnt;
+               nsval = 0;
+               needclearpre = 0;
 
                diffgen = diff_genseq(lcntval, scntval); // pending waiters
 
@@ -654,14 +693,12 @@ _pthread_cond_updateval(_pthread_cond *cond, int error, uint32_t updateval)
                                // reset p bit but retain c bit on the sword
                                nsval &= PTH_RWS_CV_RESET_PBIT;
                                needclearpre = 1;
-                       } else {
-                               needclearpre = 0;
                        }
 
                        newval64 = (((uint64_t)nsval) << 32);
                        newval64 |= lcntval;
                }
-       } while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE);
+       } while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));
 
        if (diffgen > 0) {
                // if L == S, then reset associated mutex
@@ -679,15 +716,9 @@ _pthread_cond_updateval(_pthread_cond *cond, int error, uint32_t updateval)
        }
 }
 
-
-int
-pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
-{
-       return _pthread_cond_wait(cond, mutex, abstime, 1, 0);
-}
-
 #endif /* !BUILDING_VARIANT ] */
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_cond_init(pthread_cond_t *ocond, const pthread_condattr_t *attr)
 {
@@ -703,3 +734,4 @@ pthread_cond_init(pthread_cond_t *ocond, const pthread_condattr_t *attr)
        _PTHREAD_LOCK_INIT(cond->lock);
        return _pthread_cond_init(cond, attr, conforming);
 }
+
diff --git a/src/pthread_mutex.c b/src/pthread_mutex.c
index 5eaf689c56ef106b080c4d0d76b7e416337f8e70..4f1d06fd534a811f290fa184010db46b303b2b14 100644 (file)
@@ -2,14 +2,14 @@
  * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * compliance with the License. Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this
  * file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -17,7 +17,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_LICENSE_HEADER_END@
  */
 /*
 #include "resolver.h"
 #include "internal.h"
 #include "kern/kern_trace.h"
-#include <sys/syscall.h>
+
+extern int __unix_conforming;
+
+#ifndef BUILDING_VARIANT /* [ */
 
 #ifdef PLOCKSTAT
 #include "plockstat.h"
+/* This function is never called and exists to provide never-fired dtrace
+ * probes so that user d scripts don't get errors.
+ */
+PTHREAD_NOEXPORT PTHREAD_USED
+void
+_plockstat_never_fired(void)
+{
+       PLOCKSTAT_MUTEX_SPIN(NULL);
+       PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
+}
 #else /* !PLOCKSTAT */
 #define        PLOCKSTAT_MUTEX_SPIN(x)
 #define        PLOCKSTAT_MUTEX_SPUN(x, y, z)
 #define        PLOCKSTAT_MUTEX_RELEASE(x, y)
 #endif /* PLOCKSTAT */
 
-#define PTHREAD_MUTEX_INIT_UNUSED 1
+#define BLOCK_FAIL_PLOCKSTAT    0
+#define BLOCK_SUCCESS_PLOCKSTAT 1
 
-extern int __unix_conforming;
+#define PTHREAD_MUTEX_INIT_UNUSED 1
 
-#ifndef BUILDING_VARIANT
+PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
+int _pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);
 
 PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
-int
-_pthread_mutex_unlock_slow(pthread_mutex_t *omutex);
+int _pthread_mutex_unlock_slow(pthread_mutex_t *omutex);
 
 PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
-int
-_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);
+int _pthread_mutex_corruption_abort(_pthread_mutex *mutex);
 
-PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into _pthread_mutex_lock
-int
-_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid);
 
-#endif /* BUILDING_VARIANT */
+PTHREAD_ALWAYS_INLINE
+static inline int _pthread_mutex_init(_pthread_mutex *mutex,
+               const pthread_mutexattr_t *attr, uint32_t static_type);
 
 #define DEBUG_TRACE_POINTS 0
 
 #if DEBUG_TRACE_POINTS
-extern int __syscall(int number, ...);
-#define DEBUG_TRACE(x, a, b, c, d) __syscall(SYS_kdebug_trace, TRACE_##x, a, b, c, d)
+#include <sys/kdebug.h>
+#define DEBUG_TRACE(x, a, b, c, d) kdebug_trace(TRACE_##x, a, b, c, d)
 #else
 #define DEBUG_TRACE(x, a, b, c, d) do { } while(0)
 #endif
 
-#include <machine/cpu_capabilities.h>
+typedef union mutex_seq {
+       uint32_t seq[2];
+       struct { uint32_t lgenval; uint32_t ugenval; };
+       struct { uint32_t mgen; uint32_t ugen; };
+       uint64_t seq_LU;
+       uint64_t _Atomic atomic_seq_LU;
+} mutex_seq;
 
-static inline int _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr, uint32_t static_type);
+_Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
+               "Incorrect mutex_seq size");
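
The union lets the same eight bytes be read as two 32-bit generation words (lgenval/ugenval) or as one 64-bit quantity for atomic snapshots and CAS, which is exactly why the size must come out to two uint32_t. A reduced sketch of the pattern:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef union {
            struct { uint32_t lgenval; uint32_t ugenval; };
            uint64_t seq_LU;
            _Atomic uint64_t atomic_seq_LU;
    } seq_t;

    /* Snapshot both generation words at once, then publish an update of
     * the pair with a single 64-bit CAS (illustrative only). */
    static bool
    bump_lgen(seq_t *s)
    {
            seq_t oldseq, newseq;
            oldseq.seq_LU = atomic_load_explicit(&s->atomic_seq_LU,
                            memory_order_relaxed);
            newseq = oldseq;
            newseq.lgenval += 1;
            return atomic_compare_exchange_strong_explicit(&s->atomic_seq_LU,
                            &oldseq.seq_LU, newseq.seq_LU,
                            memory_order_acquire, memory_order_relaxed);
    }
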
 
 #if !__LITTLE_ENDIAN__
 #error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
@@ -106,47 +125,74 @@ static inline int _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutex
 
 PTHREAD_ALWAYS_INLINE
 static inline void
-MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex,
-                 volatile uint64_t **seqaddr)
+MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex, mutex_seq **seqaddr)
 {
        // 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
        // We don't require more than byte alignment on OS X. rdar://22278325
-       *seqaddr = (volatile uint64_t*)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
+       *seqaddr = (void *)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
 }
 
 PTHREAD_ALWAYS_INLINE
 static inline void
-MUTEX_GETTID_ADDR(_pthread_mutex *mutex,
-                                 volatile uint64_t **tidaddr)
+MUTEX_GETTID_ADDR(_pthread_mutex *mutex, uint64_t **tidaddr)
 {
        // 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
        // We don't require more than byte alignment on OS X. rdar://22278325
-       *tidaddr = (volatile uint64_t*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
+       *tidaddr = (void*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
 }
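Both helpers above round a possibly misaligned array base up to the next 8-byte boundary with the classic (addr + 7) & ~7 trick, needed because the structure itself is only guaranteed byte alignment (per the rdar reference). A tiny self-contained check of that arithmetic:

        #include <assert.h>
        #include <stdint.h>

        // Round p up to the next 8-byte boundary; a no-op when already aligned.
        static void *align8(void *p)
        {
                return (void *)(((uintptr_t)p + 0x7ul) & ~0x7ul);
        }

        int main(void)
        {
                char buf[24];
                for (int i = 0; i < 16; i++) {
                        uintptr_t a = (uintptr_t)align8(buf + i);
                        assert(a % 8 == 0);                    // aligned result
                        assert(a - (uintptr_t)(buf + i) < 8);  // moved < 8 bytes
                }
                return 0;
        }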
 
-#ifndef BUILDING_VARIANT /* [ */
-#ifndef OS_UP_VARIANT_ONLY
+PTHREAD_ALWAYS_INLINE
+static inline void
+mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
+{
+       oldseqval->seq_LU = seqaddr->seq_LU;
+}
 
-#define BLOCK_FAIL_PLOCKSTAT    0
-#define BLOCK_SUCCESS_PLOCKSTAT 1
+PTHREAD_ALWAYS_INLINE
+static inline void
+mutex_seq_atomic_load_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval)
+{
+       oldseqval->seq_LU = os_atomic_load(&seqaddr->atomic_seq_LU, relaxed);
+}
 
-#ifdef PLOCKSTAT
-/* This function is never called and exists to provide never-fired dtrace
- * probes so that user d scripts don't get errors.
- */
-PTHREAD_NOEXPORT PTHREAD_USED
-void
-_plockstat_never_fired(void) 
+#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
+               mutex_seq_atomic_load_##m(seqaddr, oldseqval)
+
+PTHREAD_ALWAYS_INLINE
+static inline bool
+mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
+               mutex_seq *newseqval)
 {
-       PLOCKSTAT_MUTEX_SPIN(NULL);
-       PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
+       return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
+                       newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
+}
+
+PTHREAD_ALWAYS_INLINE
+static inline bool
+mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
+               mutex_seq *newseqval)
+{
+       return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
+                       newseqval->seq_LU, &oldseqval->seq_LU, acquire);
+}
+
+PTHREAD_ALWAYS_INLINE
+static inline bool
+mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
+               mutex_seq *newseqval)
+{
+       return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
+                       newseqval->seq_LU, &oldseqval->seq_LU, release);
 }
-#endif // PLOCKSTAT
+
+#define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m)\
+               mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)
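The cmpxchgv helpers matter because, on failure, they write the freshly observed sequence back into *oldseqval; the lock and unlock loops below rely on that to retry without an explicit reload. C11's compare-exchange has the same contract, so the loop shape can be sketched portably (os_atomic_cmpxchgv itself comes from Apple's internal os/ headers and is not part of this diff):

        #include <stdatomic.h>
        #include <stdint.h>

        // Shape of the load/modify/CAS loops used by the mutex paths below.
        // atomic_compare_exchange_weak updates 'old' on failure, which is the
        // same behavior the cmpxchgv helpers above provide.
        static void bump_low_word(_Atomic uint64_t *seq)
        {
                uint64_t old = atomic_load_explicit(seq, memory_order_relaxed);
                uint64_t new_val;
                do {
                        new_val = old + 1; // stand-in for "compute newseq from oldseq"
                } while (!atomic_compare_exchange_weak_explicit(seq, &old, new_val,
                                memory_order_acquire, memory_order_relaxed));
        }

        int main(void)
        {
                _Atomic uint64_t seq = 0;
                bump_low_word(&seq);
                return (int)atomic_load(&seq) - 1; // exits 0
        }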
 
 /*
  * Initialize a mutex variable, possibly with additional attributes.
  * Public interface - so don't trust the lock - initialize it first.
  */
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
 {
@@ -161,6 +207,7 @@ pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
        return (_pthread_mutex_init(mutex, attr, 0x7));
 }
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
 {
@@ -175,16 +222,18 @@ pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
        return res;
 }
 
+PTHREAD_NOEXPORT_VARIANT
 int
-pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling, int *old_prioceiling)
+pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling,
+               int *old_prioceiling)
 {
        int res = EINVAL;
        _pthread_mutex *mutex = (_pthread_mutex *)omutex;
        if (_pthread_mutex_check_signature(mutex)) {
                _PTHREAD_LOCK(mutex->lock);
-               if (prioceiling >= -999 || prioceiling <= 999) {
+               if (prioceiling >= -999 && prioceiling <= 999) {
                        *old_prioceiling = mutex->prioceiling;
-                       mutex->prioceiling = prioceiling;
+                       mutex->prioceiling = (int16_t)prioceiling;
                        res = 0;
                }
                _PTHREAD_UNLOCK(mutex->lock);
@@ -192,8 +241,10 @@ pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling, int *old_
        return res;
 }
 
+
 int
-pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *prioceiling)
+pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
+               int *prioceiling)
 {
        int res = EINVAL;
        if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
@@ -253,7 +304,7 @@ pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
 {
        int res = EINVAL;
        if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
-               if (prioceiling >= -999 || prioceiling <= 999) {
+               if (prioceiling >= -999 && prioceiling <= 999) {
                        attr->prioceiling = prioceiling;
                        res = 0;
                }
@@ -312,23 +363,6 @@ pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
        return res;
 }
 
-// XXX remove
-void
-cthread_yield(void) 
-{
-       sched_yield();
-}
-
-void
-pthread_yield_np(void) 
-{
-       sched_yield();
-}
-
-
-/*
- * Temp: till pshared is fixed correctly
- */
 int
 pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
 {
@@ -341,30 +375,28 @@ pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
 
        if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
 #if __DARWIN_UNIX03
-               if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
+               if (( pshared == PTHREAD_PROCESS_PRIVATE) ||
+                               (pshared == PTHREAD_PROCESS_SHARED))
 #else /* __DARWIN_UNIX03 */
                if ( pshared == PTHREAD_PROCESS_PRIVATE)
 #endif /* __DARWIN_UNIX03 */
                {
-                       attr->pshared = pshared; 
+                       attr->pshared = pshared;
                        res = 0;
                }
        }
        return res;
 }
 
-PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
-int
-_pthread_mutex_corruption_abort(_pthread_mutex *mutex);
-
-PTHREAD_NOINLINE
+PTHREAD_NOEXPORT PTHREAD_NOINLINE PTHREAD_NORETURN
 int
 _pthread_mutex_corruption_abort(_pthread_mutex *mutex)
 {
-       PTHREAD_ABORT("pthread_mutex corruption: mutex %p owner changed in the middle of lock/unlock");
-       return EINVAL; // NOTREACHED
+       PTHREAD_ABORT("pthread_mutex corruption: mutex owner changed in the "
+                       "middle of lock/unlock");
 }
 
+
 /*
  * Sequence numbers and TID:
  *
@@ -374,42 +406,50 @@ _pthread_mutex_corruption_abort(_pthread_mutex *mutex)
  * the unlock path will then transition to D=[L5 U4 TID0] and then finally
  * E=[L5 U5 TID0].
  *
- * If a contender comes in after B, the mutex will instead transition to E=[L6+KE U4 TID0]
- * and then F=[L6+KE U4 TID940]. If a contender comes in after C, it will transition to
- * F=[L6+KE U4 TID940] directly. In both cases, the contender will enter the kernel with either
- * mutexwait(U4, TID0) or mutexwait(U4, TID940). The first owner will unlock the mutex
- * by first updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
- * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to signal the next waiter
- * (potentially as a prepost). When the waiter comes out of the kernel, it will update the owner to
- * I=[L6+KE U5 TID941]. An unlock at this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
+ * If a contender comes in after B, the mutex will instead transition to
+ * E=[L6+KE U4 TID0] and then F=[L6+KE U4 TID940]. If a contender comes in after
+ * C, it will transition to F=[L6+KE U4 TID940] directly. In both cases, the
+ * contender will enter the kernel with either mutexwait(U4, TID0) or
+ * mutexwait(U4, TID940). The first owner will unlock the mutex by first
+ * updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
+ * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to
+ * signal the next waiter (potentially as a prepost). When the waiter comes out
+ * of the kernel, it will update the owner to I=[L6+KE U5 TID941]. An unlock at
+ * this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
  *
- * At various points along these timelines, since the sequence words and TID are written independently,
- * a thread may get preempted and another thread might see inconsistent data. In the worst case, another
- * thread may see the TID in the SWITCHING (-1) state or unlocked (0) state for longer because the
- * owning thread was preempted.
+ * At various points along these timelines, since the sequence words and TID are
+ * written independently, a thread may get preempted and another thread might
+ * see inconsistent data. In the worst case, another thread may see the TID in
+ * the SWITCHING (-1) state or unlocked (0) state for longer because the owning
+ * thread was preempted.
  */
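A toy model of the L/U arithmetic in that timeline: the count lives in the high bits of each sequence word, each lock or unlock generation bumps it by PTHRW_INC, and the pending-waiter check compares only the masked counts. The constants and the division below are hypothetical stand-ins for the definitions in kern/synch_internal.h (the real diff_genseq result is only tested against zero here):

        #include <assert.h>
        #include <stdint.h>

        // Hypothetical stand-ins; real values live in kern/synch_internal.h.
        #define K_PTHRW_INC        0x100u       // count step (low bits hold K/E/...)
        #define K_PTHRW_COUNT_MASK 0xffffff00u  // masks off the state bits

        // Outstanding lock generations = lock count minus unlock count,
        // comparing only the count fields (models diff_genseq()).
        static uint32_t waiters(uint32_t lgenval, uint32_t ugenval)
        {
                return ((lgenval & K_PTHRW_COUNT_MASK) -
                                (ugenval & K_PTHRW_COUNT_MASK)) / K_PTHRW_INC;
        }

        int main(void)
        {
                // The timeline above: L5/U4 while held, L5/U5 after the unlock.
                assert(waiters(5 * K_PTHRW_INC, 4 * K_PTHRW_INC) == 1);
                assert(waiters(5 * K_PTHRW_INC, 5 * K_PTHRW_INC) == 0);
                return 0;
        }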
 
 /*
- * Drop the mutex unlock references from cond_wait. or mutex_unlock.
+ * Drop the mutex unlock references from cond_wait or mutex_unlock.
  */
 PTHREAD_ALWAYS_INLINE
 static inline int
-_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
+_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp,
+               uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
 {
-       bool firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
-       uint32_t lgenval, ugenval, flags;
-       uint64_t oldtid, newtid;
-       volatile uint64_t *tidaddr;
-       MUTEX_GETTID_ADDR(mutex, &tidaddr);
-
-       flags = mutex->mtxopts.value;
+       bool firstfit = (mutex->mtxopts.options.policy ==
+                       _PTHREAD_MUTEX_POLICY_FIRSTFIT);
+       uint32_t flags = mutex->mtxopts.value;
        flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default
 
+       mutex_seq *seqaddr;
+       MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
+
+       mutex_seq oldseq, newseq;
+       mutex_seq_load(seqaddr, &oldseq);
+
+       uint64_t *tidaddr;
+       MUTEX_GETTID_ADDR(mutex, &tidaddr);
+       uint64_t oldtid, newtid;
+
        if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
                uint64_t selfid = _pthread_selfid_direct();
-
-               if (*tidaddr != selfid) {
-                       //PTHREAD_ABORT("dropping recur or error mutex not owned by the thread");
+               if (os_atomic_load(tidaddr, relaxed) != selfid) {
                        PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
                        return EPERM;
                } else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
@@ -422,39 +462,36 @@ _pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp, uint32
                }
        }
 
-       uint64_t oldval64, newval64;
-       volatile uint64_t *seqaddr;
-       MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
-
        bool clearprepost, clearnotify, spurious;
        do {
-               oldval64 = *seqaddr;
-               oldtid = *tidaddr;
-               lgenval = (uint32_t)oldval64;
-               ugenval = (uint32_t)(oldval64 >> 32);
+               newseq = oldseq;
+               oldtid = os_atomic_load(tidaddr, relaxed);
 
                clearprepost = false;
                clearnotify = false;
                spurious = false;
 
-               int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters
-
+               // pending waiters
+               int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
                if (numwaiters == 0) {
-                       // spurious unlock; do not touch tid
+                       // spurious unlock (unlock of unlocked lock)
                        spurious = true;
                } else {
-                       ugenval += PTHRW_INC;
+                       newseq.ugenval += PTHRW_INC;
 
-                       if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
-                               // our unlock sequence matches to lock sequence, so if the CAS is successful, the mutex is unlocked
+                       if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
+                                       (newseq.ugenval & PTHRW_COUNT_MASK)) {
+                               // our unlock sequence matches the lock sequence, so if the
+                               // CAS is successful, the mutex is unlocked
 
                                /* do not reset Ibit, just K&E */
-                               lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
+                               newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
                                clearnotify = true;
                                newtid = 0; // clear owner
                        } else {
                                if (firstfit) {
-                                       lgenval &= ~PTH_RWL_EBIT; // reset E bit so another can acquire meanwhile
+                                       // reset E bit so another can acquire meanwhile
+                                       newseq.lgenval &= ~PTH_RWL_EBIT;
                                        newtid = 0;
                                } else {
                                        newtid = PTHREAD_MTX_TID_SWITCHING;
@@ -462,12 +499,13 @@ _pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp, uint32
                                // need to signal others waiting for mutex
                                flags |= _PTHREAD_MTX_OPT_NOTIFY;
                        }
-                       
+
                        if (newtid != oldtid) {
-                               // We're giving up the mutex one way or the other, so go ahead and update the owner to SWITCHING
-                               // or 0 so that once the CAS below succeeds, there is no stale ownership information.
-                               // If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
-                               // to be SWITCHING/0
+                               // We're giving up the mutex one way or the other, so go ahead
+                               // and update the owner to SWITCHING or 0 so that once the CAS
+                               // below succeeds, there is no stale ownership information. If
+                               // the CAS of the seqaddr fails, we may loop, but it's still
+                               // valid for the owner to be SWITCHING/0
                                if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
                                        // we own this mutex, nobody should be updating it except us
                                        return _pthread_mutex_corruption_abort(mutex);
@@ -477,26 +515,23 @@ _pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp, uint32
 
                if (clearnotify || spurious) {
                        flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
-                       if (firstfit && ((lgenval & PTH_RWL_PBIT) != 0)) {
+                       if (firstfit && (newseq.lgenval & PTH_RWL_PBIT)) {
                                clearprepost = true;
-                               lgenval &= ~PTH_RWL_PBIT;
+                               newseq.lgenval &= ~PTH_RWL_PBIT;
                        }
                }
-               
-               newval64 = (((uint64_t)ugenval) << 32);
-               newval64 |= lgenval;
-
-       } while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));
+       } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));
 
        if (clearprepost) {
-                __psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
+               __psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
+                               newseq.lgenval, flags | _PTHREAD_MTX_OPT_MUTEX);
        }
 
        if (mgenp != NULL) {
-               *mgenp = lgenval;
+               *mgenp = newseq.lgenval;
        }
        if (ugenp != NULL) {
-               *ugenp = ugenval;
+               *ugenp = newseq.ugenval;
        }
        if (pmtxp != NULL) {
                *pmtxp = (uint32_t *)mutex;
@@ -508,9 +543,10 @@ _pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp, uint32
        return 0;
 }
 
-PTHREAD_NOEXPORT
+PTHREAD_NOEXPORT PTHREAD_NOINLINE
 int
-__mtx_droplock(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
+_pthread_mutex_droplock(_pthread_mutex *mutex, uint32_t *flagsp,
+               uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
 {
        return _pthread_mutex_unlock_updatebits(mutex, flagsp, pmtxp, mgenp, ugenp);
 }
@@ -520,28 +556,33 @@ static inline int
 _pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
 {
        int res = 0;
-       int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
-       int isebit = 0;
+       bool firstfit = (mutex->mtxopts.options.policy ==
+                       _PTHREAD_MUTEX_POLICY_FIRSTFIT);
+       bool isebit = false, updated = false;
 
-       uint32_t lgenval, ugenval;
-       uint64_t oldval64, newval64;
-       volatile uint64_t *seqaddr;
+       mutex_seq *seqaddr;
        MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
-       uint64_t oldtid;
-       volatile uint64_t *tidaddr;
+
+       mutex_seq oldseq, newseq;
+       mutex_seq_load(seqaddr, &oldseq);
+
+       uint64_t *tidaddr;
        MUTEX_GETTID_ADDR(mutex, &tidaddr);
+       uint64_t oldtid;
 
        do {
-               do {
-                       oldval64 = *seqaddr;
-                       oldtid = *tidaddr;
-                       lgenval = (uint32_t)oldval64;
-                       ugenval = (uint32_t)(oldval64 >> 32);
+               if (firstfit && isebit && updated) {
+                       mutex_seq_atomic_load(seqaddr, &oldseq, relaxed);
+               }
+               newseq = oldseq;
+               oldtid = os_atomic_load(tidaddr, relaxed);
 
+               if (isebit && !(oldseq.lgenval & PTH_RWL_EBIT)) {
                        // E bit was set on first pass through the loop but is no longer
                        // set. Apparently we spin until it arrives.
                        // XXX: verify this is desired behavior.
-               } while (isebit && (lgenval & PTH_RWL_EBIT) == 0);
+                       continue;
+               }
 
                if (isebit) {
                        // first fit mutex now has the E bit set. Return 1.
@@ -550,22 +591,21 @@ _pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
                }
 
                if (firstfit) {
-                       isebit = (lgenval & PTH_RWL_EBIT) != 0;
-               } else if ((lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) == (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
+                       isebit = (oldseq.lgenval & PTH_RWL_EBIT);
+               } else if ((oldseq.lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) ==
+                               (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
                        // fairshare mutex and the bits are already set, just update tid
                        break;
                }
 
                // either first fit or no E bit set
                // update the bits
-               lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
+               newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
 
-               newval64 = (((uint64_t)ugenval) << 32);
-               newval64 |= lgenval;
-
-               // set s and b bit
-               // Retry if CAS fails, or if it succeeds with firstfit and E bit already set
-       } while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire) || (firstfit && isebit));
+               // Retry if CAS fails, or if it succeeds with firstfit and E bit
+               // already set
+       } while (!(updated = mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+                       relaxed)) || (firstfit && isebit));
 
        if (res == 0) {
                if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
@@ -579,41 +619,34 @@ _pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
 
 PTHREAD_NOINLINE
 static int
-__mtx_markprepost(_pthread_mutex *mutex, uint32_t updateval, int firstfit)
+_pthread_mutex_markprepost(_pthread_mutex *mutex, uint32_t updateval)
 {
-       uint32_t flags;
-       uint32_t lgenval, ugenval;
-       uint64_t oldval64, newval64;
-
-       volatile uint64_t *seqaddr;
+       mutex_seq *seqaddr;
        MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
 
-       if (firstfit != 0 && (updateval & PTH_RWL_PBIT) != 0) {
-               int clearprepost;
-               do {                            
-                       clearprepost = 0;
+       mutex_seq oldseq, newseq;
+       mutex_seq_load(seqaddr, &oldseq);
 
-                       flags = mutex->mtxopts.value;
-
-                       oldval64 = *seqaddr;
-                       lgenval = (uint32_t)oldval64;
-                       ugenval = (uint32_t)(oldval64 >> 32);
+       bool clearprepost;
+       do {
+               clearprepost = false;
+               newseq = oldseq;
 
-                       /* update the bits */
-                       if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
-                               clearprepost = 1;       
-                               lgenval &= ~PTH_RWL_PBIT;
-                       } else {
-                               lgenval |= PTH_RWL_PBIT;
-                       }
-                       newval64 = (((uint64_t)ugenval) << 32);
-                       newval64 |= lgenval;
-               } while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));
-               
-               if (clearprepost != 0) {
-                       __psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
+               /* update the bits */
+               if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
+                               (oldseq.ugenval & PTHRW_COUNT_MASK)) {
+                       clearprepost = true;
+                       newseq.lgenval &= ~PTH_RWL_PBIT;
+               } else {
+                       newseq.lgenval |= PTH_RWL_PBIT;
                }
+       } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, relaxed));
+
+       if (clearprepost) {
+               __psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
+                               newseq.lgenval, mutex->mtxopts.value | _PTHREAD_MTX_OPT_MUTEX);
        }
+
        return 0;
 }
 
@@ -650,7 +683,7 @@ _pthread_mutex_check_init(pthread_mutex_t *omutex)
 {
        int res = 0;
        _pthread_mutex *mutex = (_pthread_mutex *)omutex;
-       
+
        if (!_pthread_mutex_check_signature(mutex)) {
                return _pthread_mutex_check_init_slow(omutex);
        }
@@ -658,14 +691,13 @@ _pthread_mutex_check_init(pthread_mutex_t *omutex)
 }
 
 PTHREAD_NOINLINE
-int
-_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid)
+static int
+_pthread_mutex_lock_wait(pthread_mutex_t *omutex, mutex_seq newseq,
+               uint64_t oldtid)
 {
        _pthread_mutex *mutex = (_pthread_mutex *)omutex;
-       uint32_t lgenval = (uint32_t)newval64;
-       uint32_t ugenval = (uint32_t)(newval64 >> 32);
 
-       volatile uint64_t *tidaddr;
+       uint64_t *tidaddr;
        MUTEX_GETTID_ADDR(mutex, &tidaddr);
        uint64_t selfid = _pthread_selfid_direct();
 
@@ -673,8 +705,9 @@ _pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t ol
        do {
                uint32_t updateval;
                do {
-                       updateval = __psynch_mutexwait(omutex, lgenval, ugenval, oldtid, mutex->mtxopts.value);
-                       oldtid = *tidaddr;
+                       updateval = __psynch_mutexwait(omutex, newseq.lgenval,
+                                       newseq.ugenval, oldtid, mutex->mtxopts.value);
+                       oldtid = os_atomic_load(tidaddr, relaxed);
                } while (updateval == (uint32_t)-1);
 
                // returns 0 on successful update; in firstfit it may fail with 1
@@ -684,60 +717,53 @@ _pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t ol
        return 0;
 }
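The inner loop above keeps reissuing __psynch_mutexwait while it reports failure ((uint32_t)-1), reloading the owner TID each time; presumably the common cause is an interrupted wait. The same retry shape appears throughout this file. A generic sketch of the pattern, with read(2) standing in for the psynch trap (illustrative only):

        #include <errno.h>
        #include <sys/types.h>
        #include <unistd.h>

        // Keep reissuing a blocking call while it reports interruption;
        // other errors are passed through to the caller.
        static ssize_t read_noeintr(int fd, void *buf, size_t n)
        {
                ssize_t r;
                do {
                        r = read(fd, buf, n);
                } while (r == -1 && errno == EINTR);
                return r;
        }

        int main(void)
        {
                char c;
                // fd -1 fails with EBADF, demonstrating non-EINTR passthrough.
                return (read_noeintr(-1, &c, 1) == -1 && errno == EBADF) ? 0 : 1;
        }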
 
+PTHREAD_NOEXPORT PTHREAD_NOINLINE
 int
 _pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
 {
-       int res;
+       int res, recursive = 0;
        _pthread_mutex *mutex = (_pthread_mutex *)omutex;
 
        res = _pthread_mutex_check_init(omutex);
-       if (res != 0) {
-               return res;
-       }
+       if (res != 0) return res;
 
-       uint64_t oldtid;
-       volatile uint64_t *tidaddr;
+       mutex_seq *seqaddr;
+       MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
+
+       mutex_seq oldseq, newseq;
+       mutex_seq_load(seqaddr, &oldseq);
+
+       uint64_t *tidaddr;
        MUTEX_GETTID_ADDR(mutex, &tidaddr);
-       uint64_t selfid = _pthread_selfid_direct();
+       uint64_t oldtid, selfid = _pthread_selfid_direct();
 
        if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
-               if (*tidaddr == selfid) {
+               if (os_atomic_load(tidaddr, relaxed) == selfid) {
                        if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
                                if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
                                        mutex->mtxopts.options.lock_count++;
-                                       PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
+                                       recursive = 1;
                                        res = 0;
                                } else {
                                        res = EAGAIN;
-                                       PLOCKSTAT_MUTEX_ERROR(omutex, res);
                                }
                        } else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
                                // <rdar://problem/16261552> as per OpenGroup, trylock cannot
                                // return EDEADLK on a deadlock, it should return EBUSY.
                                res = EBUSY;
-                               PLOCKSTAT_MUTEX_ERROR(omutex, res);
                        } else  { /* PTHREAD_MUTEX_ERRORCHECK */
                                res = EDEADLK;
-                               PLOCKSTAT_MUTEX_ERROR(omutex, res);
                        }
-                       return res;
+                       goto out;
                }
        }
 
-       uint64_t oldval64, newval64;
-       volatile uint64_t *seqaddr;
-       MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
-
-       uint32_t lgenval, ugenval;
-       bool gotlock = false;
-
+       bool gotlock;
        do {
-               oldval64 = *seqaddr;
-               oldtid = *tidaddr;
-               lgenval = (uint32_t)oldval64;
-               ugenval = (uint32_t)(oldval64 >> 32);
+               newseq = oldseq;
+               oldtid = os_atomic_load(tidaddr, relaxed);
 
-               gotlock = ((lgenval & PTH_RWL_EBIT) == 0);
+               gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);
 
                if (trylock && !gotlock) {
                        // A trylock on a held lock will fail immediately. But since
@@ -746,40 +772,39 @@ _pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
                } else {
                        // Increment the lock sequence number and force the lock into E+K
                        // mode, whether "gotlock" is true or not.
-                       lgenval += PTHRW_INC;
-                       lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
+                       newseq.lgenval += PTHRW_INC;
+                       newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
                }
-
-               newval64 = (((uint64_t)ugenval) << 32);
-               newval64 |= lgenval;
-               
-               // Set S and B bit
-       } while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));
+       } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));
 
        if (gotlock) {
                os_atomic_store(tidaddr, selfid, relaxed);
                res = 0;
-               DEBUG_TRACE(psynch_mutex_ulock, omutex, lgenval, ugenval, selfid);
+               DEBUG_TRACE(psynch_mutex_ulock, omutex, newseq.lgenval,
+                               newseq.ugenval, selfid);
-               PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);
        } else if (trylock) {
                res = EBUSY;
-               DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, lgenval, ugenval, oldtid);
-               PLOCKSTAT_MUTEX_ERROR(omutex, res);
+               DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, newseq.lgenval,
+                               newseq.ugenval, oldtid);
        } else {
-               res = _pthread_mutex_lock_wait(omutex, newval64, oldtid);
+               res = _pthread_mutex_lock_wait(omutex, newseq, oldtid);
        }
 
        if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
                mutex->mtxopts.options.lock_count = 1;
        }
 
-       PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);
+out:
+#if PLOCKSTAT
+       if (res == 0) {
+               PLOCKSTAT_MUTEX_ACQUIRE(omutex, recursive, 0);
+       } else {
+               PLOCKSTAT_MUTEX_ERROR(omutex, res);
+       }
+#endif
 
        return res;
 }
 
-#endif // OS_UP_VARIANT_ONLY
-
 PTHREAD_ALWAYS_INLINE
 static inline int
 _pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
@@ -791,54 +816,52 @@ _pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
        }
 #endif
        _pthread_mutex *mutex = (_pthread_mutex *)omutex;
-       if (!_pthread_mutex_check_signature_fast(mutex)) {
+       if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
                return _pthread_mutex_lock_slow(omutex, trylock);
        }
 
-       uint64_t oldtid;
-       volatile uint64_t *tidaddr;
+       uint64_t *tidaddr;
        MUTEX_GETTID_ADDR(mutex, &tidaddr);
        uint64_t selfid = _pthread_selfid_direct();
 
-       uint64_t oldval64, newval64;
-       volatile uint64_t *seqaddr;
+       mutex_seq *seqaddr;
        MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
 
-       uint32_t lgenval, ugenval;
-       bool gotlock = false;
+       mutex_seq oldseq, newseq;
+       mutex_seq_load(seqaddr, &oldseq);
+
+       if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
+               return _pthread_mutex_lock_slow(omutex, trylock);
+       }
 
+       bool gotlock;
        do {
-               oldval64 = *seqaddr;
-               oldtid = *tidaddr;
-               lgenval = (uint32_t)oldval64;
-               ugenval = (uint32_t)(oldval64 >> 32);
+               newseq = oldseq;
 
-               gotlock = ((lgenval & PTH_RWL_EBIT) == 0);
+               gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);
 
                if (trylock && !gotlock) {
                        // A trylock on a held lock will fail immediately. But since
                        // we did not load the sequence words atomically, perform a
                        // no-op CAS64 to ensure that nobody has unlocked concurrently.
-               } else {
+               } else if (os_likely(gotlock)) {
-                       // Increment the lock sequence number and force the lock into E+K
-                       // mode, whether "gotlock" is true or not.
+                       // Increment the lock sequence number and force the lock
+                       // into E+K mode.
-                       lgenval += PTHRW_INC;
-                       lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
+                       newseq.lgenval += PTHRW_INC;
+                       newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
+               } else {
+                       return _pthread_mutex_lock_slow(omutex, trylock);
                }
+       } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+                       acquire)));
 
-               newval64 = (((uint64_t)ugenval) << 32);
-               newval64 |= lgenval;
-
-               // Set S and B bit
-       } while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));
-
-       if (os_fastpath(gotlock)) {
+       if (os_likely(gotlock)) {
                os_atomic_store(tidaddr, selfid, relaxed);
                return 0;
        } else if (trylock) {
                return EBUSY;
        } else {
-               return _pthread_mutex_lock_wait(omutex, newval64, oldtid);
+               __builtin_trap();
        }
 }
 
@@ -856,7 +879,6 @@ pthread_mutex_trylock(pthread_mutex_t *mutex)
        return _pthread_mutex_lock(mutex, true);
 }
 
-#ifndef OS_UP_VARIANT_ONLY
 /*
  * Unlock a mutex.
  * TODO: Priority inheritance stuff
@@ -864,19 +886,19 @@ pthread_mutex_trylock(pthread_mutex_t *mutex)
 
 PTHREAD_NOINLINE
 static int
-_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, uint64_t newval64, uint32_t flags)
+_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, mutex_seq newseq,
+               uint32_t flags)
 {
        int res;
        _pthread_mutex *mutex = (_pthread_mutex *)omutex;
-       uint32_t lgenval = (uint32_t)newval64;
-       uint32_t ugenval = (uint32_t)(newval64 >> 32);
 
        uint32_t updateval;
-       int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
-       volatile uint64_t *tidaddr;
+
+       uint64_t *tidaddr;
        MUTEX_GETTID_ADDR(mutex, &tidaddr);
 
-       updateval = __psynch_mutexdrop(omutex, lgenval, ugenval, *tidaddr, flags);
+       updateval = __psynch_mutexdrop(omutex, newseq.lgenval, newseq.ugenval,
+                       os_atomic_load(tidaddr, relaxed), flags);
 
        if (updateval == (uint32_t)-1) {
                res = errno;
@@ -885,53 +907,47 @@ _pthread_mutex_unlock_drop(pthread_mutex_t *omutex, uint64_t newval64, uint32_t
                        res = 0;
                }
                if (res != 0) {
-                       PTHREAD_ABORT("__p_mutexdrop failed with error %d", res);
+                       PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
                }
                return res;
-       } else if (firstfit == 1) {
-               if ((updateval & PTH_RWL_PBIT) != 0) {
-                       __mtx_markprepost(mutex, updateval, firstfit);
-               }
+       } else if ((mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT)
+                       && (updateval & PTH_RWL_PBIT)) {
+               return _pthread_mutex_markprepost(mutex, updateval);
        }
 
        return 0;
 }
 
+PTHREAD_NOEXPORT PTHREAD_NOINLINE
 int
 _pthread_mutex_unlock_slow(pthread_mutex_t *omutex)
 {
        int res;
        _pthread_mutex *mutex = (_pthread_mutex *)omutex;
-       uint32_t mtxgen, mtxugen, flags;
+       mutex_seq newseq;
+       uint32_t flags;
 
        // Initialize static mutexes for compatibility with misbehaving
        // applications (unlock should not be the first operation on a mutex).
        res = _pthread_mutex_check_init(omutex);
-       if (res != 0) {
-               return res;
-       }
+       if (res != 0) return res;
 
-       res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &mtxgen, &mtxugen);
-       if (res != 0) {
-               return res;
-       }
+       res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &newseq.lgenval,
+                       &newseq.ugenval);
+       if (res != 0) return res;
 
        if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
-               uint64_t newval64;
-               newval64 = (((uint64_t)mtxugen) << 32);
-               newval64 |= mtxgen;
-               return _pthread_mutex_unlock_drop(omutex, newval64, flags);
+               return _pthread_mutex_unlock_drop(omutex, newseq, flags);
        } else {
-               volatile uint64_t *tidaddr;
+               uint64_t *tidaddr;
                MUTEX_GETTID_ADDR(mutex, &tidaddr);
-               DEBUG_TRACE(psynch_mutex_uunlock, omutex, mtxgen, mtxugen, *tidaddr);
+               DEBUG_TRACE(psynch_mutex_uunlock, omutex, newseq.lgenval,
+                               newseq.ugenval, os_atomic_load(tidaddr, relaxed));
        }
 
        return 0;
 }
 
-#endif // OS_UP_VARIANT_ONLY
-
 PTHREAD_NOEXPORT_VARIANT
 int
 pthread_mutex_unlock(pthread_mutex_t *omutex)
@@ -943,58 +959,54 @@ pthread_mutex_unlock(pthread_mutex_t *omutex)
        }
 #endif
        _pthread_mutex *mutex = (_pthread_mutex *)omutex;
-       if (!_pthread_mutex_check_signature_fast(mutex)) {
+       if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
                return _pthread_mutex_unlock_slow(omutex);
        }
 
-       volatile uint64_t *tidaddr;
+       uint64_t *tidaddr;
        MUTEX_GETTID_ADDR(mutex, &tidaddr);
 
-       uint64_t oldval64, newval64;
-       volatile uint64_t *seqaddr;
+       mutex_seq *seqaddr;
        MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
 
-       uint32_t lgenval, ugenval;
-
-       do {
-               oldval64 = *seqaddr;
-               lgenval = (uint32_t)oldval64;
-               ugenval = (uint32_t)(oldval64 >> 32);
+       mutex_seq oldseq, newseq;
+       mutex_seq_load(seqaddr, &oldseq);
 
-               int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters
+       int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
+       if (os_unlikely(numwaiters == 0)) {
+               // spurious unlock (unlock of unlocked lock)
+               return 0;
+       }
 
-               if (numwaiters == 0) {
-                       // spurious unlock; do not touch tid
-               } else {
-                       ugenval += PTHRW_INC;
+       // We're giving up the mutex, so go ahead and update the owner to 0
+       // now; once the CAS below succeeds, there is no stale ownership
+       // information. If the CAS of the seqaddr fails, we may loop, but
+       // it's still valid for the owner to be 0 in the meantime.
+       os_atomic_store(tidaddr, 0, relaxed);
 
-                       if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
-                               // our unlock sequence matches to lock sequence, so if the CAS is successful, the mutex is unlocked
+       do {
+               newseq = oldseq;
+               newseq.ugenval += PTHRW_INC;
 
-                               /* do not reset Ibit, just K&E */
-                               lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
-                       } else {
-                               return _pthread_mutex_unlock_slow(omutex);
-                       }
+               if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
+                               (newseq.ugenval & PTHRW_COUNT_MASK))) {
+                       // our unlock sequence matches the lock sequence, so if the
+                       // CAS is successful, the mutex is unlocked
 
-                       // We're giving up the mutex one way or the other, so go ahead and update the owner
-                       // to 0 so that once the CAS below succeeds, there is no stale ownership information.
-                       // If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
-                       // to be SWITCHING/0
-                       os_atomic_store(tidaddr, 0, relaxed);
+                       // do not reset Ibit, just K&E
+                       newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
+               } else {
+                       return _pthread_mutex_unlock_slow(omutex);
                }
-
-               newval64 = (((uint64_t)ugenval) << 32);
-               newval64 |= lgenval;
-
-       } while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));
+       } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+                       release)));
 
        return 0;
 }
 
-#ifndef OS_UP_VARIANT_ONLY
-
 
+PTHREAD_ALWAYS_INLINE
 static inline int
 _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
                uint32_t static_type)
@@ -1005,7 +1017,7 @@ _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
                if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
                        return EINVAL;
                }
-               mutex->prioceiling = attr->prioceiling;
+               mutex->prioceiling = (int16_t)attr->prioceiling;
                mutex->mtxopts.options.protocol = attr->protocol;
                mutex->mtxopts.options.policy = attr->policy;
                mutex->mtxopts.options.type = attr->type;
@@ -1038,10 +1050,12 @@ _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
        }
        mutex->priority = 0;
 
-       volatile uint64_t *seqaddr;
+       mutex_seq *seqaddr;
        MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
-       volatile uint64_t *tidaddr;
+
+       uint64_t *tidaddr;
        MUTEX_GETTID_ADDR(mutex, &tidaddr);
+
 #if PTHREAD_MUTEX_INIT_UNUSED
        if ((uint32_t*)tidaddr != mutex->m_tid) {
                mutex->mtxopts.options.misalign = 1;
@@ -1050,7 +1064,7 @@ _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
        __builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
 #endif // PTHREAD_MUTEX_INIT_UNUSED
        *tidaddr = 0;
-       *seqaddr = 0;
+       *seqaddr = (mutex_seq){ };
 
        long sig = _PTHREAD_MUTEX_SIG;
        if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
@@ -1063,7 +1077,7 @@ _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
        // For detecting copied mutexes and smashes during debugging
        uint32_t sig32 = (uint32_t)sig;
 #if defined(__LP64__)
-       uintptr_t guard =  ~(uintptr_t)mutex; // use ~ to hide from leaks
+       uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
        __builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
        mutex->_reserved[2] = sig32;
        mutex->_reserved[3] = sig32;
@@ -1079,7 +1093,7 @@ _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
        // the 64bit 'sig' long value in the struct. rdar://problem/21610439
        uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
        uint32_t *sig32_val = (uint32_t*)&sig;
-       *(sig32_ptr+1) = *(sig32_val+1);
+       *(sig32_ptr + 1) = *(sig32_val + 1);
        os_atomic_store(sig32_ptr, *sig32_val, release);
 #else
        os_atomic_store2o(mutex, sig, sig, release);
@@ -1088,6 +1102,7 @@ _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
        return 0;
 }
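The tail of _pthread_mutex_init is a publication pattern: every field is initialized with plain stores, then the signature is written last with release ordering, so any thread that observes a valid signature (via an acquire load) also observes a fully initialized mutex; on 32-bit targets the 64-bit sig is split so the low word lands last. A generic sketch of the pattern, with illustrative types and a hypothetical signature value:

        #include <stdatomic.h>
        #include <stdint.h>

        #define OBJ_SIG 0x4d555458u // hypothetical signature value

        struct obj {
                int data;               // ... ordinary fields ...
                _Atomic uint32_t sig;   // written last, with release ordering
        };

        static void obj_init(struct obj *o)
        {
                o->data = 42; // plain initialization first
                // Publish: everything written above is visible to any thread
                // whose acquire load of sig sees OBJ_SIG.
                atomic_store_explicit(&o->sig, OBJ_SIG, memory_order_release);
        }

        static int obj_is_initialized(struct obj *o)
        {
                return atomic_load_explicit(&o->sig, memory_order_acquire) == OBJ_SIG;
        }

        int main(void)
        {
                struct obj o;
                obj_init(&o);
                return obj_is_initialized(&o) ? 0 : 1;
        }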
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_mutex_destroy(pthread_mutex_t *omutex)
 {
@@ -1097,18 +1112,18 @@ pthread_mutex_destroy(pthread_mutex_t *omutex)
 
        _PTHREAD_LOCK(mutex->lock);
        if (_pthread_mutex_check_signature(mutex)) {
-               uint32_t lgenval, ugenval;
-               uint64_t oldval64;
-               volatile uint64_t *seqaddr;
+               mutex_seq *seqaddr;
                MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
-               volatile uint64_t *tidaddr;
+
+               mutex_seq seq;
+               mutex_seq_load(seqaddr, &seq);
+
+               uint64_t *tidaddr;
                MUTEX_GETTID_ADDR(mutex, &tidaddr);
 
-               oldval64 = *seqaddr;
-               lgenval = (uint32_t)oldval64;
-               ugenval = (uint32_t)(oldval64 >> 32);
-               if ((*tidaddr == (uint64_t)0) &&
-                   ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))) {
+               if ((os_atomic_load(tidaddr, relaxed) == 0) &&
+                               (seq.lgenval & PTHRW_COUNT_MASK) ==
+                               (seq.ugenval & PTHRW_COUNT_MASK)) {
                        mutex->sig = _PTHREAD_NO_SIG;
                        res = 0;
                } else {
@@ -1119,15 +1134,12 @@ pthread_mutex_destroy(pthread_mutex_t *omutex)
                res = 0;
        }
        _PTHREAD_UNLOCK(mutex->lock);
-       
-       return res;     
-}
 
-#endif // OS_UP_VARIANT_ONLY
+       return res;
+}
 
 #endif /* !BUILDING_VARIANT ] */
 
-#ifndef OS_UP_VARIANT_ONLY
 /*
  * Destroy a mutex attribute structure.
  */
@@ -1147,4 +1159,3 @@ pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
        return 0;
 }
 
-#endif // OS_UP_VARIANT_ONLY
diff --git a/src/pthread_mutex_up.c b/src/pthread_mutex_up.c
deleted file mode 100644 (file)
index f983b9a..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (c) 2015 Apple Inc. All rights reserved.
- *
- * @APPLE_APACHE_LICENSE_HEADER_START@
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * @APPLE_APACHE_LICENSE_HEADER_END@
- */
-
-#define OS_ATOMIC_UP 1
-#include "resolver_internal.h"
-
-#ifdef OS_VARIANT_SELECTOR
-#define OS_UP_VARIANT_ONLY 1
-#include "pthread_mutex.c"
-#endif
-
-struct _os_empty_files_are_not_c_files;
-
index 4f461a33b971418c34f2ad18f0e2dcc832e5ab02..85358df6e7fb8d91376f6fcf63ce1e98411d2612 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -17,7 +17,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_LICENSE_HEADER_END@
  */
 /*-
  * $FreeBSD: src/lib/libc_r/uthread/uthread_rwlock.c,v 1.6 2001/04/10 04:19:20 deischen Exp $
  */
 
-/* 
- * POSIX Pthread Library 
+/*
+ * POSIX Pthread Library
  * -- Read Write Lock support
  * 4/24/02: A. Ramesh
  *        Ported from FreeBSD
  */
 
+#include "resolver.h"
 #include "internal.h"
-#include <stdio.h>      /* For printf(). */
+#if DEBUG
+#include <platform/compat.h> // for bzero
+#endif
 
 extern int __unix_conforming;
 
@@ -76,40 +79,286 @@ extern int __unix_conforming;
 #define BLOCK_FAIL_PLOCKSTAT    0
 #define BLOCK_SUCCESS_PLOCKSTAT 1
 
-/* maximum number of times a read lock may be obtained */
-#define        MAX_READ_LOCKS          (INT_MAX - 1) 
+#define PTHREAD_RWLOCK_INIT_UNUSED 1
+
+// maximum number of times a read lock may be obtained
+#define        MAX_READ_LOCKS          (INT_MAX - 1)
+
+union rwlock_seq; // forward declaration
+enum rwlock_seqfields; // forward declaration
+
+PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
+int _pthread_rwlock_lock_slow(pthread_rwlock_t *orwlock, bool readlock,
+               bool trylock);
+
+PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
+int _pthread_rwlock_unlock_slow(pthread_rwlock_t *orwlock,
+               enum rwlock_seqfields updated_seqfields);
+
+
+#if defined(__LP64__)
+#define RWLOCK_USE_INT128 1
+#endif
+
+typedef union rwlock_seq {
+       uint32_t seq[4];
+       struct { uint32_t lcntval; uint32_t rw_seq; uint32_t ucntval; };
+       struct { uint32_t lgen; uint32_t rw_wc; uint32_t ugen; };
+#if RWLOCK_USE_INT128
+       unsigned __int128 seq_LSU;
+       unsigned __int128 _Atomic atomic_seq_LSU;
+#endif
+       struct {
+               uint64_t seq_LS;
+               uint32_t seq_U;
+               uint32_t _pad;
+       };
+       struct {
+               uint64_t _Atomic atomic_seq_LS;
+               uint32_t _Atomic atomic_seq_U;
+               uint32_t _Atomic _atomic_pad;
+       };
+} rwlock_seq;
+
+_Static_assert(sizeof(rwlock_seq) == 4 * sizeof(uint32_t),
+               "Incorrect rwlock_seq size");
+
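On LP64, rwlock_seq packs the three 32-bit sequence words (plus padding) into 16 bytes precisely so that RWLOCK_SEQ_LSU updates can be a single 128-bit CAS. A hedged sketch of that wide CAS using the GCC/Clang __atomic builtins (on x86_64 this needs cmpxchg16b, i.e. building with -mcx16; the union layout below is illustrative):

        #include <stdint.h>

        // Three 32-bit sequence words (+ pad) viewed as one 128-bit unit so
        // a single CAS can update L, S and U together, as RWLOCK_SEQ_LSU does.
        typedef union {
                struct { uint32_t lcntval, rw_seq, ucntval, _pad; };
                unsigned __int128 all;
        } rwseq_u;

        static int cas_all(rwseq_u *p, rwseq_u *expected, rwseq_u desired)
        {
                // Compiles to cmpxchg16b on x86_64 (-mcx16) or an LL/SC pair
                // on arm64; otherwise it falls back to a library call.
                return __atomic_compare_exchange_n(&p->all, &expected->all,
                                desired.all, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
        }

        int main(void)
        {
                rwseq_u s       = { .lcntval = 1, .rw_seq = 2, .ucntval = 3 };
                rwseq_u old     = s;
                rwseq_u new_val = { .lcntval = 4, .rw_seq = 5, .ucntval = 6 };
                return cas_all(&s, &old, new_val) ? 0 : 1;
        }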
+typedef enum rwlock_seqfields {
+       RWLOCK_SEQ_NONE = 0,
+       RWLOCK_SEQ_LS = 1,
+       RWLOCK_SEQ_U = 2,
+       RWLOCK_SEQ_LSU = RWLOCK_SEQ_LS | RWLOCK_SEQ_U,
+} rwlock_seqfields;
+
+#if PTHREAD_DEBUG_LOG
+#define RWLOCK_DEBUG_SEQ(op, rwlock, oldseq, newseq, updateval, f) \
+               if (_pthread_debuglog >= 0) { \
+               _simple_dprintf(_pthread_debuglog, "rw_" #op " %p tck %7llu thr %llx " \
+               "L %x -> %x S %x -> %x U %x -> %x updt %x\n", rwlock, \
+               mach_absolute_time() - _pthread_debugstart, _pthread_selfid_direct(), \
+               (f) & RWLOCK_SEQ_LS ? (oldseq).lcntval : 0, \
+               (f) & RWLOCK_SEQ_LS ? (newseq).lcntval : 0, \
+               (f) & RWLOCK_SEQ_LS ? (oldseq).rw_seq  : 0, \
+               (f) & RWLOCK_SEQ_LS ? (newseq).rw_seq  : 0, \
+               (f) & RWLOCK_SEQ_U  ? (oldseq).ucntval : 0, \
+               (f) & RWLOCK_SEQ_U  ? (newseq).ucntval : 0, updateval); }
+#else
+#define RWLOCK_DEBUG_SEQ(op, rwlock, oldseq, newseq, updateval, f)
+#endif
+
+#if !__LITTLE_ENDIAN__
+#error RWLOCK_GETSEQ_ADDR assumes little endian layout of sequence words
+#endif
+
+PTHREAD_ALWAYS_INLINE
+static inline void
+RWLOCK_GETSEQ_ADDR(_pthread_rwlock *rwlock, rwlock_seq **seqaddr)
+{
+       // 128-bit aligned address inside rw_seq & rw_mis arrays
+       *seqaddr = (void*)(((uintptr_t)rwlock->rw_seq + 0xful) & ~0xful);
+}
 
-#include <platform/string.h>
-#include <platform/compat.h>
+PTHREAD_ALWAYS_INLINE
+static inline void
+RWLOCK_GETTID_ADDR(_pthread_rwlock *rwlock, uint64_t **tidaddr)
+{
+       // 64-bit aligned address inside rw_tid array (&rw_tid[0] for aligned lock)
+       *tidaddr = (void*)(((uintptr_t)rwlock->rw_tid + 0x7ul) & ~0x7ul);
+}
 
-__private_extern__ int __pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr);
-__private_extern__ void _pthread_rwlock_updateval(_pthread_rwlock *rwlock, uint32_t updateval);
+PTHREAD_ALWAYS_INLINE
+static inline void
+rwlock_seq_load(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
+               const rwlock_seqfields seqfields)
+{
+       switch (seqfields) {
+       case RWLOCK_SEQ_LSU:
+#if RWLOCK_USE_INT128
+               oldseqval->seq_LSU = seqaddr->seq_LSU;
+#else
+               oldseqval->seq_LS = seqaddr->seq_LS;
+               oldseqval->seq_U = seqaddr->seq_U;
+#endif
+               break;
+       case RWLOCK_SEQ_LS:
+               oldseqval->seq_LS = seqaddr->seq_LS;
+               break;
+#if DEBUG // unused
+       case RWLOCK_SEQ_U:
+               oldseqval->seq_U = seqaddr->seq_U;
+               break;
+#endif // unused
+       default:
+               __builtin_trap();
+       }
+}
 
-static void
-RWLOCK_GETSEQ_ADDR(_pthread_rwlock *rwlock,
-                  volatile uint32_t **lcntaddr,
-                  volatile uint32_t **ucntaddr,
-                  volatile uint32_t **seqaddr)
+PTHREAD_ALWAYS_INLINE
+static inline void
+rwlock_seq_atomic_load_relaxed(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
+               const rwlock_seqfields seqfields)
 {
-       if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
-               if (rwlock->misalign) {
-                       *lcntaddr = &rwlock->rw_seq[1];
-                       *seqaddr = &rwlock->rw_seq[2];
-                       *ucntaddr = &rwlock->rw_seq[3];
+       switch (seqfields) {
+       case RWLOCK_SEQ_LSU:
+#if RWLOCK_USE_INT128
+               oldseqval->seq_LSU = os_atomic_load(&seqaddr->atomic_seq_LSU, relaxed);
+#else
+               oldseqval->seq_LS = os_atomic_load(&seqaddr->atomic_seq_LS, relaxed);
+               oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
+#endif
+               break;
+       case RWLOCK_SEQ_LS:
+               oldseqval->seq_LS = os_atomic_load(&seqaddr->atomic_seq_LS, relaxed);
+               break;
+#if DEBUG // unused
+       case RWLOCK_SEQ_U:
+               oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
+               break;
+#endif // unused
+       default:
+               __builtin_trap();
+       }
+}
+
+#define rwlock_seq_atomic_load(seqaddr, oldseqval, seqfields, m) \
+               rwlock_seq_atomic_load_##m(seqaddr, oldseqval, seqfields)
+
+PTHREAD_ALWAYS_INLINE
+static inline rwlock_seqfields
+rwlock_seq_atomic_cmpxchgv_relaxed(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
+               rwlock_seq *newseqval, const rwlock_seqfields seqfields)
+{
+       bool r;
+       rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
+       switch (seqfields) {
+#if DEBUG // unused
+       case RWLOCK_SEQ_LSU:
+#if RWLOCK_USE_INT128
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LSU, oldseqval->seq_LSU,
+                               newseqval->seq_LSU, &oldseqval->seq_LSU, relaxed);
+               if (r) updated_seqfields = RWLOCK_SEQ_LSU;
+#else
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
+                               newseqval->seq_LS, &oldseqval->seq_LS, relaxed);
+               if (r) {
+                       r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
+                                       newseqval->seq_U, &oldseqval->seq_U, relaxed);
+                       if (!r) oldseqval->seq_LS = newseqval->seq_LS;
+                       updated_seqfields = r ? RWLOCK_SEQ_LSU : RWLOCK_SEQ_LS;
                } else {
-                       *lcntaddr = &rwlock->rw_seq[0];
-                       *seqaddr = &rwlock->rw_seq[1];
-                       *ucntaddr = &rwlock->rw_seq[2];
+                       oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
                }
-       } else {
-               *lcntaddr = rwlock->rw_lcntaddr;
-               *seqaddr = rwlock->rw_seqaddr;
-               *ucntaddr = rwlock->rw_ucntaddr;
+#endif
+               break;
+       case RWLOCK_SEQ_U:
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
+                               newseqval->seq_U, &oldseqval->seq_U, relaxed);
+               if (r) updated_seqfields = RWLOCK_SEQ_U;
+               break;
+#endif // unused
+       case RWLOCK_SEQ_LS:
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
+                               newseqval->seq_LS, &oldseqval->seq_LS, relaxed);
+               if (r) updated_seqfields = RWLOCK_SEQ_LS;
+               break;
+       default:
+               __builtin_trap();
+       }
+       return updated_seqfields;
+}
+
+PTHREAD_ALWAYS_INLINE
+static inline rwlock_seqfields
+rwlock_seq_atomic_cmpxchgv_acquire(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
+               rwlock_seq *newseqval, const rwlock_seqfields seqfields)
+{
+       bool r;
+       rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
+       switch (seqfields) {
+#if DEBUG // unused
+       case RWLOCK_SEQ_LSU:
+#if RWLOCK_USE_INT128
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LSU, oldseqval->seq_LSU,
+                               newseqval->seq_LSU, &oldseqval->seq_LSU, acquire);
+               if (r) updated_seqfields = RWLOCK_SEQ_LSU;
+#else
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
+                               newseqval->seq_LS, &oldseqval->seq_LS, acquire);
+               if (r) {
+                       r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
+                                       newseqval->seq_U, &oldseqval->seq_U, relaxed);
+                       if (!r) oldseqval->seq_LS = newseqval->seq_LS;
+                       updated_seqfields = r ? RWLOCK_SEQ_LSU : RWLOCK_SEQ_LS;
+               } else {
+                       oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
+               }
+#endif
+               break;
+       case RWLOCK_SEQ_U:
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
+                               newseqval->seq_U, &oldseqval->seq_U, acquire);
+               if (r) updated_seqfields = RWLOCK_SEQ_U;
+               break;
+#endif // unused
+       case RWLOCK_SEQ_LS:
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
+                               newseqval->seq_LS, &oldseqval->seq_LS, acquire);
+               if (r) updated_seqfields = RWLOCK_SEQ_LS;
+               break;
+       default:
+               __builtin_trap();
        }
+       return updated_seqfields;
 }
 
+PTHREAD_ALWAYS_INLINE
+static inline rwlock_seqfields
+rwlock_seq_atomic_cmpxchgv_release(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
+               rwlock_seq *newseqval, const rwlock_seqfields seqfields)
+{
+       bool r;
+       rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
+       switch (seqfields) {
+       case RWLOCK_SEQ_LSU:
+#if RWLOCK_USE_INT128
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LSU, oldseqval->seq_LSU,
+                               newseqval->seq_LSU, &oldseqval->seq_LSU, release);
+               if (r) updated_seqfields = RWLOCK_SEQ_LSU;
+#else
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
+                               newseqval->seq_U, &oldseqval->seq_U, release);
+               if (r) {
+                       r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
+                                       newseqval->seq_LS, &oldseqval->seq_LS, relaxed);
+                       if (!r) oldseqval->seq_U = newseqval->seq_U;
+                       updated_seqfields = r ? RWLOCK_SEQ_LSU : RWLOCK_SEQ_U;
+               } else {
+                       oldseqval->seq_LS = os_atomic_load(&seqaddr->atomic_seq_LS, relaxed);
+               }
+#endif
+               break;
+       case RWLOCK_SEQ_LS:
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
+                               newseqval->seq_LS, &oldseqval->seq_LS, release);
+               if (r) updated_seqfields = RWLOCK_SEQ_LS;
+               break;
+#if DEBUG // unused
+       case RWLOCK_SEQ_U:
+               r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
+                               newseqval->seq_U, &oldseqval->seq_U, release);
+               if (r) updated_seqfields = RWLOCK_SEQ_U;
+               break;
+#endif // unused
+       default:
+               __builtin_trap();
+       }
+       return updated_seqfields;
+}
+
+#define rwlock_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, seqfields, m)\
+               rwlock_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval, seqfields)
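
Taken together, the cmpxchgv helpers above emulate a single-copy-atomic update of all three sequence words on targets without a 128-bit CAS: a 64-bit CAS over the L and S words is chained with a 32-bit CAS over U, and the return value is a mask of the fields that actually landed, so callers retry only the remainder. A minimal standalone sketch of the acquire flavor in C11 atomics (illustrative names, not the libpthread ones):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Which words a commit attempt actually published. */
    typedef enum { SEQ_NONE = 0, SEQ_LS = 1, SEQ_U = 2, SEQ_LSU = 3 } seqfields_t;

    typedef struct {
        _Atomic uint64_t ls; /* L and S words, CASed as one 64-bit unit */
        _Atomic uint32_t u;  /* U word, CASed separately */
    } seq_t;

    static seqfields_t
    seq_cmpxchg_lsu_acquire(seq_t *s, uint64_t *old_ls, uint32_t *old_u,
            uint64_t new_ls, uint32_t new_u)
    {
        if (!atomic_compare_exchange_strong_explicit(&s->ls, old_ls, new_ls,
                memory_order_acquire, memory_order_relaxed)) {
            /* LS lost the race; the CAS refreshed *old_ls, refresh U too. */
            *old_u = atomic_load_explicit(&s->u, memory_order_relaxed);
            return SEQ_NONE;
        }
        if (!atomic_compare_exchange_strong_explicit(&s->u, old_u, new_u,
                memory_order_relaxed, memory_order_relaxed)) {
            /* LS already published: memory now holds new_ls, so report a
             * partial commit and let the caller retry just the U word. */
            *old_ls = new_ls;
            return SEQ_LS;
        }
        return SEQ_LSU;
    }
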
+
 #ifndef BUILDING_VARIANT /* [ */
-static uint32_t modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits);
 
 int
 pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
@@ -119,7 +368,7 @@ pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
        return 0;
 }
 
-int    
+int
 pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
 {
        attr->sig = _PTHREAD_NO_SIG;
@@ -144,7 +393,8 @@ pthread_rwlockattr_setpshared(pthread_rwlockattr_t * attr, int pshared)
        int res = EINVAL;
        if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
 #if __DARWIN_UNIX03
-               if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
+               if (( pshared == PTHREAD_PROCESS_PRIVATE) ||
+                               (pshared == PTHREAD_PROCESS_SHARED))
 #else /* __DARWIN_UNIX03 */
                if ( pshared == PTHREAD_PROCESS_PRIVATE)
 #endif /* __DARWIN_UNIX03 */
@@ -156,16 +406,34 @@ pthread_rwlockattr_setpshared(pthread_rwlockattr_t * attr, int pshared)
        return res;
 }
 
-__private_extern__ int
-__pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr)
+#endif /* !BUILDING_VARIANT ] */
+
+PTHREAD_ALWAYS_INLINE
+static inline int
+_pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr)
 {
-       // Force RWLOCK_GETSEQ_ADDR to calculate addresses by setting pshared.
-       rwlock->pshared = PTHREAD_PROCESS_SHARED;
-       rwlock->misalign = (((uintptr_t)&rwlock->rw_seq[0]) & 0x7) != 0;
-       RWLOCK_GETSEQ_ADDR(rwlock, &rwlock->rw_lcntaddr, &rwlock->rw_ucntaddr, &rwlock->rw_seqaddr);
-       *rwlock->rw_lcntaddr = PTHRW_RWLOCK_INIT;
-       *rwlock->rw_seqaddr = PTHRW_RWS_INIT;
-       *rwlock->rw_ucntaddr = 0;
+       uint64_t *tidaddr;
+       RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
+
+       rwlock_seq *seqaddr;
+       RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);
+
+#if PTHREAD_RWLOCK_INIT_UNUSED
+       if ((uint32_t*)tidaddr != rwlock->rw_tid) {
+               rwlock->misalign = 1;
+               __builtin_memset(rwlock->rw_tid, 0xff, sizeof(rwlock->rw_tid));
+       }
+       if ((uint32_t*)seqaddr != rwlock->rw_seq) {
+               __builtin_memset(rwlock->rw_seq, 0xff, sizeof(rwlock->rw_seq));
+       }
+       __builtin_memset(rwlock->rw_mis, 0xff, sizeof(rwlock->rw_mis));
+#endif // PTHREAD_RWLOCK_INIT_UNUSED
+       *tidaddr = 0;
+       *seqaddr = (rwlock_seq){
+               .lcntval = PTHRW_RWLOCK_INIT,
+               .rw_seq = PTHRW_RWS_INIT,
+               .ucntval = 0,
+       };
 
        if (attr != NULL && attr->pshared == PTHREAD_PROCESS_SHARED) {
                rwlock->pshared = PTHREAD_PROCESS_SHARED;
@@ -174,33 +442,56 @@ __pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr)
                rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
                rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
        }
-               
-       rwlock->rw_owner = NULL;
+
+       long sig = _PTHREAD_RWLOCK_SIG;
+
+#if DEBUG
        bzero(rwlock->_reserved, sizeof(rwlock->_reserved));
+#endif
+#if PTHREAD_RWLOCK_INIT_UNUSED
+       // For detecting copied rwlocks and smashes during debugging
+       uint32_t sig32 = (uint32_t)sig;
+       uintptr_t guard = ~(uintptr_t)rwlock; // use ~ to hide from leaks
+       __builtin_memcpy(rwlock->_reserved, &guard, sizeof(guard));
+#define countof(x) (sizeof(x) / sizeof(x[0]))
+       rwlock->_reserved[countof(rwlock->_reserved) - 1] = sig32;
+#if defined(__LP64__)
+       rwlock->_pad = sig32;
+#endif
+#endif // PTHREAD_RWLOCK_INIT_UNUSED
 
        // Ensure all contents are properly set before setting signature.
-       OSMemoryBarrier();
-       rwlock->sig = _PTHREAD_RWLOCK_SIG;
-       
+#if defined(__LP64__)
+       // For binary compatibility reasons we cannot require natural alignment of
+       // the 64bit 'sig' long value in the struct. rdar://problem/21610439
+       uint32_t *sig32_ptr = (uint32_t*)&rwlock->sig;
+       uint32_t *sig32_val = (uint32_t*)&sig;
+       *(sig32_ptr + 1) = *(sig32_val + 1);
+       os_atomic_store(sig32_ptr, *sig32_val, release);
+#else
+       os_atomic_store2o(rwlock, sig, sig, release);
+#endif
+
        return 0;
 }
 
 static uint32_t
-modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits)
+_pthread_rwlock_modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits)
 {
        uint32_t lval = lgenval & PTHRW_BIT_MASK;
        uint32_t uval = updateval & PTHRW_BIT_MASK;
        uint32_t rval, nlval;
 
        nlval = (lval | uval) & ~(PTH_RWL_MBIT);
-       
-       /* reconcile bits on the lock with what kernel needs to set */
+
+       // reconcile bits on the lock with what kernel needs to set
        if ((uval & PTH_RWL_KBIT) == 0 && (lval & PTH_RWL_WBIT) == 0) {
                nlval &= ~PTH_RWL_KBIT;
        }
 
        if (savebits != 0) {
-               if ((savebits & PTH_RWS_WSVBIT) != 0 && (nlval & PTH_RWL_WBIT) == 0 && (nlval & PTH_RWL_EBIT) == 0) {
+               if ((savebits & PTH_RWS_WSVBIT) != 0 && (nlval & PTH_RWL_WBIT) == 0 &&
+                               (nlval & PTH_RWL_EBIT) == 0) {
                        nlval |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
                }
        }
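
The split signature store at the end of _pthread_rwlock_init above is the usual idiom for publishing a 64-bit value that may be only 4-byte aligned: store the high word plainly, then release-store the low word that readers validate, so the release pairs with the readers' acquire. A standalone sketch of the idiom (hypothetical type and function names):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { _Atomic uint32_t w[2]; } split_sig_t;

    static void
    publish_sig(split_sig_t *s, uint64_t sig)
    {
        atomic_store_explicit(&s->w[1], (uint32_t)(sig >> 32),
                memory_order_relaxed);
        /* The release on the low word makes all prior initialization
         * visible to any reader that acquire-loads it and sees the sig. */
        atomic_store_explicit(&s->w[0], (uint32_t)sig, memory_order_release);
    }

    static bool
    sig_is_set(split_sig_t *s, uint64_t sig)
    {
        if (atomic_load_explicit(&s->w[0], memory_order_acquire) !=
                (uint32_t)sig)
            return false;
        return atomic_load_explicit(&s->w[1], memory_order_relaxed) ==
                (uint32_t)(sig >> 32);
    }
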
@@ -208,102 +499,97 @@ modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits)
        return(rval);
 }
 
-__private_extern__ void
+PTHREAD_ALWAYS_INLINE
+static inline void
 _pthread_rwlock_updateval(_pthread_rwlock *rwlock, uint32_t updateval)
 {
        bool isoverlap = (updateval & PTH_RWL_MBIT) != 0;
 
-       uint64_t oldval64, newval64;
-       volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;
-
-       /* TBD: restore U bit */
-       RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);
+       // TBD: restore U bit
+       rwlock_seq *seqaddr;
+       RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);
 
+       rwlock_seq oldseq, newseq;
+       rwlock_seq_load(seqaddr, &oldseq, RWLOCK_SEQ_LS);
        do {
-               uint32_t lcntval = *lcntaddr;
-               uint32_t rw_seq = *seqaddr;
-               
-               uint32_t newval, newsval;
-               if (isoverlap || is_rws_setunlockinit(rw_seq) != 0) {
+               newseq = oldseq;
+               if (isoverlap || is_rws_setunlockinit(oldseq.rw_seq) != 0) {
                        // Set S word to the specified value
-                       uint32_t savebits = (rw_seq & PTHRW_RWS_SAVEMASK);
-                       newval = modbits(lcntval, updateval, savebits);
-                       newsval = rw_seq + (updateval & PTHRW_COUNT_MASK);
+                       uint32_t savebits = (oldseq.rw_seq & PTHRW_RWS_SAVEMASK);
+                       newseq.lcntval = _pthread_rwlock_modbits(oldseq.lcntval, updateval,
+                                       savebits);
+                       newseq.rw_seq += (updateval & PTHRW_COUNT_MASK);
                        if (!isoverlap) {
-                               newsval &= PTHRW_COUNT_MASK;
+                               newseq.rw_seq &= PTHRW_COUNT_MASK;
                        }
-                       newsval &= ~PTHRW_RWS_SAVEMASK;
-               } else {
-                       newval = lcntval;
-                       newsval = rw_seq;
+                       newseq.rw_seq &= ~PTHRW_RWS_SAVEMASK;
                }
-
-               oldval64 = (((uint64_t)rw_seq) << 32);
-               oldval64 |= lcntval;
-               newval64 = (((uint64_t)newsval) << 32);
-               newval64 |= newval;
-       } while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE);
+       } while (!rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+                       RWLOCK_SEQ_LS, relaxed));
+       RWLOCK_DEBUG_SEQ(update, rwlock, oldseq, newseq, updateval, RWLOCK_SEQ_LS);
 }
 
-#endif /* !BUILDING_VARIANT ] */
-
-static int
+#if __DARWIN_UNIX03
+PTHREAD_ALWAYS_INLINE
+static inline int
 _pthread_rwlock_check_busy(_pthread_rwlock *rwlock)
 {
        int res = 0;
-       
-       volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;
-       
-       RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);
-       
-       uint32_t rw_lcnt = *lcntaddr;
-       uint32_t rw_ucnt = *ucntaddr;
-       
-       if ((rw_lcnt & PTHRW_COUNT_MASK) != rw_ucnt) {
+
+       rwlock_seq *seqaddr;
+       RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);
+
+       rwlock_seq seq;
+       rwlock_seq_atomic_load(seqaddr, &seq, RWLOCK_SEQ_LSU, relaxed);
+       if ((seq.lcntval & PTHRW_COUNT_MASK) != seq.ucntval) {
                res = EBUSY;
        }
-       
+
        return res;
 }
+#endif /* __DARWIN_UNIX03 */
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
 {
        int res = 0;
        _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
 
-       if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
+       _PTHREAD_LOCK(rwlock->lock);
+       if (_pthread_rwlock_check_signature(rwlock)) {
 #if __DARWIN_UNIX03
                res = _pthread_rwlock_check_busy(rwlock);
 #endif /* __DARWIN_UNIX03 */
-       } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG_init) {
+       } else if (!_pthread_rwlock_check_signature_init(rwlock)) {
                res = EINVAL;
        }
        if (res == 0) {
                rwlock->sig = _PTHREAD_NO_SIG;
        }
+       _PTHREAD_UNLOCK(rwlock->lock);
        return res;
 }
 
-
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_rwlock_init(pthread_rwlock_t *orwlock, const pthread_rwlockattr_t *attr)
 {
        int res = 0;
        _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
-       
+
 #if __DARWIN_UNIX03
        if (attr && attr->sig != _PTHREAD_RWLOCK_ATTR_SIG) {
                res = EINVAL;
        }
 
-       if (res == 0 && rwlock->sig == _PTHREAD_RWLOCK_SIG) {
+       if (res == 0 && _pthread_rwlock_check_signature(rwlock)) {
                res = _pthread_rwlock_check_busy(rwlock);
        }
 #endif
        if (res == 0) {
                _PTHREAD_LOCK_INIT(rwlock->lock);
-               res = __pthread_rwlock_init(rwlock, attr);
+               res = _pthread_rwlock_init(rwlock, attr);
        }
        return res;
 }
@@ -315,15 +601,15 @@ _pthread_rwlock_check_init_slow(pthread_rwlock_t *orwlock)
        int res = EINVAL;
        _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
 
-       if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+       if (_pthread_rwlock_check_signature_init(rwlock)) {
                _PTHREAD_LOCK(rwlock->lock);
-               if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
-                       res = __pthread_rwlock_init(rwlock, NULL);
-               } else if (rwlock->sig == _PTHREAD_RWLOCK_SIG){
+               if (_pthread_rwlock_check_signature_init(rwlock)) {
+                       res = _pthread_rwlock_init(rwlock, NULL);
+               } else if (_pthread_rwlock_check_signature(rwlock)){
                        res = 0;
                }
                _PTHREAD_UNLOCK(rwlock->lock);
-       } else if (rwlock->sig == _PTHREAD_RWLOCK_SIG){
+       } else if (_pthread_rwlock_check_signature(rwlock)){
                res = 0;
        }
        if (res != 0) {
@@ -333,164 +619,265 @@ _pthread_rwlock_check_init_slow(pthread_rwlock_t *orwlock)
 }
 
 PTHREAD_ALWAYS_INLINE
-static int
+static inline int
 _pthread_rwlock_check_init(pthread_rwlock_t *orwlock)
 {
        int res = 0;
        _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
 
-       if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+       if (!_pthread_rwlock_check_signature(rwlock)) {
                return _pthread_rwlock_check_init_slow(orwlock);
        }
        return res;
 }
 
+PTHREAD_NOINLINE
 static int
-_pthread_rwlock_lock(pthread_rwlock_t *orwlock, bool readlock, bool trylock)
+_pthread_rwlock_lock_wait(pthread_rwlock_t *orwlock, bool readlock,
+               rwlock_seq newseq)
 {
        int res;
        _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
 
-       res = _pthread_rwlock_check_init(orwlock);
-       if (res != 0) {
-               return res;
-       }
+#ifdef PLOCKSTAT
+       int plockstat = readlock ? READ_LOCK_PLOCKSTAT : WRITE_LOCK_PLOCKSTAT;
+#endif
 
-       uint64_t oldval64, newval64;
-       volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;
-       RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);
+       if (readlock) {
+               RWLOCK_DEBUG_SEQ(rdlock, rwlock, oldseq, newseq, gotlock,
+                               RWLOCK_SEQ_LSU);
+       } else {
+               RWLOCK_DEBUG_SEQ(wrlock, rwlock, oldseq, newseq, gotlock,
+                               RWLOCK_SEQ_LSU);
+       }
 
-       uint32_t newval, newsval;
-       uint32_t lcntval, ucntval, rw_seq;
+       uint32_t updateval;
 
-       bool gotlock;
-       bool retry;
-       int retry_count = 0;
+       PLOCKSTAT_RW_BLOCK(orwlock, plockstat);
 
        do {
-               res = 0;
-               retry = false;
-               
-               lcntval = *lcntaddr;
-               ucntval = *ucntaddr;
-               rw_seq = *seqaddr;
+               if (readlock) {
+                       updateval = __psynch_rw_rdlock(orwlock, newseq.lcntval,
+                                       newseq.ucntval, newseq.rw_seq, rwlock->rw_flags);
+               } else {
+                       updateval = __psynch_rw_wrlock(orwlock, newseq.lcntval,
+                                       newseq.ucntval, newseq.rw_seq, rwlock->rw_flags);
+               }
+               if (updateval == (uint32_t)-1) {
+                       res = errno;
+               } else {
+                       res = 0;
+               }
+       } while (res == EINTR);
+
+       if (res == 0) {
+               _pthread_rwlock_updateval(rwlock, updateval);
+               PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_SUCCESS_PLOCKSTAT);
+       } else {
+               PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_FAIL_PLOCKSTAT);
+               PTHREAD_ABORT("kernel rwlock returned unknown error %x: "
+                               "tid %llx\n", res, _pthread_selfid_direct());
+       }
+
+       return res;
+}
+
+PTHREAD_NOEXPORT PTHREAD_NOINLINE
+int
+_pthread_rwlock_lock_slow(pthread_rwlock_t *orwlock, bool readlock,
+               bool trylock)
+{
+       int res;
+       _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
+
+#ifdef PLOCKSTAT
+       int plockstat = readlock ? READ_LOCK_PLOCKSTAT : WRITE_LOCK_PLOCKSTAT;
+#endif
+
+       res = _pthread_rwlock_check_init(orwlock);
+       if (res != 0) return res;
+
+       rwlock_seq *seqaddr;
+       RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);
+
+       rwlock_seq oldseq, newseq;
+       rwlock_seq_atomic_load(seqaddr, &oldseq, RWLOCK_SEQ_LSU, relaxed);
 
 #if __DARWIN_UNIX03
-               if (is_rwl_ebit_set(lcntval)) {
-                       if (rwlock->rw_owner == pthread_self()) {
-                               res = EDEADLK;
-                               break;
-                       }
-               }
+       uint64_t *tidaddr;
+       RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
+       uint64_t selfid = _pthread_selfid_direct();
+       if (is_rwl_ebit_set(oldseq.lcntval)) {
+               if (os_atomic_load(tidaddr, relaxed) == selfid) return EDEADLK;
+       }
 #endif /* __DARWIN_UNIX03 */
 
-               oldval64 = (((uint64_t)rw_seq) << 32);
-               oldval64 |= lcntval;
+       int retry_count;
+       bool gotlock;
+       do {
+               retry_count = 0;
+retry:
+               newseq = oldseq;
 
-               /* if l bit is on or u and k bit is clear, acquire lock in userland */
+               // if W and K bits are clear or U bit is on, acquire lock in userland
                if (readlock) {
-                       gotlock = can_rwl_readinuser(lcntval);
+                       gotlock = (oldseq.lcntval & (PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0;
                } else {
-                       gotlock = (lcntval & PTH_RWL_RBIT) != 0;
+                       gotlock = (oldseq.lcntval & PTH_RWL_UBIT) != 0;
                }
 
-               uint32_t bits = 0;
-               uint32_t mask = ~0ul;
-               
-               newval = lcntval + PTHRW_INC;
-
-               if (gotlock) {
+               if (trylock && !gotlock) {
+                       // A trylock on a held lock will fail immediately. But since
+                       // we did not load the sequence words atomically, perform a
+                       // no-op CAS to ensure that nobody has unlocked concurrently.
+               } else if (gotlock) {
                        if (readlock) {
-                               if (diff_genseq(lcntval, ucntval) >= PTHRW_MAX_READERS) {
-                                       /* since ucntval may be newer, just redo */
+                               if (diff_genseq(oldseq.lcntval, oldseq.ucntval) >=
+                                               PTHRW_MAX_READERS) {
+                                       // since ucntval may be newer, just redo
                                        retry_count++;
                                        if (retry_count > 1024) {
+                                               gotlock = false;
                                                res = EAGAIN;
-                                               break;
+                                               goto out;
                                        } else {
                                                sched_yield();
-                                               retry = true;
-                                               continue;
+                                               rwlock_seq_atomic_load(seqaddr, &oldseq,
+                                                               RWLOCK_SEQ_LSU, relaxed);
+                                               goto retry;
                                        }
                                }
-                               
-                               // Need to update L (remove R bit) and S word
-                               mask = PTH_RWLOCK_RESET_RBIT;
+                               // Need to update L (remove U bit) and S word
+                               newseq.lcntval &= ~PTH_RWL_UBIT;
                        } else {
-                               mask = PTHRW_COUNT_MASK;
-                               bits = PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
+                               newseq.lcntval &= PTHRW_COUNT_MASK;
+                               newseq.lcntval |= PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
                        }
-                       newsval = rw_seq + PTHRW_INC;
-               } else if (trylock) {
-                       res = EBUSY;
-                       break;
+                       newseq.lcntval += PTHRW_INC;
+                       newseq.rw_seq  += PTHRW_INC;
                } else {
                        if (readlock) {
-                               // Need to block in kernel. Remove R bit.
-                               mask = PTH_RWLOCK_RESET_RBIT;
+                               // Need to block in kernel. Remove U bit.
+                               newseq.lcntval &= ~PTH_RWL_UBIT;
                        } else {
-                               bits = PTH_RWL_KBIT | PTH_RWL_WBIT;
+                               newseq.lcntval |= PTH_RWL_KBIT | PTH_RWL_WBIT;
                        }
-                       newsval = rw_seq;
-                       if (is_rws_setseq(rw_seq)) {
-                               newsval &= PTHRW_SW_Reset_BIT_MASK;
-                               newsval |= (newval & PTHRW_COUNT_MASK);
+                       newseq.lcntval += PTHRW_INC;
+                       if (is_rws_setseq(oldseq.rw_seq)) {
+                               // Clear the S bit and set S to L
+                               newseq.rw_seq &= (PTHRW_BIT_MASK & ~PTH_RWS_SBIT);
+                               newseq.rw_seq |= (oldseq.lcntval & PTHRW_COUNT_MASK);
                        }
                }
-               newval = (newval & mask) | bits;
-               newval64 = (((uint64_t)newsval) << 32);
-               newval64 |= newval;
+       } while (!rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+                       RWLOCK_SEQ_LS, acquire));
 
-       } while (retry || OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE);
+       if (gotlock) {
+#if __DARWIN_UNIX03
+               if (!readlock) os_atomic_store(tidaddr, selfid, relaxed);
+#endif /* __DARWIN_UNIX03 */
+               res = 0;
+       } else if (trylock) {
+               res = EBUSY;
+       } else {
+               res = _pthread_rwlock_lock_wait(orwlock, readlock, newseq);
+       }
 
+out:
 #ifdef PLOCKSTAT
-       int plockstat = readlock ? READ_LOCK_PLOCKSTAT : WRITE_LOCK_PLOCKSTAT;
+       if (res == 0) {
+               PLOCKSTAT_RW_ACQUIRE(orwlock, plockstat);
+       } else {
+               PLOCKSTAT_RW_ERROR(orwlock, plockstat, res);
+       }
+#endif
+
+       return res;
+}
+
+PTHREAD_ALWAYS_INLINE
+static inline int
+_pthread_rwlock_lock(pthread_rwlock_t *orwlock, bool readlock, bool trylock)
+{
+       _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
+#if PLOCKSTAT
+       if (PLOCKSTAT_RW_ACQUIRE_ENABLED() || PLOCKSTAT_RW_ERROR_ENABLED()) {
+               return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
+       }
 #endif
 
-       // Unable to acquire in userland, transition to kernel.
-       if (res == 0 && !gotlock) {
-               uint32_t updateval;
+       if (os_unlikely(!_pthread_rwlock_check_signature(rwlock))) {
+               return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
+       }
+
+       rwlock_seq *seqaddr;
+       RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);
+
+       rwlock_seq oldseq, newseq;
+       // No need to perform a single-copy-atomic 128-bit load in the fast
+       // path: if stores to L and U are seen out of order, we will fall back
+       // to the slow path below (which uses rwlock_seq_atomic_load)
+       rwlock_seq_load(seqaddr, &oldseq, RWLOCK_SEQ_LSU);
 
-               PLOCKSTAT_RW_BLOCK(orwlock, plockstat);
-               
-               do {
+#if __DARWIN_UNIX03
+       if (os_unlikely(is_rwl_ebit_set(oldseq.lcntval))) {
+               return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
+       }
+#endif /* __DARWIN_UNIX03 */
+
+       bool gotlock;
+       do {
+               newseq = oldseq;
+
+               // if W and K bits are clear or U bit is on, acquire lock in userland
+               if (readlock) {
+                       gotlock = (oldseq.lcntval & (PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0;
+               } else {
+                       gotlock = (oldseq.lcntval & PTH_RWL_UBIT) != 0;
+               }
+
+               if (trylock && !gotlock) {
+                       // A trylock on a held lock will fail immediately. But since
+                       // we did not load the sequence words atomically, perform a
+                       // no-op CAS to ensure that nobody has unlocked concurrently.
+               } else if (os_likely(gotlock)) {
                        if (readlock) {
-                               updateval = __psynch_rw_rdlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
-                       } else {
-                               updateval = __psynch_rw_wrlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
-                       }
-                       if (updateval == (uint32_t)-1) {
-                               res = errno;
+                               if (os_unlikely(diff_genseq(oldseq.lcntval, oldseq.ucntval) >=
+                                               PTHRW_MAX_READERS)) {
+                                       return _pthread_rwlock_lock_slow(orwlock, readlock,trylock);
+                               }
+                               // Need to update L (remove U bit) and S word
+                               newseq.lcntval &= ~PTH_RWL_UBIT;
                        } else {
-                               res = 0;
+                               newseq.lcntval &= PTHRW_COUNT_MASK;
+                               newseq.lcntval |= PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
                        }
-               } while (res == EINTR);
-               
-               if (res == 0) {
-                       _pthread_rwlock_updateval(rwlock, updateval);
-                       PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_SUCCESS_PLOCKSTAT);
+                       newseq.lcntval += PTHRW_INC;
+                       newseq.rw_seq  += PTHRW_INC;
                } else {
-                       PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_FAIL_PLOCKSTAT);
-                       uint64_t myid;
-                       (void)pthread_threadid_np(pthread_self(), &myid);
-                       PTHREAD_ABORT("kernel lock returned unknown error %x with tid %x\n", updateval, (uint32_t)myid);
+                       return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
                }
-       }
-       
-       if (res == 0) {
+       } while (os_unlikely(!rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
+                       RWLOCK_SEQ_LS, acquire)));
+
+       if (os_likely(gotlock)) {
 #if __DARWIN_UNIX03
                if (!readlock) {
-                       rwlock->rw_owner = pthread_self();
+                       uint64_t *tidaddr;
+                       RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
+                       uint64_t selfid = _pthread_selfid_direct();
+                       os_atomic_store(tidaddr, selfid, relaxed);
                }
 #endif /* __DARWIN_UNIX03 */
-               PLOCKSTAT_RW_ACQUIRE(orwlock, plockstat);
+               return 0;
+       } else if (trylock) {
+               return EBUSY;
        } else {
-               PLOCKSTAT_RW_ERROR(orwlock, plockstat, res);
+               __builtin_trap();
        }
-       
-       return res;
 }
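
When the reader count looks saturated (diff_genseq() at or above PTHRW_MAX_READERS), the slow path above retries at most 1024 times, yielding between reloads, before giving up with EAGAIN. The shape of that bounded retry, sketched standalone (try_once is a hypothetical stand-in for one reload-and-CAS attempt):

    #include <errno.h>
    #include <sched.h>
    #include <stdbool.h>

    static int
    bounded_retry(bool (*try_once)(void *), void *ctx)
    {
        for (int retry_count = 0; retry_count <= 1024; retry_count++) {
            if (try_once(ctx)) return 0;
            sched_yield(); /* let the outstanding readers drain */
        }
        return EAGAIN;
    }
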
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_rwlock_rdlock(pthread_rwlock_t *orwlock)
 {
@@ -498,6 +885,7 @@ pthread_rwlock_rdlock(pthread_rwlock_t *orwlock)
        return _pthread_rwlock_lock(orwlock, true, false);
 }
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_rwlock_tryrdlock(pthread_rwlock_t *orwlock)
 {
@@ -505,6 +893,7 @@ pthread_rwlock_tryrdlock(pthread_rwlock_t *orwlock)
        return _pthread_rwlock_lock(orwlock, true, true);
 }
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_rwlock_wrlock(pthread_rwlock_t *orwlock)
 {
@@ -512,6 +901,7 @@ pthread_rwlock_wrlock(pthread_rwlock_t *orwlock)
        return _pthread_rwlock_lock(orwlock, false, false);
 }
 
+PTHREAD_NOEXPORT_VARIANT
 int
 pthread_rwlock_trywrlock(pthread_rwlock_t *orwlock)
 {
@@ -519,121 +909,115 @@ pthread_rwlock_trywrlock(pthread_rwlock_t *orwlock)
        return _pthread_rwlock_lock(orwlock, false, true);
 }
 
-int
-pthread_rwlock_unlock(pthread_rwlock_t *orwlock)
+PTHREAD_NOINLINE
+static int
+_pthread_rwlock_unlock_drop(pthread_rwlock_t *orwlock, rwlock_seq oldseq,
+               rwlock_seq newseq)
 {
        int res;
        _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
-#ifdef PLOCKSTAT
-       int wrlock = 0;
-#endif
 
-       res = _pthread_rwlock_check_init(orwlock);
+       RWLOCK_DEBUG_SEQ(unlock, rwlock, oldseq, newseq, !droplock, RWLOCK_SEQ_LSU);
+       uint32_t updateval;
+       do {
+               updateval = __psynch_rw_unlock(orwlock, oldseq.lcntval,
+                               newseq.ucntval, newseq.rw_seq, rwlock->rw_flags);
+               if (updateval == (uint32_t)-1) {
+                       res = errno;
+               } else {
+                       res = 0;
+                       RWLOCK_DEBUG_SEQ(wakeup, rwlock, oldseq, newseq, updateval,
+                                       RWLOCK_SEQ_LSU);
+               }
+       } while (res == EINTR);
+
        if (res != 0) {
-               return res;
+               PTHREAD_ABORT("kernel rwunlock returned unknown error %x: "
+                               "tid %llx\n", res, _pthread_selfid_direct());
        }
 
-       uint64_t oldval64 = 0, newval64 = 0;
-       volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;
-       RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);
-
-       bool droplock;
-       bool reload;
-       bool incr_ucnt = true;
-       bool check_spurious = true;
-       uint32_t lcntval, ucntval, rw_seq, ulval = 0, newval, newsval;
+       return res;
+}
 
-       do {
-               reload = false;
-               droplock = true;
+PTHREAD_NOEXPORT PTHREAD_NOINLINE
+int
+_pthread_rwlock_unlock_slow(pthread_rwlock_t *orwlock,
+               rwlock_seqfields updated_seqfields)
+{
+       int res;
+       _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
+       rwlock_seqfields seqfields = RWLOCK_SEQ_LSU;
+#ifdef PLOCKSTAT
+       int wrlock = 0;
+#endif
 
-               lcntval = *lcntaddr;
-               ucntval = *ucntaddr;
-               rw_seq = *seqaddr;
+       res = _pthread_rwlock_check_init(orwlock);
+       if (res != 0) return res;
 
-               oldval64 = (((uint64_t)rw_seq) << 32);
-               oldval64 |= lcntval;
+       rwlock_seq *seqaddr;
+       RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);
 
-               // check for spurious unlocks
-               if (check_spurious) {
-                       if ((lcntval & PTH_RWL_RBIT) != 0) {
-                               droplock = false;
+       rwlock_seq oldseq, newseq;
+       rwlock_seq_load(seqaddr, &oldseq, seqfields);
 
-                               newval64 = oldval64;
-                               continue;
-                       }
-                       check_spurious = false;
-               }
+       if ((oldseq.lcntval & PTH_RWL_UBIT) != 0) {
+               // spurious unlock (unlock of unlocked lock)
+               return 0;
+       }
 
-               if (is_rwl_ebit_set(lcntval)) {
+       if (is_rwl_ebit_set(oldseq.lcntval)) {
 #ifdef PLOCKSTAT
-                       wrlock = 1;
+               wrlock = 1;
 #endif
 #if __DARWIN_UNIX03
-                       rwlock->rw_owner = NULL;
+               uint64_t *tidaddr;
+               RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
+               os_atomic_store(tidaddr, 0, relaxed);
 #endif /* __DARWIN_UNIX03 */
-               }
+       }
 
-               // update U
-               if (incr_ucnt) {
-                       ulval = (ucntval + PTHRW_INC);
-                       incr_ucnt = (OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)ucntaddr) != TRUE);
-                       newval64 = oldval64;
-                       reload = true;
-                       continue;
-               }
+       bool droplock;
+       do {
+               // stop loading & updating fields that have successfully been stored
+               seqfields &= ~updated_seqfields;
 
-               // last unlock, note U is already updated ?
-               if ((lcntval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
-                       /* Set L with R and init bits and set S to L */
-                       newval  = (lcntval & PTHRW_COUNT_MASK)| PTHRW_RWLOCK_INIT;
-                       newsval = (lcntval & PTHRW_COUNT_MASK)| PTHRW_RWS_INIT;
+               newseq = oldseq;
+               if (seqfields & RWLOCK_SEQ_U) {
+                       newseq.ucntval += PTHRW_INC;
+               }
 
-                       droplock = false;
+               droplock = false;
+               uint32_t oldlcnt = (oldseq.lcntval & PTHRW_COUNT_MASK);
+               if (newseq.ucntval == oldlcnt) {
+                       // last unlock, set L with U and init bits and set S to L with S bit
+                       newseq.lcntval = oldlcnt | PTHRW_RWLOCK_INIT;
+                       newseq.rw_seq =  oldlcnt | PTHRW_RWS_INIT;
                } else {
-                       /* if it is not exclusive or no Writer/yield pending, skip */
-                       if ((lcntval & (PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0) {
-                               droplock = false;
-                               break;
+                       // no L/S update if lock is not exclusive or no writer pending
+                       if ((oldseq.lcntval &
+                                       (PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0) {
+                               continue;
                        }
 
-                       /* kernel transition needed? */
-                       /* U+1 == S? */
-                       if ((ulval + PTHRW_INC) != (rw_seq & PTHRW_COUNT_MASK)) {
-                               droplock = false;
-                               break;
+                       // kernel transition only needed if U == S
+                       if (newseq.ucntval != (oldseq.rw_seq & PTHRW_COUNT_MASK)) {
+                               continue;
                        }
 
-                       /* reset all bits and set k */
-                       newval = (lcntval & PTHRW_COUNT_MASK) | PTH_RWL_KBIT;
-                       /* set I bit on S word */       
-                       newsval = rw_seq | PTH_RWS_IBIT;
-                       if ((lcntval & PTH_RWL_WBIT) != 0) {
-                               newsval |= PTH_RWS_WSVBIT;
+                       droplock = true;
+                       // reset all bits and set K
+                       newseq.lcntval = oldlcnt | PTH_RWL_KBIT;
+                       // set I bit on S word
+                       newseq.rw_seq |= PTH_RWS_IBIT;
+                       if ((oldseq.lcntval & PTH_RWL_WBIT) != 0) {
+                               newseq.rw_seq |= PTH_RWS_WSVBIT;
                        }
                }
-
-               newval64 = (((uint64_t)newsval) << 32);
-               newval64 |= newval;
-
-       } while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE || reload);
+       } while (seqfields != (updated_seqfields = rwlock_seq_atomic_cmpxchgv(
+                       seqaddr, &oldseq, &newseq, seqfields, release)));
 
        if (droplock) {
-               uint32_t updateval;
-               do {
-                       updateval = __psynch_rw_unlock(orwlock, lcntval, ulval, newsval, rwlock->rw_flags);
-                       if (updateval == (uint32_t)-1) {
-                               res = errno;
-                       } else {
-                               res = 0;
-                       }
-               } while (res == EINTR);
-
-               if (res != 0) {
-                       uint64_t myid = 0;
-                       (void)pthread_threadid_np(pthread_self(), &myid);
-                       PTHREAD_ABORT("rwunlock from kernel with unknown error %x: tid %x\n", res, (uint32_t)myid);
-               }
+               res = _pthread_rwlock_unlock_drop(orwlock, oldseq, newseq);
        }
 
        PLOCKSTAT_RW_RELEASE(orwlock, wrlock);
@@ -641,3 +1025,72 @@ pthread_rwlock_unlock(pthread_rwlock_t *orwlock)
        return res;
 }
 
+PTHREAD_NOEXPORT_VARIANT
+int
+pthread_rwlock_unlock(pthread_rwlock_t *orwlock)
+{
+       _pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
+       rwlock_seqfields seqfields = RWLOCK_SEQ_LSU;
+       rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
+
+#if PLOCKSTAT
+       if (PLOCKSTAT_RW_RELEASE_ENABLED() || PLOCKSTAT_RW_ERROR_ENABLED()) {
+               return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
+       }
+#endif
+
+       if (os_unlikely(!_pthread_rwlock_check_signature(rwlock))) {
+               return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
+       }
+
+       rwlock_seq *seqaddr;
+       RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);
+
+       rwlock_seq oldseq, newseq;
+       rwlock_seq_load(seqaddr, &oldseq, seqfields);
+
+       if (os_unlikely(oldseq.lcntval & PTH_RWL_UBIT)) {
+               // spurious unlock (unlock of unlocked lock)
+               return 0;
+       }
+
+       if (is_rwl_ebit_set(oldseq.lcntval)) {
+#if __DARWIN_UNIX03
+               uint64_t *tidaddr;
+               RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
+               os_atomic_store(tidaddr, 0, relaxed);
+#endif /* __DARWIN_UNIX03 */
+       }
+
+       do {
+               if (updated_seqfields) {
+                       return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
+               }
+
+               newseq = oldseq;
+               if (seqfields & RWLOCK_SEQ_U) {
+                       newseq.ucntval += PTHRW_INC;
+               }
+
+               uint32_t oldlcnt = (oldseq.lcntval & PTHRW_COUNT_MASK);
+               if (os_likely(newseq.ucntval == oldlcnt)) {
+                       // last unlock, set L with U and init bits and set S to L with S bit
+                       newseq.lcntval = oldlcnt | PTHRW_RWLOCK_INIT;
+                       newseq.rw_seq =  oldlcnt | PTHRW_RWS_INIT;
+               } else {
+                       if (os_likely((oldseq.lcntval &
+                                       (PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0 ||
+                                       newseq.ucntval != (oldseq.rw_seq & PTHRW_COUNT_MASK))) {
+                               // no L/S update if lock is not exclusive or no writer pending
+                               // kernel transition only needed if U == S
+                       } else {
+                               return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
+                       }
+               }
+       } while (os_unlikely(seqfields != (updated_seqfields =
+                       rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, seqfields,
+                       release))));
+
+       return 0;
+}
+
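
At the API level the functions above keep their POSIX semantics; only the uncontended paths changed. A small single-threaded usage example:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    int main(void)
    {
        pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

        pthread_rwlock_rdlock(&rw);                 /* first reader */
        if (pthread_rwlock_tryrdlock(&rw) == 0)     /* readers share */
            puts("second reader admitted");
        if (pthread_rwlock_trywrlock(&rw) == EBUSY) /* writers excluded */
            puts("writer correctly refused");
        pthread_rwlock_unlock(&rw);
        pthread_rwlock_unlock(&rw);

        return pthread_rwlock_destroy(&rw);         /* 0 once unlocked */
    }
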
index 5097d60a5855ffd1eb97a409368e64485014f106..f8c529ff1da5e5327e2a1e205f1035946f1b95c9 100644 (file)
@@ -31,7 +31,7 @@
 /* We should move abort() into Libsyscall, if possible. */
 int __getpid(void);
 
-PTHREAD_NORETURN int
+int
 __kill(int pid, int signum, int posix);
 
 void
index 1aa0285510a2888615bb51a6d9c87ebe0f7128ae..3a772668ee81d3ae292dd0afe4cdb94da7bf4e68 100644 (file)
@@ -2,14 +2,14 @@
  * Copyright (c) 2000-2003, 2007, 2012 Apple Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * compliance with the License. Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this
  * file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_LICENSE_HEADER_END@
  */
 /*
- * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991  
- *              All Rights Reserved 
- *  
- * Permission to use, copy, modify, and distribute this software and 
- * its documentation for any purpose and without fee is hereby granted, 
- * provided that the above copyright notice appears in all copies and 
- * that both the copyright notice and this permission notice appear in 
- * supporting documentation. 
- *  
- * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE 
- * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 
- * FOR A PARTICULAR PURPOSE. 
- *  
- * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR 
- * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM 
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, 
- * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION 
- * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- * 
+ * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
+ *              All Rights Reserved
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE.
+ *
+ * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
+ * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+ * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
  */
 /*
  * MkLinux
@@ -269,7 +269,7 @@ _pthread_tsd_behaviour_check(pthread_t self)
        Dl_info i;
        pthread_key_t k;
 
-       for (k = __pthread_tsd_start; k <= __pthread_tsd_end; k++) {
+       for (k = __pthread_tsd_start; k < __pthread_tsd_end; k++) {
                void (*destructor)(void *);
                if (_pthread_key_get_destructor(k, &destructor)) {
                        void **ptr = &self->tsd[k];
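
The loop-bound fix above makes the key walk half-open, so the one-past-the-end slot __pthread_tsd_end is no longer inspected. The destructor bookkeeping it walks is populated by ordinary POSIX TSD usage:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_key_t key;

    static void
    destroy_value(void *value)
    {
        free(value); /* runs at thread exit for each non-NULL slot */
    }

    static void *
    worker(void *arg)
    {
        (void)arg;
        pthread_setspecific(key, malloc(64));
        return NULL; /* destroy_value() fires for this thread here */
    }

    int main(void)
    {
        pthread_t t;
        pthread_key_create(&key, destroy_value);
        pthread_create(&t, NULL, worker, NULL);
        pthread_join(t, NULL);
        return pthread_key_delete(&key);
    }
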
index 59747f3451368b442f0315db439e1417644510ab..b31098a3d825a2a24584f1a8062e12be1647340c 100644 (file)
--- a/src/qos.c
+++ b/src/qos.c
@@ -128,8 +128,7 @@ pthread_set_qos_class_self_np(qos_class_t __qos_class,
                 * read the value out of it and set the QoS class.
                 */
                _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
-
-               mach_port_t kport = pthread_mach_thread_np(pthread_self());
+               mach_port_t kport = _pthread_kernel_thread(pthread_self());
                int res = __bsdthread_ctl(BSDTHREAD_CTL_SET_QOS, kport, &pthread_self()->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS], 0);
 
                if (res == -1) {
@@ -145,14 +144,6 @@ pthread_set_qos_class_np(pthread_t __pthread,
                                                 qos_class_t __qos_class,
                                                 int __relative_priority)
 {
-       if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
-               return ENOTSUP;
-       }
-
-       if (__relative_priority > 0 || __relative_priority < QOS_MIN_RELATIVE_PRIORITY) {
-               return EINVAL;
-       }
-
        if (__pthread != pthread_self()) {
                /* The kext now enforces this anyway, if we check here too, it allows us to call
                 * _pthread_set_properties_self later if we can.
@@ -160,30 +151,7 @@ pthread_set_qos_class_np(pthread_t __pthread,
                return EPERM;
        }
 
-       pthread_priority_t priority = _pthread_priority_make_newest(__qos_class, __relative_priority, 0);
-
-       if (__pthread_supported_features & PTHREAD_FEATURE_SETSELF) {
-               /* If we have _pthread_set_properties_self, then we can easily set this using that. */
-               return _pthread_set_properties_self(_PTHREAD_SET_SELF_QOS_FLAG, priority, 0);
-       } else {
-               /* We set the thread QoS class in the TSD and then call into the kernel to
-                * read the value out of it and set the QoS class.
-                */
-               if (__pthread == pthread_self()) {
-                       _pthread_setspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS, priority);
-               } else {
-                       __pthread->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS] = priority;
-               }
-
-               mach_port_t kport = pthread_mach_thread_np(__pthread);
-               int res = __bsdthread_ctl(BSDTHREAD_CTL_SET_QOS, kport, &__pthread->tsd[_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS], 0);
-
-               if (res == -1) {
-                       res = errno;
-               }
-
-               return res;
-       }
+       return pthread_set_qos_class_self_np(__qos_class, __relative_priority);
 }
 
 int
@@ -234,11 +202,7 @@ qos_class_main(void)
 pthread_priority_t
 _pthread_qos_class_encode(qos_class_t qos_class, int relative_priority, unsigned long flags)
 {
-       if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
-               return _pthread_priority_make_version2(qos_class, relative_priority, flags);
-       } else {
-               return _pthread_priority_make_newest(qos_class, relative_priority, flags);
-       }
+       return _pthread_priority_make_newest(qos_class, relative_priority, flags);
 }
 
 qos_class_t
@@ -246,66 +210,31 @@ _pthread_qos_class_decode(pthread_priority_t priority, int *relative_priority, u
 {
        qos_class_t qos; int relpri;
 
-       if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
-               _pthread_priority_split_version2(priority, qos, relpri);
-       } else {
-               _pthread_priority_split_newest(priority, qos, relpri);
-       }
+       _pthread_priority_split_newest(priority, qos, relpri);
 
        if (relative_priority) { *relative_priority = relpri; }
        if (flags) { *flags = _pthread_priority_get_flags(priority); }
        return qos;
 }
 
+// Encode a legacy workqueue API priority into a pthread_priority_t. This API
+// is deprecated and can be removed when the simulator no longer uses it.
 pthread_priority_t
 _pthread_qos_class_encode_workqueue(int queue_priority, unsigned long flags)
 {
-       if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_DEFAULT) == 0) {
-               switch (queue_priority) {
-                       case WORKQ_HIGH_PRIOQUEUE:
-                               return _pthread_priority_make_version1(QOS_CLASS_USER_INTERACTIVE, 0, flags);
-                       case WORKQ_DEFAULT_PRIOQUEUE:
-                               return _pthread_priority_make_version1(QOS_CLASS_USER_INITIATED, 0, flags);
-                       case WORKQ_LOW_PRIOQUEUE:
-                       case WORKQ_NON_INTERACTIVE_PRIOQUEUE:
-                               return _pthread_priority_make_version1(QOS_CLASS_UTILITY, 0, flags);
-                       case WORKQ_BG_PRIOQUEUE:
-                               return _pthread_priority_make_version1(QOS_CLASS_BACKGROUND, 0, flags);
-                       default:
-                               __pthread_abort();
-               }
-       }
-
-       if ((__pthread_supported_features & PTHREAD_FEATURE_QOS_MAINTENANCE) == 0) {
-                       switch (queue_priority) {
-                               case WORKQ_HIGH_PRIOQUEUE:
-                                       return _pthread_priority_make_version2(QOS_CLASS_USER_INITIATED, 0, flags);
-                               case WORKQ_DEFAULT_PRIOQUEUE:
-                                       return _pthread_priority_make_version2(QOS_CLASS_DEFAULT, 0, flags);
-                               case WORKQ_LOW_PRIOQUEUE:
-                               case WORKQ_NON_INTERACTIVE_PRIOQUEUE:
-                                       return _pthread_priority_make_version2(QOS_CLASS_UTILITY, 0, flags);
-                               case WORKQ_BG_PRIOQUEUE:
-                                       return _pthread_priority_make_version2(QOS_CLASS_BACKGROUND, 0, flags);
-                               /* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
-                               default:
-                                       __pthread_abort();
-                       }
-       }
-
        switch (queue_priority) {
-               case WORKQ_HIGH_PRIOQUEUE:
-                       return _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, flags);
-               case WORKQ_DEFAULT_PRIOQUEUE:
-                       return _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, flags);
-               case WORKQ_LOW_PRIOQUEUE:
-               case WORKQ_NON_INTERACTIVE_PRIOQUEUE:
-                       return _pthread_priority_make_newest(QOS_CLASS_UTILITY, 0, flags);
-               case WORKQ_BG_PRIOQUEUE:
-                       return _pthread_priority_make_newest(QOS_CLASS_BACKGROUND, 0, flags);
-               /* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
-               default:
-                       __pthread_abort();
+       case WORKQ_HIGH_PRIOQUEUE:
+               return _pthread_priority_make_newest(QOS_CLASS_USER_INITIATED, 0, flags);
+       case WORKQ_DEFAULT_PRIOQUEUE:
+               return _pthread_priority_make_newest(QOS_CLASS_DEFAULT, 0, flags);
+       case WORKQ_LOW_PRIOQUEUE:
+       case WORKQ_NON_INTERACTIVE_PRIOQUEUE:
+               return _pthread_priority_make_newest(QOS_CLASS_UTILITY, 0, flags);
+       case WORKQ_BG_PRIOQUEUE:
+               return _pthread_priority_make_newest(QOS_CLASS_BACKGROUND, 0, flags);
+       /* Legacy dispatch does not use QOS_CLASS_MAINTENANCE, so no need to handle it here */
+       default:
+               __pthread_abort();
        }
 }
 
@@ -337,7 +266,7 @@ pthread_set_fixedpriority_self(void)
        if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
                return ENOTSUP;
        }
-       
+
        if (__pthread_supported_features & PTHREAD_FEATURE_SETSELF) {
                return _pthread_set_properties_self(_PTHREAD_SET_SELF_FIXEDPRIORITY_FLAG, 0, 0);
        } else {
@@ -351,7 +280,7 @@ pthread_set_timeshare_self(void)
        if (!(__pthread_supported_features & PTHREAD_FEATURE_BSDTHREADCTL)) {
                return ENOTSUP;
        }
-       
+
        if (__pthread_supported_features & PTHREAD_FEATURE_SETSELF) {
                return _pthread_set_properties_self(_PTHREAD_SET_SELF_TIMESHARE_FLAG, 0, 0);
        } else {
@@ -423,12 +352,10 @@ pthread_override_qos_class_end_np(pthread_override_t override)
        int res = 0;
 
        /* Double-free is a fault. Swap the signature and check the old one. */
-       if (__sync_swap(&override->sig, PTHREAD_OVERRIDE_SIG_DEAD) != PTHREAD_OVERRIDE_SIGNATURE) {
+       if (_pthread_atomic_xchg_uint32_relaxed(&override->sig, PTHREAD_OVERRIDE_SIG_DEAD) != PTHREAD_OVERRIDE_SIGNATURE) {
                __builtin_trap();
        }
 
-       override->sig = PTHREAD_OVERRIDE_SIG_DEAD;
-
        /* Always consumes (and deallocates) the pthread_override_t object given. */
        res = __bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_END, override->kthread, (uintptr_t)override, 0);
        if (res == -1) { res = errno; }
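
The hunk above replaces __sync_swap with an explicit relaxed exchange and drops the plain store that followed it, which was redundant after the swap. The pattern: exactly one caller can observe the live signature, so a second pthread_override_qos_class_end_np on the same object traps instead of double-freeing. Sketched standalone with hypothetical signature values:

    #include <stdatomic.h>
    #include <stdint.h>

    #define SIG_LIVE 0x4f564552u /* hypothetical live signature */
    #define SIG_DEAD 0x4f564544u /* hypothetical dead signature */

    typedef struct { _Atomic uint32_t sig; } override_t;

    static void
    override_end(override_t *o)
    {
        /* The swap is the serialization point: whoever gets SIG_LIVE back
         * owns the single deallocation; any other caller is a double-free. */
        if (atomic_exchange_explicit(&o->sig, SIG_DEAD,
                memory_order_relaxed) != SIG_LIVE) {
            __builtin_trap();
        }
        /* ...release the object exactly once here... */
    }
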
@@ -526,7 +453,7 @@ _pthread_workqueue_override_start_direct_check_owner(mach_port_t thread, pthread
                if (ulock_addr && res == EFAULT) {
                        // kernel wants us to redrive the call, so while we refault the
                        // memory, also revalidate the owner
-                       uint32_t uval = os_atomic_load(ulock_addr, relaxed);
+                       uint32_t uval = *(uint32_t volatile *)ulock_addr;
                        if (ulock_owner_value_to_port_name(uval) != thread) {
                                return ESTALE;
                        }
@@ -575,6 +502,63 @@ _pthread_workqueue_asynchronous_override_reset_all_self(void)
        return res;
 }
 
+static inline uint16_t
+_pthread_workqueue_parallelism_for_priority(int qos, unsigned long flags)
+{
+       int rc = __bsdthread_ctl(BSDTHREAD_CTL_QOS_MAX_PARALLELISM, qos, flags, 0);
+       if (os_unlikely(rc == -1)) {
+               rc = errno;
+               if (rc != EINVAL) {
+                       PTHREAD_INTERNAL_CRASH(rc, "qos_max_parallelism failed");
+               }
+               if (flags & _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL) {
+                       return *(uint8_t *)_COMM_PAGE_LOGICAL_CPUS;
+               } else {
+                       return *(uint8_t *)_COMM_PAGE_PHYSICAL_CPUS;
+               }
+       }
+       return (uint16_t)rc;
+}
+
+int
+pthread_qos_max_parallelism(qos_class_t qos, unsigned long flags)
+{
+       int thread_qos = _pthread_qos_class_to_thread_qos(qos);
+       if (thread_qos == THREAD_QOS_UNSPECIFIED) {
+               errno = EINVAL;
+               return -1;
+       }
+
+       unsigned long syscall_flags = _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL;
+       uint16_t *ptr = &_pthread_globals()->qmp_logical[thread_qos];
+
+       if (flags & PTHREAD_MAX_PARALLELISM_PHYSICAL) {
+               syscall_flags = 0;
+               ptr = &_pthread_globals()->qmp_physical[thread_qos];
+       }
+       if (*ptr == 0) {
+               *ptr = _pthread_workqueue_parallelism_for_priority(thread_qos, syscall_flags);
+       }
+       return *ptr;
+}
+
+int
+pthread_time_constraint_max_parallelism(unsigned long flags)
+{
+       unsigned long syscall_flags = _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL;
+       uint16_t *ptr = &_pthread_globals()->qmp_logical[0];
+
+       if (flags & PTHREAD_MAX_PARALLELISM_PHYSICAL) {
+               syscall_flags = 0;
+               ptr = &_pthread_globals()->qmp_physical[0];
+       }
+       if (*ptr == 0) {
+               *ptr = _pthread_workqueue_parallelism_for_priority(0,
+                               syscall_flags | _PTHREAD_QOS_PARALLELISM_REALTIME);
+       }
+       return *ptr;
+}
+
 int
 posix_spawnattr_set_qos_class_np(posix_spawnattr_t * __restrict __attr, qos_class_t __qos_class)
 {
diff --git a/src/resolver.c b/src/resolver.c
deleted file mode 100644 (file)
index cb9ae24..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2015 Apple Inc. All rights reserved.
- *
- * @APPLE_APACHE_LICENSE_HEADER_START@
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * @APPLE_APACHE_LICENSE_HEADER_END@
- */
-
-#include "resolver_internal.h"
-
-#define _OS_VARIANT_RESOLVER(s, v, ...) \
-       __attribute__((visibility(OS_STRINGIFY(v)))) extern void* s(void); \
-       void* s(void) { \
-       __asm__(".symbol_resolver _" OS_STRINGIFY(s)); \
-               __VA_ARGS__ \
-       }
-
-#define _OS_VARIANT_UPMP_RESOLVER(s, v) \
-       _OS_VARIANT_RESOLVER(s, v, \
-               uint32_t *_c = (void*)(uintptr_t)_COMM_PAGE_CPU_CAPABILITIES; \
-               if (*_c & kUP) { \
-                       extern void OS_VARIANT(s, up)(void); \
-                       return &OS_VARIANT(s, up); \
-               } else { \
-                       extern void OS_VARIANT(s, mp)(void); \
-                       return &OS_VARIANT(s, mp); \
-               })
-
-#define OS_VARIANT_UPMP_RESOLVER(s) \
-       _OS_VARIANT_UPMP_RESOLVER(s, default)
-
-#define OS_VARIANT_UPMP_RESOLVER_INTERNAL(s) \
-       _OS_VARIANT_UPMP_RESOLVER(s, hidden)
-
-
-#ifdef OS_VARIANT_SELECTOR
-
-OS_VARIANT_UPMP_RESOLVER(pthread_mutex_lock)
-OS_VARIANT_UPMP_RESOLVER(pthread_mutex_trylock)
-OS_VARIANT_UPMP_RESOLVER(pthread_mutex_unlock)
-
-#endif // OS_VARIANT_SELECTOR
diff --git a/src/resolver.h b/src/resolver.h
deleted file mode 100644 (file)
index b34e65d..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2015 Apple Inc. All rights reserved.
- *
- * @APPLE_APACHE_LICENSE_HEADER_START@
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * @APPLE_APACHE_LICENSE_HEADER_END@
- */
-
-#ifndef __PTHREAD_RESOLVER_H__
-#define __PTHREAD_RESOLVER_H__
-
-#include "resolver_internal.h"
-
-#ifdef OS_VARIANT_SELECTOR
-
-#if TARGET_OS_EMBEDDED
-#define pthread_mutex_lock \
-               OS_VARIANT(pthread_mutex_lock, OS_VARIANT_SELECTOR)
-#define pthread_mutex_trylock \
-               OS_VARIANT(pthread_mutex_trylock, OS_VARIANT_SELECTOR)
-#define pthread_mutex_unlock \
-               OS_VARIANT(pthread_mutex_unlock, OS_VARIANT_SELECTOR)
-#endif // TARGET_OS_EMBEDDED
-
-#endif // OS_VARIANT_SELECTOR
-
-#endif // __PTHREAD_RESOLVER_H__
diff --git a/src/resolver/resolver.c b/src/resolver/resolver.c
new file mode 100644 (file)
index 0000000..7a0e078
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+
+struct _os_empty_files_are_not_c_files;
diff --git a/src/resolver/resolver.h b/src/resolver/resolver.h
new file mode 100644 (file)
index 0000000..96eb8e9
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2015 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __PTHREAD_RESOLVER_H__
+#define __PTHREAD_RESOLVER_H__
+
+#include "resolver_internal.h"
+
+
+#endif // __PTHREAD_RESOLVER_H__
diff --git a/src/resolver/resolver_internal.h b/src/resolver/resolver_internal.h
new file mode 100644 (file)
index 0000000..c1a9014
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015 Apple Inc. All rights reserved.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_START@
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @APPLE_APACHE_LICENSE_HEADER_END@
+ */
+
+#ifndef __PTHREAD_RESOLVER_INTERNAL_H__
+#define __PTHREAD_RESOLVER_INTERNAL_H__
+
+
+#endif // __PTHREAD_RESOLVER_INTERNAL_H__
diff --git a/src/resolver_internal.h b/src/resolver_internal.h
deleted file mode 100644 (file)
index f3b1254..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2015 Apple Inc. All rights reserved.
- *
- * @APPLE_APACHE_LICENSE_HEADER_START@
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- * @APPLE_APACHE_LICENSE_HEADER_END@
- */
-
-#ifndef __PTHREAD_RESOLVER_INTERNAL_H__
-#define __PTHREAD_RESOLVER_INTERNAL_H__
-
-#include <os/base.h>
-#include <TargetConditionals.h>
-#include <machine/cpu_capabilities.h>
-#if defined(__arm__)
-#include <arm/arch.h>
-#endif
-
-// XXX <rdar://problem/24290376> eOS version of libpthread doesn't have UP optimizations
-#if !defined(VARIANT_STATIC) && \
-     defined(_ARM_ARCH_7) && !defined(__ARM_ARCH_7S__)
-
-#if OS_ATOMIC_UP
-#define OS_VARIANT_SELECTOR up
-#else
-#define OS_VARIANT_SELECTOR mp
-#endif
-
-#endif // !VARIANT_STATIC && _ARM_ARCH_7 && !__ARM_ARCH_7S__
-
-#define OS_VARIANT(f, v) OS_CONCAT(f, OS_CONCAT($VARIANT$, v))
-
-#ifdef OS_VARIANT_SELECTOR
-#define PTHREAD_NOEXPORT_VARIANT  PTHREAD_NOEXPORT
-#else
-#define PTHREAD_NOEXPORT_VARIANT
-#endif
-
-#endif // __PTHREAD_RESOLVER_H__
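The three deleted files above implemented the dyld symbol-resolver machinery that, on ARMv7, bound pthread_mutex_lock/trylock/unlock to a uniprocessor ("up") or multiprocessor ("mp") implementation at bind time; the replacements under src/resolver/ are deliberately empty stubs now that the UP variant (src/pthread_mutex_up.c) is gone. For readers unfamiliar with the mechanism, this is approximately what OS_VARIANT_UPMP_RESOLVER(pthread_mutex_lock) expanded to (illustrative expansion of the deleted macros, not repository code):

    #include <stdint.h>
    #include <machine/cpu_capabilities.h>

    /* dyld runs this resolver once at bind time and binds callers directly
     * to whichever implementation it returns. */
    __attribute__((visibility("default"))) extern void *pthread_mutex_lock(void);
    void *pthread_mutex_lock(void)
    {
            __asm__(".symbol_resolver _pthread_mutex_lock");
            uint32_t *caps = (void *)(uintptr_t)_COMM_PAGE_CPU_CAPABILITIES;
            if (*caps & kUP) {
                    extern void pthread_mutex_lock$VARIANT$up(void);
                    return &pthread_mutex_lock$VARIANT$up;
            } else {
                    extern void pthread_mutex_lock$VARIANT$mp(void);
                    return &pthread_mutex_lock$VARIANT$mp;
            }
    }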
index 3f7a8d0008184ae7890cb000155d4d7d1ee454f8..761103eca3caaf15d21ada2a89c43a0ba68ac511 100644 (file)
  * Machine specific support for thread initialization
  */
 
-
 #include "internal.h"
 #include <platform/string.h>
 
+
 /*
  * Set up the initial state of a MACH thread
  */
@@ -64,15 +64,15 @@ _pthread_setup(pthread_t thread,
               int needresume)
 {
 #if defined(__i386__)
-       i386_thread_state_t state = {0};
+       i386_thread_state_t state = { };
        thread_state_flavor_t flavor = x86_THREAD_STATE32;
        mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
 #elif defined(__x86_64__)
-       x86_thread_state64_t state = {0};
+       x86_thread_state64_t state = { };
        thread_state_flavor_t flavor = x86_THREAD_STATE64;
        mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
 #elif defined(__arm__)
-       arm_thread_state_t state = {0};
+       arm_thread_state_t state = { };
        thread_state_flavor_t flavor = ARM_THREAD_STATE;
        mach_msg_type_number_t count = ARM_THREAD_STATE_COUNT;
 #else
index f0c93510e3d6c219de8f7110aceb5731fb09bcae..de0e9788ae86780e938bb87198b4bf0bfc6fa6fb 100644 (file)
@@ -29,6 +29,9 @@
 #undef __DARWIN_UNIX03
 #define __DARWIN_UNIX03 0
 
+#define _pthread_rwlock_lock_slow _pthread_rwlock_lock_legacy_slow
+#define _pthread_rwlock_unlock_slow _pthread_rwlock_unlock_legacy_slow
+
 #include "../pthread_rwlock.c"
 
 #endif
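The two defines added above use the variant-by-inclusion pattern: the legacy translation unit renames the slow-path symbols and then re-includes the primary implementation, so both the default and legacy rwlock slow paths are compiled from one source file. In miniature (illustrative sketch, not repository code):

    /* legacy.c -- compiles impl.c a second time under renamed symbols */
    #define do_work_slow do_work_legacy_slow
    #include "impl.c"
    /* This TU now emits do_work_legacy_slow(); the default build of impl.c
     * still emits do_work_slow(), and the library ships both. */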
index 943f2a942aaf11031ac01494f3bbb28c46a22538..cba5882afb9542e2993afbf1e25c4cdffd7006ec 100644 (file)
@@ -27,5 +27,6 @@
  */
 #ifndef _PTHREAD_ATTR_T 
 #define _PTHREAD_ATTR_T 
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_attr_t */
 typedef __darwin_pthread_attr_t pthread_attr_t;
 #endif  /* _PTHREAD_ATTR_T */
index 4e901b6f706acd6660f917a0e97d2d0857f88465..b6a7b42f4a2bd311a8ab177db785a7e040a43634 100644 (file)
@@ -27,5 +27,6 @@
  */
 #ifndef _PTHREAD_COND_T
 #define _PTHREAD_COND_T
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_cond_t */
 typedef __darwin_pthread_cond_t pthread_cond_t;
 #endif /* _PTHREAD_COND_T */
index 51b5cdd3e1df3c216e37b15cef7e3c38e5c991a1..6e9227a8a332c7de28797c7f4f3cca9f9ad1060b 100644 (file)
@@ -27,5 +27,6 @@
  */
 #ifndef _PTHREAD_CONDATTR_T
 #define _PTHREAD_CONDATTR_T
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_condattr_t */
 typedef __darwin_pthread_condattr_t pthread_condattr_t;
 #endif /* _PTHREAD_CONDATTR_T */
index 19d1f3106ed6f187e80a9598a54b083fe1252fc8..20d7a0a4440be48a4ad332d1ad98e81b33e383df 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _PTHREAD_KEY_T
 #define _PTHREAD_KEY_T
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_key_t */
 typedef __darwin_pthread_key_t pthread_key_t;
 #endif /* _PTHREAD_KEY_T */
index 75071c664195d87101433b23116b8344645a3660..e5aff0bc2ebb6c4e91ab769a8ea21ae1f7139575 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _PTHREAD_MUTEX_T
 #define _PTHREAD_MUTEX_T
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_mutex_t */
 typedef __darwin_pthread_mutex_t pthread_mutex_t;
 #endif /*_PTHREAD_MUTEX_T */
index f68cdcdb262d3174619cb2fbf2671e1592a90c2e..218d74a91cb4f4551f524721fb821a16fa15ff34 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _PTHREAD_MUTEXATTR_T
 #define _PTHREAD_MUTEXATTR_T
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_mutexattr_t */
 typedef __darwin_pthread_mutexattr_t pthread_mutexattr_t;
 #endif /* _PTHREAD_MUTEXATTR_T */
index 18d12efe76261ea8755a2c17e5c31070a57da6e5..d50a6244f9384da13e8756dc352c99958bdae6f0 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright (c) 2003-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 #ifndef _PTHREAD_ONCE_T
 #define _PTHREAD_ONCE_T
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_once_t */
 typedef __darwin_pthread_once_t pthread_once_t;
 #endif /* _PTHREAD_ONCE_T */
index c1e955516aeaff58d5e55002f72057a158e29a94..75c4e350b42f18a7211107c8d19547443d1eb309 100644 (file)
@@ -27,5 +27,6 @@
  */
 #ifndef _PTHREAD_RWLOCK_T
 #define _PTHREAD_RWLOCK_T
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_rwlock_t */
 typedef __darwin_pthread_rwlock_t pthread_rwlock_t;
 #endif /* _PTHREAD_RWLOCK_T */
index 53927018071f29f4791670c18c16d15af67cf845..6ccd234725c2fd32720a1509f1e8fc61825b1a0b 100644 (file)
@@ -27,5 +27,6 @@
  */
 #ifndef _PTHREAD_RWLOCKATTR_T
 #define _PTHREAD_RWLOCKATTR_T
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_rwlockattr_t */
 typedef __darwin_pthread_rwlockattr_t pthread_rwlockattr_t;
 #endif /* _PTHREAD_RWLOCKATTR_T */
index bf829185804366b46c986354627ee56008301431..4d9e3dac95fce020cfb2902d77160192c1159955 100644 (file)
@@ -27,5 +27,6 @@
  */
 #ifndef _PTHREAD_T
 #define _PTHREAD_T
+#include <sys/_pthread/_pthread_types.h> /* __darwin_pthread_t */
 typedef __darwin_pthread_t pthread_t;
 #endif /* _PTHREAD_T */
index df36ac51d8b97137461f9d6e036bb6b3a01f7a0a..d33af6bc0798f2580c9e6376552013984bfec655 100644 (file)
--- a/sys/qos.h
+++ b/sys/qos.h
  */
 
 #define __QOS_ENUM(name, type, ...) enum { __VA_ARGS__ }; typedef type name##_t
-#define __QOS_CLASS_AVAILABLE_STARTING(...)
+#define __QOS_CLASS_AVAILABLE(...)
 
 #if defined(__has_feature) && defined(__has_extension)
 #if __has_feature(objc_fixed_enum) || __has_extension(cxx_strong_enums)
 #define __QOS_ENUM(name, type, ...) typedef enum : type { __VA_ARGS__ } name##_t
 #endif
 #if __has_feature(enumerator_attributes)
-#undef __QOS_CLASS_AVAILABLE_STARTING
-#define __QOS_CLASS_AVAILABLE_STARTING __OSX_AVAILABLE_STARTING
+#undef __QOS_CLASS_AVAILABLE
+#define __QOS_CLASS_AVAILABLE __API_AVAILABLE
 #endif
 #endif
 
 __QOS_ENUM(qos_class, unsigned int,
        QOS_CLASS_USER_INTERACTIVE
-                       __QOS_CLASS_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x21,
+                       __QOS_CLASS_AVAILABLE(macos(10.10), ios(8.0)) = 0x21,
        QOS_CLASS_USER_INITIATED
-                       __QOS_CLASS_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x19,
+                       __QOS_CLASS_AVAILABLE(macos(10.10), ios(8.0)) = 0x19,
        QOS_CLASS_DEFAULT
-                       __QOS_CLASS_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x15,
+                       __QOS_CLASS_AVAILABLE(macos(10.10), ios(8.0)) = 0x15,
        QOS_CLASS_UTILITY
-                       __QOS_CLASS_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x11,
+                       __QOS_CLASS_AVAILABLE(macos(10.10), ios(8.0)) = 0x11,
        QOS_CLASS_BACKGROUND
-                       __QOS_CLASS_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x09,
+                       __QOS_CLASS_AVAILABLE(macos(10.10), ios(8.0)) = 0x09,
        QOS_CLASS_UNSPECIFIED
-                       __QOS_CLASS_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0) = 0x00,
+                       __QOS_CLASS_AVAILABLE(macos(10.10), ios(8.0)) = 0x00,
 );
 
 #undef __QOS_ENUM
@@ -165,7 +165,7 @@ __BEGIN_DECLS
  * @return
  * One of the QOS class values in qos_class_t.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 qos_class_t
 qos_class_self(void);
 
@@ -187,7 +187,7 @@ qos_class_self(void);
  * @return
  * One of the QOS class values in qos_class_t.
  */
-__OSX_AVAILABLE_STARTING(__MAC_10_10, __IPHONE_8_0)
+__API_AVAILABLE(macos(10.10), ios(8.0))
 qos_class_t
 qos_class_main(void);
 
index 081849119ee49784a91496295e3f082bdf2fc26b..408b1010c88423b59f8390a82d019f0de27571f3 100644 (file)
@@ -1,6 +1,8 @@
 PROJECT := libpthread
 TEST_DIR := tests/
 
+ENABLE_LTE_TESTS=YES
+
 ifeq ($(DEVELOPER_DIR),)
        DEVELOPER_DIR := $(shell xcode-select -p)
 endif
@@ -19,6 +21,7 @@ TARGETS += stack_aslr
 TARGETS += join
 TARGETS += main_stack
 TARGETS += main_stack_custom
+TARGETS += detach
 #TARGETS += maxwidth
 TARGETS += mutex
 TARGETS += mutex_try
@@ -31,7 +34,9 @@ TARGETS += pthread_exit
 TARGETS += pthread_introspection
 TARGETS += pthread_setspecific
 TARGETS += pthread_threadid_np
+TARGETS += pthread_get_qos_class_np
 #TARGETS += qos
+TARGETS += rdar_32848402
 #TARGETS += rwlock-22244050
 #TARGETS += rwlock-signal
 #TARGETS += rwlock
@@ -40,9 +45,11 @@ TARGETS += tsd
 #TARGETS += wq_event_manager
 #TARGETS += wq_kevent
 #TARGETS += wq_kevent_stress
+TARGETS += wq_limits
 TARGETS += add_timer_termination
 
-OTHER_TARGETS :=
+OTHER_LTE_INCLUDE_FILES += \
+       /usr/local/lib/libdarwintest_utils.dylib
 
 OTHER_CFLAGS := -DDARWINTEST -Weverything \
                -Wno-vla -Wno-bad-function-cast -Wno-missing-noreturn \
index ce669c1a69e88146b0ec6cb2efb5a301357ac4db..9bc877f168142e032eaee8880de19b8678b40d8b 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <dispatch/dispatch.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 #include <darwintest_utils.h>
 
 extern void __exit(int) __attribute__((noreturn));
index 8955b5e5c4d2d4e2e02b43a20def73dbf64f4275..a33cf2e67df27ef3999266153e636aba7cd5c7cc 100644 (file)
@@ -5,7 +5,7 @@
 #include <sys/wait.h>
 #include <stdlib.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 static const char ATFORK_PREPARE[] = "prepare";
 static const char ATFORK_PARENT[] = "parent";
index dd22660b5b1ef9a5d8f41f2f587c6fe8ba91410a..a1b6d73cce6619a558974cc8589a91e4b0cd5c29 100644 (file)
@@ -17,7 +17,7 @@
 
 #include <dispatch/dispatch.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 T_DECL(bsdthread_set_self_constrained_transition, "bsdthread_ctl(SET_SELF) with overcommit change",
                T_META_ALL_VALID_ARCHS(YES))
@@ -73,15 +73,3 @@ T_DECL(bsdthread_set_self_constrained_threads, "bsdthread_ctl(SET_SELF) with ove
 
        dispatch_main();
 }
-
-T_DECL(bsdthread_set_self_unbind, "bsdthread_ctl(SET_SELF) with kevent unbind",
-               T_META_ALL_VALID_ARCHS(YES))
-{
-       dispatch_async(dispatch_get_global_queue(0, 0), ^{
-               T_ASSERT_POSIX_ZERO(_pthread_set_properties_self(_PTHREAD_SET_SELF_WQ_KEVENT_UNBIND, 0, 0), NULL);
-
-               T_END;
-       });
-
-       dispatch_main();
-}
index b35273ef653290e17393f155aac2ce2253ec34a9..0fc91c2b5aa9082edc67c80f81e326caa44ec588 100644 (file)
@@ -8,73 +8,95 @@
 #include <errno.h>
 #include <libkern/OSAtomic.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 #include <darwintest_utils.h>
 
 struct context {
        pthread_cond_t cond;
        pthread_mutex_t mutex;
+       pthread_cond_t ready_cond;
        long waiters;
        long count;
+       bool ready;
+       char _padding[7];
 };
 
+
 static void *wait_thread(void *ptr) {
-       int res;
        struct context *context = ptr;
 
+       // tell producer thread that we are ready
+       T_QUIET;
+       T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context->mutex), "pthread_mutex_lock");
+       context->ready = true;
+       T_QUIET;
+       T_ASSERT_POSIX_ZERO(pthread_cond_signal(&context->ready_cond), "pthread_cond_signal");
+
        bool loop = true;
        while (loop) {
-               res = pthread_mutex_lock(&context->mutex);
-               if (res) {
-                       T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_mutex_lock", context->count);
-               }
-               
+
                if (context->count > 0) {
                        ++context->waiters;
-                       res = pthread_cond_wait(&context->cond, &context->mutex);
-                       if (res) {
-                               T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_rwlock_unlock", context->count);
-                       }
+                       T_QUIET;
+                       T_ASSERT_POSIX_ZERO(pthread_cond_wait(&context->cond, &context->mutex), "[%ld] pthread_cond_wait", context->count);
                        --context->waiters;
                        --context->count;
                } else {
                        loop = false;
                }
-               
-               res = pthread_mutex_unlock(&context->mutex);
-               if (res) {
-                       T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_mutex_unlock", context->count);
-               }
+
        }
 
+       T_QUIET;
+       T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context->mutex), "[%ld] pthread_mutex_unlock", context->count);
+
        return NULL;
 }
 
 T_DECL(cond, "pthread_cond",
-               T_META_ALL_VALID_ARCHS(YES))
+               T_META_ALL_VALID_ARCHS(YES), T_META_TIMEOUT(120), T_META_CHECK_LEAKS(NO))
 {
        struct context context = {
                .cond = PTHREAD_COND_INITIALIZER,
                .mutex = PTHREAD_MUTEX_INITIALIZER,
+               .ready_cond = PTHREAD_COND_INITIALIZER,
                .waiters = 0,
-               .count = 100000 * dt_ncpu(),
+               .count = 50000 * dt_ncpu(),
+               .ready = false,
        };
        int i;
        int res;
        int threads = 2;
        pthread_t p[threads];
        for (i = 0; i < threads; ++i) {
-               T_ASSERT_POSIX_ZERO(pthread_create(&p[i], NULL, wait_thread, &context), NULL);
+               T_QUIET;
+               T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context.mutex), "pthread_mutex_lock");
+
+               context.ready = false;
+
+               T_QUIET;
+               T_ASSERT_POSIX_ZERO(pthread_create(&p[i], NULL, wait_thread, &context), "pthread_create");
+
+               do {
+                       // mutex will be dropped and allow consumer thread to acquire
+                       T_QUIET;
+                       T_ASSERT_POSIX_ZERO(pthread_cond_wait(&context.ready_cond, &context.mutex), "pthread_cond_wait");
+               } while (context.ready == false);
+
+               T_QUIET;
+               T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context.mutex), "pthread_mutex_unlock");
+
+               T_LOG("Thread %d ready.", i);
        }
 
+       T_LOG("All threads ready.");
+
        long half = context.count / 2;
 
        bool loop = true;
        while (loop) {
-               res = pthread_mutex_lock(&context.mutex);
-               if (res) {
-                       T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_mutex_lock", context.count);
-               }
+               T_QUIET;
+               T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context.mutex), "[%ld] pthread_mutex_lock", context.count);
                if (context.waiters) {
                        char *str;
                        if (context.count > half) {
@@ -84,21 +106,19 @@ T_DECL(cond, "pthread_cond",
                                str = "pthread_cond_signal";
                                res = pthread_cond_signal(&context.cond);
                        }
-                       if (res != 0) {
-                               T_ASSERT_POSIX_ZERO(res, "[%ld] %s", context.count, str);
-                       }
+                       T_QUIET;
+                       T_ASSERT_POSIX_ZERO(res, "[%ld] %s", context.count, str);
                }
                if (context.count <= 0) {
                        loop = false;
                        T_PASS("Completed stress test successfully.");
                }
-               
-               res = pthread_mutex_unlock(&context.mutex);
-               if (res) {
-                       T_ASSERT_POSIX_ZERO(res, "[%ld] pthread_mutex_unlock", context.count);
-               }
+
+               T_QUIET;
+               T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context.mutex),
+                               "[%ld] pthread_mutex_unlock", context.count);
        }
-       
+
        for (i = 0; i < threads; ++i) {
                T_ASSERT_POSIX_ZERO(pthread_join(p[i], NULL), NULL);
        }
index 9e4249d41baeb4ea108c947b8f15d24ce09b5a19..3f3c4e7a1d6ced1e7566802e86bafc1eeaf4baba 100644 (file)
@@ -10,7 +10,7 @@
 #include <libkern/OSAtomic.h>
 #include <dispatch/dispatch.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 #define NUM_THREADS 8
 
@@ -88,12 +88,12 @@ T_DECL(cond_timedwait_timeout, "pthread_cond_timedwait() timeout")
        usleep((useconds_t) uexpected);
        bool loop = true;
        while (loop) {
-               T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context.mutex),
+               T_QUIET; T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context.mutex),
                                                        "pthread_mutex_lock");
                if (context.count <= 0) {
                        loop = false;
                }
-               T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context.mutex),
+               T_QUIET; T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context.mutex),
                                                        "pthread_mutex_unlock");
        }
 
@@ -107,3 +107,181 @@ T_DECL(cond_timedwait_timeout, "pthread_cond_timedwait() timeout")
                        ((uint64_t) start.tv_sec * USEC_PER_SEC + (uint64_t) start.tv_usec);
        T_LOG("waittime actual:   %llu us", uelapsed);
 }
+
+struct prodcons_context {
+       pthread_cond_t cond;
+       pthread_mutex_t mutex;
+       bool consumer_ready;
+       bool workitem_available;
+       bool padding[6];
+};
+
+static void *consumer_thread(void *ptr) {
+       struct prodcons_context *context = ptr;
+
+       // tell producer thread that we are ready
+       T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context->mutex), "pthread_mutex_lock");
+
+       context->consumer_ready = true;
+       T_ASSERT_POSIX_ZERO(pthread_cond_signal(&context->cond), "pthread_cond_signal");
+
+       // wait for a work item to become available
+       do {
+               // mutex will be dropped and allow producer thread to acquire
+               T_ASSERT_POSIX_ZERO(pthread_cond_wait(&context->cond, &context->mutex), "pthread_cond_wait");
+
+               // loop in case of spurious wakeups
+       } while (context->workitem_available == false);
+
+       // work item has been sent, so dequeue it and tell producer
+       context->workitem_available = false;
+       T_ASSERT_POSIX_ZERO(pthread_cond_signal(&context->cond), "pthread_cond_signal");
+
+       // unlock mutex, we are done here
+       T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context->mutex), "pthread_mutex_unlock");
+
+       T_PASS("Consumer thread exiting");
+
+       return NULL;
+}
+
+#define TESTCASE_TIMEOUT (10) /* seconds */
+typedef enum {
+       eNullTimeout,
+       eZeroTimeout,
+       eBeforeEpochTimeout,
+       eRecentPastTimeout
+} TimeOutType;
+
+static DT_TEST_RETURN cond_timedwait_timeouts_internal(TimeOutType timeout, bool relative);
+
+T_DECL(cond_timedwait_nulltimeout, "pthread_cond_timedwait() with NULL timeout, ensure mutex is unlocked")
+{
+       cond_timedwait_timeouts_internal(eNullTimeout, false);
+}
+
+T_DECL(cond_timedwait_zerotimeout, "pthread_cond_timedwait() with zero timeout, ensure mutex is unlocked")
+{
+       cond_timedwait_timeouts_internal(eZeroTimeout, false);
+}
+
+T_DECL(cond_timedwait_beforeepochtimeout, "pthread_cond_timedwait() with timeout before the epoch, ensure mutex is unlocked")
+{
+       cond_timedwait_timeouts_internal(eBeforeEpochTimeout, false);
+}
+
+T_DECL(cond_timedwait_pasttimeout, "pthread_cond_timedwait() with timeout in the past, ensure mutex is unlocked")
+{
+       cond_timedwait_timeouts_internal(eRecentPastTimeout, false);
+}
+
+T_DECL(cond_timedwait_relative_nulltimeout, "pthread_cond_timedwait_relative_np() with relative NULL timeout, ensure mutex is unlocked")
+{
+       cond_timedwait_timeouts_internal(eNullTimeout, true);
+}
+
+T_DECL(cond_timedwait_relative_pasttimeout, "pthread_cond_timedwait_relative_np() with relative timeout in the past, ensure mutex is unlocked")
+{
+       cond_timedwait_timeouts_internal(eRecentPastTimeout, true);
+}
+
+static DT_TEST_RETURN cond_timedwait_timeouts_internal(TimeOutType timeout, bool relative)
+{
+       // This testcase mimics a producer-consumer model where the consumer checks
+       // in and waits until work becomes available. The producer then waits until
+       // the work has been consumed and the consumer quiesces. Since condition
+       // variables may have spurious wakeups, the timeout should not matter,
+       // but there have been functional issues where the mutex would not be unlocked
+       // for a timeout in the past.
+       struct prodcons_context context = {
+               .cond = PTHREAD_COND_INITIALIZER,
+               .mutex = PTHREAD_MUTEX_INITIALIZER,
+               .consumer_ready = false,
+               .workitem_available = false
+       };
+
+       struct timeval test_timeout;
+       gettimeofday(&test_timeout, NULL);
+       test_timeout.tv_sec += TESTCASE_TIMEOUT;
+
+       T_ASSERT_POSIX_ZERO(pthread_mutex_lock(&context.mutex), "pthread_mutex_lock");
+
+       pthread_t p;
+       T_ASSERT_POSIX_ZERO(pthread_create(&p, NULL, consumer_thread, &context),
+                                                       "pthread_create");
+
+       // Wait until consumer thread is able to acquire the mutex, check in, and block
+       // in its own condition variable. We do not want to start generating work before
+       // the consumer thread is available
+       do {
+               // mutex will be dropped and allow consumer thread to acquire
+               T_ASSERT_POSIX_ZERO(pthread_cond_wait(&context.cond, &context.mutex), "pthread_cond_wait");
+
+               // loop in case of spurious wakeups
+       } while (context.consumer_ready == false);
+
+       // consumer is ready and blocked in its own condition variable, and
+       // producer has mutex acquired. Send a work item and wait for it
+       // to be dequeued
+
+       context.workitem_available = true;
+       T_ASSERT_POSIX_ZERO(pthread_cond_signal(&context.cond), "pthread_cond_signal");
+
+       do {
+               struct timeval now;
+
+               gettimeofday(&now, NULL);
+               T_QUIET; T_ASSERT_TRUE(timercmp(&now, &test_timeout, <), "timeout reached waiting for consumer thread to consume");
+
+               struct timespec ts;
+
+               if (relative) {
+                       switch (timeout) {
+                               case eNullTimeout:
+                                       break;
+                               case eRecentPastTimeout:
+                                       ts.tv_sec = -1;
+                                       ts.tv_nsec = 0;
+                                       break;
+                               case eZeroTimeout:
+                               case eBeforeEpochTimeout:
+                                       break;
+                       }
+               } else {
+                       switch (timeout) {
+                               case eNullTimeout:
+                                       break;
+                               case eZeroTimeout:
+                                       ts.tv_sec = 0;
+                                       ts.tv_nsec = 0;
+                                       break;
+                               case eBeforeEpochTimeout:
+                                       ts.tv_sec = -1;
+                                       ts.tv_nsec = 0;
+                                       break;
+                               case eRecentPastTimeout:
+                                       ts.tv_sec = now.tv_sec - 1;
+                                       ts.tv_nsec = now.tv_usec / 1000;
+                                       break;
+                       }
+               }
+
+               int ret;
+               if (relative) {
+                       ret = pthread_cond_timedwait_relative_np(&context.cond, &context.mutex, timeout == eNullTimeout ? NULL : &ts);
+               } else {
+                       ret = pthread_cond_timedwait(&context.cond, &context.mutex, timeout == eNullTimeout ? NULL : &ts);
+               }
+               if (ret != 0 && ret != EINTR && ret != ETIMEDOUT) T_ASSERT_POSIX_ZERO(ret, "timedwait returned error");
+
+               usleep(10*1000); // avoid spinning in a CPU-bound loop
+
+               // loop in case of spurious wakeups
+       } while (context.workitem_available == true);
+
+       T_ASSERT_POSIX_ZERO(pthread_mutex_unlock(&context.mutex), "pthread_mutex_unlock");
+
+       T_ASSERT_POSIX_ZERO(pthread_join(p, NULL), "pthread_join");
+
+       T_PASS("Consumer completed work");
+}
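All of the timeout cases above hand pthread_cond_timedwait() a deadline that is NULL, zero, or already in the past; the invariant under test is that the call still returns with the mutex reacquired. For contrast, a well-formed future deadline is built the same way the test builds its past one (hedged sketch; cond and mutex stand for any initialized pair with the mutex already held):

    struct timeval now;
    struct timespec deadline;
    gettimeofday(&now, NULL);
    deadline.tv_sec = now.tv_sec + 1;        /* one second from now */
    deadline.tv_nsec = now.tv_usec * 1000;   /* microseconds -> nanoseconds */
    int ret = pthread_cond_timedwait(&cond, &mutex, &deadline);
    /* ret is 0 after a signal (or spurious wakeup) and ETIMEDOUT once the
     * deadline passes; in both cases the mutex is held again on return. */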
index 54a8d17bc298ba2fd376a75870c0c18ec52f8a88..410bedeb7c2e650184a33a6cc9aee1b4b63bfecf 100644 (file)
@@ -4,7 +4,8 @@
 #include <unistd.h>
 #include <os/assumes.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
+
 
 static uintptr_t stackaddr;
 static const size_t stacksize = 4096 * 8;
diff --git a/tests/darwintest_defaults.h b/tests/darwintest_defaults.h
new file mode 100644 (file)
index 0000000..c6bfe07
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2017 Apple Inc. All rights reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+#ifndef DARWINTEST_DEFAULTS_H
+#define DARWINTEST_DEFAULTS_H
+
+#include <darwintest.h>
+
+T_GLOBAL_META (
+    T_META_TIMEOUT(30),
+    T_META_LTEPHASE(LTE_POSTINIT)
+);
+
+
+#endif // DARWINTEST_DEFAULTS_H
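Every test that switches from <darwintest.h> to this header inherits the 30-second timeout and the LTE_POSTINIT phase; a declaration can still override them, as cond.c does above with T_META_TIMEOUT(120). A minimal sketch:

    #include "darwintest_defaults.h"

    /* Overrides the global 30s timeout; the LTE_POSTINIT default still applies. */
    T_DECL(slow_stress, "example stress test", T_META_TIMEOUT(120))
    {
            T_PASS("finished within the widened timeout");
    }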
diff --git a/tests/detach.c b/tests/detach.c
new file mode 100644 (file)
index 0000000..3e265e1
--- /dev/null
@@ -0,0 +1,42 @@
+#include <assert.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <dispatch/dispatch.h>
+#include <sys/mman.h>
+
+#include "darwintest_defaults.h"
+
+static void*
+thread_routine(void *loc)
+{
+       uintptr_t i = (uintptr_t)loc;
+
+       switch (i % 3) {
+       case 0:
+               usleep(1000);
+               break;
+       case 1:
+               pthread_exit(pthread_self());
+               __builtin_unreachable();
+       case 2:
+               break;
+       }
+       return NULL;
+}
+
+T_DECL(pthread_detach, "Test creating and detaching threads in a loop",
+               T_META_CHECK_LEAKS(NO), T_META_ALL_VALID_ARCHS(YES))
+{
+       const size_t count = 32;
+       pthread_t ths[count];
+
+       for (size_t i = 0; i < 100; i++) {
+               for (size_t j = 0; j < count; j++) {
+                       T_ASSERT_POSIX_ZERO(pthread_create(&ths[j], NULL,
+                                                       thread_routine, (void *)j), "thread creation");
+                       T_ASSERT_POSIX_ZERO(pthread_detach(ths[j]), "thread detach");
+               }
+               usleep(50000);
+       }
+}
index 921c136a990269467e73c4e1a668b3307a9ec857..a0fbe16856cefcc84ae79ae33d14f9d8dcfbce2f 100644 (file)
@@ -3,9 +3,11 @@
 #include <stdint.h>
 #include <stdio.h>
 #include <unistd.h>
+#include <stdlib.h>
 #include <mach/mach.h>
+#include <libkern/OSAtomicQueue.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 #define WAITTIME (100 * 1000)
 
@@ -117,3 +119,81 @@ T_DECL(pthread_join_stress, "pthread_join in a loop")
        }
        T_PASS("Success!");
 }
+
+static pthread_mutex_t join3way_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t join3way_cond = PTHREAD_COND_INITIALIZER;
+static OSQueueHead join3way_queue = OS_ATOMIC_QUEUE_INIT;
+
+struct join3way_item {
+    pthread_t th;
+    struct join3way_item *next;
+};
+
+static void *
+join3way_joiner(__unused void *arg)
+{
+    pthread_mutex_lock(&join3way_mutex);
+    while (1) {
+        pthread_cond_wait(&join3way_cond, &join3way_mutex);
+        struct join3way_item *item = OSAtomicDequeue(&join3way_queue,
+                offsetof(struct join3way_item, next));
+        if (item == NULL) continue; /* pthread_cond_wait may wake spuriously */
+        pthread_join(item->th, NULL);
+        free(item);
+    }
+    return NULL;
+}
+
+static void *
+join3way_child(__unused void *arg)
+{
+    struct join3way_item *item = malloc(sizeof(struct join3way_item));
+    item->th = pthread_self();
+    item->next = NULL;
+    OSAtomicEnqueue(&join3way_queue, item,
+            offsetof(struct join3way_item, next));
+    pthread_cond_signal(&join3way_cond);
+    return NULL;
+}
+
+static void *
+join3way_creator(__unused void *arg)
+{
+    pthread_attr_t attr;
+    T_QUIET; T_ASSERT_POSIX_ZERO(pthread_attr_init(&attr), "pthread_attr_init");
+    T_ASSERT_POSIX_ZERO(pthread_attr_set_qos_class_np(&attr,
+            QOS_CLASS_USER_INTERACTIVE, 0), "pthread_attr_set_qos_class_np (child)");
+
+    int n = 1000;
+    while (--n > 0) {
+        pthread_t t;
+        T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&t, &attr,
+                join3way_child, NULL), "create thread");
+    }
+    T_ASSERT_EQ_INT(n, 0, "created all child threads");
+    return NULL;
+}
+
+T_DECL(pthread_join_3way, "pthread_join from non-parent with priority inversion")
+{
+    pthread_attr_t joinerattr;
+    T_QUIET; T_ASSERT_POSIX_ZERO(pthread_attr_init(&joinerattr),
+            "pthread_attr_init");
+    T_ASSERT_POSIX_ZERO(pthread_attr_set_qos_class_np(&joinerattr,
+            QOS_CLASS_USER_INTERACTIVE, 0), "pthread_attr_set_qos_class_np");
+
+    pthread_t joiner;
+    T_ASSERT_POSIX_SUCCESS(pthread_create(&joiner, &joinerattr, join3way_joiner,
+            NULL), "create joiner thread");
+
+    pthread_attr_t creatorattr;
+    T_QUIET; T_ASSERT_POSIX_ZERO(pthread_attr_init(&creatorattr),
+            "pthread_attr_init");
+    T_ASSERT_POSIX_ZERO(pthread_attr_set_qos_class_np(&creatorattr,
+            QOS_CLASS_BACKGROUND, 0), "pthread_attr_set_qos_class_np (creator)");
+
+    pthread_t creator;
+    T_ASSERT_POSIX_SUCCESS(pthread_create(&creator, &creatorattr,
+            join3way_creator, NULL), "create creator thread");
+
+    pthread_join(creator, NULL);
+}
index b317b8aef82fa178366b801ad4dd2ed350a1f409..bc6e89f8743b0b0eacb60402b5762470e8414d7f 100644 (file)
@@ -1,6 +1,6 @@
 #include <stdlib.h>
 #include <pthread.h>
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 #include <machine/vmparam.h>
 
 T_DECL(main_stack, "tests the reported values for the main thread stack",
index 5f40de2156de7f3f3dd1c0968e77c2ed296b3477..eb0d660114fe8806fc4015f86027de420afdabcd 100644 (file)
@@ -1,6 +1,6 @@
 #include <stdlib.h>
 #include <pthread.h>
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 #include <machine/vmparam.h>
 
 T_DECL(main_stack_custom, "tests the reported values for a custom main thread stack"){
index c4df26572535945e3d94700e6151c5c07a3db417..f8373eb0f12d08659b042ca5b63c17cd40141f4e 100644 (file)
@@ -1,6 +1,6 @@
 #include <stdlib.h>
 #include <pthread.h>
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 #include <machine/vmparam.h>
 
 T_DECL(main_stack_legacy, "tests the reported values for a custom main thread stack",
index bd2490a4b70b18a7c8a4a61e89895e625e81dc18..7fca3252ce44c44549847c23980b0d60e7715134 100644 (file)
@@ -5,7 +5,7 @@
 #include <stdbool.h>
 #include <errno.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 struct context {
        pthread_mutex_t mutex;
index 61a37a9c5d2cb2faa0ae5c13087a94d7dee03a85..98d0c56929927a29ca4cbc44f1b9b1ddccbfd41b 100644 (file)
@@ -5,7 +5,7 @@
 #include <pthread.h>
 #include <stdlib.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 struct ctx {
        volatile int last_holder;
index 4daa1db92d2271b4fb15ca31fb60a9e69bcd1507..5a73308d075534b14e3fa19381f5665e3c525b3f 100644 (file)
@@ -1,6 +1,6 @@
 #include <pthread.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 static volatile int once_invoked = 0;
 
index 032b832e9aee9b0174a0526a1dfd027207282359..845ff6579580e5645dc3bdcf48e6769f2cd5753e 100644 (file)
@@ -4,13 +4,26 @@
 #include <limits.h>
 #include <pthread.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 #define STACK_ALLOWANCE (1024ULL * 6)
 
 static void *
 pthread_attr_setstacksize_func(void *arg)
 {
+    if ((size_t)arg < 1024ULL * 32) {
+        /*
+         * We can't use darwintest because it requires a bigger stack than
+         * this, so cheat and use the return value for the test.
+         */
+#ifndef __arm64__
+        if ((size_t)arg != pthread_get_stacksize_np(pthread_self())) {
+            return NULL;
+        }
+#endif
+        return (void*)pthread_attr_setstacksize_func;
+    }
+
 #if defined(__arm64__)
     // Because of <rdar://problem/19941744>, the kext adds additional size to the stack on arm64.
     T_EXPECTFAIL;
index 26e19f6cd09da707fb74240178afbd1442f50f74..37d82aa3ad32fdde8752920522fa54c9338b4a4c 100644 (file)
@@ -1,6 +1,6 @@
 #include <pthread.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 #define MAX_THREADS 512
 #define THREAD_DEPTH 32
index 20b8f88e4bf0e3ace2a874144ac447458a4e21c4..8ce343ebebbdbc527eaaf7d2c68e10d9f49624bc 100644 (file)
@@ -1,6 +1,6 @@
 #include <pthread.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 static void *
 run(void * __unused arg)
index 86c8b7dc9f3b041f437107c4bddb19c866c16e34..68490f825736764433ea86d0d588512c797199fa 100644 (file)
@@ -9,7 +9,7 @@
 #include <sys/param.h>
 #include <pthread/private.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 #include "../src/pthread_cwd.c"
 
@@ -81,9 +81,9 @@ T_DECL(pthread_cwd, "per-thread working directory")
        umask(0);               /* "always successful" */
 
        /* Now set us up the test directories... */
-       T_WITH_ERRNO; T_ASSERT_TRUE(mkdir(WORKDIR, 0777) != -1 || errno != EEXIST, NULL);
-       T_WITH_ERRNO; T_ASSERT_TRUE(mkdir(WORKDIR1, 0777) != -1 || errno != EEXIST, NULL);
-       T_WITH_ERRNO; T_ASSERT_TRUE(mkdir(WORKDIR2, 0777) != -1 || errno != EEXIST, NULL);
+       T_WITH_ERRNO; T_ASSERT_TRUE(mkdir(WORKDIR, 0777) != -1 || errno == EEXIST, NULL);
+       T_WITH_ERRNO; T_ASSERT_TRUE(mkdir(WORKDIR1, 0777) != -1 || errno == EEXIST, NULL);
+       T_WITH_ERRNO; T_ASSERT_TRUE(mkdir(WORKDIR2, 0777) != -1 || errno == EEXIST, NULL);
 
        T_SETUPEND;
 
index 76154f4138a4f21dd7076b72ea731702b62c96dd..cd5b12a7ec8ac31963ea323c3f22eef0f8152b26 100644 (file)
@@ -4,7 +4,7 @@
 #include <stdlib.h>
 #include <unistd.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 #define STACK_SIZE      32768
 #define THREAD_DEPTH    2000
@@ -33,7 +33,7 @@ thread_exit(__unused void *arg)
        return NULL;
 }
 
-T_DECL(pthread_exit, "pthread_exit")
+T_DECL(pthread_exit, "pthread_exit", T_META_LTEPHASE(LTE_INSTALLEDUSEROS))
 {
        int j;
        pthread_t th[THREAD_DEPTH];
@@ -57,7 +57,8 @@ thread_stub(__unused void *arg)
        return NULL;
 }
 
-T_DECL(pthread_exit_private_stacks, "pthread_exit with private stacks", T_META_CHECK_LEAKS(NO))
+T_DECL(pthread_exit_private_stacks, "pthread_exit with private stacks",
+       T_META_CHECK_LEAKS(NO))
 {
        int j;
        pthread_t th[THREAD_DEPTH];
diff --git a/tests/pthread_get_qos_class_np.c b/tests/pthread_get_qos_class_np.c
new file mode 100644 (file)
index 0000000..9bfaa76
--- /dev/null
@@ -0,0 +1,29 @@
+#include <assert.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdbool.h>
+
+#include <sys/qos.h>
+#include <sys/resource.h>
+#include <pthread.h>
+
+#include "darwintest_defaults.h"
+
+static void *sleep_thread(void __unused *arg){
+       sleep(1);
+       return NULL;
+}
+
+/* Regression test for <rdar://problem/29209770> */
+T_DECL(test_pthread_get_qos_class_np, "Test for pthread_get_qos_class_np()", T_META_CHECK_LEAKS(NO)) {
+       pthread_t thread;
+       pthread_attr_t attr;
+       pthread_attr_init(&attr);
+       pthread_attr_set_qos_class_np(&attr, QOS_CLASS_BACKGROUND, 0);
+       pthread_create(&thread, &attr, sleep_thread, NULL);
+
+       qos_class_t qos;
+       pthread_get_qos_class_np(thread, &qos, NULL);
+
+       T_EXPECT_EQ(qos, (qos_class_t)QOS_CLASS_BACKGROUND, NULL);
+}
index acb4d8fb008aa089d4f4e0bde9188a50514cb9ba..5fc54595f9b28d7ba0e29f546bf758e75d23bf24 100644 (file)
@@ -3,7 +3,7 @@
 #include <pthread/introspection_private.h>
 #include <dispatch/dispatch.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 static pthread_introspection_hook_t prev_pthread_introspection_hook;
 
@@ -34,24 +34,29 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
        }
 }
 
-T_DECL(PR_25679871, "PR-25679871",
-               T_META_TIMEOUT(30), T_META_ALL_VALID_ARCHS(YES))
+T_DECL(pthread_introspection, "PR-25679871",
+       T_META_TIMEOUT(30), T_META_ALL_VALID_ARCHS(YES))
 {
        prev_pthread_introspection_hook = pthread_introspection_hook_install(&my_pthread_introspection_hook);
 
-       // minus two that come up in dispatch internally, one that comes after this block
-       for (int i = 0; i < THREAD_COUNT - 3; i++) {
+       // minus one that comes after this block
+       for (int i = 0; i < THREAD_COUNT - 1; i++) {
+               T_LOG("Creating dispatch_async thread %d.", i);
                dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
+                       T_LOG("Started dispatch_async thread %d.", i);
                        sleep(3);
                });
        }
        dispatch_queue_t serial_queue = dispatch_queue_create("test queue", NULL);
        __block dispatch_block_t looping_block = ^{
                static int count;
-               if (count < 20) {
+               if (count++ < 20) {
                        dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 50 * NSEC_PER_MSEC), serial_queue, looping_block);
+               } else {
+                       T_LOG("Looping block complete");
                }
        };
+       T_LOG("Starting looping block");
        dispatch_async(serial_queue, looping_block);
 
        sleep(30);
index 6e474373ed2edf0ae324c6f65ca5df39bfdf1486..d5adb7889816740756e83b27baed3de0fc62118f 100644 (file)
@@ -31,7 +31,7 @@
 #include <stdlib.h>
 #include <unistd.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 static pthread_key_t key;
 
@@ -54,7 +54,7 @@ grim_reaper(void * param)
 }
 
 T_DECL(pthread_setspecific, "pthread_setspecific",
-        T_META_ALL_VALID_ARCHS(YES))
+       T_META_ALL_VALID_ARCHS(YES))
 {
        void * thread_res;
        pthread_t t1, t2;
index b8a3f72ce86e4dfd69bb919bd1d60058f3d20695..d28ca65e7e52d6257ad704046b6c886b7aa2a90c 100644 (file)
@@ -2,7 +2,7 @@
 #include <pthread/private.h>
 #include <dispatch/dispatch.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 extern __uint64_t __thread_selfid( void );
 
diff --git a/tests/rdar_32848402.c b/tests/rdar_32848402.c
new file mode 100644 (file)
index 0000000..65cd56e
--- /dev/null
@@ -0,0 +1,86 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <dispatch/dispatch.h>
+#include <dispatch/private.h>
+#include <sys/time.h>
+#include <sys/sysctl.h>
+
+#include "darwintest_defaults.h"
+#include <darwintest_utils.h>
+
+static uint64_t end_spin;
+
+static uint32_t
+get_ncpu(void)
+{
+       static uint32_t activecpu;
+       if (!activecpu) {
+               uint32_t n;
+               size_t s = sizeof(activecpu);
+               sysctlbyname("hw.activecpu", &n, &s, NULL, 0);
+               activecpu = n;
+       }
+       return activecpu;
+}
+
+static void
+spin_and_pause(void *ctx)
+{
+       long i = (long)ctx;
+
+       printf("Thread %ld starts\n", i);
+
+       while (clock_gettime_nsec_np(CLOCK_MONOTONIC) < end_spin) {
+#if defined(__x86_64__) || defined(__i386__)
+               __asm__("pause");
+#elif defined(__arm__) || defined(__arm64__)
+               __asm__("wfe");
+#endif
+       }
+       printf("Thread %ld blocks\n", i);
+       pause();
+}
+
+static void
+spin(void *ctx)
+{
+       long i = (long)ctx;
+
+       printf("Thread %ld starts\n", i);
+
+       while (clock_gettime_nsec_np(CLOCK_MONOTONIC)) {
+#if defined(__x86_64__) || defined(__i386__)
+               __asm__("pause");
+#elif defined(__arm__) || defined(__arm64__)
+               __asm__("wfe");
+#endif
+       }
+}
+
+T_DECL(thread_request_32848402, "repro for rdar://32848402")
+{
+       dispatch_queue_attr_t bg_attr, in_attr;
+
+       bg_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,
+                       QOS_CLASS_BACKGROUND, 0);
+       in_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_CONCURRENT,
+                       QOS_CLASS_USER_INITIATED, 0);
+
+       dispatch_queue_t a = dispatch_queue_create_with_target("in", in_attr, NULL);
+       dispatch_queue_t b = dispatch_queue_create_with_target("bg", bg_attr, NULL);
+
+       end_spin = clock_gettime_nsec_np(CLOCK_MONOTONIC) + 2 * NSEC_PER_SEC;
+
+       dispatch_async_f(a, (void *)0, spin_and_pause);
+       for (long i = 1; i < get_ncpu(); i++) {
+               dispatch_async_f(b, (void *)i, spin);
+       }
+
+       dispatch_async(b, ^{
+               T_PASS("The NCPU+1-nth block got scheduled");
+               T_END;
+       });
+
+       sleep(10);
+       T_FAIL("The NCPU+1-nth block didn't get scheduled");
+}
index ba973f7a1b3f3d5c90d621f53f14c9b6f01713f3..a8dab42eb170c3b69a283b0126a0161220b28008 100644 (file)
@@ -4,17 +4,26 @@
 #include <unistd.h>
 #include <dispatch/dispatch.h>
 #include <sys/mman.h>
+#include <stdatomic.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
+#include <darwintest_multiprocess.h>
+#include <darwintest_utils.h>
 
 #define T_LOG_VERBOSE(...)
 
+#ifndef T_MAYFAIL_WITH_REASON
+#define T_MAYFAIL_WITH_REASON(x) T_MAYFAIL
+#endif
+
 #ifdef __LP64__
 #define STACK_LOCATIONS 16
 #else
 #define STACK_LOCATIONS 8
 #endif
 
+static const int attempts = 128, attempt_rounds = 3;
+
 static void*
 thread_routine(void *loc)
 {
@@ -31,46 +40,118 @@ pointer_compare(const void *ap, const void *bp)
        return a > b ? 1 : a < b ? -1 : 0;
 }
 
+typedef struct shmem_s {
+       _Atomic int ctr, done;
+       uintptr_t addr_array[attempts];
+} *shmem_t;
+
+static shmem_t
+test_shmem_open(const char* shmem_name, int creatflags)
+{
+       int fd = open(shmem_name, O_RDWR | creatflags, 0600);
+       T_QUIET; T_ASSERT_POSIX_SUCCESS(fd, "open temp file");
+       if (creatflags) {
+               T_QUIET; T_ASSERT_POSIX_SUCCESS(ftruncate(fd,
+                               sizeof(struct shmem_s)), "resize temp file");
+       }
+       shmem_t shmem = mmap(NULL, sizeof(struct shmem_s),
+                       PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+       T_QUIET; T_ASSERT_TRUE(shmem != MAP_FAILED, "mmap shmem");
+       T_QUIET; T_ASSERT_POSIX_SUCCESS(close(fd), "close temp file");
+       return shmem;
+}
+
+static uintptr_t*
+test_shmem_start(shmem_t shmem)
+{
+       int idx = atomic_fetch_add(&shmem->ctr, 1);
+       return &shmem->addr_array[idx];
+}
+
 static void
-test_stack_aslr(bool workqueue_thread)
+test_shmem_end(shmem_t shmem)
+{
+       atomic_fetch_add(&shmem->done, 1);
+}
+
+T_HELPER_DECL(wq_stack_aslr_helper,
+               "Confirm that workqueue stacks are ASLRed (Helper)")
 {
-       const int attempts = 128;
-       int attempt_round = 0;
+       shmem_t shmem = test_shmem_open(argv[0], 0);
+       uintptr_t *addr = test_shmem_start(shmem);
+       dispatch_group_t g = dispatch_group_create();
+       dispatch_group_async_f(g, dispatch_get_global_queue(0,0), addr,
+                       (dispatch_function_t)thread_routine);
+       dispatch_group_wait(g, DISPATCH_TIME_FOREVER);
+       dispatch_release(g);
+       test_shmem_end(shmem);
+}
 
-       uintptr_t *addr_array = mmap(NULL, sizeof(uintptr_t) * attempts,
-                       PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANON, -1, 0);
-       T_QUIET; T_ASSERT_NOTNULL(addr_array, NULL);
+T_HELPER_DECL(pthread_stack_aslr_helper,
+               "Confirm that stacks are ASLRed (Helper)")
+{
+       shmem_t shmem = test_shmem_open(argv[0], 0);
+       uintptr_t *addr = test_shmem_start(shmem);
+       pthread_t th;
+       int ret = pthread_create(&th, NULL, thread_routine, addr);
+       assert(ret == 0);
+       ret = pthread_join(th, NULL);
+       assert(ret == 0);
+       test_shmem_end(shmem);
+}
 
+static void
+test_stack_aslr(bool workqueue_thread)
+{
+       const char *tmpdir = dt_tmpdir();
+       char *tmp;
+       asprintf(&tmp, "%s/pthread_stack_aslr_XXXXX", tmpdir);
+       T_QUIET; T_ASSERT_NOTNULL(mkdtemp(tmp), "mkdtemp");
+
+       char *shmem_name;
+       asprintf(&shmem_name, "%s/shmem", tmp);
+       shmem_t shmem = test_shmem_open(shmem_name, O_CREAT|O_EXCL);
+       uintptr_t *addr_array = shmem->addr_array;
+
+       dt_helper_t helpers[attempts * attempt_rounds];
+       const char* helper = workqueue_thread ? "wq_stack_aslr_helper" :
+                       "pthread_stack_aslr_helper";
+       char *helper_args[] = {shmem_name, NULL};
+       size_t helper_idx = 0;
+
+       struct rlimit l;
+       if (!getrlimit(RLIMIT_NOFILE, &l)) {
+               l.rlim_cur += 3 * attempts * attempt_rounds; // 3 fifos per helper
+               T_QUIET; T_ASSERT_POSIX_SUCCESS(setrlimit(RLIMIT_NOFILE, &l),
+                               "setrlimit");
+       }
+       signal(SIGCHLD, SIG_IGN);
+
+       int attempt_round = attempt_rounds;
 again:
-       bzero(addr_array, sizeof(uintptr_t) * attempts);
+       bzero(shmem, sizeof(struct shmem_s));
 
        for (int i = 0; i < attempts; i++) {
-               pid_t pid = fork();
-               T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "[%d] fork()", i);
-
-               if (pid) { // parent
-                       pid = waitpid(pid, NULL, 0);
-                       T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "[%d] waitpid()", i);
-               } else if (workqueue_thread) { // child
-                       dispatch_async(dispatch_get_global_queue(0,0), ^{
-                               int foo;
-                               addr_array[i] = (uintptr_t)&foo;
-                               exit(0);
-                       });
-                       while (true) sleep(1);
-               } else { // child
-                       pthread_t th;
-                       int ret = pthread_create(&th, NULL, thread_routine, &addr_array[i]);
-                       assert(ret == 0);
-                       ret = pthread_join(th, NULL);
-                       assert(ret == 0);
-                       exit(0);
-               }
+               char *t;
+               asprintf(&t, "%s/%d", tmp, i);
+               T_QUIET; T_ASSERT_POSIX_SUCCESS(mkdir(t, 0700), "mkdir");
+               setenv("BATS_TMP_DIR", t, 1); // hack to workaround rdar://33443485
+               free(t);
+               helpers[helper_idx++] = dt_child_helper_args(helper, helper_args);
+               int w = 100;
+               do {
+                       if (!w--) {
+                               T_QUIET; T_FAIL("Helper should complete in <.1s");
+                               goto timeout;
+                       }
+                       usleep(1000);
+               } while (shmem->done <= i);
        }
+       setenv("BATS_TMP_DIR", tmpdir, 1);
 
        qsort(addr_array, attempts, sizeof(uintptr_t), pointer_compare);
-
-       T_LOG("Stack address range: %p - %p (+%lx)", (void*)addr_array[0], (void*)addr_array[attempts-1],
+       T_LOG("Stack address range: %p - %p (+%lx)", (void*)addr_array[0],
+                       (void*)addr_array[attempts-1],
                        addr_array[attempts-1] - addr_array[0]);
 
        int unique_values = 0;
@@ -82,19 +163,26 @@ again:
                }
        }
 
-       if (attempt_round < 3) T_MAYFAIL;
-       T_EXPECT_GE(unique_values, STACK_LOCATIONS, "Should have more than %d unique stack locations", STACK_LOCATIONS);
-       if (attempt_round++ < 3 && unique_values < STACK_LOCATIONS) goto again;
+       if (--attempt_round) T_MAYFAIL_WITH_REASON("ASLR");
+       T_EXPECT_GE(unique_values, STACK_LOCATIONS,
+                       "Should have more than %d unique stack locations", STACK_LOCATIONS);
+       if (attempt_round && unique_values < STACK_LOCATIONS) goto again;
+
+timeout:
+       T_QUIET; T_EXPECT_POSIX_SUCCESS(unlink(shmem_name), "unlink temp file");
+       free(shmem_name);
+       free(tmp);
+       dt_run_helpers(helpers, helper_idx, 5);
 }
 
-T_DECL(pthread_stack_aslr, "Confirm that stacks are ASLRed", T_META_CHECK_LEAKS(NO),
-               T_META_ALL_VALID_ARCHS(YES))
+T_DECL(pthread_stack_aslr, "Confirm that stacks are ASLRed",
+               T_META_CHECK_LEAKS(NO), T_META_ALL_VALID_ARCHS(YES))
 {
        test_stack_aslr(false);
 }
 
-T_DECL(wq_stack_aslr, "Confirm that workqueue stacks are ASLRed", T_META_CHECK_LEAKS(NO),
-               T_META_ALL_VALID_ARCHS(YES))
+T_DECL(wq_stack_aslr, "Confirm that workqueue stacks are ASLRed",
+               T_META_CHECK_LEAKS(NO), T_META_ALL_VALID_ARCHS(YES))
 {
        test_stack_aslr(true);
 }
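
The helpers above call thread_routine and pointer_compare, both defined
earlier in stack_aslr.c (above this hunk). A minimal sketch consistent with
the call sites here; the bodies are assumed, not the actual source:

    static void *
    thread_routine(void *arg)
    {
            int local;                              // lives on this thread's stack
            *(uintptr_t *)arg = (uintptr_t)&local;  // publish the stack address
            return NULL;
    }

    static int
    pointer_compare(const void *a, const void *b)
    {
            uintptr_t x = *(const uintptr_t *)a;
            uintptr_t y = *(const uintptr_t *)b;
            return (x > y) - (x < y);
    }
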
index ba5668c36e545166a1c2d3a8d45020054a23f87d..259816d4cb1d5ba6071d8666e68d426b8659b95b 100644 (file)
@@ -1,7 +1,7 @@
 #include <pthread.h>
 #include <stdio.h>
 
-#include <darwintest.h>
+#include "darwintest_defaults.h"
 
 static void *ptr = NULL;
 
@@ -23,7 +23,7 @@ static void *thread(void *param)
 }
 
 T_DECL(tsd, "tsd",
-               T_META_ALL_VALID_ARCHS(YES))
+       T_META_ALL_VALID_ARCHS(YES))
 {
        pthread_key_t key;
 
diff --git a/tests/wq_limits.c b/tests/wq_limits.c
new file mode 100644 (file)
index 0000000..9ceda18
--- /dev/null
@@ -0,0 +1,57 @@
+#include <sys/sysctl.h>
+#include <dispatch/dispatch.h>
+#include <dispatch/private.h>
+#include "darwintest_defaults.h"
+
+T_DECL(wq_pool_limits, "test overcommit limit")
+{
+       dispatch_semaphore_t sema = dispatch_semaphore_create(0);
+       dispatch_group_t g = dispatch_group_create();
+       dispatch_time_t t;
+       uint32_t wq_max_threads, wq_max_constrained_threads;
+
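+       // each block signals bringup through the group, then parks on the
+       // semaphore, pinning one workqueue thread for the rest of the test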
+       dispatch_block_t b = ^{
+               dispatch_group_leave(g);
+               dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
+       };
+
+       size_t s = sizeof(uint32_t);
+       sysctlbyname("kern.wq_max_threads", &wq_max_threads, &s, NULL, 0);
+       sysctlbyname("kern.wq_max_constrained_threads", &wq_max_constrained_threads,
+                                &s, NULL, 0);
+
+       for (uint32_t i = 0; i < wq_max_constrained_threads; i++) {
+               dispatch_group_enter(g);
+               dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), b);
+       }
+
+       t = dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC);
+       T_ASSERT_EQ(dispatch_group_wait(g, t), 0L,
+                       "%u constrained threads bringup", wq_max_constrained_threads);
+
+       dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0), ^{
+               T_ASSERT_FAIL("Should never run");
+       });
+
+       sleep(5);
+       T_PASS("constrained limit looks fine");
+
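+       // the constrained pool is saturated; overcommit queues can still grow
+       // the pool up to the absolute kern.wq_max_threads cap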
+       for (uint32_t i = wq_max_constrained_threads; i < wq_max_threads; i++) {
+               dispatch_group_enter(g);
+               dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT,
+                                               DISPATCH_QUEUE_OVERCOMMIT), b);
+       }
+       t = dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC);
+       T_ASSERT_EQ(dispatch_group_wait(g, t), 0L,
+                       "%u threads bringup", wq_max_threads);
+
+       dispatch_async(dispatch_get_global_queue(QOS_CLASS_DEFAULT,
+                                       DISPATCH_QUEUE_OVERCOMMIT), ^{
+               T_ASSERT_FAIL("Should never run");
+       });
+
+       sleep(5);
+       T_PASS("thread limit looks fine");
+       T_END;
+}
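
The two limits the test saturates come straight from sysctl. A standalone
sketch for inspecting them on a given machine, using the same calls as the
test but without the darwintest harness:

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/sysctl.h>

    int main(void)
    {
            uint32_t max_threads = 0, max_constrained = 0;
            size_t s = sizeof(uint32_t);
            // total workqueue thread cap, overcommit included
            if (sysctlbyname("kern.wq_max_threads", &max_threads, &s, NULL, 0))
                    return 1;
            s = sizeof(uint32_t);
            // cap for constrained (non-overcommit) workqueue threads
            if (sysctlbyname("kern.wq_max_constrained_threads",
                            &max_constrained, &s, NULL, 0))
                    return 1;
            printf("wq_max_threads=%u wq_max_constrained_threads=%u\n",
                            max_threads, max_constrained);
            return 0;
    }
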
diff --git a/tools/pthtrace.lua b/tools/pthtrace.lua
new file mode 100755 (executable)
index 0000000..f175160
--- /dev/null
@@ -0,0 +1,70 @@
+#!/usr/local/bin/luatrace -s
+
+trace_codename = function(codename, callback)
+       local debugid = trace.debugid(codename)
+       if debugid ~= 0 then
+               trace.single(debugid,callback)
+       else
+               printf("WARNING: Cannot locate debugid for '%s'\n", codename)
+       end
+end
+
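+-- initial_timestamp anchors the relative-seconds column; workqueue_ptr_map
+-- remembers which process last used each workqueue pointer so that events
+-- logged by kernel_task can still be attributed to the owning process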
+initial_timestamp = 0
+workqueue_ptr_map = {};
+get_prefix = function(buf)
+       if initial_timestamp == 0 then
+               initial_timestamp = buf.timestamp
+       end
+       local secs = trace.convert_timestamp_to_nanoseconds(buf.timestamp - initial_timestamp) / 1000000000
+
+       local prefix
+       if trace.debugid_is_start(buf.debugid) then
+               prefix = "→"
+       elseif trace.debugid_is_end(buf.debugid) then
+               prefix = "←"
+       else
+               prefix = "↔"
+       end
+
+       local proc
+       if buf.command ~= "kernel_task" then
+               proc = buf.command
+               workqueue_ptr_map[buf[1]] = buf.command
+       elseif workqueue_ptr_map[buf[1]] ~= nil then
+               proc = workqueue_ptr_map[buf[1]]
+       else
+               proc = "UNKNOWN"
+       end
+
+       return string.format("%s %6.9f %-17s [%05d.%06x] %-24s",
+               prefix, secs, proc, buf.pid, buf.threadid, buf.debugname)
+end
+
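+-- pthread_thread_create: start = creation request, end = completion;
+-- intermediate events carry a subcode in arg4 (2 = stack allocated,
+-- 3 = thread is using a custom stack)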
+trace_codename("pthread_thread_create", function(buf)
+    local prefix = get_prefix(buf)
+    if trace.debugid_is_start(buf.debugid) then
+        printf("%s\tthread creation request: 0x%x\n", prefix, buf[1])
+    elseif trace.debugid_is_end(buf.debugid) then
+        printf("%s\t  thread creation complete: pthread 0x%x (error: %d)\n", prefix, buf[2], buf[1])
+    elseif buf[4] == 2 then
+        printf("%s\t  thread stack created: 0x%x + 0x%x\n", prefix, buf[2], buf[1])
+    elseif buf[4] == 3 then
+        printf("%s\t  thread using custom stack\n", prefix)
+    end
+end)
+
+trace_codename("pthread_thread_terminate", function(buf)
+    local prefix = get_prefix(buf)
+    if trace.debugid_is_start(buf.debugid) then
+        printf("%s\tthread terminate: stack 0x%x + 0x%x, kthport 0x%x\n", prefix, buf[1], buf[2], buf[3])
+    elseif trace.debugid_is_end(buf.debugid) then
+        printf("%s\t  thread terminate: ret %d\n", prefix, buf[1])
+    end
+end)
+
+-- The trace codes we need aren't enabled by default
+darwin.sysctlbyname("kern.pthread_debug_tracing", 1)
+completion_handler = function()
+       darwin.sysctlbyname("kern.pthread_debug_tracing", 0)
+end
+trace.set_completion_handler(completion_handler)
index 61f0c607535d494d4e05b51f1ce3b0f58995067e..2da03da8a419683607c8cdf463413ba37cb71fb4 100755 (executable)
@@ -2,7 +2,7 @@
 
 trace_codename = function(codename, callback)
        local debugid = trace.debugid(codename)
-       if debugid ~= 0 then 
+       if debugid ~= 0 then
                trace.single(debugid,callback)
        else
                printf("WARNING: Cannot locate debugid for '%s'\n", codename)
@@ -47,24 +47,59 @@ parse_pthread_priority = function(pri)
        end
        local qos = (pri & 0x00ffff00) >> 8
        if qos == 0x20 then
-               return string.format("UInter[%x]", pri);
+               return string.format("UI[%x]", pri);
        elseif qos == 0x10 then
-               return string.format("UInit[%x]", pri);
+               return string.format("IN[%x]", pri);
        elseif qos == 0x08 then
-               return string.format("Dflt[%x]", pri);
+               return string.format("DF[%x]", pri);
        elseif qos == 0x04 then
-               return string.format("Util[%x]", pri);
+               return string.format("UT[%x]", pri);
        elseif qos == 0x02 then
                return string.format("BG[%x]", pri);
        elseif qos == 0x01 then
-               return string.format("Maint[%x]", pri);
+               return string.format("MT[%x]", pri);
        elseif qos == 0x00 then
-               return string.format("Unsp[%x]", pri);
+               return string.format("--[%x]", pri);
        else
-               return string.format("Unkn[%x]", pri);
+               return string.format("??[%x]", pri);
        end
 end
 
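+-- kernel QoS bucket indexes, printed with the same two-letter abbreviations
+-- as parse_pthread_priority above: UI = user-interactive, IN = user-initiated,
+-- DF = default, UT = utility, BG = background, MT = maintenance,
+-- MG = event manager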
+parse_qos_bucket = function(pri)
+       if pri == 0 then
+               return string.format("UI[%x]", pri);
+       elseif pri == 1 then
+               return string.format("IN[%x]", pri);
+       elseif pri == 2 then
+               return string.format("DF[%x]", pri);
+       elseif pri == 3 then
+               return string.format("UT[%x]", pri);
+       elseif pri == 4 then
+               return string.format("BG[%x]", pri);
+       elseif pri == 5 then
+               return string.format("MT[%x]", pri);
+       elseif pri == 6 then
+               return string.format("MG[%x]", pri);
+       else
+               return string.format("??[%x]", pri);
+       end
+end
+
+parse_thactive_req_bucket = function(pri)
+    if pri ~= 6 then
+        return parse_qos_bucket(pri)
+    end
+    return "None"
+end
+
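+-- wq_thactive is a packed counter handed to the trace point as two 64-bit
+-- words: 16-bit per-bucket active counts (UI/IN/DF/UT in the low word,
+-- BG/MT/MG in the high word) plus the requested bucket in the top 16 bits
+-- of the high word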
+get_thactive = function(low, high)
+    return string.format("req: %s, MG: %d, UI: %d, IN: %d, DF: %d, UT: %d, BG: %d, MT: %d",
+           parse_thactive_req_bucket(high >> (16 * 3)), (high >> (2 * 16)) & 0xffff,
+           (low  >> (0 * 16)) & 0xffff, (low  >> (1 * 16)) & 0xffff,
+           (low  >> (2 * 16)) & 0xffff, (low  >> (3 * 16)) & 0xffff,
+           (high >> (0 * 16)) & 0xffff, (high >> (1 * 16)) & 0xffff)
+end
+
 -- workqueue lifecycle
 
 trace_codename("wq_pthread_exit", function(buf)
@@ -85,55 +120,6 @@ trace_codename("wq_workqueue_exit", function(buf)
        end
 end)
 
--- thread requests
-
-trace_codename("wq_kevent_req_threads", function(buf)
-       local prefix = get_prefix(buf)
-       if trace.debugid_is_start(buf.debugid) then
-               printf("%s\tkevent requesting threads (requests[] length = %d)\n", prefix, buf.arg2)
-       else
-               printf("%s\tkevent request complete (start_timer: %d)\n", prefix, buf.arg2)
-       end
-end)
-
-trace_codename("wq_req_threads", function(buf)
-       local prefix = get_prefix(buf)
-       printf("%s\trecording %d constrained request(s) at %s, total %d requests\n",
-               prefix, buf.arg4, parse_pthread_priority(buf.arg2), buf.arg3)
-end)
-
-trace_codename("wq_req_octhreads", function(buf)
-       local prefix = get_prefix(buf)
-       printf("%s\tattempting %d overcommit request(s) at %s, total %d requests\n",
-               prefix, buf.arg4, parse_pthread_priority(buf.arg2), buf.arg3)
-end)
-trace_codename("wq_delay_octhreads", function(buf)
-       local prefix = get_prefix(buf)
-       printf("%s\trecording %d delayed overcommit request(s) at %s, total %d requests\n",
-               prefix, buf.arg4, parse_pthread_priority(buf.arg2), buf.arg3)
-end)
-
-trace_codename("wq_req_kevent_threads", function(buf)
-       local prefix = get_prefix(buf)
-       printf("%s\trecording kevent constrained request at %s, total %d requests\n",
-               prefix, parse_pthread_priority(buf.arg2), buf.arg3)
-end)
-trace_codename("wq_req_kevent_octhreads", function(buf)
-       local prefix = get_prefix(buf)
-       printf("%s\trecording kevent overcommit request at %s, total %d requests\n",
-               prefix, parse_pthread_priority(buf.arg2), buf.arg3)
-end)
-trace_codename("wq_req_event_manager", function(buf)
-       local prefix = get_prefix(buf)
-       if buf.arg2 == 1 then
-               printf("%s\tstarting event manager thread, existing at %d, %d added\n",
-                       prefix, buf.arg3, buf.arg4)
-       else
-               printf("%s\trecording event manager request, existing at %d, %d added\n",
-                       prefix, buf.arg3, buf.arg4)
-       end
-end)
-
 trace_codename("wq_start_add_timer", function(buf)
        local prefix = get_prefix(buf)
        printf("%s\tarming timer to fire in %d us (flags: %x, reqcount: %d)\n",
@@ -155,47 +141,96 @@ trace_codename("wq_add_timer", function(buf)
        end
 end)
 
-trace_codename("wq_overcommitted", function(buf)
+trace_codename("wq_run_threadreq", function(buf)
+       local prefix = get_prefix(buf)
+       if trace.debugid_is_start(buf.debugid) then
+               if buf[2] > 0 then
+                       printf("%s\trun_threadreq: %x (priority: %s, flags: %d) on %x\n",
+                                       prefix, buf[2], parse_qos_bucket(buf[4] >> 16), buf[4] & 0xff, buf[3])
+               else
+                       printf("%s\trun_threadreq: <none> on %x\n",
+                                       prefix, buf[3])
+               end
+       else
+               if buf[2] == 1 then
+                       printf("%s\tpended event manager, already running\n", prefix)
+               elseif buf[2] == 2 then
+                       printf("%s\tnothing to do\n", prefix)
+               elseif buf[2] == 3 then
+                       printf("%s\tno eligible request found\n", prefix)
+               elseif buf[2] == 4 then
+                       printf("%s\tadmission control failed\n", prefix)
+               elseif buf[2] == 5 then
+                       printf("%s\tunable to add new thread (may_add_new_thread: %d, nthreads: %d)\n", prefix, buf[3], buf[4])
+               elseif buf[2] == 6 then
+                       printf("%s\tthread creation failed\n", prefix)
+               elseif buf[2] == 0 then
+                       printf("%s\tsuccess\n", prefix)
+               else
+                       printf("%s\tWARNING: UNKNOWN END CODE:%d\n", prefix, buf[2])
+               end
+       end
+end)
+
+trace_codename("wq_run_threadreq_mgr_merge", function(buf)
+       local prefix = get_prefix(buf)
+       printf("%s\t\tmerging incoming manager request into existing\n", prefix)
+end)
+
+trace_codename("wq_run_threadreq_req_select", function(buf)
        local prefix = get_prefix(buf)
-       if buf.arg2 & 0x1000000 ~= 0 then
-               printf("%s\tworkqueue overcommitted @ %s, starting timer (thactive_count: %d, busycount; %d)\n",
-                       prefix, parse_pthread_priority(buf.arg2), buf.arg3, buf.arg4)
+       if buf[3] == 1 then
+               printf("%s\t\tselected event manager request %x\n", prefix, buf[2])
+       elseif buf[3] == 2 then
+               printf("%s\t\tselected overcommit request %x\n", prefix, buf[2])
+       elseif buf[3] == 3 then
+               printf("%s\t\tselected constrained request %x\n", prefix, buf[2])
        else
-               printf("%s\tworkqueue overcommitted @ %s (thactive_count: %d, busycount; %d)\n",
-                       prefix, parse_pthread_priority(buf.arg2), buf.arg3, buf.arg4)
+               printf("%s\t\tWARNING: UNKNOWN DECISION CODE:%d\n", prefix, buf[3])
        end
 end)
 
-trace_codename("wq_stalled", function(buf)
+trace_codename("wq_run_threadreq_thread_select", function(buf)
        local prefix = get_prefix(buf)
-       printf("%s\tworkqueue stalled (nthreads: %d)\n", prefix, buf.arg3)
+       if buf[2] == 1 then
+               printf("%s\t\trunning on current thread %x\n", prefix, buf[3])
+       elseif buf[2] == 2 then
+               printf("%s\t\trunning on idle thread %x\n", prefix, buf[3])
+       elseif buf[2] == 3 then
+               printf("%s\t\tcreated new thread\n", prefix)
+       else
+               printf("%s\t\tWARNING: UNKNOWN DECISION CODE:%d\n", prefix, buf[2])
+       end
 end)
 
--- thread lifecycle
+trace_codename("wq_thread_reset_priority", function(buf)
+       local prefix = get_prefix(buf)
+       local old_qos = buf[3] >> 16;
+       local new_qos = buf[3] & 0xff;
+       if buf[4] == 1 then
+               printf("%s\t\treset priority of %x from %s to %s\n", prefix, buf[2], parse_qos_bucket(old_qos), parse_qos_bucket(new_qos))
+       elseif buf[4] == 2 then
+               printf("%s\t\treset priority of %x from %s to %s for reserve manager\n", prefix, buf[2], parse_qos_bucket(old_qos), parse_qos_bucket(new_qos))
+       elseif buf[4] == 3 then
+               printf("%s\t\treset priority of %x from %s to %s for cleanup\n", prefix, buf[2], parse_qos_bucket(old_qos), parse_qos_bucket(new_qos))
+       end
+end)
 
-trace_codename("wq_run_nextitem", function(buf)
+trace_codename("wq_thread_park", function(buf)
        local prefix = get_prefix(buf)
        if trace.debugid_is_start(buf.debugid) then
-               if buf.arg2 == 0 then
-                       printf("%s\ttrying to run a request on an idle thread (idlecount: %d, reqcount: %d)\n",
-                               prefix, buf.arg3, buf.arg4)
-               else
-                       printf("%s\tthread %x looking for next request (idlecount: %d, reqcount: %d)\n",
-                               prefix, buf.threadid, buf.arg3, buf.arg4)
-               end
+               printf("%s\tthread parking\n", prefix)
        else
-               if buf.arg4 == 1 then
-                       printf("%s\tkicked off work on thread %x (overcommit: %d)\n", prefix, buf.arg2, buf.arg3)
-               elseif buf.arg4 == 3 then
-                       printf("%s\tno work %x can currently do (start_timer: %d)\n", prefix, buf.arg2, buf.arg3)
-               elseif buf.arg4 == 4 then
-                       printf("%s\treturning to run next item\n", prefix)
-               else
-                       printf("%s\tWARNING: UNKNOWN END CODE:%d\n", prefix, buf.arg4)
-               end
+               printf("%s\tthread woken\n", prefix)
        end
 end)
 
+trace_codename("wq_thread_squash", function(buf)
+       local prefix = get_prefix(buf)
+       printf("%s\tthread squashed from %s to %s\n", prefix,
+                       parse_qos_bucket(buf[2]), parse_qos_bucket(buf[3]))
+end)
+
 trace.enable_thread_cputime()
 runitem_time_map = {}
 runitem_cputime_map = {}
@@ -205,7 +240,7 @@ trace_codename("wq_runitem", function(buf)
                runitem_time_map[buf.threadid] = buf.timestamp;
                runitem_cputime_map[buf.threadid] = trace.cputime_for_thread(buf.threadid);
 
-               printf("%s\tSTART running item\n", prefix)
+               printf("%s\tSTART running item @ %s\n", prefix, parse_qos_bucket(buf[3]))
        elseif runitem_time_map[buf.threadid] then
                local time = buf.timestamp - runitem_time_map[buf.threadid]
                local cputime = trace.cputime_for_thread(buf.threadid) - runitem_cputime_map[buf.threadid]
@@ -213,110 +248,99 @@ trace_codename("wq_runitem", function(buf)
                local time_ms = trace.convert_timestamp_to_nanoseconds(time) / 1000000
                local cputime_ms = trace.convert_timestamp_to_nanoseconds(cputime) / 1000000
 
-               printf("%s\tDONE running item: time = %6.6f ms, cputime = %6.6f ms\n", prefix, time_ms, cputime_ms)
+               printf("%s\tDONE running item @ %s: time = %6.6f ms, cputime = %6.6f ms\n",
+                               prefix, parse_qos_bucket(buf[2]), time_ms, cputime_ms)
 
                runitem_time_map[buf.threadid] = 0
                runitem_cputime_map[buf.threadid] = 0
        else
-               printf("%s\tDONE running item\n", prefix)
+               printf("%s\tDONE running item @ %s\n", prefix, parse_qos_bucket(buf[2]))
        end
 end)
 
-trace_codename("wq_thread_yielded", function(buf)
+trace_codename("wq_runthread", function(buf)
        local prefix = get_prefix(buf)
        if trace.debugid_is_start(buf.debugid) then
-               printf("%s\tthread_yielded called (yielded_count: %d, reqcount: %d)\n",
-                       prefix, buf.arg2, buf.arg3)
-       else
-               if buf.arg4 == 1 then
-                       printf("%s\tthread_yielded completed kicking thread (yielded_count: %d, reqcount: %d)\n",
-                               prefix, buf.arg2, buf.arg3)
-               elseif buf.arg4 == 2 then
-                       printf("%s\tthread_yielded completed (yielded_count: %d, reqcount: %d)\n",
-                               prefix, buf.arg2, buf.arg3)
-               else
-                       printf("%s\tthread_yielded completed unusually (yielded_count: %d, reqcount: %d)\n",
-                               prefix, buf.arg2, buf.arg3)
-               end
+               printf("%s\tSTART running thread\n", prefix)
+       elseif trace.debugid_is_end(buf.debugid) then
+               printf("%s\tDONE running thread\n", prefix)
        end
 end)
 
-trace_codename("wq_thread_block", function(buf)
-       local prefix = get_prefix(buf)
-       if trace.debugid_is_start(buf.debugid) then
-               printf("%s\tthread blocked (activecount: %d, prioritiy: %d, start_time: %d)\n",
-                       prefix, buf.arg2, buf.arg3, buf.arg3)
-       else
-               printf("%s\tthread unblocked (threads_scheduled: %d, priority: %d)\n",
-                       prefix, buf.arg2, buf.arg3)
-       end
+trace_codename("wq_thactive_update", function(buf)
+    local prefix = get_prefix(buf)
+    local thactive = get_thactive(buf[2], buf[3])
+    if buf[1] == 1 then
+        printf("%s\tthactive constrained pre-post (%s)\n", prefix, thactive)
+    elseif buf[1] == 2 then
+        printf("%s\tthactive constrained run (%s)\n", prefix, thactive)
+    else
+        return
+    end
 end)
 
-trace_codename("wq_thread_suspend", function(buf)
+trace_codename("wq_thread_block", function(buf)
        local prefix = get_prefix(buf)
+       local req_pri = parse_thactive_req_bucket(buf[3] >> 8)
        if trace.debugid_is_start(buf.debugid) then
-               printf("%s\tcreated new suspended thread (nthreads:%d)\n",
-                       prefix, buf.arg2)
+               printf("%s\tthread blocked (activecount: %d, priority: %s, req_pri: %s, reqcount: %d, start_timer: %d)\n",
+                       prefix, buf[2], parse_qos_bucket(buf[3] & 0xff), req_pri, buf[4] >> 1, buf[4] & 0x1)
        else
-               if buf.arg4 == 0xdead then
-                       printf("%s\tthread exited suspension to die (nthreads: %d)\n",
-                               prefix, buf.arg3)
-               end
+               printf("%s\tthread unblocked (activecount: %d, priority: %s, req_pri: %s, threads_scheduled: %d)\n",
+                       prefix, buf[2], parse_qos_bucket(buf[3] & 0xff), req_pri, buf[4])
        end
 end)
 
-trace_codename("wq_thread_park", function(buf)
+trace_codename("wq_thread_create_failed", function(buf)
        local prefix = get_prefix(buf)
-       if trace.debugid_is_start(buf.debugid) then
-               printf("%s\tthread parked (threads_scheduled: %d, thidlecount: %d, us_to_wait: %d)\n",
-                       prefix, buf.arg2, buf.arg3, buf.arg4)
-       else
-               if buf.arg4 == 0xdead then
-                       printf("%s\tthread exited park to die (nthreads: %d)\n", prefix, buf.arg3)
-               end
+       if buf[3] == 0 then
+               printf("%s\tfailed to create new workqueue thread, kern_return: 0x%x\n",
+                       prefix, buf[2])
+       elseif buf[3] == 1 then
+               printf("%s\tfailed to vm_map workq thread stack: 0x%x\n", prefix, buf[2])
+       elseif buf[3] == 2 then
+               printf("%s\tfailed to vm_protect workq thread guardsize: 0x%x\n", prefix, buf[2])
        end
-
 end)
 
-trace_codename("wq_thread_limit_exceeded", function(buf)
-       local prefix = get_prefix(buf)
-       printf("%s\ttotal thread limit exceeded, %d threads, total %d max threads, (kern limit: %d)\n",
-               prefix, buf.arg2, buf.arg3, buf.arg4)
-end)
-
-trace_codename("wq_thread_constrained_maxed", function(buf)
-       local prefix = get_prefix(buf)
-       printf("%s\tattempted to add thread at max constrained limit, total %d threads (limit: %d)\n",
-               prefix, buf.arg2, buf.arg3)
+trace_codename("wq_thread_create", function(buf)
+       printf("%s\tcreated new workqueue thread\n", get_prefix(buf))
 end)
 
-trace_codename("wq_thread_add_during_exit", function(buf)
+trace_codename("wq_wqops_reqthreads", function(buf)
        local prefix = get_prefix(buf)
-       printf("%s\tattempted to add thread during WQ_EXITING\n", prefix)
+       printf("%s\tuserspace requested %d threads at %s\n", prefix, buf[2], parse_pthread_priority(buf[3]));
 end)
 
-trace_codename("wq_thread_create_failed", function(buf)
+trace_codename("wq_kevent_reqthreads", function(buf)
        local prefix = get_prefix(buf)
-       if buf.arg3 == 0 then
-               printf("%s\tfailed to create new workqueue thread, kern_return: 0x%x\n",
-                       prefix, buf.arg2)
-       elseif buf.arg3 == 1 then
-               printf("%s\tfailed to vm_map workq thread stack: 0x%x", prefix, buf.arg2)
-       elseif buf.arg3 == 2 then
-               printf("%s\tfailed to vm_protect workq thread guardsize: 0x%x", prefix, buf.arg2)
+       if buf[4] == 0 then
+               printf("%s\tkevent requested a thread at %s\n", prefix, parse_pthread_priority(buf[3]));
+       elseif buf[4] == 1 then
+               printf("%s\tworkloop requested a thread for req %x at %s\n", prefix, buf[2], parse_pthread_priority(buf[3]));
+       elseif buf[4] == 2 then
+               printf("%s\tworkloop updated priority of req %x to %s\n", prefix, buf[2], parse_pthread_priority(buf[3]));
+       elseif buf[4] == 3 then
+               printf("%s\tworkloop canceled req %x\n", prefix, buf[2]);
+       elseif buf[4] == 4 then
+               printf("%s\tworkloop redrove a thread request\n", prefix);
        end
 end)
 
-trace_codename("wq_thread_create", function(buf)
-       printf("%s\tcreateed new workqueue thread\n", get_prefix(buf))
-end)
-
-trace_codename("wq_manager_request", function(buf)
+trace_codename("wq_constrained_admission", function(buf)
        local prefix = get_prefix(buf)
-       printf("%s\tthread in bucket %d\n", prefix, buf.arg3)
+       if buf[2] == 1 then
+               printf("%s: fail\twq_constrained_threads_scheduled=%d >= wq_max_constrained_threads=%d\n",
+                               prefix, buf[3], buf[4])
+       elseif (buf[2] == 2) or (buf[2] == 3) then
+               local success = "fail";
+               if buf[2] == 2 then success = "success" end
+               printf("%s: %s\tthactive_count=%d + busycount=%d vs. wq->wq_max_concurrency\n",
+                               prefix, success, buf[3], buf[4])
+       end
 end)
 
-
 -- The trace codes we need aren't enabled by default
 darwin.sysctlbyname("kern.pthread_debug_tracing", 1)
 completion_handler = function()
index 596b8d30b1513892b886a87ab58d698f73cc0ac4..8eb585b1d1dbcdae27965d52c7e71f9fec526ce4 100644 (file)
@@ -1,6 +1,8 @@
 #include "pthread.xcconfig"
+
+BUILD_VARIANTS = normal
 INSTALL_PATH = /usr/local/lib/eOS
 EXECUTABLE_PREFIX = lib
 PRODUCT_NAME = pthread_eOS
-GCC_PREPROCESSOR_DEFINITIONS = $(BASE_PREPROCESSOR_MACROS) PTHREAD_TARGET_EOS=1 VARIANT_STATIC=1
+GCC_PREPROCESSOR_DEFINITIONS = $(BASE_PREPROCESSOR_MACROS) $(UP_PREPROCESSOR_DEFINITIONS) PTHREAD_TARGET_EOS=1 VARIANT_STATIC=1
 OTHER_LDFLAGS =
index a59dff9acd5c25add26c65b887457e0eea4edaf8..fcd42ea2477ea2d531e600d310881b99ad94be5a 100644 (file)
@@ -1,7 +1,8 @@
 // pthread kext build options
 
-BUILD_VARIANTS = normal development
-BUILD_VARIANTS[sdk=macosx*] = normal
+SDKROOT = macosx.internal
+SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos
+BUILD_VARIANTS = normal development kasan
 
 PTHREAD_VARIANT_ = $(CURRENT_VARIANT)
 PTHREAD_VARIANT_YES = development
@@ -9,17 +10,17 @@ PTHREAD_VARIANT = $(PTHREAD_VARIANT_$(DEBUG))
 
 VALID_ARCHS[sdk=macosx*] = $(NATIVE_ARCH_ACTUAL)
 ARCHS = $(ARCHS_STANDARD_32_64_BIT)
-SUPPORTED_PLATFORMS = macosx iphoneos appletvos watchos
-DYLIB_CURRENT_VERSION = $(RC_ProjectSourceVersion)
+
 INSTALL_PATH = $(SYSTEM_LIBRARY_DIR)/Extensions
+PRODUCT_NAME = pthread
+WRAPPER_EXTENSION = kext
 MODULE_NAME = com.apple.kec.pthread
 MODULE_START = pthread_start
 MODULE_STOP = pthread_stop
 MODULE_VERSION = 1.0.0d1
-DEAD_CODE_STRIPPING = NO
+PRODUCT_BUNDLE_IDENTIFIER = ${MODULE_NAME}
 INFOPLIST_FILE = kern/pthread-Info.plist
-PRODUCT_NAME = $(TARGET_NAME)
-WRAPPER_EXTENSION = kext
+
 ALWAYS_SEARCH_USER_PATHS = NO
 SRCROOT_SEARCH_PATHS = $(SRCROOT) $(SRCROOT)/pthread $(SRCROOT)/private
 HEADER_SEARCH_PATHS = $(SDKROOT)/System/Library/Frameworks/Kernel.framework/PrivateHeaders $(SDKROOT)/System/Library/Frameworks/Kernel.framework/Headers $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders $(SDKROOT)/System/Library/Frameworks/System.framework/Headers $(SRCROOT_SEARCH_PATHS)
@@ -27,19 +28,25 @@ GCC_C_LANGUAGE_STANDARD = gnu99
 CLANG_CXX_LANGUAGE_STANDARD = gnu++0x
 CLANG_CXX_LIBRARY = libc++
 GCC_PRECOMPILE_PREFIX_HEADER = YES
+CODE_SIGN_IDENTITY = -
+DEBUG_INFORMATION_FORMAT = dwarf-with-dsym
 
 GCC_OPTIMIZATION_LEVEL_normal = s
 GCC_OPTIMIZATION_LEVEL_development = 0
 GCC_OPTIMIZATION_LEVEL = $(GCC_OPTIMIZATION_LEVEL_$(PTHREAD_VARIANT))
 
+DEAD_CODE_STRIPPING = NO
 LLVM_LTO_normal = YES
 LLVM_LTO_development = NO
+LLVM_LTO_kasan = NO
 LLVM_LTO = $(LLVM_LTO_$(PTHREAD_VARIANT))
 
 GCC_PREPROCESSOR_DEFINITIONS_kext = XNU_KERNEL_PRIVATE MACH_KERNEL_PRIVATE ABSOLUTETIME_SCALAR_TYPE NEEDS_SCHED_CALL_T
 GCC_PREPROCESSOR_DEFINITIONS_kext_development = MACH_ASSERT DEBUG
 GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS_kext) $(GCC_PREPROCESSOR_DEFINITIONS_kext_$(PTHREAD_VARIANT))
 
+OTHER_CFLAGS_kasan = -DKASAN=1 -fsanitize=address -mllvm -asan-globals-live-support -mllvm -asan-force-dynamic-shadow -fno-inline
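+// the kasan build variant compiles the kext with AddressSanitizer
+// instrumentation (LTO is disabled for it via LLVM_LTO_kasan above)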
+
 GCC_TREAT_IMPLICIT_FUNCTION_DECLARATIONS_AS_ERRORS = YES
 GCC_TREAT_INCOMPATIBLE_POINTER_TYPE_WARNINGS_AS_ERRORS = YES
 
index d7d67b6afdf59b1e8ab80ec224ec895e67846c95..7b2f244451ac6e034c58655f3a4dfe0024134d22 100644 (file)
@@ -1,65 +1,78 @@
 #include "<DEVELOPER_DIR>/Makefiles/CoreOS/Xcode/BSD.xcconfig"
 
-// Standard settings
 SDKROOT = macosx.internal
 SUPPORTED_PLATFORMS = macosx iphoneos iphonesimulator appletvos appletvsimulator watchos watchsimulator
-SRCROOT_SEARCH_PATHS = $(SRCROOT) $(SRCROOT)/private $(SRCROOT)/os
+BUILD_VARIANTS = normal debug
+
+INSTALL_PATH = /usr/lib/system
+EXECUTABLE_PREFIX = lib
+PRODUCT_NAME = system_pthread
+PUBLIC_HEADERS_FOLDER_PATH = /usr/include/pthread
+PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/pthread
+
+SRCROOT_SEARCH_PATHS = $(SRCROOT) $(SRCROOT)/private $(SRCROOT)/os $(SRCROOT)/src/resolver
 SYSTEM_FRAMEWORK_HEADERS = $(SDKROOT)/System/Library/Frameworks/System.framework/PrivateHeaders
 HEADER_SEARCH_PATHS = $($(PRODUCT_NAME)_SEARCH_PATHS) $(SRCROOT_SEARCH_PATHS) $(SYSTEM_FRAMEWORK_HEADERS) $(SDKROOT)/usr/local/include $(inherited)
+INSTALLHDRS_SCRIPT_PHASE = YES
 ALWAYS_SEARCH_USER_PATHS = YES
 USE_HEADERMAP = NO
-BUILD_VARIANTS = normal debug
 
 GCC_OPTIMIZATION_LEVEL = s
-GCC_C_LANGUAGE_STANDARD = gnu99
-GCC_ENABLE_OBJC_EXCEPTIONS = YES
+GCC_C_LANGUAGE_STANDARD = gnu11
+GCC_ENABLE_OBJC_EXCEPTIONS = NO
+GCC_NO_COMMON_BLOCKS = YES
 GCC_SYMBOLS_PRIVATE_EXTERN = NO
-GCC_DYNAMIC_NO_PIC = NO
-GCC_THUMB_SUPPORT = YES
+CLANG_LINK_OBJC_RUNTIME = NO
 
-// Warnings
-CLANG_WARN__DUPLICATE_METHOD_MATCH = YES
-CLANG_WARN_DOCUMENTATION_COMMENTS = YES
 GCC_WARN_64_TO_32_BIT_CONVERSION = YES
+GCC_WARN_ABOUT_MISSING_FIELD_INITIALIZERS = YES
+GCC_WARN_ABOUT_MISSING_NEWLINE = YES
+//GCC_WARN_ABOUT_MISSING_PROTOTYPES = YES
 GCC_WARN_ABOUT_RETURN_TYPE = YES
-GCC_WARN_UNINITIALIZED_AUTOS = YES
+GCC_WARN_INITIALIZER_NOT_FULLY_BRACKETED = YES
+GCC_WARN_SIGN_COMPARE = YES
+GCC_WARN_SHADOW = YES
+GCC_WARN_UNKNOWN_PRAGMAS = YES
+GCC_WARN_UNUSED_FUNCTION = YES
+GCC_WARN_UNUSED_LABEL = YES
+// GCC_WARN_UNUSED_PARAMETER = YES
+GCC_WARN_UNUSED_VALUE = YES
 GCC_WARN_UNUSED_VARIABLE = YES
+GCC_WARN_UNINITIALIZED_AUTOS = YES
+CLANG_WARN_ASSIGN_ENUM = YES
+CLANG_WARN_BOOL_CONVERSION = YES
+CLANG_WARN_CONSTANT_CONVERSION = YES
+CLANG_WARN_DOCUMENTATION_COMMENTS = YES
+CLANG_WARN_EMPTY_BODY = YES
+CLANG_WARN_ENUM_CONVERSION = YES
+//CLANG_WARN_IMPLICIT_SIGN_CONVERSION = YES
+CLANG_WARN_INFINITE_RECURSION = YES
+// CLANG_WARN_INT_CONVERSION = YES
+CLANG_WARN_SUSPICIOUS_IMPLICIT_CONVERSION = YES
+CLANG_WARN_SUSPICIOUS_MOVE = YES
+CLANG_WARN_UNREACHABLE_CODE = YES
+CLANG_WARN__DUPLICATE_METHOD_MATCH = YES
 
-INSTALLHDRS_SCRIPT_PHASE = YES
-
-COPY_PHASE_STRIP = NO
-STRIP_INSTALLED_PRODUCT = YES
-STRIP_STYLE = debugging
-
-// Versioning
-DYLIB_CURRENT_VERSION = $(RC_ProjectSourceVersion)
-DYLIB_COMPATIBILITY_VERSION = 1
-
-// Installation paths
-INSTALL_PATH = /usr/lib/system
-PUBLIC_HEADERS_FOLDER_PATH = /usr/include/pthread
-PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/pthread
-SKIP_INSTALL = NO
+DISABLED_WARNING_CFLAGS = -Wno-int-conversion -Wno-missing-prototypes -Wno-sign-compare -Wno-sign-conversion -Wno-unused-parameter
+WARNING_CFLAGS = -Wall -Wextra -Warray-bounds-pointer-arithmetic -Wcomma -Wconditional-uninitialized -Wcovered-switch-default -Wdate-time -Wdeprecated -Wdouble-promotion -Wduplicate-enum -Wfloat-equal -Widiomatic-parentheses -Wignored-qualifiers -Wimplicit-fallthrough -Wmissing-noreturn -Wnullable-to-nonnull-conversion -Wover-aligned -Wpointer-arith -Wstatic-in-inline -Wtautological-compare -Wunguarded-availability -Wunused $(NO_WARNING_CFLAGS) $(DISABLED_WARNING_CFLAGS)
+NO_WARNING_CFLAGS = -Wno-pedantic -Wno-bad-function-cast -Wno-c++98-compat-pedantic -Wno-cast-align -Wno-cast-qual -Wno-disabled-macro-expansion -Wno-documentation-unknown-command -Wno-format-nonliteral -Wno-missing-variable-declarations -Wno-packed -Wno-padded -Wno-reserved-id-macro -Wno-switch-enum -Wno-undef -Wno-unreachable-code-aggressive -Wno-unused-macros -Wno-used-but-marked-unused
 
-// Base definitions
-// TODO: Remove -fstack-protector on _debug when it is moved to libplatform
-LINK_WITH_STANDARD_LIBRARIES = NO
 BASE_PREPROCESSOR_MACROS = __LIBC__ __DARWIN_UNIX03=1 __DARWIN_64_BIT_INO_T=1 __DARWIN_NON_CANCELABLE=1 __DARWIN_VERS_1050=1 _FORTIFY_SOURCE=0 __PTHREAD_BUILDING_PTHREAD__=1 $(SIM_PREPROCESSOR_MACROS)
-GCC_PREPROCESSOR_DEFINITIONS = $(BASE_PREPROCESSOR_MACROS)
-
-OTHER_CFLAGS = $(OTHER_CFLAGS_$(CURRENT_VARIANT))
-OTHER_CFLAGS_normal = -fno-stack-protector -fdollars-in-identifiers -fno-common -fno-builtin -momit-leaf-frame-pointer $($(PRODUCT_NAME)_CFLAGS)
-OTHER_CFLAGS_debug = -fno-stack-protector -fno-inline -O0 -DDEBUG=1 $($(PRODUCT_NAME)_CFLAGS)
+GCC_PREPROCESSOR_DEFINITIONS = $(BASE_PREPROCESSOR_MACROS) $(PLATFORM_PREPROCESSOR_DEFINITIONS)
 
-OTHER_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodescripts/pthread.aliases -Wl,-umbrella,System -L/usr/lib/system -lsystem_kernel -lsystem_platform -ldyld -lcompiler_rt $(UPLINK_LDFLAGS) $(CR_LDFLAGS)
-
-// CrashReporter
-CR_LDFLAGS = -lCrashReporterClient
+// TODO: Remove -fstack-protector on _debug when it is moved to libplatform
+OTHER_CFLAGS = -fno-stack-protector -fno-builtin $(PLATFORM_CFLAGS) $($(PRODUCT_NAME)_CFLAGS)
+OTHER_CFLAGS_normal = -momit-leaf-frame-pointer
+OTHER_CFLAGS_debug = -fno-inline -O0 -DDEBUG=1
 
-ORDER_FILE = $(SDKROOT)/$(APPLE_INTERNAL_DIR)/OrderFiles/libsystem_pthread.order
-ORDER_FILE[sdk=iphonesimulator*] =
+LINK_WITH_STANDARD_LIBRARIES = NO
+DYLIB_CURRENT_VERSION = $(RC_ProjectSourceVersion)
+DYLIB_COMPATIBILITY_VERSION = 1
+DYLIB_LDFLAGS = -Wl,-alias_list,$(SRCROOT)/xcodescripts/pthread.aliases -Wl,-umbrella,System -L/usr/lib/system -lsystem_kernel -lsystem_platform -ldyld -lcompiler_rt
+OTHER_LDFLAGS = $(DYLIB_LDFLAGS) $(CR_LDFLAGS) $(PLATFORM_LDFLAGS)
 
 // Simulator build rules
 EXCLUDED_SOURCE_FILE_NAMES[sdk=iphonesimulator*] = *.c *.s
 SKIP_INSTALL[sdk=iphonesimulator*] = YES
 OTHER_LDFLAGS[sdk=iphonesimulator*] =
+
diff --git a/xcodescripts/resolved.xcconfig b/xcodescripts/resolved.xcconfig
new file mode 100644 (file)
index 0000000..2b33118
--- /dev/null
@@ -0,0 +1,8 @@
+#include "pthread.xcconfig"
+
+SUPPORTED_PLATFORMS = iphoneos appletvos watchos
+PRODUCT_NAME = pthread_$(RESOLVED_VARIANT)
+OTHER_LDFLAGS =
+SKIP_INSTALL = YES
+VERSIONING_SYSTEM =
+EXCLUDED_SOURCE_FILE_NAMES = *
diff --git a/xcodescripts/resolver.xcconfig b/xcodescripts/resolver.xcconfig
new file mode 100644 (file)
index 0000000..85729af
--- /dev/null
@@ -0,0 +1,2 @@
+#include "pthread.xcconfig"
+
index 8c8943bdbe0f9fae9918372d18abf2c1debd9a04..611c29c2925015d6672c02897266c36f065ddf8c 100644 (file)
@@ -1,4 +1,5 @@
 #include "pthread.xcconfig"
+
 INSTALL_PATH = /usr/local/lib/system
 EXECUTABLE_PREFIX = lib
 PRODUCT_NAME = pthread