+/* Returns true if the rwlock still carries the static-initializer signature. */
+PTHREAD_ALWAYS_INLINE
+static inline bool
+_pthread_rwlock_check_signature_init(_pthread_rwlock *rwlock)
+{
+ return (rwlock->sig == _PTHREAD_RWLOCK_SIG_init);
+}
+
+/*
+ * ALWAYS called without the list lock held; returns with the list lock held
+ * on success.
+ *
+ * This weird calling convention exists because this function takes the lock
+ * and drops it again on failure, and it's best if callers don't have to
+ * remember this. (An illustrative caller sketch follows the function.)
+ */
+PTHREAD_ALWAYS_INLINE
+static inline bool
+_pthread_validate_thread_and_list_lock(pthread_t thread)
+{
+ pthread_t p;
+ if (thread == NULL) return false;
+ _PTHREAD_LOCK(_pthread_list_lock);
+ TAILQ_FOREACH(p, &__pthread_head, tl_plist) {
+ if (p != thread) continue;
+ if (os_unlikely(p->sig != _PTHREAD_SIG)) {
+ PTHREAD_CLIENT_CRASH(0, "pthread_t was corrupted");
+ }
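+ /* match found: return with the list lock still held */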
+ return true;
+ }
+ _PTHREAD_UNLOCK(_pthread_list_lock);
+
+ return false;
+}
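+
+/*
+ * Illustrative caller sketch for the convention above (the error value and
+ * the kport read are assumptions for illustration, not part of this code):
+ *
+ *   if (!_pthread_validate_thread_and_list_lock(thread)) {
+ *       return ESRCH; // lock already dropped on failure
+ *   }
+ *   kport = _pthread_kernel_thread(thread); // safe: list lock held
+ *   _PTHREAD_UNLOCK(_pthread_list_lock); // success path must drop the lock
+ */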
+
+PTHREAD_ALWAYS_INLINE
+static inline bool
+_pthread_is_valid(pthread_t thread, mach_port_t *portp)
+{
+ mach_port_t kport = MACH_PORT_NULL;
+ bool valid;
+
+ if (thread == pthread_self()) {
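+ /* the calling thread is valid by construction; no list lock needed */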
+ valid = true;
+ kport = _pthread_kernel_thread(thread);
+ } else if (!_pthread_validate_thread_and_list_lock(thread)) {
+ valid = false;
+ } else {
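+ /* validation succeeded: the list lock is held across the kport read */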
+ kport = _pthread_kernel_thread(thread);
+ valid = true;
+ _PTHREAD_UNLOCK(_pthread_list_lock);
+ }
+
+ if (portp != NULL) {
+ *portp = kport;
+ }
+ return valid;
+}
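+
+/*
+ * Illustrative use of _pthread_is_valid(), assuming a caller that needs the
+ * thread's kernel port (the surrounding names are hypothetical):
+ *
+ *   mach_port_t kport;
+ *   if (!_pthread_is_valid(thread, &kport)) {
+ *       return ESRCH; // no such thread
+ *   }
+ *   // kport now names the validated thread's kernel thread port
+ */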
+
+PTHREAD_ALWAYS_INLINE
+static inline void*
+_pthread_atomic_xchg_ptr_inline(void **p, void *v)
+{
+ return os_atomic_xchg(p, v, seq_cst);
+}
+
+PTHREAD_ALWAYS_INLINE
+static inline uint32_t
+_pthread_atomic_xchg_uint32_relaxed_inline(uint32_t *p, uint32_t v)
+{
+ return os_atomic_xchg(p, v, relaxed);
+}
+
+#define _pthread_atomic_xchg_ptr(p, v) \
+ _pthread_atomic_xchg_ptr_inline(p, v)
+#define _pthread_atomic_xchg_uint32_relaxed(p, v) \
+ _pthread_atomic_xchg_uint32_relaxed_inline(p, v)
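+
+/*
+ * The macro indirection keeps call sites independent of the underlying
+ * os_atomic implementation. Illustrative use (slot and flags are
+ * hypothetical variables):
+ *
+ *   void *prev = _pthread_atomic_xchg_ptr(&slot, new_value);
+ *   uint32_t old = _pthread_atomic_xchg_uint32_relaxed(&flags, 0);
+ */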
+