+// Reset the preposted-wakeup record on @kwq back to its pristine state
+// (no pending prepost count, sequences at their init values).
+static inline void
+_kwq_clear_preposted_wakeup(ksyn_wait_queue_t kwq)
+{
+ kwq->kw_prepost.count = 0;
+ kwq->kw_prepost.lseq = 0;
+ kwq->kw_prepost.sseq = PTHRW_RWS_INIT;
+}
+
+// Record @count preposted wakeups on @kwq, covering lock sequence
+// @lseq with the matching unlock-sequence snapshot @sseq.
+static inline void
+_kwq_mark_preposted_wakeup(ksyn_wait_queue_t kwq, uint32_t count,
+ uint32_t lseq, uint32_t sseq)
+{
+ kwq->kw_prepost.lseq = lseq;
+ kwq->kw_prepost.sseq = sseq;
+ kwq->kw_prepost.count = count;
+}
+
+// Wipe any pending interrupted-wakeup record on @kwq.
+static inline void
+_kwq_clear_interrupted_wakeup(ksyn_wait_queue_t kwq)
+{
+ kwq->kw_intr.count = 0;
+ kwq->kw_intr.seq = 0;
+ kwq->kw_intr.returnbits = 0;
+ kwq->kw_intr.type = KWQ_INTR_NONE;
+}
+
+/*
+ * Record a pending interrupted wakeup of the given @type on @kwq:
+ * @count outstanding wakeups, up to lock sequence @lseq, to be
+ * reported back with @returnbits.  Consumed later by
+ * _kwq_handle_interrupted_wakeup().
+ *
+ * NOTE(review): "interruped" (sic) is the established identifier;
+ * renaming would break external callers, so the typo stays.
+ */
+static inline void
+_kwq_mark_interruped_wakeup(ksyn_wait_queue_t kwq, kwq_intr_type_t type,
+ uint32_t count, uint32_t lseq, uint32_t returnbits)
+{
+ kwq->kw_intr.count = count;
+ kwq->kw_intr.seq = lseq;
+ kwq->kw_intr.returnbits = returnbits;
+ // type is assigned last — possibly deliberate ordering; confirm before
+ // reordering these stores.
+ kwq->kw_intr.type = type;
+}
+
+/*
+ * Final teardown of a wait queue: drop the owner-thread reference
+ * (if one is held), destroy the spinlock, and return the memory to
+ * kwq_zone.  Caller must guarantee no other references remain.
+ */
+static void
+_kwq_destroy(ksyn_wait_queue_t kwq)
+{
+ if (kwq->kw_owner) {
+  thread_deallocate(kwq->kw_owner);
+ }
+ lck_spin_destroy(&kwq->kw_lock, pthread_lck_grp);
+ zfree(kwq_zone, kwq);
+}
+
+// Caller donates its reference on new_owner to the kwq instead of
+// having _kwq_set_owner take a fresh one.
+#define KWQ_SET_OWNER_TRANSFER_REF 0x1
+
+/*
+ * Point kwq->kw_owner at @new_owner and return the thread reference
+ * that the caller is now responsible for dropping (THREAD_NULL if
+ * none); see _kwq_cleanup_old_owner().  Without
+ * KWQ_SET_OWNER_TRANSFER_REF a new reference is taken on @new_owner;
+ * with it, the caller's own reference is donated to the kwq.  When the
+ * owner is unchanged, a donated reference is surplus and is handed
+ * straight back.
+ *
+ * Fix: braced the single-statement ifs — unbraced `if ... return` on
+ * one line is a classic "goto fail" hazard and inconsistent with the
+ * brace style used by the surrounding helpers.
+ */
+static inline thread_t
+_kwq_set_owner(ksyn_wait_queue_t kwq, thread_t new_owner, int flags)
+{
+ thread_t old_owner = kwq->kw_owner;
+ if (old_owner == new_owner) {
+  if (flags & KWQ_SET_OWNER_TRANSFER_REF) {
+   // Donated reference is redundant — give it back to drop.
+   return new_owner;
+  }
+  return THREAD_NULL;
+ }
+ if ((flags & KWQ_SET_OWNER_TRANSFER_REF) == 0) {
+  thread_reference(new_owner);
+ }
+ kwq->kw_owner = new_owner;
+ return old_owner;
+}
+
+/*
+ * Detach the current owner from @kwq and return it; the kwq's own
+ * reference is transferred out (KWQ_SET_OWNER_TRANSFER_REF), so the
+ * caller must eventually deallocate the returned thread (see
+ * _kwq_cleanup_old_owner).
+ */
+static inline thread_t
+_kwq_clear_owner(ksyn_wait_queue_t kwq)
+{
+ return _kwq_set_owner(kwq, THREAD_NULL, KWQ_SET_OWNER_TRANSFER_REF);
+}
+
+// Drop the thread reference handed back by _kwq_set_owner() /
+// _kwq_clear_owner() and null out the caller's pointer.  A THREAD_NULL
+// input is a no-op.
+static inline void
+_kwq_cleanup_old_owner(thread_t *thread)
+{
+ thread_t old = *thread;
+ if (!old) {
+  return;
+ }
+ *thread = THREAD_NULL;
+ thread_deallocate(old);
+}
+
+/*
+ * Reset a kwq's sequence state when userspace reinitializes the
+ * synchronization object.  For CV queues, panics if real (non-fake)
+ * waiters are still queued, since reinit would orphan them.
+ *
+ * Fixes: "durinmg" typo in the panic message, missing space after the
+ * comma in the panic argument list, and stray `;` after two if-blocks.
+ */
+static void
+CLEAR_REINIT_BITS(ksyn_wait_queue_t kwq)
+{
+ if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_CVAR) {
+  if (kwq->kw_inqueue != 0 && kwq->kw_inqueue != kwq->kw_fakecount) {
+   panic("CV:entries in queue during reinit %d:%d\n",
+     kwq->kw_inqueue, kwq->kw_fakecount);
+  }
+ }
+ if ((kwq->kw_type & KSYN_WQTYPE_MASK) == KSYN_WQTYPE_RWLOCK) {
+  kwq->kw_nextseqword = PTHRW_RWS_INIT;
+  kwq->kw_kflags &= ~KSYN_KWF_OVERLAP_GUARD;
+ }
+ _kwq_clear_preposted_wakeup(kwq);
+ kwq->kw_lastunlockseq = PTHRW_RWL_INIT;
+ kwq->kw_lastseqword = PTHRW_RWS_INIT;
+ _kwq_clear_interrupted_wakeup(kwq);
+ kwq->kw_lword = 0;
+ kwq->kw_uword = 0;
+ kwq->kw_sword = PTHRW_RWS_INIT;
+}
+
+/*
+ * Check whether a waiter at lock sequence @lseq is covered by a
+ * preposted wakeup on @kwq.  Returns true (with *retval set to the
+ * update bits) if the waiter should return to userspace immediately
+ * instead of blocking; false if it must block as usual.
+ */
+static bool
+_kwq_handle_preposted_wakeup(ksyn_wait_queue_t kwq, uint32_t type,
+ uint32_t lseq, uint32_t *retval)
+{
+ // Not covered: no preposts pending, or this waiter's sequence is
+ // newer than the preposted range.
+ if (kwq->kw_prepost.count == 0 ||
+   !is_seqlower_eq(lseq, kwq->kw_prepost.lseq)) {
+  return false;
+ }
+
+ // Consume one prepost.  Only the waiter that drains the count to
+ // zero runs the unlock machinery below; earlier consumers still
+ // block (presumably woken later by the unlock) — confirm against
+ // kwq_handle_unlock's contract.
+ kwq->kw_prepost.count--;
+ if (kwq->kw_prepost.count > 0) {
+  return false;
+ }
+
+ int error, should_block = 0;
+ uint32_t updatebits = 0;
+ // Snapshot the prepost sequences before clearing the record, since
+ // kwq_handle_unlock needs them.
+ uint32_t pp_lseq = kwq->kw_prepost.lseq;
+ uint32_t pp_sseq = kwq->kw_prepost.sseq;
+ _kwq_clear_preposted_wakeup(kwq);
+
+ kwq->kw_kflags &= ~KSYN_KWF_INITCLEARED;
+
+ error = kwq_handle_unlock(kwq, pp_lseq, pp_sseq, &updatebits,
+   (type | KW_UNLOCK_PREPOST), &should_block, lseq);
+ if (error) {
+  // State is already mutated at this point; failure is unrecoverable.
+  panic("_kwq_handle_preposted_wakeup: kwq_handle_unlock failed %d",
+    error);
+ }
+
+ if (should_block) {
+  return false;
+ }
+ *retval = updatebits;
+ return true;
+}
+
+/*
+ * Check whether a read-locker arriving at @kwq can piggyback on the
+ * rwlock's overlap mode (KSYN_KWF_OVERLAP_GUARD) and proceed without
+ * blocking.  Returns true with *retval set to the update bits (one
+ * reader increment, next-sequence bits, and the M bit) on success;
+ * false if the caller must fall through to the normal path.
+ *
+ * Fix: the function is declared bool but mixed `int res = 0/1` with
+ * `false`, relying on implicit int->bool conversion; now uses direct
+ * bool returns consistent with the sibling _kwq_handle_* predicates.
+ */
+static bool
+_kwq_handle_overlap(ksyn_wait_queue_t kwq, uint32_t type, uint32_t lgenval,
+ uint32_t rw_wc, uint32_t *retval)
+{
+ // overlaps only occur on read lockers
+ if (type != PTH_RW_TYPE_READ) {
+  return false;
+ }
+
+ // check for overlap and no pending W bit (indicates writers)
+ if ((kwq->kw_kflags & KSYN_KWF_OVERLAP_GUARD) &&
+   !is_rws_savemask_set(rw_wc) && !is_rwl_wbit_set(lgenval)) {
+  /* overlap is set, so no need to check for valid state for overlap */
+
+  if (is_seqlower_eq(rw_wc, kwq->kw_nextseqword) ||
+    is_seqhigher_eq(kwq->kw_lastseqword, rw_wc)) {
+   /* increase the next expected seq by one */
+   kwq->kw_nextseqword += PTHRW_INC;
+   /* set count by one & bits from the nextseq and add M bit */
+   *retval = PTHRW_INC | ((kwq->kw_nextseqword & PTHRW_BIT_MASK) | PTH_RWL_MBIT);
+   return true;
+  }
+ }
+ return false;
+}
+
+// True while the kwq still tracks any state that must survive: queued
+// waiters, preposted wakeups, or pending interrupted wakeups.
+static inline bool
+_kwq_is_used(ksyn_wait_queue_t kwq)
+{
+ if (kwq->kw_inqueue != 0) {
+  return true;
+ }
+ if (kwq->kw_prepost.count != 0) {
+  return true;
+ }
+ return kwq->kw_intr.count != 0;
+}
+
+/*
+ * Consume a pending interrupted-waiter record of the given @type
+ * (recorded earlier by _kwq_mark_interruped_wakeup).  Returns true —
+ * with *retval set to the recorded return bits — if the current thread
+ * at lock sequence @lseq should return straight to userspace because a
+ * previous waiter was interrupted; false otherwise.
+ */
+static inline bool
+_kwq_handle_interrupted_wakeup(ksyn_wait_queue_t kwq, kwq_intr_type_t type,
+ uint32_t lseq, uint32_t *retval)
+{
+ // Match: a record exists, is of the right type, and either has no
+ // sequence bound (seq == 0) or covers this waiter's sequence.
+ if (kwq->kw_intr.count != 0 && kwq->kw_intr.type == type &&
+   (!kwq->kw_intr.seq || is_seqlower_eq(lseq, kwq->kw_intr.seq))) {
+  kwq->kw_intr.count--;
+  *retval = kwq->kw_intr.returnbits;
+  // Only fully reset the record when returnbits is zero; a non-zero
+  // returnbits record is kept for remaining count — confirm intent.
+  if (kwq->kw_intr.returnbits == 0) {
+   _kwq_clear_interrupted_wakeup(kwq);
+  }
+  return true;
+ }
+ return false;
+}
+