+ delay(1);
+ goto retry;
+ }
+ }
+
+ /* remove this set from sets it belongs to */
+ wait_queue_unlink_all_nofree_locked(&wq_set->wqs_wait_queue, links);
+
+ wqs_unlock(wq_set);
+ splx(s);
+
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: wait_queue_set_unlink_all
+ * Purpose:
+ * Remove the linkage between a set wait queue and all its
+ * member wait queues and all the sets it may be members of.
+ * The link structures are freed for those links which were
+ * dynamically allocated.
+ * Conditions:
+ * The wait queue must be a set
+ * Returns:
+ * KERN_INVALID_ARGUMENT if wq_set is not a set wait queue,
+ * KERN_SUCCESS otherwise.
+ */
+kern_return_t
+wait_queue_set_unlink_all(
+ wait_queue_set_t wq_set)
+{
+ wait_queue_link_t wql;
+ wait_queue_t wq;
+ queue_t q;
+ queue_head_t links_queue_head;
+ queue_t links = &links_queue_head;
+ spl_t s;
+
+ if (!wait_queue_is_set(wq_set)) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /*
+ * Stack-local staging queue: unlinked link structures are collected
+ * here so they can be freed later, after all locks are dropped
+ * (zfree must not be called at splsched with the set locked).
+ */
+ queue_init(links);
+
+retry:
+ s = splsched();
+ wqs_lock(wq_set);
+
+ /* remove the wait queues that are members of our set */
+ q = &wq_set->wqs_setlinks;
+
+ wql = (wait_queue_link_t)queue_first(q);
+ while (!queue_end(q, (queue_entry_t)wql)) {
+ WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
+ wq = wql->wql_queue;
+ if (wait_queue_lock_try(wq)) {
+ boolean_t alloced;
+
+ /*
+ * Only links of type WAIT_QUEUE_LINK were dynamically
+ * allocated; only those get staged for zfree below.
+ */
+ alloced = (wql->wql_type == WAIT_QUEUE_LINK);
+ wait_queue_unlink_locked(wq, wq_set, wql);
+ wait_queue_unlock(wq);
+ if (alloced)
+ enqueue(links, &wql->wql_links);
+ /* unlink invalidated our iterator: restart from the head */
+ wql = (wait_queue_link_t)queue_first(q);
+ } else {
+ /*
+ * Couldn't take the member queue's lock while holding
+ * the set lock (would risk deadlock against the
+ * opposite lock order). Drop everything, pause briefly,
+ * and retry from scratch.
+ */
+ wqs_unlock(wq_set);
+ splx(s);
+ delay(1);
+ goto retry;
+ }
+ }
+
+
+ /* remove this set from sets it belongs to */
+ wait_queue_unlink_all_locked(&wq_set->wqs_wait_queue, links);
+
+ wqs_unlock(wq_set);
+ splx(s);
+
+ /* now safe to free the collected dynamically-allocated links */
+ while (!queue_empty (links)) {
+ wql = (wait_queue_link_t) dequeue(links);
+ zfree(_wait_queue_link_zone, wql);
+ }
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Routine: wait_queue_set_unlink_one
+ * Purpose:
+ * Remove the linkage described by a single link structure
+ * between its wait queue and the given set. The link
+ * structure itself is not freed here.
+ * Conditions:
+ * The wait queue must be a set (asserted).
+ * Returns:
+ * KERN_SUCCESS always (also when the link was already
+ * unlinked by someone else).
+ */
+kern_return_t
+wait_queue_set_unlink_one(
+ wait_queue_set_t wq_set,
+ wait_queue_link_t wql)
+{
+ wait_queue_t wq;
+ spl_t s;
+
+ assert(wait_queue_is_set(wq_set));
+
+retry:
+ s = splsched();
+ wqs_lock(wq_set);
+
+ WAIT_QUEUE_SET_CHECK(wq_set);
+
+ /* Already unlinked, e.g. by selclearthread() */
+ if (wql->wql_type == WAIT_QUEUE_UNLINKED) {
+ goto out;
+ }
+
+ WAIT_QUEUE_SET_LINK_CHECK(wq_set, wql);
+
+ /* On a wait queue, and we hold set queue lock ... */
+ wq = wql->wql_queue;
+ if (wait_queue_lock_try(wq)) {
+ wait_queue_unlink_locked(wq, wq_set, wql);
+ wait_queue_unlock(wq);
+ } else {
+ /*
+ * Member queue lock unavailable while holding the set
+ * lock; back off completely and retry to avoid
+ * deadlocking against the opposite lock order.
+ */
+ wqs_unlock(wq_set);
+ splx(s);
+ delay(1);
+ goto retry;
+ }
+
+out:
+ wqs_unlock(wq_set);
+ splx(s);
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: wait_queue_assert_wait64_locked
+ * Purpose:
+ * Insert the current thread into the supplied wait queue
+ * waiting for a particular event to be posted to that queue.
+ *
+ * Conditions:
+ * The wait queue is assumed locked.
+ * The waiting thread is assumed locked.
+ *
+ */
+__private_extern__ wait_result_t
+wait_queue_assert_wait64_locked(
+ wait_queue_t wq,
+ event64_t event,
+ wait_interrupt_t interruptible,
+ wait_timeout_urgency_t urgency,
+ uint64_t deadline,
+ uint64_t leeway,
+ thread_t thread)
+{
+ wait_result_t wait_result;
+ boolean_t realtime;
+
+ if (!wait_queue_assert_possible(thread))
+ panic("wait_queue_assert_wait64_locked");
+
+ if (wq->wq_type == _WAIT_QUEUE_SET_inited) {
+ wait_queue_set_t wqs = (wait_queue_set_t)wq;
+
+ if (event == NO_EVENT64 && wqs_is_preposted(wqs))
+ return(THREAD_AWAKENED);
+ }
+
+ /*
+ * Realtime threads get priority for wait queue placements.
+ * This allows wait_queue_wakeup_one to prefer a waiting
+ * realtime thread, similar in principle to performing
+ * a wait_queue_wakeup_all and allowing scheduler prioritization
+ * to run the realtime thread, but without causing the
+ * lock contention of that scenario.
+ */
+ realtime = (thread->sched_pri >= BASEPRI_REALTIME);
+
+ /*
+ * This is the extent to which we currently take scheduling attributes
+ * into account. If the thread is vm priviledged, we stick it at
+ * the front of the queue. Later, these queues will honor the policy
+ * value set at wait_queue_init time.
+ */
+ wait_result = thread_mark_wait_locked(thread, interruptible);
+ if (wait_result == THREAD_WAITING) {
+ if (!wq->wq_fifo
+ || (thread->options & TH_OPT_VMPRIV)
+ || realtime)
+ enqueue_head(&wq->wq_queue, (queue_entry_t) thread);
+ else
+ enqueue_tail(&wq->wq_queue, (queue_entry_t) thread);
+
+ thread->wait_event = event;
+ thread->wait_queue = wq;
+
+ if (deadline != 0) {
+
+ if (!timer_call_enter_with_leeway(&thread->wait_timer, NULL,
+ deadline, leeway, urgency, FALSE))
+ thread->wait_timer_active++;
+ thread->wait_timer_is_set = TRUE;
+ }
+ if (wait_queue_global(wq)) {
+ wq->wq_eventmask = wq->wq_eventmask | CAST_TO_EVENT_MASK(event);