#include <kern/lock.h>
#include <kern/queue.h>
-#include <machine/cpu_number.h>
+#include <mach/branch_predicates.h>
+#include <machine/cpu_number.h>
+#include <machine/machine_routines.h> /* machine_timeout_suspended() */
/*
* wait_queue_t
* This is the definition of the common event wait queue
unsigned int /* flags */
/* boolean_t */ wq_type:16, /* only public field */
wq_fifo:1, /* fifo wakeup policy? */
- wq_isprepost:1, /* is waitq preposted? set only */
+ wq_prepost:1, /* waitq supports prepost? set only */
:0; /* force to long boundary */
hw_lock_data_t wq_interlock; /* interlock */
queue_head_t wq_queue; /* queue of elements */
typedef struct wait_queue_set {
WaitQueue wqs_wait_queue; /* our wait queue */
queue_head_t wqs_setlinks; /* links from set perspective */
- unsigned int wqs_refcount; /* refcount for preposting */
+ queue_head_t wqs_preposts; /* preposted links */
} WaitQueueSet;
#define wqs_type wqs_wait_queue.wq_type
#define wqs_fifo wqs_wait_queue.wq_fifo
-#define wqs_isprepost wqs_wait_queue.wq_isprepost
+#define wqs_prepost wqs_wait_queue.wq_prepost
#define wqs_queue wqs_wait_queue.wq_queue
/*
* with that port may wake up any thread from any of those portsets,
* or one that was waiting locally on the port itself.
*/
-typedef struct wait_queue_link {
+typedef struct _wait_queue_link {
WaitQueueElement wql_element; /* element on master */
queue_chain_t wql_setlinks; /* element on set */
+ queue_chain_t wql_preposts; /* element on set prepost list */
wait_queue_set_t wql_setqueue; /* set queue */
} WaitQueueLink;
(((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)
#define wait_queue_empty(wq) (queue_empty(&(wq)->wq_queue))
+
#define wait_queue_held(wq) (hw_lock_held(&(wq)->wq_interlock))
#define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
+/* For x86, the hardware timeout is in TSC units. */
+#if defined(__i386__) || defined(__x86_64__)
+#define hwLockTimeOut LockTimeOutTSC
+#else
+#define hwLockTimeOut LockTimeOut
+#endif
/*
 * Double the standard lock timeout, because wait queues tend to iterate
 * over a number of threads, locking each one. If a thread lock is the
 * real problem, the wait-queue lock would otherwise time out first and
 * hide it.
*/
-#define wait_queue_lock(wq) \
- ((void) (!hw_lock_to(&(wq)->wq_interlock, LockTimeOut * 2) ? \
- panic("wait queue deadlock - wq=0x%x, cpu=%d\n", \
- wq, cpu_number()) : 0))
-#define wait_queue_unlock(wq) \
- (assert(wait_queue_held(wq)), hw_lock_unlock(&(wq)->wq_interlock))
+static inline void wait_queue_lock(wait_queue_t wq) {
+ if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
+ boolean_t wql_acquired = FALSE;
+
+ while (machine_timeout_suspended()) {
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * On i386/x86_64, hw_lock_to() returns with preemption disabled after a
+ * timeout, for diagnostic purposes; re-enable preemption before retrying.
+ */
+ mp_enable_preemption();
+#endif
+ if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
+ break;
+ }
+ if (wql_acquired == FALSE)
+ panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+ }
+}
+
+static inline void wait_queue_unlock(wait_queue_t wq) {
+ assert(wait_queue_held(wq));
+ hw_lock_unlock(&(wq)->wq_interlock);
+}
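+
+/*
+ * Illustrative caller pattern only (hypothetical, not part of this
+ * interface): the interlock brackets any walk of wq_queue, e.g.
+ *
+ *	wait_queue_lock(wq);
+ *	if (!wait_queue_empty(wq)) {
+ *		... scan wq->wq_queue under the interlock ...
+ *	}
+ *	wait_queue_unlock(wq);
+ */
+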
#define wqs_lock(wqs) wait_queue_lock(&(wqs)->wqs_wait_queue)
#define wqs_unlock(wqs) wait_queue_unlock(&(wqs)->wqs_wait_queue)
#define wqs_lock_try(wqs) wait_queue_lock_try(&(wqs)->wqs_wait_queue)
+#define wqs_is_preposted(wqs) ((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))
+
+#define wql_is_preposted(wql) ((wql)->wql_preposts.next != NULL)
+#define wql_clear_prepost(wql) ((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)
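+
+/*
+ * Sketch of intended use (illustrative only, not a declared interface):
+ * a wakeup that finds no waiter on a set member can leave its link on the
+ * set's wqs_preposts list; a later consumer of the set checks and clears
+ * the marker, e.g.
+ *
+ *	if (wqs_is_preposted(wqs)) {
+ *		wait_queue_link_t wql = (wait_queue_link_t)queue_first(&wqs->wqs_preposts);
+ *		queue_remove(&wqs->wqs_preposts, wql, wait_queue_link_t, wql_preposts);
+ *		wql_clear_prepost(wql);
+ *	}
+ */
+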
#define wait_queue_assert_possible(thread) \
((thread)->wait_queue == WAIT_QUEUE_NULL)
+/* bootstrap interface - can allocate/link wait_queues and sets after calling this */
+__private_extern__ void wait_queue_bootstrap(void);
+
/******** Decomposed interfaces (to build higher level constructs) ***********/
/* assert intent to wait on a locked wait queue */
uint64_t deadline,
thread_t thread);
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
-__private_extern__ void wait_queue_peek64_locked(
- wait_queue_t wait_queue,
- event64_t event,
- thread_t *thread,
- wait_queue_t *found_queue);
-
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
+/* pull a thread from its wait queue */
__private_extern__ void wait_queue_pull_thread_locked(
wait_queue_t wait_queue,
thread_t thread,
wait_result_t result,
boolean_t unlock);
+__private_extern__ uint32_t num_wait_queues;
+__private_extern__ struct wait_queue *wait_queues;
+/* The Jenkins "one at a time" hash.
+ * TBD: There may be some value to unrolling here,
+ * depending on the architecture.
+ */
+static inline uint32_t wq_hash(char *key)
+{
+ uint32_t hash = 0;
+ size_t i, length = sizeof(char *);
+
+ for (i = 0; i < length; i++) {
+ hash += key[i];
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash;
+}
+
+/* TBD: It should be possible to eliminate the divide here */
+#define wait_hash(event) \
+ (wq_hash((char *)&event) % (num_wait_queues))
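+
+/*
+ * Illustrative only (hypothetical caller, not declared here): an event's
+ * address selects one of the num_wait_queues global bucket queues, e.g.
+ *
+ *	wait_queue_t wq = &wait_queues[wait_hash(event)];
+ */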
+
#endif /* MACH_KERNEL_PRIVATE */
__BEGIN_DECLS
extern kern_return_t wait_queue_unlink_all(
wait_queue_t wait_queue);
-extern kern_return_t wait_queue_unlinkall_nofree(
- wait_queue_t wait_queue);
-
extern kern_return_t wait_queue_set_unlink_all(
wait_queue_set_t set_queue);
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t wait_queue_set_unlink_one(
+ wait_queue_set_t set_queue,
+ wait_queue_link_t link);
+
+extern wait_queue_link_t wait_queue_link_allocate(void);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
/* legacy API */
kern_return_t wait_queue_sub_init(
wait_queue_set_t set_queue,
extern kern_return_t wait_queue_wakeup_one(
wait_queue_t wait_queue,
event_t wake_event,
- wait_result_t result);
+ wait_result_t result,
+ int priority);
/* wakeup all the threads waiting on <wait_queue,event> pair */
extern kern_return_t wait_queue_wakeup_all(