diff --git a/osfmk/kern/wait_queue.h b/osfmk/kern/wait_queue.h
index 560b25325e0ad01bfaf3502f6ca51a19bac6d8a5..5a1be3f35840fe896ef46d4469fddae7fcd4f2da 100644
--- a/osfmk/kern/wait_queue.h
+++ b/osfmk/kern/wait_queue.h
@@ -123,7 +123,7 @@ typedef WaitQueueElement *wait_queue_element_t;
  *     with that port may wake up any thread from any of those portsets,
  *     or one that was waiting locally on the port itself.
  */
-typedef struct wait_queue_link {
+typedef struct _wait_queue_link {
        WaitQueueElement                wql_element;    /* element on master */
        queue_chain_t                   wql_setlinks;   /* element on set */
     wait_queue_set_t           wql_setqueue;   /* set queue */
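
This hunk only renames the struct tag from wait_queue_link to _wait_queue_link; the underscore-prefixed tag lets other headers forward-declare the type opaquely without seeing its layout. A minimal sketch of the pattern, assuming the WaitQueueLink and wait_queue_link_t typedef names used elsewhere in this header:

    /* Opaque forward declaration: clients get a handle, not the layout. */
    struct _wait_queue_link;
    typedef struct _wait_queue_link WaitQueueLink;   /* assumed typedef name */
    typedef WaitQueueLink *wait_queue_link_t;        /* assumed handle type */

    /* Client code can now traffic in wait_queue_link_t without ever
     * seeing wql_element, wql_setlinks, or wql_setqueue. */
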
@@ -146,22 +146,41 @@ typedef struct wait_queue_link {
        (((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)
 
 #define wait_queue_empty(wq)   (queue_empty(&(wq)->wq_queue))
+
 #define wait_queue_held(wq)            (hw_lock_held(&(wq)->wq_interlock))
 #define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
 
+/* For x86, the hardware timeout is in TSC units. */
+#if defined(i386)
+#define        hwLockTimeOut LockTimeOutTSC
+#else
+#define        hwLockTimeOut LockTimeOut
+#endif
+
 /*
  * Double the standard lock timeout, because wait queues tend
  * to iterate over a number of threads - locking each.  If there is
  * a problem with a thread lock, it normally times out at the wait
  * queue level first, hiding the real problem.
  */
-#define wait_queue_lock(wq)    \
-       ((void) (!hw_lock_to(&(wq)->wq_interlock, LockTimeOut * 2) ? \
-                panic("wait queue deadlock - wq=0x%x, cpu=%d\n", \
-                      wq, cpu_number()) : 0))
 
-#define wait_queue_unlock(wq) \
-       (assert(wait_queue_held(wq)), hw_lock_unlock(&(wq)->wq_interlock))
+static inline void wait_queue_lock(wait_queue_t wq) {
+       if (!hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2))
+               panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+}
+
+static inline void wait_queue_unlock(wait_queue_t wq) {
+       assert(wait_queue_held(wq));
+#if defined(__i386__)
+       /* DRK: On certain x86 systems, this spinlock is susceptible to
+        * lock starvation. Hence use an unlock variant which performs
+        * a cacheline flush to minimize cache affinity on acquisition.
+        */
+       i386_lock_unlock_with_flush(&(wq)->wq_interlock);
+#else
+       hw_lock_unlock(&(wq)->wq_interlock);
+#endif
+}
 
 #define wqs_lock(wqs)          wait_queue_lock(&(wqs)->wqs_wait_queue)
 #define wqs_unlock(wqs)                wait_queue_unlock(&(wqs)->wqs_wait_queue)
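
Net effect of this hunk: lock acquisition becomes an inline function that spins for twice hwLockTimeOut (TSC units on x86) and panics on timeout, while release on i386 uses the cacheline-flushing unlock to curb lock starvation. A hedged sketch of the usage discipline these inlines assume (the function name is hypothetical; real callers live in osfmk/kern/wait_queue.c and likewise raise spl before taking the interlock):

    /* Hypothetical caller illustrating the intended lock discipline. */
    static void
    example_wakeup_scan(wait_queue_t wq)
    {
            spl_t s = splsched();   /* interrupts off before any spinlock */
            wait_queue_lock(wq);    /* spins up to hwLockTimeOut * 2, panics on deadlock */
            /* ... walk wq->wq_queue, briefly taking each thread's lock ... */
            wait_queue_unlock(wq);  /* flushes the lock's cacheline on i386 */
            splx(s);
    }
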
@@ -180,14 +199,7 @@ __private_extern__ wait_result_t wait_queue_assert_wait64_locked(
                        uint64_t deadline,
                        thread_t thread);
 
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
-__private_extern__ void wait_queue_peek64_locked(
-                       wait_queue_t wait_queue,
-                       event64_t event,
-                       thread_t *thread,
-                       wait_queue_t *found_queue);
-
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
+/* pull a thread from its wait queue */
 __private_extern__ void wait_queue_pull_thread_locked(
                        wait_queue_t wait_queue,
                        thread_t thread,