[apple/xnu.git] / osfmk / kern / wait_queue.h (xnu-2422.90.20)
index 74c8fa6d7175a6c1494e8968a4d86764e7a3fb36..01111d16c573d3f9f176ee19bd29cba813f84c29 100644
 #include <mach/kern_return.h>          /* for kern_return_t */
 
 #include <kern/kern_types.h>           /* for wait_queue_t */
+#include <kern/queue.h>
 
 #include <sys/cdefs.h>
 
 #ifdef MACH_KERNEL_PRIVATE
 
 #include <kern/lock.h>
-#include <kern/queue.h>
+#include <mach/branch_predicates.h>
+
 #include <machine/cpu_number.h>
+#include <machine/machine_routines.h> /* machine_timeout_suspended() */
 
 
+/*
+ * The event mask is 60 bits wide on 64-bit architectures and 28 bits
+ * wide on 32-bit architectures, so its size is derived from sizeof(long).
+ * If the bitfields for wq_type and wq_fifo change, the value of
+ * EVENT_MASK_BITS must be updated to match.
+ */
+#define EVENT_MASK_BITS  ((sizeof(long) * 8) - 4)
+
+/*
+ * Zero out the 4 most-significant bits of the event.
+ */
+#define CAST_TO_EVENT_MASK(event)  (((CAST_DOWN(unsigned long, event)) << 4) >> 4)
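
A minimal sketch of how these two macros are meant to combine, assuming LP64 (so EVENT_MASK_BITS is 60); the helper name wq_mark_event is hypothetical, standing in for the marking done inside the wait queue implementation:

	/* hypothetical helper: record that some thread waits on "event" */
	static inline void wq_mark_event(wait_queue_t wq, event_t event)
	{
		/* the <<4 >>4 pair in CAST_TO_EVENT_MASK() discards the 4
		 * most-significant bits of the event pointer, so the rest
		 * fits in the EVENT_MASK_BITS-wide field alongside the 4
		 * flag bits (wq_type:2, wq_fifo:1, wq_prepost:1) */
		wq->wq_eventmask |= CAST_TO_EVENT_MASK(event);
	}
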
 /*
  *     wait_queue_t
  *     This is the definition of the common event wait queue
  *     them.
  */
 typedef struct wait_queue {
-    unsigned int                    /* flags */
-    /* boolean_t */    wq_type:16,             /* only public field */
+    unsigned long int                    /* flags */
+    /* boolean_t */    wq_type:2,              /* only public field */
                                        wq_fifo:1,              /* fifo wakeup policy? */
-                                       wq_isprepost:1, /* is waitq preposted? set only */
-                                       :0;                             /* force to long boundary */
+                                       wq_prepost:1,   /* waitq supports prepost? set only */
+                                       wq_eventmask:EVENT_MASK_BITS; 
     hw_lock_data_t     wq_interlock;   /* interlock */
     queue_head_t       wq_queue;               /* queue of elements */
 } WaitQueue;
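
A compile-time sanity check one could add, under the stated assumption that the 4 flag bits plus wq_eventmask exactly fill one long; this typedef trick predates C11 _Static_assert and fails to compile if the sum is wrong:

	typedef char wq_flags_fill_one_long
		[(4 + EVENT_MASK_BITS == sizeof(long) * 8) ? 1 : -1];
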
@@ -80,12 +95,12 @@ typedef struct wait_queue {
 typedef struct wait_queue_set {
        WaitQueue               wqs_wait_queue; /* our wait queue */
        queue_head_t    wqs_setlinks;   /* links from set perspective */
-       unsigned int    wqs_refcount;   /* refcount for preposting */
+       queue_head_t    wqs_preposts;   /* preposted links */
 } WaitQueueSet;
 
 #define wqs_type               wqs_wait_queue.wq_type
 #define wqs_fifo               wqs_wait_queue.wq_fifo
-#define wqs_isprepost  wqs_wait_queue.wq_isprepost
+#define wqs_prepost    wqs_wait_queue.wq_prepost
 #define wqs_queue              wqs_wait_queue.wq_queue
 
 /*
@@ -123,9 +138,10 @@ typedef WaitQueueElement *wait_queue_element_t;
  *     with that port may wake up any thread from any of those portsets,
  *     or one that was waiting locally on the port itself.
  */
-typedef struct wait_queue_link {
+typedef struct _wait_queue_link {
        WaitQueueElement                wql_element;    /* element on master */
        queue_chain_t                   wql_setlinks;   /* element on set */
+       queue_chain_t                   wql_preposts;   /* element on set prepost list */
     wait_queue_set_t           wql_setqueue;   /* set queue */
 } WaitQueueLink;
 
@@ -133,8 +149,8 @@ typedef struct wait_queue_link {
 #define wql_type  wql_element.wqe_type
 #define wql_queue wql_element.wqe_queue
 
-#define _WAIT_QUEUE_inited                     0xf1d0
-#define _WAIT_QUEUE_SET_inited         0xf1d1
+#define _WAIT_QUEUE_inited             0x2
+#define _WAIT_QUEUE_SET_inited         0x3
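
The new sentinels fit the 2-bit wq_type field and differ only in their low bit, which is what the wait_queue_is_valid() test below relies on; the asserts here are illustrative, not part of the header:

	assert((_WAIT_QUEUE_inited     & ~1) == _WAIT_QUEUE_inited);  /* 0x2 */
	assert((_WAIT_QUEUE_SET_inited & ~1) == _WAIT_QUEUE_inited);  /* 0x3 */
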
 
 #define wait_queue_is_queue(wq)        \
        ((wq)->wq_type == _WAIT_QUEUE_inited)
@@ -146,9 +162,16 @@ typedef struct wait_queue_link {
        (((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)
 
 #define wait_queue_empty(wq)   (queue_empty(&(wq)->wq_queue))
+
 #define wait_queue_held(wq)            (hw_lock_held(&(wq)->wq_interlock))
 #define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
 
+/* For x86, the hardware timeout is in TSC units. */
+#if defined(i386) || defined(x86_64)
+#define        hwLockTimeOut LockTimeOutTSC
+#else
+#define        hwLockTimeOut LockTimeOut
+#endif
 /*
  * Double the standard lock timeout, because wait queues tend
  * to iterate over a number of threads - locking each.  If there is
@@ -157,30 +180,45 @@ typedef struct wait_queue_link {
  */
 
 static inline void wait_queue_lock(wait_queue_t wq) {
-       if (!hw_lock_to(&(wq)->wq_interlock, LockTimeOut * 2))
-               panic("wait queue deadlock - wq=0x%x, cpu=%d\n", wq, cpu_number());
+       if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
+               boolean_t wql_acquired = FALSE;
+
+               while (machine_timeout_suspended()) {
+#if    defined(__i386__) || defined(__x86_64__)
+/*
+ * i386/x86_64 return with preemption disabled on a timeout for
+ * diagnostic purposes.
+ */
+                       mp_enable_preemption();
+#endif
+                       if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
+                               break;
+               }
+               if (wql_acquired == FALSE)
+                       panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+       }
+       assert(wait_queue_held(wq));
 }
 
 static inline void wait_queue_unlock(wait_queue_t wq) {
        assert(wait_queue_held(wq));
-#if defined(__i386__)
-       /* On certain x86 systems, this spinlock is susceptible to
-        * lock starvation. Hence use an unlock variant which performs
-        * a cacheline flush to minimize cache affinity on acquisition.
-        */
-       i386_lock_unlock_with_flush(&(wq)->wq_interlock);
-#else
-        hw_lock_unlock(&(wq)->wq_interlock);
-#endif
+       hw_lock_unlock(&(wq)->wq_interlock);
 }
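
A sketch of the intended calling discipline, assuming the usual osfmk splsched()/splx() pattern around the interlock (the surrounding code is illustrative):

	spl_t s = splsched();
	wait_queue_lock(wq);
	/* ... examine or modify wq->wq_queue while interlocked ... */
	wait_queue_unlock(wq);
	splx(s);
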
 
 #define wqs_lock(wqs)          wait_queue_lock(&(wqs)->wqs_wait_queue)
 #define wqs_unlock(wqs)                wait_queue_unlock(&(wqs)->wqs_wait_queue)
 #define wqs_lock_try(wqs)      wait_queue_lock_try(&(wqs)->wqs_wait_queue)
+#define wqs_is_preposted(wqs)  ((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))
+
+#define wql_is_preposted(wql)  ((wql)->wql_preposts.next != NULL)
+#define wql_clear_prepost(wql)  ((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)
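
How these macros are expected to cooperate, sketched with the queue_remove() form from <kern/queue.h> (illustrative; the actual unlink paths live in wait_queue.c):

	if (wqs_is_preposted(wqs) && wql_is_preposted(wql)) {
		/* take the link off the set's prepost list ... */
		queue_remove(&wqs->wqs_preposts, wql,
		             wait_queue_link_t, wql_preposts);
		/* ... and NULL its chain pointers so wql_is_preposted()
		 * reports false from now on */
		wql_clear_prepost(wql);
	}
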
 
 #define wait_queue_assert_possible(thread) \
                        ((thread)->wait_queue == WAIT_QUEUE_NULL)
 
+/* bootstrap interface - can allocate/link wait_queues and sets after calling this */
+__private_extern__ void wait_queue_bootstrap(void);
+
 /******** Decomposed interfaces (to build higher level constructs) ***********/
 
 /* assert intent to wait on a locked wait queue */
@@ -188,17 +226,12 @@ __private_extern__ wait_result_t wait_queue_assert_wait64_locked(
                        wait_queue_t wait_queue,
                        event64_t wait_event,
                        wait_interrupt_t interruptible,
+                       wait_timeout_urgency_t urgency,
                        uint64_t deadline,
+                       uint64_t leeway,
                        thread_t thread);
 
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
-__private_extern__ void wait_queue_peek64_locked(
-                       wait_queue_t wait_queue,
-                       event64_t event,
-                       thread_t *thread,
-                       wait_queue_t *found_queue);
-
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
+/* pull a thread from its wait queue */
 __private_extern__ void wait_queue_pull_thread_locked(
                        wait_queue_t wait_queue,
                        thread_t thread,
@@ -233,6 +266,33 @@ __private_extern__ kern_return_t wait_queue_wakeup64_thread_locked(
                        wait_result_t result,
                        boolean_t unlock);
 
+extern uint32_t num_wait_queues;
+extern struct wait_queue *wait_queues;
+/* The Jenkins "one at a time" hash.
+ * TBD: There may be some value to unrolling here,
+ * depending on the architecture.
+ */
+static inline uint32_t wq_hash(char *key)
+{
+       uint32_t hash = 0;
+       size_t i, length = sizeof(char *);
+
+       for (i = 0; i < length; i++) {
+               hash += key[i];
+               hash += (hash << 10);
+               hash ^= (hash >> 6);
+       }
+       hash += (hash << 3);
+       hash ^= (hash >> 11);
+       hash += (hash << 15);
+
+       hash &= (num_wait_queues - 1);
+       return hash;
+}
+
+#define        wait_hash(event) wq_hash((char *)&event) 
+
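
Usage sketch: the hash keys on the bytes of the event pointer itself (note that wait_hash() takes the address of its argument, so the argument must be an lvalue), and the final mask assumes num_wait_queues is a power of two:

	/* map an event to its global hashed wait queue (illustrative) */
	wait_queue_t wq = &wait_queues[wait_hash(event)];
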
 #endif /* MACH_KERNEL_PRIVATE */
 
 __BEGIN_DECLS
@@ -282,12 +342,31 @@ extern kern_return_t wait_queue_unlink(
 extern kern_return_t wait_queue_unlink_all(
                        wait_queue_t wait_queue);
 
-extern kern_return_t wait_queue_unlinkall_nofree(
-                       wait_queue_t wait_queue);
-
 extern kern_return_t wait_queue_set_unlink_all(
                        wait_queue_set_t set_queue);
 
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t wait_queue_set_unlink_one(
+                       wait_queue_set_t set_queue,
+                       wait_queue_link_t link);
+
+extern kern_return_t wait_queue_unlink_nofree(
+                       wait_queue_t wait_queue,
+                       wait_queue_set_t set_queue,
+                       wait_queue_link_t *wqlp);
+
+extern kern_return_t wait_queue_unlink_all_nofree(
+                       wait_queue_t wait_queue,
+                       queue_t links);
+
+extern kern_return_t wait_queue_set_unlink_all_nofree(
+                       wait_queue_set_t set_queue,
+                       queue_t links);
+
+extern wait_queue_link_t wait_queue_link_allocate(void);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
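
A sketch of the *_nofree pattern these interfaces enable: unlink under the locks while collecting the orphaned links on a caller-supplied queue, then free them once the locks are dropped. wait_queue_link_free() is an assumption here, taken as the counterpart of wait_queue_link_allocate():

	queue_head_t free_links;
	queue_init(&free_links);

	/* collects every unlinked wait_queue_link_t on free_links */
	(void) wait_queue_unlink_all_nofree(wq, &free_links);

	/* ... after dropping the interlocks ... */
	while (!queue_empty(&free_links)) {
		wait_queue_link_t wql = (wait_queue_link_t) dequeue(&free_links);
		wait_queue_link_free(wql);	/* assumed counterpart of _allocate */
	}
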
 /* legacy API */
 kern_return_t wait_queue_sub_init(
                        wait_queue_set_t set_queue,
@@ -312,6 +391,14 @@ extern wait_result_t wait_queue_assert_wait64(
                        wait_interrupt_t interruptible,
                        uint64_t deadline);
 
+extern wait_result_t wait_queue_assert_wait64_with_leeway(
+                       wait_queue_t wait_queue,
+                       event64_t wait_event,
+                       wait_interrupt_t interruptible,
+                       wait_timeout_urgency_t urgency,
+                       uint64_t deadline,
+                       uint64_t leeway);
+
 /* wakeup the most appropriate thread waiting on <wait_queue,event64> pair */
 extern kern_return_t wait_queue_wakeup64_one(
                        wait_queue_t wait_queue,
@@ -343,11 +430,21 @@ extern wait_result_t wait_queue_assert_wait(
                        wait_interrupt_t interruptible,
                        uint64_t deadline);
 
+/* assert intent to wait on <wait_queue,event> pair */
+extern wait_result_t wait_queue_assert_wait_with_leeway(
+                       wait_queue_t wait_queue,
+                       event_t wait_event,
+                       wait_interrupt_t interruptible,
+                       wait_timeout_urgency_t urgency,
+                       uint64_t deadline,
+                       uint64_t leeway);
+
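
A sketch of a coalescing-friendly wait using the new leeway variant; the 100 ms deadline and 10 ms leeway are arbitrary, and TIMEOUT_URGENCY_LEEWAY is assumed to be the urgency flag that makes the kernel honor the leeway argument:

	uint64_t deadline, leeway;

	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
	clock_interval_to_absolutetime_interval(10, NSEC_PER_MSEC, &leeway);

	wait_result_t wres = wait_queue_assert_wait_with_leeway(wq, event,
	    THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY, deadline, leeway);
	if (wres == THREAD_WAITING)
		wres = thread_block(THREAD_CONTINUE_NULL);
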
 /* wakeup the most appropriate thread waiting on <wait_queue,event> pair */
 extern kern_return_t wait_queue_wakeup_one(
                        wait_queue_t wait_queue,
                        event_t wake_event,
-                       wait_result_t result);
+                       wait_result_t result,
+                       int priority);
 
 /* wakeup all the threads waiting on <wait_queue,event> pair */
 extern kern_return_t wait_queue_wakeup_all(