diff --git a/osfmk/kern/wait_queue.h b/osfmk/kern/wait_queue.h
index 030e82d282bc7867f9d0ddd7650f1ba784667b70..01111d16c573d3f9f176ee19bd29cba813f84c29 100644
--- a/osfmk/kern/wait_queue.h
+++ b/osfmk/kern/wait_queue.h
 #include <mach/kern_return.h>          /* for kern_return_t */
 
 #include <kern/kern_types.h>           /* for wait_queue_t */
+#include <kern/queue.h>
 
 #include <sys/cdefs.h>
 
 #ifdef MACH_KERNEL_PRIVATE
 
 #include <kern/lock.h>
-#include <kern/queue.h>
+#include <mach/branch_predicates.h>
+
 #include <machine/cpu_number.h>
+#include <machine/machine_routines.h> /* machine_timeout_suspended() */
+
+/*
+ * The event mask is 60 bits wide on 64-bit architectures and 28 bits
+ * wide on 32-bit architectures, so its width is computed from
+ * sizeof(long). If the widths of the wq_type or wq_fifo bitfields
+ * change, the value of EVENT_MASK_BITS must change as well.
+ */
+#define EVENT_MASK_BITS  ((sizeof(long) * 8) - 4)
 
 
+/*
+ * Zero out the 4 most significant bits of the event.
+ */
+#define CAST_TO_EVENT_MASK(event)  (((CAST_DOWN(unsigned long, event)) << 4) >> 4)
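As a quick illustration of what these two macros evaluate to, here is a minimal host-side sketch; it substitutes a plain cast for the kernel's CAST_DOWN, which is the only assumption made:

```c
#include <assert.h>
#include <stdio.h>

/* Host-side mirror of the kernel macros, for illustration only;
 * the kernel uses CAST_DOWN where this sketch uses a plain cast. */
#define EVENT_MASK_BITS           ((sizeof(long) * 8) - 4)
#define CAST_TO_EVENT_MASK(event) ((((unsigned long)(event)) << 4) >> 4)

int main(void)
{
	/* LP64: (8 * 8) - 4 = 60 bits; ILP32: (4 * 8) - 4 = 28 bits. */
	printf("EVENT_MASK_BITS = %zu\n", EVENT_MASK_BITS);

	/* Shifting left then right by 4 clears the 4 most significant bits. */
	assert(CAST_TO_EVENT_MASK(~0UL) == (~0UL >> 4));
	return 0;
}
```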
 /*
  *     wait_queue_t
  *     This is the definition of the common event wait queue
  *     them.
  */
 typedef struct wait_queue {
-    unsigned int                    /* flags */
-    /* boolean_t */    wq_type:16,             /* only public field */
+    unsigned long int                    /* flags */
+    /* boolean_t */    wq_type:2,              /* only public field */
                                        wq_fifo:1,              /* fifo wakeup policy? */
                                        wq_prepost:1,   /* waitq supports prepost? set only */
-                                       :0;                             /* force to long boundary */
+                                       wq_eventmask:EVENT_MASK_BITS; 
     hw_lock_data_t     wq_interlock;   /* interlock */
     queue_head_t       wq_queue;               /* queue of elements */
 } WaitQueue;
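The three narrow flag fields plus wq_eventmask are sized to fill exactly one long. A standalone sketch of that invariant, assuming GCC/clang-style long bitfields on a C11 host:

```c
#define EVENT_MASK_BITS ((sizeof(long) * 8) - 4)

/* 2 + 1 + 1 + EVENT_MASK_BITS bits fill one long exactly, keeping the
 * flags in a single naturally aligned machine word. */
struct wq_flags_sketch {
	unsigned long	wq_type:2,
			wq_fifo:1,
			wq_prepost:1,
			wq_eventmask:EVENT_MASK_BITS;
};

_Static_assert(sizeof(struct wq_flags_sketch) == sizeof(long),
    "wait queue flag bitfields must fill exactly one long");
```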
@@ -134,8 +149,8 @@ typedef struct _wait_queue_link {
 #define wql_type  wql_element.wqe_type
 #define wql_queue wql_element.wqe_queue
 
-#define _WAIT_QUEUE_inited                     0xf1d0
-#define _WAIT_QUEUE_SET_inited         0xf1d1
+#define _WAIT_QUEUE_inited             0x2
+#define _WAIT_QUEUE_SET_inited         0x3
 
 #define wait_queue_is_queue(wq)        \
        ((wq)->wq_type == _WAIT_QUEUE_inited)
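Note why the tag values shrank: the old 0xf1d0/0xf1d1 magic numbers needed the former 16-bit wq_type, while the new tags must fit the 2-bit field. A sketch of that constraint:

```c
#define _WAIT_QUEUE_inited	0x2
#define _WAIT_QUEUE_SET_inited	0x3

/* Both tags must be representable in the 2-bit wq_type bitfield. */
_Static_assert(_WAIT_QUEUE_inited <= 0x3 && _WAIT_QUEUE_SET_inited <= 0x3,
    "wq_type tags must fit in 2 bits");
```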
@@ -152,7 +167,7 @@ typedef struct _wait_queue_link {
 #define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
 
 /* For x86, the hardware timeout is in TSC units. */
-#if defined(i386)
+#if defined(i386) || defined(x86_64)
 #define        hwLockTimeOut LockTimeOutTSC
 #else
 #define        hwLockTimeOut LockTimeOut
@@ -165,11 +180,26 @@ typedef struct _wait_queue_link {
  */
 
 static inline void wait_queue_lock(wait_queue_t wq) {
-       if (!hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2))
-               panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+       if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
+               boolean_t wql_acquired = FALSE;
+
+               while (machine_timeout_suspended()) {
+#if    defined(__i386__) || defined(__x86_64__)
+/*
+ * i386/x86_64 return with preemption disabled on a timeout for
+ * diagnostic purposes.
+ */
+                       mp_enable_preemption();
+#endif
+                       if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
+                               break;
+               }
+               if (wql_acquired == FALSE)
+                       panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+       }
+       assert(wait_queue_held(wq));
 }
+
 static inline void wait_queue_unlock(wait_queue_t wq) {
        assert(wait_queue_held(wq));
        hw_lock_unlock(&(wq)->wq_interlock);
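For context, a hypothetical caller showing the intended pairing: wait_queue_lock() spins with a timeout, keeps retrying while machine timeouts are suspended (e.g., under a debugger), and panics only on a genuine deadlock.

```c
/* Hypothetical kernel-internal caller, for illustration only. */
static void example_wq_touch(wait_queue_t wq)
{
	wait_queue_lock(wq);	/* spins; panics if the interlock is never won */
	/* ... examine or modify wq->wq_queue under the interlock ... */
	wait_queue_unlock(wq);	/* asserts the lock is held, then drops it */
}
```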
@@ -196,7 +226,9 @@ __private_extern__ wait_result_t wait_queue_assert_wait64_locked(
                        wait_queue_t wait_queue,
                        event64_t wait_event,
                        wait_interrupt_t interruptible,
+                       wait_timeout_urgency_t urgency,
                        uint64_t deadline,
+                       uint64_t leeway,
                        thread_t thread);
 
 /* pull a thread from its wait queue */
@@ -234,8 +266,8 @@ __private_extern__ kern_return_t wait_queue_wakeup64_thread_locked(
                        wait_result_t result,
                        boolean_t unlock);
 
-__private_extern__ uint32_t num_wait_queues;
-__private_extern__ struct wait_queue *wait_queues;
+extern uint32_t num_wait_queues;
+extern struct wait_queue *wait_queues;
 /* The Jenkins "one at a time" hash.
  * TBD: There may be some value to unrolling here,
  * depending on the architecture.
@@ -255,12 +287,11 @@ static inline uint32_t wq_hash(char *key)
        hash ^= (hash >> 11);
        hash += (hash << 15);
 
+       hash &= (num_wait_queues - 1);
        return hash;
 }
 
-/* TBD: It should be possible to eliminate the divide here */
-#define       wait_hash(event)                                         \
-       (wq_hash((char *)&event) % (num_wait_queues))
+#define        wait_hash(event) wq_hash((char *)&event) 
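The divide disappears because wq_hash() now masks with num_wait_queues - 1, which is equivalent to the old modulo only if num_wait_queues is a power of two; a host-side sketch of that assumption:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t num_wait_queues = 256;	/* hypothetical power-of-two table size */
	uint32_t hash = 0xdeadbeefu;

	/* hash & (n - 1) == hash % n holds exactly when n is a power of two. */
	assert((hash & (num_wait_queues - 1)) == (hash % num_wait_queues));
	return 0;
}
```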
 
 #endif /* MACH_KERNEL_PRIVATE */
 
@@ -314,6 +345,28 @@ extern kern_return_t wait_queue_unlink_all(
 extern kern_return_t wait_queue_set_unlink_all(
                        wait_queue_set_t set_queue);
 
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t wait_queue_set_unlink_one(
+                       wait_queue_set_t set_queue,
+                       wait_queue_link_t link);
+
+extern kern_return_t wait_queue_unlink_nofree(
+                       wait_queue_t wait_queue,
+                       wait_queue_set_t set_queue,
+                       wait_queue_link_t *wqlp);
+
+extern kern_return_t wait_queue_unlink_all_nofree(
+                       wait_queue_t wait_queue,
+                       queue_t links);
+
+extern kern_return_t wait_queue_set_unlink_all_nofree(
+                       wait_queue_set_t set_queue,
+                       queue_t links);
+
+extern wait_queue_link_t wait_queue_link_allocate(void);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
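A sketch of how the _nofree variants might be used: the links are only unchained while the relevant locks are held and are collected on a caller-supplied queue, to be freed afterwards. wait_queue_link_free() and the wql_links chain field are assumptions here; neither appears in this diff.

```c
/* Hypothetical caller; wait_queue_link_free() and wql_links are assumed. */
static void example_unlink_all(wait_queue_t wq)
{
	queue_head_t links;
	wait_queue_link_t wql;

	queue_init(&links);
	if (wait_queue_unlink_all_nofree(wq, &links) != KERN_SUCCESS)
		return;

	/* The links were only unchained above; free them outside the locks. */
	while (!queue_empty(&links)) {
		queue_remove_first(&links, wql, wait_queue_link_t, wql_links);
		wait_queue_link_free(wql);
	}
}
```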
 /* legacy API */
 kern_return_t wait_queue_sub_init(
                        wait_queue_set_t set_queue,
@@ -338,6 +391,14 @@ extern wait_result_t wait_queue_assert_wait64(
                        wait_interrupt_t interruptible,
                        uint64_t deadline);
 
+extern wait_result_t wait_queue_assert_wait64_with_leeway(
+                       wait_queue_t wait_queue,
+                       event64_t wait_event,
+                       wait_interrupt_t interruptible,
+                       wait_timeout_urgency_t urgency,
+                       uint64_t deadline,
+                       uint64_t leeway);
+
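A hedged example of the new leeway-aware wait: the event value is a placeholder, and TIMEOUT_URGENCY_LEEWAY is assumed to be the urgency flag that lets the wakeup timer be coalesced within the leeway window.

```c
/* Hypothetical kernel-internal caller; the event value and the
 * TIMEOUT_URGENCY_LEEWAY constant are assumptions. */
static wait_result_t example_wait_with_leeway(wait_queue_t wq)
{
	uint64_t interval, deadline, leeway;

	/* Deadline ~100 ms out, with ~5 ms of permissible timer coalescing. */
	nanoseconds_to_absolutetime(100 * NSEC_PER_MSEC, &interval);
	deadline = mach_absolute_time() + interval;
	nanoseconds_to_absolutetime(5 * NSEC_PER_MSEC, &leeway);

	return wait_queue_assert_wait64_with_leeway(wq,
	    (event64_t)0x1234,		/* placeholder event */
	    THREAD_UNINT, TIMEOUT_URGENCY_LEEWAY,
	    deadline, leeway);
}
```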
 /* wakeup the most appropriate thread waiting on <wait_queue,event64> pair */
 extern kern_return_t wait_queue_wakeup64_one(
                        wait_queue_t wait_queue,
@@ -369,11 +430,21 @@ extern wait_result_t wait_queue_assert_wait(
                        wait_interrupt_t interruptible,
                        uint64_t deadline);
 
+/* assert intent to wait on <wait_queue,event> pair */
+extern wait_result_t wait_queue_assert_wait_with_leeway(
+                       wait_queue_t wait_queue,
+                       event_t wait_event,
+                       wait_interrupt_t interruptible,
+                       wait_timeout_urgency_t urgency,
+                       uint64_t deadline,
+                       uint64_t leeway);
+
 /* wakeup the most appropriate thread waiting on <wait_queue,event> pair */
 extern kern_return_t wait_queue_wakeup_one(
                        wait_queue_t wait_queue,
                        event_t wake_event,
-                       wait_result_t result);
+                       wait_result_t result,
+                       int priority);
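The extra priority argument narrows which waiter is chosen; a minimal call sketch, where passing -1 to mean "wake regardless of priority" is an assumption, not confirmed by this diff.

```c
/* Hypothetical: wake a single waiter on an event; -1 as an
 * "any priority" value is assumed, not shown in this diff. */
static kern_return_t example_wake_one(wait_queue_t wq, event_t ev)
{
	return wait_queue_wakeup_one(wq, ev, THREAD_AWAKENED, -1);
}
```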
 
 /* wakeup all the threads waiting on <wait_queue,event> pair */
 extern kern_return_t wait_queue_wakeup_all(