diff --git a/osfmk/kern/wait_queue.h b/osfmk/kern/wait_queue.h
index 560b25325..01111d16c 100644
--- a/osfmk/kern/wait_queue.h
+++ b/osfmk/kern/wait_queue.h
@@ -36,15 +36,30 @@
 #include    /* for kern_return_t */
 #include    /* for wait_queue_t */
+#include
 #include
 
 #ifdef MACH_KERNEL_PRIVATE
 
 #include
-#include
+#include
+
 #include
+#include    /* machine_timeout_suspended() */
+
+/*
+ * The event mask is 60 bits on 64-bit architectures and 28 bits on
+ * 32-bit architectures, so we calculate its size using sizeof(long).
+ * If the bitfield for wq_type and wq_fifo is changed, then the value of
+ * EVENT_MASK_BITS will also change.
+ */
+#define EVENT_MASK_BITS ((sizeof(long) * 8) - 4)
+/*
+ * Zero out the 4 most significant bits of the event.
+ */
+#define CAST_TO_EVENT_MASK(event) (((CAST_DOWN(unsigned long, event)) << 4) >> 4)
 
 /*
  * wait_queue_t
  * This is the definition of the common event wait queue
@@ -61,11 +76,11 @@
  * them.
  */
 typedef struct wait_queue {
-    unsigned int                    /* flags */
-    /* boolean_t */ wq_type:16,     /* only public field */
+    unsigned long int               /* flags */
+    /* boolean_t */ wq_type:2,      /* only public field */
                     wq_fifo:1,      /* fifo wakeup policy? */
-                    wq_isprepost:1, /* is waitq preposted? set only */
-                    :0;             /* force to long boundary */
+                    wq_prepost:1,   /* waitq supports prepost? set only */
+                    wq_eventmask:EVENT_MASK_BITS;
     hw_lock_data_t  wq_interlock;   /* interlock */
     queue_head_t    wq_queue;       /* queue of elements */
 } WaitQueue;
@@ -80,12 +95,12 @@ typedef struct wait_queue {
 typedef struct wait_queue_set {
     WaitQueue       wqs_wait_queue; /* our wait queue */
     queue_head_t    wqs_setlinks;   /* links from set perspective */
-    unsigned int    wqs_refcount;   /* refcount for preposting */
+    queue_head_t    wqs_preposts;   /* preposted links */
 } WaitQueueSet;
 
 #define wqs_type    wqs_wait_queue.wq_type
 #define wqs_fifo    wqs_wait_queue.wq_fifo
-#define wqs_isprepost wqs_wait_queue.wq_isprepost
+#define wqs_prepost wqs_wait_queue.wq_prepost
 #define wqs_queue   wqs_wait_queue.wq_queue
 
 /*
@@ -123,9 +138,10 @@ typedef WaitQueueElement *wait_queue_element_t;
  * with that port may wake up any thread from any of those portsets,
  * or one that was waiting locally on the port itself.
  */
-typedef struct wait_queue_link {
+typedef struct _wait_queue_link {
     WaitQueueElement wql_element;   /* element on master */
     queue_chain_t    wql_setlinks;  /* element on set */
+    queue_chain_t    wql_preposts;  /* element on set prepost list */
     wait_queue_set_t wql_setqueue;  /* set queue */
 } WaitQueueLink;
 
@@ -133,8 +149,8 @@ typedef struct wait_queue_link {
 #define wql_type    wql_element.wqe_type
 #define wql_queue   wql_element.wqe_queue
 
-#define _WAIT_QUEUE_inited      0xf1d0
-#define _WAIT_QUEUE_SET_inited  0xf1d1
+#define _WAIT_QUEUE_inited      0x2
+#define _WAIT_QUEUE_SET_inited  0x3
 
 #define wait_queue_is_queue(wq) \
     ((wq)->wq_type == _WAIT_QUEUE_inited)
@@ -146,30 +162,63 @@ typedef struct wait_queue_link {
     (((wq)->wq_type & ~1) == _WAIT_QUEUE_inited)
 
 #define wait_queue_empty(wq)    (queue_empty(&(wq)->wq_queue))
+
 #define wait_queue_held(wq)     (hw_lock_held(&(wq)->wq_interlock))
 #define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
+/* For x86, the hardware timeout is in TSC units. */
+#if defined(i386) || defined(x86_64)
+#define hwLockTimeOut LockTimeOutTSC
+#else
+#define hwLockTimeOut LockTimeOut
+#endif
 /*
  * Double the standard lock timeout, because wait queues tend
  * to iterate over a number of threads - locking each. If there is
  * a problem with a thread lock, it normally times out at the wait
  * queue level first, hiding the real problem.
  */
-#define wait_queue_lock(wq) \
-    ((void) (!hw_lock_to(&(wq)->wq_interlock, LockTimeOut * 2) ? \
-        panic("wait queue deadlock - wq=0x%x, cpu=%d\n", \
-            wq, cpu_number()) : 0))
-#define wait_queue_unlock(wq) \
-    (assert(wait_queue_held(wq)), hw_lock_unlock(&(wq)->wq_interlock))
+static inline void wait_queue_lock(wait_queue_t wq) {
+    if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
+        boolean_t wql_acquired = FALSE;
+
+        while (machine_timeout_suspended()) {
+#if defined(__i386__) || defined(__x86_64__)
+            /*
+             * i386/x86_64 return with preemption disabled on a timeout
+             * for diagnostic purposes.
+             */
+            mp_enable_preemption();
+#endif
+            if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
+                break;
+        }
+        if (wql_acquired == FALSE)
+            panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+    }
+    assert(wait_queue_held(wq));
+}
+
+static inline void wait_queue_unlock(wait_queue_t wq) {
+    assert(wait_queue_held(wq));
+    hw_lock_unlock(&(wq)->wq_interlock);
+}
 
 #define wqs_lock(wqs)       wait_queue_lock(&(wqs)->wqs_wait_queue)
 #define wqs_unlock(wqs)     wait_queue_unlock(&(wqs)->wqs_wait_queue)
 #define wqs_lock_try(wqs)   wait_queue__try_lock(&(wqs)->wqs_wait_queue)
+#define wqs_is_preposted(wqs)   ((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))
+
+#define wql_is_preposted(wql)   ((wql)->wql_preposts.next != NULL)
+#define wql_clear_prepost(wql)  ((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)
 
 #define wait_queue_assert_possible(thread) \
     ((thread)->wait_queue == WAIT_QUEUE_NULL)
 
+/* bootstrap interface - can allocate/link wait_queues and sets after calling this */
+__private_extern__ void wait_queue_bootstrap(void);
+
 /******** Decomposed interfaces (to build higher level constructs) ***********/
 
 /* assert intent to wait on a locked wait queue */
@@ -177,17 +226,12 @@ __private_extern__ wait_result_t wait_queue_assert_wait64_locked(
     wait_queue_t wait_queue,
     event64_t wait_event,
     wait_interrupt_t interruptible,
+    wait_timeout_urgency_t urgency,
     uint64_t deadline,
+    uint64_t leeway,
     thread_t thread);
 
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
-__private_extern__ void wait_queue_peek64_locked(
-    wait_queue_t wait_queue,
-    event64_t event,
-    thread_t *thread,
-    wait_queue_t *found_queue);
-
-/* peek to see which thread would be chosen for a wakeup - but keep on queue */
+/* pull a thread from its wait queue */
 __private_extern__ void wait_queue_pull_thread_locked(
     wait_queue_t wait_queue,
     thread_t thread,
@@ -222,6 +266,33 @@ __private_extern__ kern_return_t wait_queue_wakeup64_thread_locked(
     wait_result_t result,
     boolean_t unlock);
 
+extern uint32_t num_wait_queues;
+extern struct wait_queue *wait_queues;
+/* The Jenkins "one at a time" hash.
+ * TBD: There may be some value to unrolling here,
+ * depending on the architecture.
+ */
+static inline uint32_t wq_hash(char *key)
+{
+    uint32_t hash = 0;
+    size_t i, length = sizeof(char *);
+
+    for (i = 0; i < length; i++) {
+        hash += key[i];
+        hash += (hash << 10);
+        hash ^= (hash >> 6);
+    }
+
+    hash += (hash << 3);
+    hash ^= (hash >> 11);
+    hash += (hash << 15);
+
+    hash &= (num_wait_queues - 1);
+    return hash;
+}
+
+#define wait_hash(event) wq_hash((char *)&event)
+
 #endif /* MACH_KERNEL_PRIVATE */
 
 __BEGIN_DECLS
@@ -271,12 +342,31 @@ extern kern_return_t wait_queue_unlink(
 extern kern_return_t wait_queue_unlink_all(
     wait_queue_t wait_queue);
 
-extern kern_return_t wait_queue_unlinkall_nofree(
-    wait_queue_t wait_queue);
-
 extern kern_return_t wait_queue_set_unlink_all(
     wait_queue_set_t set_queue);
 
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t wait_queue_set_unlink_one(
+    wait_queue_set_t set_queue,
+    wait_queue_link_t link);
+
+extern kern_return_t wait_queue_unlink_nofree(
+    wait_queue_t wait_queue,
+    wait_queue_set_t set_queue,
+    wait_queue_link_t *wqlp);
+
+extern kern_return_t wait_queue_unlink_all_nofree(
+    wait_queue_t wait_queue,
+    queue_t links);
+
+extern kern_return_t wait_queue_set_unlink_all_nofree(
+    wait_queue_set_t set_queue,
+    queue_t links);
+
+extern wait_queue_link_t wait_queue_link_allocate(void);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
 /* legacy API */
 kern_return_t wait_queue_sub_init(
     wait_queue_set_t set_queue,
@@ -301,6 +391,14 @@ extern wait_result_t wait_queue_assert_wait64(
     wait_interrupt_t interruptible,
     uint64_t deadline);
 
+extern wait_result_t wait_queue_assert_wait64_with_leeway(
+    wait_queue_t wait_queue,
+    event64_t wait_event,
+    wait_interrupt_t interruptible,
+    wait_timeout_urgency_t urgency,
+    uint64_t deadline,
+    uint64_t leeway);
+
 /* wakeup the most appropriate thread waiting on <wait_queue,event64> pair */
 extern kern_return_t wait_queue_wakeup64_one(
     wait_queue_t wait_queue,
@@ -332,11 +430,21 @@ extern wait_result_t wait_queue_assert_wait(
     wait_interrupt_t interruptible,
     uint64_t deadline);
 
+/* assert intent to wait on <wait_queue,event> pair */
+extern wait_result_t wait_queue_assert_wait_with_leeway(
+    wait_queue_t wait_queue,
+    event_t wait_event,
+    wait_interrupt_t interruptible,
+    wait_timeout_urgency_t urgency,
+    uint64_t deadline,
+    uint64_t leeway);
+
 /* wakeup the most appropriate thread waiting on <wait_queue,event> pair */
 extern kern_return_t wait_queue_wakeup_one(
     wait_queue_t wait_queue,
     event_t wake_event,
-    wait_result_t result);
+    wait_result_t result,
+    int priority);
 
 /* wakeup all the threads waiting on <wait_queue,event> pair */
 extern kern_return_t wait_queue_wakeup_all(
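
Note on the EVENT_MASK_BITS hunk: the new wait_queue layout packs a 60-bit (or 28-bit on ILP32) event mask into the same unsigned long flag word as wq_type, wq_fifo and wq_prepost. The following is a minimal userspace sketch of that layout, not kernel code: it substitutes a plain cast for the kernel's CAST_DOWN macro, invents the names wq_flags and event_storage, and relies on the common compiler extension (also used by the kernel) that allows long bitfields.

#include <stdio.h>

#define EVENT_MASK_BITS           ((sizeof(long) * 8) - 4)
#define CAST_TO_EVENT_MASK(event) ((((unsigned long)(event)) << 4) >> 4)

struct wq_flags {                       /* mirrors the wait_queue flag word */
    unsigned long wq_type:2,
                  wq_fifo:1,
                  wq_prepost:1,
                  wq_eventmask:EVENT_MASK_BITS;
};

int main(void)
{
    struct wq_flags wq = { 0 };
    int event_storage;                  /* stands in for an event address */

    wq.wq_type = 0x2;                   /* _WAIT_QUEUE_inited */
    /* Folding an event into the mask cannot disturb the 4 flag bits,
     * because CAST_TO_EVENT_MASK zeroed the 4 most significant bits. */
    wq.wq_eventmask |= CAST_TO_EVENT_MASK(&event_storage);

    printf("type=%#lx eventmask=%#lx\n",
           (unsigned long)wq.wq_type, (unsigned long)wq.wq_eventmask);
    return 0;
}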
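Note on the _WAIT_QUEUE_inited change: the magic values shrink from 0xf1d0/0xf1d1 to 0x2/0x3 because wq_type is now a 2-bit field, and the validity test above, ((wq)->wq_type & ~1) == _WAIT_QUEUE_inited, clears the low bit so that both a plain queue (0b10) and a set (0b11) pass. A standalone sketch of that check; wq_type_is_valid is a stand-in name, since the header's macro takes a wait_queue_t rather than a raw type value.

#include <assert.h>

#define _WAIT_QUEUE_inited     0x2      /* plain queue: 0b10 */
#define _WAIT_QUEUE_SET_inited 0x3      /* queue set:   0b11 */

/* same test as the header's validity macro, on a raw type value */
#define wq_type_is_valid(t)    (((t) & ~1) == _WAIT_QUEUE_inited)

int main(void)
{
    assert(wq_type_is_valid(_WAIT_QUEUE_inited));     /* queue passes */
    assert(wq_type_is_valid(_WAIT_QUEUE_SET_inited)); /* set passes */
    assert(!wq_type_is_valid(0x0));                   /* never initialized */
    assert(!wq_type_is_valid(0x1));
    return 0;
}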
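Note on wq_hash()/wait_hash(): the hash buckets an event into the global wait_queues[] table by running the Jenkins one-at-a-time mix over the bytes of the event's address itself (length = sizeof(char *)) and masking with num_wait_queues - 1, which is only a valid fold if the table size is a power of two. A self-contained sketch follows; the table size of 256 is an assumption for illustration, the real table presumably being sized at wait_queue_bootstrap() time.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static uint32_t num_wait_queues = 256;  /* assumed; must be a power of two */

static inline uint32_t wq_hash(char *key)
{
    uint32_t hash = 0;
    size_t i, length = sizeof(char *);  /* hash the pointer's own bytes */

    for (i = 0; i < length; i++) {
        hash += key[i];
        hash += (hash << 10);
        hash ^= (hash >> 6);
    }

    hash += (hash << 3);
    hash ^= (hash >> 11);
    hash += (hash << 15);

    return hash & (num_wait_queues - 1); /* fold into a bucket index */
}

/* as in the header: hash the bytes of the event value, not what it points at */
#define wait_hash(event) wq_hash((char *)&event)

int main(void)
{
    long object;                         /* any object address can be an event */
    void *event = &object;

    printf("event %p -> bucket %u of %u\n",
           event, wait_hash(event), num_wait_queues);
    return 0;
}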