X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/2d21ac55c334faf3a56e5634905ed6987fc787d4..15129b1c8dbb3650c63b70adb1cad9af601c6c17:/osfmk/kern/wait_queue.h

diff --git a/osfmk/kern/wait_queue.h b/osfmk/kern/wait_queue.h
index 5a1be3f35..01111d16c 100644
--- a/osfmk/kern/wait_queue.h
+++ b/osfmk/kern/wait_queue.h
@@ -36,15 +36,30 @@
 #include <mach/kern_return.h>		/* for kern_return_t */
 
 #include <mach/sync_policy.h>		/* for wait_queue_t */
+#include <stdint.h>
 
 #include <sys/cdefs.h>
 
 #ifdef	MACH_KERNEL_PRIVATE
 
 #include <kern/lock.h>
-#include <machine/lock.h>
+#include <kern/queue.h>
+
 #include <machine/cpu_number.h>
+#include <machine/machine_routines.h>	/* machine_timeout_suspended() */
+/*
+ * The event mask is 60 bits on 64-bit architectures and 28 bits on
+ * 32-bit architectures, so we calculate its size using sizeof(long).
+ * If the bitfields for wq_type, wq_fifo and wq_prepost are changed,
+ * the value of EVENT_MASK_BITS must change as well.
+ */
+#define EVENT_MASK_BITS	((sizeof(long) * 8) - 4)
+
+/*
+ * Zero out the 4 most significant bits of the event.
+ */
+#define CAST_TO_EVENT_MASK(event)	(((CAST_DOWN(unsigned long, event)) << 4) >> 4)
 
 /*
  *	wait_queue_t
  *	This is the definition of the common event wait queue
@@ -61,11 +76,11 @@
  *	them.
  */
 typedef struct wait_queue {
-	unsigned int                    /* flags */
-	/* boolean_t */	wq_type:16,	/* only public field */
+	unsigned long int               /* flags */
+	/* boolean_t */	wq_type:2,	/* only public field */
 			wq_fifo:1,	/* fifo wakeup policy? */
-			wq_isprepost:1,	/* is waitq preposted? set only */
-			:0;		/* force to long boundary */
+			wq_prepost:1,	/* waitq supports prepost? set only */
+			wq_eventmask:EVENT_MASK_BITS;
 	hw_lock_data_t	wq_interlock;	/* interlock */
 	queue_head_t	wq_queue;	/* queue of elements */
 } WaitQueue;
@@ -80,12 +95,12 @@ typedef struct wait_queue {
 typedef struct wait_queue_set {
 	WaitQueue	wqs_wait_queue;	/* our wait queue */
 	queue_head_t	wqs_setlinks;	/* links from set perspective */
-	unsigned int	wqs_refcount;	/* refcount for preposting */
+	queue_head_t	wqs_preposts;	/* preposted links */
 } WaitQueueSet;
 
 #define wqs_type	wqs_wait_queue.wq_type
 #define wqs_fifo	wqs_wait_queue.wq_fifo
-#define wqs_isprepost	wqs_wait_queue.wq_isprepost
+#define wqs_prepost	wqs_wait_queue.wq_prepost
 #define wqs_queue	wqs_wait_queue.wq_queue
 
 /*
@@ -126,6 +141,7 @@ typedef WaitQueueElement *wait_queue_element_t;
 typedef struct _wait_queue_link {
 	WaitQueueElement	wql_element;	/* element on master */
 	queue_chain_t		wql_setlinks;	/* element on set */
+	queue_chain_t		wql_preposts;	/* element on set prepost list */
 	wait_queue_set_t	wql_setqueue;	/* set queue */
 } WaitQueueLink;
 
@@ -133,8 +149,8 @@ typedef struct _wait_queue_link {
 #define wql_type	wql_element.wqe_type
 #define wql_queue	wql_element.wqe_queue
 
-#define _WAIT_QUEUE_inited		0xf1d0
-#define _WAIT_QUEUE_SET_inited		0xf1d1
+#define _WAIT_QUEUE_inited		0x2
+#define _WAIT_QUEUE_SET_inited		0x3
 
 #define wait_queue_is_queue(wq)	\
 	((wq)->wq_type == _WAIT_QUEUE_inited)
@@ -151,7 +167,7 @@ typedef struct _wait_queue_link {
 #define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
 /* For x86, the hardware timeout is in TSC units.
  */
-#if defined(i386)
+#if defined(i386) || defined(x86_64)
 #define	hwLockTimeOut LockTimeOutTSC
 #else
 #define	hwLockTimeOut LockTimeOut
@@ -164,31 +180,45 @@ typedef struct _wait_queue_link {
  */
 static inline void wait_queue_lock(wait_queue_t wq) {
-	if (!hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2))
-		panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number(
-));
+	if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
+		boolean_t wql_acquired = FALSE;
+
+		while (machine_timeout_suspended()) {
+#if	defined(__i386__) || defined(__x86_64__)
+/*
+ * i386/x86_64 return with preemption disabled on a timeout for
+ * diagnostic purposes.
+ */
+			mp_enable_preemption();
+#endif
+			if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
+				break;
+		}
+		if (wql_acquired == FALSE)
+			panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+	}
+	assert(wait_queue_held(wq));
 }
- 
+
 static inline void wait_queue_unlock(wait_queue_t wq) {
 	assert(wait_queue_held(wq));
-#if defined(__i386__)
-	/* DRK: On certain x86 systems, this spinlock is susceptible to
-	 * lock starvation. Hence use an unlock variant which performs
-	 * a cacheline flush to minimize cache affinity on acquisition.
-	 */
-	i386_lock_unlock_with_flush(&(wq)->wq_interlock);
-#else
 	hw_lock_unlock(&(wq)->wq_interlock);
-#endif
 }
 
 #define wqs_lock(wqs)		wait_queue_lock(&(wqs)->wqs_wait_queue)
 #define wqs_unlock(wqs)		wait_queue_unlock(&(wqs)->wqs_wait_queue)
 #define wqs_lock_try(wqs)	wait_queue_lock_try(&(wqs)->wqs_wait_queue)
+#define wqs_is_preposted(wqs)	((wqs)->wqs_prepost && !queue_empty(&(wqs)->wqs_preposts))
+
+#define wql_is_preposted(wql)	((wql)->wql_preposts.next != NULL)
+#define wql_clear_prepost(wql)	((wql)->wql_preposts.next = (wql)->wql_preposts.prev = NULL)
 
 #define wait_queue_assert_possible(thread) \
 			((thread)->wait_queue == WAIT_QUEUE_NULL)
 
+/* bootstrap interface - can allocate/link wait_queues and sets after calling this */
+__private_extern__ void wait_queue_bootstrap(void);
+
 /******** Decomposed interfaces (to build higher level constructs) ***********/
 
 /* assert intent to wait on a locked wait queue */
@@ -196,7 +226,9 @@ __private_extern__ wait_result_t wait_queue_assert_wait64_locked(
 			wait_queue_t wait_queue,
 			event64_t wait_event,
 			wait_interrupt_t interruptible,
+			wait_timeout_urgency_t urgency,
 			uint64_t deadline,
+			uint64_t leeway,
 			thread_t thread);
 
 /* pull a thread from its wait queue */
@@ -234,6 +266,33 @@ __private_extern__ kern_return_t wait_queue_wakeup64_thread_locked(
 			wait_result_t result,
 			boolean_t unlock);
 
+extern uint32_t num_wait_queues;
+extern struct wait_queue *wait_queues;
+/* The Jenkins "one at a time" hash.
+ * TBD: There may be some value to unrolling here,
+ * depending on the architecture.
+ */
+static inline uint32_t wq_hash(char *key)
+{
+	uint32_t hash = 0;
+	size_t i, length = sizeof(char *);
+
+	for (i = 0; i < length; i++) {
+		hash += key[i];
+		hash += (hash << 10);
+		hash ^= (hash >> 6);
+	}
+
+	hash += (hash << 3);
+	hash ^= (hash >> 11);
+	hash += (hash << 15);
+
+	hash &= (num_wait_queues - 1);
+	return hash;
+}
+
+#define wait_hash(event) wq_hash((char *)&event)
+
 #endif	/* MACH_KERNEL_PRIVATE */
 
 __BEGIN_DECLS
@@ -283,12 +342,31 @@ extern kern_return_t wait_queue_unlink(
 extern kern_return_t wait_queue_unlink_all(
 			wait_queue_t wait_queue);
 
-extern kern_return_t wait_queue_unlinkall_nofree(
-			wait_queue_t wait_queue);
-
 extern kern_return_t wait_queue_set_unlink_all(
 			wait_queue_set_t set_queue);
 
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t wait_queue_set_unlink_one(
+			wait_queue_set_t set_queue,
+			wait_queue_link_t link);
+
+extern kern_return_t wait_queue_unlink_nofree(
+			wait_queue_t wait_queue,
+			wait_queue_set_t set_queue,
+			wait_queue_link_t *wqlp);
+
+extern kern_return_t wait_queue_unlink_all_nofree(
+			wait_queue_t wait_queue,
+			queue_t links);
+
+extern kern_return_t wait_queue_set_unlink_all_nofree(
+			wait_queue_set_t set_queue,
+			queue_t links);
+
+extern wait_queue_link_t wait_queue_link_allocate(void);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
 /* legacy API */
 kern_return_t wait_queue_sub_init(
 			wait_queue_set_t set_queue,
@@ -313,6 +391,14 @@ extern wait_result_t wait_queue_assert_wait64(
 			wait_interrupt_t interruptible,
 			uint64_t deadline);
 
+extern wait_result_t wait_queue_assert_wait64_with_leeway(
+			wait_queue_t wait_queue,
+			event64_t wait_event,
+			wait_interrupt_t interruptible,
+			wait_timeout_urgency_t urgency,
+			uint64_t deadline,
+			uint64_t leeway);
+
 /* wakeup the most appropriate thread waiting on <wait_queue, event64> pair */
 extern kern_return_t wait_queue_wakeup64_one(
 			wait_queue_t wait_queue,
@@ -344,11 +430,21 @@ extern wait_result_t wait_queue_assert_wait(
 			wait_interrupt_t interruptible,
 			uint64_t deadline);
 
+/* assert intent to wait on <wait_queue, event> pair */
+extern wait_result_t wait_queue_assert_wait_with_leeway(
+			wait_queue_t wait_queue,
+			event_t wait_event,
+			wait_interrupt_t interruptible,
+			wait_timeout_urgency_t urgency,
+			uint64_t deadline,
+			uint64_t leeway);
+
 /* wakeup the most appropriate thread waiting on <wait_queue, event> pair */
 extern kern_return_t wait_queue_wakeup_one(
 			wait_queue_t wait_queue,
 			event_t wake_event,
-			wait_result_t result);
+			wait_result_t result,
+			int priority);
 
 /* wakeup all the threads waiting on <wait_queue, event> pair */
 extern kern_return_t wait_queue_wakeup_all(
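
A note on the reshaped bitfield and the two new macros: wq_type shrinks from 16 bits to 2, so together with wq_fifo and wq_prepost only 4 bits of a long are spent on flags, and the remaining EVENT_MASK_BITS hold wq_eventmask. CAST_TO_EVENT_MASK() clips an event word to fit that field by dropping its 4 most significant bits. The stand-alone sketch below demonstrates just the arithmetic; it assumes an LP64 build and reduces the kernel's CAST_DOWN to a plain cast, so treat it as an illustration rather than the kernel code.

#include <stdio.h>

/* Same arithmetic as the kernel macro: bits in a long, minus the
 * four flag bits wq_type:2 + wq_fifo:1 + wq_prepost:1. */
#define EVENT_MASK_BITS	((sizeof(long) * 8) - 4)

/* CAST_DOWN is reduced to a plain cast for this sketch; the shift
 * pair zeroes the 4 most significant bits so the value fits in the
 * wq_eventmask bitfield. */
#define CAST_TO_EVENT_MASK(event) \
	((((unsigned long)(event)) << 4) >> 4)

int main(void)
{
	unsigned long ev = 0xF123456789ABCDEFUL;	/* top nibble set */

	/* 60 on LP64, 28 on ILP32 */
	printf("EVENT_MASK_BITS    = %zu\n", (size_t)EVENT_MASK_BITS);
	/* prints 0x123456789abcdef: only the top 4 bits are lost */
	printf("CAST_TO_EVENT_MASK = 0x%lx\n", CAST_TO_EVENT_MASK(ev));
	return 0;
}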
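The rewritten wait_queue_lock() only panics when a lock timeout cannot be excused: while machine_timeout_suspended() reports that hardware timeouts are suspended (for instance under a debugger), it simply retries, and on i386/x86_64 it first re-enables the preemption that a timed-out hw_lock_to() leaves disabled for diagnostic purposes. Below is a user-space analogue of that retry-then-panic shape; every name in it is illustrative, with C11 atomics standing in for hw_lock_to() and abort() standing in for panic().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_flag interlock = ATOMIC_FLAG_INIT;

/* Bounded spin standing in for hw_lock_to(): true once acquired. */
static bool lock_with_timeout(atomic_flag *lock, unsigned long spins)
{
	while (spins-- > 0) {
		if (!atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
			return true;
	}
	return false;
}

/* Stand-in for machine_timeout_suspended(); the kernel returns true
 * here while timeouts are administratively suspended. */
static bool timeouts_suspended(void)
{
	return false;
}

static void lock_or_die(void)
{
	if (!lock_with_timeout(&interlock, 1000000)) {
		bool acquired = false;

		/* Timeouts are excused while suspended: keep retrying. */
		while (timeouts_suspended()) {
			if ((acquired = lock_with_timeout(&interlock, 1000000)))
				break;
		}
		if (!acquired) {
			/* No excuse left: the kernel panics at this point. */
			fprintf(stderr, "interlock deadlock\n");
			abort();
		}
	}
}

int main(void)
{
	lock_or_die();		/* uncontended: acquired on the first try */
	atomic_flag_clear_explicit(&interlock, memory_order_release);
	return 0;
}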
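The new wait_hash() buckets an event by hashing the bytes of the event word itself (an address) with Bob Jenkins's one-at-a-time function, then masking with num_wait_queues - 1 instead of dividing. The mask is only equivalent to a modulo when num_wait_queues is a power of two, which the kernel presumably guarantees when it sizes the table at wait_queue_bootstrap() time. Here is a user-space sketch; the table size and main() are hypothetical additions for demonstration.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical table size; in the kernel, num_wait_queues is a
 * global. Must be a power of two for the mask below to behave
 * like a modulo. */
static uint32_t num_wait_queues = 256;

static inline uint32_t wq_hash(char *key)
{
	uint32_t hash = 0;
	size_t i, length = sizeof(char *);	/* the key is an address */

	for (i = 0; i < length; i++) {		/* one-at-a-time mixing */
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);			/* final avalanche */
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash & (num_wait_queues - 1);	/* cheap power-of-two modulo */
}

#define wait_hash(event) wq_hash((char *)&event)

int main(void)
{
	int object;
	void *event = &object;			/* events are just addresses */

	printf("event %p -> bucket %u\n", event, wait_hash(event));
	return 0;
}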