X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..bd504ef0e0b883cdd7917b73b3574eb9ce669905:/osfmk/kern/wait_queue.h

diff --git a/osfmk/kern/wait_queue.h b/osfmk/kern/wait_queue.h
index 030e82d28..fc91a60af 100644
--- a/osfmk/kern/wait_queue.h
+++ b/osfmk/kern/wait_queue.h
@@ -36,15 +36,17 @@
 #include 		/* for kern_return_t */
 
 #include 		/* for wait_queue_t */
 
+#include 
 #include 
 
 #ifdef MACH_KERNEL_PRIVATE
 
 #include 
-#include 
-#include 
+#include 
+#include 
+#include 		/* machine_timeout_suspended() */
 
 /*
  * wait_queue_t
  * This is the definition of the common event wait queue
@@ -152,7 +154,7 @@ typedef struct _wait_queue_link {
 #define wait_queue_lock_try(wq) (hw_lock_try(&(wq)->wq_interlock))
 
 /* For x86, the hardware timeout is in TSC units. */
-#if defined(i386)
+#if defined(i386) || defined(x86_64)
 #define hwLockTimeOut LockTimeOutTSC
 #else
 #define hwLockTimeOut LockTimeOut
@@ -165,11 +167,25 @@ typedef struct _wait_queue_link {
 */
 
 static inline void wait_queue_lock(wait_queue_t wq) {
-	if (!hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2))
-		panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number(
-));
+	if (__improbable(hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2) == 0)) {
+		boolean_t wql_acquired = FALSE;
+
+		while (machine_timeout_suspended()) {
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * i386/x86_64 return with preemption disabled on a timeout for
+ * diagnostic purposes.
+ */
+			mp_enable_preemption();
+#endif
+			if ((wql_acquired = hw_lock_to(&(wq)->wq_interlock, hwLockTimeOut * 2)))
+				break;
+		}
+		if (wql_acquired == FALSE)
+			panic("wait queue deadlock - wq=%p, cpu=%d\n", wq, cpu_number());
+	}
 }
- 
+
 static inline void wait_queue_unlock(wait_queue_t wq) {
 	assert(wait_queue_held(wq));
 	hw_lock_unlock(&(wq)->wq_interlock);
@@ -255,12 +271,11 @@ static inline uint32_t wq_hash(char *key)
 	hash ^= (hash >> 11);
 	hash += (hash << 15);
 
+	hash &= (num_wait_queues - 1);
 	return hash;
 }
 
-/* TBD: It should be possible to eliminate the divide here */
-#define wait_hash(event) \
-	(wq_hash((char *)&event) % (num_wait_queues))
+#define wait_hash(event) wq_hash((char *)&event)
 
 #endif /* MACH_KERNEL_PRIVATE */
 
@@ -314,6 +329,28 @@ extern kern_return_t wait_queue_unlink_all(
 extern kern_return_t wait_queue_set_unlink_all(
 	wait_queue_set_t set_queue);
 
+#ifdef XNU_KERNEL_PRIVATE
+extern kern_return_t wait_queue_set_unlink_one(
+			wait_queue_set_t set_queue,
+			wait_queue_link_t link);
+
+extern kern_return_t wait_queue_unlink_nofree(
+			wait_queue_t wait_queue,
+			wait_queue_set_t set_queue,
+			wait_queue_link_t *wqlp);
+
+extern kern_return_t wait_queue_unlink_all_nofree(
+			wait_queue_t wait_queue,
+			queue_t links);
+
+extern kern_return_t wait_queue_set_unlink_all_nofree(
+			wait_queue_set_t set_queue,
+			queue_t links);
+
+extern wait_queue_link_t wait_queue_link_allocate(void);
+
+#endif /* XNU_KERNEL_PRIVATE */
+
 /* legacy API */
 kern_return_t wait_queue_sub_init(
 	wait_queue_set_t set_queue,
@@ -373,7 +410,8 @@ extern wait_result_t wait_queue_assert_wait(
 extern kern_return_t wait_queue_wakeup_one(
 			wait_queue_t wait_queue,
 			event_t wake_event,
-			wait_result_t result);
+			wait_result_t result,
+			int priority);
 
 /* wakeup all the threads waiting on pair */
 extern kern_return_t wait_queue_wakeup_all(
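
Note on the wait_hash()/wq_hash() hunk above: the modulo by num_wait_queues is replaced by masking with (num_wait_queues - 1) inside wq_hash(), which is only equivalent when the table size is a power of two. A minimal standalone sketch of that equivalence, not code from the patch; the table size and the loop bound here are made up for illustration:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical table size; the mask trick requires a power of two. */
	#define NUM_WAIT_QUEUES 256u

	int main(void)
	{
		uint32_t h;

		for (h = 0; h < 100000u; h++) {
			/* Masking with (size - 1) keeps only the low bits, which
			 * matches the modulo exactly because NUM_WAIT_QUEUES is a
			 * power of two; for other sizes the two differ. */
			assert((h & (NUM_WAIT_QUEUES - 1)) == (h % NUM_WAIT_QUEUES));
		}
		printf("mask and modulo agree for a power-of-two table size\n");
		return 0;
	}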
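Note on the reworked wait_queue_lock() hunk: instead of panicking on the first lock timeout, the new code keeps retrying only while machine_timeout_suspended() reports that timeouts are suspended (for example while a debugger holds the machine), and panics once that is no longer the case. A rough standalone sketch of that retry shape, not code from the patch; try_lock_with_timeout() and timeouts_suspended() are hypothetical stand-ins for hw_lock_to() and machine_timeout_suspended():

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Pretend the lock never becomes free, to exercise the panic path. */
	static bool try_lock_with_timeout(void) { return false; }

	/* Pretend timeouts were suspended (e.g. by a debugger) for three rounds. */
	static bool timeouts_suspended(void) { static int n = 3; return n-- > 0; }

	static void lock_or_panic(void)
	{
		if (!try_lock_with_timeout()) {
			bool acquired = false;

			/* Retry only while timeouts are suspended. */
			while (timeouts_suspended()) {
				if ((acquired = try_lock_with_timeout()))
					break;
			}
			if (!acquired) {
				fprintf(stderr, "wait queue deadlock\n"); /* panic() in the kernel */
				exit(1);
			}
		}
	}

	int main(void)
	{
		lock_or_panic();
		return 0;
	}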