X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/2d21ac55c334faf3a56e5634905ed6987fc787d4..04b8595b18b1b41ac7a206e4b3d51a635f8413d7:/iokit/Kernel/IOLocks.cpp

diff --git a/iokit/Kernel/IOLocks.cpp b/iokit/Kernel/IOLocks.cpp
index 988e01529..2febff6c4 100644
--- a/iokit/Kernel/IOLocks.cpp
+++ b/iokit/Kernel/IOLocks.cpp
@@ -25,15 +25,6 @@
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
- *
- * HISTORY
- *
- */
-
-
-#define IOLOCKS_CPP 1
 
 #include <IOKit/system.h>
 
@@ -46,6 +37,11 @@ extern "C" {
 #include <kern/locks.h>
 
+#if defined(__x86_64__)
+/* Synthetic event if none is specified, for backwards compatibility only. */
+static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
+#endif
+
 void	IOLockInitWithState( IOLock * lock, IOLockState state)
 {
     if( state == kIOLockStateLocked)
@@ -69,13 +65,13 @@ lck_mtx_t * IOLockGetMachLock( IOLock * lock)
 
 int	IOLockSleep( IOLock * lock, void *event, UInt32 interType)
 {
-    return (int) lck_mtx_sleep(lock, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);
+    return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
 }
 
 int	IOLockSleepDeadline( IOLock * lock, void *event,
                             AbsoluteTime deadline, UInt32 interType)
 {
-    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_DEFAULT, (event_t) event,
+    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
 					(wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
 }
 
@@ -84,6 +80,42 @@ void	IOLockWakeup(IOLock * lock, void *event, bool oneThread)
 	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
 }
 
+#if defined(__x86_64__)
+/*
+ * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime to this function,
+ * which supports a NULL event,
+ */
+int	IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
+int	IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+                                           AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
+void	IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");
+
+int	IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
+{
+	if (event == NULL)
+		event = (void *)&IOLockSleep_NO_EVENT;
+
+	return IOLockSleep(lock, event, interType);
+}
+
+int	IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+                                           AbsoluteTime deadline, UInt32 interType)
+{
+	if (event == NULL)
+		event = (void *)&IOLockSleep_NO_EVENT;
+
+	return IOLockSleepDeadline(lock, event, deadline, interType);
+}
+
+void	IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
+{
+	if (event == NULL)
+		event = (void *)&IOLockSleep_NO_EVENT;
+
+	IOLockWakeup(lock, event, oneThread);
+}
+#endif /* defined(__x86_64__) */
+
 struct _IORecursiveLock {
 	lck_mtx_t	*mutex;
@@ -195,11 +227,33 @@ int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
     int res;
 
     assert(lock->thread == IOThreadSelf());
-    assert(lock->count == 1 || interType == THREAD_UNINT);
 
     lock->count = 0;
     lock->thread = 0;
-    res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);
+    res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
+
+    // Must re-establish the recursive lock no matter why we woke up
+    // otherwise we would potentially leave the return path corrupted.
+    assert(lock->thread == 0);
+    assert(lock->count == 0);
+    lock->thread = IOThreadSelf();
+    lock->count = count;
+    return res;
+}
+
+int IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
+                                  AbsoluteTime deadline, UInt32 interType)
+{
+    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+    UInt32 count = lock->count;
+    int res;
+
+    assert(lock->thread == IOThreadSelf());
+
+    lock->count = 0;
+    lock->thread = 0;
+    res = lck_mtx_sleep_deadline(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
+                                 (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
 
     // Must re-establish the recursive lock no matter why we woke up
    // otherwise we would potentially leave the return path corrupted.
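
The x86_64 block above exists only so that kexts linked against pre-Darwin 14 headers can keep passing a NULL event, which is then replaced by the synthetic IOLockSleep_NO_EVENT address; the exported IOLockSleep, IOLockSleepDeadline and IOLockWakeup now expect a real event address and sleep with LCK_SLEEP_PROMOTED_PRI rather than LCK_SLEEP_DEFAULT. The following is a minimal kext-side sketch of the intended calling pattern; it is not taken from this change, and SamplePolledDevice, fStateLock, fDataReady and the exact header list are illustrative assumptions.

/*
 * Hypothetical kext-side sketch (not part of this diff): waiting on an
 * explicit, non-NULL event address, as the non-legacy IOLockSleep expects.
 * SamplePolledDevice, fStateLock and fDataReady are invented names.
 */
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <kern/kern_types.h>    /* THREAD_INTERRUPTIBLE, THREAD_AWAKENED */

class SamplePolledDevice
{
    IOLock * fStateLock;
    bool     fDataReady;

public:
    bool initLocks()
    {
        fDataReady = false;
        fStateLock = IOLockAlloc();              // allocates the backing lck_mtx
        return (fStateLock != NULL);
    }

    void freeLocks()
    {
        if (fStateLock) IOLockFree(fStateLock);
    }

    // Consumer: block until the producer signals &fDataReady.
    IOReturn waitForData()
    {
        IOLockLock(fStateLock);
        while (!fDataReady) {
            // The event is the flag's own address: stable and never NULL.
            // IOLockSleep drops the mutex while asleep and retakes it on wakeup.
            int wr = IOLockSleep(fStateLock, &fDataReady, THREAD_INTERRUPTIBLE);
            if (wr != THREAD_AWAKENED) {         // interrupted or aborted wait
                IOLockUnlock(fStateLock);
                return kIOReturnAborted;
            }
        }
        fDataReady = false;
        IOLockUnlock(fStateLock);
        return kIOReturnSuccess;
    }

    // Producer: publish the flag and wake every thread sleeping on &fDataReady.
    void dataArrived()
    {
        IOLockLock(fStateLock);
        fDataReady = true;
        IOLockWakeup(fStateLock, &fDataReady, false /* wake all waiters */);
        IOLockUnlock(fStateLock);
    }
};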
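The final hunk makes IORecursiveLockSleep re-establish the owning thread and recursion count on every wakeup path and adds a timed variant, IORecursiveLockSleepDeadline. A sketch of how a caller might use the timed variant follows; gQueueLock, gQueueNonEmpty, waitForWork and the deadline computation via clock_interval_to_deadline are assumptions for illustration, not part of the diff.

/*
 * Hypothetical caller of the new IORecursiveLockSleepDeadline (not from this
 * diff). gQueueLock and gQueueNonEmpty are invented names; the lock must be
 * created once with IORecursiveLockAlloc() before these are used.
 */
#include <IOKit/IOLib.h>
#include <IOKit/IOLocks.h>
#include <kern/clock.h>         /* clock_interval_to_deadline() */

static IORecursiveLock * gQueueLock;
static bool              gQueueNonEmpty;    // protected by gQueueLock

// Returns true if work became available before the timeout expired.
static bool waitForWork(uint32_t timeoutMS)
{
    AbsoluteTime deadline;
    bool         gotWork;

    // Convert the relative interval into the absolute deadline the API takes.
    clock_interval_to_deadline(timeoutMS, kMillisecondScale, &deadline);

    IORecursiveLockLock(gQueueLock);
    while (!gQueueNonEmpty) {
        // Drops the recursive lock while asleep; the fixed wakeup path above
        // restores the owning thread and recursion count before returning.
        int wr = IORecursiveLockSleepDeadline(gQueueLock, &gQueueNonEmpty,
                                              deadline, THREAD_UNINT);
        if (wr == THREAD_TIMED_OUT)
            break;
    }
    gotWork = gQueueNonEmpty;
    IORecursiveLockUnlock(gQueueLock);
    return gotWork;
}

// Producer: mark the queue non-empty and wake one waiter on &gQueueNonEmpty.
static void workArrived(void)
{
    IORecursiveLockLock(gQueueLock);
    gQueueNonEmpty = true;
    IORecursiveLockWakeup(gQueueLock, &gQueueNonEmpty, true /* one thread */);
    IORecursiveLockUnlock(gQueueLock);
}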