/*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
- *
- * HISTORY
- *
- */
-
-
-#define IOLOCKS_CPP 1
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>
+#include <IOKit/IOLocksPrivate.h>
+
extern "C" {
#include <kern/locks.h>
+#if defined(__x86_64__)
+/* Synthetic event if none is specified, for backwards compatibility only. */
+static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
+#endif
+
void IOLockInitWithState( IOLock * lock, IOLockState state)
{
if( state == kIOLockStateLocked)
int IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
- return (int) lck_mtx_sleep(lock, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);
+ return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}
int IOLockSleepDeadline( IOLock * lock, void *event,
AbsoluteTime deadline, UInt32 interType)
{
- return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_DEFAULT, (event_t) event,
+ return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
(wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}
thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
+#if defined(__x86_64__)
+/*
+ * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime
+ * to these functions, which support a NULL event.
+ */
+int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
+int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+ AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
+void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");
+
+int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
+{
+ if (event == NULL)
+ event = (void *)&IOLockSleep_NO_EVENT;
+
+ return IOLockSleep(lock, event, interType);
+}
+
+int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+ AbsoluteTime deadline, UInt32 interType)
+{
+ if (event == NULL)
+ event = (void *)&IOLockSleep_NO_EVENT;
+
+ return IOLockSleepDeadline(lock, event, deadline, interType);
+}
+
+void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
+{
+ if (event == NULL)
+ event = (void *)&IOLockSleep_NO_EVENT;
+
+ IOLockWakeup(lock, event, oneThread);
+}
+#endif /* defined(__x86_64__) */
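+
+/*
+ * Example (illustrative only): the __asm() directives above export these shims under the
+ * original "_IOLockSleep", "_IOLockSleepDeadline" and "_IOLockWakeup" symbol names, so a
+ * kext built against pre-Darwin 14 headers that calls, for instance,
+ *     IOLockSleep(lock, NULL, THREAD_UNINT);
+ * resolves to IOLockSleep_legacy_x86_64(), which substitutes the synthetic
+ * &IOLockSleep_NO_EVENT for the NULL event before forwarding to IOLockSleep().
+ */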
+
struct _IORecursiveLock {
- lck_mtx_t *mutex;
- thread_t thread;
- UInt32 count;
+ lck_mtx_t *mutex;
+ lck_grp_t *group;
+ thread_t thread;
+ UInt32 count;
};
-IORecursiveLock * IORecursiveLockAlloc( void )
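+/*
+ * Allocates a recursive lock whose underlying mutex is created in the caller-supplied
+ * lck_grp_t; the group is stored in the lock so IORecursiveLockFree() can release the
+ * mutex back to the same group.
+ */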
+IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
_IORecursiveLock * lock;
- lock = IONew( _IORecursiveLock, 1);
- if( !lock)
+ if( lockGroup == 0 )
+ return( 0 );
+
+ lock = IONew( _IORecursiveLock, 1 );
+ if( !lock )
return( 0 );
- lock->mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
- if( lock->mutex) {
+ lock->mutex = lck_mtx_alloc_init( lockGroup, LCK_ATTR_NULL );
+ if( lock->mutex ) {
+ lock->group = lockGroup;
lock->thread = 0;
lock->count = 0;
} else {
- IODelete( lock, _IORecursiveLock, 1);
+ IODelete( lock, _IORecursiveLock, 1 );
lock = 0;
}
return( (IORecursiveLock *) lock );
}
+
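+/*
+ * The existing IORecursiveLockAlloc() interface is preserved; it simply forwards to the
+ * lock-group variant using the kernel's default IOLockGroup.
+ */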
+IORecursiveLock * IORecursiveLockAlloc( void )
+{
+ return IORecursiveLockAllocWithLockGroup( IOLockGroup );
+}
+
void IORecursiveLockFree( IORecursiveLock * _lock )
{
_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
-
- lck_mtx_free( lock->mutex , IOLockGroup);
- IODelete( lock, _IORecursiveLock, 1);
+
+ lck_mtx_free( lock->mutex, lock->group );
+ IODelete( lock, _IORecursiveLock, 1 );
}
-lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock)
+lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
- return( lock->mutex);
+ return( lock->mutex );
}
void IORecursiveLockLock( IORecursiveLock * _lock)
int res;
assert(lock->thread == IOThreadSelf());
- assert(lock->count == 1 || interType == THREAD_UNINT);
lock->count = 0;
lock->thread = 0;
- res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);
+ res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
+
+ // Must re-establish the recursive lock no matter why we woke up
+ // otherwise we would potentially leave the return path corrupted.
+ assert(lock->thread == 0);
+ assert(lock->count == 0);
+ lock->thread = IOThreadSelf();
+ lock->count = count;
+ return res;
+}
+
+int IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
+ AbsoluteTime deadline, UInt32 interType)
+{
+ _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+ UInt32 count = lock->count;
+ int res;
+
+ assert(lock->thread == IOThreadSelf());
+
+ lock->count = 0;
+ lock->thread = 0;
+ res = lck_mtx_sleep_deadline(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
+ (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
// Must re-establish the recursive lock no matter why we woke up
// otherwise we would potentially leave the return path corrupted.