/*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
- *
- * HISTORY
- *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-
#include <IOKit/system.h>
#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>
+#include <IOKit/IOLocksPrivate.h>
+
extern "C" {
-#include <kern/simple_lock.h>
-#include <machine/machine_routines.h>
+#include <kern/locks.h>
+
+#if defined(__x86_64__)
+/* Synthetic event if none is specified, for backwards compatibility only. */
+static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
+#endif
+
+void IOLockInitWithState( IOLock * lock, IOLockState state)
+{
+ if( state == kIOLockStateLocked)
+ lck_mtx_lock( lock);
+}
IOLock * IOLockAlloc( void )
{
- return( mutex_alloc(ETAP_IO_AHA) );
+ return( lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}
void IOLockFree( IOLock * lock)
{
- mutex_free( lock );
+ lck_mtx_free( lock, IOLockGroup);
}
-void IOLockInitWithState( IOLock * lock, IOLockState state)
+lck_mtx_t * IOLockGetMachLock( IOLock * lock)
{
- if( state == kIOLockStateLocked)
- IOLockLock( lock);
+ return( (lck_mtx_t *)lock);
+}
+
+int IOLockSleep( IOLock * lock, void *event, UInt32 interType)
+{
+ return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}
+int IOLockSleepDeadline( IOLock * lock, void *event,
+ AbsoluteTime deadline, UInt32 interType)
+{
+ return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
+ (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
+}
+
+void IOLockWakeup(IOLock * lock, void *event, bool oneThread)
+{
+ thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
+}
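+
+/*
+ * Illustrative usage sketch (not part of this change): the canonical
+ * condition-wait pattern these primitives support. The flag name
+ * "workReady" is hypothetical.
+ *
+ *     IOLockLock(lock);
+ *     while (!workReady)                               // recheck: wakeups may be spurious
+ *         IOLockSleep(lock, &workReady, THREAD_UNINT); // drops the mutex, sleeps, re-acquires
+ *     IOLockUnlock(lock);
+ *
+ * and on the signalling side:
+ *
+ *     IOLockLock(lock);
+ *     workReady = true;
+ *     IOLockWakeup(lock, &workReady, false);           // false: wake every waiter on this event
+ *     IOLockUnlock(lock);
+ */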
+
+#if defined(__x86_64__)
+/*
+ * For backwards compatibility, kexts built against pre-Darwin 14 headers
+ * will bind at runtime to this function, which supports a NULL event.
+ */
+int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
+int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+ AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
+void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");
+
+int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
+{
+ if (event == NULL)
+ event = (void *)&IOLockSleep_NO_EVENT;
+
+ return IOLockSleep(lock, event, interType);
+}
+
+int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+ AbsoluteTime deadline, UInt32 interType)
+{
+ if (event == NULL)
+ event = (void *)&IOLockSleep_NO_EVENT;
+
+ return IOLockSleepDeadline(lock, event, deadline, interType);
+}
+
+void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
+{
+ if (event == NULL)
+ event = (void *)&IOLockSleep_NO_EVENT;
+
+ IOLockWakeup(lock, event, oneThread);
+}
+#endif /* defined(__x86_64__) */
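+
+/*
+ * Sketch of why the __asm labels above work (assumption: Mach-O C symbols
+ * carry a leading underscore): the labels hand these shims the exported
+ * names "_IOLockSleep", "_IOLockSleepDeadline" and "_IOLockWakeup", so a
+ * kext linked against pre-Darwin 14 headers keeps resolving to them and
+ * may still legally pass a NULL event:
+ *
+ *     IOLockSleep(lock, NULL, THREAD_UNINT);  // old binary: binds to the legacy shim,
+ *                                             // which substitutes &IOLockSleep_NO_EVENT
+ *
+ * Newer code is assumed to bind to the strict implementations above under
+ * differently-decorated symbol names supplied by the current headers.
+ */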
+
+
struct _IORecursiveLock {
- mutex_t * mutex;
- thread_t thread;
- UInt32 count;
+ lck_mtx_t *mutex;
+ lck_grp_t *group;
+ thread_t thread;
+ UInt32 count;
};
-IORecursiveLock * IORecursiveLockAlloc( void )
+IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
_IORecursiveLock * lock;
- lock = IONew( _IORecursiveLock, 1);
- if( !lock)
+ if( lockGroup == 0 )
return( 0 );
- lock->mutex = mutex_alloc(ETAP_IO_AHA);
- if( lock->mutex) {
+ lock = IONew( _IORecursiveLock, 1 );
+ if( !lock )
+ return( 0 );
+
+ lock->mutex = lck_mtx_alloc_init( lockGroup, LCK_ATTR_NULL );
+ if( lock->mutex ) {
+ lock->group = lockGroup;
lock->thread = 0;
lock->count = 0;
} else {
- IODelete( lock, _IORecursiveLock, 1);
+ IODelete( lock, _IORecursiveLock, 1 );
lock = 0;
}
return( (IORecursiveLock *) lock );
}
+
+IORecursiveLock * IORecursiveLockAlloc( void )
+{
+ return IORecursiveLockAllocWithLockGroup( IOLockGroup );
+}
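+
+/*
+ * Illustrative sketch (not part of this change): what the recursive
+ * variant buys you. "helperThatAlsoLocks" is a hypothetical function
+ * that takes the same lock internally.
+ *
+ *     IORecursiveLockLock(rlock);      // owner, count = 1
+ *     helperThatAlsoLocks(rlock);      // relocks/unlocks on the same thread: 2, then 1
+ *     IORecursiveLockUnlock(rlock);    // count hits 0, mutex actually released
+ */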
+
void IORecursiveLockFree( IORecursiveLock * _lock )
{
_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
-
-    mutex_free( lock->mutex );
-    IODelete( lock, _IORecursiveLock, 1);
+
+    lck_mtx_free( lock->mutex, lock->group );
+    IODelete( lock, _IORecursiveLock, 1 );
+}
+
+lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock )
+{
+    return( lock->mutex );
}

void IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf())
lock->count++;
else {
- mutex_lock( lock->mutex );
+ lck_mtx_lock( lock->mutex );
assert( lock->thread == 0 );
assert( lock->count == 0 );
lock->thread = IOThreadSelf();
        lock->count++;
    }
}

boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf()) {
        lock->count++;
        return( true );
    } else {
- if( mutex_try( lock->mutex )) {
+ if( lck_mtx_try_lock( lock->mutex )) {
assert( lock->thread == 0 );
assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count++;
        return( true );
        }
    }
    return( false );
}

void IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf() );

    if( 0 == (--lock->count)) {
lock->thread = 0;
- mutex_unlock( lock->mutex );
+ lck_mtx_unlock( lock->mutex );
}
}

int IORecursiveLockSleep( IORecursiveLock * _lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

assert(lock->thread == IOThreadSelf());
- assert(lock->count == 1 || interType == THREAD_UNINT);
lock->count = 0;
lock->thread = 0;
- res = thread_sleep_mutex((event_t) event, lock->mutex, (int) interType);
+ res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
+
+    // Must re-establish the recursive lock no matter why we woke up;
+    // otherwise we would potentially leave the return path corrupted.
+ assert(lock->thread == 0);
+ assert(lock->count == 0);
+ lock->thread = IOThreadSelf();
+ lock->count = count;
+ return res;
+}
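+
+/*
+ * Illustrative sketch (not part of this change): because the whole
+ * recursion count is saved and restored around the sleep, a thread
+ * holding the lock more than once can still wait. "&cond" is a
+ * hypothetical event address.
+ *
+ *     IORecursiveLockLock(rlock);
+ *     IORecursiveLockLock(rlock);                        // count = 2
+ *     IORecursiveLockSleep(rlock, &cond, THREAD_UNINT);  // count -> 0, sleep, restore to 2
+ *     IORecursiveLockUnlock(rlock);
+ *     IORecursiveLockUnlock(rlock);
+ */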
+
+int IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
+ AbsoluteTime deadline, UInt32 interType)
+{
+ _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+ UInt32 count = lock->count;
+ int res;
+
+ assert(lock->thread == IOThreadSelf());
+
+ lock->count = 0;
+ lock->thread = 0;
+ res = lck_mtx_sleep_deadline(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
+ (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
    // Must re-establish the recursive lock no matter why we woke up;
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

IORWLock * IORWLockAlloc( void )
{
- IORWLock * lock;
-
- lock = lock_alloc( true, ETAP_IO_AHA, ETAP_IO_AHA);
-
- return( lock);
+ return( lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}
void IORWLockFree( IORWLock * lock)
{
- lock_free( lock );
+ lck_rw_free( lock, IOLockGroup);
+}
+
+lck_rw_t * IORWLockGetMachLock( IORWLock * lock)
+{
+ return( (lck_rw_t *)lock);
}
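+
+/*
+ * Illustrative sketch (not part of this change), using the reader/writer
+ * entry points declared in IOKit/IOLocks.h:
+ *
+ *     IORWLockRead(rwlock);        // any number of readers may hold this at once
+ *     // ... read shared state ...
+ *     IORWLockUnlock(rwlock);
+ *
+ *     IORWLockWrite(rwlock);       // a writer holds it exclusively
+ *     // ... mutate shared state ...
+ *     IORWLockUnlock(rwlock);
+ */
+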
IOSimpleLock * IOSimpleLockAlloc( void )
{
- IOSimpleLock * lock;
-
- lock = (IOSimpleLock *) IOMalloc( sizeof(IOSimpleLock));
- if( lock)
- IOSimpleLockInit( lock );
-
- return( lock );
+ return( lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL) );
}
void IOSimpleLockInit( IOSimpleLock * lock)
{
- simple_lock_init( (simple_lock_t) lock, ETAP_IO_AHA );
+ lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}
void IOSimpleLockFree( IOSimpleLock * lock )
{
- IOFree( lock, sizeof(IOSimpleLock));
+ lck_spin_free( lock, IOLockGroup);
+}
+
+lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock)
+{
+ return( (lck_spin_t *)lock);
}
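+
+/*
+ * Illustrative sketch (not part of this change): IOSimpleLock is a
+ * spinlock, so critical sections must be short and must not block.
+ * When the data is also touched from interrupt context, use the
+ * interrupt-disabling variants declared in IOKit/IOLocks.h:
+ *
+ *     IOInterruptState is = IOSimpleLockLockDisableInterrupt(slock);
+ *     // ... brief, non-blocking critical section ...
+ *     IOSimpleLockUnlockEnableInterrupt(slock, is);
+ */
+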
} /* extern "C" */