/*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
- *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
- *
- * HISTORY
*
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-
#include <IOKit/system.h>
#include <IOKit/IOReturn.h>
-#include <IOKit/IOLib.h>
+#include <IOKit/IOLib.h>
#include <IOKit/assert.h>
+#include <IOKit/IOLocksPrivate.h>
+
extern "C" {
-#include <kern/simple_lock.h>
-#include <machine/machine_routines.h>
+#include <kern/locks.h>
-IOLock * IOLockAlloc( void )
+#if defined(__x86_64__)
+/* Synthetic event if none is specified, for backwards compatibility only. */
+static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
+#endif
+
+void
+IOLockInitWithState( IOLock * lock, IOLockState state)
{
- return( mutex_alloc(ETAP_IO_AHA) );
+ if (state == kIOLockStateLocked) {
+ lck_mtx_lock( lock);
+ }
}
-void IOLockFree( IOLock * lock)
+IOLock *
+IOLockAlloc( void )
{
- mutex_free( lock );
+ return lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}
-void IOLockInitWithState( IOLock * lock, IOLockState state)
+void
+IOLockFree( IOLock * lock)
{
- if( state == kIOLockStateLocked)
- IOLockLock( lock);
+ lck_mtx_free( lock, IOLockGroup);
}
-struct _IORecursiveLock {
- mutex_t * mutex;
- thread_t thread;
- UInt32 count;
-};
+/* Expose the underlying Mach mutex so callers can use lck_mtx_* APIs directly. */
+lck_mtx_t *
+IOLockGetMachLock( IOLock * lock)
+{
+	return (lck_mtx_t *)lock;
+}
-IORecursiveLock * IORecursiveLockAlloc( void )
+int
+IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
- _IORecursiveLock * lock;
+ return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
+}
- lock = IONew( _IORecursiveLock, 1);
- if( !lock)
- return( 0 );
+/*
+ * Sleep on 'event' until 'deadline', atomically dropping and re-acquiring
+ * 'lock' around the wait (LCK_SLEEP_PROMOTED_PRI); returns the wait result.
+ */
+int
+IOLockSleepDeadline( IOLock * lock, void *event,
+    AbsoluteTime deadline, UInt32 interType)
+{
+	return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
+	           (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
+}
- lock->mutex = mutex_alloc(ETAP_IO_AHA);
- if( lock->mutex) {
- lock->thread = 0;
- lock->count = 0;
- } else {
- IODelete( lock, _IORecursiveLock, 1);
- lock = 0;
- }
+/* Wake thread(s) blocked on 'event'; the 'lock' argument is unused here. */
+void
+IOLockWakeup(IOLock * lock, void *event, bool oneThread)
+{
+	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
+}
- return( (IORecursiveLock *) lock );
+
+#if defined(__x86_64__)
+/*
+ * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime to this function,
+ * which supports a NULL event.
+ */
+int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
+int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+ AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
+void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");
+
+int
+IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
+{
+ if (event == NULL) {
+ event = (void *)&IOLockSleep_NO_EVENT;
+ }
+
+ return IOLockSleep(lock, event, interType);
}
-void IORecursiveLockFree( IORecursiveLock * _lock )
+int
+IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+ AbsoluteTime deadline, UInt32 interType)
{
- _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+ if (event == NULL) {
+ event = (void *)&IOLockSleep_NO_EVENT;
+ }
- mutex_free( lock->mutex );
- IODelete( lock, _IORecursiveLock, 1);
+ return IOLockSleepDeadline(lock, event, deadline, interType);
}
-void IORecursiveLockLock( IORecursiveLock * _lock)
+void
+IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
- _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+ if (event == NULL) {
+ event = (void *)&IOLockSleep_NO_EVENT;
+ }
- if( lock->thread == IOThreadSelf())
- lock->count++;
- else {
- mutex_lock( lock->mutex );
- assert( lock->thread == 0 );
- assert( lock->count == 0 );
- lock->thread = IOThreadSelf();
- lock->count = 1;
- }
+ IOLockWakeup(lock, event, oneThread);
}
+#endif /* defined(__x86_64__) */
-boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
+
+/* Recursive mutex: a Mach mutex plus owner/recursion-count bookkeeping. */
+struct _IORecursiveLock {
+	lck_mtx_t       mutex;   /* underlying Mach mutex */
+	lck_grp_t       *group;  /* group passed at init; needed for lck_mtx_destroy */
+	thread_t        thread;  /* owning thread, 0 when unlocked */
+	UInt32          count;   /* recursion depth held by the owner */
+};
+
+IORecursiveLock *
+IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
- _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+ _IORecursiveLock * lock;
- if( lock->thread == IOThreadSelf()) {
- lock->count++;
- return( true );
- } else {
- if( mutex_try( lock->mutex )) {
- assert( lock->thread == 0 );
- assert( lock->count == 0 );
- lock->thread = IOThreadSelf();
- lock->count = 1;
- return( true );
+ if (lockGroup == 0) {
+ return 0;
}
- }
- return( false );
+
+ lock = IONew( _IORecursiveLock, 1 );
+ if (!lock) {
+ return 0;
+ }
+
+ lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL );
+ lock->group = lockGroup;
+ lock->thread = 0;
+ lock->count = 0;
+
+ return (IORecursiveLock *) lock;
+}
+
+
+IORecursiveLock *
+IORecursiveLockAlloc( void )
+{
+ return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}
-void IORecursiveLockUnlock( IORecursiveLock * _lock)
+void
+IORecursiveLockFree( IORecursiveLock * _lock )
{
- _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+ _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
- assert( lock->thread == IOThreadSelf() );
+ lck_mtx_destroy(&lock->mutex, lock->group);
+ IODelete( lock, _IORecursiveLock, 1 );
+}
- if( 0 == (--lock->count)) {
- lock->thread = 0;
- mutex_unlock( lock->mutex );
- }
+lck_mtx_t *
+IORecursiveLockGetMachLock( IORecursiveLock * lock )
+{
+ return &lock->mutex;
}
-boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
+void
+IORecursiveLockLock( IORecursiveLock * _lock)
{
- _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+ _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+
+ if (lock->thread == IOThreadSelf()) {
+ lock->count++;
+ } else {
+ lck_mtx_lock( &lock->mutex );
+ assert( lock->thread == 0 );
+ assert( lock->count == 0 );
+ lock->thread = IOThreadSelf();
+ lock->count = 1;
+ }
+}
- return( lock->thread == IOThreadSelf());
+boolean_t
+IORecursiveLockTryLock( IORecursiveLock * _lock)
+{
+ _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+
+ if (lock->thread == IOThreadSelf()) {
+ lock->count++;
+ return true;
+ } else {
+ if (lck_mtx_try_lock( &lock->mutex )) {
+ assert( lock->thread == 0 );
+ assert( lock->count == 0 );
+ lock->thread = IOThreadSelf();
+ lock->count = 1;
+ return true;
+ }
+ }
+ return false;
}
-int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
+void
+IORecursiveLockUnlock( IORecursiveLock * _lock)
{
- _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
- UInt32 count = lock->count;
- int res;
+ _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
- assert(lock->thread == IOThreadSelf());
- assert(lock->count == 1 || interType == THREAD_UNINT);
-
- lock->count = 0;
- lock->thread = 0;
- res = thread_sleep_mutex((event_t) event, lock->mutex, (int) interType);
+ assert( lock->thread == IOThreadSelf());
- // Must re-establish the recursive lock no matter why we woke up
- // otherwise we would potentially leave the return path corrupted.
- assert(lock->thread == 0);
- assert(lock->count == 0);
- lock->thread = IOThreadSelf();
- lock->count = count;
- return res;
+ if (0 == (--lock->count)) {
+ lock->thread = 0;
+ lck_mtx_unlock( &lock->mutex );
+ }
}
-void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
+boolean_t
+IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
- thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
+ _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+
+ return lock->thread == IOThreadSelf();
+}
+
+/*
+ * Sleep on 'event' while holding a recursive lock. The full recursion state
+ * (count, owner) is saved and cleared before blocking, so the mutex is truly
+ * released, then restored after wakeup. Caller must own the lock.
+ */
+int
+IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
+{
+	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+	UInt32 count = lock->count;
+	int res;
+
+	assert(lock->thread == IOThreadSelf());
+
+	lock->count = 0;
+	lock->thread = 0;
+	res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
+
+	// Must re-establish the recursive lock no matter why we woke up
+	// otherwise we would potentially leave the return path corrupted.
+	assert(lock->thread == 0);
+	assert(lock->count == 0);
+	lock->thread = IOThreadSelf();
+	lock->count = count;
+	return res;
+}
+
+/*
+ * Deadline variant of IORecursiveLockSleep: saves and clears the recursion
+ * state, sleeps until 'event' is signalled or 'deadline' passes, then
+ * restores ownership. Caller must own the lock.
+ */
+int
+IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
+    AbsoluteTime deadline, UInt32 interType)
+{
+	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+	UInt32 count = lock->count;
+	int res;
+
+	assert(lock->thread == IOThreadSelf());
+
+	lock->count = 0;
+	lock->thread = 0;
+	res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
+	    (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
+
+	// Must re-establish the recursive lock no matter why we woke up
+	// otherwise we would potentially leave the return path corrupted.
+	assert(lock->thread == 0);
+	assert(lock->count == 0);
+	lock->thread = IOThreadSelf();
+	lock->count = count;
+	return res;
+}
+
+/* Wake thread(s) blocked on 'event'; the lock argument is unnamed/unused. */
+void
+IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
+{
+	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
/*
* Complex (read/write) lock operations
*/
-IORWLock * IORWLockAlloc( void )
+IORWLock *
+IORWLockAlloc( void )
{
- IORWLock * lock;
-
- lock = lock_alloc( true, ETAP_IO_AHA, ETAP_IO_AHA);
+ return lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL);
+}
- return( lock);
+void
+IORWLockFree( IORWLock * lock)
+{
+ lck_rw_free( lock, IOLockGroup);
}
-void IORWLockFree( IORWLock * lock)
+lck_rw_t *
+IORWLockGetMachLock( IORWLock * lock)
{
- lock_free( lock );
+ return (lck_rw_t *)lock;
}
/*
 * Spin locks
 */
-IOSimpleLock * IOSimpleLockAlloc( void )
+IOSimpleLock *
+IOSimpleLockAlloc( void )
{
- IOSimpleLock * lock;
-
- lock = (IOSimpleLock *) IOMalloc( sizeof(IOSimpleLock));
- if( lock)
- IOSimpleLockInit( lock );
+ return lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL);
+}
- return( lock );
+void
+IOSimpleLockInit( IOSimpleLock * lock)
+{
+ lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}
-void IOSimpleLockInit( IOSimpleLock * lock)
+void
+IOSimpleLockFree( IOSimpleLock * lock )
{
- simple_lock_init( (simple_lock_t) lock, ETAP_IO_AHA );
+ lck_spin_free( lock, IOLockGroup);
}
-void IOSimpleLockFree( IOSimpleLock * lock )
+lck_spin_t *
+IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
- IOFree( lock, sizeof(IOSimpleLock));
+ return (lck_spin_t *)lock;
}
-} /* extern "C" */
+#ifndef IOLOCKS_INLINE
+/*
+ * Lock assertions
+ */
+/* Assert the mutex is (or is not) owned per 'type'; no-op in release builds. */
+void
+IOLockAssert(IOLock * lock, IOLockAssertState type)
+{
+	LCK_MTX_ASSERT(lock, type);
+}
+/* Assert the rw-lock's held state (shared/exclusive/unheld) per 'type'. */
+void
+IORWLockAssert(IORWLock * lock, IORWLockAssertState type)
+{
+	LCK_RW_ASSERT(lock, type);
+}
+
+/* Assert the spin lock's held state per 'type'; no-op in release builds. */
+void
+IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type)
+{
+	/* Fixed: body previously referenced undeclared 'l' instead of 'lock'. */
+	LCK_SPIN_ASSERT(lock, type);
+}
+#endif /* !IOLOCKS_INLINE */
+} /* extern "C" */