X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..c6bf4f310a33a9262d455ea4d3f0630b1255e3fe:/iokit/Kernel/IOLocks.cpp

diff --git a/iokit/Kernel/IOLocks.cpp b/iokit/Kernel/IOLocks.cpp
index f9940461e..3b8a95359 100644
--- a/iokit/Kernel/IOLocks.cpp
+++ b/iokit/Kernel/IOLocks.cpp
@@ -1,195 +1,317 @@
 /*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
- */
-/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
- *
- * HISTORY
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #include <IOKit/system.h>
-
 #include <IOKit/IOReturn.h>
 #include <IOKit/IOLib.h>
 #include <IOKit/assert.h>
+#include <IOKit/IOLocksPrivate.h>
+
 extern "C" {
-#include <kern/simple_lock.h>
-#include <machine/machine_routines.h>
+#include <kern/locks.h>
 
-IOLock * IOLockAlloc( void )
+#if defined(__x86_64__)
+/* Synthetic event if none is specified, for backwards compatibility only. */
+static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
+#endif
+
+void
+IOLockInitWithState( IOLock * lock, IOLockState state)
 {
-    return( mutex_alloc(ETAP_IO_AHA) );
+	if (state == kIOLockStateLocked) {
+		lck_mtx_lock( lock);
+	}
 }
 
-void IOLockFree( IOLock * lock)
+IOLock *
+IOLockAlloc( void )
 {
-    mutex_free( lock );
+	return lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
 }
 
-void IOLockInitWithState( IOLock * lock, IOLockState state)
+void
+IOLockFree( IOLock * lock)
 {
-    mutex_init( lock, ETAP_IO_AHA);
+	lck_mtx_free( lock, IOLockGroup);
+}
 
-    if( state == kIOLockStateLocked)
-        IOLockLock( lock);
+lck_mtx_t *
+IOLockGetMachLock( IOLock * lock)
+{
+	return (lck_mtx_t *)lock;
 }
 
-struct _IORecursiveLock {
-    mutex_t *	mutex;
-    thread_t	thread;
-    UInt32	count;
-};
+int
+IOLockSleep( IOLock * lock, void *event, UInt32 interType)
+{
+	return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
+}
 
-IORecursiveLock * IORecursiveLockAlloc( void )
+int
+IOLockSleepDeadline( IOLock * lock, void *event,
+    AbsoluteTime deadline, UInt32 interType)
 {
-    _IORecursiveLock * lock;
+	return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
+	    (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
+}
 
-    lock = IONew( _IORecursiveLock, 1);
-    if( !lock)
-        return( 0 );
+void
+IOLockWakeup(IOLock * lock, void *event, bool oneThread)
+{
+	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
+}
 
-    lock->mutex = mutex_alloc(ETAP_IO_AHA);
-    if( lock->mutex) {
-        lock->thread = 0;
-        lock->count = 0;
-    } else {
-        IODelete( lock, _IORecursiveLock, 1);
-        lock = 0;
-    }
-    return( (IORecursiveLock *) lock );
+#if defined(__x86_64__)
+/*
+ * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime
+ * to this function, which supports a NULL event.
+ */
+int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
+int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+    AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
+void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");
+
+int
+IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
+{
+	if (event == NULL) {
+		event = (void *)&IOLockSleep_NO_EVENT;
+	}
+
+	return IOLockSleep(lock, event, interType);
 }
 
-void IORecursiveLockFree( IORecursiveLock * _lock )
+int
+IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+    AbsoluteTime deadline, UInt32 interType)
 {
-    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+	if (event == NULL) {
+		event = (void *)&IOLockSleep_NO_EVENT;
+	}
 
-    mutex_free( lock->mutex );
-    IODelete( lock, _IORecursiveLock, 1);
+	return IOLockSleepDeadline(lock, event, deadline, interType);
 }
 
-void IORecursiveLockLock( IORecursiveLock * _lock)
+void
+IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
 {
-    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+	if (event == NULL) {
+		event = (void *)&IOLockSleep_NO_EVENT;
+	}
 
-    if( lock->thread == IOThreadSelf())
-        lock->count++;
-    else {
-        _mutex_lock( lock->mutex );
-        assert( lock->thread == 0 );
-        assert( lock->count == 0 );
-        lock->thread = IOThreadSelf();
-        lock->count = 1;
-    }
+	IOLockWakeup(lock, event, oneThread);
 }
+#endif /* defined(__x86_64__) */
+
 
-boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
+struct _IORecursiveLock {
+	lck_mtx_t	mutex;
+	lck_grp_t	*group;
+	thread_t	thread;
+	UInt32		count;
+};
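
The IOLockSleep/IOLockWakeup routines added above are thin wrappers over lck_mtx_sleep and thread_wakeup_prim: the mutex is dropped atomically around the wait and reacquired before return, and the event is just an opaque address. A minimal sketch of the usual producer/consumer pattern built on this API, assuming a hypothetical driver-side QueueState type and WaitForWork/PostWork helpers (none of which are part of this diff):

#include <IOKit/IOLocks.h>

struct QueueState {
	IOLock * lock;   // allocated once with IOLockAlloc()
	bool     ready;  // protected by 'lock'
};

static void
WaitForWork(QueueState * q)
{
	IOLockLock(q->lock);
	while (!q->ready) {
		// Drops 'lock', sleeps on &q->ready, and reacquires 'lock'
		// before returning (lck_mtx_sleep semantics). Re-check the
		// predicate in a loop in case of spurious wakeups.
		IOLockSleep(q->lock, &q->ready, THREAD_UNINT);
	}
	q->ready = false;
	IOLockUnlock(q->lock);
}

static void
PostWork(QueueState * q)
{
	IOLockLock(q->lock);
	q->ready = true;
	IOLockUnlock(q->lock);
	IOLockWakeup(q->lock, &q->ready, true /* wake one thread */);
}

Waking with the same event address that the waiter slept on is what ties the two sides together; the lock argument exists for interface symmetry.
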
+
+IORecursiveLock *
+IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
 {
-    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+	_IORecursiveLock * lock;
+
+	if (lockGroup == NULL) {
+		return NULL;
+	}
 
-    if( lock->thread == IOThreadSelf()) {
-        lock->count++;
-        return( true );
-    } else {
-        if( _mutex_try( lock->mutex )) {
-            assert( lock->thread == 0 );
-            assert( lock->count == 0 );
-            lock->thread = IOThreadSelf();
-            lock->count = 1;
-            return( true );
+	lock = IONew( _IORecursiveLock, 1 );
+	if (!lock) {
+		return NULL;
 	}
-    }
-    return( false );
+
+	lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL );
+	lock->group = lockGroup;
+	lock->thread = NULL;
+	lock->count = 0;
+
+	return (IORecursiveLock *) lock;
 }
 
-void IORecursiveLockUnlock( IORecursiveLock * _lock)
+
+IORecursiveLock *
+IORecursiveLockAlloc( void )
 {
-    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+	return IORecursiveLockAllocWithLockGroup( IOLockGroup );
+}
 
-    assert( lock->thread == IOThreadSelf() );
+void
+IORecursiveLockFree( IORecursiveLock * _lock )
+{
+	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+
+	lck_mtx_destroy(&lock->mutex, lock->group);
+	IODelete( lock, _IORecursiveLock, 1 );
+}
+
+lck_mtx_t *
+IORecursiveLockGetMachLock( IORecursiveLock * lock )
+{
+	return &lock->mutex;
+}
+
+void
+IORecursiveLockLock( IORecursiveLock * _lock)
+{
+	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+
+	if (lock->thread == IOThreadSelf()) {
+		lock->count++;
+	} else {
+		lck_mtx_lock( &lock->mutex );
+		assert( lock->thread == NULL );
+		assert( lock->count == 0 );
+		lock->thread = IOThreadSelf();
+		lock->count = 1;
+	}
+}
 
-    if( 0 == (--lock->count)) {
-        lock->thread = 0;
-        mutex_unlock( lock->mutex );
-    }
+boolean_t
+IORecursiveLockTryLock( IORecursiveLock * _lock)
+{
+	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+
+	if (lock->thread == IOThreadSelf()) {
+		lock->count++;
+		return true;
+	} else {
+		if (lck_mtx_try_lock( &lock->mutex )) {
+			assert( lock->thread == NULL );
+			assert( lock->count == 0 );
+			lock->thread = IOThreadSelf();
+			lock->count = 1;
+			return true;
+		}
+	}
+	return false;
 }
 
-boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
+void
+IORecursiveLockUnlock( IORecursiveLock * _lock)
 {
-    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
 
-    return( lock->thread == IOThreadSelf());
+	assert( lock->thread == IOThreadSelf());
+
+	if (0 == (--lock->count)) {
+		lock->thread = NULL;
+		lck_mtx_unlock( &lock->mutex );
+	}
 }
 
-int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
+boolean_t
+IORecursiveLockHaveLock( const IORecursiveLock * _lock)
 {
-    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
-    UInt32 count = lock->count;
-    int res;
+	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
 
-    assert(lock->thread == IOThreadSelf());
-    assert(lock->count == 1 || interType == THREAD_UNINT);
-
-    assert_wait((event_t) event, (int) interType);
-    lock->count = 0;
-    lock->thread = 0;
-    mutex_unlock(lock->mutex);
-
-    res = thread_block(0);
+	return lock->thread == IOThreadSelf();
 }
 
-    if (THREAD_AWAKENED == res) {
-        _mutex_lock(lock->mutex);
-        assert(lock->thread == 0);
-        assert(lock->count == 0);
-        lock->thread = IOThreadSelf();
-        lock->count = count;
-    }
+int
+IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
+{
+	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+	UInt32 count = lock->count;
+	int res;
+
+	assert(lock->thread == IOThreadSelf());
+
+	lock->count = 0;
+	lock->thread = NULL;
+	res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
+
+	// Must re-establish the recursive lock no matter why we woke up,
+	// otherwise we would potentially leave the return path corrupted.
+	assert(lock->thread == NULL);
+	assert(lock->count == 0);
+	lock->thread = IOThreadSelf();
+	lock->count = count;
+	return res;
+}
 
-    return res;
+int
+IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
+    AbsoluteTime deadline, UInt32 interType)
+{
+	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+	UInt32 count = lock->count;
+	int res;
+
+	assert(lock->thread == IOThreadSelf());
+
+	lock->count = 0;
+	lock->thread = NULL;
+	res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
+	    (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
+
+	// Must re-establish the recursive lock no matter why we woke up,
+	// otherwise we would potentially leave the return path corrupted.
+	assert(lock->thread == NULL);
+	assert(lock->count == 0);
+	lock->thread = IOThreadSelf();
+	lock->count = count;
+	return res;
 }
 
-void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
+void
+IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
 {
-    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
+	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
 }
 
 /*
  * Complex (read/write) lock operations
  */
 
-IORWLock * IORWLockAlloc( void )
+IORWLock *
+IORWLockAlloc( void )
 {
-    IORWLock * lock;
-
-    lock = lock_alloc( true, ETAP_IO_AHA, ETAP_IO_AHA);
+	return lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL);
+}
 
-    return( lock);
+void
+IORWLockFree( IORWLock * lock)
+{
+	lck_rw_free( lock, IOLockGroup);
 }
 
-void IORWLockFree( IORWLock * lock)
+lck_rw_t *
+IORWLockGetMachLock( IORWLock * lock)
 {
-    lock_free( lock );
+	return (lck_rw_t *)lock;
 }
 
@@ -197,27 +319,57 @@ void IORWLockFree( IORWLock * lock)
  * Spin locks
  */
 
-IOSimpleLock * IOSimpleLockAlloc( void )
+IOSimpleLock *
+IOSimpleLockAlloc( void )
 {
-    IOSimpleLock * lock;
+	return lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL);
+}
 
-    lock = (IOSimpleLock *) IOMalloc( sizeof(IOSimpleLock));
-    if( lock)
-        IOSimpleLockInit( lock );
+void
+IOSimpleLockInit( IOSimpleLock * lock)
+{
+	lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
+}
 
-    return( lock );
+void
+IOSimpleLockDestroy( IOSimpleLock * lock )
+{
+	lck_spin_destroy(lock, IOLockGroup);
 }
 
-void IOSimpleLockInit( IOSimpleLock * lock)
+void
+IOSimpleLockFree( IOSimpleLock * lock )
 {
-    simple_lock_init( (simple_lock_t) lock, ETAP_IO_AHA );
+	lck_spin_free( lock, IOLockGroup);
 }
 
-void IOSimpleLockFree( IOSimpleLock * lock )
+lck_spin_t *
+IOSimpleLockGetMachLock( IOSimpleLock * lock)
 {
-    IOFree( lock, sizeof(IOSimpleLock));
+	return (lck_spin_t *)lock;
 }
 
-} /* extern "C" */
+#ifndef IOLOCKS_INLINE
+/*
+ * Lock assertions
+ */
+
+void
+IOLockAssert(IOLock * lock, IOLockAssertState type)
+{
+	LCK_MTX_ASSERT(lock, type);
+}
+
+void
+IORWLockAssert(IORWLock * lock, IORWLockAssertState type)
+{
+	LCK_RW_ASSERT(lock, type);
+}
+
+void
+IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type)
+{
+	LCK_SPIN_ASSERT(lock, type);
+}
+#endif /* !IOLOCKS_INLINE */
+} /* extern "C" */
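
The recursive lock above tracks an owning thread plus a nesting count, which is why IORecursiveLockSleep saves the count, releases the underlying mutex completely, and restores the count after waking. A short usage sketch of that ownership model, assuming a hypothetical gDriverLock and ReentrantEntryPoint (not part of this diff):

#include <IOKit/IOLocks.h>

static IORecursiveLock * gDriverLock;  // created once with IORecursiveLockAlloc()

static void
ReentrantEntryPoint(void)
{
	IORecursiveLockLock(gDriverLock);    // owner set, count = 1
	IORecursiveLockLock(gDriverLock);    // same thread: count = 2, no deadlock
	assert(IORecursiveLockHaveLock(gDriverLock));
	IORecursiveLockUnlock(gDriverLock);  // count = 1, mutex still held
	IORecursiveLockUnlock(gDriverLock);  // count = 0, mutex released
}

Because only the count is examined on the owning thread, re-entry never touches the Mach mutex; the lck_mtx_t is taken and released exactly once per outermost lock/unlock pair.
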