X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/ff6e181ae92fc6f1e89841290f461d1f2f9badd9..7e41aa883dd258f888d0470250eead40a53ef1f5:/iokit/Kernel/IOLocks.cpp?ds=sidebyside

diff --git a/iokit/Kernel/IOLocks.cpp b/iokit/Kernel/IOLocks.cpp
index 29537eefe..2febff6c4 100644
--- a/iokit/Kernel/IOLocks.cpp
+++ b/iokit/Kernel/IOLocks.cpp
@@ -1,14 +1,19 @@
 /*
- * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -18,17 +23,8 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
-/*
- * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
- *
- * HISTORY
- *
- */
-
-
-#define IOLOCKS_CPP 1
 
 #include
 
@@ -36,9 +32,16 @@
 #include
 #include
 
+#include
+
 extern "C" {
 #include
 
+#if defined(__x86_64__)
+/* Synthetic event if none is specified, for backwards compatibility only. */
+static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
+#endif
+
 void IOLockInitWithState( IOLock * lock, IOLockState state)
 {
     if( state == kIOLockStateLocked)
@@ -62,13 +65,13 @@ lck_mtx_t * IOLockGetMachLock( IOLock * lock)
 
 int IOLockSleep( IOLock * lock, void *event, UInt32 interType)
 {
-    return (int) lck_mtx_sleep(lock, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);
+    return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
 }
 
 int IOLockSleepDeadline( IOLock * lock, void *event,
     AbsoluteTime deadline, UInt32 interType)
 {
-    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_DEFAULT, (event_t) event,
+    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
         (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
 }
 
@@ -77,44 +80,91 @@ void IOLockWakeup(IOLock * lock, void *event, bool oneThread)
     thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
 }
 
+#if defined(__x86_64__)
+/*
+ * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime to this function,
+ * which supports a NULL event,
+ */
+int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
+int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+    AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
+void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");
+
+int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
+{
+    if (event == NULL)
+        event = (void *)&IOLockSleep_NO_EVENT;
+
+    return IOLockSleep(lock, event, interType);
+}
+
+int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
+    AbsoluteTime deadline, UInt32 interType)
+{
+    if (event == NULL)
+        event = (void *)&IOLockSleep_NO_EVENT;
+
+    return IOLockSleepDeadline(lock, event, deadline, interType);
+}
+
+void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
+{
+    if (event == NULL)
+        event = (void *)&IOLockSleep_NO_EVENT;
+
+    IOLockWakeup(lock, event, oneThread);
+}
+#endif /* defined(__x86_64__) */
+
 
 struct _IORecursiveLock {
-    lck_mtx_t *mutex;
-    thread_t   thread;
-    UInt32     count;
+    lck_mtx_t *mutex;
+    lck_grp_t *group;
+    thread_t   thread;
+    UInt32     count;
 };
 
-IORecursiveLock * IORecursiveLockAlloc( void )
+IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
 {
     _IORecursiveLock * lock;
 
-    lock = IONew( _IORecursiveLock, 1);
-    if( !lock)
+    if( lockGroup == 0 )
+        return( 0 );
+
+    lock = IONew( _IORecursiveLock, 1 );
+    if( !lock )
         return( 0 );
 
-    lock->mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
-    if( lock->mutex) {
+    lock->mutex = lck_mtx_alloc_init( lockGroup, LCK_ATTR_NULL );
+    if( lock->mutex ) {
+        lock->group  = lockGroup;
         lock->thread = 0;
         lock->count  = 0;
     } else {
-        IODelete( lock, _IORecursiveLock, 1);
+        IODelete( lock, _IORecursiveLock, 1 );
        lock = 0;
     }
 
     return( (IORecursiveLock *) lock );
 }
 
+
+IORecursiveLock * IORecursiveLockAlloc( void )
+{
+    return IORecursiveLockAllocWithLockGroup( IOLockGroup );
+}
+
 void IORecursiveLockFree( IORecursiveLock * _lock )
 {
     _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
-
-    lck_mtx_free( lock->mutex , IOLockGroup);
-    IODelete( lock, _IORecursiveLock, 1);
+
+    lck_mtx_free( lock->mutex, lock->group );
+    IODelete( lock, _IORecursiveLock, 1 );
 }
 
-lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock)
+lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock )
 {
-    return( lock->mutex);
+    return( lock->mutex );
 }
 
 void IORecursiveLockLock( IORecursiveLock * _lock)
@@ -177,11 +227,33 @@ int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
     int res;
 
     assert(lock->thread == IOThreadSelf());
-    assert(lock->count == 1 || interType == THREAD_UNINT);
 
     lock->count = 0;
     lock->thread = 0;
-    res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);
+    res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
+
+    // Must re-establish the recursive lock no matter why we woke up
+    // otherwise we would potentially leave the return path corrupted.
+    assert(lock->thread == 0);
+    assert(lock->count == 0);
+    lock->thread = IOThreadSelf();
+    lock->count = count;
+    return res;
+}
+
+int IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
+    AbsoluteTime deadline, UInt32 interType)
+{
+    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
+    UInt32 count = lock->count;
+    int res;
+
+    assert(lock->thread == IOThreadSelf());
+
+    lock->count = 0;
+    lock->thread = 0;
+    res = lck_mtx_sleep_deadline(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
+        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
 
     // Must re-establish the recursive lock no matter why we woke up
     // otherwise we would potentially leave the return path corrupted.
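
The IORecursiveLockSleep hunks above switch the sleep calls from LCK_SLEEP_DEFAULT to LCK_SLEEP_PROMOTED_PRI, drop the recursion count before blocking, and restore it unconditionally on wakeup; the new IORecursiveLockSleepDeadline adds a timed variant of the same pattern. For context, a minimal caller-side sketch follows, assuming a hypothetical kext with a condition flag and a producer routine; none of the names below (gDataReady, gLock, waitForData, publishData) come from this change:

    #include <IOKit/IOLocks.h>
    #include <IOKit/IOReturn.h>

    static bool              gDataReady;  // hypothetical condition protected by gLock
    static IORecursiveLock  *gLock;       // e.g. from IORecursiveLockAlloc()

    // Consumer: block until the condition is published or the deadline passes.
    static IOReturn waitForData(AbsoluteTime deadline)
    {
        IORecursiveLockLock(gLock);
        while (!gDataReady) {
            // Drops the recursion state, sleeps on &gDataReady, then re-takes the
            // mutex and restores the count, as implemented in the hunk above.
            int rc = IORecursiveLockSleepDeadline(gLock, &gDataReady, deadline, THREAD_UNINT);
            if (rc != THREAD_AWAKENED)
                break;  // e.g. THREAD_TIMED_OUT
        }
        IOReturn result = gDataReady ? kIOReturnSuccess : kIOReturnTimeout;
        IORecursiveLockUnlock(gLock);
        return result;
    }

    // Producer: publish the condition and wake every thread sleeping on the event.
    static void publishData(void)
    {
        IORecursiveLockLock(gLock);
        gDataReady = true;
        IORecursiveLockWakeup(gLock, &gDataReady, false);  // false: wake all waiters
        IORecursiveLockUnlock(gLock);
    }

The x86_64 shims earlier in the diff apply only to the plain IOLockSleep, IOLockSleepDeadline and IOLockWakeup symbols: kexts built against pre-Darwin 14 headers may still pass a NULL event there, which the shims redirect to the synthetic IOLockSleep_NO_EVENT; newly built code is expected to pass a real event address such as &gDataReady above.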
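
The _IORecursiveLock changes store the lck_grp_t the mutex was allocated from, so IORecursiveLockFree now releases the mutex against lock->group rather than the global IOLockGroup, and IORecursiveLockAlloc becomes a thin wrapper around the new IORecursiveLockAllocWithLockGroup. A minimal sketch of how a kext might use the lock-group variant follows; the group name "com.example.driver" and all identifiers below are illustrative assumptions, not part of the diff:

    #include <IOKit/IOLocks.h>
    extern "C" {
    #include <kern/locks.h>
    }

    // Hypothetical per-driver lock group so this lock is attributed to the driver
    // in lock statistics rather than to the shared IOLockGroup.
    static lck_grp_t       *gDriverLockGroup;
    static IORecursiveLock *gDriverLock;

    static bool setupLocking(void)
    {
        gDriverLockGroup = lck_grp_alloc_init("com.example.driver", LCK_GRP_ATTR_NULL);
        if (gDriverLockGroup == NULL)
            return false;

        // Returns 0 when the group is NULL or the mutex cannot be allocated.
        gDriverLock = IORecursiveLockAllocWithLockGroup(gDriverLockGroup);
        return (gDriverLock != NULL);
    }

    static void teardownLocking(void)
    {
        if (gDriverLock)
            IORecursiveLockFree(gDriverLock);   // frees the mutex against lock->group
        if (gDriverLockGroup)
            lck_grp_free(gDriverLockGroup);
        gDriverLock = NULL;
        gDriverLockGroup = NULL;
    }

Because the lock keeps a pointer to its group, the group must outlive the lock: freeing the group before calling IORecursiveLockFree would leave lck_mtx_free operating on a destroyed lck_grp_t.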