/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

#include <kern/locks.h>
#if defined(__x86_64__)
/* Synthetic event if none is specified, for backwards compatibility only. */
static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
#endif
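/*
 * Note: the event pointer passed to the sleep/wakeup primitives acts as the
 * key that matches sleeping threads with wakeups, so a NULL event cannot be
 * used as a wait channel. The synthetic event above gives legacy callers that
 * passed NULL a stable kernel address to sleep on and be woken from instead.
 */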
void
IOLockInitWithState( IOLock * lock, IOLockState state)
{
    if (state == kIOLockStateLocked) {
        lck_mtx_lock( lock);
    }
}

IOLock *
IOLockAlloc( void )
{
    return lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}
void
IOLockFree( IOLock * lock)
{
    lck_mtx_free( lock, IOLockGroup);
}
lck_mtx_t *
IOLockGetMachLock( IOLock * lock)
{
    return (lck_mtx_t *)lock;
}
int
IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
    return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}
int
IOLockSleepDeadline( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}
void
IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
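/*
 * Typical use of the sleep/wakeup primitives above (an illustrative sketch
 * only; the fLock and fDataReady names are hypothetical driver state):
 *
 *    // Waiter: must hold the lock. IOLockSleep() drops it while asleep
 *    // and reacquires it before returning, so the condition must be
 *    // rechecked in a loop.
 *    IOLockLock(fLock);
 *    while (!fDataReady) {
 *        IOLockSleep(fLock, &fDataReady, THREAD_UNINT);
 *    }
 *    IOLockUnlock(fLock);
 *
 *    // Waker: update the condition, then wake the thread(s) sleeping on
 *    // the same event address.
 *    IOLockLock(fLock);
 *    fDataReady = true;
 *    IOLockWakeup(fLock, &fDataReady, false);
 *    IOLockUnlock(fLock);
 */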
#if defined(__x86_64__)
/*
 * For backwards compatibility, kexts built against pre-Darwin 14 headers will
 * bind at runtime to these functions, which accept a NULL event.
 */
int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");
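/*
 * The __asm labels pin these legacy definitions to the original exported
 * symbol names (_IOLockSleep and friends), so previously compiled kexts that
 * linked against those names resolve here. Kexts built against current
 * headers bind to the non-legacy implementations above, which (presumably via
 * versioned aliases in IOLocks.h) are exported under different symbol names.
 */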
int
IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
{
    if (event == NULL) {
        event = (void *)&IOLockSleep_NO_EVENT;
    }

    return IOLockSleep(lock, event, interType);
}
int
IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
    if (event == NULL) {
        event = (void *)&IOLockSleep_NO_EVENT;
    }

    return IOLockSleepDeadline(lock, event, deadline, interType);
}
void
IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
    if (event == NULL) {
        event = (void *)&IOLockSleep_NO_EVENT;
    }

    IOLockWakeup(lock, event, oneThread);
}
#endif /* defined(__x86_64__) */
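/*
 * Recursive lock: a plain Mach mutex plus an owner and a nesting count. The
 * mutex is taken only on a thread's first acquisition; nested acquisitions
 * just increment the count, and the mutex is dropped again when the count
 * returns to zero.
 */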
struct _IORecursiveLock {
    lck_mtx_t   mutex;   // underlying Mach mutex
    lck_grp_t * group;   // lock group used at init, needed for destroy
    thread_t    thread;  // owning thread, or NULL when unowned
    UInt32      count;   // recursion depth held by the owner
};
IORecursiveLock *
IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
    _IORecursiveLock * lock;

    if (lockGroup == NULL) {
        return NULL;
    }

    lock = IONew( _IORecursiveLock, 1 );
    if (!lock) {
        return NULL;
    }

    lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL );
    lock->group = lockGroup;
    lock->thread = NULL;
    lock->count = 0;

    return (IORecursiveLock *) lock;
}
IORecursiveLock *
IORecursiveLockAlloc( void )
{
    return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}
void
IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    lck_mtx_destroy(&lock->mutex, lock->group);
    IODelete( lock, _IORecursiveLock, 1 );
}
lck_mtx_t *
IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
    return &((_IORecursiveLock *) lock)->mutex;
}
void
IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if (lock->thread == IOThreadSelf()) {
        // Already owned by this thread: just deepen the recursion.
        lock->count++;
    } else {
        lck_mtx_lock( &lock->mutex );
        assert( lock->thread == NULL );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}
boolean_t
IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if (lock->thread == IOThreadSelf()) {
        // Already owned by this thread: recursion always succeeds.
        lock->count++;
        return true;
    } else {
        if (lck_mtx_try_lock( &lock->mutex )) {
            assert( lock->thread == NULL );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return true;
        }
    }
    return false;
}
void
IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf());

    if (0 == (--lock->count)) {
        // Last unlock by the owner: clear ownership before dropping the mutex.
        lock->thread = NULL;
        lck_mtx_unlock( &lock->mutex );
    }
}
boolean_t
IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return lock->thread == IOThreadSelf();
}
int
IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    // Fully release the lock before sleeping; the nesting depth is saved
    // above so it can be restored on wakeup.
    lock->count = 0;
    lock->thread = NULL;
    res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);

    // Must re-establish the recursive lock no matter why we woke up,
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == NULL);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;

    return res;
}
int
IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    // Fully release the lock before sleeping; the nesting depth is saved
    // above so it can be restored on wakeup.
    lock->count = 0;
    lock->thread = NULL;
    res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

    // Must re-establish the recursive lock no matter why we woke up,
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == NULL);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;

    return res;
}
void
IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
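/*
 * Because IORecursiveLockSleep() saves and restores the nesting count, a
 * thread may sleep while holding the lock recursively: the lock is fully
 * released for other threads while asleep, and the caller's nesting depth is
 * intact when the call returns. The wait/wake pairing otherwise follows the
 * same event-address protocol sketched above for plain IOLocks.
 */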
/*
 * Complex (read/write) lock operations
 */
IORWLock *
IORWLockAlloc( void )
{
    return lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}
void
IORWLockFree( IORWLock * lock)
{
    lck_rw_free( lock, IOLockGroup);
}
lck_rw_t *
IORWLockGetMachLock( IORWLock * lock)
{
    return (lck_rw_t *)lock;
}
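/*
 * Only allocation, teardown, and the Mach-lock accessor need out-of-line
 * definitions here: when IOLOCKS_INLINE is defined, IOLocks.h maps the
 * read/write lock and unlock operations directly onto the underlying
 * lck_rw_* calls.
 */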
/*
 * Spin locks
 */

IOSimpleLock *
IOSimpleLockAlloc( void )
{
    return lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL);
}
void
IOSimpleLockInit( IOSimpleLock * lock)
{
    lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}
void
IOSimpleLockDestroy( IOSimpleLock * lock )
{
    lck_spin_destroy(lock, IOLockGroup);
}
void
IOSimpleLockFree( IOSimpleLock * lock )
{
    lck_spin_free( lock, IOLockGroup);
}
lck_spin_t *
IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
    return (lck_spin_t *)lock;
}
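/*
 * A waiting thread spins rather than blocks on an IOSimpleLock, so critical
 * sections guarded by one must be short and must never sleep. Code that can
 * run in primary interrupt context should use the interrupt-disabling
 * variants (IOSimpleLockLockDisableInterrupt and friends) declared in
 * IOLocks.h.
 */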
#ifndef IOLOCKS_INLINE
/*
 * Lock assertions
 */
void
IOLockAssert(IOLock * lock, IOLockAssertState type)
{
    LCK_MTX_ASSERT(lock, type);
}
void
IORWLockAssert(IORWLock * lock, IORWLockAssertState type)
{
    LCK_RW_ASSERT(lock, type);
}
void
IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type)
{
    LCK_SPIN_ASSERT(lock, type);
}
#endif /* !IOLOCKS_INLINE */