/* apple/xnu: iokit/Kernel/IOLocks.cpp */
/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

#include <kern/locks.h>
#if defined(__x86_64__)
/* Synthetic event if none is specified, for backwards compatibility only. */
static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
#endif
void IOLockInitWithState( IOLock * lock, IOLockState state)
{
    if( state == kIOLockStateLocked)
        lck_mtx_lock( lock);
}
IOLock * IOLockAlloc( void )
{
    return( lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}
void IOLockFree( IOLock * lock)
{
    lck_mtx_free( lock, IOLockGroup);
}
lck_mtx_t * IOLockGetMachLock( IOLock * lock)
{
    return( (lck_mtx_t *)lock);
}
int IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
    return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}
int IOLockSleepDeadline( IOLock * lock, void *event,
                         AbsoluteTime deadline, UInt32 interType)
{
    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
                                        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}
void IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
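/*
 * Usage sketch (illustrative only, not part of the original file): the
 * canonical condition-wait pattern with IOLockSleep()/IOLockWakeup().
 * The names `gReady`, `waitForReady` and `markReady` are assumptions made
 * for this example.
 */
#if 0
static bool gReady;

static void waitForReady( IOLock * lock )
{
    IOLockLock( lock );
    while( !gReady )
        IOLockSleep( lock, &gReady, THREAD_UNINT );     // atomically drops and retakes the lock
    IOLockUnlock( lock );
}

static void markReady( IOLock * lock )
{
    IOLockLock( lock );
    gReady = true;
    IOLockWakeup( lock, &gReady, false );               // false: wake all waiters on this event
    IOLockUnlock( lock );
}
#endif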
#if defined(__x86_64__)
/*
 * For backwards compatibility, kexts built against pre-Darwin 14 headers
 * will bind at runtime to these functions, which support a NULL event.
 */
int  IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
int  IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
                                        AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");
int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    return IOLockSleep(lock, event, interType);
}
int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
                                       AbsoluteTime deadline, UInt32 interType)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    return IOLockSleepDeadline(lock, event, deadline, interType);
}
void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
    if (event == NULL)
        event = (void *)&IOLockSleep_NO_EVENT;

    IOLockWakeup(lock, event, oneThread);
}
#endif /* defined(__x86_64__) */
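/*
 * Note on the shims above (sketch, illustrative only): a kext built against
 * pre-Darwin 14 headers binds its sleep/wakeup references to the
 * _legacy_x86_64 symbols, so a NULL event from old code is remapped to the
 * shared &IOLockSleep_NO_EVENT wait channel and sleep/wakeup still pair up.
 * `legacyNullEventExample` is a hypothetical name.
 */
#if 0
static void legacyNullEventExample( IOLock * lock )
{
    // As an old kext would have written it; the shim substitutes the
    // synthetic event, so all NULL-event sleepers share one wait channel.
    IOLockSleep( lock, NULL, THREAD_UNINT );
    IOLockWakeup( lock, NULL, false );
}
#endif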
struct _IORecursiveLock {
    lck_mtx_t   *mutex;
    lck_grp_t   *group;
    thread_t    thread;
    UInt32      count;
};
IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
    _IORecursiveLock * lock;

    if( lockGroup == 0 )
        return( 0 );

    lock = IONew( _IORecursiveLock, 1 );
    if( !lock )
        return( 0 );

    lock->mutex = lck_mtx_alloc_init( lockGroup, LCK_ATTR_NULL );
    if( lock->mutex ) {
        lock->group  = lockGroup;
        lock->thread = 0;
        lock->count  = 0;
    } else {
        IODelete( lock, _IORecursiveLock, 1 );
        lock = 0;
    }

    return( (IORecursiveLock *) lock );
}
IORecursiveLock * IORecursiveLockAlloc( void )
{
    return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}
void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    lck_mtx_free( lock->mutex, lock->group );
    IODelete( lock, _IORecursiveLock, 1 );
}
lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
    return( lock->mutex );
}
void IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf())
        lock->count++;
    else {
        lck_mtx_lock( lock->mutex );
        assert( lock->thread == 0 );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}
boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf()) {
        lock->count++;
        return( true );
    } else {
        if( lck_mtx_try_lock( lock->mutex )) {
            assert( lock->thread == 0 );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return( true );
        }
    }
    return( false );
}
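/*
 * Usage sketch (illustrative only): recursive acquisition by the owning
 * thread only bumps `count`; any other thread blocks on the underlying
 * mutex. `helperLocked` and `doWork` are hypothetical names.
 */
#if 0
static void helperLocked( IORecursiveLock * lock )
{
    IORecursiveLockLock( lock );            // re-entry by the owner: count++
    assert( IORecursiveLockHaveLock( lock ));
    IORecursiveLockUnlock( lock );          // count--, mutex still held
}

static void doWork( IORecursiveLock * lock )
{
    IORecursiveLockLock( lock );            // first entry: takes the mutex, count = 1
    helperLocked( lock );
    IORecursiveLockUnlock( lock );          // count reaches 0: mutex released
}
#endif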
void IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf() );

    if( 0 == (--lock->count)) {
        lock->thread = 0;
        lck_mtx_unlock( lock->mutex );
    }
}
boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return( lock->thread == IOThreadSelf());
}
int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);

    // Must re-establish the recursive lock no matter why we woke up,
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;

    return res;
}
int IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
                                  AbsoluteTime deadline, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep_deadline(lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
                                 (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

    // Must re-establish the recursive lock no matter why we woke up,
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;

    return res;
}
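/*
 * Sketch (illustrative only): both sleep variants may be called at any
 * recursion depth; the depth is stashed, the mutex is released for the
 * sleep, and the depth is restored on wakeup. `waitNested` is a
 * hypothetical name.
 */
#if 0
static void waitNested( IORecursiveLock * lock, void * event )
{
    IORecursiveLockLock( lock );
    IORecursiveLockLock( lock );                        // count == 2
    IORecursiveLockSleep( lock, event, THREAD_UNINT );  // count is 2 again on return
    IORecursiveLockUnlock( lock );
    IORecursiveLockUnlock( lock );
}
#endif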
void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
/*
 * Complex (read/write) lock operations
 */

IORWLock * IORWLockAlloc( void )
{
    return( lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}
void IORWLockFree( IORWLock * lock)
{
    lck_rw_free( lock, IOLockGroup);
}
lck_rw_t * IORWLockGetMachLock( IORWLock * lock)
{
    return( (lck_rw_t *)lock);
}
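/*
 * Usage sketch (illustrative only): IORWLockRead()/IORWLockWrite()/
 * IORWLockUnlock() are the inline wrappers from IOKit/IOLocks.h over the
 * lck_rw_t allocated above. `snapshotThenUpdate` is a hypothetical name.
 */
#if 0
static void snapshotThenUpdate( IORWLock * lock )
{
    IORWLockRead( lock );       // shared: concurrent readers are allowed
    /* ... read shared state ... */
    IORWLockUnlock( lock );

    IORWLockWrite( lock );      // exclusive: waits for readers and writers to drain
    /* ... modify shared state ... */
    IORWLockUnlock( lock );
}
#endif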
/*
 * Spin locks
 */

IOSimpleLock * IOSimpleLockAlloc( void )
{
    return( lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL) );
}
void IOSimpleLockInit( IOSimpleLock * lock)
{
    lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}
void IOSimpleLockFree( IOSimpleLock * lock )
{
    lck_spin_free( lock, IOLockGroup );
}
lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
    return( (lck_spin_t *)lock);
}
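/*
 * Usage sketch (illustrative only): a spin lock shared with primary
 * interrupt context should be taken via the interrupt-disabling wrappers
 * declared in IOKit/IOLocks.h. `touchSharedState` is a hypothetical name.
 */
#if 0
static void touchSharedState( IOSimpleLock * lock )
{
    IOInterruptState is = IOSimpleLockLockDisableInterrupt( lock );
    /* ... briefly touch state shared with the interrupt handler ... */
    IOSimpleLockUnlockEnableInterrupt( lock, is );
}
#endif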