/* iokit/Kernel/IOLocks.cpp, from apple/xnu (xnu-7195.101.1) */
/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

extern "C" {
#include <kern/locks.h>

#if defined(__x86_64__)
/* Synthetic event if none is specified, for backwards compatibility only. */
static bool IOLockSleep_NO_EVENT __attribute__((used)) = 0;
#endif

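/*
 * Applies the requested initial state to an already-initialized
 * mutex; no initialization of the lock itself happens here.
 */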
void
IOLockInitWithState( IOLock * lock, IOLockState state)
{
	if (state == kIOLockStateLocked) {
		lck_mtx_lock( lock);
	}
}

IOLock *
IOLockAlloc( void )
{
	return lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

void
IOLockFree( IOLock * lock)
{
	lck_mtx_free( lock, IOLockGroup);
}

lck_mtx_t *
IOLockGetMachLock( IOLock * lock)
{
	return (lck_mtx_t *)lock;
}

int
IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
	return (int) lck_mtx_sleep(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);
}

int
IOLockSleepDeadline( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
	return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
	    (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}

void
IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
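/*
 * Usage sketch (illustrative, not part of this file): lck_mtx_sleep()
 * drops and reacquires the mutex around the block, so a waiter must
 * re-check its condition after waking.  "dataReady" is a hypothetical
 * flag protected by the lock.
 *
 *	IOLockLock(lock);
 *	while (!dataReady) {
 *		IOLockSleep(lock, &dataReady, THREAD_UNINT);
 *	}
 *	IOLockUnlock(lock);
 *
 * and on the producing side:
 *
 *	IOLockLock(lock);
 *	dataReady = true;
 *	IOLockWakeup(lock, &dataReady, false);	// false: wake all waiters
 *	IOLockUnlock(lock);
 */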


#if defined(__x86_64__)
/*
 * For backwards compatibility, kexts built against pre-Darwin 14 headers will bind at runtime to these functions,
 * which support a NULL event.
 */
int IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType) __asm("_IOLockSleep");
int IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType) __asm("_IOLockSleepDeadline");
void IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread) __asm("_IOLockWakeup");
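/*
 * The __asm labels above bind these shims to the original, unsuffixed
 * symbol names, so kexts that linked against the old symbols resolve
 * to the NULL-event tolerant versions below.
 */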

int
IOLockSleep_legacy_x86_64( IOLock * lock, void *event, UInt32 interType)
{
	if (event == NULL) {
		event = (void *)&IOLockSleep_NO_EVENT;
	}

	return IOLockSleep(lock, event, interType);
}

int
IOLockSleepDeadline_legacy_x86_64( IOLock * lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
	if (event == NULL) {
		event = (void *)&IOLockSleep_NO_EVENT;
	}

	return IOLockSleepDeadline(lock, event, deadline, interType);
}

void
IOLockWakeup_legacy_x86_64(IOLock * lock, void *event, bool oneThread)
{
	if (event == NULL) {
		event = (void *)&IOLockSleep_NO_EVENT;
	}

	IOLockWakeup(lock, event, oneThread);
}
#endif /* defined(__x86_64__) */


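/*
 * A recursive lock is a plain mutex plus owner bookkeeping: "thread"
 * is the current owner (NULL when the lock is free) and "count" is
 * that owner's recursion depth.
 */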
struct _IORecursiveLock {
	lck_mtx_t       mutex;
	lck_grp_t       *group;
	thread_t        thread;
	UInt32          count;
};

IORecursiveLock *
IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
	_IORecursiveLock * lock;

	if (lockGroup == NULL) {
		return NULL;
	}

	lock = IONew( _IORecursiveLock, 1 );
	if (!lock) {
		return NULL;
	}

	lck_mtx_init( &lock->mutex, lockGroup, LCK_ATTR_NULL );
	lock->group = lockGroup;
	lock->thread = NULL;
	lock->count = 0;

	return (IORecursiveLock *) lock;
}


IORecursiveLock *
IORecursiveLockAlloc( void )
{
	return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}

void
IORecursiveLockFree( IORecursiveLock * _lock )
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	lck_mtx_destroy(&lock->mutex, lock->group);
	IODelete( lock, _IORecursiveLock, 1 );
}

lck_mtx_t *
IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
	return &lock->mutex;
}
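/*
 * Note: operating on the returned lck_mtx_t directly bypasses the
 * owner/count bookkeeping, so mixing it with the recursive lock calls
 * is unsafe while the lock is held recursively.
 */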

void
IORecursiveLockLock( IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	if (lock->thread == IOThreadSelf()) {
		lock->count++;
	} else {
		lck_mtx_lock( &lock->mutex );
		assert( lock->thread == NULL );
		assert( lock->count == 0 );
		lock->thread = IOThreadSelf();
		lock->count = 1;
	}
}

boolean_t
IORecursiveLockTryLock( IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	if (lock->thread == IOThreadSelf()) {
		lock->count++;
		return true;
	} else {
		if (lck_mtx_try_lock( &lock->mutex )) {
			assert( lock->thread == NULL );
			assert( lock->count == 0 );
			lock->thread = IOThreadSelf();
			lock->count = 1;
			return true;
		}
	}
	return false;
}

void
IORecursiveLockUnlock( IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	assert( lock->thread == IOThreadSelf());

	if (0 == (--lock->count)) {
		lock->thread = NULL;
		lck_mtx_unlock( &lock->mutex );
	}
}

boolean_t
IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;

	return lock->thread == IOThreadSelf();
}

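/*
 * Sleeping releases the underlying mutex entirely, whatever the
 * current recursion depth; the saved depth is restored once the
 * thread wakes and the mutex has been reacquired.
 */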
int
IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
	UInt32 count = lock->count;
	int res;

	assert(lock->thread == IOThreadSelf());

	lock->count = 0;
	lock->thread = NULL;
	res = lck_mtx_sleep(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event, (wait_interrupt_t) interType);

	// Must re-establish the recursive lock no matter why we woke up,
	// otherwise we would potentially leave the return path corrupted.
	assert(lock->thread == NULL);
	assert(lock->count == 0);
	lock->thread = IOThreadSelf();
	lock->count = count;
	return res;
}

int
IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
    AbsoluteTime deadline, UInt32 interType)
{
	_IORecursiveLock * lock = (_IORecursiveLock *)_lock;
	UInt32 count = lock->count;
	int res;

	assert(lock->thread == IOThreadSelf());

	lock->count = 0;
	lock->thread = NULL;
	res = lck_mtx_sleep_deadline(&lock->mutex, LCK_SLEEP_PROMOTED_PRI, (event_t) event,
	    (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

	// Must re-establish the recursive lock no matter why we woke up,
	// otherwise we would potentially leave the return path corrupted.
	assert(lock->thread == NULL);
	assert(lock->count == 0);
	lock->thread = IOThreadSelf();
	lock->count = count;
	return res;
}

void
IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
	thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
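/*
 * Usage sketch (illustrative): a thread that already owns the lock
 * may take it again, e.g. when one locked entry point calls another.
 *
 *	IORecursiveLockLock(lock);	// acquires mutex, depth 1
 *	IORecursiveLockLock(lock);	// same thread, depth 2
 *	IORecursiveLockUnlock(lock);	// depth back to 1
 *	IORecursiveLockUnlock(lock);	// depth 0, mutex released
 */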

/*
 * Complex (read/write) lock operations
 */
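/*
 * Only allocation, free, and the Mach-lock accessor live in this
 * file; the read/write lock and unlock entry points map onto the
 * lck_rw_* primitives (inline when IOLOCKS_INLINE is defined).
 */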

IORWLock *
IORWLockAlloc( void )
{
	return lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL);
}

void
IORWLockFree( IORWLock * lock)
{
	lck_rw_free( lock, IOLockGroup);
}

lck_rw_t *
IORWLockGetMachLock( IORWLock * lock)
{
	return (lck_rw_t *)lock;
}


/*
 * Spin locks
 */
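/*
 * Spin locks busy-wait rather than block, which makes them the only
 * IOKit lock usable from interrupt context or with preemption
 * disabled; hold times must therefore be kept short.
 */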

IOSimpleLock *
IOSimpleLockAlloc( void )
{
	return lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL);
}

void
IOSimpleLockInit( IOSimpleLock * lock)
{
	lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}

void
IOSimpleLockDestroy( IOSimpleLock * lock )
{
	lck_spin_destroy(lock, IOLockGroup);
}

void
IOSimpleLockFree( IOSimpleLock * lock )
{
	lck_spin_free( lock, IOLockGroup);
}

lck_spin_t *
IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
	return (lck_spin_t *)lock;
}

#ifndef IOLOCKS_INLINE
/*
 * Lock assertions
 */
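/*
 * Usage sketch (illustrative): functions that require a lock to be
 * held on entry can document and enforce that with an assertion:
 *
 *	IOLockAssert(lock, kIOLockAssertOwned);
 */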

void
IOLockAssert(IOLock * lock, IOLockAssertState type)
{
	LCK_MTX_ASSERT(lock, type);
}

void
IORWLockAssert(IORWLock * lock, IORWLockAssertState type)
{
	LCK_RW_ASSERT(lock, type);
}

void
IOSimpleLockAssert(IOSimpleLock *lock, IOSimpleLockAssertState type)
{
	LCK_SPIN_ASSERT(lock, type);
}
#endif /* !IOLOCKS_INLINE */
} /* extern "C" */