/* Source: apple/xnu mirror (git.saurik.com), blob 4b021deebf3736daf24275f8a530a537951d1f84,
 * path iokit/IOKit/IOLocks.h */
1 /*
2 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 *
32 */
33
34 #ifndef __IOKIT_IOLOCKS_H
35 #define __IOKIT_IOLOCKS_H
36
37 #ifndef KERNEL
38 #error IOLocks.h is for kernel use only
39 #endif
40
41 #include <sys/appleapiopts.h>
42
43 #include <IOKit/system.h>
44
45 #include <IOKit/IOReturn.h>
46 #include <IOKit/IOTypes.h>
47
48 #ifdef __cplusplus
49 extern "C" {
50 #endif
51
52 #include <libkern/locks.h>
53 #include <machine/machine_routines.h>
54
/* Lock group under which all IOKit locks in this header are allocated
 * (see libkern/locks.h for lck_grp_t). */
extern lck_grp_t *IOLockGroup;

/*
 * Mutex lock operations
 */

/* Inside xnu the concrete lck_mtx_t is exposed so the IOLock* wrappers
 * below can be inlined; everyone else sees an opaque struct. */
#ifdef XNU_KERNEL_PRIVATE
typedef lck_mtx_t IOLock;
#else
typedef struct _IOLock IOLock;
#endif /* XNU_KERNEL_PRIVATE */
66
67
/*! @function IOLockAlloc
    @abstract Allocates and initializes a mutex.
    @discussion Allocates a mutex in general purpose memory, and initializes it. Mutexes are general purpose blocking mutual exclusion locks, supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IOLock * IOLockAlloc( void );

/*! @function IOLockFree
    @abstract Frees a mutex.
    @discussion Frees a lock allocated with IOLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void IOLockFree( IOLock * lock);

/*! @function IOLockGetMachLock
    @abstract Accessor to a Mach mutex.
    @discussion Returns a pointer to the underlying Mach lck_mtx_t, for use with the lck_mtx_* API from libkern/locks.h.
    @param lock Pointer to the allocated lock. */

lck_mtx_t * IOLockGetMachLock( IOLock * lock);
88
/*! @function IOLockLock
    @abstract Lock a mutex.
    @discussion Lock the mutex. If the lock is held by any thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the mutex recursively from one thread will result in deadlock.
    @param lock Pointer to the allocated lock. */

/* Inside xnu this is inlined directly onto lck_mtx_lock(); when building
 * IOLocks.cpp itself (IOLOCKS_CPP), or outside xnu, it is an extern call. */
#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOLockLock( IOLock * lock)
{
    lck_mtx_lock(lock);
}
#else
void IOLockLock( IOLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IOLockLock( IOLock * lock);
#endif /* XNU_KERNEL_PRIVATE */
107
/*! @function IOLockTryLock
    @abstract Attempt to lock a mutex.
    @discussion Lock the mutex if it is currently unlocked, and return true. If the lock is held by any thread, return false.
    @param lock Pointer to the allocated lock.
    @result True if the mutex was unlocked and is now locked by the caller, otherwise false. */

/* Inside xnu this is inlined directly onto lck_mtx_try_lock(); when building
 * IOLocks.cpp itself (IOLOCKS_CPP), or outside xnu, it is an extern call. */
#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
boolean_t IOLockTryLock( IOLock * lock)
{
    return(lck_mtx_try_lock(lock));
}
#else
boolean_t IOLockTryLock( IOLock * lock);
#endif /* !IOLOCKS_CPP */
#else
boolean_t IOLockTryLock( IOLock * lock);
#endif /* XNU_KERNEL_PRIVATE */
127
/*! @function IOLockUnlock
    @abstract Unlock a mutex.
    @discussion Unlock the mutex and wake any blocked waiters. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the allocated lock. */

/* Inside xnu this is inlined directly onto lck_mtx_unlock(); when building
 * IOLocks.cpp itself (IOLOCKS_CPP), or outside xnu, it is an extern call. */
#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOLockUnlock( IOLock * lock)
{
    lck_mtx_unlock(lock);
}
#else
void IOLockUnlock( IOLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IOLockUnlock( IOLock * lock);
#endif /* XNU_KERNEL_PRIVATE */
146
/*! @function IOLockSleep
    @abstract Sleep with mutex unlock and relock
    @discussion Prepare to sleep, unlock the mutex, and re-acquire it on wakeup. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the locked lock.
    @param event The event to sleep on.
    @param interType How can the sleep be interrupted.
    @result The wait-result value indicating how the thread was awakened.*/
int IOLockSleep( IOLock * lock, void *event, UInt32 interType);

/* As IOLockSleep, but with a deadline — presumably the wait ends no later
 * than the given AbsoluteTime; confirm against the implementation. */
int IOLockSleepDeadline( IOLock * lock, void *event,
                         AbsoluteTime deadline, UInt32 interType);

/* Wake threads sleeping on this event via IOLockSleep/IOLockSleepDeadline.
 * NOTE(review): oneThread presumably selects waking a single waiter versus
 * all waiters — confirm against the implementation. */
void IOLockWakeup(IOLock * lock, void *event, bool oneThread);
160
#ifdef __APPLE_API_OBSOLETE

/* The following API is deprecated */

/* Initial state for IOLockInitWithState(). */
typedef enum {
    kIOLockStateUnlocked = 0,
    kIOLockStateLocked = 1
} IOLockState;

/* Initialize an embedded (not IOLockAlloc'ed) mutex to the given state. */
void IOLockInitWithState( IOLock * lock, IOLockState state);
/* No trailing semicolon in the expansion: callers supply their own, and a
 * baked-in semicolon breaks uses such as `if (x) IOLockInit(l); else ...`
 * (CERT PRE11-C). */
#define IOLockInit( l ) IOLockInitWithState( l, kIOLockStateUnlocked)

/* Deprecated aliases for IOLockLock / IOLockTryLock / IOLockUnlock. */
static __inline__ void IOTakeLock( IOLock * lock) { IOLockLock(lock); }
static __inline__ boolean_t IOTryLock( IOLock * lock) { return(IOLockTryLock(lock)); }
static __inline__ void IOUnlock( IOLock * lock) { IOLockUnlock(lock); }

#endif /* __APPLE_API_OBSOLETE */
178
/*
 * Recursive lock operations
 */

/* Always opaque to clients; no inline fast path is exposed for recursive locks. */
typedef struct _IORecursiveLock IORecursiveLock;

/*! @function IORecursiveLockAlloc
    @abstract Allocates and initializes a recursive lock.
    @discussion Allocates a recursive lock in general purpose memory, and initializes it. Recursive locks function identically to mutexes but allow one thread to lock more than once, with balanced unlocks.
    @result Pointer to the allocated lock, or zero on failure. */

IORecursiveLock * IORecursiveLockAlloc( void );

/*! @function IORecursiveLockFree
    @abstract Frees a recursive lock.
    @discussion Frees a lock allocated with IORecursiveLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void IORecursiveLockFree( IORecursiveLock * lock);

/*! @function IORecursiveLockGetMachLock
    @abstract Accessor to a Mach mutex.
    @discussion Returns a pointer to the underlying Mach lck_mtx_t, for use with the lck_mtx_* API from libkern/locks.h.
    @param lock Pointer to the allocated lock. */

lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock);
205
/*! @function IORecursiveLockLock
    @abstract Lock a recursive lock.
    @discussion Lock the recursive lock. If the lock is held by another thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. The lock may be taken recursively by the same thread, with a balanced number of calls to IORecursiveLockUnlock.
    @param lock Pointer to the allocated lock. */

void IORecursiveLockLock( IORecursiveLock * lock);

/*! @function IORecursiveLockTryLock
    @abstract Attempt to lock a recursive lock.
    @discussion Lock the lock if it is currently unlocked, or held by the calling thread, and return true. If the lock is held by another thread, return false. Successful calls to IORecursiveLockTryLock should be balanced with calls to IORecursiveLockUnlock.
    @param lock Pointer to the allocated lock.
    @result True if the lock is now locked by the caller, otherwise false. */

boolean_t IORecursiveLockTryLock( IORecursiveLock * lock);

/*! @function IORecursiveLockUnlock
    @abstract Unlock a recursive lock.
    @discussion Undo one call to IORecursiveLockLock, if the lock is now unlocked wake any blocked waiters. Results are undefined if the caller does not balance calls to IORecursiveLockLock with IORecursiveLockUnlock. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the allocated lock. */

void IORecursiveLockUnlock( IORecursiveLock * lock);

/*! @function IORecursiveLockHaveLock
    @abstract Check if a recursive lock is held by the calling thread.
    @discussion If the lock is held by the calling thread, return true, otherwise the lock is unlocked, or held by another thread and false is returned.
    @param lock Pointer to the allocated lock.
    @result True if the calling thread holds the lock otherwise false. */

boolean_t IORecursiveLockHaveLock( const IORecursiveLock * lock);

/* Sleep on an event while holding a recursive lock, and wake such sleepers.
 * NOTE(review): presumably the lock is released for the wait and re-acquired
 * on wakeup, as with IOLockSleep — confirm against the implementation. */
extern int IORecursiveLockSleep( IORecursiveLock *_lock,
                                 void *event, UInt32 interType);
extern void IORecursiveLockWakeup( IORecursiveLock *_lock,
                                   void *event, bool oneThread);
240
/*
 * Complex (read/write) lock operations
 */

/* Inside xnu the concrete lck_rw_t is exposed so the IORWLock* wrappers
 * below can be inlined; everyone else sees an opaque struct. */
#ifdef XNU_KERNEL_PRIVATE
typedef lck_rw_t IORWLock;
#else
typedef struct _IORWLock IORWLock;
#endif /* XNU_KERNEL_PRIVATE */

/*! @function IORWLockAlloc
    @abstract Allocates and initializes a read/write lock.
    @discussion Allocates a read/write lock in general purpose memory, and initializes it. Read/write locks provide for multiple readers, one exclusive writer, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IORWLock * IORWLockAlloc( void );

/*! @function IORWLockFree
    @abstract Frees a read/write lock.
    @discussion Frees a lock allocated with IORWLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void IORWLockFree( IORWLock * lock);

/*! @function IORWLockGetMachLock
    @abstract Accessor to a Mach read/write lock.
    @discussion Returns a pointer to the underlying Mach lck_rw_t, for use with the lck_rw_* API from libkern/locks.h.
    @param lock Pointer to the allocated lock. */

lck_rw_t * IORWLockGetMachLock( IORWLock * lock);
271
/*! @function IORWLockRead
    @abstract Lock a read/write lock for read.
    @discussion Lock the lock for read, allowing multiple readers when there are no writers. If the lock is held for write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
    @param lock Pointer to the allocated lock. */

/* Inside xnu this is inlined directly onto lck_rw_lock_shared(); when building
 * IOLocks.cpp itself (IOLOCKS_CPP), or outside xnu, it is an extern call. */
#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IORWLockRead( IORWLock * lock)
{
    lck_rw_lock_shared( lock);
}
#else
void IORWLockRead( IORWLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IORWLockRead( IORWLock * lock);
#endif /* XNU_KERNEL_PRIVATE */
290
/*! @function IORWLockWrite
    @abstract Lock a read/write lock for write.
    @discussion Lock the lock for write, allowing one writer exclusive access. If the lock is held for read or write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
    @param lock Pointer to the allocated lock. */

/* Inside xnu this is inlined directly onto lck_rw_lock_exclusive(); when building
 * IOLocks.cpp itself (IOLOCKS_CPP), or outside xnu, it is an extern call. */
#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IORWLockWrite( IORWLock * lock)
{
    lck_rw_lock_exclusive( lock);
}
#else
void IORWLockWrite( IORWLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IORWLockWrite( IORWLock * lock);
#endif /* XNU_KERNEL_PRIVATE */
309
/*! @function IORWLockUnlock
    @abstract Unlock a read/write lock.
    @discussion Undo one call to IORWLockRead or IORWLockWrite. Results are undefined if the caller has not locked the lock. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the allocated lock. */

/* Inside xnu this is inlined directly onto lck_rw_done(), which releases either
 * a shared or an exclusive hold; IOLOCKS_CPP / non-xnu builds use an extern call. */
#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IORWLockUnlock( IORWLock * lock)
{
    lck_rw_done( lock);
}
#else
void IORWLockUnlock( IORWLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IORWLockUnlock( IORWLock * lock);
#endif /* XNU_KERNEL_PRIVATE */
328
#ifdef __APPLE_API_OBSOLETE

/* The following API is deprecated */

/* Deprecated aliases for IORWLockRead / IORWLockWrite / IORWLockUnlock. */
static __inline__ void
IOReadLock( IORWLock * lock)
{
    IORWLockRead(lock);
}

static __inline__ void
IOWriteLock( IORWLock * lock)
{
    IORWLockWrite(lock);
}

static __inline__ void
IORWUnlock( IORWLock * lock)
{
    IORWLockUnlock(lock);
}

#endif /* __APPLE_API_OBSOLETE */
338
339
/*
 * Simple locks. Cannot block while holding a simple lock.
 */

/* NOTE(review): this guard tests KERNEL_PRIVATE while the matching IOLock and
 * IORWLock typedefs above test XNU_KERNEL_PRIVATE (and the original #endif
 * comment here said XNU_KERNEL_PRIVATE) — confirm which macro is intended. */
#ifdef KERNEL_PRIVATE
typedef lck_spin_t IOSimpleLock;
#else
typedef struct _IOSimpleLock IOSimpleLock;
#endif /* KERNEL_PRIVATE */
349
/*! @function IOSimpleLockAlloc
    @abstract Allocates and initializes a spin lock.
    @discussion Allocates a spin lock in general purpose memory, and initializes it. Spin locks provide non-blocking mutual exclusion for synchronization between thread context and interrupt context, or for multiprocessor synchronization, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IOSimpleLock * IOSimpleLockAlloc( void );

/*! @function IOSimpleLockFree
    @abstract Frees a spin lock.
    @discussion Frees a lock allocated with IOSimpleLockAlloc.
    @param lock Pointer to the lock. */

void IOSimpleLockFree( IOSimpleLock * lock );

/*! @function IOSimpleLockGetMachLock
    @abstract Accessor to a Mach spin lock.
    @discussion Returns a pointer to the underlying Mach lck_spin_t, for use with the lck_spin_* API from libkern/locks.h.
    @param lock Pointer to the allocated lock. */

lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock);

/*! @function IOSimpleLockInit
    @abstract Initialize a spin lock.
    @discussion Initialize an embedded spin lock, to the unlocked state.
    @param lock Pointer to the lock. */

void IOSimpleLockInit( IOSimpleLock * lock );
377
/*! @function IOSimpleLockLock
    @abstract Lock a spin lock.
    @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Spin locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
    @param lock Pointer to the lock. */

/* Inside xnu this is inlined directly onto lck_spin_lock(); when building
 * IOLocks.cpp itself (IOLOCKS_CPP), or outside xnu, it is an extern call. */
#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOSimpleLockLock( IOSimpleLock * lock )
{
    lck_spin_lock( lock );
}
#else
void IOSimpleLockLock( IOSimpleLock * lock );
#endif /* !IOLOCKS_CPP */
#else
void IOSimpleLockLock( IOSimpleLock * lock );
#endif /* XNU_KERNEL_PRIVATE */
396
/*! @function IOSimpleLockTryLock
    @abstract Attempt to lock a spin lock.
    @discussion Lock the spin lock if it is currently unlocked, and return true. If the lock is held, return false. Successful calls to IOSimpleLockTryLock should be balanced with calls to IOSimpleLockUnlock.
    @param lock Pointer to the lock.
    @result True if the lock was unlocked and is now locked by the caller, otherwise false. */

/* Inside xnu this is inlined directly onto lck_spin_try_lock(); when building
 * IOLocks.cpp itself (IOLOCKS_CPP), or outside xnu, it is an extern call. */
#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
boolean_t IOSimpleLockTryLock( IOSimpleLock * lock )
{
    return( lck_spin_try_lock( lock ) );
}
#else
boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
#endif /* !IOLOCKS_CPP */
#else
boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
#endif /* XNU_KERNEL_PRIVATE */
416
/*! @function IOSimpleLockUnlock
    @abstract Unlock a spin lock.
    @discussion Unlock the lock, and restore preemption. Results are undefined if the caller has not locked the lock.
    @param lock Pointer to the lock. */

/* Inside xnu this is inlined directly onto lck_spin_unlock(); when building
 * IOLocks.cpp itself (IOLOCKS_CPP), or outside xnu, it is an extern call. */
#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOSimpleLockUnlock( IOSimpleLock * lock )
{
    lck_spin_unlock( lock );
}
#else
void IOSimpleLockUnlock( IOSimpleLock * lock );
#endif /* !IOLOCKS_CPP */
#else
void IOSimpleLockUnlock( IOSimpleLock * lock );
#endif /* XNU_KERNEL_PRIVATE */
435
/* Saved interrupt-enable state, captured by IOSimpleLockLockDisableInterrupt()
 * (from ml_set_interrupts_enabled()) and restored by
 * IOSimpleLockUnlockEnableInterrupt(). */
typedef long int IOInterruptState;
437
438 /*! @function IOSimpleLockLockDisableInterrupt
439 @abstract Lock a spin lock.
440 @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Simple locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
441 @param lock Pointer to the lock. */
442
443 static __inline__
444 IOInterruptState IOSimpleLockLockDisableInterrupt( IOSimpleLock * lock )
445 {
446 IOInterruptState state = ml_set_interrupts_enabled( false );
447 IOSimpleLockLock( lock );
448 return( state );
449 }
450
451 /*! @function IOSimpleLockUnlockEnableInterrupt
452 @abstract Unlock a spin lock, and restore interrupt state.
453 @discussion Unlock the lock, and restore preemption and interrupts to the state as they were when the lock was taken. Results are undefined if the caller has not locked the lock.
454 @param lock Pointer to the lock.
455 @param state The interrupt state returned by IOSimpleLockLockDisableInterrupt() */
456
457 static __inline__
458 void IOSimpleLockUnlockEnableInterrupt( IOSimpleLock * lock,
459 IOInterruptState state )
460 {
461 IOSimpleLockUnlock( lock );
462 ml_set_interrupts_enabled( state );
463 }
464
465 #ifdef __cplusplus
466 } /* extern "C" */
467 #endif
468
469 #endif /* !__IOKIT_IOLOCKS_H */
470