]> git.saurik.com Git - apple/xnu.git/blame - iokit/IOKit/IOLocks.h
xnu-792.25.20.tar.gz
[apple/xnu.git] / iokit / IOKit / IOLocks.h
CommitLineData
/*
 * Copyright (c) 1998-2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 *
 */
25
26#ifndef __IOKIT_IOLOCKS_H
27#define __IOKIT_IOLOCKS_H
28
29#ifndef KERNEL
30#error IOLocks.h is for kernel use only
31#endif
32
9bccf70c 33#include <sys/appleapiopts.h>
1c79356b
A
34
35#include <IOKit/system.h>
36
37#include <IOKit/IOReturn.h>
38#include <IOKit/IOTypes.h>
39
40#ifdef __cplusplus
41extern "C" {
42#endif
43
91447636 44#include <libkern/locks.h>
1c79356b
A
45#include <machine/machine_routines.h>
46
91447636
A
47extern lck_grp_t *IOLockGroup;
48
1c79356b
A
49/*
50 * Mutex lock operations
51 */
52
91447636
A
53#ifdef XNU_KERNEL_PRIVATE
54typedef lck_mtx_t IOLock;
55#else
56typedef struct _IOLock IOLock;
57#endif /* XNU_KERNEL_PRIVATE */
58
1c79356b
A
59
60/*! @function IOLockAlloc
91447636
A
61 @abstract Allocates and initializes a mutex.
62 @discussion Allocates a mutex in general purpose memory, and initilizes it. Mutexes are general purpose blocking mutual exclusion locks, supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
1c79356b
A
63 @result Pointer to the allocated lock, or zero on failure. */
64
65IOLock * IOLockAlloc( void );
66
67/*! @function IOLockFree
91447636 68 @abstract Frees a mutex.
1c79356b
A
69 @discussion Frees a lock allocated with IOLockAlloc. Any blocked waiters will not be woken.
70 @param lock Pointer to the allocated lock. */
71
72void IOLockFree( IOLock * lock);
73
91447636
A
74/*! @function IOLockGetMachLock
75 @abstract Accessor to a Mach mutex.
76 @discussion Accessor to the Mach mutex.
77 @param lock Pointer to the allocated lock. */
78
79lck_mtx_t * IOLockGetMachLock( IOLock * lock);
80
1c79356b 81/*! @function IOLockLock
91447636
A
82 @abstract Lock a mutex.
83 @discussion Lock the mutex. If the lock is held by any thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the mutex recursively from one thread will result in deadlock.
1c79356b
A
84 @param lock Pointer to the allocated lock. */
85
91447636
A
86#ifdef XNU_KERNEL_PRIVATE
87#ifndef IOLOCKS_CPP
1c79356b
A
88static __inline__
89void IOLockLock( IOLock * lock)
90{
91447636 91 lck_mtx_lock(lock);
1c79356b 92}
91447636
A
93#else
94void IOLockLock( IOLock * lock);
95#endif /* !IOLOCKS_CPP */
96#else
97void IOLockLock( IOLock * lock);
98#endif /* XNU_KERNEL_PRIVATE */
1c79356b
A
99
100/*! @function IOLockTryLock
91447636 101 @abstract Attempt to lock a mutex.
1c79356b
A
102 @discussion Lock the mutex if it is currently unlocked, and return true. If the lock is held by any thread, return false.
103 @param lock Pointer to the allocated lock.
104 @result True if the mutex was unlocked and is now locked by the caller, otherwise false. */
105
91447636
A
106#ifdef XNU_KERNEL_PRIVATE
107#ifndef IOLOCKS_CPP
1c79356b
A
108static __inline__
109boolean_t IOLockTryLock( IOLock * lock)
110{
91447636 111 return(lck_mtx_try_lock(lock));
1c79356b 112}
91447636
A
113#else
114boolean_t IOLockTryLock( IOLock * lock);
115#endif /* !IOLOCKS_CPP */
116#else
117boolean_t IOLockTryLock( IOLock * lock);
118#endif /* XNU_KERNEL_PRIVATE */
1c79356b
A
119
120/*! @function IOLockUnlock
91447636
A
121 @abstract Unlock a mutex.
122@discussion Unlock the mutex and wake any blocked waiters. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
1c79356b
A
123 @param lock Pointer to the allocated lock. */
124
91447636
A
125#ifdef XNU_KERNEL_PRIVATE
126#ifndef IOLOCKS_CPP
1c79356b
A
127static __inline__
128void IOLockUnlock( IOLock * lock)
129{
91447636 130 lck_mtx_unlock(lock);
1c79356b 131}
91447636
A
132#else
133void IOLockUnlock( IOLock * lock);
134#endif /* !IOLOCKS_CPP */
135#else
136void IOLockUnlock( IOLock * lock);
137#endif /* XNU_KERNEL_PRIVATE */
1c79356b 138
9bccf70c
A
139/*! @function IOLockSleep
140 @abstract Sleep with mutex unlock and relock
91447636 141@discussion Prepare to sleep,unlock the mutex, and re-acquire it on wakeup.Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
9bccf70c
A
142 @param lock Pointer to the locked lock.
143 @param event The event to sleep on.
144 @param interType How can the sleep be interrupted.
145 @result The wait-result value indicating how the thread was awakened.*/
91447636 146int IOLockSleep( IOLock * lock, void *event, UInt32 interType);
9bccf70c 147
9bccf70c 148int IOLockSleepDeadline( IOLock * lock, void *event,
91447636 149 AbsoluteTime deadline, UInt32 interType);
9bccf70c 150
91447636 151void IOLockWakeup(IOLock * lock, void *event, bool oneThread);
9bccf70c
A
152
#ifdef __APPLE_API_OBSOLETE

/* The following API is deprecated */

typedef enum {
    kIOLockStateUnlocked = 0,
    kIOLockStateLocked   = 1
} IOLockState;

void IOLockInitWithState( IOLock * lock, IOLockState state);
/* Fixed: the macro body previously ended with a stray semicolon, so
 * "IOLockInit(l);" expanded to two statements — breaking use as the
 * sole body of an unbraced if/else. */
#define IOLockInit( l ) IOLockInitWithState( l, kIOLockStateUnlocked)

/* Deprecated aliases for the IOLock* operations. */
static __inline__ void IOTakeLock( IOLock * lock) { IOLockLock(lock); }
static __inline__ boolean_t IOTryLock( IOLock * lock) { return(IOLockTryLock(lock)); }
static __inline__ void IOUnlock( IOLock * lock) { IOLockUnlock(lock); }

#endif /* __APPLE_API_OBSOLETE */
171/*
172 * Recursive lock operations
173 */
174
175typedef struct _IORecursiveLock IORecursiveLock;
176
177/*! @function IORecursiveLockAlloc
178 @abstract Allocates and initializes an recursive lock.
91447636 179 @discussion Allocates a recursive lock in general purpose memory, and initilizes it. Recursive locks function identically to mutexes but allow one thread to lock more than once, with balanced unlocks.
1c79356b
A
180 @result Pointer to the allocated lock, or zero on failure. */
181
182IORecursiveLock * IORecursiveLockAlloc( void );
183
184/*! @function IORecursiveLockFree
185 @abstract Frees a recursive lock.
186 @discussion Frees a lock allocated with IORecursiveLockAlloc. Any blocked waiters will not be woken.
187 @param lock Pointer to the allocated lock. */
188
189void IORecursiveLockFree( IORecursiveLock * lock);
190
91447636
A
191/*! @function IORecursiveLockGetMachLock
192 @abstract Accessor to a Mach mutex.
193 @discussion Accessor to the Mach mutex.
194 @param lock Pointer to the allocated lock. */
195
196lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock);
197
1c79356b
A
198/*! @function IORecursiveLockLock
199 @abstract Lock a recursive lock.
91447636 200 @discussion Lock the recursive lock. If the lock is held by another thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. The lock may be taken recursively by the same thread, with a balanced number of calls to IORecursiveLockUnlock.
1c79356b
A
201 @param lock Pointer to the allocated lock. */
202
203void IORecursiveLockLock( IORecursiveLock * lock);
204
205/*! @function IORecursiveLockTryLock
206 @abstract Attempt to lock a recursive lock.
207 @discussion Lock the lock if it is currently unlocked, or held by the calling thread, and return true. If the lock is held by another thread, return false. Successful calls to IORecursiveLockTryLock should be balanced with calls to IORecursiveLockUnlock.
208 @param lock Pointer to the allocated lock.
209 @result True if the lock is now locked by the caller, otherwise false. */
210
211boolean_t IORecursiveLockTryLock( IORecursiveLock * lock);
212
213/*! @function IORecursiveLockUnlock
214 @abstract Unlock a recursive lock.
91447636 215@discussion Undo one call to IORecursiveLockLock, if the lock is now unlocked wake any blocked waiters. Results are undefined if the caller does not balance calls to IORecursiveLockLock with IORecursiveLockUnlock. This function may block and so should not be called from interrupt level or while a spin lock is held.
1c79356b
A
216 @param lock Pointer to the allocated lock. */
217
218void IORecursiveLockUnlock( IORecursiveLock * lock);
219
220/*! @function IORecursiveLockHaveLock
221 @abstract Check if a recursive lock is held by the calling thread.
222 @discussion If the lock is held by the calling thread, return true, otherwise the lock is unlocked, or held by another thread and false is returned.
223 @param lock Pointer to the allocated lock.
224 @result True if the calling thread holds the lock otherwise false. */
225
226boolean_t IORecursiveLockHaveLock( const IORecursiveLock * lock);
227
228extern int IORecursiveLockSleep( IORecursiveLock *_lock,
229 void *event, UInt32 interType);
230extern void IORecursiveLockWakeup( IORecursiveLock *_lock,
231 void *event, bool oneThread);
232
233/*
234 * Complex (read/write) lock operations
235 */
236
91447636
A
237#ifdef XNU_KERNEL_PRIVATE
238typedef lck_rw_t IORWLock;
239#else
240typedef struct _IORWLock IORWLock;
241#endif /* XNU_KERNEL_PRIVATE */
1c79356b
A
242
243/*! @function IORWLockAlloc
91447636
A
244 @abstract Allocates and initializes a read/write lock.
245@discussion Allocates and initializes a read/write lock in general purpose memory, and initilizes it. Read/write locks provide for multiple readers, one exclusive writer, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
1c79356b
A
246 @result Pointer to the allocated lock, or zero on failure. */
247
248IORWLock * IORWLockAlloc( void );
249
250/*! @function IORWLockFree
91447636 251 @abstract Frees a read/write lock.
1c79356b
A
252 @discussion Frees a lock allocated with IORWLockAlloc. Any blocked waiters will not be woken.
253 @param lock Pointer to the allocated lock. */
254
255void IORWLockFree( IORWLock * lock);
256
91447636
A
257/*! @function IORWLockGetMachLock
258 @abstract Accessor to a Mach read/write lock.
259 @discussion Accessor to the Mach read/write lock.
260 @param lock Pointer to the allocated lock. */
261
262lck_rw_t * IORWLockGetMachLock( IORWLock * lock);
263
1c79356b 264/*! @function IORWLockRead
91447636
A
265 @abstract Lock a read/write lock for read.
266@discussion Lock the lock for read, allowing multiple readers when there are no writers. If the lock is held for write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
1c79356b
A
267 @param lock Pointer to the allocated lock. */
268
91447636
A
269#ifdef XNU_KERNEL_PRIVATE
270#ifndef IOLOCKS_CPP
1c79356b
A
271static __inline__
272void IORWLockRead( IORWLock * lock)
273{
91447636 274 lck_rw_lock_shared( lock);
1c79356b 275}
91447636
A
276#else
277void IORWLockRead( IORWLock * lock);
278#endif /* !IOLOCKS_CPP */
279#else
280void IORWLockRead( IORWLock * lock);
281#endif /* XNU_KERNEL_PRIVATE */
1c79356b
A
282
283/*! @function IORWLockWrite
91447636
A
284 @abstract Lock a read/write lock for write.
285 @discussion Lock the lock for write, allowing one writer exlusive access. If the lock is held for read or write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
1c79356b
A
286 @param lock Pointer to the allocated lock. */
287
91447636
A
288#ifdef XNU_KERNEL_PRIVATE
289#ifndef IOLOCKS_CPP
1c79356b
A
290static __inline__
291void IORWLockWrite( IORWLock * lock)
292{
91447636 293 lck_rw_lock_exclusive( lock);
1c79356b 294}
91447636
A
295#else
296void IORWLockWrite( IORWLock * lock);
297#endif /* !IOLOCKS_CPP */
298#else
299void IORWLockWrite( IORWLock * lock);
300#endif /* XNU_KERNEL_PRIVATE */
1c79356b
A
301
302/*! @function IORWLockUnlock
91447636
A
303 @abstract Unlock a read/write lock.
304 @discussion Undo one call to IORWLockRead or IORWLockWrite. Results are undefined if the caller has not locked the lock. This function may block and so should not be called from interrupt level or while a spin lock is held.
1c79356b
A
305 @param lock Pointer to the allocated lock. */
306
91447636
A
307#ifdef XNU_KERNEL_PRIVATE
308#ifndef IOLOCKS_CPP
1c79356b
A
309static __inline__
310void IORWLockUnlock( IORWLock * lock)
311{
91447636 312 lck_rw_done( lock);
1c79356b 313}
91447636
A
314#else
315void IORWLockUnlock( IORWLock * lock);
316#endif /* !IOLOCKS_CPP */
317#else
318void IORWLockUnlock( IORWLock * lock);
319#endif /* XNU_KERNEL_PRIVATE */
1c79356b 320
#ifdef __APPLE_API_OBSOLETE

/* The following API is deprecated */

/* Deprecated aliases for the IORWLock* operations. */
static __inline__ void IOReadLock( IORWLock * lock) { IORWLockRead(lock); }
static __inline__ void IOWriteLock( IORWLock * lock) { IORWLockWrite(lock); }
static __inline__ void IORWUnlock( IORWLock * lock) { IORWLockUnlock(lock); }

#endif /* __APPLE_API_OBSOLETE */
332/*
333 * Simple locks. Cannot block while holding a simple lock.
334 */
335
91447636
A
336#ifdef KERNEL_PRIVATE
337typedef lck_spin_t IOSimpleLock;
338#else
339typedef struct _IOSimpleLock IOSimpleLock;
340#endif /* XNU_KERNEL_PRIVATE */
1c79356b
A
341
342/*! @function IOSimpleLockAlloc
91447636
A
343 @abstract Allocates and initializes a spin lock.
344 @discussion Allocates an initializes a spin lock in general purpose memory, and initilizes it. Spin locks provide non-blocking mutual exclusion for synchronization between thread context and interrupt context, or for multiprocessor synchronization, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
1c79356b
A
345 @result Pointer to the allocated lock, or zero on failure. */
346
347IOSimpleLock * IOSimpleLockAlloc( void );
348
349/*! @function IOSimpleLockFree
91447636 350 @abstract Frees a spin lock.
1c79356b
A
351 @discussion Frees a lock allocated with IOSimpleLockAlloc.
352 @param lock Pointer to the lock. */
353
354void IOSimpleLockFree( IOSimpleLock * lock );
355
91447636
A
356/*! @function IOSimpleLockGetMachLock
357 @abstract Accessor to a Mach spin lock.
358 @discussion Accessor to the Mach spin lock.
359 @param lock Pointer to the allocated lock. */
360
361lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock);
362
1c79356b 363/*! @function IOSimpleLockInit
91447636
A
364 @abstract Initialize a spin lock.
365 @discussion Initialize an embedded spin lock, to the unlocked state.
1c79356b
A
366 @param lock Pointer to the lock. */
367
368void IOSimpleLockInit( IOSimpleLock * lock );
369
370/*! @function IOSimpleLockLock
91447636
A
371 @abstract Lock a spin lock.
372@discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Spin locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
1c79356b
A
373 @param lock Pointer to the lock. */
374
91447636
A
375#ifdef XNU_KERNEL_PRIVATE
376#ifndef IOLOCKS_CPP
1c79356b
A
377static __inline__
378void IOSimpleLockLock( IOSimpleLock * lock )
379{
91447636 380 lck_spin_lock( lock );
1c79356b 381}
91447636
A
382#else
383void IOSimpleLockLock( IOSimpleLock * lock );
384#endif /* !IOLOCKS_CPP */
385#else
386void IOSimpleLockLock( IOSimpleLock * lock );
387#endif /* XNU_KERNEL_PRIVATE */
1c79356b
A
388
389/*! @function IOSimpleLockTryLock
91447636
A
390 @abstract Attempt to lock a spin lock.
391@discussion Lock the spin lock if it is currently unlocked, and return true. If the lock is held, return false. Successful calls to IOSimpleLockTryLock should be balanced with calls to IOSimpleLockUnlock.
1c79356b
A
392 @param lock Pointer to the lock.
393 @result True if the lock was unlocked and is now locked by the caller, otherwise false. */
394
91447636
A
395#ifdef XNU_KERNEL_PRIVATE
396#ifndef IOLOCKS_CPP
1c79356b
A
397static __inline__
398boolean_t IOSimpleLockTryLock( IOSimpleLock * lock )
399{
91447636 400 return( lck_spin_try_lock( lock ) );
1c79356b 401}
91447636
A
402#else
403boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
404#endif /* !IOLOCKS_CPP */
405#else
406boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
407#endif /* XNU_KERNEL_PRIVATE */
1c79356b
A
408
409/*! @function IOSimpleLockUnlock
91447636 410 @abstract Unlock a spin lock.
1c79356b
A
411 @discussion Unlock the lock, and restore preemption. Results are undefined if the caller has not locked the lock.
412 @param lock Pointer to the lock. */
413
91447636
A
414#ifdef XNU_KERNEL_PRIVATE
415#ifndef IOLOCKS_CPP
1c79356b
A
416static __inline__
417void IOSimpleLockUnlock( IOSimpleLock * lock )
418{
91447636 419 lck_spin_unlock( lock );
1c79356b 420}
91447636
A
421#else
422void IOSimpleLockUnlock( IOSimpleLock * lock );
423#endif /* !IOLOCKS_CPP */
424#else
425void IOSimpleLockUnlock( IOSimpleLock * lock );
426#endif /* XNU_KERNEL_PRIVATE */
1c79356b
A
427
428typedef long int IOInterruptState;
429
430/*! @function IOSimpleLockLockDisableInterrupt
91447636
A
431 @abstract Lock a spin lock.
432 @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Simple locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
1c79356b
A
433 @param lock Pointer to the lock. */
434
435static __inline__
436IOInterruptState IOSimpleLockLockDisableInterrupt( IOSimpleLock * lock )
437{
438 IOInterruptState state = ml_set_interrupts_enabled( false );
91447636 439 IOSimpleLockLock( lock );
1c79356b
A
440 return( state );
441}
442
443/*! @function IOSimpleLockUnlockEnableInterrupt
91447636 444 @abstract Unlock a spin lock, and restore interrupt state.
1c79356b
A
445 @discussion Unlock the lock, and restore preemption and interrupts to the state as they were when the lock was taken. Results are undefined if the caller has not locked the lock.
446 @param lock Pointer to the lock.
447 @param state The interrupt state returned by IOSimpleLockLockDisableInterrupt() */
448
449static __inline__
450void IOSimpleLockUnlockEnableInterrupt( IOSimpleLock * lock,
451 IOInterruptState state )
452{
91447636 453 IOSimpleLockUnlock( lock );
1c79356b
A
454 ml_set_interrupts_enabled( state );
455}
456
457#ifdef __cplusplus
458} /* extern "C" */
459#endif
460
461#endif /* !__IOKIT_IOLOCKS_H */
462