/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *
 */

#ifndef __IOKIT_IOLOCKS_H
#define __IOKIT_IOLOCKS_H

#ifndef KERNEL
#error IOLocks.h is for kernel use only
#endif

#include <sys/appleapiopts.h>

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOTypes.h>

#ifdef __cplusplus
extern "C" {
#endif

#include <libkern/locks.h>
#include <machine/machine_routines.h>

extern lck_grp_t * IOLockGroup;

/*
 * Mutex lock operations
 */

#ifdef XNU_KERNEL_PRIVATE
typedef lck_mtx_t IOLock;
#else
typedef struct _IOLock IOLock;
#endif /* XNU_KERNEL_PRIVATE */

/*! @function IOLockAlloc
    @abstract Allocates and initializes a mutex.
    @discussion Allocates a mutex in general purpose memory, and initializes it. Mutexes are general purpose blocking mutual exclusion locks, supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IOLock * IOLockAlloc( void );

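/*
 * Example (illustrative sketch, not part of the original header): a typical
 * IOLock lifecycle guarding a counter. The structure and names used here
 * (MyDriverData, fLock, fCount, MyDriverStart, MyDriverIncrement, MyDriverStop)
 * are hypothetical.
 *
 *	struct MyDriverData {
 *	    IOLock *	fLock;
 *	    UInt32	fCount;
 *	};
 *
 *	static IOReturn MyDriverStart( struct MyDriverData * data )
 *	{
 *	    data->fLock = IOLockAlloc();	// may block; do not call at interrupt level
 *	    if (!data->fLock)
 *	        return kIOReturnNoMemory;
 *	    data->fCount = 0;
 *	    return kIOReturnSuccess;
 *	}
 *
 *	static void MyDriverIncrement( struct MyDriverData * data )
 *	{
 *	    IOLockLock(data->fLock);		// blocks until the mutex is free
 *	    data->fCount++;
 *	    IOLockUnlock(data->fLock);
 *	}
 *
 *	static void MyDriverStop( struct MyDriverData * data )
 *	{
 *	    IOLockFree(data->fLock);		// any blocked waiters would not be woken
 *	    data->fLock = NULL;
 *	}
 */
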
/*! @function IOLockFree
    @abstract Frees a mutex.
    @discussion Frees a lock allocated with IOLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void IOLockFree( IOLock * lock);

/*! @function IOLockGetMachLock
    @abstract Accessor to a Mach mutex.
    @discussion Accessor to the Mach mutex.
    @param lock Pointer to the allocated lock. */

lck_mtx_t * IOLockGetMachLock( IOLock * lock);

/*! @function IOLockLock
    @abstract Lock a mutex.
    @discussion Lock the mutex. If the lock is held by any thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the mutex recursively from one thread will result in deadlock.
    @param lock Pointer to the allocated lock. */

#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOLockLock( IOLock * lock)
{
    lck_mtx_lock(lock);
}
#else
void IOLockLock( IOLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IOLockLock( IOLock * lock);
#endif /* XNU_KERNEL_PRIVATE */

/*! @function IOLockTryLock
    @abstract Attempt to lock a mutex.
    @discussion Lock the mutex if it is currently unlocked, and return true. If the lock is held by any thread, return false.
    @param lock Pointer to the allocated lock.
    @result True if the mutex was unlocked and is now locked by the caller, otherwise false. */

#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
boolean_t IOLockTryLock( IOLock * lock)
{
    return(lck_mtx_try_lock(lock));
}
#else
boolean_t IOLockTryLock( IOLock * lock);
#endif /* !IOLOCKS_CPP */
#else
boolean_t IOLockTryLock( IOLock * lock);
#endif /* XNU_KERNEL_PRIVATE */

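/*
 * Example (illustrative sketch): using IOLockTryLock where blocking is not
 * acceptable. The names fLock, DoWorkLocked and DeferWork are hypothetical.
 *
 *	if (IOLockTryLock(fLock)) {
 *	    DoWorkLocked();		// fast path: the mutex is now held
 *	    IOLockUnlock(fLock);
 *	} else {
 *	    DeferWork();		// lock held elsewhere; do not block here
 *	}
 */
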
/*! @function IOLockUnlock
    @abstract Unlock a mutex.
    @discussion Unlock the mutex and wake any blocked waiters. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the allocated lock. */

#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOLockUnlock( IOLock * lock)
{
    lck_mtx_unlock(lock);
}
#else
void IOLockUnlock( IOLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IOLockUnlock( IOLock * lock);
#endif /* XNU_KERNEL_PRIVATE */

/*! @function IOLockSleep
    @abstract Sleep with mutex unlock and relock.
    @discussion Prepare to sleep, unlock the mutex, and re-acquire it on wakeup. Results are undefined if the caller has not locked the mutex. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the locked lock.
    @param event The event to sleep on.
    @param interType How the sleep can be interrupted.
    @result The wait-result value indicating how the thread was awakened. */

int IOLockSleep( IOLock * lock, void *event, UInt32 interType);

int IOLockSleepDeadline( IOLock * lock, void *event,
                         AbsoluteTime deadline, UInt32 interType);

void IOLockWakeup(IOLock * lock, void *event, bool oneThread);

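/*
 * Example (illustrative sketch): a simple flag wait built on IOLockSleep and
 * IOLockWakeup. The names fLock and fDataReady are hypothetical; THREAD_UNINT
 * is the usual uninterruptible wait constant from the Mach kernel headers.
 *
 *	// waiter: sleep until the flag is set
 *	IOLockLock(fLock);
 *	while (!fDataReady)
 *	    IOLockSleep(fLock, &fDataReady, THREAD_UNINT);	// drops and re-takes fLock
 *	fDataReady = false;
 *	IOLockUnlock(fLock);
 *
 *	// signaller: set the flag and wake one waiting thread
 *	IOLockLock(fLock);
 *	fDataReady = true;
 *	IOLockWakeup(fLock, &fDataReady, true);			// oneThread == true
 *	IOLockUnlock(fLock);
 */
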
#ifdef __APPLE_API_OBSOLETE

/* The following API is deprecated */

typedef enum {
    kIOLockStateUnlocked	= 0,
    kIOLockStateLocked		= 1
} IOLockState;

void IOLockInitWithState( IOLock * lock, IOLockState state);
#define IOLockInit( l ) IOLockInitWithState( l, kIOLockStateUnlocked);

static __inline__ void IOTakeLock( IOLock * lock) { IOLockLock(lock); }
static __inline__ boolean_t IOTryLock( IOLock * lock) { return(IOLockTryLock(lock)); }
static __inline__ void IOUnlock( IOLock * lock) { IOLockUnlock(lock); }

#endif /* __APPLE_API_OBSOLETE */

/*
 * Recursive lock operations
 */

typedef struct _IORecursiveLock IORecursiveLock;

/*! @function IORecursiveLockAlloc
    @abstract Allocates and initializes a recursive lock.
    @discussion Allocates a recursive lock in general purpose memory, and initializes it. Recursive locks function identically to mutexes but allow one thread to lock more than once, with balanced unlocks.
    @result Pointer to the allocated lock, or zero on failure. */

IORecursiveLock * IORecursiveLockAlloc( void );

/*! @function IORecursiveLockFree
    @abstract Frees a recursive lock.
    @discussion Frees a lock allocated with IORecursiveLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void IORecursiveLockFree( IORecursiveLock * lock);

/*! @function IORecursiveLockGetMachLock
    @abstract Accessor to a Mach mutex.
    @discussion Accessor to the Mach mutex.
    @param lock Pointer to the allocated lock. */

lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock);

/*! @function IORecursiveLockLock
    @abstract Lock a recursive lock.
    @discussion Lock the recursive lock. If the lock is held by another thread, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. The lock may be taken recursively by the same thread, with a balanced number of calls to IORecursiveLockUnlock.
    @param lock Pointer to the allocated lock. */

void IORecursiveLockLock( IORecursiveLock * lock);

/*! @function IORecursiveLockTryLock
    @abstract Attempt to lock a recursive lock.
    @discussion Lock the lock if it is currently unlocked, or held by the calling thread, and return true. If the lock is held by another thread, return false. Successful calls to IORecursiveLockTryLock should be balanced with calls to IORecursiveLockUnlock.
    @param lock Pointer to the allocated lock.
    @result True if the lock is now locked by the caller, otherwise false. */

boolean_t IORecursiveLockTryLock( IORecursiveLock * lock);

/*! @function IORecursiveLockUnlock
    @abstract Unlock a recursive lock.
    @discussion Undo one call to IORecursiveLockLock; if the lock is now unlocked, wake any blocked waiters. Results are undefined if the caller does not balance calls to IORecursiveLockLock with IORecursiveLockUnlock. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the allocated lock. */

void IORecursiveLockUnlock( IORecursiveLock * lock);

/*! @function IORecursiveLockHaveLock
    @abstract Check if a recursive lock is held by the calling thread.
    @discussion Returns true if the lock is held by the calling thread; returns false if the lock is unlocked or held by another thread.
    @param lock Pointer to the allocated lock.
    @result True if the calling thread holds the lock, otherwise false. */

boolean_t IORecursiveLockHaveLock( const IORecursiveLock * lock);

extern int IORecursiveLockSleep( IORecursiveLock *_lock,
                                 void *event, UInt32 interType);
extern void IORecursiveLockWakeup( IORecursiveLock *_lock,
                                   void *event, bool oneThread);

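/*
 * Example (illustrative sketch): a recursive lock allows a locked entry point
 * to call a helper that also takes the same lock. The names fRecLock,
 * HelperLocked and EntryPoint are hypothetical.
 *
 *	static void HelperLocked( void )
 *	{
 *	    IORecursiveLockLock(fRecLock);	// same thread: recursion is permitted
 *	    // ... touch shared state ...
 *	    IORecursiveLockUnlock(fRecLock);
 *	}
 *
 *	static void EntryPoint( void )
 *	{
 *	    IORecursiveLockLock(fRecLock);
 *	    HelperLocked();			// would deadlock with a plain IOLock
 *	    IORecursiveLockUnlock(fRecLock);	// unlocks balance the locks
 *	}
 */
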
/*
 * Complex (read/write) lock operations
 */

#ifdef XNU_KERNEL_PRIVATE
typedef lck_rw_t IORWLock;
#else
typedef struct _IORWLock IORWLock;
#endif /* XNU_KERNEL_PRIVATE */

/*! @function IORWLockAlloc
    @abstract Allocates and initializes a read/write lock.
    @discussion Allocates a read/write lock in general purpose memory, and initializes it. Read/write locks provide for multiple readers, one exclusive writer, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IORWLock * IORWLockAlloc( void );

/*! @function IORWLockFree
    @abstract Frees a read/write lock.
    @discussion Frees a lock allocated with IORWLockAlloc. Any blocked waiters will not be woken.
    @param lock Pointer to the allocated lock. */

void IORWLockFree( IORWLock * lock);

/*! @function IORWLockGetMachLock
    @abstract Accessor to a Mach read/write lock.
    @discussion Accessor to the Mach read/write lock.
    @param lock Pointer to the allocated lock. */

lck_rw_t * IORWLockGetMachLock( IORWLock * lock);

/*! @function IORWLockRead
    @abstract Lock a read/write lock for read.
    @discussion Lock the lock for read, allowing multiple readers when there are no writers. If the lock is held for write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
    @param lock Pointer to the allocated lock. */

#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IORWLockRead( IORWLock * lock)
{
    lck_rw_lock_shared( lock);
}
#else
void IORWLockRead( IORWLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IORWLockRead( IORWLock * lock);
#endif /* XNU_KERNEL_PRIVATE */

/*! @function IORWLockWrite
    @abstract Lock a read/write lock for write.
    @discussion Lock the lock for write, allowing one writer exclusive access. If the lock is held for read or write, block waiting for its unlock. This function may block and so should not be called from interrupt level or while a spin lock is held. Locking the lock recursively from one thread, for read or write, can result in deadlock.
    @param lock Pointer to the allocated lock. */

#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IORWLockWrite( IORWLock * lock)
{
    lck_rw_lock_exclusive( lock);
}
#else
void IORWLockWrite( IORWLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IORWLockWrite( IORWLock * lock);
#endif /* XNU_KERNEL_PRIVATE */

/*! @function IORWLockUnlock
    @abstract Unlock a read/write lock.
    @discussion Undo one call to IORWLockRead or IORWLockWrite. Results are undefined if the caller has not locked the lock. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @param lock Pointer to the allocated lock. */

#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IORWLockUnlock( IORWLock * lock)
{
    lck_rw_done( lock);
}
#else
void IORWLockUnlock( IORWLock * lock);
#endif /* !IOLOCKS_CPP */
#else
void IORWLockUnlock( IORWLock * lock);
#endif /* XNU_KERNEL_PRIVATE */

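/*
 * Example (illustrative sketch): a read/write lock protecting a table that is
 * read often and updated rarely. The names fTableLock, LookupEntry and
 * InsertEntry are hypothetical.
 *
 *	// readers may run concurrently
 *	IORWLockRead(fTableLock);
 *	entry = LookupEntry(key);
 *	IORWLockUnlock(fTableLock);
 *
 *	// a writer gets exclusive access
 *	IORWLockWrite(fTableLock);
 *	InsertEntry(key, value);
 *	IORWLockUnlock(fTableLock);
 */
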
#ifdef __APPLE_API_OBSOLETE

/* The following API is deprecated */

static __inline__ void IOReadLock( IORWLock * lock) { IORWLockRead(lock); }
static __inline__ void IOWriteLock( IORWLock * lock) { IORWLockWrite(lock); }
static __inline__ void IORWUnlock( IORWLock * lock) { IORWLockUnlock(lock); }

#endif /* __APPLE_API_OBSOLETE */


/*
 * Simple locks. Cannot block while holding a simple lock.
 */

#ifdef KERNEL_PRIVATE
typedef lck_spin_t IOSimpleLock;
#else
typedef struct _IOSimpleLock IOSimpleLock;
#endif /* KERNEL_PRIVATE */

/*! @function IOSimpleLockAlloc
    @abstract Allocates and initializes a spin lock.
    @discussion Allocates a spin lock in general purpose memory, and initializes it. Spin locks provide non-blocking mutual exclusion for synchronization between thread context and interrupt context, or for multiprocessor synchronization, and are supplied by libkern/locks.h. This function may block and so should not be called from interrupt level or while a spin lock is held.
    @result Pointer to the allocated lock, or zero on failure. */

IOSimpleLock * IOSimpleLockAlloc( void );

/*! @function IOSimpleLockFree
    @abstract Frees a spin lock.
    @discussion Frees a lock allocated with IOSimpleLockAlloc.
    @param lock Pointer to the lock. */

void IOSimpleLockFree( IOSimpleLock * lock );

/*! @function IOSimpleLockGetMachLock
    @abstract Accessor to a Mach spin lock.
    @discussion Accessor to the Mach spin lock.
    @param lock Pointer to the allocated lock. */

lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock);

/*! @function IOSimpleLockInit
    @abstract Initialize a spin lock.
    @discussion Initialize an embedded spin lock, to the unlocked state.
    @param lock Pointer to the lock. */

void IOSimpleLockInit( IOSimpleLock * lock );

/*! @function IOSimpleLockLock
    @abstract Lock a spin lock.
    @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Spin locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
    @param lock Pointer to the lock. */

#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOSimpleLockLock( IOSimpleLock * lock )
{
    lck_spin_lock( lock );
}
#else
void IOSimpleLockLock( IOSimpleLock * lock );
#endif /* !IOLOCKS_CPP */
#else
void IOSimpleLockLock( IOSimpleLock * lock );
#endif /* XNU_KERNEL_PRIVATE */

/*! @function IOSimpleLockTryLock
    @abstract Attempt to lock a spin lock.
    @discussion Lock the spin lock if it is currently unlocked, and return true. If the lock is held, return false. Successful calls to IOSimpleLockTryLock should be balanced with calls to IOSimpleLockUnlock.
    @param lock Pointer to the lock.
    @result True if the lock was unlocked and is now locked by the caller, otherwise false. */

#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
boolean_t IOSimpleLockTryLock( IOSimpleLock * lock )
{
    return( lck_spin_try_lock( lock ) );
}
#else
boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
#endif /* !IOLOCKS_CPP */
#else
boolean_t IOSimpleLockTryLock( IOSimpleLock * lock );
#endif /* XNU_KERNEL_PRIVATE */

/*! @function IOSimpleLockUnlock
    @abstract Unlock a spin lock.
    @discussion Unlock the lock, and restore preemption. Results are undefined if the caller has not locked the lock.
    @param lock Pointer to the lock. */

#ifdef XNU_KERNEL_PRIVATE
#ifndef IOLOCKS_CPP
static __inline__
void IOSimpleLockUnlock( IOSimpleLock * lock )
{
    lck_spin_unlock( lock );
}
#else
void IOSimpleLockUnlock( IOSimpleLock * lock );
#endif /* !IOLOCKS_CPP */
#else
void IOSimpleLockUnlock( IOSimpleLock * lock );
#endif /* XNU_KERNEL_PRIVATE */

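/*
 * Example (illustrative sketch): a spin lock guarding a very short critical
 * section. The names gStatsLock, gPacketCount and CountPacket are hypothetical;
 * the lock would be set up once with IOSimpleLockAlloc().
 *
 *	static IOSimpleLock *	gStatsLock;
 *	static UInt32		gPacketCount;
 *
 *	static void CountPacket( void )
 *	{
 *	    IOSimpleLockLock(gStatsLock);	// disables preemption; keep the section short
 *	    gPacketCount++;
 *	    IOSimpleLockUnlock(gStatsLock);	// restores preemption
 *	}
 */
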
typedef long int IOInterruptState;

/*! @function IOSimpleLockLockDisableInterrupt
    @abstract Lock a spin lock.
    @discussion Lock the spin lock. If the lock is held, spin waiting for its unlock. Simple locks disable preemption, cannot be held across any blocking operation, and should be held for very short periods. When used to synchronize between interrupt context and thread context they should be locked with interrupts disabled - IOSimpleLockLockDisableInterrupt() will do both. Locking the lock recursively from one thread will result in deadlock.
    @param lock Pointer to the lock. */

static __inline__
IOInterruptState IOSimpleLockLockDisableInterrupt( IOSimpleLock * lock )
{
    IOInterruptState state = ml_set_interrupts_enabled( false );
    IOSimpleLockLock( lock );
    return( state );
}

/*! @function IOSimpleLockUnlockEnableInterrupt
    @abstract Unlock a spin lock, and restore interrupt state.
    @discussion Unlock the lock, and restore preemption and interrupts to the state they were in when the lock was taken. Results are undefined if the caller has not locked the lock.
    @param lock Pointer to the lock.
    @param state The interrupt state returned by IOSimpleLockLockDisableInterrupt(). */

static __inline__
void IOSimpleLockUnlockEnableInterrupt( IOSimpleLock * lock,
					IOInterruptState state )
{
    IOSimpleLockUnlock( lock );
    ml_set_interrupts_enabled( state );
}

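/*
 * Example (illustrative sketch): sharing state between an interrupt handler
 * and thread context. Thread-level code disables interrupts while holding the
 * lock so the handler cannot spin forever on the same CPU. The names
 * gEventLock and gPendingEvents are hypothetical.
 *
 *	// thread context
 *	IOInterruptState is = IOSimpleLockLockDisableInterrupt(gEventLock);
 *	UInt32 pending = gPendingEvents;
 *	gPendingEvents = 0;
 *	IOSimpleLockUnlockEnableInterrupt(gEventLock, is);
 *
 *	// interrupt context (interrupts already disabled on this CPU)
 *	IOSimpleLockLock(gEventLock);
 *	gPendingEvents++;
 *	IOSimpleLockUnlock(gEventLock);
 */
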
#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* !__IOKIT_IOLOCKS_H */