/* iokit/Kernel/IOLocks.cpp — IOKit locking primitive wrappers over Mach locks. */
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 *
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
32 #include <IOKit/system.h>
34 #include <IOKit/IOReturn.h>
35 #include <IOKit/IOLib.h>
36 #include <IOKit/assert.h>
39 #include <kern/locks.h>
41 void IOLockInitWithState( IOLock
* lock
, IOLockState state
)
43 if( state
== kIOLockStateLocked
)
47 IOLock
* IOLockAlloc( void )
49 return( lck_mtx_alloc_init(IOLockGroup
, LCK_ATTR_NULL
) );
52 void IOLockFree( IOLock
* lock
)
54 lck_mtx_free( lock
, IOLockGroup
);
57 lck_mtx_t
* IOLockGetMachLock( IOLock
* lock
)
59 return( (lck_mtx_t
*)lock
);
62 int IOLockSleep( IOLock
* lock
, void *event
, UInt32 interType
)
64 return (int) lck_mtx_sleep(lock
, LCK_SLEEP_DEFAULT
, (event_t
) event
, (wait_interrupt_t
) interType
);
67 int IOLockSleepDeadline( IOLock
* lock
, void *event
,
68 AbsoluteTime deadline
, UInt32 interType
)
70 return (int) lck_mtx_sleep_deadline(lock
, LCK_SLEEP_DEFAULT
, (event_t
) event
,
71 (wait_interrupt_t
) interType
, __OSAbsoluteTime(deadline
));
74 void IOLockWakeup(IOLock
* lock
, void *event
, bool oneThread
)
76 thread_wakeup_prim((event_t
) event
, oneThread
, THREAD_AWAKENED
);
80 struct _IORecursiveLock
{
86 IORecursiveLock
* IORecursiveLockAlloc( void )
88 _IORecursiveLock
* lock
;
90 lock
= IONew( _IORecursiveLock
, 1);
94 lock
->mutex
= lck_mtx_alloc_init(IOLockGroup
, LCK_ATTR_NULL
);
99 IODelete( lock
, _IORecursiveLock
, 1);
103 return( (IORecursiveLock
*) lock
);
106 void IORecursiveLockFree( IORecursiveLock
* _lock
)
108 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
110 lck_mtx_free( lock
->mutex
, IOLockGroup
);
111 IODelete( lock
, _IORecursiveLock
, 1);
114 lck_mtx_t
* IORecursiveLockGetMachLock( IORecursiveLock
* lock
)
116 return( lock
->mutex
);
119 void IORecursiveLockLock( IORecursiveLock
* _lock
)
121 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
123 if( lock
->thread
== IOThreadSelf())
126 lck_mtx_lock( lock
->mutex
);
127 assert( lock
->thread
== 0 );
128 assert( lock
->count
== 0 );
129 lock
->thread
= IOThreadSelf();
134 boolean_t
IORecursiveLockTryLock( IORecursiveLock
* _lock
)
136 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
138 if( lock
->thread
== IOThreadSelf()) {
142 if( lck_mtx_try_lock( lock
->mutex
)) {
143 assert( lock
->thread
== 0 );
144 assert( lock
->count
== 0 );
145 lock
->thread
= IOThreadSelf();
153 void IORecursiveLockUnlock( IORecursiveLock
* _lock
)
155 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
157 assert( lock
->thread
== IOThreadSelf() );
159 if( 0 == (--lock
->count
)) {
161 lck_mtx_unlock( lock
->mutex
);
165 boolean_t
IORecursiveLockHaveLock( const IORecursiveLock
* _lock
)
167 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
169 return( lock
->thread
== IOThreadSelf());
172 int IORecursiveLockSleep(IORecursiveLock
*_lock
, void *event
, UInt32 interType
)
174 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
175 UInt32 count
= lock
->count
;
178 assert(lock
->thread
== IOThreadSelf());
179 assert(lock
->count
== 1 || interType
== THREAD_UNINT
);
183 res
= lck_mtx_sleep(lock
->mutex
, LCK_SLEEP_DEFAULT
, (event_t
) event
, (wait_interrupt_t
) interType
);
185 // Must re-establish the recursive lock no matter why we woke up
186 // otherwise we would potentially leave the return path corrupted.
187 assert(lock
->thread
== 0);
188 assert(lock
->count
== 0);
189 lock
->thread
= IOThreadSelf();
194 void IORecursiveLockWakeup(IORecursiveLock
*, void *event
, bool oneThread
)
196 thread_wakeup_prim((event_t
) event
, oneThread
, THREAD_AWAKENED
);
/*
 * Complex (read/write) lock operations
 */
203 IORWLock
* IORWLockAlloc( void )
205 return( lck_rw_alloc_init(IOLockGroup
, LCK_ATTR_NULL
) );
208 void IORWLockFree( IORWLock
* lock
)
210 lck_rw_free( lock
, IOLockGroup
);
213 lck_rw_t
* IORWLockGetMachLock( IORWLock
* lock
)
215 return( (lck_rw_t
*)lock
);
223 IOSimpleLock
* IOSimpleLockAlloc( void )
225 return( lck_spin_alloc_init( IOLockGroup
, LCK_ATTR_NULL
) );
228 void IOSimpleLockInit( IOSimpleLock
* lock
)
230 lck_spin_init( lock
, IOLockGroup
, LCK_ATTR_NULL
);
233 void IOSimpleLockFree( IOSimpleLock
* lock
)
235 lck_spin_free( lock
, IOLockGroup
);
238 lck_spin_t
* IOSimpleLockGetMachLock( IOSimpleLock
* lock
)
240 return( (lck_spin_t
*)lock
);