/* iokit/Kernel/IOLocks.cpp — IOKit locking primitives */
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 *
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <kern/locks.h>
42 void IOLockInitWithState( IOLock
* lock
, IOLockState state
)
44 if( state
== kIOLockStateLocked
)
48 IOLock
* IOLockAlloc( void )
50 return( lck_mtx_alloc_init(IOLockGroup
, LCK_ATTR_NULL
) );
53 void IOLockFree( IOLock
* lock
)
55 lck_mtx_free( lock
, IOLockGroup
);
58 lck_mtx_t
* IOLockGetMachLock( IOLock
* lock
)
60 return( (lck_mtx_t
*)lock
);
63 int IOLockSleep( IOLock
* lock
, void *event
, UInt32 interType
)
65 return (int) lck_mtx_sleep(lock
, LCK_SLEEP_DEFAULT
, (event_t
) event
, (wait_interrupt_t
) interType
);
68 int IOLockSleepDeadline( IOLock
* lock
, void *event
,
69 AbsoluteTime deadline
, UInt32 interType
)
71 return (int) lck_mtx_sleep_deadline(lock
, LCK_SLEEP_DEFAULT
, (event_t
) event
,
72 (wait_interrupt_t
) interType
, __OSAbsoluteTime(deadline
));
75 void IOLockWakeup(IOLock
* lock
, void *event
, bool oneThread
)
77 thread_wakeup_prim((event_t
) event
, oneThread
, THREAD_AWAKENED
);
81 struct _IORecursiveLock
{
87 IORecursiveLock
* IORecursiveLockAlloc( void )
89 _IORecursiveLock
* lock
;
91 lock
= IONew( _IORecursiveLock
, 1);
95 lock
->mutex
= lck_mtx_alloc_init(IOLockGroup
, LCK_ATTR_NULL
);
100 IODelete( lock
, _IORecursiveLock
, 1);
104 return( (IORecursiveLock
*) lock
);
107 void IORecursiveLockFree( IORecursiveLock
* _lock
)
109 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
111 lck_mtx_free( lock
->mutex
, IOLockGroup
);
112 IODelete( lock
, _IORecursiveLock
, 1);
115 lck_mtx_t
* IORecursiveLockGetMachLock( IORecursiveLock
* lock
)
117 return( lock
->mutex
);
120 void IORecursiveLockLock( IORecursiveLock
* _lock
)
122 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
124 if( lock
->thread
== IOThreadSelf())
127 lck_mtx_lock( lock
->mutex
);
128 assert( lock
->thread
== 0 );
129 assert( lock
->count
== 0 );
130 lock
->thread
= IOThreadSelf();
135 boolean_t
IORecursiveLockTryLock( IORecursiveLock
* _lock
)
137 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
139 if( lock
->thread
== IOThreadSelf()) {
143 if( lck_mtx_try_lock( lock
->mutex
)) {
144 assert( lock
->thread
== 0 );
145 assert( lock
->count
== 0 );
146 lock
->thread
= IOThreadSelf();
154 void IORecursiveLockUnlock( IORecursiveLock
* _lock
)
156 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
158 assert( lock
->thread
== IOThreadSelf() );
160 if( 0 == (--lock
->count
)) {
162 lck_mtx_unlock( lock
->mutex
);
166 boolean_t
IORecursiveLockHaveLock( const IORecursiveLock
* _lock
)
168 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
170 return( lock
->thread
== IOThreadSelf());
173 int IORecursiveLockSleep(IORecursiveLock
*_lock
, void *event
, UInt32 interType
)
175 _IORecursiveLock
* lock
= (_IORecursiveLock
*)_lock
;
176 UInt32 count
= lock
->count
;
179 assert(lock
->thread
== IOThreadSelf());
180 assert(lock
->count
== 1 || interType
== THREAD_UNINT
);
184 res
= lck_mtx_sleep(lock
->mutex
, LCK_SLEEP_DEFAULT
, (event_t
) event
, (wait_interrupt_t
) interType
);
186 // Must re-establish the recursive lock no matter why we woke up
187 // otherwise we would potentially leave the return path corrupted.
188 assert(lock
->thread
== 0);
189 assert(lock
->count
== 0);
190 lock
->thread
= IOThreadSelf();
195 void IORecursiveLockWakeup(IORecursiveLock
*, void *event
, bool oneThread
)
197 thread_wakeup_prim((event_t
) event
, oneThread
, THREAD_AWAKENED
);
/* Complex (read/write) lock operations */
204 IORWLock
* IORWLockAlloc( void )
206 return( lck_rw_alloc_init(IOLockGroup
, LCK_ATTR_NULL
) );
209 void IORWLockFree( IORWLock
* lock
)
211 lck_rw_free( lock
, IOLockGroup
);
214 lck_rw_t
* IORWLockGetMachLock( IORWLock
* lock
)
216 return( (lck_rw_t
*)lock
);
224 IOSimpleLock
* IOSimpleLockAlloc( void )
226 return( lck_spin_alloc_init( IOLockGroup
, LCK_ATTR_NULL
) );
229 void IOSimpleLockInit( IOSimpleLock
* lock
)
231 lck_spin_init( lock
, IOLockGroup
, LCK_ATTR_NULL
);
234 void IOSimpleLockFree( IOSimpleLock
* lock
)
236 lck_spin_free( lock
, IOLockGroup
);
239 lck_spin_t
* IOSimpleLockGetMachLock( IOSimpleLock
* lock
)
241 return( (lck_spin_t
*)lock
);