// iokit/Kernel/IOLocks.cpp (xnu-1699.22.73)
/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

#include <IOKit/IOLocksPrivate.h>

extern "C" {
#include <kern/locks.h>

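/*
 * Mutex lock operations
 *
 * IOLock is backed directly by the Mach mutex (lck_mtx_t); the functions
 * below are thin C wrappers over the lck_mtx_* primitives, allocated from
 * the IOKit lock group. Illustrative usage sketch (IOLockLock and
 * IOLockUnlock are declared in IOKit/IOLocks.h):
 *
 *     IOLock * lock = IOLockAlloc();
 *     if (lock) {
 *         IOLockLock(lock);
 *         // ... critical section ...
 *         IOLockUnlock(lock);
 *         IOLockFree(lock);
 *     }
 */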
void IOLockInitWithState( IOLock * lock, IOLockState state)
{
    if( state == kIOLockStateLocked)
        lck_mtx_lock( lock);
}

IOLock * IOLockAlloc( void )
{
    return( lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}

void IOLockFree( IOLock * lock)
{
    lck_mtx_free( lock, IOLockGroup);
}

lck_mtx_t * IOLockGetMachLock( IOLock * lock)
{
    return( (lck_mtx_t *)lock);
}

int IOLockSleep( IOLock * lock, void *event, UInt32 interType)
{
    return (int) lck_mtx_sleep(lock, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);
}

int IOLockSleepDeadline( IOLock * lock, void *event,
                         AbsoluteTime deadline, UInt32 interType)
{
    return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_DEFAULT, (event_t) event,
                                        (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));
}

void IOLockWakeup(IOLock * lock, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}

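/*
 * IOLockSleep atomically drops the mutex and blocks the calling thread on
 * the given event; the mutex is re-acquired before the call returns.
 * IOLockWakeup unblocks one or all threads sleeping on that event. The
 * usual shape is a condition loop (illustrative sketch; `flag` is a
 * hypothetical condition protected by the lock):
 *
 *     IOLockLock(lock);
 *     while (!flag)
 *         IOLockSleep(lock, &flag, THREAD_UNINT);
 *     IOLockUnlock(lock);
 *
 *     // producer side:
 *     IOLockLock(lock);
 *     flag = true;
 *     IOLockWakeup(lock, &flag, false);
 *     IOLockUnlock(lock);
 */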

struct _IORecursiveLock {
    lck_mtx_t * mutex;      // underlying Mach mutex
    lck_grp_t * group;      // lock group the mutex was allocated from
    thread_t    thread;     // owning thread, or 0 when the lock is free
    UInt32      count;      // recursion depth held by the owner
};

IORecursiveLock * IORecursiveLockAllocWithLockGroup( lck_grp_t * lockGroup )
{
    _IORecursiveLock * lock;

    if( lockGroup == 0 )
        return( 0 );

    lock = IONew( _IORecursiveLock, 1 );
    if( !lock )
        return( 0 );

    lock->mutex = lck_mtx_alloc_init( lockGroup, LCK_ATTR_NULL );
    if( lock->mutex ) {
        lock->group = lockGroup;
        lock->thread = 0;
        lock->count = 0;
    } else {
        IODelete( lock, _IORecursiveLock, 1 );
        lock = 0;
    }

    return( (IORecursiveLock *) lock );
}

IORecursiveLock * IORecursiveLockAlloc( void )
{
    return IORecursiveLockAllocWithLockGroup( IOLockGroup );
}

void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    lck_mtx_free( lock->mutex, lock->group );
    IODelete( lock, _IORecursiveLock, 1 );
}

lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock )
{
    return( lock->mutex );
}

void IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf())
        lock->count++;
    else {
        lck_mtx_lock( lock->mutex );
        assert( lock->thread == 0 );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}

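/*
 * A thread that already owns the lock may take it again: each nested
 * IORecursiveLockLock just bumps the count, and the mutex is only released
 * when the outermost IORecursiveLockUnlock drops the count back to zero.
 * Illustrative sketch:
 *
 *     IORecursiveLockLock(l);      // acquires mutex, count == 1
 *     IORecursiveLockLock(l);      // same thread, count == 2
 *     IORecursiveLockUnlock(l);    // count == 1, mutex still held
 *     IORecursiveLockUnlock(l);    // count == 0, mutex released
 */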
boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf()) {
        lock->count++;
        return( true );
    } else {
        if( lck_mtx_try_lock( lock->mutex )) {
            assert( lock->thread == 0 );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return( true );
        }
    }
    return( false );
}

void IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf() );

    if( 0 == (--lock->count)) {
        lock->thread = 0;
        lck_mtx_unlock( lock->mutex );
    }
}

boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return( lock->thread == IOThreadSelf());
}

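/*
 * Sleeping on a recursive lock releases the mutex exactly once regardless
 * of the recursion depth, so the depth is saved before lck_mtx_sleep and
 * restored once the thread has been woken and the mutex re-acquired.
 */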
int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType);

    // Must re-establish the recursive lock no matter why we woke up;
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

int IORecursiveLockSleepDeadline( IORecursiveLock * _lock, void *event,
                                  AbsoluteTime deadline, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());

    lock->count = 0;
    lock->thread = 0;
    res = lck_mtx_sleep_deadline(lock->mutex, LCK_SLEEP_DEFAULT, (event_t) event,
                                 (wait_interrupt_t) interType, __OSAbsoluteTime(deadline));

    // Must re-establish the recursive lock no matter why we woke up;
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}

/*
 * Complex (read/write) lock operations
 */

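/*
 * IORWLock is backed directly by the Mach read/write lock (lck_rw_t); the
 * lock and unlock operations themselves (IORWLockRead, IORWLockWrite,
 * IORWLockUnlock) are inline wrappers declared in IOKit/IOLocks.h.
 * Illustrative usage sketch:
 *
 *     IORWLock * rwl = IORWLockAlloc();
 *     IORWLockRead(rwl);      // shared: many readers may hold it at once
 *     // ... read shared state ...
 *     IORWLockUnlock(rwl);
 *
 *     IORWLockWrite(rwl);     // exclusive: one writer, no readers
 *     // ... mutate shared state ...
 *     IORWLockUnlock(rwl);
 */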
IORWLock * IORWLockAlloc( void )
{
    return( lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL) );
}

void IORWLockFree( IORWLock * lock)
{
    lck_rw_free( lock, IOLockGroup);
}

lck_rw_t * IORWLockGetMachLock( IORWLock * lock)
{
    return( (lck_rw_t *)lock);
}

/*
 * Spin locks
 */

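/*
 * IOSimpleLock wraps the Mach spin lock (lck_spin_t). Spin locks busy-wait
 * rather than block, so holders must not sleep. When a lock is shared with
 * interrupt context, use the interrupt-disabling variants declared in
 * IOKit/IOLocks.h. Illustrative usage sketch:
 *
 *     IOSimpleLock * sl = IOSimpleLockAlloc();
 *     IOInterruptState is = IOSimpleLockLockDisableInterrupt(sl);
 *     // ... short critical section, no blocking calls ...
 *     IOSimpleLockUnlockEnableInterrupt(sl, is);
 */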
IOSimpleLock * IOSimpleLockAlloc( void )
{
    return( lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL) );
}

void IOSimpleLockInit( IOSimpleLock * lock)
{
    lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL);
}

void IOSimpleLockFree( IOSimpleLock * lock )
{
    lck_spin_free( lock, IOLockGroup);
}

lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock)
{
    return( (lck_spin_t *)lock);
}

} /* extern "C" */