/* NOTE(review): removed git-blame table residue ("Commit | Line | Data") left over
 * from an extraction pass; it was not part of the original source file. */
1 | /* | |
2 | * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* | |
23 | * Copyright (c) 1998 Apple Computer, Inc. All rights reserved. | |
24 | * | |
25 | * HISTORY | |
26 | * | |
27 | */ | |
28 | ||
29 | ||
30 | #define IOLOCKS_CPP 1 | |
31 | ||
32 | #include <IOKit/system.h> | |
33 | ||
34 | #include <IOKit/IOReturn.h> | |
35 | #include <IOKit/IOLib.h> | |
36 | #include <IOKit/assert.h> | |
37 | ||
38 | extern "C" { | |
39 | #include <kern/locks.h> | |
40 | ||
41 | void IOLockInitWithState( IOLock * lock, IOLockState state) | |
42 | { | |
43 | if( state == kIOLockStateLocked) | |
44 | lck_mtx_lock( lock); | |
45 | } | |
46 | ||
47 | IOLock * IOLockAlloc( void ) | |
48 | { | |
49 | return( lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL) ); | |
50 | } | |
51 | ||
52 | void IOLockFree( IOLock * lock) | |
53 | { | |
54 | lck_mtx_free( lock, IOLockGroup); | |
55 | } | |
56 | ||
57 | lck_mtx_t * IOLockGetMachLock( IOLock * lock) | |
58 | { | |
59 | return( (lck_mtx_t *)lock); | |
60 | } | |
61 | ||
62 | int IOLockSleep( IOLock * lock, void *event, UInt32 interType) | |
63 | { | |
64 | return (int) lck_mtx_sleep(lock, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType); | |
65 | } | |
66 | ||
67 | int IOLockSleepDeadline( IOLock * lock, void *event, | |
68 | AbsoluteTime deadline, UInt32 interType) | |
69 | { | |
70 | return (int) lck_mtx_sleep_deadline(lock, LCK_SLEEP_DEFAULT, (event_t) event, | |
71 | (wait_interrupt_t) interType, __OSAbsoluteTime(deadline)); | |
72 | } | |
73 | ||
74 | void IOLockWakeup(IOLock * lock, void *event, bool oneThread) | |
75 | { | |
76 | thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED); | |
77 | } | |
78 | ||
79 | ||
/*
 * Recursive mutex: a plain Mach mutex plus owner bookkeeping.
 * The mutex is held exactly while count > 0; thread/count are only
 * written by the owning thread (or while the mutex is held).
 */
struct _IORecursiveLock {
    lck_mtx_t *mutex;   /* underlying Mach mutex */
    thread_t   thread;  /* current owner, or 0 when unowned */
    UInt32     count;   /* recursion depth; 0 when unowned */
};
85 | ||
86 | IORecursiveLock * IORecursiveLockAlloc( void ) | |
87 | { | |
88 | _IORecursiveLock * lock; | |
89 | ||
90 | lock = IONew( _IORecursiveLock, 1); | |
91 | if( !lock) | |
92 | return( 0 ); | |
93 | ||
94 | lock->mutex = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL); | |
95 | if( lock->mutex) { | |
96 | lock->thread = 0; | |
97 | lock->count = 0; | |
98 | } else { | |
99 | IODelete( lock, _IORecursiveLock, 1); | |
100 | lock = 0; | |
101 | } | |
102 | ||
103 | return( (IORecursiveLock *) lock ); | |
104 | } | |
105 | ||
106 | void IORecursiveLockFree( IORecursiveLock * _lock ) | |
107 | { | |
108 | _IORecursiveLock * lock = (_IORecursiveLock *)_lock; | |
109 | ||
110 | lck_mtx_free( lock->mutex , IOLockGroup); | |
111 | IODelete( lock, _IORecursiveLock, 1); | |
112 | } | |
113 | ||
114 | lck_mtx_t * IORecursiveLockGetMachLock( IORecursiveLock * lock) | |
115 | { | |
116 | return( lock->mutex); | |
117 | } | |
118 | ||
119 | void IORecursiveLockLock( IORecursiveLock * _lock) | |
120 | { | |
121 | _IORecursiveLock * lock = (_IORecursiveLock *)_lock; | |
122 | ||
123 | if( lock->thread == IOThreadSelf()) | |
124 | lock->count++; | |
125 | else { | |
126 | lck_mtx_lock( lock->mutex ); | |
127 | assert( lock->thread == 0 ); | |
128 | assert( lock->count == 0 ); | |
129 | lock->thread = IOThreadSelf(); | |
130 | lock->count = 1; | |
131 | } | |
132 | } | |
133 | ||
134 | boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock) | |
135 | { | |
136 | _IORecursiveLock * lock = (_IORecursiveLock *)_lock; | |
137 | ||
138 | if( lock->thread == IOThreadSelf()) { | |
139 | lock->count++; | |
140 | return( true ); | |
141 | } else { | |
142 | if( lck_mtx_try_lock( lock->mutex )) { | |
143 | assert( lock->thread == 0 ); | |
144 | assert( lock->count == 0 ); | |
145 | lock->thread = IOThreadSelf(); | |
146 | lock->count = 1; | |
147 | return( true ); | |
148 | } | |
149 | } | |
150 | return( false ); | |
151 | } | |
152 | ||
153 | void IORecursiveLockUnlock( IORecursiveLock * _lock) | |
154 | { | |
155 | _IORecursiveLock * lock = (_IORecursiveLock *)_lock; | |
156 | ||
157 | assert( lock->thread == IOThreadSelf() ); | |
158 | ||
159 | if( 0 == (--lock->count)) { | |
160 | lock->thread = 0; | |
161 | lck_mtx_unlock( lock->mutex ); | |
162 | } | |
163 | } | |
164 | ||
165 | boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock) | |
166 | { | |
167 | _IORecursiveLock * lock = (_IORecursiveLock *)_lock; | |
168 | ||
169 | return( lock->thread == IOThreadSelf()); | |
170 | } | |
171 | ||
172 | int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType) | |
173 | { | |
174 | _IORecursiveLock * lock = (_IORecursiveLock *)_lock; | |
175 | UInt32 count = lock->count; | |
176 | int res; | |
177 | ||
178 | assert(lock->thread == IOThreadSelf()); | |
179 | assert(lock->count == 1 || interType == THREAD_UNINT); | |
180 | ||
181 | lock->count = 0; | |
182 | lock->thread = 0; | |
183 | res = lck_mtx_sleep(lock->mutex, LCK_SLEEP_DEFAULT, (event_t) event, (wait_interrupt_t) interType); | |
184 | ||
185 | // Must re-establish the recursive lock no matter why we woke up | |
186 | // otherwise we would potentially leave the return path corrupted. | |
187 | assert(lock->thread == 0); | |
188 | assert(lock->count == 0); | |
189 | lock->thread = IOThreadSelf(); | |
190 | lock->count = count; | |
191 | return res; | |
192 | } | |
193 | ||
194 | void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread) | |
195 | { | |
196 | thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED); | |
197 | } | |
198 | ||
199 | /* | |
200 | * Complex (read/write) lock operations | |
201 | */ | |
202 | ||
203 | IORWLock * IORWLockAlloc( void ) | |
204 | { | |
205 | return( lck_rw_alloc_init(IOLockGroup, LCK_ATTR_NULL) ); | |
206 | } | |
207 | ||
208 | void IORWLockFree( IORWLock * lock) | |
209 | { | |
210 | lck_rw_free( lock, IOLockGroup); | |
211 | } | |
212 | ||
213 | lck_rw_t * IORWLockGetMachLock( IORWLock * lock) | |
214 | { | |
215 | return( (lck_rw_t *)lock); | |
216 | } | |
217 | ||
218 | ||
219 | /* | |
220 | * Spin locks | |
221 | */ | |
222 | ||
223 | IOSimpleLock * IOSimpleLockAlloc( void ) | |
224 | { | |
225 | return( lck_spin_alloc_init( IOLockGroup, LCK_ATTR_NULL) ); | |
226 | } | |
227 | ||
228 | void IOSimpleLockInit( IOSimpleLock * lock) | |
229 | { | |
230 | lck_spin_init( lock, IOLockGroup, LCK_ATTR_NULL); | |
231 | } | |
232 | ||
233 | void IOSimpleLockFree( IOSimpleLock * lock ) | |
234 | { | |
235 | lck_spin_free( lock, IOLockGroup); | |
236 | } | |
237 | ||
238 | lck_spin_t * IOSimpleLockGetMachLock( IOSimpleLock * lock) | |
239 | { | |
240 | return( (lck_spin_t *)lock); | |
241 | } | |
242 | ||
243 | } /* extern "C" */ | |
244 | ||
245 |