// iokit/Kernel/IOLocks.cpp (apple/xnu, xnu-344.21.73)
/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

extern "C" {
#include <kern/simple_lock.h>
#include <machine/machine_routines.h>

IOLock * IOLockAlloc( void )
{
    return( mutex_alloc(ETAP_IO_AHA) );
}

void IOLockFree( IOLock * lock)
{
    mutex_free( lock );
}

void IOLockInitWithState( IOLock * lock, IOLockState state)
{
    if( state == kIOLockStateLocked)
        IOLockLock( lock);
}
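
/*
 * Usage sketch: a caller might protect shared driver state with an IOLock as
 * below. This is an illustrative example only; MyDriverState, its fields, and
 * MyDriverSetValue are hypothetical names, and IOLockUnlock is assumed to be
 * the matching release call declared in IOKit/IOLocks.h.
 *
 *    struct MyDriverState {
 *        IOLock * lock;      // allocated once with IOLockAlloc()
 *        UInt32   value;     // shared data guarded by the lock
 *    };
 *
 *    static void MyDriverSetValue( MyDriverState * state, UInt32 newValue )
 *    {
 *        IOLockLock( state->lock );      // blocks until the mutex is free
 *        state->value = newValue;        // touch shared state only while held
 *        IOLockUnlock( state->lock );    // release, waking any waiter
 *    }
 */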

struct _IORecursiveLock {
    mutex_t * mutex;    // underlying kernel mutex
    thread_t  thread;   // current owner, or 0 when unowned
    UInt32    count;    // recursion depth held by the owner
};

IORecursiveLock * IORecursiveLockAlloc( void )
{
    _IORecursiveLock * lock;

    lock = IONew( _IORecursiveLock, 1);
    if( !lock)
        return( 0 );

    lock->mutex = mutex_alloc(ETAP_IO_AHA);
    if( lock->mutex) {
        lock->thread = 0;
        lock->count  = 0;
    } else {
        IODelete( lock, _IORecursiveLock, 1);
        lock = 0;
    }

    return( (IORecursiveLock *) lock );
}

void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    mutex_free( lock->mutex );
    IODelete( lock, _IORecursiveLock, 1);
}

void IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf())
        lock->count++;
    else {
        mutex_lock( lock->mutex );
        assert( lock->thread == 0 );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}

boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf()) {
        lock->count++;
        return( true );
    } else {
        if( mutex_try( lock->mutex )) {
            assert( lock->thread == 0 );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return( true );
        }
    }
    return( false );
}

void IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf() );

    if( 0 == (--lock->count)) {
        lock->thread = 0;
        mutex_unlock( lock->mutex );
    }
}
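
/*
 * Usage sketch: because the owner is tracked in lock->thread, the same thread
 * may take the lock again from a nested call path; each IORecursiveLockLock
 * (or successful IORecursiveLockTryLock) must be balanced by one
 * IORecursiveLockUnlock before the mutex is actually released. The function
 * names DoOuterWork and DoInnerWork below are hypothetical.
 *
 *    static void DoInnerWork( IORecursiveLock * lock )
 *    {
 *        IORecursiveLockLock( lock );    // re-entry: only bumps lock->count
 *        // ... operate on shared state ...
 *        IORecursiveLockUnlock( lock );  // count drops back, mutex still held
 *    }
 *
 *    static void DoOuterWork( IORecursiveLock * lock )
 *    {
 *        IORecursiveLockLock( lock );    // first acquisition takes the mutex
 *        DoInnerWork( lock );            // safe even though we already own it
 *        IORecursiveLockUnlock( lock );  // count reaches 0, mutex released
 *    }
 */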

boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return( lock->thread == IOThreadSelf());
}

int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());
    assert(lock->count == 1 || interType == THREAD_UNINT);

    lock->count = 0;
    lock->thread = 0;
    res = thread_sleep_mutex((event_t) event, lock->mutex, (int) interType);

    // Must re-establish the recursive lock no matter why we woke up
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
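
/*
 * Usage sketch: IORecursiveLockSleep drops the lock (remembering the recursion
 * count), blocks on the given event, and restores the count on wakeup, so it
 * behaves like a condition-variable wait for recursive-lock holders. The
 * workAvailable flag in this example is hypothetical.
 *
 *    // Consumer: wait until another thread signals that work is available.
 *    IORecursiveLockLock( lock );
 *    while( !workAvailable )
 *        IORecursiveLockSleep( lock, &workAvailable, THREAD_UNINT );
 *    // ... consume the work while still holding the lock ...
 *    IORecursiveLockUnlock( lock );
 *
 *    // Producer: publish work and wake one sleeping consumer.
 *    IORecursiveLockLock( lock );
 *    workAvailable = true;
 *    IORecursiveLockWakeup( lock, &workAvailable, true );  // true = one thread
 *    IORecursiveLockUnlock( lock );
 */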

/*
 * Complex (read/write) lock operations
 */

IORWLock * IORWLockAlloc( void )
{
    IORWLock * lock;

    lock = lock_alloc( true, ETAP_IO_AHA, ETAP_IO_AHA);

    return( lock);
}

void IORWLockFree( IORWLock * lock)
{
    lock_free( lock );
}
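
/*
 * Usage sketch: readers and writers are distinguished at acquisition time.
 * IORWLockRead, IORWLockWrite and IORWLockUnlock are assumed to be the inline
 * wrappers declared in IOKit/IOLocks.h for this lock type; the tableLock
 * pointer below is hypothetical.
 *
 *    IORWLockRead( tableLock );      // many readers may hold this at once
 *    // ... look something up in the shared table ...
 *    IORWLockUnlock( tableLock );
 *
 *    IORWLockWrite( tableLock );     // exclusive: waits for readers to drain
 *    // ... modify the shared table ...
 *    IORWLockUnlock( tableLock );
 */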


/*
 * Spin locks
 */

IOSimpleLock * IOSimpleLockAlloc( void )
{
    IOSimpleLock * lock;

    lock = (IOSimpleLock *) IOMalloc( sizeof(IOSimpleLock));
    if( lock)
        IOSimpleLockInit( lock );

    return( lock );
}

void IOSimpleLockInit( IOSimpleLock * lock)
{
    simple_lock_init( (simple_lock_t) lock, ETAP_IO_AHA );
}

void IOSimpleLockFree( IOSimpleLock * lock )
{
    IOFree( lock, sizeof(IOSimpleLock));
}
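
/*
 * Usage sketch: a spin lock busy-waits rather than blocking, so it suits very
 * short critical sections and code that cannot sleep (for example, paths
 * shared with interrupt context). The interrupt-disabling variants are assumed
 * to be declared in IOKit/IOLocks.h; the fifoLock pointer is hypothetical.
 *
 *    IOInterruptState is;
 *
 *    is = IOSimpleLockLockDisableInterrupt( fifoLock );  // spin, IRQs off
 *    // ... a few instructions on the shared FIFO, nothing that can sleep ...
 *    IOSimpleLockUnlockEnableInterrupt( fifoLock, is );  // restore IRQ state
 */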

} /* extern "C" */
