/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */

#include <IOKit/system.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/assert.h>

extern "C" {
#include <kern/simple_lock.h>
#include <machine/machine_routines.h>

/*
 * Mutex lock operations. An IOLock is a Mach mutex; the lock/unlock
 * entry points are defined in IOKit/IOLocks.h.
 */

IOLock * IOLockAlloc( void )
{
    return( mutex_alloc(ETAP_IO_AHA) );
}

void IOLockFree( IOLock * lock)
{
    mutex_free( lock );
}

void IOLockInitWithState( IOLock * lock, IOLockState state)
{
    if( state == kIOLockStateLocked)
        IOLockLock( lock);
}
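
/*
 * Usage sketch (illustrative only, not part of this file). IOLockLock
 * and IOLockUnlock come from IOKit/IOLocks.h; the guarded counter is a
 * made-up example:
 *
 *     IOLock * gate = IOLockAlloc();
 *     if( gate) {
 *         IOLockLock( gate );
 *         sharedCount++;              // hypothetical shared state
 *         IOLockUnlock( gate );
 *         IOLockFree( gate );
 *     }
 */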

struct _IORecursiveLock {
    mutex_t *	mutex;		// underlying non-recursive mutex
    thread_t	thread;		// owning thread, or 0 when unowned
    UInt32	count;		// recursion depth held by the owner
};

IORecursiveLock * IORecursiveLockAlloc( void )
{
    _IORecursiveLock * lock;

    lock = IONew( _IORecursiveLock, 1);
    if( !lock)
        return( 0 );

    lock->mutex = mutex_alloc(ETAP_IO_AHA);
    if( lock->mutex) {
        lock->thread = 0;
        lock->count = 0;
    } else {
        IODelete( lock, _IORecursiveLock, 1);
        lock = 0;
    }

    return( (IORecursiveLock *) lock );
}

void IORecursiveLockFree( IORecursiveLock * _lock )
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    mutex_free( lock->mutex );
    IODelete( lock, _IORecursiveLock, 1);
}

void IORecursiveLockLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    // Safe to test without the mutex: thread can only equal
    // IOThreadSelf() if this thread already holds the lock.
    if( lock->thread == IOThreadSelf())
        lock->count++;
    else {
        mutex_lock( lock->mutex );
        assert( lock->thread == 0 );
        assert( lock->count == 0 );
        lock->thread = IOThreadSelf();
        lock->count = 1;
    }
}

boolean_t IORecursiveLockTryLock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    if( lock->thread == IOThreadSelf()) {
        lock->count++;
        return( true );
    } else {
        if( mutex_try( lock->mutex )) {
            assert( lock->thread == 0 );
            assert( lock->count == 0 );
            lock->thread = IOThreadSelf();
            lock->count = 1;
            return( true );
        }
    }
    return( false );
}

void IORecursiveLockUnlock( IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    assert( lock->thread == IOThreadSelf() );

    // Release the mutex only when the outermost hold is dropped.
    if( 0 == (--lock->count)) {
        lock->thread = 0;
        mutex_unlock( lock->mutex );
    }
}

boolean_t IORecursiveLockHaveLock( const IORecursiveLock * _lock)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;

    return( lock->thread == IOThreadSelf());
}

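/*
 * Usage sketch (illustrative only). A recursive lock lets the owning
 * thread re-enter; every Lock must be balanced by an Unlock:
 *
 *     IORecursiveLock * l = IORecursiveLockAlloc();
 *     IORecursiveLockLock( l );
 *     IORecursiveLockLock( l );          // re-entry by the owner, count == 2
 *     assert( IORecursiveLockHaveLock( l ));
 *     IORecursiveLockUnlock( l );
 *     IORecursiveLockUnlock( l );        // count reaches 0, mutex released
 *     IORecursiveLockFree( l );
 */
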
int IORecursiveLockSleep(IORecursiveLock *_lock, void *event, UInt32 interType)
{
    _IORecursiveLock * lock = (_IORecursiveLock *)_lock;
    UInt32 count = lock->count;
    int res;

    assert(lock->thread == IOThreadSelf());
    assert(lock->count == 1 || interType == THREAD_UNINT);

    // Surrender ownership entirely before sleeping on the mutex.
    lock->count = 0;
    lock->thread = 0;
    res = thread_sleep_mutex((event_t) event, lock->mutex, (int) interType);

    // Must re-establish the recursive lock no matter why we woke up,
    // otherwise we would potentially leave the return path corrupted.
    assert(lock->thread == 0);
    assert(lock->count == 0);
    lock->thread = IOThreadSelf();
    lock->count = count;
    return res;
}

void IORecursiveLockWakeup(IORecursiveLock *, void *event, bool oneThread)
{
    thread_wakeup_prim((event_t) event, oneThread, THREAD_AWAKENED);
}
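
/*
 * Usage sketch (illustrative only): a condition-wait built on
 * Sleep/Wakeup. The queue "q" and its helpers are hypothetical:
 *
 *     // consumer, lock held
 *     while( queueEmpty( q ))
 *         IORecursiveLockSleep( q->lock, q, THREAD_UNINT );
 *
 *     // producer, lock held
 *     enqueue( q, item );
 *     IORecursiveLockWakeup( q->lock, q, false );    // wake all waiters
 */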

/*
 * Complex (read/write) lock operations
 */

IORWLock * IORWLockAlloc( void )
{
    IORWLock * lock;

    // first argument of lock_alloc() is can_sleep
    lock = lock_alloc( true, ETAP_IO_AHA, ETAP_IO_AHA);

    return( lock);
}

void IORWLockFree( IORWLock * lock)
{
    lock_free( lock );
}
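
/*
 * Usage sketch (illustrative only). IORWLockRead, IORWLockWrite and
 * IORWLockUnlock come from IOKit/IOLocks.h:
 *
 *     IORWLock * rw = IORWLockAlloc();
 *     IORWLockRead( rw );        // many concurrent readers
 *     // ... read shared data ...
 *     IORWLockUnlock( rw );
 *     IORWLockWrite( rw );       // exclusive writer
 *     // ... modify shared data ...
 *     IORWLockUnlock( rw );
 *     IORWLockFree( rw );
 */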


/*
 * Spin locks
 */

IOSimpleLock * IOSimpleLockAlloc( void )
{
    IOSimpleLock * lock;

    lock = (IOSimpleLock *) IOMalloc( sizeof(IOSimpleLock));
    if( lock)
        IOSimpleLockInit( lock );

    return( lock );
}

void IOSimpleLockInit( IOSimpleLock * lock)
{
    simple_lock_init( (simple_lock_t) lock, ETAP_IO_AHA );
}

void IOSimpleLockFree( IOSimpleLock * lock )
{
    IOFree( lock, sizeof(IOSimpleLock));
}
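
/*
 * Usage sketch (illustrative only). IOSimpleLockLockDisableInterrupt and
 * IOSimpleLockUnlockEnableInterrupt come from IOKit/IOLocks.h; use them
 * when the lock is shared with an interrupt context:
 *
 *     IOSimpleLock * spin = IOSimpleLockAlloc();
 *     IOInterruptState is = IOSimpleLockLockDisableInterrupt( spin );
 *     // ... brief critical section, no blocking allowed ...
 *     IOSimpleLockUnlockEnableInterrupt( spin, is );
 *     IOSimpleLockFree( spin );
 */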

} /* extern "C" */