/*
 * osfmk/kern/simple_lock.h -- from the apple/xnu source
 * distribution (xnu-517.11.1).
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (C) 1998 Apple Computer
24 * All Rights Reserved
25 */
26 /*
27 * @OSF_COPYRIGHT@
28 */
29 /*
30 * Mach Operating System
31 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
32 * All Rights Reserved.
33 *
34 * Permission to use, copy, modify and distribute this software and its
35 * documentation is hereby granted, provided that both the copyright
36 * notice and this permission notice appear in all copies of the
37 * software, derivative works or modified versions, and any portions
38 * thereof, and that both notices appear in supporting documentation.
39 *
40 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
41 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
42 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
43 *
44 * Carnegie Mellon requests users of this software to return to
45 *
46 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
47 * School of Computer Science
48 * Carnegie Mellon University
49 * Pittsburgh PA 15213-3890
50 *
51 * any improvements or extensions that they make and grant Carnegie Mellon
52 * the rights to redistribute these changes.
53 */
54 /*
55 * File: kern/simple_lock.h (derived from kern/lock.h)
56 * Author: Avadis Tevanian, Jr., Michael Wayne Young
57 * Date: 1985
58 *
59 * Simple Locking primitives definitions
60 */
61
62 #ifndef _SIMPLE_LOCK_H_
63 #define _SIMPLE_LOCK_H_
64
65 /*
66 * Configuration variables:
67 *
68 *
69 * MACH_LDEBUG: record pc and thread of callers, turn on
70 * all lock debugging.
71 *
72 *
73 * ETAP: The Event Trace Analysis Package (ETAP) monitors
74 * and records micro-kernel lock behavior and general
75 * kernel events. ETAP supports two levels of
76 * tracing for locks:
77 * - cumulative (ETAP_LOCK_ACCUMULATE)
78 * - monitored (ETAP_LOCK_MONITOR)
79 *
80 * Note: If either level of tracing is configured then
81 * ETAP_LOCK_TRACE is automatically defined to
82 * equal one.
83 *
84 * Several macros are added throughout the lock code to
85 * allow for convenient configuration.
86 */
87
88 #include <mach/boolean.h>
89 #include <kern/kern_types.h>
90
91 #include <kern/simple_lock_types.h>
92 #include <machine/lock.h>
93 #include <mach/etap_events.h>
94 #include <mach/etap.h>
95
96 /*
97 * The Mach lock package exports the following simple lock abstractions:
98 *
99 * Lock Type Properties
100 * hw_lock lowest level hardware abstraction; atomic,
101 * non-blocking, mutual exclusion; supports pre-emption
102 * usimple non-blocking spinning lock, available in all
103 * kernel configurations; may be used from thread
104 * and interrupt contexts; supports debugging,
105 * statistics and pre-emption
106 * simple non-blocking spinning lock, intended for SMP
107 * synchronization (vanishes on a uniprocessor);
108 * supports debugging, statistics and pre-emption
109 *
110 * NOTES TO IMPLEMENTORS: there are essentially two versions
111 * of the lock package. One is portable, written in C, and
112 * supports all of the various flavors of debugging, statistics,
113 * uni- versus multi-processor, pre-emption, etc. The "other"
114 * is whatever set of lock routines is provided by machine-dependent
115 * code. Presumably, the machine-dependent package is heavily
116 * optimized and meant for production kernels.
117 *
118 * We encourage implementors to focus on highly-efficient,
119 * production implementations of machine-dependent lock code,
120 * and use the portable lock package for everything else.
121 */
122
123 #include <sys/appleapiopts.h>
124
125 #ifdef __APPLE_API_PRIVATE
126
127 #ifdef MACH_KERNEL_PRIVATE
128
129 /*
130 * Mach always initializes locks, even those statically
131 * allocated.
132 *
133 * The conditional acquisition call, hw_lock_try,
134 * must return non-zero on success and zero on failure.
135 *
136 * The hw_lock_held operation returns non-zero if the
137 * lock is set, zero if the lock is clear. This operation
138 * should be implemented using an ordinary memory read,
139 * rather than a special atomic instruction, allowing
140 * a processor to spin in cache waiting for the lock to
141 * be released without chewing up bus cycles.
142 */
extern void hw_lock_init(hw_lock_t);		/* mandatory, even for statically allocated locks */
extern void hw_lock_lock(hw_lock_t);		/* acquire, spinning until available */
extern void hw_lock_unlock(hw_lock_t);		/* release */
extern unsigned int hw_lock_to(hw_lock_t, unsigned int);	/* NOTE(review): presumably acquire with a timeout; confirm meaning/units of 2nd arg against machine-dependent code */
extern unsigned int hw_lock_try(hw_lock_t);	/* conditional acquire: non-zero on success, zero on failure */
extern unsigned int hw_lock_held(hw_lock_t);	/* non-zero if lock set; ordinary read, not an atomic op */
149
150 #endif /* MACH_KERNEL_PRIVATE */
151
152 #endif /* __APPLE_API_PRIVATE */
153
154 /*
155 * Machine dependent ops.
156 */
extern unsigned int hw_lock_bit(unsigned int *, unsigned int, unsigned int);
extern unsigned int hw_cpu_sync(unsigned int *, unsigned int);
extern unsigned int hw_cpu_wcng(unsigned int *, unsigned int, unsigned int);
extern unsigned int hw_lock_mbits(unsigned int *, unsigned int, unsigned int,
	unsigned int, unsigned int);
/* 'extern' added for consistency with every other declaration in this group */
extern void hw_unlock_bit(unsigned int *, unsigned int);

/*
 * Atomic arithmetic/bitwise operations on a 32-bit word.
 * NOTE(review): whether the returned value is the old or the new
 * contents of *dest is the machine-dependent implementation's
 * contract -- confirm against the per-architecture sources.
 */
extern uint32_t hw_atomic_add(
	uint32_t *dest,
	uint32_t delt);

extern uint32_t hw_atomic_sub(
	uint32_t *dest,
	uint32_t delt);

extern uint32_t hw_atomic_or(
	uint32_t *dest,
	uint32_t mask);

extern uint32_t hw_atomic_and(
	uint32_t *dest,
	uint32_t mask);

/*
 * Atomically store newval into *dest iff *dest currently equals
 * oldval. NOTE(review): return value presumably indicates whether
 * the store took place -- confirm against machine-dependent code.
 */
extern uint32_t hw_compare_and_store(
	uint32_t oldval,
	uint32_t newval,
	uint32_t *dest);

/*
 * Atomic queue operations. NOTE(review): 'disp' appears to be the
 * byte displacement of the link field within an element -- confirm
 * against callers elsewhere in xnu.
 */
extern void hw_queue_atomic(unsigned int *anchor, unsigned int *elem, unsigned int disp);
extern void hw_queue_atomic_list(unsigned int *anchor, unsigned int *first, unsigned int *last, unsigned int disp);
extern unsigned int *hw_dequeue_atomic(unsigned int *anchor, unsigned int disp);
188
189
190 /*
191 * The remaining locking constructs may have two versions.
192 * One version is machine-independent, built in C on top of the
193 * hw_lock construct. This version supports production, debugging
194 * and statistics configurations and is portable across architectures.
195 *
196 * Any particular port may override some or all of the portable
197 * lock package for whatever reason -- usually efficiency.
198 *
199 * The direct use of hw_locks by machine-independent Mach code
200 * should be rare; the preferred spinning lock is the simple_lock
201 * (see below).
202 */
203
204 /*
205 * A "simple" spin lock, providing non-blocking mutual
206 * exclusion and conditional acquisition.
207 *
208 * The usimple_lock exists even in uniprocessor configurations.
209 * A data structure is always allocated for it and the following
210 * operations are always defined:
211 *
212 * usimple_lock_init lock initialization (mandatory!)
213 * usimple_lock lock acquisition
214 * usimple_unlock lock release
215 * usimple_lock_try conditional lock acquisition;
216 * non-zero means success
217 * Simple lock DEBUG interfaces
218 * usimple_lock_held verify lock already held by me
219 * usimple_lock_none_held verify no usimple locks are held
220 *
221 * The usimple_lock may be used for synchronization between
222 * thread context and interrupt context, or between a uniprocessor
223 * and an intelligent device. Obviously, it may also be used for
224 * multiprocessor synchronization. Its use should be rare; the
225 * simple_lock is the preferred spinning lock (see below).
226 *
227 * The usimple_lock supports optional lock debugging and statistics.
228 *
229 * Normally, we expect the usimple_lock data structure to be
230 * defined here, with its operations implemented in an efficient,
231 * machine-dependent way. However, any implementation may choose
232 * to rely on a C-based, portable version of the usimple_lock for
233 * debugging, statistics, and/or tracing. Three hooks are used in
234 * the portable lock package to allow the machine-dependent package
235 * to override some or all of the portable package's features.
236 *
237 * The usimple_lock also handles pre-emption. Lock acquisition
238 * implies disabling pre-emption, while lock release implies
239 * re-enabling pre-emption. Conditional lock acquisition does
240 * not assume success: on success, pre-emption is disabled
241 * but on failure the pre-emption state remains the same as
242 * the pre-emption state before the acquisition attempt.
243 */
244
245 /*
246 * Each usimple_lock has a type, used for debugging and
247 * statistics. This type may safely be ignored in a
248 * production configuration.
249 *
250 * The conditional acquisition call, usimple_lock_try,
251 * must return non-zero on success and zero on failure.
252 */
extern void usimple_lock_init(usimple_lock_t,etap_event_t);	/* mandatory; event type is used for debugging/statistics only */
extern void usimple_lock(usimple_lock_t);			/* acquire (spins); acquisition implies disabling pre-emption */
extern void usimple_unlock(usimple_lock_t);			/* release; implies re-enabling pre-emption */
extern unsigned int usimple_lock_try(usimple_lock_t);		/* non-zero on success, zero on failure; pre-emption disabled only on success */
extern void usimple_lock_held(usimple_lock_t);			/* DEBUG: verify lock already held by caller */
extern void usimple_lock_none_held(void);			/* DEBUG: verify no usimple locks are held */
259
260
261 /*
262 * Upon the usimple_lock we define the simple_lock, which
263 * exists for SMP configurations. These locks aren't needed
264 * in a uniprocessor configuration, so compile-time tricks
265 * make them disappear when NCPUS==1. (For debugging purposes,
266 * however, they can be enabled even on a uniprocessor.) This
267 * should be the "most popular" spinning lock; the usimple_lock
268 * and hw_lock should only be used in rare cases.
269 *
270 * IMPORTANT: simple_locks that may be shared between interrupt
271 * and thread context must have their use coordinated with spl.
272 * The spl level must alway be the same when acquiring the lock.
273 * Otherwise, deadlock may result.
274 */
275
276 #ifdef __APPLE_API_PRIVATE
277
278 #ifdef MACH_KERNEL_PRIVATE
279
280 #include <cpus.h>
281 #include <mach_ldebug.h>
282
283 #if NCPUS == 1 && !ETAP_LOCK_TRACE && !USLOCK_DEBUG
284 /*
285 * MACH_RT is a very special case: in the case that the
286 * machine-dependent lock package hasn't taken responsibility
287 * but there is no other reason to turn on locks, if MACH_RT
288 * is turned on locks denote critical, non-preemptable points
289 * in the code.
290 *
291 * Otherwise, simple_locks may be layered directly on top of
292 * usimple_locks.
293 *
294 * N.B. The reason that simple_lock_try may be assumed to
295 * succeed under MACH_RT is that the definition only is used
296 * when NCPUS==1 AND because simple_locks shared between thread
297 * and interrupt context are always acquired with elevated spl.
298 * Thus, it is never possible to be interrupted in a dangerous
299 * way while holding a simple_lock.
300 */
/*
 * In this configuration there is no other apparent reason to turn
 * locks on, so make them disappear.
 */
/*
 * Uniprocessor, no tracing, no lock debugging: simple_locks reduce
 * to pre-emption control (see the MACH_RT discussion above).
 */
#define simple_lock_init(l,t)				/* no lock state exists to initialize */
#define simple_lock(l) disable_preemption()
#define simple_unlock(l) enable_preemption()
#define simple_lock_try(l) (disable_preemption(), 1)	/* always "succeeds" -- safe when NCPUS==1 (see N.B. above) */
#define simple_lock_addr(lock) ((simple_lock_t)0)	/* no lock object; callers get a null lock address */
#define __slock_held_func__(l) preemption_is_disabled()	/* best available check: "held" == pre-emption disabled */
311 #endif /* NCPUS == 1 && !ETAP_LOCK_TRACE && !USLOCK_DEBUG */
312
#if ETAP_LOCK_TRACE
/*
 * Untraced variants of the simple_lock operations.
 * NOTE(review): semantics inferred from the names -- presumably used
 * where tracing must be bypassed (e.g. within the tracing package
 * itself); confirm against the portable lock implementation.
 */
extern void simple_lock_no_trace(simple_lock_t l);
extern int simple_lock_try_no_trace(simple_lock_t l);	/* presumably non-zero on success -- confirm */
extern void simple_unlock_no_trace(simple_lock_t l);
#endif /* ETAP_LOCK_TRACE */
318
319 #endif /* MACH_KERNEL_PRIVATE */
320
321 #endif /* __APPLE_API_PRIVATE */
322
323 /*
324 * If we got to here and we still don't have simple_lock_init
325 * defined, then we must either be outside the osfmk component,
326 * running on a true SMP, or need debug.
327 */
#if !defined(simple_lock_init)
/*
 * SMP, debugging, or outside-osfmk case: layer the simple_lock
 * interface directly on usimple_locks.
 */
#define simple_lock_init(l,t) usimple_lock_init(l,t)
#define simple_lock(l) usimple_lock(l)
#define simple_unlock(l) usimple_unlock(l)
#define simple_lock_try(l) usimple_lock_try(l)
#define simple_lock_addr(l) (&(l))		/* a real lock object exists; hand out its address */
#define __slock_held_func__(l) usimple_lock_held(l)
#define thread_sleep_simple_lock(l, e, i) \
	thread_sleep_usimple_lock((l), (e), (i))
#endif /* !defined(simple_lock_init) */
338
#if USLOCK_DEBUG
/*
 * Debug-time only:
 * + verify that usimple_lock is already held by caller
 * + verify that usimple_lock is NOT held by caller
 * + verify that current processor owns no usimple_locks
 *
 * We do not provide a simple_lock_NOT_held function because
 * it's impossible to verify when only MACH_RT is turned on.
 * In that situation, only preemption is enabled/disabled
 * around lock use, and it's impossible to tell which lock
 * acquisition caused preemption to be disabled. However,
 * note that it's still valid to use check_simple_locks
 * when only MACH_RT is turned on -- no locks should be
 * held, hence preemption should be enabled.
 * Actually, the above isn't strictly true, as explicit calls
 * to disable_preemption() need to be accounted for.
 */
#define simple_lock_held(l) __slock_held_func__(l)
#define check_simple_locks() usimple_lock_none_held()

#else /* USLOCK_DEBUG */

/*
 * Production configuration: the checks vanish. Both macros expand
 * to nothing, so they may only be used as standalone statements,
 * never inside an expression.
 */
#define simple_lock_held(l)
#define check_simple_locks()

#endif /* USLOCK_DEBUG */
366
367 #endif /*!_SIMPLE_LOCK_H_*/