/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/simple_lock.h (derived from kern/lock.h)
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Simple Locking primitives definitions
 */

62#ifndef _SIMPLE_LOCK_H_
63#define _SIMPLE_LOCK_H_
64
65/*
66 * Configuration variables:
67 *
68 *
69 * MACH_LDEBUG: record pc and thread of callers, turn on
70 * all lock debugging.
71 *
72 *
73 * ETAP: The Event Trace Analysis Package (ETAP) monitors
74 * and records micro-kernel lock behavior and general
75 * kernel events. ETAP supports two levels of
76 * tracing for locks:
77 * - cumulative (ETAP_LOCK_ACCUMULATE)
78 * - monitored (ETAP_LOCK_MONITOR)
79 *
80 * Note: If either level of tracing is configured then
81 * ETAP_LOCK_TRACE is automatically defined to
82 * equal one.
83 *
84 * Several macros are added throughout the lock code to
85 * allow for convenient configuration.
86 */
87
88#include <mach/boolean.h>
89#include <kern/kern_types.h>
90
91#include <kern/simple_lock_types.h>
0b4e3aa0 92#include <machine/lock.h>
1c79356b
A
93#include <mach/etap_events.h>
94#include <mach/etap.h>
95
/*
 * The Mach lock package exports the following simple lock abstractions:
 *
 *	Lock Type  Properties
 *	hw_lock	   lowest level hardware abstraction; atomic,
 *		   non-blocking, mutual exclusion; supports pre-emption
 *	usimple	   non-blocking spinning lock, available in all
 *		   kernel configurations; may be used from thread
 *		   and interrupt contexts; supports debugging,
 *		   statistics and pre-emption
 *	simple	   non-blocking spinning lock, intended for SMP
 *		   synchronization (vanishes on a uniprocessor);
 *		   supports debugging, statistics and pre-emption
 *
 * NOTES TO IMPLEMENTORS:  there are essentially two versions
 * of the lock package.  One is portable, written in C, and
 * supports all of the various flavors of debugging, statistics,
 * uni- versus multi-processor, pre-emption, etc.  The "other"
 * is whatever set of lock routines is provided by machine-dependent
 * code.  Presumably, the machine-dependent package is heavily
 * optimized and meant for production kernels.
 *
 * We encourage implementors to focus on highly-efficient,
 * production implementations of machine-dependent lock code,
 * and use the portable lock package for everything else.
 */

#ifdef	MACH_KERNEL_PRIVATE
/*
 * Mach always initializes locks, even those statically
 * allocated.
 *
 * The conditional acquisition call, hw_lock_try,
 * must return non-zero on success and zero on failure.
 *
 * The hw_lock_held operation returns non-zero if the
 * lock is set, zero if the lock is clear.  This operation
 * should be implemented using an ordinary memory read,
 * rather than a special atomic instruction, allowing
 * a processor to spin in cache waiting for the lock to
 * be released without chewing up bus cycles.
 */
extern void			hw_lock_init(hw_lock_t);
extern void			hw_lock_lock(hw_lock_t);
extern void			hw_lock_unlock(hw_lock_t);
extern unsigned int		hw_lock_to(hw_lock_t, unsigned int);
extern unsigned int		hw_lock_try(hw_lock_t);
extern unsigned int		hw_lock_held(hw_lock_t);
#endif	/* MACH_KERNEL_PRIVATE */

/*
 * Machine dependent atomic ops.  Probably should be in their own header.
 */
extern unsigned int		hw_lock_bit(unsigned int *, unsigned int, unsigned int);
extern unsigned int		hw_cpu_sync(unsigned int *, unsigned int);
extern unsigned int		hw_cpu_wcng(unsigned int *, unsigned int, unsigned int);
extern unsigned int		hw_lock_mbits(unsigned int *, unsigned int, unsigned int,
					      unsigned int, unsigned int);
/* 'extern' added for consistency with the surrounding declarations */
extern void			hw_unlock_bit(unsigned int *, unsigned int);
extern int			hw_atomic_add(int *area, int inc);
extern int			hw_atomic_sub(int *area, int dec);
extern int			hw_atomic_or(int *area, int val);
extern int			hw_atomic_and(int *area, int mask);
extern unsigned int		hw_compare_and_store(unsigned int oldValue, unsigned int newValue, unsigned int *area);
extern void			hw_queue_atomic(unsigned int *anchor, unsigned int *elem, unsigned int disp);
extern void			hw_queue_atomic_list(unsigned int *anchor, unsigned int *first, unsigned int *last, unsigned int disp);
extern unsigned int		*hw_dequeue_atomic(unsigned int *anchor, unsigned int disp);

/*
 * The remaining locking constructs may have two versions.
 * One version is machine-independent, built in C on top of the
 * hw_lock construct.  This version supports production, debugging
 * and statistics configurations and is portable across architectures.
 *
 * Any particular port may override some or all of the portable
 * lock package for whatever reason -- usually efficiency.
 *
 * The direct use of hw_locks by machine-independent Mach code
 * should be rare; the preferred spinning lock is the simple_lock
 * (see below).
 */

/*
 * A "simple" spin lock, providing non-blocking mutual
 * exclusion and conditional acquisition.
 *
 * The usimple_lock exists even in uniprocessor configurations.
 * A data structure is always allocated for it and the following
 * operations are always defined:
 *
 *	usimple_lock_init	lock initialization (mandatory!)
 *	usimple_lock		lock acquisition
 *	usimple_unlock		lock release
 *	usimple_lock_try	conditional lock acquisition;
 *				non-zero means success
 * Simple lock DEBUG interfaces
 *	usimple_lock_held	verify lock already held by me
 *	usimple_lock_none_held	verify no usimple locks are held
 *
 * The usimple_lock may be used for synchronization between
 * thread context and interrupt context, or between a uniprocessor
 * and an intelligent device.  Obviously, it may also be used for
 * multiprocessor synchronization.  Its use should be rare; the
 * simple_lock is the preferred spinning lock (see below).
 *
 * The usimple_lock supports optional lock debugging and statistics.
 *
 * Normally, we expect the usimple_lock data structure to be
 * defined here, with its operations implemented in an efficient,
 * machine-dependent way.  However, any implementation may choose
 * to rely on a C-based, portable version of the usimple_lock for
 * debugging, statistics, and/or tracing.  Three hooks are used in
 * the portable lock package to allow the machine-dependent package
 * to override some or all of the portable package's features.
 *
 * The usimple_lock also handles pre-emption.  Lock acquisition
 * implies disabling pre-emption, while lock release implies
 * re-enabling pre-emption.  Conditional lock acquisition does
 * not assume success:  on success, pre-emption is disabled
 * but on failure the pre-emption state remains the same as
 * the pre-emption state before the acquisition attempt.
 */

/*
 * Each usimple_lock has a type, used for debugging and
 * statistics.  This type may safely be ignored in a
 * production configuration.
 *
 * The conditional acquisition call, usimple_lock_try,
 * must return non-zero on success and zero on failure.
 */
extern void		usimple_lock_init(usimple_lock_t, etap_event_t);
extern void		usimple_lock(usimple_lock_t);
extern void		usimple_unlock(usimple_lock_t);
extern unsigned int	usimple_lock_try(usimple_lock_t);
extern void		usimple_lock_held(usimple_lock_t);
extern void		usimple_lock_none_held(void);

/*
 * Upon the usimple_lock we define the simple_lock, which
 * exists for SMP configurations.  These locks aren't needed
 * in a uniprocessor configuration, so compile-time tricks
 * make them disappear when NCPUS==1.  (For debugging purposes,
 * however, they can be enabled even on a uniprocessor.)  This
 * should be the "most popular" spinning lock; the usimple_lock
 * and hw_lock should only be used in rare cases.
 *
 * IMPORTANT:  simple_locks that may be shared between interrupt
 * and thread context must have their use coordinated with spl.
 * The spl level must alway be the same when acquiring the lock.
 * Otherwise, deadlock may result.
 */

#if	MACH_KERNEL_PRIVATE
#include <cpus.h>
#include <mach_ldebug.h>

#if	NCPUS == 1 && !ETAP_LOCK_TRACE && !USLOCK_DEBUG
/*
 * MACH_RT is a very special case:  in the case that the
 * machine-dependent lock package hasn't taken responsibility
 * but there is no other reason to turn on locks, if MACH_RT
 * is turned on locks denote critical, non-preemptable points
 * in the code.
 *
 * Otherwise, simple_locks may be layered directly on top of
 * usimple_locks.
 *
 * N.B.  The reason that simple_lock_try may be assumed to
 * succeed under MACH_RT is that the definition only is used
 * when NCPUS==1 AND because simple_locks shared between thread
 * and interrupt context are always acquired with elevated spl.
 * Thus, it is never possible to be interrupted in a dangerous
 * way while holding a simple_lock.
 *
 * On a uniprocessor there is no other apparent reason to turn
 * locks on, so make them disappear: acquisition and release
 * reduce to preemption control only.
 */
#define	simple_lock_init(l,t)
#define	simple_lock(l)		disable_preemption()
#define	simple_unlock(l)	enable_preemption()
#define	simple_lock_try(l)	(disable_preemption(), 1)
#define	simple_lock_addr(lock)	((simple_lock_t)0)
#define	__slock_held_func__(l)	preemption_is_disabled()
#endif	/* NCPUS == 1 && !ETAP_LOCK_TRACE && !USLOCK_DEBUG */

#if	ETAP_LOCK_TRACE
extern	void	simple_lock_no_trace(simple_lock_t l);
extern	int	simple_lock_try_no_trace(simple_lock_t l);
extern	void	simple_unlock_no_trace(simple_lock_t l);
#endif	/* ETAP_LOCK_TRACE */

#endif	/* MACH_KERNEL_PRIVATE */

293/*
294 * If we got to here and we still don't have simple_lock_init
295 * defined, then we must either be outside the osfmk component,
296 * running on a true SMP, or need debug.
297 */
298#if !defined(simple_lock_init)
299#define simple_lock_init(l,t) usimple_lock_init(l,t)
300#define simple_lock(l) usimple_lock(l)
301#define simple_unlock(l) usimple_unlock(l)
302#define simple_lock_try(l) usimple_lock_try(l)
303#define simple_lock_addr(l) (&(l))
304#define __slock_held_func__(l) usimple_lock_held(l)
305#endif / * !defined(simple_lock_init) */
306
307#if USLOCK_DEBUG
308/*
309 * Debug-time only:
310 * + verify that usimple_lock is already held by caller
311 * + verify that usimple_lock is NOT held by caller
312 * + verify that current processor owns no usimple_locks
313 *
314 * We do not provide a simple_lock_NOT_held function because
315 * it's impossible to verify when only MACH_RT is turned on.
316 * In that situation, only preemption is enabled/disabled
317 * around lock use, and it's impossible to tell which lock
318 * acquisition caused preemption to be disabled. However,
319 * note that it's still valid to use check_simple_locks
320 * when only MACH_RT is turned on -- no locks should be
321 * held, hence preemption should be enabled.
322 * Actually, the above isn't strictly true, as explicit calls
323 * to disable_preemption() need to be accounted for.
324 */
325#define simple_lock_held(l) __slock_held_func__(l)
326#define check_simple_locks() usimple_lock_none_held()
327#else /* USLOCK_DEBUG */
328#define simple_lock_held(l)
329#define check_simple_locks()
330#endif /* USLOCK_DEBUG */
331
332#endif /*!_SIMPLE_LOCK_H_*/