/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/lock.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Higher Level Locking primitives definitions
 */

#ifndef	_KERN_LOCK_H_
#define	_KERN_LOCK_H_

/*
 *	Configuration variables:
 *
 *	MACH_LDEBUG:	record pc and thread of callers, turn on
 *			all lock debugging.
 *
 *	ETAP:		The Event Trace Analysis Package (ETAP) monitors
 *			and records micro-kernel lock behavior and general
 *			kernel events.  ETAP supports two levels of
 *			tracing for locks:
 *				- cumulative (ETAP_LOCK_ACCUMULATE)
 *				- monitored  (ETAP_LOCK_MONITOR)
 *
 *	Note: If either level of tracing is configured then
 *	      ETAP_LOCK_TRACE is automatically defined to
 *	      equal one.
 *
 *	Several macros are added throughout the lock code to
 *	allow for convenient configuration.
 */

#include <kern/simple_lock.h>
#include <machine/lock.h>
#include <mach/etap_events.h>
#include <mach/etap.h>

/*
 *	The Mach lock package exports the following high-level
 *	lock abstractions:
 *
 *	Lock Type	Properties
 *	mutex		blocking mutual exclusion lock, intended for
 *			SMP synchronization (vanishes on a uniprocessor);
 *			supports debugging, statistics, and pre-emption
 *	lock		blocking synchronization permitting multiple
 *			simultaneous readers or a single writer; supports
 *			debugging and statistics but not pre-emption
 *
 *	In general, mutex locks are preferred over all others, as the
 *	mutex supports pre-emption and relinquishes the processor
 *	upon contention.
 */
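
/*
 *	A minimal usage sketch of the mutex interface declared further below.
 *	The event name MY_LOCK_EVENT is hypothetical; a real caller would
 *	pass an etap_event_t defined in <mach/etap_events.h>:
 *
 *		mutex_t *m = mutex_alloc(MY_LOCK_EVENT);
 *
 *		mutex_lock(m);
 *		...touch the shared state guarded by m...
 *		mutex_unlock(m);
 *
 *		mutex_free(m);
 */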

#include <sys/appleapiopts.h>

#ifdef	__APPLE_API_PRIVATE

#ifdef	MACH_KERNEL_PRIVATE

/*
 *	A simple mutex lock.
 *	Do not change the order of the fields in this structure without
 *	changing the machine-dependent assembler routines which depend
 *	on them.
 */

#include <mach_ldebug.h>
#include <kern/etap_options.h>
#include <kern/etap_pool.h>

typedef struct {
	hw_lock_data_t	interlock;
	hw_lock_data_t	locked;
	uint16_t	waiters;	/* count of threads waiting for the mutex */
	uint16_t	promoted_pri;	/* priority the holder has been promoted to */
#if	MACH_LDEBUG
	int		type;
#define	MUTEX_TAG	0x4d4d
	vm_offset_t	pc;
	vm_offset_t	thread;
#endif	/* MACH_LDEBUG */
#if	ETAP_LOCK_TRACE
	union {		/* Must be overlaid on the event_tablep */
		struct event_table_chain event_table_chain;
		struct {
			event_table_t	event_tablep;	 /* ptr to event table entry */
			etap_time_t	start_hold_time; /* time of last acquisition */
		} s;
	} u;
#endif	/* ETAP_LOCK_TRACE */
#if	ETAP_LOCK_ACCUMULATE
	cbuff_entry_t	cbuff_entry;	/* cumulative buffer entry */
#endif	/* ETAP_LOCK_ACCUMULATE */
#if	ETAP_LOCK_MONITOR
	vm_offset_t	start_pc;	/* pc where lock operation began */
	vm_offset_t	end_pc;		/* pc where lock operation ended */
#endif	/* ETAP_LOCK_MONITOR */
} mutex_t;

#define	decl_mutex_data(class,name)	class mutex_t name;
#define	mutex_addr(m)			(&(m))
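
/*
 *	decl_mutex_data(class,name) expands to "class mutex_t name;", and
 *	mutex_addr(m) to "&(m)".  A sketch of embedding a mutex in a
 *	containing structure; the structure, the obj pointer, and the
 *	MY_LOCK_EVENT event are hypothetical:
 *
 *		struct my_object {
 *			decl_mutex_data(,lock)		expands to: mutex_t lock;
 *			int		refcnt;
 *		};
 *
 *		mutex_init(mutex_addr(obj->lock), MY_LOCK_EVENT);
 */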

extern void		mutex_init		(mutex_t*, etap_event_t);
extern void		mutex_lock_wait		(mutex_t *, thread_act_t);
extern int		mutex_lock_acquire	(mutex_t *);
extern void		mutex_unlock_wakeup	(mutex_t*, thread_act_t);
extern void		interlock_unlock	(hw_lock_t);

#endif	/* MACH_KERNEL_PRIVATE */

extern void		mutex_pause		(void);

#endif	/* __APPLE_API_PRIVATE */

#if	!defined(MACH_KERNEL_PRIVATE)

typedef struct __mutex__ mutex_t;

#endif	/* !MACH_KERNEL_PRIVATE */

extern mutex_t		*mutex_alloc		(etap_event_t);
extern void		mutex_free		(mutex_t*);
extern void		mutex_lock		(mutex_t*);
extern void		mutex_unlock		(mutex_t*);
extern boolean_t	mutex_try		(mutex_t*);
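
/*
 *	mutex_try() is the non-blocking variant: it is expected to return
 *	TRUE with the mutex held if the lock was free, and FALSE without
 *	blocking otherwise.  A sketch (the fallback strategy is up to the
 *	caller):
 *
 *		if (mutex_try(m)) {
 *			...short critical section...
 *			mutex_unlock(m);
 *		} else {
 *			...back off, or mutex_lock(m) and be willing to block...
 *		}
 */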

#ifdef	__APPLE_API_PRIVATE

#ifdef	MACH_KERNEL_PRIVATE

/*
 *	The general lock structure.  Provides for multiple readers,
 *	upgrading from read to write, and sleeping until the lock
 *	can be gained.
 *
 *	On some architectures, assembly language code in the 'inline'
 *	program fiddles the lock structures.  It must be changed in
 *	concert with the structure layout.
 *
 *	Only the "interlock" field is used for hardware exclusion;
 *	other fields are modified with normal instructions after
 *	acquiring the interlock bit.
 */

typedef struct {
	decl_simple_lock_data(,interlock) /* "hardware" interlock field */
	volatile unsigned int
		read_count:16,	/* No. of accepted readers */
		want_upgrade:1,	/* Read-to-write upgrade waiting */
		want_write:1,	/* Writer is waiting, or locked for write */
		waiting:1,	/* Someone is sleeping on lock */
		can_sleep:1;	/* Can attempts to lock go to sleep? */
#if	ETAP_LOCK_TRACE
	union {		/* Must be overlaid on the event_tablep */
		struct event_table_chain event_table_chain;
		struct {
			event_table_t	  event_tablep;	/* ptr to event table entry */
			start_data_node_t start_list;	/* linked list of start times and pcs */
		} s;
	} u;
#endif	/* ETAP_LOCK_TRACE */
#if	ETAP_LOCK_ACCUMULATE
	cbuff_entry_t	cbuff_write;	/* write cumulative buffer entry */
	cbuff_entry_t	cbuff_read;	/* read cumulative buffer entry */
#endif	/* ETAP_LOCK_ACCUMULATE */
} lock_t;
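
/*
 *	The update discipline described above, sketched for an uncontended
 *	reader arrival.  This is illustrative only; the real acquisition
 *	paths (kern/lock.c and machine-dependent assembly) also examine
 *	want_write/want_upgrade and may block:
 *
 *		simple_lock(&l->interlock);	hardware exclusion
 *		l->read_count++;		ordinary stores once held
 *		simple_unlock(&l->interlock);
 */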

/* Sleep locks must work even if no multiprocessing */

/*
 *	Complex lock operations
 */

#if	ETAP
/*
 *	Locks have a pointer into an event_table entry that names the
 *	corresponding lock event and controls whether it is being traced.
 *	Initially this pointer is into a read-only table event_table_init[].
 *	Once dynamic allocation becomes possible a modifiable copy of the table
 *	is allocated and pointers are set to within this copy.  The pointers
 *	that were already in place at that point need to be switched to point
 *	into the copy.  To do this we overlay the event_table_chain structure
 *	onto sufficiently-big elements of the various lock structures so we
 *	can sweep down this list switching the pointers.  The assumption is
 *	that we will not want to enable tracing before this is done (which is
 *	after all during kernel bootstrap, before any user tasks are launched).
 *
 *	This is admittedly rather ugly but so were the alternatives:
 *	- record the event_table pointers in a statically-allocated array
 *	  (dynamic allocation not yet being available) -- but there were
 *	  over 8000 of them;
 *	- add a new link field to each lock structure;
 *	- change pointers to array indices -- this adds quite a bit of
 *	  arithmetic to every lock operation that might be traced.
 */
#define lock_event_table(lockp)		((lockp)->u.s.event_tablep)
#define lock_start_hold_time(lockp)	((lockp)->u.s.start_hold_time)
#endif	/* ETAP */
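
/*
 *	An illustrative sketch of the pointer-switching sweep described
 *	above.  The list head, link field, and lookup into the writable
 *	copy are hypothetical names, not the actual ETAP implementation:
 *
 *		struct event_table_chain *chainp;
 *
 *		for (chainp = event_table_chain_head;
 *		     chainp != NULL;
 *		     chainp = chainp->next_chain) {
 *			event_table_t old = chainp->event_tablep;
 *			chainp->event_tablep =
 *				&event_table_copy[old - event_table_init];
 *		}
 */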

extern void	lock_init	(lock_t*,
				 boolean_t,
				 etap_event_t,
				 etap_event_t);

#endif	/* MACH_KERNEL_PRIVATE */

extern unsigned int LockTimeOut;	/* Standard lock timeout value */

#endif	/* __APPLE_API_PRIVATE */

#if	!defined(MACH_KERNEL_PRIVATE)

typedef struct __lock__ lock_t;
extern lock_t	*lock_alloc	(boolean_t, etap_event_t, etap_event_t);
extern void	lock_free	(lock_t *);

#endif	/* !MACH_KERNEL_PRIVATE */

extern void	lock_write		(lock_t*);
extern void	lock_read		(lock_t*);
extern void	lock_done		(lock_t*);
extern void	lock_write_to_read	(lock_t*);

#define	lock_read_done(l)	lock_done(l)
#define	lock_write_done(l)	lock_done(l)

extern boolean_t lock_read_to_write	(lock_t*);	/* vm_map is only user */
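
/*
 *	A usage sketch for the read/write lock interface.  The event names
 *	are hypothetical, and the boolean_t passed to lock_alloc() is taken
 *	here to be the can_sleep flag from the structure above; lock_done()
 *	releases whichever hold the caller has, and lock_read_done()/
 *	lock_write_done() are simply aliases for it:
 *
 *		lock_t *l = lock_alloc(TRUE, MY_EVENT_READ, MY_EVENT_WRITE);
 *
 *		lock_read(l);			shared, read-only access
 *		lock_read_done(l);
 *
 *		lock_write(l);			exclusive access
 *		lock_write_to_read(l);		downgrade without releasing
 *		lock_read_done(l);
 *
 *		lock_free(l);
 */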

#endif	/* _KERN_LOCK_H_ */