/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  File:   kern/lock.h
 *  Author: Avadis Tevanian, Jr., Michael Wayne Young
 *  Date:   1985
 *
 *  Higher Level Locking primitives definitions
 */

#ifndef _KERN_LOCK_H_
#define _KERN_LOCK_H_

/*
 * Configuration variables:
 *
 *
 *  MACH_LDEBUG:    record pc and thread of callers, turn on
 *                  all lock debugging.
 *
 *
 *  ETAP:           The Event Trace Analysis Package (ETAP) monitors
 *                  and records micro-kernel lock behavior and general
 *                  kernel events. ETAP supports two levels of
 *                  tracing for locks:
 *                      - cumulative (ETAP_LOCK_ACCUMULATE)
 *                      - monitored  (ETAP_LOCK_MONITOR)
 *
 *                  Note: If either level of tracing is configured then
 *                        ETAP_LOCK_TRACE is automatically defined to
 *                        equal one.
 *
 *  Several macros are added throughout the lock code to
 *  allow for convenient configuration.
 */

#include <kern/simple_lock.h>
#include <machine/lock.h>
#include <mach/etap_events.h>
#include <mach/etap.h>

/*
 *  The Mach lock package exports the following high-level
 *  lock abstractions:
 *
 *  Lock Type   Properties
 *  mutex       blocking mutual exclusion lock, intended for
 *              SMP synchronization (vanishes on a uniprocessor);
 *              supports debugging, statistics, and pre-emption
 *  lock        blocking synchronization permitting multiple
 *              simultaneous readers or a single writer; supports
 *              debugging and statistics but not pre-emption
 *
 *  In general, mutex locks are preferred over all others, as the
 *  mutex supports pre-emption and relinquishes the processor
 *  upon contention.
 *
 */

#include <sys/appleapiopts.h>

#ifdef __APPLE_API_PRIVATE

#ifdef MACH_KERNEL_PRIVATE

/*
 *  A simple mutex lock.
 *  Do not change the order of the fields in this structure without
 *  changing the machine-dependent assembler routines which depend
 *  on them.
 */

#include <mach_ldebug.h>
#include <kern/etap_options.h>
#include <kern/etap_pool.h>

typedef struct {
    hw_lock_data_t  interlock;
    hw_lock_data_t  locked;
    uint16_t        waiters;
    uint16_t        promoted_pri;
#if MACH_LDEBUG
    int             type;
#define MUTEX_TAG   0x4d4d
    vm_offset_t     pc;
    vm_offset_t     thread;
#endif  /* MACH_LDEBUG */
#if ETAP_LOCK_TRACE
    union {                         /* Must be overlaid on the event_tablep */
        struct event_table_chain event_table_chain;
        struct {
            event_table_t   event_tablep;       /* ptr to event table entry */
            etap_time_t     start_hold_time;    /* Time of last acquisition */
        } s;
    } u;
#endif  /* ETAP_LOCK_TRACE */
#if ETAP_LOCK_ACCUMULATE
    cbuff_entry_t   cbuff_entry;    /* cumulative buffer entry */
#endif  /* ETAP_LOCK_ACCUMULATE */
#if ETAP_LOCK_MONITOR
    vm_offset_t     start_pc;       /* pc where lock operation began */
    vm_offset_t     end_pc;         /* pc where lock operation ended */
#endif  /* ETAP_LOCK_MONITOR */
} mutex_t;

#define decl_mutex_data(class,name)     class mutex_t name;
#define mutex_addr(m)                   (&(m))

extern void mutex_init(
                mutex_t         *mutex,
                etap_event_t    tag);
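
/*
 * A minimal kernel-internal usage sketch (not part of this header): a mutex
 * embedded in a subsystem is declared with decl_mutex_data() and set up once
 * with mutex_init() before first use. The ETAP_NO_TRACE tag below is only an
 * illustrative etap_event_t value; pick whatever tag fits the subsystem.
 *
 *      decl_mutex_data(static, example_mutex)      // expands to: static mutex_t example_mutex;
 *
 *      void example_bootstrap(void)
 *      {
 *              mutex_init(&example_mutex, ETAP_NO_TRACE);  // must precede any mutex_lock()
 *      }
 */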

extern void mutex_lock_wait(
                mutex_t         *mutex,
                thread_t        holder);

extern int mutex_lock_acquire(
                mutex_t         *mutex);

extern void mutex_unlock_wakeup(
                mutex_t         *mutex,
                thread_t        holder);

extern boolean_t mutex_preblock(
                mutex_t         *mutex,
                thread_t        thread);

extern boolean_t mutex_preblock_wait(
                mutex_t         *mutex,
                thread_t        thread,
                thread_t        holder);

extern void interlock_unlock(
                hw_lock_t       lock);

#endif  /* MACH_KERNEL_PRIVATE */

extern void mutex_pause(void);

#endif  /* __APPLE_API_PRIVATE */

#if !defined(MACH_KERNEL_PRIVATE)

typedef struct __mutex__ mutex_t;

#endif  /* !MACH_KERNEL_PRIVATE */

extern mutex_t *mutex_alloc(
                etap_event_t    tag);

extern void mutex_free(
                mutex_t         *mutex);

extern void mutex_lock(
                mutex_t         *mutex);

extern void mutex_unlock(
                mutex_t         *mutex);

extern boolean_t mutex_try(
                mutex_t         *mutex);
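
/*
 * A minimal usage sketch of the exported mutex interface. The ETAP_NO_TRACE
 * tag stands in for whichever etap_event_t the caller would actually use, and
 * example_lock is purely illustrative.
 *
 *      mutex_t *example_lock = mutex_alloc(ETAP_NO_TRACE);
 *
 *      mutex_lock(example_lock);           // blocks (and may relinquish the CPU) on contention
 *      // ... critical section ...
 *      mutex_unlock(example_lock);
 *
 *      if (mutex_try(example_lock)) {      // non-blocking attempt
 *              // ... critical section ...
 *              mutex_unlock(example_lock);
 *      }
 *
 *      mutex_free(example_lock);           // when the lock is no longer needed
 */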

#ifdef __APPLE_API_PRIVATE

#ifdef MACH_KERNEL_PRIVATE

/*
 *  The general lock structure. Provides for multiple readers,
 *  upgrading from read to write, and sleeping until the lock
 *  can be gained.
 *
 *  On some architectures, assembly language code in the 'inline'
 *  program fiddles the lock structures. It must be changed in
 *  concert with the structure layout.
 *
 *  Only the "interlock" field is used for hardware exclusion;
 *  other fields are modified with normal instructions after
 *  acquiring the interlock bit.
 */

typedef struct {
    decl_simple_lock_data(,interlock)   /* "hardware" interlock field */
    volatile unsigned int
        read_count:16,      /* No. of accepted readers */
        want_upgrade:1,     /* Read-to-write upgrade waiting */
        want_write:1,       /* Writer is waiting, or
                               locked for write */
        waiting:1,          /* Someone is sleeping on lock */
        can_sleep:1;        /* Can attempts to lock go to sleep? */
#if ETAP_LOCK_TRACE
    union {                 /* Must be overlaid on the event_tablep */
        struct event_table_chain event_table_chain;
        struct {
            event_table_t       event_tablep;   /* ptr to event table entry */
            start_data_node_t   start_list;     /* linked list of start times
                                                   and pcs */
        } s;
    } u;
#endif  /* ETAP_LOCK_TRACE */
#if ETAP_LOCK_ACCUMULATE
    cbuff_entry_t   cbuff_write;    /* write cumulative buffer entry */
    cbuff_entry_t   cbuff_read;     /* read  cumulative buffer entry */
#endif  /* ETAP_LOCK_ACCUMULATE */
} lock_t;

/* Sleep locks must work even if no multiprocessing */

/*
 *  Complex lock operations
 */

#if ETAP
/*
 *  Locks have a pointer into an event_table entry that names the
 *  corresponding lock event and controls whether it is being traced.
 *  Initially this pointer is into a read-only table event_table_init[].
 *  Once dynamic allocation becomes possible a modifiable copy of the table
 *  is allocated and pointers are set to within this copy. The pointers
 *  that were already in place at that point need to be switched to point
 *  into the copy. To do this we overlay the event_table_chain structure
 *  onto sufficiently-big elements of the various lock structures so we
 *  can sweep down this list switching the pointers. The assumption is
 *  that we will not want to enable tracing before this is done (which is
 *  after all during kernel bootstrap, before any user tasks are launched).
 *
 *  This is admittedly rather ugly but so were the alternatives:
 *  - record the event_table pointers in a statically-allocated array
 *    (dynamic allocation not yet being available) -- but there were
 *    over 8000 of them;
 *  - add a new link field to each lock structure;
 *  - change pointers to array indices -- this adds quite a bit of
 *    arithmetic to every lock operation that might be traced.
 */
#define lock_event_table(lockp)         ((lockp)->u.s.event_tablep)
#define lock_start_hold_time(lockp)     ((lockp)->u.s.start_hold_time)
#endif  /* ETAP */

extern void lock_init   (lock_t*,
                         boolean_t,
                         etap_event_t,
                         etap_event_t);
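
/*
 * A minimal initialization sketch (kernel-private): the boolean_t argument
 * corresponds to the can_sleep behavior described in the structure above, and
 * the two etap_event_t arguments are event tags consumed by ETAP tracing.
 * The tags shown are placeholders, not prescribed values.
 *
 *      lock_t example_rw_lock;
 *
 *      lock_init(&example_rw_lock, TRUE,       // allow lock attempts to sleep
 *                ETAP_NO_TRACE, ETAP_NO_TRACE);
 */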

#endif  /* MACH_KERNEL_PRIVATE */

extern unsigned int LockTimeOut;    /* Standard lock timeout value */

#endif  /* __APPLE_API_PRIVATE */

#if !defined(MACH_KERNEL_PRIVATE)

typedef struct __lock__ lock_t;
extern lock_t *lock_alloc(boolean_t, etap_event_t, etap_event_t);
void lock_free(lock_t *);

#endif  /* !MACH_KERNEL_PRIVATE */

extern void lock_write          (lock_t*);
extern void lock_read           (lock_t*);
extern void lock_done           (lock_t*);
extern void lock_write_to_read  (lock_t*);

#define lock_read_done(l)       lock_done(l)
#define lock_write_done(l)      lock_done(l)

extern boolean_t lock_read_to_write (lock_t*);  /* vm_map is only user */
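
/*
 * A minimal usage sketch of the exported read-write interface, assuming an
 * already-initialized lock_t *example_rw_lock. Readers nest with other
 * readers; a writer excludes everyone. lock_read_to_write() may fail when
 * another upgrade is already pending, so check its boolean_t result before
 * assuming write access (per the comment above, vm_map is its only user).
 *
 *      lock_read(example_rw_lock);             // shared access
 *      // ... read-only work ...
 *      lock_read_done(example_rw_lock);
 *
 *      lock_write(example_rw_lock);            // exclusive access
 *      // ... modify shared state ...
 *      lock_write_to_read(example_rw_lock);    // downgrade without a window for writers
 *      lock_read_done(example_rw_lock);
 */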

#endif  /* _KERN_LOCK_H_ */