/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/lock.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Definitions of higher-level locking primitives.
 */

#ifndef	_KERN_LOCK_H_
#define	_KERN_LOCK_H_

/*
 * Configuration variables:
 *
 *	MACH_LDEBUG:	records the pc and thread of callers and turns
 *			on all lock debugging.
 *
 *	ETAP:		The Event Trace Analysis Package (ETAP) monitors
 *			and records micro-kernel lock behavior and general
 *			kernel events.  ETAP supports two levels of
 *			tracing for locks:
 *				- cumulative (ETAP_LOCK_ACCUMULATE)
 *				- monitored  (ETAP_LOCK_MONITOR)
 *
 *			Note: if either level of tracing is configured,
 *			then ETAP_LOCK_TRACE is automatically defined to
 *			equal one.
 *
 *			Several macros are added throughout the lock code
 *			to allow for convenient configuration.
 */

#include <kern/simple_lock.h>
#include <machine/lock.h>
#include <mach/etap_events.h>
#include <mach/etap.h>

/*
 * The Mach lock package exports the following high-level
 * lock abstractions:
 *
 *	Lock Type	Properties
 *
 *	mutex		blocking mutual exclusion lock, intended for
 *			SMP synchronization (vanishes on a uniprocessor);
 *			supports debugging, statistics, and pre-emption
 *
 *	lock		blocking synchronization permitting multiple
 *			simultaneous readers or a single writer; supports
 *			debugging and statistics but not pre-emption
 *
 * In general, mutex locks are preferred over all others, as the
 * mutex supports pre-emption and relinquishes the processor
 * upon contention.
 */
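
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): a typical round trip through the mutex interface declared
 * below.  All "example_*" names are hypothetical, and the zero ETAP
 * event id is a placeholder; real callers pass an event defined in
 * <mach/etap_events.h>.  Note that this header exposes the lock and
 * try entry points as _mutex_lock() and _mutex_try().
 */
#if 0	/* example only */
static mutex_t *example_region;

void
example_setup(void)
{
	example_region = mutex_alloc((etap_event_t)0);	/* placeholder event */
}

void
example_enter(void)
{
	_mutex_lock(example_region);	/* may block and relinquish the CPU */
	/* ... manipulate state shared between threads ... */
	mutex_unlock(example_region);
}
#endif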

/*
 * A simple mutex lock.
 * Do not change the order of the fields in this structure without
 * changing the machine-dependent assembler routines which depend
 * on them.
 */
#ifdef	MACH_KERNEL_PRIVATE
#include <mach_ldebug.h>
#include <kern/etap_options.h>
#include <kern/etap_pool.h>

typedef struct {
	hw_lock_data_t	interlock;
	hw_lock_data_t	locked;
	short		waiters;
#if	MACH_LDEBUG
	int		type;
#define	MUTEX_TAG	0x4d4d
	vm_offset_t	pc;
	vm_offset_t	thread;
#endif	/* MACH_LDEBUG */
#if	ETAP_LOCK_TRACE
	union {		/* Must be overlaid on the event_tablep */
		struct event_table_chain event_table_chain;
		struct {
			event_table_t	event_tablep;	/* ptr to event table entry */
			etap_time_t	start_hold_time; /* Time of last acquisition */
		} s;
	} u;
#endif	/* ETAP_LOCK_TRACE */
#if	ETAP_LOCK_ACCUMULATE
	cbuff_entry_t	cbuff_entry;	/* cumulative buffer entry */
#endif	/* ETAP_LOCK_ACCUMULATE */
#if	ETAP_LOCK_MONITOR
	vm_offset_t	start_pc;	/* pc where lock operation began */
	vm_offset_t	end_pc;		/* pc where lock operation ended */
#endif	/* ETAP_LOCK_MONITOR */
} mutex_t;

#define	decl_mutex_data(class,name)	class mutex_t name;
#define	mutex_addr(m)			(&(m))

#if	MACH_LDEBUG
#define	mutex_held(m)	(hw_lock_held(&((m)->locked)) && \
			 ((m)->thread == (int)current_thread()))
#else	/* MACH_LDEBUG */
#define	mutex_held(m)	hw_lock_held(&((m)->locked))
#endif	/* MACH_LDEBUG */

#else	/* MACH_KERNEL_PRIVATE */

typedef struct __mutex__ mutex_t;
extern boolean_t	mutex_held(mutex_t*);

#endif	/* !MACH_KERNEL_PRIVATE */

extern mutex_t	*mutex_alloc		(etap_event_t);
extern void	mutex_free		(mutex_t*);

extern void	mutex_init		(mutex_t*, etap_event_t);
extern void	_mutex_lock		(mutex_t*);
extern void	mutex_unlock		(mutex_t*);
extern boolean_t _mutex_try		(mutex_t*);

extern void	mutex_lock_wait		(mutex_t*);
extern void	mutex_unlock_wakeup	(mutex_t*);
extern void	mutex_pause		(void);
extern void	interlock_unlock	(hw_lock_t);

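/*
 * Illustrative sketch (editor's addition): embedding a mutex in
 * kernel-private code with decl_mutex_data() and taking the
 * non-blocking _mutex_try() path.  All "example_*" names are
 * hypothetical and the zero event id is a placeholder.
 */
#if 0	/* example only */
decl_mutex_data(static, example_mutex)	/* expands to: static mutex_t example_mutex; */

void
example_setup(void)
{
	mutex_init(&example_mutex, (etap_event_t)0);	/* placeholder event */
}

void
example_poll(void)
{
	if (_mutex_try(&example_mutex)) {
		/* acquired without blocking */
		mutex_unlock(&example_mutex);
	} else {
		mutex_pause();	/* brief back-off before the caller retries */
	}
}
#endif
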
/*
 * The general lock structure.  Provides for multiple readers,
 * upgrading from read to write, and sleeping until the lock
 * can be gained.
 *
 * On some architectures, assembly language code in the 'inline'
 * program fiddles the lock structures.  It must be changed in
 * concert with the structure layout.
 *
 * Only the "interlock" field is used for hardware exclusion;
 * other fields are modified with normal instructions after
 * acquiring the interlock bit.
 */
#ifdef	MACH_KERNEL_PRIVATE
typedef struct {
	decl_simple_lock_data(,interlock) /* "hardware" interlock field */
	volatile unsigned int
		read_count:16,	/* No. of accepted readers */
		want_upgrade:1,	/* Read-to-write upgrade waiting */
		want_write:1,	/* Writer is waiting, or locked for write */
		waiting:1,	/* Someone is sleeping on lock */
		can_sleep:1;	/* Can attempts to lock go to sleep? */
#if	ETAP_LOCK_TRACE
	union {		/* Must be overlaid on the event_tablep */
		struct event_table_chain event_table_chain;
		struct {
			event_table_t	event_tablep;	/* ptr to event table entry */
			start_data_node_t start_list;	/* linked list of start times and pcs */
		} s;
	} u;
#endif	/* ETAP_LOCK_TRACE */
#if	ETAP_LOCK_ACCUMULATE
	cbuff_entry_t	cbuff_write;	/* write cumulative buffer entry */
	cbuff_entry_t	cbuff_read;	/* read cumulative buffer entry */
#endif	/* ETAP_LOCK_ACCUMULATE */
} lock_t;

/* Sleep locks must work even without multiprocessing. */

/*
 * Complex lock operations
 */

#if	ETAP
/*
 * Locks have a pointer into an event_table entry that names the
 * corresponding lock event and controls whether it is being traced.
 * Initially this pointer is into a read-only table, event_table_init[].
 * Once dynamic allocation becomes possible, a modifiable copy of the
 * table is allocated and the pointers are set to within this copy.
 * The pointers that were already in place at that point need to be
 * switched to point into the copy.  To do this we overlay the
 * event_table_chain structure onto sufficiently-big elements of the
 * various lock structures so we can sweep down this list switching
 * the pointers.  The assumption is that we will not want to enable
 * tracing before this is done (which is, after all, during kernel
 * bootstrap, before any user tasks are launched).
 *
 * This is admittedly rather ugly, but so were the alternatives:
 *	- record the event_table pointers in a statically-allocated
 *	  array (dynamic allocation not yet being available) -- but
 *	  there were over 8000 of them;
 *	- add a new link field to each lock structure;
 *	- change pointers to array indices -- this adds quite a bit of
 *	  arithmetic to every lock operation that might be traced.
 */
#define	lock_event_table(lockp)		((lockp)->u.s.event_tablep)
#define	lock_start_hold_time(lockp)	((lockp)->u.s.start_hold_time)
#endif	/* ETAP */

extern void	lock_init	(lock_t*,
				 boolean_t,
				 etap_event_t,
				 etap_event_t);

#else	/* MACH_KERNEL_PRIVATE */

typedef struct __lock__ lock_t;
extern lock_t	*lock_alloc	(boolean_t, etap_event_t, etap_event_t);
extern void	lock_free	(lock_t*);

#endif	/* !MACH_KERNEL_PRIVATE */

extern void	lock_write		(lock_t*);
extern void	lock_read		(lock_t*);
extern void	lock_done		(lock_t*);
extern void	lock_write_to_read	(lock_t*);

#define	lock_read_done(l)	lock_done(l)
#define	lock_write_done(l)	lock_done(l)

extern boolean_t lock_read_to_write	(lock_t*);	/* vm_map is the only user */
extern unsigned int LockTimeOut;	/* Standard lock timeout value */

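/*
 * Illustrative sketch (editor's addition): reader/writer usage of the
 * lock_t interface above, including the read-to-write upgrade.  The
 * "example_*" names and zero event ids are placeholders.  The upgrade
 * semantics assumed here (a TRUE return from lock_read_to_write()
 * means the read hold was lost and the lock must be re-acquired)
 * follow the classic Mach behavior relied on by vm_map.
 */
#if 0	/* example only */
static lock_t *example_lock;

void
example_setup(void)
{
	/* can_sleep == TRUE: contending threads block instead of spinning */
	example_lock = lock_alloc(TRUE, (etap_event_t)0, (etap_event_t)0);
}

void
example_reader(void)
{
	lock_read(example_lock);	/* many readers may hold this at once */
	/* ... read shared state ... */
	lock_read_done(example_lock);
}

void
example_upgrader(void)
{
	lock_read(example_lock);
	if (lock_read_to_write(example_lock)) {
		/* upgrade failed: the read hold was dropped; start over */
		lock_write(example_lock);
	}
	/* ... exclusive access ... */
	lock_write_done(example_lock);
}
#endif
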
#endif	/* _KERN_LOCK_H_ */