/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Higher Level Locking primitives definitions
 */
/*
 * Configuration variables:
 *
 *	MACH_LDEBUG:	record pc and thread of callers, turn on
 *			sanity checking.
 *
 *	ETAP:		The Event Trace Analysis Package (ETAP) monitors
 *			and records micro-kernel lock behavior and general
 *			kernel events.  ETAP supports two levels of
 *			tracing for locks:
 *			- cumulative (ETAP_LOCK_ACCUMULATE)
 *			- monitored  (ETAP_LOCK_MONITOR)
 *
 *			Note: If either level of tracing is configured then
 *			ETAP_LOCK_TRACE is automatically defined to
 *			equal one.
 *
 *			Several macros are added throughout the lock code to
 *			allow for convenient configuration.
 */
84 #include <kern/simple_lock.h>
85 #include <machine/lock.h>
86 #include <mach/etap_events.h>
87 #include <mach/etap.h>
/*
 * The Mach lock package exports the following high-level
 * lock-oriented abstractions:
 *
 *	Lock Type	Properties
 *	mutex		blocking mutual exclusion lock, intended for
 *			SMP synchronization (vanishes on a uniprocessor);
 *			supports debugging, statistics, and pre-emption
 *	lock		blocking synchronization permitting multiple
 *			simultaneous readers or a single writer; supports
 *			debugging and statistics but not pre-emption
 *
 * In general, mutex locks are preferred over all others, as the
 * mutex supports pre-emption and relinquishes the processor
 * upon contention.
 */
107 #include <sys/appleapiopts.h>
109 #ifdef __APPLE_API_PRIVATE
111 #ifdef MACH_KERNEL_PRIVATE
/*
 * A simple mutex lock.
 * Do not change the order of the fields in this structure without
 * changing the machine-dependent assembler routines which depend
 * on it.
 */
120 #include <mach_ldebug.h>
121 #include <kern/etap_options.h>
122 #include <kern/etap_pool.h>
125 hw_lock_data_t interlock
;
126 hw_lock_data_t locked
;
128 uint16_t promoted_pri
;
131 #define MUTEX_TAG 0x4d4d
134 #endif /* MACH_LDEBUG */
136 union { /* Must be overlaid on the event_tablep */
137 struct event_table_chain event_table_chain
;
139 event_table_t event_tablep
; /* ptr to event table entry */
140 etap_time_t start_hold_time
; /* Time of last acquistion */
143 #endif /* ETAP_LOCK_TRACE */
144 #if ETAP_LOCK_ACCUMULATE
145 cbuff_entry_t cbuff_entry
; /* cumulative buffer entry */
146 #endif /* ETAP_LOCK_ACCUMULATE */
147 #if ETAP_LOCK_MONITOR
148 vm_offset_t start_pc
; /* pc where lock operation began */
149 vm_offset_t end_pc
; /* pc where lock operation ended */
150 #endif /* ETAP_LOCK_MONITOR */
/* Declare a statically-allocated mutex, and take the address of one. */
#define decl_mutex_data(class,name)	class mutex_t name;
#define mutex_addr(m)			(&(m))
156 extern void mutex_init (mutex_t
*, etap_event_t
);
157 extern void mutex_lock_wait (mutex_t
*, thread_act_t
);
158 extern int mutex_lock_acquire (mutex_t
*);
159 extern void mutex_unlock_wakeup (mutex_t
*, thread_act_t
);
160 extern void interlock_unlock (hw_lock_t
);
162 #endif /* MACH_KERNEL_PRIVATE */
164 extern void mutex_pause (void);
166 #endif /* __APPLE_API_PRIVATE */
168 #if !defined(MACH_KERNEL_PRIVATE)
170 typedef struct __mutex__ mutex_t
;
172 #endif /* MACH_KERNEL_PRIVATE */
174 extern mutex_t
*mutex_alloc (etap_event_t
);
175 extern void mutex_free (mutex_t
*);
176 extern void mutex_lock (mutex_t
*);
177 extern void mutex_unlock (mutex_t
*);
178 extern boolean_t
mutex_try (mutex_t
*);
180 #ifdef __APPLE_API_PRIVATE
182 #ifdef MACH_KERNEL_PRIVATE
/*
 * The general lock structure.  Provides for multiple readers,
 * upgrading from read to write, and sleeping until the lock
 * can be gained.
 *
 * On some architectures, assembly language code in the 'inline'
 * program fiddles the lock structures.  It must be changed in
 * concert with the structure layout.
 *
 * Only the "interlock" field is used for hardware exclusion;
 * other fields are modified with normal instructions after
 * acquiring the interlock bit.
 */
199 decl_simple_lock_data(,interlock
) /* "hardware" interlock field */
200 volatile unsigned int
201 read_count
:16, /* No. of accepted readers */
202 want_upgrade
:1, /* Read-to-write upgrade waiting */
203 want_write
:1, /* Writer is waiting, or
205 waiting
:1, /* Someone is sleeping on lock */
206 can_sleep
:1; /* Can attempts to lock go to sleep? */
208 union { /* Must be overlaid on the event_tablep */
209 struct event_table_chain event_table_chain
;
211 event_table_t event_tablep
; /* ptr to event table entry */
212 start_data_node_t start_list
; /* linked list of start times
216 #endif /* ETAP_LOCK_TRACE */
217 #if ETAP_LOCK_ACCUMULATE
218 cbuff_entry_t cbuff_write
; /* write cumulative buffer entry */
219 cbuff_entry_t cbuff_read
; /* read cumulative buffer entry */
220 #endif /* ETAP_LOCK_ACCUMULATE */
/* Sleep locks must work even if no multiprocessing */

/*
 * Complex lock operations
 */

/*
 * Locks have a pointer into an event_table entry that names the
 * corresponding lock event and controls whether it is being traced.
 * Initially this pointer is into a read-only table event_table_init[].
 * Once dynamic allocation becomes possible a modifiable copy of the table
 * is allocated and pointers are set to within this copy.  The pointers
 * that were already in place at that point need to be switched to point
 * into the copy.  To do this we overlay the event_table_chain structure
 * onto sufficiently-big elements of the various lock structures so we
 * can sweep down this list switching the pointers.  The assumption is
 * that we will not want to enable tracing before this is done (which is
 * after all during kernel bootstrap, before any user tasks are launched).
 *
 * This is admittedly rather ugly but so were the alternatives:
 * - record the event_table pointers in a statically-allocated array
 *   (dynamic allocation not yet being available) -- but there were
 *   too many of them;
 * - add a new link field to each lock structure;
 * - change pointers to array indices -- this adds quite a bit of
 *   arithmetic to every lock operation that might be traced.
 */
/*
 * Accessors into the ETAP trace-union overlay; the visible #endif implies
 * the opening conditional, which was lost in a mangled merge.
 */
#if	ETAP_LOCK_TRACE
#define lock_event_table(lockp)		((lockp)->u.s.event_tablep)
#define lock_start_hold_time(lockp)	((lockp)->u.s.start_hold_time)
#endif	/* ETAP_LOCK_TRACE */
255 extern void lock_init (lock_t
*,
260 #endif /* MACH_KERNEL_PRIVATE */
262 extern unsigned int LockTimeOut
; /* Standard lock timeout value */
264 #endif /* __APPLE_API_PRIVATE */
266 #if !defined(MACH_KERNEL_PRIVATE)
268 typedef struct __lock__ lock_t
;
269 extern lock_t
*lock_alloc(boolean_t
, etap_event_t
, etap_event_t
);
270 void lock_free(lock_t
*);
272 #endif /* MACH_KERNEL_PRIVATE */
274 extern void lock_write (lock_t
*);
275 extern void lock_read (lock_t
*);
276 extern void lock_done (lock_t
*);
277 extern void lock_write_to_read (lock_t
*);
279 #define lock_read_done(l) lock_done(l)
280 #define lock_write_done(l) lock_done(l)
282 extern boolean_t
lock_read_to_write (lock_t
*); /* vm_map is only user */
284 #endif /* _KERN_LOCK_H_ */