/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 * Machine-dependent simple locks for the i386.
 */

#ifndef	_I386_LOCK_H_
#define	_I386_LOCK_H_

#include <kern/macro_help.h>
#include <kern/assert.h>
#include <i386/hw_lock_types.h>

#ifdef	MACH_KERNEL_PRIVATE

#include <mach_rt.h>
#include <mach_ldebug.h>
#include <cpus.h>


#if defined(__GNUC__)

/*
 * General bit-lock routines.
 */
#define	bit_lock(bit,l) \
	__asm__ volatile("	jmp	1f	\n \
	0:	btl	%0, %1	\n \
		jb	0b	\n \
	1:	lock		\n \
		btsl	%0,%1	\n \
		jb	0b" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)) : \
		"memory");

/*
 * The "memory" clobber keeps the compiler from moving accesses to the
 * protected data past the unlock.
 */
#define	bit_unlock(bit,l) \
	__asm__ volatile("	lock		\n \
		btrl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)) : \
		"memory");
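
/*
 * Usage sketch (illustrative only: `map_flags' and MAP_LOCK_BIT are
 * hypothetical names, not part of this interface).  bit_lock() spins
 * until it sets the bit atomically, so a single bit of a word can
 * serve as a spin lock:
 *
 *	int map_flags = 0;
 *	#define MAP_LOCK_BIT 0
 *
 *	bit_lock(MAP_LOCK_BIT, &map_flags);     spin until we own bit 0
 *	...critical section...
 *	bit_unlock(MAP_LOCK_BIT, &map_flags);   atomically clear bit 0
 */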

/*
 * Set or clear individual bits in a long word.
 * The locked access is needed only to lock access
 * to the word, not to individual bits.
 */

#define	i_bit_set(bit,l) \
	__asm__ volatile("	lock		\n \
		btsl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));

#define	i_bit_clear(bit,l) \
	__asm__ volatile("	lock		\n \
		btrl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));

extern __inline__ unsigned long i_bit_isset(unsigned int testbit, volatile unsigned long *word)
{
	int	bit;

	/* btl copies the tested bit into CF; sbbl materializes CF as 0 or -1 */
	__asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
		: "m" (*word), "ir" (testbit));
	return bit;
}
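
/*
 * Usage sketch (illustrative only: `cpu_signals' and SIGP_AST are
 * hypothetical names).  i_bit_set()/i_bit_clear() take the bus lock
 * only so that concurrent updates to other bits of the same word are
 * not lost; i_bit_isset() merely samples the bit and returns nonzero
 * if it is set:
 *
 *	volatile unsigned long cpu_signals = 0;
 *	#define SIGP_AST 2
 *
 *	i_bit_set(SIGP_AST, &cpu_signals);
 *	if (i_bit_isset(SIGP_AST, &cpu_signals))
 *		i_bit_clear(SIGP_AST, &cpu_signals);
 */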

extern __inline__ char	xchgb(volatile char * cp, char new);

extern __inline__ void	atomic_incl(long * p, long delta);
extern __inline__ void	atomic_incs(short * p, short delta);
extern __inline__ void	atomic_incb(char * p, char delta);

extern __inline__ void	atomic_decl(long * p, long delta);
extern __inline__ void	atomic_decs(short * p, short delta);
extern __inline__ void	atomic_decb(char * p, char delta);

extern __inline__ long	atomic_getl(long * p);
extern __inline__ short	atomic_gets(short * p);
extern __inline__ char	atomic_getb(char * p);

extern __inline__ void	atomic_setl(long * p, long value);
extern __inline__ void	atomic_sets(short * p, short value);
extern __inline__ void	atomic_setb(char * p, char value);

extern __inline__ char xchgb(volatile char * cp, char new)
{
	register char	old = new;

	__asm__ volatile ("	xchgb	%0,%2" :
			"=q" (old) :
			"0" (new), "m" (*(volatile char *)cp) : "memory");
	return (old);
}
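
/*
 * Usage sketch (illustrative only: `lock_byte' is a hypothetical
 * name).  Because xchgb() is an atomic exchange, it implements a
 * byte-wide test-and-set spin lock; a nonzero return means some
 * other processor already held the byte:
 *
 *	volatile char lock_byte = 0;
 *
 *	while (xchgb(&lock_byte, 1))	spin until the old value was 0
 *		continue;
 *	...critical section...
 *	lock_byte = 0;			release
 */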

extern __inline__ void atomic_incl(long * p, long delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n \
		addl	%0,%1" : \
		: \
		"r" (delta), "m" (*(volatile long *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ void atomic_incs(short * p, short delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n \
		addw	%0,%1" : \
		: \
		"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ void atomic_incb(char * p, char delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n \
		addb	%0,%1" : \
		: \
		"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ void atomic_decl(long * p, long delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n \
		subl	%0,%1" : \
		: \
		"r" (delta), "m" (*(volatile long *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ void atomic_decs(short * p, short delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n \
		subw	%0,%1" : \
		: \
		"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ void atomic_decb(char * p, char delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n \
		subb	%0,%1" : \
		: \
		"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
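
/*
 * Usage sketch (illustrative only: `refcnt' is a hypothetical name).
 * The inc/dec routines take a signed delta, so a reference count can
 * be maintained with a single primitive:
 *
 *	long refcnt = 1;
 *
 *	atomic_incl(&refcnt, 1);	take a reference
 *	atomic_decl(&refcnt, 1);	drop it
 *
 * The unlocked (!NEED_ATOMIC) variants are safe only when no other
 * processor can touch the counter.
 */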

extern __inline__ long atomic_getl(long * p)
{
	return (*p);
}

extern __inline__ short atomic_gets(short * p)
{
	return (*p);
}

extern __inline__ char atomic_getb(char * p)
{
	return (*p);
}

extern __inline__ void atomic_setl(long * p, long value)
{
	*p = value;
}

extern __inline__ void atomic_sets(short * p, short value)
{
	*p = value;
}

extern __inline__ void atomic_setb(char * p, char value)
{
	*p = value;
}

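/*
 * Note that the get/set routines above compile to plain loads and
 * stores; they are atomic only because naturally aligned word (and
 * smaller) accesses cannot be torn on the i386, and they impose no
 * ordering.  A minimal sketch (with the hypothetical variable
 * `state'):
 *
 *	long state;
 *
 *	atomic_setl(&state, 1);			publish a flag
 *	while (atomic_getl(&state) == 0)	poll for it elsewhere
 *		continue;
 */
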
#else	/* !defined(__GNUC__) */

extern void	i_bit_set(
			int		index,
			void		*addr);

extern void	i_bit_clear(
			int		index,
			void		*addr);

extern void	bit_lock(
			int		index,
			void		*addr);

extern void	bit_unlock(
			int		index,
			void		*addr);

/*
 * All the other routines defined above for the __GNUC__ case
 * lack definitions here. - XXX
 */

#endif	/* !defined(__GNUC__) */


#if	!(USLOCK_DEBUG || USLOCK_STATS)
/*
 * Take responsibility for production-quality usimple_locks.
 * Let the portable lock package build simple_locks in terms
 * of usimple_locks, which is done efficiently with macros.
 * Currently, these aren't inlined although they probably
 * should be.  The portable lock package is used for the
 * usimple_lock prototypes and data declarations.
 *
 * For non-production configurations, punt entirely to the
 * portable lock package.
 *
 * N.B.  I've left in the hooks for ETAP, so we can compare the
 * performance of stats-gathering on top of "production" locks
 * vs. stats-gathering on top of portable, C-based locks.
 */
#define	USIMPLE_LOCK_CALLS
#endif	/* !(USLOCK_DEBUG || USLOCK_STATS) */

#if	MACH_RT || (NCPUS > 1) || MACH_LDEBUG
#if	MACH_LDEBUG || !MACH_RT
#define	mutex_try(m)	(!(m)->interlock && _mutex_try(m))
#define	mutex_lock(m)				\
MACRO_BEGIN					\
	assert(assert_wait_possible());		\
	_mutex_lock((m));			\
MACRO_END

#else	/* MACH_LDEBUG || !MACH_RT */
#define	mutex_try(m)	(!(m)->interlock &&			\
			 !xchgb((volatile char *)&((m)->locked), 1))
#define	mutex_lock(m)				\
MACRO_BEGIN					\
	assert(assert_wait_possible());		\
	_mutex_lock(m);				\
MACRO_END

#endif	/* MACH_LDEBUG || !MACH_RT */
#else	/* MACH_RT || (NCPUS > 1) || MACH_LDEBUG */
#define	mutex_try	_mutex_try
#define	mutex_lock	_mutex_lock
#endif	/* MACH_RT || (NCPUS > 1) || MACH_LDEBUG */

#else	/* !MACH_KERNEL_PRIVATE */

#define	mutex_try	_mutex_try
#define	mutex_lock(m)				\
MACRO_BEGIN					\
	assert(assert_wait_possible());		\
	_mutex_lock((m));			\
MACRO_END

#endif	/* !MACH_KERNEL_PRIVATE */
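
/*
 * Usage sketch, assuming the portable lock package's mutex_init()
 * and mutex_unlock() (declared elsewhere): mutex_try() returns
 * nonzero on success and never blocks, while mutex_lock() may sleep,
 * hence the assert_wait_possible() assertion above.
 *
 *	if (mutex_try(m)) {
 *		...short critical section...
 *		mutex_unlock(m);
 *	} else {
 *		mutex_lock(m);		may block
 *		...
 *		mutex_unlock(m);
 *	}
 */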

extern void kernel_preempt_check (void);

#endif	/* _I386_LOCK_H_ */