/*
 * Provenance (web-viewer header retained as a comment):
 * git.saurik.com mirror of apple/xnu.git, tag xnu-344.21.73,
 * blob osfmk/i386/lock.h
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * Copyright (C) 1998 Apple Computer
27 * All Rights Reserved
28 */
29 /*
30 * @OSF_COPYRIGHT@
31 */
32 /*
33 * Mach Operating System
34 * Copyright (c) 1991,1990 Carnegie Mellon University
35 * All Rights Reserved.
36 *
37 * Permission to use, copy, modify and distribute this software and its
38 * documentation is hereby granted, provided that both the copyright
39 * notice and this permission notice appear in all copies of the
40 * software, derivative works or modified versions, and any portions
41 * thereof, and that both notices appear in supporting documentation.
42 *
43 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
44 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
45 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 *
47 * Carnegie Mellon requests users of this software to return to
48 *
49 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
50 * School of Computer Science
51 * Carnegie Mellon University
52 * Pittsburgh PA 15213-3890
53 *
54 * any improvements or extensions that they make and grant Carnegie Mellon
55 * the rights to redistribute these changes.
56 */
57
58 /*
59 */
60
61 /*
62 * Machine-dependent simple locks for the i386.
63 */
64
65 #ifndef _I386_LOCK_H_
66 #define _I386_LOCK_H_
67
68 #include <sys/appleapiopts.h>
69
70 #ifdef __APPLE_API_PRIVATE
71
72 #ifdef MACH_KERNEL_PRIVATE
73
74 #include <kern/macro_help.h>
75 #include <kern/assert.h>
76 #include <i386/hw_lock_types.h>
77
78 #include <mach_rt.h>
79 #include <mach_ldebug.h>
80 #include <cpus.h>
81
82
83 #if defined(__GNUC__)
84
85 /*
86 * General bit-lock routines.
87 */
88
/*
 * bit_lock(bit, l): spin until bit `bit` of the int at address `l`
 * has been atomically set by this CPU (test-and-set spinlock acquire).
 * The loop first watches the bit with a plain btl (no bus lock) while
 * it is held, then attempts the locked btsl; if another CPU won the
 * race (CF set), it goes back to watching.  The "memory" clobber
 * stops the compiler from moving memory accesses across the acquire.
 */
#define bit_lock(bit,l) \
	__asm__ volatile("	jmp	1f	\n \
		0:	btl	%0, %1	\n \
			jb	0b	\n \
		1:	lock		\n \
			btsl	%0,%1	\n \
			jb	0b" :	\
		:	\
		"r" (bit), "m" (*(volatile int *)(l)) : \
		"memory");
99
/*
 * bit_unlock(bit, l): atomically clear bit `bit` of the int at
 * address `l`, releasing a lock taken with bit_lock().
 * NOTE(review): unlike bit_lock this asm has no "memory" clobber,
 * so the compiler may in principle reorder memory accesses across
 * the release -- confirm whether callers depend on that ordering.
 */
#define bit_unlock(bit,l) \
	__asm__ volatile("	lock		\n \
			btrl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));
105
106 /*
107 * Set or clear individual bits in a long word.
108 * The locked access is needed only to lock access
109 * to the word, not to individual bits.
110 */
111
/*
 * i_bit_set(bit, l): atomically set bit `bit` of the int at `l`
 * (lock btsl).  The lock prefix serializes the whole word access,
 * not just the single bit.
 */
#define i_bit_set(bit,l) \
	__asm__ volatile("	lock		\n \
			btsl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));
117
/*
 * i_bit_clear(bit, l): atomically clear bit `bit` of the int at `l`
 * (lock btrl).  Counterpart of i_bit_set().
 */
#define i_bit_clear(bit,l) \
	__asm__ volatile("	lock		\n \
			btrl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));
123
/*
 * i_bit_isset(testbit, word): return non-zero iff bit `testbit` of
 * *word is set (read-only test, no bus lock).
 *
 * btl copies the tested bit into CF; `sbbl %0,%0` then materializes
 * CF as all-ones (-1) when the bit was set and 0 when it was clear.
 *
 * Bug fix: the memory input operand must be the pointed-to word,
 * "m" (*word).  The original passed "m" (word), which made btl test
 * a bit of the pointer variable's own storage rather than the word
 * it points to.
 */
extern __inline__ unsigned long i_bit_isset(unsigned int testbit, volatile unsigned long *word)
{
	int	bit;

	__asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
		: "m" (*word), "ir" (testbit));
	return bit;
}
132
/* Forward declarations for the inline helpers defined below. */

/* Atomically exchange the byte at cp with `new`; returns the old byte. */
extern __inline__ char	xchgb(volatile char * cp, char new);

/* Add `delta` to the long/short/char at `p`. */
extern __inline__ void	atomic_incl(long * p, long delta);
extern __inline__ void	atomic_incs(short * p, short delta);
extern __inline__ void	atomic_incb(char * p, char delta);

/* Subtract `delta` from the long/short/char at `p`. */
extern __inline__ void	atomic_decl(long * p, long delta);
extern __inline__ void	atomic_decs(short * p, short delta);
extern __inline__ void	atomic_decb(char * p, char delta);

/* Read the long/short/char at `p`. */
extern __inline__ long	atomic_getl(long * p);
extern __inline__ short	atomic_gets(short * p);
extern __inline__ char	atomic_getb(char * p);

/* Store `value` into the long/short/char at `p`. */
extern __inline__ void	atomic_setl(long * p, long value);
extern __inline__ void	atomic_sets(short * p, short value);
extern __inline__ void	atomic_setb(char * p, char value);
150
/*
 * xchgb(cp, new): atomically exchange the byte at cp with `new` and
 * return the previous byte.  x86 xchg with a memory operand is
 * implicitly locked, so no lock prefix is needed; the "memory"
 * clobber keeps the compiler from caching values across the swap.
 */
extern __inline__ char xchgb(volatile char * cp, char new)
{
	register char	old = new;

	__asm__ volatile ("	xchgb	%0,%2" :
			"=q" (old) :
			"0" (new), "m" (*(volatile char *)cp) : "memory");
	return (old);
}
160
/*
 * atomic_incl(p, delta): add `delta` to the long at `p`.
 * When NEED_ATOMIC is configured, uses a locked addl so the
 * read-modify-write is atomic with respect to other CPUs;
 * otherwise a plain add suffices.
 */
extern __inline__ void atomic_incl(long * p, long delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n	\
			addl	%0,%1" :	\
			:			\
			"r" (delta), "m" (*(volatile long *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}
172
/*
 * atomic_incs(p, delta): add `delta` to the short at `p`.
 * Locked addw when NEED_ATOMIC is configured, plain add otherwise.
 */
extern __inline__ void atomic_incs(short * p, short delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n	\
			addw	%0,%1" :	\
			:			\
			"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}
184
/*
 * atomic_incb(p, delta): add `delta` to the char at `p`.
 * Locked addb when NEED_ATOMIC is configured, plain add otherwise.
 * The "q" constraint restricts `delta` to a byte-addressable
 * register (a/b/c/d on i386), as addb requires.
 */
extern __inline__ void atomic_incb(char * p, char delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n	\
			addb	%0,%1" :	\
			:			\
			"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}
196
/*
 * atomic_decl(p, delta): subtract `delta` from the long at `p`.
 * When NEED_ATOMIC is configured, uses a locked subl so the
 * read-modify-write is atomic with respect to other CPUs;
 * otherwise a plain subtract suffices.
 *
 * Consistency fix: this was the only atomic_* routine gated on
 * NCPUS > 1; every sibling (atomic_incl, atomic_decs, atomic_decb,
 * ...) is gated on NEED_ATOMIC, so use the same condition here.
 */
extern __inline__ void atomic_decl(long * p, long delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n	\
			subl	%0,%1" :	\
			:			\
			"r" (delta), "m" (*(volatile long *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
208
/*
 * atomic_decs(p, delta): subtract `delta` from the short at `p`.
 * Locked subw when NEED_ATOMIC is configured, plain subtract otherwise.
 */
extern __inline__ void atomic_decs(short * p, short delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n	\
			subw	%0,%1" :	\
			:			\
			"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
220
/*
 * atomic_decb(p, delta): subtract `delta` from the char at `p`.
 * Locked subb when NEED_ATOMIC is configured, plain subtract otherwise.
 */
extern __inline__ void atomic_decb(char * p, char delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n	\
			subb	%0,%1" :	\
			:			\
			"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
232
/*
 * atomic_getl: read and return the long currently stored at `p`.
 * A plain load -- no lock prefix is needed for a simple read.
 */
extern __inline__ long atomic_getl(long * p)
{
	long	current = *p;

	return current;
}
237
/*
 * atomic_gets: read and return the short currently stored at `p`.
 * A plain load -- no lock prefix is needed for a simple read.
 */
extern __inline__ short atomic_gets(short * p)
{
	short	current = *p;

	return current;
}
242
/*
 * atomic_getb: read and return the char currently stored at `p`.
 * A plain load -- no lock prefix is needed for a simple read.
 */
extern __inline__ char atomic_getb(char * p)
{
	char	current = *p;

	return current;
}
247
/*
 * atomic_setl: store `value` into the long at `p`.
 * A plain store -- no lock prefix is needed for a simple write.
 */
extern __inline__ void atomic_setl(long * p, long value)
{
	p[0] = value;
}
252
/*
 * atomic_sets: store `value` into the short at `p`.
 * A plain store -- no lock prefix is needed for a simple write.
 */
extern __inline__ void atomic_sets(short * p, short value)
{
	p[0] = value;
}
257
/*
 * atomic_setb: store `value` into the char at `p`.
 * A plain store -- no lock prefix is needed for a simple write.
 */
extern __inline__ void atomic_setb(char * p, char value)
{
	p[0] = value;
}
262
263
264 #else /* !defined(__GNUC__) */
265
/*
 * Non-GNU compilers cannot use the inline-asm definitions above;
 * the same operations are declared here and supplied as real
 * functions elsewhere.
 */

/* Atomically set bit `index` of the word at `addr`. */
extern void	i_bit_set(
			int		index,
			void		*addr);

/* Atomically clear bit `index` of the word at `addr`. */
extern void	i_bit_clear(
			int		index,
			void		*addr);

/* Spin until bit `index` of the word at `addr` is acquired. */
extern void	bit_lock(
			int		index,
			void		*addr);

/* Release a bit lock taken with bit_lock(). */
extern void	bit_unlock(
			int		index,
			void		*addr);
281
282 /*
283 * All other routines defined in __GNUC__ case lack
284 * definitions otherwise. - XXX
285 */
286
287 #endif /* !defined(__GNUC__) */
288
289
290 #if !(USLOCK_DEBUG || USLOCK_STATS)
291 /*
292 * Take responsibility for production-quality usimple_locks.
293 * Let the portable lock package build simple_locks in terms
294 * of usimple_locks, which is done efficiently with macros.
295 * Currently, these aren't inlined although they probably
296 * should be. The portable lock package is used for the
297 * usimple_lock prototypes and data declarations.
298 *
299 * For non-production configurations, punt entirely to the
300 * portable lock package.
301 *
302 * N.B. I've left in the hooks for ETAP, so we can
303 * compare the performance of stats-gathering on top
304 * of "production" locks v. stats-gathering on top
305 * of portable, C-based locks.
306 */
307 #define USIMPLE_LOCK_CALLS
308 #endif /* !(USLOCK_DEBUG || USLOCK_STATS) */
309
/* Preemption-check hook used by the lock code; defined in
 * machine-dependent code, declared here for users of this header. */
extern void kernel_preempt_check (void);
311
312 #endif /* MACH_KERNEL_PRIVATE */
313
#endif	/* __APPLE_API_PRIVATE */
315
316 #endif /* _I386_LOCK_H_ */