/* apple/xnu (xnu-344.23): osfmk/i386/lock.h */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 * Machine-dependent simple locks for the i386.
 */

#ifndef	_I386_LOCK_H_
#define	_I386_LOCK_H_

#include <sys/appleapiopts.h>

#ifdef __APPLE_API_PRIVATE

#ifdef MACH_KERNEL_PRIVATE

#include <kern/macro_help.h>
#include <kern/assert.h>
#include <i386/hw_lock_types.h>

#include <mach_rt.h>
#include <mach_ldebug.h>
#include <cpus.h>


#if defined(__GNUC__)

/*
 * General bit-lock routines.
 */
#define bit_lock(bit,l) \
	__asm__ volatile("	jmp	1f	\n \
		0:	btl	%0, %1	\n \
			jb	0b	\n \
		1:	lock		\n \
			btsl	%0,%1	\n \
			jb	0b" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)) : \
		"memory");

#define bit_unlock(bit,l) \
	__asm__ volatile("	lock	\n \
			btrl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));

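/*
 * Illustrative usage sketch, not part of the original interface: a caller
 * might dedicate one bit of a word as a lock bit and protect the rest of
 * the word (or an associated structure) with bit_lock()/bit_unlock().
 * The helper name and the choice of bit 31 below are hypothetical.
 */
extern __inline__ void example_with_bit_lock(volatile int *flags)
{
	bit_lock(31, flags);	/* spin until bit 31 is clear, then set it atomically */
	/* ... work that must not run concurrently on another cpu ... */
	bit_unlock(31, flags);	/* atomically clear the lock bit */
}
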
/*
 * Set or clear individual bits in a long word.
 * The locked access is needed only to lock access
 * to the word, not to individual bits.
 */

#define i_bit_set(bit,l) \
	__asm__ volatile("	lock	\n \
			btsl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));

#define i_bit_clear(bit,l) \
	__asm__ volatile("	lock	\n \
			btrl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));

extern __inline__ unsigned long i_bit_isset(unsigned int testbit, volatile unsigned long *word)
{
	unsigned long	bit;

	/* btl tests the requested bit; sbbl turns the carry flag into 0 or ~0 */
	__asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
		: "m" (*word), "ir" (testbit));
	return bit;
}

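/*
 * Illustrative usage sketch, not part of the original interface: the
 * i_bit_* routines can maintain independent flag bits packed into a
 * single word without a surrounding lock.  The helper names and the
 * choice of bit 0 as a "busy" flag are hypothetical.
 */
extern __inline__ void example_mark_busy(volatile unsigned long *flags)
{
	i_bit_set(0, flags);			/* locked bts: set the busy bit */
}

extern __inline__ void example_clear_busy(volatile unsigned long *flags)
{
	i_bit_clear(0, flags);			/* locked btr: clear the busy bit */
}

extern __inline__ unsigned long example_is_busy(volatile unsigned long *flags)
{
	return (i_bit_isset(0, flags));		/* non-zero if the busy bit is set */
}
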
extern __inline__ char	xchgb(volatile char * cp, char new);

extern __inline__ void	atomic_incl(long * p, long delta);
extern __inline__ void	atomic_incs(short * p, short delta);
extern __inline__ void	atomic_incb(char * p, char delta);

extern __inline__ void	atomic_decl(long * p, long delta);
extern __inline__ void	atomic_decs(short * p, short delta);
extern __inline__ void	atomic_decb(char * p, char delta);

extern __inline__ long	atomic_getl(long * p);
extern __inline__ short	atomic_gets(short * p);
extern __inline__ char	atomic_getb(char * p);

extern __inline__ void	atomic_setl(long * p, long value);
extern __inline__ void	atomic_sets(short * p, short value);
extern __inline__ void	atomic_setb(char * p, char value);

extern __inline__ char xchgb(volatile char * cp, char new)
{
	register char	old = new;

	__asm__ volatile ("	xchgb	%0,%2" :
			"=q" (old) :
			"0" (new), "m" (*(volatile char *)cp) : "memory");
	return (old);
}

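/*
 * Illustrative usage sketch, not part of the original interface: xchgb()
 * is the classic i386 test-and-set primitive, so a byte-wide busy-wait
 * lock can be built directly on top of it.  The helper names below are
 * hypothetical; real kernel spin locks are provided elsewhere (see the
 * usimple_lock discussion toward the end of this file).
 */
extern __inline__ void example_byte_lock(volatile char *lp)
{
	while (xchgb(lp, 1) != 0)		/* swap in 1; an old value of 0 means we own it */
		continue;			/* otherwise spin and retry */
}

extern __inline__ void example_byte_unlock(volatile char *lp)
{
	*lp = 0;				/* aligned byte store releases the lock */
}
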
extern __inline__ void atomic_incl(long * p, long delta)
{
#if NEED_ATOMIC
	__asm__ volatile ("	lock	\n \
			addl	%0,%1" : \
		: \
		"r" (delta), "m" (*(volatile long *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ void atomic_incs(short * p, short delta)
{
#if NEED_ATOMIC
	__asm__ volatile ("	lock	\n \
			addw	%0,%1" : \
		: \
		"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ void atomic_incb(char * p, char delta)
{
#if NEED_ATOMIC
	__asm__ volatile ("	lock	\n \
			addb	%0,%1" : \
		: \
		"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ void atomic_decl(long * p, long delta)
{
/* Note: unlike its sibling atomic_* routines, which key off NEED_ATOMIC,
 * this one is guarded by NCPUS > 1. */
#if NCPUS > 1
	__asm__ volatile ("	lock	\n \
			subl	%0,%1" : \
		: \
		"r" (delta), "m" (*(volatile long *)p));
#else	/* NCPUS > 1 */
	*p -= delta;
#endif	/* NCPUS > 1 */
}

extern __inline__ void atomic_decs(short * p, short delta)
{
#if NEED_ATOMIC
	__asm__ volatile ("	lock	\n \
			subw	%0,%1" : \
		: \
		"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ void atomic_decb(char * p, char delta)
{
#if NEED_ATOMIC
	__asm__ volatile ("	lock	\n \
			subb	%0,%1" : \
		: \
		"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}

extern __inline__ long atomic_getl(long * p)
{
	return (*p);
}

extern __inline__ short atomic_gets(short * p)
{
	return (*p);
}

extern __inline__ char atomic_getb(char * p)
{
	return (*p);
}

extern __inline__ void atomic_setl(long * p, long value)
{
	*p = value;
}

extern __inline__ void atomic_sets(short * p, short value)
{
	*p = value;
}

extern __inline__ void atomic_setb(char * p, char value)
{
	*p = value;
}

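/*
 * Illustrative usage sketch, not part of the original interface: the
 * atomic_* routines compose into a simple event counter that may be
 * bumped from several cpus when the locked variants are compiled in.
 * The helper names below are hypothetical.
 */
extern __inline__ void example_counter_bump(long *counter)
{
	atomic_incl(counter, 1);		/* locked addl on MP-safe builds */
}

extern __inline__ long example_counter_read(long *counter)
{
	return (atomic_getl(counter));		/* plain aligned load; atomic on i386 */
}
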
#else	/* !defined(__GNUC__) */

extern void i_bit_set(
	int		index,
	void		*addr);

extern void i_bit_clear(
	int		index,
	void		*addr);

extern void bit_lock(
	int		index,
	void		*addr);

extern void bit_unlock(
	int		index,
	void		*addr);

/*
 * The remaining routines defined in the __GNUC__ case have no
 * definitions for other compilers. - XXX
 */

#endif	/* !defined(__GNUC__) */


#if !(USLOCK_DEBUG || USLOCK_STATS)
/*
 * Take responsibility for production-quality usimple_locks.
 * Let the portable lock package build simple_locks in terms
 * of usimple_locks, which is done efficiently with macros.
 * Currently, these aren't inlined although they probably
 * should be.  The portable lock package is used for the
 * usimple_lock prototypes and data declarations.
 *
 * For non-production configurations, punt entirely to the
 * portable lock package.
 *
 * N.B.  I've left in the hooks for ETAP, so we can
 * compare the performance of stats-gathering on top
 * of "production" locks v. stats-gathering on top
 * of portable, C-based locks.
 */
#define	USIMPLE_LOCK_CALLS
#endif	/* !(USLOCK_DEBUG || USLOCK_STATS) */

extern void kernel_preempt_check (void);

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* __APPLE_API_PRIVATE */

#endif	/* _I386_LOCK_H_ */