/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 * Machine-dependent simple locks for the i386.
 */
#ifdef	KERNEL_PRIVATE

#ifndef	_I386_LOCK_H_
#define	_I386_LOCK_H_

#include <sys/appleapiopts.h>

#ifdef __APPLE_API_PRIVATE

#ifdef MACH_KERNEL_PRIVATE

#include <kern/macro_help.h>
#include <kern/assert.h>
#include <i386/hw_lock_types.h>
#include <i386/locks.h>

#include <mach_rt.h>
#include <mach_ldebug.h>

typedef struct {
	lck_mtx_t	lck_mtx;	/* inlined lck_mtx, must be first */
#if	MACH_LDEBUG
	int		type;
#define	MUTEX_TAG	0x4d4d
	vm_offset_t	pc;
	vm_offset_t	thread;
#endif	/* MACH_LDEBUG */
} mutex_t;

typedef lck_rw_t lock_t;

extern unsigned int LockTimeOut;	/* Number of hardware ticks of a lock timeout */

#if defined(__GNUC__)

/*
 *	General bit-lock routines.
 */

#define	bit_lock(bit,l)							\
	__asm__ volatile("	jmp	1f	\n			\
	0:	btl	%0, %1	\n					\
		jb	0b	\n					\
	1:	lock		\n					\
		btsl	%0,%1	\n					\
		jb	0b" :						\
		:							\
		"r" (bit), "m" (*(volatile int *)(l)) :			\
		"memory");

#define	bit_unlock(bit,l)						\
	__asm__ volatile("	lock		\n			\
		btrl	%0,%1" :					\
		:							\
		"r" (bit), "m" (*(volatile int *)(l)));
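
/*
 * Usage sketch (not part of the original header): guarding a shared flags
 * word with bit_lock()/bit_unlock().  The flags word and bit index below
 * are hypothetical names, for illustration only.
 */
#if 0	/* illustrative sketch, not compiled */
static volatile int example_flags;		/* hypothetical shared flags word */
#define	EXAMPLE_LOCK_BIT	0		/* hypothetical bit used as a lock */

static inline void example_bit_lock_section(void)
{
	bit_lock(EXAMPLE_LOCK_BIT, &example_flags);	/* spin until the bit can be set atomically */
	/* ... short critical section protected by the bit ... */
	bit_unlock(EXAMPLE_LOCK_BIT, &example_flags);	/* atomically clear the bit again */
}
#endif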
123
124 /*
125 * Set or clear individual bits in a long word.
126 * The locked access is needed only to lock access
127 * to the word, not to individual bits.
128 */
129
130 #define i_bit_set(bit,l) \
131 __asm__ volatile(" lock \n \
132 btsl %0,%1" : \
133 : \
134 "r" (bit), "m" (*(volatile int *)(l)));
135
136 #define i_bit_clear(bit,l) \
137 __asm__ volatile(" lock \n \
138 btrl %0,%1" : \
139 : \
140 "r" (bit), "m" (*(volatile int *)(l)));
141
142 static inline unsigned long i_bit_isset(unsigned int test, volatile unsigned long *word)
143 {
144 int bit;
145
146 __asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
147 : "m" (word), "ir" (test));
148 return bit;
149 }
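
/*
 * Usage sketch (not part of the original header): setting, testing and
 * clearing a flag with i_bit_set()/i_bit_isset()/i_bit_clear().  The state
 * word and bit index below are hypothetical names, for illustration only.
 */
#if 0	/* illustrative sketch, not compiled */
static volatile unsigned long example_state;	/* hypothetical shared state word */
#define	EXAMPLE_READY_BIT	3		/* hypothetical flag bit */

static inline void example_bit_flag_usage(void)
{
	i_bit_set(EXAMPLE_READY_BIT, &example_state);		/* atomically set the flag */
	if (i_bit_isset(EXAMPLE_READY_BIT, &example_state))
		i_bit_clear(EXAMPLE_READY_BIT, &example_state);	/* atomically clear it again */
}
#endif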

static inline char	xchgb(volatile char * cp, char new);

static inline void	atomic_incl(volatile long * p, long delta);
static inline void	atomic_incs(volatile short * p, short delta);
static inline void	atomic_incb(volatile char * p, char delta);

static inline void	atomic_decl(volatile long * p, long delta);
static inline void	atomic_decs(volatile short * p, short delta);
static inline void	atomic_decb(volatile char * p, char delta);

static inline long	atomic_getl(const volatile long * p);
static inline short	atomic_gets(const volatile short * p);
static inline char	atomic_getb(const volatile char * p);

static inline void	atomic_setl(volatile long * p, long value);
static inline void	atomic_sets(volatile short * p, short value);
static inline void	atomic_setb(volatile char * p, char value);

static inline char xchgb(volatile char * cp, char new)
{
	register char	old = new;

	__asm__ volatile ("	xchgb	%0,%2" :
		"=q" (old) :
		"0" (new), "m" (*(volatile char *)cp) : "memory");
	return (old);
}
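
/*
 * Usage sketch (not part of the original header): because xchgb() returns
 * the previous byte value and the exchange is implicitly locked, it can
 * serve as a simple byte test-and-set lock.  The lock byte and helpers
 * below are hypothetical names, for illustration only.
 */
#if 0	/* illustrative sketch, not compiled */
static volatile char example_lock_byte;		/* hypothetical: 0 = free, 1 = held */

static inline void example_byte_spin_lock(void)
{
	while (xchgb(&example_lock_byte, 1) != 0)
		continue;			/* old value was 1: someone else holds it, keep spinning */
}

static inline void example_byte_spin_unlock(void)
{
	xchgb(&example_lock_byte, 0);		/* release the lock */
}
#endif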

/*
 * Compare and exchange:
 *	- returns failure (0) if the location did not contain the old value,
 *	- returns success (1) if the location was set to the new value.
 */
static inline uint32_t
atomic_cmpxchg(uint32_t *p, uint32_t old, uint32_t new)
{
	uint32_t	res = old;

	asm volatile(
		"lock;	cmpxchgl %1,%2;	\n\t"
		"	setz	%%al;	\n\t"
		"	movzbl	%%al,%0"
		: "+a" (res)		/* %0: old value to compare, returns success */
		: "r" (new),		/* %1: new value to set */
		  "m" (*(p))		/* %2: memory address */
		: "memory");
	return (res);
}
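
/*
 * Usage sketch (not part of the original header): the usual compare-and-swap
 * retry loop, adding a value to a shared 32-bit counter without taking a
 * lock.  The counter and helper below are hypothetical names, for
 * illustration only.
 */
#if 0	/* illustrative sketch, not compiled */
static uint32_t example_counter;		/* hypothetical shared counter */

static inline void example_cmpxchg_add(uint32_t delta)
{
	uint32_t old;

	do {
		old = example_counter;		/* snapshot the current value */
	} while (!atomic_cmpxchg(&example_counter, old, old + delta));
						/* retry if another CPU changed it meanwhile */
}
#endif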

static inline void atomic_incl(volatile long * p, long delta)
{
	__asm__ volatile ("	lock		\n			\
		addl	%0,%1" :					\
		:							\
		"r" (delta), "m" (*(volatile long *)p));
}

static inline void atomic_incs(volatile short * p, short delta)
{
	__asm__ volatile ("	lock		\n			\
		addw	%0,%1" :					\
		:							\
		"q" (delta), "m" (*(volatile short *)p));
}

static inline void atomic_incb(volatile char * p, char delta)
{
	__asm__ volatile ("	lock		\n			\
		addb	%0,%1" :					\
		:							\
		"q" (delta), "m" (*(volatile char *)p));
}

static inline void atomic_decl(volatile long * p, long delta)
{
	__asm__ volatile ("	lock		\n			\
		subl	%0,%1" :					\
		:							\
		"r" (delta), "m" (*(volatile long *)p));
}

static inline int atomic_decl_and_test(volatile long * p, long delta)
{
	uint8_t	ret;
	asm volatile (
		"	lock		\n\t"
		"	subl	%1,%2	\n\t"
		"	sete	%0"
		: "=qm" (ret)
		: "r" (delta), "m" (*(volatile long *)p));
	return ret;
}
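
/*
 * Usage sketch (not part of the original header): atomic_decl_and_test()
 * returns non-zero exactly when the decrement brings the counter to zero,
 * which matches the shape of a reference-count release.  The refcount and
 * the destructor call below are hypothetical names, for illustration only.
 */
#if 0	/* illustrative sketch, not compiled */
static volatile long example_refcount;		/* hypothetical reference count */

static inline void example_release_reference(void)
{
	if (atomic_decl_and_test(&example_refcount, 1))
		example_destroy_object();	/* hypothetical: runs only for the last reference */
}
#endif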

static inline void atomic_decs(volatile short * p, short delta)
{
	__asm__ volatile ("	lock		\n			\
		subw	%0,%1" :					\
		:							\
		"q" (delta), "m" (*(volatile short *)p));
}

static inline void atomic_decb(volatile char * p, char delta)
{
	__asm__ volatile ("	lock		\n			\
		subb	%0,%1" :					\
		:							\
		"q" (delta), "m" (*(volatile char *)p));
}

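/*
 * The get/set routines below compile to ordinary loads and stores.  On
 * IA-32 a naturally aligned load or store of these sizes is already
 * atomic, so no lock prefix is required; note that they do not provide
 * any additional memory-barrier semantics.
 */
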
static inline long atomic_getl(const volatile long * p)
{
	return (*p);
}

static inline short atomic_gets(const volatile short * p)
{
	return (*p);
}

static inline char atomic_getb(const volatile char * p)
{
	return (*p);
}

static inline void atomic_setl(volatile long * p, long value)
{
	*p = value;
}

static inline void atomic_sets(volatile short * p, short value)
{
	*p = value;
}

static inline void atomic_setb(volatile char * p, char value)
{
	*p = value;
}


#else	/* !defined(__GNUC__) */

extern void	i_bit_set(
	int		index,
	void		*addr);

extern void	i_bit_clear(
	int		index,
	void		*addr);

extern void	bit_lock(
	int		index,
	void		*addr);

extern void	bit_unlock(
	int		index,
	void		*addr);

/*
 * The remaining routines defined in the __GNUC__ case have no
 * definitions for other compilers. - XXX
 */

#endif	/* !defined(__GNUC__) */

extern void kernel_preempt_check(void);

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* __APPLE_API_PRIVATE */

#endif	/* _I386_LOCK_H_ */

#endif	/* KERNEL_PRIVATE */