/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 *      Machine-dependent simple locks for the i386.
 */
#ifdef  KERNEL_PRIVATE

#ifndef _I386_LOCK_H_
#define _I386_LOCK_H_

#include <sys/appleapiopts.h>

#ifdef  __APPLE_API_PRIVATE

#ifdef  MACH_KERNEL_PRIVATE

#include <kern/macro_help.h>
#include <kern/assert.h>
#include <i386/hw_lock_types.h>
#include <i386/locks.h>

#include <mach_rt.h>
#include <mach_ldebug.h>

typedef struct {
        lck_mtx_t       lck_mtx;        /* inlined lck_mtx, need to be first */
#if     MACH_LDEBUG
        int             type;
#define MUTEX_TAG       0x4d4d
        vm_offset_t     pc;
        vm_offset_t     thread;
#endif  /* MACH_LDEBUG */
} mutex_t;

typedef lck_rw_t lock_t;

extern unsigned int LockTimeOut;        /* Number of hardware ticks of a lock timeout */


#if defined(__GNUC__)

/*
 *      General bit-lock routines.
 */

#define bit_lock(bit,l) \
        __asm__ volatile("      jmp     1f      \n \
        0:      btl     %0, %1  \n \
                jb      0b      \n \
        1:      lock            \n \
                btsl    %0,%1   \n \
                jb      0b" : \
        : \
        "r" (bit), "m" (*(volatile int *)(l)) : \
        "memory");

#define bit_unlock(bit,l) \
        __asm__ volatile("      lock            \n \
                btrl    %0,%1" : \
        : \
        "r" (bit), "m" (*(volatile int *)(l)));

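/*
 * Usage sketch (illustrative only, not part of the original header):
 * bit_lock() spins until it can atomically set the named bit with a
 * locked "bts", and bit_unlock() clears it with a locked "btr".  The
 * lock word and helper names below are hypothetical.
 *
 *      static int      fdt_lock_word;          -- bit 0 serves as the lock
 *
 *      void fdt_lock(void)   { bit_lock(0, &fdt_lock_word); }
 *      void fdt_unlock(void) { bit_unlock(0, &fdt_lock_word); }
 */
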
/*
 *      Set or clear individual bits in a long word.
 *      The locked access is needed only to lock access
 *      to the word, not to individual bits.
 */

#define i_bit_set(bit,l) \
        __asm__ volatile("      lock            \n \
                btsl    %0,%1" : \
        : \
        "r" (bit), "m" (*(volatile int *)(l)));

#define i_bit_clear(bit,l) \
        __asm__ volatile("      lock            \n \
                btrl    %0,%1" : \
        : \
        "r" (bit), "m" (*(volatile int *)(l)));

static inline unsigned long i_bit_isset(unsigned int test, volatile unsigned long *word)
{
        int     bit;

        /* btl copies the tested bit into CF; sbbl turns CF into 0 or ~0 */
        __asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
                : "m" (*word), "ir" (test));
        return bit;
}

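/*
 * Usage sketch (illustrative only, not part of the original header):
 * maintaining a mask of pending work items.  The variable and the bit
 * assignment below are hypothetical.
 *
 *      static volatile unsigned long   pending_mask;
 *
 *      i_bit_set(3, &pending_mask);            -- atomically mark item 3
 *      if (i_bit_isset(3, &pending_mask))      -- nonzero if the bit is set
 *              i_bit_clear(3, &pending_mask);  -- atomically clear it again
 */
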
static inline char      xchgb(volatile char * cp, char new);

static inline void      atomic_incl(volatile long * p, long delta);
static inline void      atomic_incs(volatile short * p, short delta);
static inline void      atomic_incb(volatile char * p, char delta);

static inline void      atomic_decl(volatile long * p, long delta);
static inline void      atomic_decs(volatile short * p, short delta);
static inline void      atomic_decb(volatile char * p, char delta);

static inline long      atomic_getl(const volatile long * p);
static inline short     atomic_gets(const volatile short * p);
static inline char      atomic_getb(const volatile char * p);

static inline void      atomic_setl(volatile long * p, long value);
static inline void      atomic_sets(volatile short * p, short value);
static inline void      atomic_setb(volatile char * p, char value);

static inline char xchgb(volatile char * cp, char new)
{
        register char   old = new;

        __asm__ volatile ("     xchgb   %0,%2" :
                        "=q" (old) :
                        "0" (new), "m" (*(volatile char *)cp) : "memory");
        return (old);
}

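/*
 * Usage sketch (illustrative only, not part of the original header):
 * xchgb() swaps a byte atomically and returns the previous value, the
 * classic building block for a test-and-set spinlock.  The lock
 * variable and helpers below are hypothetical.
 *
 *      static volatile char    tas_lock;       -- 0 = free, 1 = held
 *
 *      void tas_acquire(void)
 *      {
 *              while (xchgb(&tas_lock, 1) != 0)
 *                      ;                       -- spin until old value was 0
 *      }
 *
 *      void tas_release(void)
 *      {
 *              xchgb(&tas_lock, 0);            -- swap back to 0 to release
 *      }
 */
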
/*
 * Compare and exchange:
 *      - returns failure (0) if the location did not contain the old value,
 *      - returns success (1) if the location was set to the new value.
 */
static inline uint32_t
atomic_cmpxchg(uint32_t *p, uint32_t old, uint32_t new)
{
        uint32_t        res = old;

        asm volatile(
                "lock;  cmpxchgl        %1,%2;  \n\t"
                "       setz    %%al;           \n\t"
                "       movzbl  %%al,%0"
                : "+a" (res)            /* %0: old value to compare, returns success */
                : "r" (new),            /* %1: new value to set */
                  "m" (*(p))            /* %2: memory address */
                : "memory");
        return (res);
}

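/*
 * Usage sketch (illustrative only, not part of the original header):
 * the usual compare-and-swap retry loop.  atomic_cmpxchg() returns 1
 * only if the word still held "old" and was replaced by "new", so a
 * read-modify-write is retried until no other CPU raced in between.
 * The counter and function below are hypothetical.
 *
 *      static uint32_t hits;
 *
 *      void hits_add(uint32_t n)
 *      {
 *              uint32_t old;
 *
 *              do {
 *                      old = hits;
 *              } while (!atomic_cmpxchg(&hits, old, old + n));
 *      }
 */
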
static inline void atomic_incl(volatile long * p, long delta)
{
        __asm__ volatile ("     lock            \n \
                addl    %0,%1" : \
        : \
        "r" (delta), "m" (*(volatile long *)p));
}

static inline void atomic_incs(volatile short * p, short delta)
{
        __asm__ volatile ("     lock            \n \
                addw    %0,%1" : \
        : \
        "q" (delta), "m" (*(volatile short *)p));
}

static inline void atomic_incb(volatile char * p, char delta)
{
        __asm__ volatile ("     lock            \n \
                addb    %0,%1" : \
        : \
        "q" (delta), "m" (*(volatile char *)p));
}

static inline void atomic_decl(volatile long * p, long delta)
{
        __asm__ volatile ("     lock            \n \
                subl    %0,%1" : \
        : \
        "r" (delta), "m" (*(volatile long *)p));
}

static inline int atomic_decl_and_test(volatile long * p, long delta)
{
        uint8_t ret;
        asm volatile (
                "       lock            \n\t"
                "       subl    %1,%2   \n\t"
                "       sete    %0"
                : "=qm" (ret)
                : "r" (delta), "m" (*(volatile long *)p));
        return ret;
}

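/*
 * Usage sketch (illustrative only, not part of the original header):
 * a simple reference count.  atomic_incl()/atomic_decl() perform a
 * locked add/sub, and atomic_decl_and_test() additionally reports
 * whether the subtraction left the count at zero, the usual "last
 * reference dropped" test.  The structure and destructor below are
 * hypothetical.
 *
 *      struct thing {
 *              volatile long   refcnt;
 *      };
 *
 *      void thing_ref(struct thing *t)
 *      {
 *              atomic_incl(&t->refcnt, 1);
 *      }
 *
 *      void thing_unref(struct thing *t)
 *      {
 *              if (atomic_decl_and_test(&t->refcnt, 1))
 *                      thing_free(t);          -- hypothetical destructor
 *      }
 */
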
static inline void atomic_decs(volatile short * p, short delta)
{
        __asm__ volatile ("     lock            \n \
                subw    %0,%1" : \
        : \
        "q" (delta), "m" (*(volatile short *)p));
}

static inline void atomic_decb(volatile char * p, char delta)
{
        __asm__ volatile ("     lock            \n \
                subb    %0,%1" : \
        : \
        "q" (delta), "m" (*(volatile char *)p));
}

static inline long atomic_getl(const volatile long * p)
{
        return (*p);
}

static inline short atomic_gets(const volatile short * p)
{
        return (*p);
}

static inline char atomic_getb(const volatile char * p)
{
        return (*p);
}

static inline void atomic_setl(volatile long * p, long value)
{
        *p = value;
}

static inline void atomic_sets(volatile short * p, short value)
{
        *p = value;
}

static inline void atomic_setb(volatile char * p, char value)
{
        *p = value;
}

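/*
 * Note (illustrative only, not part of the original header): the
 * atomic_getl()/atomic_setl() family compiles to a single aligned load
 * or store, which i386 performs atomically; it provides no ordering
 * beyond what the volatile qualifier asks of the compiler.  A typical
 * use is publishing a flag that another CPU polls; the names below are
 * hypothetical.
 *
 *      static volatile long    shutdown_requested;
 *
 *      atomic_setl(&shutdown_requested, 1);            -- writer side
 *      while (atomic_getl(&shutdown_requested) == 0)   -- reader polls
 *              continue;
 */
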

#else   /* !defined(__GNUC__) */

extern void     i_bit_set(
        int             index,
        void            *addr);

extern void     i_bit_clear(
        int             index,
        void            *addr);

extern void     bit_lock(
        int             index,
        void            *addr);

extern void     bit_unlock(
        int             index,
        void            *addr);

/*
 * All other routines defined in __GNUC__ case lack
 * definitions otherwise. - XXX
 */

#endif  /* !defined(__GNUC__) */

extern void     kernel_preempt_check (void);

#endif  /* MACH_KERNEL_PRIVATE */

#endif  /* __APPLE_API_PRIVATE */

#endif  /* _I386_LOCK_H_ */

#endif  /* KERNEL_PRIVATE */