]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/lock.h
xnu-792.6.61.tar.gz
[apple/xnu.git] / osfmk / i386 / lock.h
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (C) 1998 Apple Computer
24 * All Rights Reserved
25 */
26 /*
27 * @OSF_COPYRIGHT@
28 */
29 /*
30 * Mach Operating System
31 * Copyright (c) 1991,1990 Carnegie Mellon University
32 * All Rights Reserved.
33 *
34 * Permission to use, copy, modify and distribute this software and its
35 * documentation is hereby granted, provided that both the copyright
36 * notice and this permission notice appear in all copies of the
37 * software, derivative works or modified versions, and any portions
38 * thereof, and that both notices appear in supporting documentation.
39 *
40 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
41 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
42 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
43 *
44 * Carnegie Mellon requests users of this software to return to
45 *
46 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
47 * School of Computer Science
48 * Carnegie Mellon University
49 * Pittsburgh PA 15213-3890
50 *
51 * any improvements or extensions that they make and grant Carnegie Mellon
52 * the rights to redistribute these changes.
53 */
54
55 /*
56 */
57
58 /*
59 * Machine-dependent simple locks for the i386.
60 */
61 #ifdef KERNEL_PRIVATE
62
63 #ifndef _I386_LOCK_H_
64 #define _I386_LOCK_H_
65
66 #include <sys/appleapiopts.h>
67
68 #ifdef __APPLE_API_PRIVATE
69
70 #ifdef MACH_KERNEL_PRIVATE
71
72 #include <kern/macro_help.h>
73 #include <kern/assert.h>
74 #include <i386/hw_lock_types.h>
75 #include <i386/locks.h>
76
77 #include <mach_rt.h>
78 #include <mach_ldebug.h>
79
/*
 * Machine-dependent mutex.  The inlined lck_mtx_t MUST stay the first
 * member: lock code elsewhere casts a mutex_t * to lck_mtx_t *.
 * The MACH_LDEBUG fields record debugging state only and change the
 * struct size, so debug and non-debug kernels are not mix-and-match.
 */
typedef struct {
	lck_mtx_t lck_mtx;	/* inlined lck_mtx, need to be first */
#if	MACH_LDEBUG
	int	type;		/* tag identifying this as a mutex */
#define	MUTEX_TAG	0x4d4d	/* 'MM' — value stored in `type` */
	vm_offset_t	pc;	/* PC of the last acquirer (debug) */
	vm_offset_t	thread;	/* thread that holds the lock (debug) */
#endif	/* MACH_LDEBUG */
} mutex_t;
89
/*
 * Machine-dependent read/write lock.  All state fits in one interlock-
 * protected word of bitfields; the simple-lock interlock serializes
 * updates to the bitfields, not individual reader/writer operations.
 */
typedef struct {
	decl_simple_lock_data(,interlock) /* "hardware" interlock field */
	volatile unsigned int
		read_count:16,	/* No. of accepted readers */
		want_upgrade:1,	/* Read-to-write upgrade waiting */
		want_write:1,	/* Writer is waiting, or locked for write */
		waiting:1,	/* Someone is sleeping on lock */
		can_sleep:1;	/* Can attempts to lock go to sleep? */
} lock_t;
99
100 extern unsigned int LockTimeOut; /* Number of hardware ticks of a lock timeout */
101
102
103 #if defined(__GNUC__)
104
105 /*
106 * General bit-lock routines.
107 */
108
/*
 * bit_lock(bit, l): acquire a bit lock — spin until bit `bit` of the
 * int at address `l` is atomically test-and-set by this caller.
 * Spins with a plain btl read and only attempts the locked btsl once
 * the bit appears clear, which keeps the bus quiet under contention.
 *
 * Constraint fixes vs. the old version: the word is written by btsl,
 * so it must be an output operand ("+m"), and btl/btsl set CF, so the
 * condition codes are clobbered ("cc").  "memory" stays: an acquire
 * must keep the compiler from hoisting later accesses above it.
 */
#define bit_lock(bit,l)						\
	__asm__ volatile("	jmp	1f	\n"		\
		"0:	btl	%1,%0	\n"			\
		"	jb	0b	\n"			\
		"1:	lock		\n"			\
		"	btsl	%1,%0	\n"			\
		"	jb	0b"				\
		: "+m" (*(volatile int *)(l))			\
		: "r" (bit)					\
		: "memory", "cc");
119
/*
 * bit_unlock(bit, l): release a bit lock by atomically clearing bit
 * `bit` of the int at address `l`.
 *
 * Constraint fixes vs. the old version: the word is modified, so it is
 * an output ("+m"); btrl sets CF ("cc"); and — crucially for a lock
 * release — a "memory" clobber is added so the compiler cannot sink
 * stores made inside the critical section past the unlock.
 */
#define bit_unlock(bit,l)					\
	__asm__ volatile("	lock		\n"		\
		"	btrl	%1,%0"				\
		: "+m" (*(volatile int *)(l))			\
		: "r" (bit)					\
		: "memory", "cc");
125
126 /*
127 * Set or clear individual bits in a long word.
128 * The locked access is needed only to lock access
129 * to the word, not to individual bits.
130 */
131
/*
 * i_bit_set(bit, l): atomically set bit `bit` of the int at address
 * `l`.  The lock prefix serializes access to the whole word, not to
 * the individual bit (see comment above).
 *
 * Constraint fixes: the word is written ("+m") and btsl sets CF ("cc").
 */
#define i_bit_set(bit,l)					\
	__asm__ volatile("	lock		\n"		\
		"	btsl	%1,%0"				\
		: "+m" (*(volatile int *)(l))			\
		: "r" (bit)					\
		: "cc");
137
/*
 * i_bit_clear(bit, l): atomically clear bit `bit` of the int at
 * address `l`.  Counterpart of i_bit_set.
 *
 * Constraint fixes: the word is written ("+m") and btrl sets CF ("cc").
 */
#define i_bit_clear(bit,l)					\
	__asm__ volatile("	lock		\n"		\
		"	btrl	%1,%0"				\
		: "+m" (*(volatile int *)(l))			\
		: "r" (bit)					\
		: "cc");
143
/*
 * Return non-zero iff bit `test` of *word is set.
 *
 * btl tests the bit into CF; sbbl %0,%0 then materializes 0 or -1
 * (all-ones) from CF without a branch.
 *
 * Bug fix: the memory constraint was "m" (word), which made the asm
 * test bits of the *pointer variable itself* rather than the word it
 * points to.  It must be "m" (*word).  CF is modified, so "cc" is
 * also declared clobbered.
 */
static inline unsigned long i_bit_isset(unsigned int test, volatile unsigned long *word)
{
	int bit;

	__asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
		: "m" (*word), "ir" (test) : "cc");
	return bit;
}
152
153 static inline char xchgb(volatile char * cp, char new);
154
155 static inline void atomic_incl(long * p, long delta);
156 static inline void atomic_incs(short * p, short delta);
157 static inline void atomic_incb(char * p, char delta);
158
159 static inline void atomic_decl(long * p, long delta);
160 static inline void atomic_decs(short * p, short delta);
161 static inline void atomic_decb(char * p, char delta);
162
163 static inline long atomic_getl(long * p);
164 static inline short atomic_gets(short * p);
165 static inline char atomic_getb(char * p);
166
167 static inline void atomic_setl(long * p, long value);
168 static inline void atomic_sets(short * p, short value);
169 static inline void atomic_setb(char * p, char value);
170
/*
 * Atomically exchange the byte at *cp with `new`; returns the previous
 * byte.  xchg with a memory operand is implicitly locked on x86, so no
 * lock prefix is needed.
 *
 * Constraint fix: *cp is both read and written by xchgb, so it is a
 * "+m" output rather than a plain "m" input.
 */
static inline char xchgb(volatile char * cp, char new)
{
	register char old = new;

	__asm__ volatile ("	xchgb	%0,%1" :
			"=q" (old), "+m" (*cp) :
			"0" (new) : "memory");
	return (old);
}
180
181 /*
182 * Compare and exchange:
183 * - returns failure (0) if the location did not contain the old value,
184 * - returns success (1) if the location was set to the new value.
185 */
/*
 * Compare and exchange:
 * - returns failure (0) if the location did not contain the old value,
 * - returns success (1) if the location was set to the new value.
 *
 * cmpxchgl compares %eax (old) with *p; on match it stores `new`, on
 * mismatch it loads the current value into %eax.  setz/movzbl then
 * convert ZF into the 0/1 result in the same register.
 *
 * Fixes vs. the old version: *p is written on success, so it is a
 * "+m" output rather than an input; ZF is consumed, so "cc" is
 * clobbered; and __asm__ is used so strict -std= modes still compile.
 */
static inline uint32_t
atomic_cmpxchg(uint32_t *p, uint32_t old, uint32_t new)
{
	uint32_t res = old;

	__asm__ volatile(
		"lock; cmpxchgl %2,%1;	\n\t"
		"	setz	%%al;	\n\t"
		"	movzbl	%%al,%0"
		: "+a" (res),	/* %0: old value to compare, returns success */
		  "+m" (*(p))	/* %1: memory address */
		: "r" (new)	/* %2: new value to set */
		: "memory", "cc");
	return (res);
}
201
/*
 * Atomically read a 64-bit quantity on 32-bit i386, where no plain
 * 64-bit load instruction exists.  cmpxchg8b compares edx:eax (0 here)
 * with *quadp: on mismatch it loads the current value into edx:eax
 * (which "=A" returns); on a match the value was 0 and it stores
 * ecx:ebx (also 0) — a harmless rewrite of the same value.  Either
 * way `ret` ends up with an atomic snapshot of *quadp.
 * NOTE(review): the "A" constraint pairs edx:eax only on 32-bit
 * targets — this routine is i386-specific.
 */
static inline uint64_t
atomic_load64(uint64_t *quadp)
{
	uint64_t ret;

	asm volatile(
		"	lock; cmpxchg8b %1"
		: "=A" (ret)
		: "m" (*quadp), "a" (0), "d" (0), "b" (0), "c" (0));
	return (ret);
}
213
/*
 * Atomically store `new` into *quadp and return the previous 64-bit
 * value (an unconditional 64-bit exchange on 32-bit i386).
 * The unlocked read seeds edx:eax ("+A" ret) with a guess of the
 * current value; cmpxchg8b then retries (jnz 1b) until the guess
 * matches, at which point ecx:ebx (= `new`) is stored.  On each
 * failed attempt cmpxchg8b refreshes edx:eax with the current value,
 * so the loop converges and `ret` holds the value that was replaced.
 * NOTE(review): like atomic_load64, relies on the 32-bit "A"
 * register-pair constraint — i386-specific.
 */
static inline uint64_t
atomic_loadstore64(uint64_t *quadp, uint64_t new)
{
	uint64_t ret;

	ret = *quadp;		/* initial guess; corrected by the loop */
	asm volatile(
		"1:			\n\t"
		"	lock; cmpxchg8b %1	\n\t"
		"	jnz 1b"
		: "+A" (ret)
		: "m" (*quadp),
		  "b" ((uint32_t)new), "c" ((uint32_t)(new >> 32)));
	return (ret);
}
229
230 static inline void atomic_incl(long * p, long delta)
231 {
232 __asm__ volatile (" lock \n \
233 addl %0,%1" : \
234 : \
235 "r" (delta), "m" (*(volatile long *)p));
236 }
237
/*
 * Atomically add `delta` to the short at *p.
 * Constraint fixes: *p is a "+m" output; addw updates the flags ("cc").
 */
static inline void atomic_incs(short * p, short delta)
{
	__asm__ volatile ("	lock		\n\t"
			"	addw	%1,%0"
		: "+m" (*(volatile short *)p)
		: "q" (delta)
		: "cc");
}
245
/*
 * Atomically add `delta` to the char at *p.
 * Constraint fixes: *p is a "+m" output; addb updates the flags ("cc").
 * "q" keeps the delta in a byte-addressable register.
 */
static inline void atomic_incb(char * p, char delta)
{
	__asm__ volatile ("	lock		\n\t"
			"	addb	%1,%0"
		: "+m" (*(volatile char *)p)
		: "q" (delta)
		: "cc");
}
253
254 static inline void atomic_decl(long * p, long delta)
255 {
256 __asm__ volatile (" lock \n \
257 subl %0,%1" : \
258 : \
259 "r" (delta), "m" (*(volatile long *)p));
260 }
261
262 static inline int atomic_decl_and_test(long * p, long delta)
263 {
264 uint8_t ret;
265 asm volatile (
266 " lock \n\t"
267 " subl %1,%2 \n\t"
268 " sete %0"
269 : "=qm" (ret)
270 : "r" (delta), "m" (*(volatile long *)p));
271 return ret;
272 }
273
/*
 * Atomically subtract `delta` from the short at *p.
 * Constraint fixes: *p is a "+m" output; subw updates the flags ("cc").
 */
static inline void atomic_decs(short * p, short delta)
{
	__asm__ volatile ("	lock		\n\t"
			"	subw	%1,%0"
		: "+m" (*(volatile short *)p)
		: "q" (delta)
		: "cc");
}
281
/*
 * Atomically subtract `delta` from the char at *p.
 * Constraint fixes: *p is a "+m" output; subb updates the flags ("cc").
 * "q" keeps the delta in a byte-addressable register.
 */
static inline void atomic_decb(char * p, char delta)
{
	__asm__ volatile ("	lock		\n\t"
			"	subb	%1,%0"
		: "+m" (*(volatile char *)p)
		: "q" (delta)
		: "cc");
}
289
/* Read the long at *p with a plain load. */
static inline long atomic_getl(long * p)
{
	long value = *p;
	return value;
}
294
/* Read the short at *p with a plain load. */
static inline short atomic_gets(short * p)
{
	short value = *p;
	return value;
}
299
/* Read the char at *p with a plain load. */
static inline char atomic_getb(char * p)
{
	char value = *p;
	return value;
}
304
/* Write `value` to the long at *p with a plain store. */
static inline void atomic_setl(long * p, long value)
{
	p[0] = value;
}
309
/* Write `value` to the short at *p with a plain store. */
static inline void atomic_sets(short * p, short value)
{
	p[0] = value;
}
314
/* Write `value` to the char at *p with a plain store. */
static inline void atomic_setb(char * p, char value)
{
	p[0] = value;
}
319
320
321 #else /* !defined(__GNUC__) */
322
323 extern void i_bit_set(
324 int index,
325 void *addr);
326
327 extern void i_bit_clear(
328 int index,
329 void *addr);
330
331 extern void bit_lock(
332 int index,
333 void *addr);
334
335 extern void bit_unlock(
336 int index,
337 void *addr);
338
339 /*
340 * All other routines defined in __GNUC__ case lack
341 * definitions otherwise. - XXX
342 */
343
344 #endif /* !defined(__GNUC__) */
345
346 extern void kernel_preempt_check (void);
347
348 #endif /* MACH_KERNEL_PRIVATE */
349
#endif	/* __APPLE_API_PRIVATE */
351
352 #endif /* _I386_LOCK_H_ */
353
354 #endif /* KERNEL_PRIVATE */