]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/lock.h
4cf7d50ab255dd3df1a7fff908ec8088743d34a9
[apple/xnu.git] / osfmk / i386 / lock.h
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * Copyright (C) 1998 Apple Computer
32 * All Rights Reserved
33 */
34 /*
35 * @OSF_COPYRIGHT@
36 */
37 /*
38 * Mach Operating System
39 * Copyright (c) 1991,1990 Carnegie Mellon University
40 * All Rights Reserved.
41 *
42 * Permission to use, copy, modify and distribute this software and its
43 * documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation.
47 *
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
50 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51 *
52 * Carnegie Mellon requests users of this software to return to
53 *
54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
55 * School of Computer Science
56 * Carnegie Mellon University
57 * Pittsburgh PA 15213-3890
58 *
59 * any improvements or extensions that they make and grant Carnegie Mellon
60 * the rights to redistribute these changes.
61 */
62
63 /*
64 */
65
66 /*
67 * Machine-dependent simple locks for the i386.
68 */
69 #ifdef KERNEL_PRIVATE
70
71 #ifndef _I386_LOCK_H_
72 #define _I386_LOCK_H_
73
74 #include <sys/appleapiopts.h>
75
76 #ifdef __APPLE_API_PRIVATE
77
78 #ifdef MACH_KERNEL_PRIVATE
79
80 #include <kern/macro_help.h>
81 #include <kern/assert.h>
82 #include <i386/hw_lock_types.h>
83 #include <i386/locks.h>
84
85 #include <mach_rt.h>
86 #include <mach_ldebug.h>
87
/*
 * Mutex: wraps an lck_mtx_t so legacy mutex_t users share the
 * lck_mtx implementation.  The lck_mtx member must remain first so
 * a mutex_t * can be passed where a lck_mtx_t * is expected.
 */
typedef struct {
	lck_mtx_t lck_mtx;	/* inlined lck_mtx, need to be first */
#if MACH_LDEBUG
	/* Lock-debugging fields, present only under MACH_LDEBUG. */
	int type;		/* tag identifying this lock as a mutex */
#define MUTEX_TAG 0x4d4d	/* 'MM' */
	vm_offset_t pc;		/* presumably the acquirer's PC — TODO confirm */
	vm_offset_t thread;	/* presumably the owning thread — TODO confirm */
#endif /* MACH_LDEBUG */
} mutex_t;
97
/*
 * Read/write lock.  The bitfield word is guarded by 'interlock';
 * read_count counts accepted readers, and the single-bit flags
 * track writer, upgrade, sleeper, and sleepability state.
 */
typedef struct {
	decl_simple_lock_data(,interlock)	/* "hardware" interlock field */
	volatile unsigned int
		read_count:16,	/* No. of accepted readers */
		want_upgrade:1,	/* Read-to-write upgrade waiting */
		want_write:1,	/* Writer is waiting, or locked for write */
		waiting:1,	/* Someone is sleeping on lock */
		can_sleep:1;	/* Can attempts to lock go to sleep? */
} lock_t;
107
/* Lock-spin timeout, defined elsewhere; units per the comment below. */
extern unsigned int LockTimeOut;	/* Number of hardware ticks of a lock timeout */
109
110
111 #if defined(__GNUC__)
112
113 /*
114 * General bit-lock routines.
115 */
116
/*
 * bit_lock(bit, l): spin until bit 'bit' of the int at address 'l'
 * can be atomically set (test-and-set lock acquire).
 * The unlocked btl/jb loop spins read-only until the bit looks
 * clear, then lock btsl attempts the atomic set; if the bit was
 * already set again (jb), it goes back to spinning.  The "memory"
 * clobber orders the acquire against subsequent accesses.
 */
#define bit_lock(bit,l) \
	__asm__ volatile(" jmp 1f \n \
		0: btl %0, %1 \n \
		jb 0b \n \
		1: lock \n \
		btsl %0,%1 \n \
		jb 0b" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)) : \
		"memory");
127
/*
 * bit_unlock(bit, l): atomically clear bit 'bit' of the int at
 * address 'l' (lock btrl), releasing a lock taken with bit_lock().
 * NOTE(review): unlike bit_lock there is no "memory" clobber here,
 * so the compiler may reorder protected stores past the release —
 * verify this is intentional.
 */
#define bit_unlock(bit,l) \
	__asm__ volatile(" lock \n \
		btrl %0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));
133
134 /*
135 * Set or clear individual bits in a long word.
136 * The locked access is needed only to lock access
137 * to the word, not to individual bits.
138 */
139
/* i_bit_set(bit, l): atomically set bit 'bit' of the int at 'l' (lock btsl). */
#define i_bit_set(bit,l) \
	__asm__ volatile(" lock \n \
		btsl %0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));
145
/* i_bit_clear(bit, l): atomically clear bit 'bit' of the int at 'l' (lock btrl). */
#define i_bit_clear(bit,l) \
	__asm__ volatile(" lock \n \
		btrl %0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));
151
/*
 * i_bit_isset: non-atomically test bit 'test' of the word at *word.
 * Returns non-zero (all ones, via sbbl of CF into itself) when the
 * bit is set, 0 when it is clear.
 *
 * btl copies the selected bit into CF; "sbbl %0,%0" then computes
 * bit - bit - CF, i.e. -1 if the bit was set and 0 otherwise.
 *
 * Fix: the memory operand must be the pointed-to word, "m" (*word).
 * The original "m" (word) handed btl the memory slot holding the
 * pointer variable itself, so the bits of the pointer VALUE were
 * tested instead of the caller's word.  Also declare the "cc"
 * clobber since btl/sbbl modify the flags.
 */
static inline unsigned long i_bit_isset(unsigned int test, volatile unsigned long *word)
{
	int bit;

	__asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
		: "m" (*word), "ir" (test)
		: "cc");
	return bit;
}
160
/*
 * Forward declarations for the GCC inline exchange/atomic helpers
 * defined below.  The l/s/b suffix selects long/short/byte operand
 * width; inc/dec take an explicit delta rather than a fixed +1/-1.
 */
static inline char xchgb(volatile char * cp, char new);

static inline void atomic_incl(long * p, long delta);
static inline void atomic_incs(short * p, short delta);
static inline void atomic_incb(char * p, char delta);

static inline void atomic_decl(long * p, long delta);
static inline void atomic_decs(short * p, short delta);
static inline void atomic_decb(char * p, char delta);

static inline long atomic_getl(long * p);
static inline short atomic_gets(short * p);
static inline char atomic_getb(char * p);

static inline void atomic_setl(long * p, long value);
static inline void atomic_sets(short * p, short value);
static inline void atomic_setb(char * p, char value);
178
/*
 * xchgb: atomically exchange the byte at *cp with 'new'; returns the
 * previous contents.  xchg with a memory operand is implicitly locked
 * on x86, so no lock prefix is needed.
 */
static inline char xchgb(volatile char * cp, char new)
{
	register char old = new;

	__asm__ volatile (" xchgb %0,%2" :
		"=q" (old) :
		"0" (new), "m" (*(volatile char *)cp) : "memory");
	return (old);
}
188
189 /*
190 * Compare and exchange:
191 * - returns failure (0) if the location did not contain the old value,
192 * - returns success (1) if the location was set to the new value.
193 */
/*
 * atomic_cmpxchg: if *p == old, atomically store 'new' into *p.
 * Returns 1 on success, 0 if *p did not contain 'old'.
 *
 * lock cmpxchgl compares %eax (seeded with 'old') against *p:
 * on match it stores 'new' and sets ZF; on mismatch it loads the
 * current value into %eax.  setz/movzbl convert ZF into the 0/1
 * result, which reuses the %eax-tied operand 'res'.
 */
static inline uint32_t
atomic_cmpxchg(uint32_t *p, uint32_t old, uint32_t new)
{
	uint32_t res = old;

	asm volatile(
		"lock; cmpxchgl %1,%2; \n\t"
		" setz %%al; \n\t"
		" movzbl %%al,%0"
		: "+a" (res) /* %0: old value to compare, returns success */
		: "r" (new), /* %1: new value to set */
		"m" (*(p)) /* %2: memory address */
		: "memory");
	return (res);
}
209
/*
 * atomic_load64: atomically read the 64-bit value at *quadp on a
 * 32-bit CPU.  lock cmpxchg8b compares edx:eax (seeded 0) with the
 * quadword: on mismatch the current value is loaded into edx:eax
 * (the "=A" result); on match the location already held 0 and
 * ebx:ecx (also 0) is stored back, so the result is 0 either way.
 * NOTE(review): even a pure read may write the location (with its
 * own value), so the page must be writable and the line is dirtied.
 */
static inline uint64_t
atomic_load64(uint64_t *quadp)
{
	uint64_t ret;

	asm volatile(
		" lock; cmpxchg8b %1"
		: "=A" (ret)
		: "m" (*quadp), "a" (0), "d" (0), "b" (0), "c" (0));
	return (ret);
}
221
/*
 * atomic_loadstore64: atomically store 'new' into *quadp and return
 * the previous 64-bit contents — a 64-bit xchg for 32-bit CPUs.
 * The initial plain read only seeds edx:eax with a guess; the
 * lock cmpxchg8b loop (jnz 1b) retries until the compare matches,
 * at which point ebx:ecx (holding 'new') is stored and 'ret'
 * contains the value that was replaced.
 */
static inline uint64_t
atomic_loadstore64(uint64_t *quadp, uint64_t new)
{
	uint64_t ret;

	ret = *quadp;
	asm volatile(
		"1: \n\t"
		" lock; cmpxchg8b %1 \n\t"
		" jnz 1b"
		: "+A" (ret)
		: "m" (*quadp),
		"b" ((uint32_t)new), "c" ((uint32_t)(new >> 32)));
	return (ret);
}
237
/*
 * atomic_incl: atomically add 'delta' to the long at *p (lock addl).
 * Despite the name, delta is arbitrary, not fixed at 1.
 */
static inline void atomic_incl(long * p, long delta)
{
	__asm__ volatile (" lock \n \
		addl %0,%1" : \
		: \
		"r" (delta), "m" (*(volatile long *)p));
}
245
/* atomic_incs: atomically add 'delta' to the short at *p (lock addw). */
static inline void atomic_incs(short * p, short delta)
{
	__asm__ volatile (" lock \n \
		addw %0,%1" : \
		: \
		"q" (delta), "m" (*(volatile short *)p));
}
253
/* atomic_incb: atomically add 'delta' to the byte at *p (lock addb). */
static inline void atomic_incb(char * p, char delta)
{
	__asm__ volatile (" lock \n \
		addb %0,%1" : \
		: \
		"q" (delta), "m" (*(volatile char *)p));
}
261
/*
 * atomic_decl: atomically subtract 'delta' from the long at *p
 * (lock subl).  Delta is arbitrary, not fixed at 1.
 */
static inline void atomic_decl(long * p, long delta)
{
	__asm__ volatile (" lock \n \
		subl %0,%1" : \
		: \
		"r" (delta), "m" (*(volatile long *)p));
}
269
/*
 * atomic_decl_and_test: atomically subtract 'delta' from the long at
 * *p; returns 1 if the result is exactly zero, else 0 (sete captures
 * ZF left by the locked subl).
 */
static inline int atomic_decl_and_test(long * p, long delta)
{
	uint8_t ret;
	asm volatile (
		" lock \n\t"
		" subl %1,%2 \n\t"
		" sete %0"
		: "=qm" (ret)
		: "r" (delta), "m" (*(volatile long *)p));
	return ret;
}
281
/* atomic_decs: atomically subtract 'delta' from the short at *p (lock subw). */
static inline void atomic_decs(short * p, short delta)
{
	__asm__ volatile (" lock \n \
		subw %0,%1" : \
		: \
		"q" (delta), "m" (*(volatile short *)p));
}
289
/* atomic_decb: atomically subtract 'delta' from the byte at *p (lock subb). */
static inline void atomic_decb(char * p, char delta)
{
	__asm__ volatile (" lock \n \
		subb %0,%1" : \
		: \
		"q" (delta), "m" (*(volatile char *)p));
}
297
/*
 * atomic_getl: read the long at *p.
 * Plain load — presumably relies on natural alignment making an
 * aligned long access atomic on i386 (no lock prefix needed).
 */
static inline long atomic_getl(long * p)
{
	long current = *p;
	return current;
}
302
/*
 * atomic_gets: read the short at *p.
 * Plain load — presumably relies on aligned-access atomicity.
 */
static inline short atomic_gets(short * p)
{
	short current = *p;
	return current;
}
307
/*
 * atomic_getb: read the byte at *p.
 * Plain load — a single-byte access cannot tear.
 */
static inline char atomic_getb(char * p)
{
	char current = *p;
	return current;
}
312
/*
 * atomic_setl: store 'value' into the long at *p.
 * Plain store — presumably relies on natural alignment making an
 * aligned long access atomic on i386 (no lock prefix needed).
 */
static inline void atomic_setl(long * p, long value)
{
	long *dst = p;

	*dst = value;
}
317
/*
 * atomic_sets: store 'value' into the short at *p.
 * Plain store — presumably relies on aligned-access atomicity.
 */
static inline void atomic_sets(short * p, short value)
{
	short *dst = p;

	*dst = value;
}
322
/*
 * atomic_setb: store 'value' into the byte at *p.
 * Plain store — a single-byte access cannot tear.
 */
static inline void atomic_setb(char * p, char value)
{
	char *dst = p;

	*dst = value;
}
327
328
329 #else /* !defined(__GNUC__) */
330
/*
 * Non-GCC compilers: out-of-line versions of the bit operations,
 * implemented elsewhere (the inline-asm forms above require GCC
 * extended asm syntax).
 */
extern void i_bit_set(
	int index,
	void *addr);

extern void i_bit_clear(
	int index,
	void *addr);

extern void bit_lock(
	int index,
	void *addr);

extern void bit_unlock(
	int index,
	void *addr);
346
347 /*
348 * All other routines defined in __GNUC__ case lack
349 * definitions otherwise. - XXX
350 */
351
352 #endif /* !defined(__GNUC__) */
353
/* Check for a pending kernel preemption point (implemented elsewhere). */
extern void kernel_preempt_check (void);
355
356 #endif /* MACH_KERNEL_PRIVATE */
357
#endif /* __APPLE_API_PRIVATE */
359
360 #endif /* _I386_LOCK_H_ */
361
362 #endif /* KERNEL_PRIVATE */