/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 *	Machine-dependent simple locks for the i386.
 */
#ifdef	KERNEL_PRIVATE

#ifndef	_I386_LOCK_H_
#define	_I386_LOCK_H_

#include <sys/appleapiopts.h>

#ifdef __APPLE_API_PRIVATE

#ifdef MACH_KERNEL_PRIVATE

#include <kern/macro_help.h>
#include <kern/assert.h>
#include <i386/hw_lock_types.h>
#include <i386/locks.h>

#include <mach_rt.h>
#include <mach_ldebug.h>

typedef struct {
	lck_mtx_t	lck_mtx;	/* inlined lck_mtx, needs to be first */
#if	MACH_LDEBUG
	int		type;
#define	MUTEX_TAG	0x4d4d
	vm_offset_t	pc;
	vm_offset_t	thread;
#endif	/* MACH_LDEBUG */
} mutex_t;

typedef lck_rw_t lock_t;

extern unsigned int LockTimeOutTSC;	/* Lock timeout in TSC ticks */
extern unsigned int LockTimeOut;	/* Lock timeout in absolute time */


#if defined(__GNUC__)

/*
 *	General bit-lock routines.
 */

#define	bit_lock(bit,l)						\
	__asm__ volatile("	jmp	1f	\n		\
		0:	btl	%0, %1	\n		\
			jb	0b	\n		\
		1:	lock		\n		\
			btsl	%0,%1	\n		\
			jb	0b" :			\
		:					\
		"r" (bit), "m" (*(volatile int *)(l)) :	\
		"memory");

#define	bit_unlock(bit,l)					\
	__asm__ volatile("	lock		\n		\
			btrl	%0,%1" :			\
		:					\
		"r" (bit), "m" (*(volatile int *)(l)));

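/*
 * Illustrative usage sketch (hypothetical names, not symbols defined by this
 * header): bit_lock() spins until the given bit is clear and then atomically
 * sets it, so one bit of a caller-owned word can guard a short critical
 * section; bit_unlock() atomically clears the bit again.
 *
 *	static volatile int example_flags;		// bit 0 acts as the lock bit
 *
 *	bit_lock(0, &example_flags);			// acquire: spin, then set bit 0
 *	//	... critical section protected by bit 0 ...
 *	bit_unlock(0, &example_flags);			// release: clear bit 0
 */
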
/*
 *	Set or clear individual bits in a long word.
 *	The locked access is needed only to lock access
 *	to the word, not to individual bits.
 */

#define	i_bit_set(bit,l)					\
	__asm__ volatile("	lock		\n		\
			btsl	%0,%1" :			\
		:					\
		"r" (bit), "m" (*(volatile int *)(l)));

#define	i_bit_clear(bit,l)					\
	__asm__ volatile("	lock		\n		\
			btrl	%0,%1" :			\
		:					\
		"r" (bit), "m" (*(volatile int *)(l)));

static inline unsigned long i_bit_isset(unsigned int test, volatile unsigned long *word)
{
	int	bit;

	__asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
148 : "m" (word), "ir" (test));
	return bit;
}

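/*
 * Illustrative usage sketch (hypothetical 'example_mask', not a kernel
 * symbol): i_bit_set()/i_bit_clear() update one bit of a shared word with a
 * locked read-modify-write, and i_bit_isset() returns non-zero when the
 * tested bit is currently set.
 *
 *	static volatile unsigned long example_mask;
 *
 *	i_bit_set(3, &example_mask);			// atomically set bit 3
 *	if (i_bit_isset(3, &example_mask))		// test bit 3
 *		i_bit_clear(3, &example_mask);		// atomically clear bit 3
 */
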
static inline char	xchgb(volatile char * cp, char new);

static inline void	atomic_incl(volatile long * p, long delta);
static inline void	atomic_incs(volatile short * p, short delta);
static inline void	atomic_incb(volatile char * p, char delta);

static inline void	atomic_decl(volatile long * p, long delta);
static inline void	atomic_decs(volatile short * p, short delta);
static inline void	atomic_decb(volatile char * p, char delta);

static inline long	atomic_getl(const volatile long * p);
static inline short	atomic_gets(const volatile short * p);
static inline char	atomic_getb(const volatile char * p);

static inline void	atomic_setl(volatile long * p, long value);
static inline void	atomic_sets(volatile short * p, short value);
static inline void	atomic_setb(volatile char * p, char value);

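/*
 * xchgb: atomically exchange the byte at 'cp' with 'new' and return the
 * previous value (XCHG with a memory operand is implicitly locked on x86).
 */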
static inline char xchgb(volatile char * cp, char new)
{
	register char	old = new;

	__asm__ volatile ("	xchgb	%0,%2" :
			"=q" (old) :
			"0" (new), "m" (*(volatile char *)cp) : "memory");
	return (old);
}

/*
 * Compare and exchange:
 *	- returns failure (0) if the location did not contain the old value,
 *	- returns success (1) if the location was set to the new value.
 */
static inline uint32_t
atomic_cmpxchg(uint32_t *p, uint32_t old, uint32_t new)
{
	uint32_t	res = old;

	__asm__ volatile(
		"lock; cmpxchgl	%1,%2;	\n\t"
		"	setz	%%al;	\n\t"
		"	movzbl	%%al,%0"
		: "+a" (res)	/* %0: old value to compare, returns success */
		: "r" (new),	/* %1: new value to set */
		  "m" (*(p))	/* %2: memory address */
		: "memory");
	return (res);
}

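/*
 * Illustrative sketch of the compare-and-exchange pattern described above;
 * 'example_lock' is a hypothetical caller-owned word, not part of this
 * interface. The call succeeds (returns 1) only if the word still held the
 * expected old value when the locked cmpxchg executed.
 *
 *	static uint32_t example_lock = 0;
 *
 *	if (atomic_cmpxchg(&example_lock, 0, 1)) {
 *		//	observed 0 and atomically stored 1: lock acquired
 *		//	... critical section ...
 *		example_lock = 0;		// release (illustration only)
 *	} else {
 *		//	someone else holds it; retry or back off
 *	}
 */
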
static inline void atomic_incl(volatile long * p, long delta)
{
	__asm__ volatile ("	lock		\n		\
			addl	%0,%1" :		\
		:					\
		"r" (delta), "m" (*(volatile long *)p));
}

static inline void atomic_incs(volatile short * p, short delta)
{
	__asm__ volatile ("	lock		\n		\
			addw	%0,%1" :		\
		:					\
		"q" (delta), "m" (*(volatile short *)p));
}

static inline void atomic_incb(volatile char * p, char delta)
{
	__asm__ volatile ("	lock		\n		\
			addb	%0,%1" :		\
		:					\
		"q" (delta), "m" (*(volatile char *)p));
}

static inline void atomic_decl(volatile long * p, long delta)
{
	__asm__ volatile ("	lock		\n		\
			subl	%0,%1" :		\
		:					\
		"r" (delta), "m" (*(volatile long *)p));
}

static inline int atomic_decl_and_test(volatile long * p, long delta)
{
	uint8_t	ret;
	__asm__ volatile (
		"	lock		\n\t"
		"	subl	%1,%2	\n\t"
		"	sete	%0"
		: "=qm" (ret)
		: "r" (delta), "m" (*(volatile long *)p));
	return ret;
}

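/*
 * Illustrative sketch (hypothetical 'example_refcount', not a kernel symbol):
 * atomic_decl_and_test() returns non-zero exactly when the locked subtraction
 * brings the word to zero, the usual "dropped the last reference" test.
 *
 *	static volatile long example_refcount = 1;
 *
 *	atomic_incl(&example_refcount, 1);		// take an extra reference
 *	//	...
 *	if (atomic_decl_and_test(&example_refcount, 1))
 *		;					// count reached zero: tear down
 */
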
static inline void atomic_decs(volatile short * p, short delta)
{
	__asm__ volatile ("	lock		\n		\
			subw	%0,%1" :		\
		:					\
		"q" (delta), "m" (*(volatile short *)p));
}

static inline void atomic_decb(volatile char * p, char delta)
{
	__asm__ volatile ("	lock		\n		\
			subb	%0,%1" :		\
		:					\
		"q" (delta), "m" (*(volatile char *)p));
}

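/*
 * The atomic_get*()/atomic_set*() routines below are plain volatile loads
 * and stores; naturally aligned accesses of these sizes are performed
 * atomically by IA-32, so no lock prefix is needed for them.
 */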
static inline long atomic_getl(const volatile long * p)
{
	return (*p);
}

static inline short atomic_gets(const volatile short * p)
{
	return (*p);
}

static inline char atomic_getb(const volatile char * p)
{
	return (*p);
}

static inline void atomic_setl(volatile long * p, long value)
{
	*p = value;
}

static inline void atomic_sets(volatile short * p, short value)
{
	*p = value;
}

static inline void atomic_setb(volatile char * p, char value)
{
	*p = value;
}


#else	/* !defined(__GNUC__) */

extern void	i_bit_set(
	int		index,
	void		*addr);

extern void	i_bit_clear(
	int		index,
	void		*addr);

extern void	bit_lock(
	int		index,
	void		*addr);

extern void	bit_unlock(
	int		index,
	void		*addr);

/*
 * All other routines defined in __GNUC__ case lack
 * definitions otherwise. - XXX
 */

#endif	/* !defined(__GNUC__) */

extern void	kernel_preempt_check (void);

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* __APPLE_API_PRIVATE */

#endif	/* _I386_LOCK_H_ */

#endif	/* KERNEL_PRIVATE */