/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Machine-dependent simple locks for the i386.
 */
#ifndef	_I386_LOCK_H_
#define	_I386_LOCK_H_

#include <kern/macro_help.h>
#include <kern/assert.h>
#include <i386/hw_lock_types.h>

#ifdef	MACH_KERNEL_PRIVATE

#include <mach_ldebug.h>

#if	defined(__GNUC__)
/*
 *	General bit-lock routines.
 */

#define	bit_lock(bit,l)						\
	__asm__ volatile("	jmp	1f	\n		\
		0:	btl	%0, %1	\n			\
			jb	0b	\n			\
		1:	lock		\n			\
			btsl	%0,%1	\n			\
			jb	0b"			:	\
		:						\
		"r" (bit), "m" (*(volatile int *)(l)) :		\
		"memory");

#define	bit_unlock(bit,l)					\
	__asm__ volatile("	lock		\n		\
			btrl	%0,%1"		:		\
		:						\
		"r" (bit), "m" (*(volatile int *)(l)));
/*
 *	Set or clear individual bits in a long word.
 *	The locked access is needed only to lock access
 *	to the word, not to individual bits.
 */

#define	i_bit_set(bit,l)					\
	__asm__ volatile("	lock		\n		\
			btsl	%0,%1"		:		\
		:						\
		"r" (bit), "m" (*(volatile int *)(l)));

#define	i_bit_clear(bit,l)					\
	__asm__ volatile("	lock		\n		\
			btrl	%0,%1"		:		\
		:						\
		"r" (bit), "m" (*(volatile int *)(l)));
extern __inline__ unsigned long i_bit_isset(unsigned int testbit, volatile unsigned long *word)
{
	int	bit;

	__asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
		: "m" (*word), "ir" (testbit));
	return (bit);
}
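
/*
 * Illustrative sketch, not part of the original header: setting,
 * testing, and clearing an individual flag bit with the i_bit_*
 * routines above.  "EXAMPLE_PENDING_BIT" and "example_pending" are
 * hypothetical names; the block is disabled with "#if 0".
 */
#if 0	/* usage sketch only */
#define	EXAMPLE_PENDING_BIT	3

static volatile unsigned long	example_pending;

static __inline__ void
example_mark_and_clear(void)
{
	i_bit_set(EXAMPLE_PENDING_BIT, &example_pending);	/* locked bts */
	if (i_bit_isset(EXAMPLE_PENDING_BIT, &example_pending))
		i_bit_clear(EXAMPLE_PENDING_BIT, &example_pending); /* locked btr */
}
#endif	/* usage sketch only */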
extern __inline__ char	xchgb(volatile char * cp, char new);

extern __inline__ void	atomic_incl(long * p, long delta);
extern __inline__ void	atomic_incs(short * p, short delta);
extern __inline__ void	atomic_incb(char * p, char delta);

extern __inline__ void	atomic_decl(long * p, long delta);
extern __inline__ void	atomic_decs(short * p, short delta);
extern __inline__ void	atomic_decb(char * p, char delta);

extern __inline__ long	atomic_getl(long * p);
extern __inline__ short	atomic_gets(short * p);
extern __inline__ char	atomic_getb(char * p);

extern __inline__ void	atomic_setl(long * p, long value);
extern __inline__ void	atomic_sets(short * p, short value);
extern __inline__ void	atomic_setb(char * p, char value);
extern __inline__ char	xchgb(volatile char * cp, char new)
{
	register char	old = new;

	__asm__ volatile ("	xchgb	%0,%2"	:
			"=q" (old)		:
			"0" (new), "m" (*(volatile char *)cp) : "memory");
	return (old);
}
extern __inline__ void	atomic_incl(long * p, long delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n		\
			addl	%0,%1"		:		\
		:						\
		"r" (delta), "m" (*(volatile long *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}
extern __inline__ void	atomic_incs(short * p, short delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n		\
			addw	%0,%1"		:		\
		:						\
		"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}
extern __inline__ void	atomic_incb(char * p, char delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n		\
			addb	%0,%1"		:		\
		:						\
		"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}
extern __inline__ void	atomic_decl(long * p, long delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n		\
			subl	%0,%1"		:		\
		:						\
		"r" (delta), "m" (*(volatile long *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
extern __inline__ void	atomic_decs(short * p, short delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n		\
			subw	%0,%1"		:		\
		:						\
		"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
extern __inline__ void	atomic_decb(char * p, char delta)
{
#if	NEED_ATOMIC
	__asm__ volatile ("	lock		\n		\
			subb	%0,%1"		:		\
		:						\
		"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
extern __inline__ long	atomic_getl(long * p)
{
	return (*p);
}

extern __inline__ short	atomic_gets(short * p)
{
	return (*p);
}

extern __inline__ char	atomic_getb(char * p)
{
	return (*p);
}

extern __inline__ void	atomic_setl(long * p, long value)
{
	*p = value;
}

extern __inline__ void	atomic_sets(short * p, short value)
{
	*p = value;
}

extern __inline__ void	atomic_setb(char * p, char value)
{
	*p = value;
}
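
/*
 * Illustrative sketch, not part of the original header: maintaining a
 * simple statistics counter with the atomic_* helpers above.
 * "example_stat_count" is a hypothetical counter; the block is disabled
 * with "#if 0".
 */
#if 0	/* usage sketch only */
static long	example_stat_count;

static __inline__ long
example_stat_bump(void)
{
	atomic_incl(&example_stat_count, 1);	/* locked add when NEED_ATOMIC */
	return (atomic_getl(&example_stat_count));
}
#endif	/* usage sketch only */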
#else	/* !defined(__GNUC__) */

extern void	i_bit_set(int index, void *addr);
extern void	i_bit_clear(int index, void *addr);
extern void	bit_lock(int index, void *addr);
extern void	bit_unlock(int index, void *addr);

/*
 * All other routines defined in the __GNUC__ case lack
 * definitions otherwise. - XXX
 */

#endif	/* !defined(__GNUC__) */
#if !(USLOCK_DEBUG || USLOCK_STATS)
/*
 *	Take responsibility for production-quality usimple_locks.
 *	Let the portable lock package build simple_locks in terms
 *	of usimple_locks, which is done efficiently with macros.
 *	Currently, these aren't inlined although they probably
 *	should be.  The portable lock package is used for the
 *	usimple_lock prototypes and data declarations.
 *
 *	For non-production configurations, punt entirely to the
 *	portable lock package.
 *
 *	N.B.  I've left in the hooks for ETAP, so we can
 *	compare the performance of stats-gathering on top
 *	of "production" locks v. stats-gathering on top
 *	of portable, C-based locks.
 */
#define	USIMPLE_LOCK_CALLS
#endif /* !(USLOCK_DEBUG || USLOCK_STATS) */
#if	MACH_RT || (NCPUS > 1) || MACH_LDEBUG

#if	MACH_LDEBUG || !MACH_RT
#define	mutex_try(m)	(!(m)->interlock && _mutex_try(m))
#define	mutex_lock(m)					\
MACRO_BEGIN						\
	assert(assert_wait_possible());			\
	_mutex_lock(m);					\
MACRO_END
#else	/* MACH_LDEBUG || !MACH_RT */
#define	mutex_try(m)	(!(m)->interlock &&		\
			 !xchgb ((volatile char *)&((m)->locked), 1))
#define	mutex_lock(m)					\
MACRO_BEGIN						\
	assert(assert_wait_possible());			\
	if (!mutex_try(m))				\
		_mutex_lock(m);				\
MACRO_END
#endif	/* MACH_LDEBUG || !MACH_RT */

#else	/* MACH_RT || (NCPUS > 1) || MACH_LDEBUG */
#define	mutex_try	_mutex_try
#define	mutex_lock	_mutex_lock
#endif	/* MACH_RT || (NCPUS > 1) || MACH_LDEBUG */
#else	/* !MACH_KERNEL_PRIVATE */

#define	mutex_try	_mutex_try
#define	mutex_lock(m)					\
MACRO_BEGIN						\
	assert(assert_wait_possible());			\
	_mutex_lock(m);					\
MACRO_END

#endif	/* !MACH_KERNEL_PRIVATE */
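
/*
 * Illustrative sketch, not part of the original header: typical use of
 * the mutex_try()/mutex_lock() macros defined above.  The mutex_t type
 * and mutex_unlock() are assumed to come from the portable lock
 * package; the function and its body are hypothetical, and the block is
 * disabled with "#if 0".
 */
#if 0	/* usage sketch only */
static void
example_mutex_user(mutex_t *m)
{
	mutex_lock(m);			/* asserts blocking is legal, then acquires */
	/* ... critical section ... */
	mutex_unlock(m);

	if (mutex_try(m)) {		/* non-blocking attempt on a fast path */
		/* ... critical section ... */
		mutex_unlock(m);
	}
}
#endif	/* usage sketch only */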
extern void kernel_preempt_check (void);

#endif	/* _I386_LOCK_H_ */