]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/lock.h
xnu-344.49.tar.gz
[apple/xnu.git] / osfmk / i386 / lock.h
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
43866e37 6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
1c79356b 7 *
43866e37
A
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
43866e37
A
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
1c79356b
A
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * Copyright (C) 1998 Apple Computer
27 * All Rights Reserved
28 */
29/*
30 * @OSF_COPYRIGHT@
31 */
32/*
33 * Mach Operating System
34 * Copyright (c) 1991,1990 Carnegie Mellon University
35 * All Rights Reserved.
36 *
37 * Permission to use, copy, modify and distribute this software and its
38 * documentation is hereby granted, provided that both the copyright
39 * notice and this permission notice appear in all copies of the
40 * software, derivative works or modified versions, and any portions
41 * thereof, and that both notices appear in supporting documentation.
42 *
43 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
44 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
45 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 *
47 * Carnegie Mellon requests users of this software to return to
48 *
49 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
50 * School of Computer Science
51 * Carnegie Mellon University
52 * Pittsburgh PA 15213-3890
53 *
54 * any improvements or extensions that they make and grant Carnegie Mellon
55 * the rights to redistribute these changes.
56 */
57
58/*
59 */
60
61/*
62 * Machine-dependent simple locks for the i386.
63 */
64
65#ifndef _I386_LOCK_H_
66#define _I386_LOCK_H_
67
9bccf70c
A
68#include <sys/appleapiopts.h>
69
70#ifdef __APPLE_API_PRIVATE
71
72#ifdef MACH_KERNEL_PRIVATE
73
1c79356b
A
74#include <kern/macro_help.h>
75#include <kern/assert.h>
76#include <i386/hw_lock_types.h>
77
1c79356b
A
78#include <mach_rt.h>
79#include <mach_ldebug.h>
80#include <cpus.h>
81
82
83#if defined(__GNUC__)
84
85/*
86 * General bit-lock routines.
87 */
88
/*
 * bit_lock(bit, l): acquire a bit lock.
 * Spins with an unlocked btl until bit `bit' of the int at `l' appears
 * clear, then attempts to take it with a locked btsl; if CF comes back
 * set, another CPU won the race and we return to spinning.  The
 * "memory" clobber prevents the compiler from hoisting protected
 * accesses above the acquire.
 */
#define	bit_lock(bit,l) \
	__asm__ volatile(" jmp 1f \n \
	0: btl %0, %1 \n \
	jb 0b \n \
	1: lock \n \
	btsl %0,%1 \n \
	jb 0b" : \
	: \
	"r" (bit), "m" (*(volatile int *)(l)) : \
	"memory");
99
/*
 * bit_unlock(bit, l): release a bit lock by atomically clearing bit
 * `bit' of the int at `l' with a locked btrl.
 * Fix: add a "memory" clobber so the compiler cannot sink stores made
 * inside the critical section past the unlock; bit_lock already
 * carries the matching clobber on the acquire side.
 */
#define	bit_unlock(bit,l) \
	__asm__ volatile(" lock \n \
	btrl %0,%1" : \
	: \
	"r" (bit), "m" (*(volatile int *)(l)) : \
	"memory");
105
106/*
107 * Set or clear individual bits in a long word.
108 * The locked access is needed only to lock access
109 * to the word, not to individual bits.
110 */
111
/*
 * i_bit_set(bit, l): atomically set bit `bit' of the int at `l'
 * (lock btsl).  The lock prefix serializes the whole-word update,
 * not access to the individual bit.
 */
#define	i_bit_set(bit,l) \
	__asm__ volatile(" lock \n \
	btsl %0,%1" : \
	: \
	"r" (bit), "m" (*(volatile int *)(l)));
117
/*
 * i_bit_clear(bit, l): atomically clear bit `bit' of the int at `l'
 * (lock btrl).
 */
#define	i_bit_clear(bit,l) \
	__asm__ volatile(" lock \n \
	btrl %0,%1" : \
	: \
	"r" (bit), "m" (*(volatile int *)(l)));
123
/*
 * i_bit_isset(testbit, word): non-atomically test bit `testbit' of the
 * word at address `word'.  sbbl materializes CF, so the result is
 * all-ones (non-zero) when the bit is set and 0 when it is clear.
 *
 * Fix: the memory operand must be the pointed-to word, "m" (*word).
 * The original "m" (word) handed btl the address of the pointer
 * variable itself, so it tested bits of the stack slot holding the
 * pointer rather than the caller's word.
 */
extern __inline__ unsigned long i_bit_isset(unsigned int testbit, volatile unsigned long *word)
{
	int	bit;

	__asm__ volatile("btl %2,%1\n\tsbbl %0,%0" : "=r" (bit)
		: "m" (*word), "ir" (testbit));
	return bit;
}
132
/*
 * Prototypes for the inline helpers defined below.  The suffix encodes
 * the operand width: ...l = long, ...s = short, ...b = char/byte.
 */
extern __inline__ char xchgb(volatile char * cp, char new);

/* Add delta to *p (locked only when the configuration requires it). */
extern __inline__ void atomic_incl(long * p, long delta);
extern __inline__ void atomic_incs(short * p, short delta);
extern __inline__ void atomic_incb(char * p, char delta);

/* Subtract delta from *p. */
extern __inline__ void atomic_decl(long * p, long delta);
extern __inline__ void atomic_decs(short * p, short delta);
extern __inline__ void atomic_decb(char * p, char delta);

/* Plain load of *p. */
extern __inline__ long atomic_getl(long * p);
extern __inline__ short atomic_gets(short * p);
extern __inline__ char atomic_getb(char * p);

/* Plain store of value into *p. */
extern __inline__ void atomic_setl(long * p, long value);
extern __inline__ void atomic_sets(short * p, short value);
extern __inline__ void atomic_setb(char * p, char value);
150
/*
 * xchgb(cp, new): atomically exchange the byte at cp with `new' and
 * return the previous byte.  xchg with a memory operand is implicitly
 * locked on x86, so no explicit lock prefix is needed.
 */
extern __inline__ char xchgb(volatile char * cp, char new)
{
	register char	old = new;

	__asm__ volatile ("	xchgb	%0,%2" :
			"=q" (old) :
			"0" (new), "m" (*(volatile char *)cp) : "memory");
	return (old);
}
160
/*
 * atomic_incl(p, delta): *p += delta (32-bit long).
 * When NEED_ATOMIC is configured the add is a locked addl; otherwise
 * a plain (non-atomic) read-modify-write suffices.
 */
extern __inline__ void atomic_incl(long * p, long delta)
{
#if	NEED_ATOMIC
	__asm__ volatile (" lock \n \
		addl %0,%1" : \
		: \
		"r" (delta), "m" (*(volatile long *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}
172
/*
 * atomic_incs(p, delta): *p += delta (16-bit short).
 * Locked addw under NEED_ATOMIC, plain read-modify-write otherwise.
 * The "q" constraint restricts delta to a sub-word-addressable
 * register (a-d on i386).
 */
extern __inline__ void atomic_incs(short * p, short delta)
{
#if	NEED_ATOMIC
	__asm__ volatile (" lock \n \
		addw %0,%1" : \
		: \
		"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}
184
/*
 * atomic_incb(p, delta): *p += delta (8-bit byte).
 * Locked addb under NEED_ATOMIC, plain read-modify-write otherwise.
 */
extern __inline__ void atomic_incb(char * p, char delta)
{
#if	NEED_ATOMIC
	__asm__ volatile (" lock \n \
		addb %0,%1" : \
		: \
		"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p += delta;
#endif	/* NEED_ATOMIC */
}
196
/*
 * atomic_decl(p, delta): *p -= delta (32-bit long).
 * Locked subl under NEED_ATOMIC, plain read-modify-write otherwise.
 * Fix: gate on NEED_ATOMIC like every sibling helper (atomic_incl,
 * incs, incb, decs, decb); this function alone tested NCPUS > 1
 * directly, which bypasses the configuration's atomicity policy.
 */
extern __inline__ void atomic_decl(long * p, long delta)
{
#if	NEED_ATOMIC
	__asm__ volatile (" lock \n \
		subl %0,%1" : \
		: \
		"r" (delta), "m" (*(volatile long *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
208
/*
 * atomic_decs(p, delta): *p -= delta (16-bit short).
 * Locked subw under NEED_ATOMIC, plain read-modify-write otherwise.
 */
extern __inline__ void atomic_decs(short * p, short delta)
{
#if	NEED_ATOMIC
	__asm__ volatile (" lock \n \
		subw %0,%1" : \
		: \
		"q" (delta), "m" (*(volatile short *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
220
/*
 * atomic_decb(p, delta): *p -= delta (8-bit byte).
 * Locked subb under NEED_ATOMIC, plain read-modify-write otherwise.
 */
extern __inline__ void atomic_decb(char * p, char delta)
{
#if	NEED_ATOMIC
	__asm__ volatile (" lock \n \
		subb %0,%1" : \
		: \
		"q" (delta), "m" (*(volatile char *)p));
#else	/* NEED_ATOMIC */
	*p -= delta;
#endif	/* NEED_ATOMIC */
}
232
/*
 * atomic_getl: fetch the long at p with a single aligned load.
 * NOTE(review): no lock prefix -- presumably relies on aligned word
 * loads being naturally atomic on i386; confirm callers expect that.
 */
extern __inline__ long atomic_getl(long * p)
{
	long value = *p;

	return value;
}
237
/*
 * atomic_gets: fetch the short at p with a single aligned load.
 */
extern __inline__ short atomic_gets(short * p)
{
	short value = p[0];

	return value;
}
242
/*
 * atomic_getb: fetch the byte at p with a single load.
 */
extern __inline__ char atomic_getb(char * p)
{
	char value = p[0];

	return value;
}
247
/*
 * atomic_setl: store value into the long at p with a single aligned
 * store (no lock prefix).
 */
extern __inline__ void atomic_setl(long * p, long value)
{
	p[0] = value;
}
252
/*
 * atomic_sets: store value into the short at p with a single aligned
 * store.
 */
extern __inline__ void atomic_sets(short * p, short value)
{
	p[0] = value;
}
257
/*
 * atomic_setb: store value into the byte at p with a single store.
 */
extern __inline__ void atomic_setb(char * p, char value)
{
	p[0] = value;
}
262
263
264#else /* !defined(__GNUC__) */
265
/*
 * Non-GCC build: the bit operations are provided as out-of-line
 * routines with the same semantics as the __GNUC__ inline versions.
 */

/* Atomically set bit `index' of the word at `addr'. */
extern void i_bit_set(
	int		index,
	void		*addr);

/* Atomically clear bit `index' of the word at `addr'. */
extern void i_bit_clear(
	int		index,
	void		*addr);

/* Spin until bit `index' of the word at `addr' is acquired. */
extern void bit_lock(
	int		index,
	void		*addr);

/* Release the bit lock at bit `index' of the word at `addr'. */
extern void bit_unlock(
	int		index,
	void		*addr);
281
282/*
283 * All other routines defined in __GNUC__ case lack
284 * definitions otherwise. - XXX
285 */
286
287#endif /* !defined(__GNUC__) */
288
289
#if	!(USLOCK_DEBUG || USLOCK_STATS)
/*
 * Take responsibility for production-quality usimple_locks.
 * Let the portable lock package build simple_locks in terms
 * of usimple_locks, which is done efficiently with macros.
 * Currently, these aren't inlined although they probably
 * should be.  The portable lock package is used for the
 * usimple_lock prototypes and data declarations.
 *
 * For non-production configurations, punt entirely to the
 * portable lock package.
 *
 * N.B.  I've left in the hooks for ETAP, so we can
 * compare the performance of stats-gathering on top
 * of "production" locks v. stats-gathering on top
 * of portable, C-based locks.
 */
#define	USIMPLE_LOCK_CALLS
#endif	/* !(USLOCK_DEBUG || USLOCK_STATS) */

/*
 * Check for a pending kernel preemption point; implemented elsewhere.
 * NOTE(review): trigger conditions are not visible from this header.
 */
extern void kernel_preempt_check (void);
311
0b4e3aa0 312#endif /* MACH_KERNEL_PRIVATE */
1c79356b 313
#endif /* __APPLE_API_PRIVATE */
1c79356b
A
315
316#endif /* _I386_LOCK_H_ */