/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1998 Apple Computer
 * All Rights Reserved
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 * Machine-dependent simple locks for the i386.
 */
#ifdef KERNEL_PRIVATE

#ifndef _I386_LOCK_H_
#define _I386_LOCK_H_

#include <sys/appleapiopts.h>

#ifdef __APPLE_API_PRIVATE

#ifdef MACH_KERNEL_PRIVATE

#include <kern/macro_help.h>
#include <kern/assert.h>
#include <i386/hw_lock_types.h>
#include <i386/locks.h>

#include <mach_rt.h>
#include <mach_ldebug.h>

typedef lck_rw_t lock_t;

extern unsigned int LockTimeOutTSC;	/* Lock timeout in TSC ticks */
extern unsigned int LockTimeOut;	/* Lock timeout in absolute time */


#if defined(__GNUC__)

/*
 * General bit-lock routines.
 *
 * bit_lock() first tries to claim the bit with a locked btsl; if the bit
 * was already set, it spins on an unlocked btl until the bit reads clear
 * and then retries the locked attempt.  bit_unlock() releases the lock by
 * clearing the bit with a locked btrl.
 */

#define bit_lock(bit,l) \
	__asm__ volatile("	jmp	1f	\n \
	0:	btl	%0, %1		\n \
		jb	0b		\n \
	1:	lock			\n \
		btsl	%0,%1		\n \
		jb	0b" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)) : \
		"memory");

#define bit_unlock(bit,l) \
	__asm__ volatile("	lock	\n \
		btrl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));

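/*
 * Minimal usage sketch for bit_lock()/bit_unlock(): one bit of a flags word
 * serializes access to a field.  The structure, field names and bit number
 * below are hypothetical and exist only for illustration.
 */
#if 0	/* illustration only, not compiled */
struct example_object {
	volatile int	eo_flags;	/* bit 0 doubles as a busy lock */
	int		eo_data;	/* protected by bit 0 of eo_flags */
};
#define EO_LOCK_BIT	0		/* hypothetical lock bit */

static void
example_update(struct example_object *eo, int value)
{
	bit_lock(EO_LOCK_BIT, &eo->eo_flags);	/* spin until the bit is acquired */
	eo->eo_data = value;			/* critical section */
	bit_unlock(EO_LOCK_BIT, &eo->eo_flags);	/* atomically clear the bit */
}
#endif
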
/*
 * Set or clear individual bits in a long word.
 * The locked access is needed only to lock access
 * to the word, not to individual bits.
 */

#define i_bit_set(bit,l) \
	__asm__ volatile("	lock	\n \
		btsl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));

#define i_bit_clear(bit,l) \
	__asm__ volatile("	lock	\n \
		btrl	%0,%1" : \
		: \
		"r" (bit), "m" (*(volatile int *)(l)));

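/*
 * Minimal usage sketch for i_bit_set()/i_bit_clear(): each call updates a
 * single bit atomically but provides no mutual exclusion on its own.  The
 * signal word and bit index below are hypothetical.
 */
#if 0	/* illustration only, not compiled */
static volatile int	example_signals;	/* hypothetical pending-work word */
#define EXAMPLE_PENDING_BIT	2		/* hypothetical bit index */

static void
example_post(void)
{
	i_bit_set(EXAMPLE_PENDING_BIT, &example_signals);	/* atomically set bit 2 */
}

static void
example_ack(void)
{
	i_bit_clear(EXAMPLE_PENDING_BIT, &example_signals);	/* atomically clear bit 2 */
}
#endif
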
static inline char xchgb(volatile char * cp, char new);

static inline void atomic_incl(volatile long * p, long delta);
static inline void atomic_incs(volatile short * p, short delta);
static inline void atomic_incb(volatile char * p, char delta);

static inline void atomic_decl(volatile long * p, long delta);
static inline void atomic_decs(volatile short * p, short delta);
static inline void atomic_decb(volatile char * p, char delta);

static inline long atomic_getl(const volatile long * p);
static inline short atomic_gets(const volatile short * p);
static inline char atomic_getb(const volatile char * p);

static inline void atomic_setl(volatile long * p, long value);
static inline void atomic_sets(volatile short * p, short value);
static inline void atomic_setb(volatile char * p, char value);

static inline char xchgb(volatile char * cp, char new)
{
	register char old = new;

	__asm__ volatile ("	xchgb	%0,%2" :
			"=q" (old) :
			"0" (new), "m" (*(volatile char *)cp) : "memory");
	return (old);
}

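/*
 * Minimal usage sketch for xchgb(): the atomic byte exchange is the classic
 * primitive for a test-and-set spinlock.  The lock variable and function
 * names below are hypothetical.
 */
#if 0	/* illustration only, not compiled */
static volatile char	example_tas_lock;	/* 0 = free, 1 = held */

static void
example_tas_acquire(void)
{
	while (xchgb(&example_tas_lock, 1) != 0)
		;			/* old value was 1: another CPU holds it */
}

static void
example_tas_release(void)
{
	example_tas_lock = 0;		/* plain store suffices on the x86 memory model */
}
#endif
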
static inline void atomic_incl(volatile long * p, long delta)
{
	__asm__ volatile ("	lock	\n \
			add	%0,%1" : \
			: \
			"r" (delta), "m" (*(volatile long *)p));
}

static inline void atomic_incs(volatile short * p, short delta)
{
	__asm__ volatile ("	lock	\n \
			addw	%0,%1" : \
			: \
			"q" (delta), "m" (*(volatile short *)p));
}

static inline void atomic_incb(volatile char * p, char delta)
{
	__asm__ volatile ("	lock	\n \
			addb	%0,%1" : \
			: \
			"q" (delta), "m" (*(volatile char *)p));
}

static inline void atomic_decl(volatile long * p, long delta)
{
	__asm__ volatile ("	lock	\n \
			sub	%0,%1" : \
			: \
			"r" (delta), "m" (*(volatile long *)p));
}

static inline int atomic_decl_and_test(volatile long * p, long delta)
{
	uint8_t	ret;
	__asm__ volatile (
		"	lock		\n\t"
		"	sub	%1,%2	\n\t"
		"	sete	%0"
		: "=qm" (ret)
		: "r" (delta), "m" (*(volatile long *)p));
	return ret;
}

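/*
 * Minimal usage sketch combining atomic_incl() and atomic_decl_and_test():
 * a reference count where atomic_decl_and_test() returns nonzero only for
 * the caller whose decrement brings the count to exactly zero.  The
 * structure and helper names below are hypothetical.
 */
#if 0	/* illustration only, not compiled */
struct example_refobj {
	volatile long	er_refcnt;	/* guarded only by the lock prefix */
};

static void
example_ref(struct example_refobj *er)
{
	atomic_incl(&er->er_refcnt, 1);		/* locked add of 1 to the count */
}

static void
example_unref(struct example_refobj *er)
{
	if (atomic_decl_and_test(&er->er_refcnt, 1)) {
		/* count reached zero; this caller may free the object */
	}
}
#endif
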
static inline void atomic_decs(volatile short * p, short delta)
{
	__asm__ volatile ("	lock	\n \
			subw	%0,%1" : \
			: \
			"q" (delta), "m" (*(volatile short *)p));
}

static inline void atomic_decb(volatile char * p, char delta)
{
	__asm__ volatile ("	lock	\n \
			subb	%0,%1" : \
			: \
			"q" (delta), "m" (*(volatile char *)p));
}

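/*
 * The atomic_get*() and atomic_set*() routines below compile to plain
 * aligned loads and stores, which IA-32 performs atomically for these
 * operand sizes.  They carry no lock prefix and impose no ordering beyond
 * the processor's normal memory model.
 */
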
static inline long atomic_getl(const volatile long * p)
{
	return (*p);
}

static inline short atomic_gets(const volatile short * p)
{
	return (*p);
}

static inline char atomic_getb(const volatile char * p)
{
	return (*p);
}

static inline void atomic_setl(volatile long * p, long value)
{
	*p = value;
}

static inline void atomic_sets(volatile short * p, short value)
{
	*p = value;
}

static inline void atomic_setb(volatile char * p, char value)
{
	*p = value;
}


#else	/* !defined(__GNUC__) */

extern void i_bit_set(
	int		index,
	void		*addr);

extern void i_bit_clear(
	int		index,
	void		*addr);

extern void bit_lock(
	int		index,
	void		*addr);

extern void bit_unlock(
	int		index,
	void		*addr);

/*
 * The remaining routines defined above for the __GNUC__ case
 * have no non-GNU definitions here. - XXX
 */

#endif	/* !defined(__GNUC__) */

extern void kernel_preempt_check(void);

#endif	/* MACH_KERNEL_PRIVATE */

#endif	/* __APPLE_API_PRIVATE */

#endif	/* _I386_LOCK_H_ */

#endif	/* KERNEL_PRIVATE */