/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <libkern/OSAtomic.h>

enum {
    false = 0,
    true  = 1
};
#define NULL 0L


/*
 * atomic operations
 *
 * These are _the_ atomic operations, currently cast atop CompareAndSwap,
 * which is implemented in assembler. If we were worried about the cost of
 * this layering (we shouldn't be), all of this could be implemented in
 * assembler, as it is in MacOS 8/9
 * (derived from SuperMario/NativeLibs/IO/DriverServices/Synchronization.s,
 * which I wrote for NuKernel in a previous life with a different last name...)
 *
 * native Boolean CompareAndSwap(UInt32 oldValue, UInt32 newValue, UInt32 * oldValuePtr);
 *
 * We've since implemented a few more of these -- OSAddAtomic, OSDequeueAtomic,
 * OSEnqueueAtomic, etc. -- in assembler, either for speed or correctness. See also the
 * commpage atomic operations, and the platform-specific versions.
 * Like standards, there are a lot of atomic ops to choose from!
 */
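
/*
 * Illustrative sketch only: the caller-side retry loop that all of the
 * routines below build around OSCompareAndSwap. The function name
 * update_maximum and its use are hypothetical, not part of this file's API.
 *
 *     static void
 *     update_maximum(UInt32 observed, UInt32 * maxp)
 *     {
 *         UInt32 current;
 *
 *         do {
 *             current = *maxp;
 *             if (observed <= current)
 *                 break;              // already large enough; nothing to swap
 *         } while (! OSCompareAndSwap(current, observed, maxp));
 *     }
 */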

#ifndef __ppc__

SInt32 OSAddAtomic(SInt32 amount, SInt32 * value)
{
    SInt32 oldValue;
    SInt32 newValue;

    do {
        oldValue = *value;
        newValue = oldValue + amount;
    } while (! OSCompareAndSwap((UInt32) oldValue, (UInt32) newValue, (UInt32 *) value));

    return oldValue;
}

SInt32 OSIncrementAtomic(SInt32 * value)
{
    return OSAddAtomic(1, value);
}

SInt32 OSDecrementAtomic(SInt32 * value)
{
    return OSAddAtomic(-1, value);
}
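
/*
 * Usage sketch (hypothetical caller; object, refcount and free_object are
 * not part of this file): the add, increment and decrement routines return
 * the value *before* the update, so a release path can detect the 1 -> 0
 * transition directly.
 *
 *     if (OSDecrementAtomic(&object->refcount) == 1) {
 *         // we dropped the last reference; the count is now zero
 *         free_object(object);
 *     }
 */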

void * OSDequeueAtomic(void ** inList, SInt32 inOffset)
{
    void * oldListHead;
    void * newListHead;

    do {
        oldListHead = *inList;
        if (oldListHead == NULL) {
            break;
        }

        newListHead = *(void **) (((char *) oldListHead) + inOffset);
    } while (! OSCompareAndSwap((UInt32) oldListHead,
                                (UInt32) newListHead, (UInt32 *) inList));

    return oldListHead;
}

void OSEnqueueAtomic(void ** inList, void * inNewLink, SInt32 inOffset)
{
    void * oldListHead;
    void * newListHead = inNewLink;
    void ** newLinkNextPtr = (void **) (((char *) inNewLink) + inOffset);

    do {
        oldListHead = *inList;
        *newLinkNextPtr = oldListHead;
    } while (! OSCompareAndSwap((UInt32) oldListHead, (UInt32) newListHead,
                                (UInt32 *) inList));
}

#endif /* !__ppc__ */
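
/*
 * Usage sketch for the offset-based list routines (struct my_elem, free_list
 * and elem are hypothetical, not part of this file): inOffset is the byte
 * offset of the link field inside the caller's element, typically computed
 * with offsetof().
 *
 *     struct my_elem {
 *         UInt32           payload;
 *         struct my_elem * next;      // link field used by the list
 *     };
 *
 *     static void * free_list = NULL;
 *
 *     // push an element onto the shared LIFO list
 *     OSEnqueueAtomic(&free_list, elem, offsetof(struct my_elem, next));
 *
 *     // pop one element, or NULL if the list is empty
 *     struct my_elem * e = OSDequeueAtomic(&free_list, offsetof(struct my_elem, next));
 */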

static UInt32 OSBitwiseAtomic(UInt32 and_mask, UInt32 or_mask, UInt32 xor_mask, UInt32 * value)
{
    UInt32 oldValue;
    UInt32 newValue;

    do {
        oldValue = *value;
        newValue = ((oldValue & and_mask) | or_mask) ^ xor_mask;
    } while (! OSCompareAndSwap(oldValue, newValue, value));

    return oldValue;
}

UInt32 OSBitAndAtomic(UInt32 mask, UInt32 * value)
{
    return OSBitwiseAtomic(mask, 0, 0, value);
}

UInt32 OSBitOrAtomic(UInt32 mask, UInt32 * value)
{
    return OSBitwiseAtomic((UInt32) -1, mask, 0, value);
}

UInt32 OSBitXorAtomic(UInt32 mask, UInt32 * value)
{
    return OSBitwiseAtomic((UInt32) -1, 0, mask, value);
}
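
/*
 * Usage sketch (F_BUSY and the flag word `flags` are hypothetical, not part
 * of this file): the bitwise helpers return the previous word, so a caller
 * can both update a flag and learn whether it was already set in one step.
 *
 *     #define F_BUSY 0x00000001
 *
 *     // set the flag; a nonzero result means it was already set
 *     if (OSBitOrAtomic(F_BUSY, &flags) & F_BUSY) {
 *         // already busy
 *     }
 *
 *     // clear the flag
 *     OSBitAndAtomic((UInt32) ~F_BUSY, &flags);
 */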

static Boolean OSCompareAndSwap8(UInt8 oldValue8, UInt8 newValue8, UInt8 * value8)
{
    UInt32 mask = 0x000000ff;
    UInt32 alignment = ((UInt32) value8) & (sizeof(UInt32) - 1);
    /* shift table indexed by the byte's offset within its aligned word;
     * the layout yields the correct shift on either byte order */
    UInt32 shiftValues = (24 << 24) | (16 << 16) | (8 << 8);
    int shift = (UInt32) *(((UInt8 *) &shiftValues) + alignment);
    UInt32 * value32 = (UInt32 *) (value8 - alignment);
    UInt32 oldValue;
    UInt32 newValue;

    mask <<= shift;

    /* build 32-bit old/new images of the containing word and CAS that word;
     * if a neighboring byte changes concurrently, the CAS fails and the
     * caller's retry loop simply tries again */
    oldValue = *value32;
    oldValue = (oldValue & ~mask) | (oldValue8 << shift);
    newValue = (oldValue & ~mask) | (newValue8 << shift);

    return OSCompareAndSwap(oldValue, newValue, value32);
}

static Boolean OSTestAndSetClear(UInt32 bit, Boolean wantSet, UInt8 * startAddress)
{
    UInt8 mask = 1;
    UInt8 oldValue;
    UInt8 wantValue;

    startAddress += (bit / 8);
    mask <<= (7 - (bit % 8));
    wantValue = wantSet ? mask : 0;

    do {
        oldValue = *startAddress;
        if ((oldValue & mask) == wantValue) {
            break;
        }
    } while (! OSCompareAndSwap8(oldValue, (oldValue & ~mask) | wantValue, startAddress));

    return (oldValue & mask) == wantValue;
}

Boolean OSTestAndSet(UInt32 bit, UInt8 * startAddress)
{
    return OSTestAndSetClear(bit, true, startAddress);
}

Boolean OSTestAndClear(UInt32 bit, UInt8 * startAddress)
{
    return OSTestAndSetClear(bit, false, startAddress);
}
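
/*
 * Usage sketch (slot_map and slot are hypothetical, not part of this file):
 * bit numbering is MSB-first within each byte, i.e. bit 0 is the 0x80 bit
 * of the first byte. Each routine returns true if the bit was already in
 * the requested state.
 *
 *     static UInt8 slot_map[8];      // a 64-slot allocation bitmap
 *
 *     if (! OSTestAndSet(slot, slot_map)) {
 *         // bit was clear and is now set: we own this slot
 *     }
 *     ...
 *     OSTestAndClear(slot, slot_map);
 */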

/*
 * silly unaligned versions
 */

SInt8 OSIncrementAtomic8(SInt8 * value)
{
    return OSAddAtomic8(1, value);
}

SInt8 OSDecrementAtomic8(SInt8 * value)
{
    return OSAddAtomic8(-1, value);
}

SInt8 OSAddAtomic8(SInt32 amount, SInt8 * value)
{
    SInt8 oldValue;
    SInt8 newValue;

    do {
        oldValue = *value;
        newValue = oldValue + amount;
    } while (! OSCompareAndSwap8((UInt8) oldValue, (UInt8) newValue, (UInt8 *) value));

    return oldValue;
}

static UInt8 OSBitwiseAtomic8(UInt32 and_mask, UInt32 or_mask, UInt32 xor_mask, UInt8 * value)
{
    UInt8 oldValue;
    UInt8 newValue;

    do {
        oldValue = *value;
        newValue = ((oldValue & and_mask) | or_mask) ^ xor_mask;
    } while (! OSCompareAndSwap8(oldValue, newValue, value));

    return oldValue;
}

UInt8 OSBitAndAtomic8(UInt32 mask, UInt8 * value)
{
    return OSBitwiseAtomic8(mask, 0, 0, value);
}

UInt8 OSBitOrAtomic8(UInt32 mask, UInt8 * value)
{
    return OSBitwiseAtomic8((UInt32) -1, mask, 0, value);
}

UInt8 OSBitXorAtomic8(UInt32 mask, UInt8 * value)
{
    return OSBitwiseAtomic8((UInt32) -1, 0, mask, value);
}

static Boolean OSCompareAndSwap16(UInt16 oldValue16, UInt16 newValue16, UInt16 * value16)
{
    UInt32 mask = 0x0000ffff;
    UInt32 alignment = ((UInt32) value16) & (sizeof(UInt32) - 1);
    /* shift table indexed by the halfword's offset within its aligned word;
     * same containing-word CAS technique as OSCompareAndSwap8 above */
    UInt32 shiftValues = (16 << 24) | (16 << 16);
    UInt32 shift = (UInt32) *(((UInt8 *) &shiftValues) + alignment);
    UInt32 * value32 = (UInt32 *) (((UInt32) value16) - alignment);
    UInt32 oldValue;
    UInt32 newValue;

    mask <<= shift;

    oldValue = *value32;
    oldValue = (oldValue & ~mask) | (oldValue16 << shift);
    newValue = (oldValue & ~mask) | (newValue16 << shift);

    return OSCompareAndSwap(oldValue, newValue, value32);
}

SInt16 OSIncrementAtomic16(SInt16 * value)
{
    return OSAddAtomic16(1, value);
}

SInt16 OSDecrementAtomic16(SInt16 * value)
{
    return OSAddAtomic16(-1, value);
}

SInt16 OSAddAtomic16(SInt32 amount, SInt16 * value)
{
    SInt16 oldValue;
    SInt16 newValue;

    do {
        oldValue = *value;
        newValue = oldValue + amount;
    } while (! OSCompareAndSwap16((UInt16) oldValue, (UInt16) newValue, (UInt16 *) value));

    return oldValue;
}

static UInt16 OSBitwiseAtomic16(UInt32 and_mask, UInt32 or_mask, UInt32 xor_mask, UInt16 * value)
{
    UInt16 oldValue;
    UInt16 newValue;

    do {
        oldValue = *value;
        newValue = ((oldValue & and_mask) | or_mask) ^ xor_mask;
    } while (! OSCompareAndSwap16(oldValue, newValue, value));

    return oldValue;
}

UInt16 OSBitAndAtomic16(UInt32 mask, UInt16 * value)
{
    return OSBitwiseAtomic16(mask, 0, 0, value);
}

UInt16 OSBitOrAtomic16(UInt32 mask, UInt16 * value)
{
    return OSBitwiseAtomic16((UInt32) -1, mask, 0, value);
}

UInt16 OSBitXorAtomic16(UInt32 mask, UInt16 * value)
{
    return OSBitwiseAtomic16((UInt32) -1, 0, mask, value);
}
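
/*
 * Usage sketch for the narrow variants (stats and active_count are
 * hypothetical, not part of this file): they behave like their 32-bit
 * counterparts but update a single byte or halfword by CASing the word
 * that contains it.
 *
 *     SInt16 previous;
 *
 *     previous = OSIncrementAtomic16(&stats->active_count);
 *     ...
 *     OSDecrementAtomic16(&stats->active_count);
 */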