]> git.saurik.com Git - apple/xnu.git/blob - libkern/libkern/OSAtomic.h
375151929965ad72142c711f447b71ea85e810c0
[apple/xnu.git] / libkern / libkern / OSAtomic.h
1 /*
2 * Copyright (c) 2007-2012 Apple Inc. All rights reserved.
3 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 *
5 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. The rights granted to you under the License
11 * may not be used to create, or enable the creation or redistribution of,
12 * unlawful or unlicensed copies of an Apple operating system, or to
13 * circumvent, violate, or enable the circumvention or violation of, any
14 * terms of an Apple operating system software license agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 *
19 * The Original Code and all software distributed under the License are
20 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
21 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
22 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
23 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
24 * Please see the License for the specific language governing rights and
25 * limitations under the License.
26 *
27 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
28 */
29 /*
30 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
31 *
32 * HISTORY
33 *
34 */
35
36 #ifndef _OS_OSATOMIC_H
37 #define _OS_OSATOMIC_H
38
39 #include <libkern/OSBase.h>
40
41 #if defined(__cplusplus)
42 extern "C" {
43 #endif
44
#ifdef XNU_KERNEL_PRIVATE
/*
 * The macro SAFE_CAST_PTR() casts one type of pointer to another type, making sure
 * the data the pointer is referencing is the same size. If it is not, it will cause
 * a division by zero compiler warning. This is to work around "SInt32" being defined
 * as "long" on ILP32 and as "int" on LP64, which would require an explicit cast to
 * "SInt32*" when for instance passing an "int*" to OSAddAtomic() - which masks size
 * mismatches.
 * -- var is used, but sizeof does not evaluate the
 * argument, i.e. we're safe against "++" etc. in var --
 */
#define __SAFE_CAST_PTR(type, var) (((type)(var))+(0/(sizeof(*var) == sizeof(*(type)0) ? 1 : 0)))
#else
/* Outside XNU proper: plain cast, no compile-time pointee-size check. */
#define __SAFE_CAST_PTR(type, var) ((type)(var))
#endif
60
61 /*!
62 * @header
63 *
64 * @abstract
65 * This header declares the OSAtomic group of functions for atomic
66 * reading and updating of values.
67 */
68
69 /*!
70 * @function OSCompareAndSwap64
71 *
72 * @abstract
73 * 64-bit compare and swap operation.
74 *
75 * @discussion
76 * See OSCompareAndSwap.
77 */
78 extern Boolean OSCompareAndSwap64(
79 UInt64 oldValue,
80 UInt64 newValue,
81 volatile UInt64 * address);
82 #define OSCompareAndSwap64(a, b, c) \
83 (OSCompareAndSwap64(a, b, __SAFE_CAST_PTR(volatile UInt64*,c)))
84
85 /*!
86 * @function OSAddAtomic64
87 *
88 * @abstract
89 * 64-bit atomic add operation.
90 *
91 * @discussion
92 * See OSAddAtomic.
93 */
94 extern SInt64 OSAddAtomic64(
95 SInt64 theAmount,
96 volatile SInt64 * address);
97 #define OSAddAtomic64(a, b) \
98 (OSAddAtomic64(a, __SAFE_CAST_PTR(volatile SInt64*,b)))
99
100 /*!
101 * @function OSIncrementAtomic64
102 *
103 * @abstract
104 * 64-bit increment.
105 *
106 * @discussion
107 * See OSIncrementAtomic.
108 */
109 inline static SInt64
110 OSIncrementAtomic64(volatile SInt64 * address)
111 {
112 return OSAddAtomic64(1LL, address);
113 }
114
115 /*!
116 * @function OSDecrementAtomic64
117 *
118 * @abstract
119 * 64-bit decrement.
120 *
121 * @discussion
122 * See OSDecrementAtomic.
123 */
124 inline static SInt64
125 OSDecrementAtomic64(volatile SInt64 * address)
126 {
127 return OSAddAtomic64(-1LL, address);
128 }
129
130 #if XNU_KERNEL_PRIVATE
131 /* Not to be included in headerdoc.
132 *
133 * @function OSAddAtomicLong
134 *
135 * @abstract
136 * 32/64-bit atomic add operation, depending on sizeof(long).
137 *
138 * @discussion
139 * See OSAddAtomic.
140 */
141 extern long OSAddAtomicLong(
142 long theAmount,
143 volatile long * address);
144 #define OSAddAtomicLong(a, b) \
145 (OSAddAtomicLong(a, __SAFE_CAST_PTR(volatile long*,b)))
146
147 /* Not to be included in headerdoc.
148 *
149 * @function OSIncrementAtomicLong
150 *
151 * @abstract
152 * 32/64-bit increment, depending on sizeof(long)
153 *
154 * @discussion
155 * See OSIncrementAtomic.
156 */
inline static long
OSIncrementAtomicLong(volatile long * address)
{
	/* Atomic add of +1 on a long-sized value; returns the prior value. */
	long previousValue = OSAddAtomicLong(1L, address);
	return previousValue;
}
162
163 /* Not to be included in headerdoc.
164 *
165 * @function OSDecrementAtomicLong
166 *
167 * @abstract
168 * 32/64-bit decrement, depending on sizeof(long)
 *
 * @discussion
 * See OSDecrementAtomic.
170 */
inline static long
OSDecrementAtomicLong(volatile long * address)
{
	/* Atomic add of -1 on a long-sized value; returns the prior value. */
	long previousValue = OSAddAtomicLong(-1L, address);
	return previousValue;
}
176 #endif /* XNU_KERNEL_PRIVATE */
177
178 #if XNU_KERNEL_PRIVATE
179 /*!
180 * @function OSCompareAndSwap8
181 *
182 * @abstract
183 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
184 *
185 * @discussion
 * The OSCompareAndSwap8 function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap8 returns true if newValue is written to the address; otherwise, it returns false.
187 *
188 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
189 *
190 * @param oldValue The value to compare at address.
191 * @param newValue The value to write to address if oldValue compares true.
192 * @param address The byte aligned address of the data to update atomically.
193 * @result true if newValue was written to the address.
194 */
195 extern Boolean OSCompareAndSwap8(
196 UInt8 oldValue,
197 UInt8 newValue,
198 volatile UInt8 * address);
199 #define OSCompareAndSwap8(a, b, c) \
200 (OSCompareAndSwap8(a, b, __SAFE_CAST_PTR(volatile UInt8*,c)))
201
202 /*!
203 * @function OSCompareAndSwap16
204 *
205 * @abstract
206 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
207 *
208 * @discussion
 * The OSCompareAndSwap16 function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap16 returns true if newValue is written to the address; otherwise, it returns false.
210 *
211 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
212 *
213 * @param oldValue The value to compare at address.
214 * @param newValue The value to write to address if oldValue compares true.
215 * @param address The 2-byte aligned address of the data to update atomically.
216 * @result true if newValue was written to the address.
217 */
218 extern Boolean OSCompareAndSwap16(
219 UInt16 oldValue,
220 UInt16 newValue,
221 volatile UInt16 * address);
222 #define OSCompareAndSwap16(a, b, c) \
223 (OSCompareAndSwap16(a, b, __SAFE_CAST_PTR(volatile UInt16*,c)))
224
225 #endif /* XNU_KERNEL_PRIVATE */
226
227 /*!
228 * @function OSCompareAndSwap
229 *
230 * @abstract
231 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
232 *
233 * @discussion
234 * The OSCompareAndSwap function compares the value at the specified address with oldVal. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false.
235 *
236 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
237 *
238 * @param oldValue The value to compare at address.
239 * @param newValue The value to write to address if oldValue compares true.
240 * @param address The 4-byte aligned address of the data to update atomically.
241 * @result true if newValue was written to the address.
242 */
243 extern Boolean OSCompareAndSwap(
244 UInt32 oldValue,
245 UInt32 newValue,
246 volatile UInt32 * address);
247 #define OSCompareAndSwap(a, b, c) \
248 (OSCompareAndSwap(a, b, __SAFE_CAST_PTR(volatile UInt32*,c)))
249
250 /*!
251 * @function OSCompareAndSwapPtr
252 *
253 * @abstract
254 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
255 *
256 * @discussion
257 * The OSCompareAndSwapPtr function compares the pointer-sized value at the specified address with oldVal. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwapPtr returns true if newValue is written to the address; otherwise, it returns false.
258 *
259 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
260 * @param oldValue The pointer value to compare at address.
261 * @param newValue The pointer value to write to address if oldValue compares true.
262 * @param address The pointer-size aligned address of the data to update atomically.
263 * @result true if newValue was written to the address.
264 */
265 extern Boolean OSCompareAndSwapPtr(
266 void * oldValue,
267 void * newValue,
268 void * volatile * address);
269 #define OSCompareAndSwapPtr(a, b, c) \
270 (OSCompareAndSwapPtr(a, b, __SAFE_CAST_PTR(void * volatile *,c)))
271
272 /*!
273 * @function OSAddAtomic
274 *
275 * @abstract
276 * 32-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
277 *
278 * @discussion
279 * The OSAddAtomic function adds the specified amount to the value at the specified address and returns the original value.
280 *
281 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
282 * @param amount The amount to add.
283 * @param address The 4-byte aligned address of the value to update atomically.
284 * @result The value before the addition
285 */
286 extern SInt32 OSAddAtomic(
287 SInt32 amount,
288 volatile SInt32 * address);
289 #define OSAddAtomic(a, b) \
290 (OSAddAtomic(a, __SAFE_CAST_PTR(volatile SInt32*,b)))
291
292 /*!
293 * @function OSAddAtomic16
294 *
295 * @abstract
296 * 16-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
297 *
298 * @discussion
299 * The OSAddAtomic16 function adds the specified amount to the value at the specified address and returns the original value.
300 *
301 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
 * @param amount The amount to add.
 * @param address The 2-byte aligned address of the value to update atomically.
303 * @result The value before the addition
304 */
305 extern SInt16 OSAddAtomic16(
306 SInt32 amount,
307 volatile SInt16 * address);
308
309 /*!
310 * @function OSAddAtomic8
311 *
312 * @abstract
313 * 8-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
314 *
315 * @discussion
316 * The OSAddAtomic8 function adds the specified amount to the value at the specified address and returns the original value.
317 *
318 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
319 * @param amount The amount to add.
320 * @param address The address of the value to update atomically.
321 * @result The value before the addition.
322 */
323 extern SInt8 OSAddAtomic8(
324 SInt32 amount,
325 volatile SInt8 * address);
326
327 /*!
328 * @function OSIncrementAtomic
329 *
330 * @abstract
331 * 32-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
332 *
333 * @discussion
334 * The OSIncrementAtomic function increments the value at the specified address by one and returns the original value.
335 *
336 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
337 * @param address The 4-byte aligned address of the value to update atomically.
338 * @result The value before the increment.
339 */
340 extern SInt32 OSIncrementAtomic(volatile SInt32 * address);
341 #define OSIncrementAtomic(a) \
342 (OSIncrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))
343
344 /*!
345 * @function OSIncrementAtomic16
346 *
347 * @abstract
348 * 16-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
349 *
350 * @discussion
351 * The OSIncrementAtomic16 function increments the value at the specified address by one and returns the original value.
352 *
353 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
354 * @param address The 2-byte aligned address of the value to update atomically.
355 * @result The value before the increment.
356 */
357 extern SInt16 OSIncrementAtomic16(volatile SInt16 * address);
358
359 /*!
360 * @function OSIncrementAtomic8
361 *
362 * @abstract
363 * 8-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
364 *
365 * @discussion
366 * The OSIncrementAtomic8 function increments the value at the specified address by one and returns the original value.
367 *
368 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
369 * @param address The address of the value to update atomically.
370 * @result The value before the increment.
371 */
372 extern SInt8 OSIncrementAtomic8(volatile SInt8 * address);
373
374 /*!
375 * @function OSDecrementAtomic
376 *
377 * @abstract
378 * 32-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
379 *
380 * @discussion
381 * The OSDecrementAtomic function decrements the value at the specified address by one and returns the original value.
382 *
383 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
384 * @param address The 4-byte aligned address of the value to update atomically.
385 * @result The value before the decrement.
386 */
387 extern SInt32 OSDecrementAtomic(volatile SInt32 * address);
388 #define OSDecrementAtomic(a) \
389 (OSDecrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))
390
391 /*!
392 * @function OSDecrementAtomic16
393 *
394 * @abstract
395 * 16-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
396 *
397 * @discussion
398 * The OSDecrementAtomic16 function decrements the value at the specified address by one and returns the original value.
399 *
400 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
401 * @param address The 2-byte aligned address of the value to update atomically.
402 * @result The value before the decrement.
403 */
404 extern SInt16 OSDecrementAtomic16(volatile SInt16 * address);
405
406 /*!
407 * @function OSDecrementAtomic8
408 *
409 * @abstract
410 * 8-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
411 *
412 * @discussion
413 * The OSDecrementAtomic8 function decrements the value at the specified address by one and returns the original value.
414 *
415 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
416 * @param address The address of the value to update atomically.
417 * @result The value before the decrement.
418 */
419 extern SInt8 OSDecrementAtomic8(volatile SInt8 * address);
420
421 /*!
422 * @function OSBitAndAtomic
423 *
424 * @abstract
425 * 32-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
426 *
427 * @discussion
428 * The OSBitAndAtomic function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
429 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Previous incarnations of this function incorporated a memory barrier on systems with weakly-ordered memory architectures, but current versions contain no barriers.
431 * @param mask The mask to logically and with the value.
432 * @param address The 4-byte aligned address of the value to update atomically.
433 * @result The value before the bitwise operation
434 */
435 extern UInt32 OSBitAndAtomic(
436 UInt32 mask,
437 volatile UInt32 * address);
438 #define OSBitAndAtomic(a, b) \
439 (OSBitAndAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
440
441 /*!
442 * @function OSBitAndAtomic16
443 *
444 * @abstract
445 * 16-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
446 *
447 * @discussion
448 * The OSBitAndAtomic16 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
449 *
450 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
451 * @param mask The mask to logically and with the value.
452 * @param address The 2-byte aligned address of the value to update atomically.
453 * @result The value before the bitwise operation.
454 */
455 extern UInt16 OSBitAndAtomic16(
456 UInt32 mask,
457 volatile UInt16 * address);
458
459 /*!
460 * @function OSBitAndAtomic8
461 *
462 * @abstract
463 * 8-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
464 *
465 * @discussion
466 * The OSBitAndAtomic8 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
467 *
468 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
469 * @param mask The mask to logically and with the value.
470 * @param address The address of the value to update atomically.
471 * @result The value before the bitwise operation.
472 */
473 extern UInt8 OSBitAndAtomic8(
474 UInt32 mask,
475 volatile UInt8 * address);
476
477 /*!
478 * @function OSBitOrAtomic
479 *
480 * @abstract
481 * 32-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
482 *
483 * @discussion
484 * The OSBitOrAtomic function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
485 *
486 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
487 * @param mask The mask to logically or with the value.
488 * @param address The 4-byte aligned address of the value to update atomically.
489 * @result The value before the bitwise operation.
490 */
491 extern UInt32 OSBitOrAtomic(
492 UInt32 mask,
493 volatile UInt32 * address);
494 #define OSBitOrAtomic(a, b) \
495 (OSBitOrAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
496
497 /*!
498 * @function OSBitOrAtomic16
499 *
500 * @abstract
501 * 16-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
502 *
503 * @discussion
504 * The OSBitOrAtomic16 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
505 *
506 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
507 * @param mask The mask to logically or with the value.
508 * @param address The 2-byte aligned address of the value to update atomically.
509 * @result The value before the bitwise operation.
510 */
511 extern UInt16 OSBitOrAtomic16(
512 UInt32 mask,
513 volatile UInt16 * address);
514
515 /*!
516 * @function OSBitOrAtomic8
517 *
518 * @abstract
519 * 8-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
520 *
521 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
522 *
523 * @discussion
524 * The OSBitOrAtomic8 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
525 * @param mask The mask to logically or with the value.
526 * @param address The address of the value to update atomically.
527 * @result The value before the bitwise operation.
528 */
529 extern UInt8 OSBitOrAtomic8(
530 UInt32 mask,
531 volatile UInt8 * address);
532
533 /*!
534 * @function OSBitXorAtomic
535 *
536 * @abstract
537 * 32-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
538 *
539 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
540 *
541 * @discussion
542 * The OSBitXorAtomic function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 * @param mask The mask to logically xor with the value.
544 * @param address The 4-byte aligned address of the value to update atomically.
545 * @result The value before the bitwise operation.
546 */
547 extern UInt32 OSBitXorAtomic(
548 UInt32 mask,
549 volatile UInt32 * address);
550 #define OSBitXorAtomic(a, b) \
551 (OSBitXorAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
552
553 /*!
554 * @function OSBitXorAtomic16
555 *
556 * @abstract
557 * 16-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
558 *
559 * @discussion
560 * The OSBitXorAtomic16 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
561 *
562 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 * @param mask The mask to logically xor with the value.
564 * @param address The 2-byte aligned address of the value to update atomically.
565 * @result The value before the bitwise operation.
566 */
567 extern UInt16 OSBitXorAtomic16(
568 UInt32 mask,
569 volatile UInt16 * address);
570
571 /*!
572 * @function OSBitXorAtomic8
573 *
574 * @abstract
575 * 8-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
576 *
577 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
578 *
579 * @discussion
580 * The OSBitXorAtomic8 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 * @param mask The mask to logically xor with the value.
582 * @param address The address of the value to update atomically.
583 * @result The value before the bitwise operation.
584 */
585 extern UInt8 OSBitXorAtomic8(
586 UInt32 mask,
587 volatile UInt8 * address);
588
589 /*!
590 * @function OSTestAndSet
591 *
592 * @abstract
593 * Bit test and set operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
594 *
595 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
596 *
597 * @discussion
598 * The OSTestAndSet function sets a single bit in a byte at a specified address. It returns true if the bit was already set, false otherwise.
599 * @param bit The bit number in the range 0 through 7. Bit 0 is the most significant.
600 * @param startAddress The address of the byte to update atomically.
601 * @result true if the bit was already set, false otherwise.
602 */
603 extern Boolean OSTestAndSet(
604 UInt32 bit,
605 volatile UInt8 * startAddress);
606
607 /*!
608 * @function OSTestAndClear
609 *
610 * @abstract
611 * Bit test and clear operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
612 *
613 * @discussion
614 * The OSTestAndClear function clears a single bit in a byte at a specified address. It returns true if the bit was already clear, false otherwise.
615 *
616 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
617 * @param bit The bit number in the range 0 through 7. Bit 0 is the most significant.
618 * @param startAddress The address of the byte to update atomically.
619 * @result true if the bit was already clear, false otherwise.
620 */
621 extern Boolean OSTestAndClear(
622 UInt32 bit,
623 volatile UInt8 * startAddress);
624
625 /*!
626 * @defined OS_SPINLOCK_INIT
627 *
628 * @abstract
629 * The default value for an OSSpinLock.
630 *
631 * @discussion
632 * The convention is that unlocked is zero, locked is nonzero.
633 */
634 #define OS_SPINLOCK_INIT 0
635
636 /*!
637 * @typedef OSSpinLock
638 *
639 * @abstract
640 * Data type for a spinlock.
641 *
642 * @discussion
643 * You should always initialize a spinlock to OS_SPINLOCK_INIT before using it.
644 */
645 typedef SInt32 OSSpinLock;
646
647 #ifdef PRIVATE
648 /*!
649 * @function OSSpinLockTry
650 *
651 * @abstract
652 * Locks a spinlock if it would not block.
653 *
654 * @discussion
655 * Multiprocessor locks used within the shared memory area between the kernel and event system. These must work in both user and kernel mode.
656 *
657 * @result
658 * Returns false if the lock was already held by another thread, true if it took the lock successfully.
659 */
660 extern Boolean OSSpinLockTry(volatile OSSpinLock * lock);
661
662 /*!
663 * @function OSSpinLockUnlock
664 *
665 * @abstract
666 * Unlocks a spinlock.
667 *
668 * @discussion
669 * Unlocks a spinlock.
670 */
671 extern void OSSpinLockUnlock(volatile OSSpinLock * lock);
672 #endif /* PRIVATE */
673
674 /*!
675 * @function OSSynchronizeIO
676 *
677 * @abstract
678 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices.
679 *
680 * @discussion
681 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices. It executes the eieio instruction on PowerPC processors.
682 */
#if defined(__arm__) || defined(__arm64__)
/* ARM/ARM64: implemented out of line. */
extern void OSSynchronizeIO(void);
#else
/* Other architectures (x86): a no-op.
 * NOTE(review): presumably the architecture's ordering guarantees for
 * uncached I/O accesses make an explicit barrier unnecessary — confirm. */
static __inline__ void
OSSynchronizeIO(void)
{
}
#endif
691
692 #if defined(KERNEL_PRIVATE)
693
694 #if defined(__arm__) || defined(__arm64__)
/* Full memory barrier for ARM/ARM64: "dmb ish" orders all prior loads and
 * stores before all subsequent ones within the inner-shareable domain.
 * The "memory" clobber additionally prevents the compiler from reordering
 * memory accesses across the barrier. */
static inline void
OSMemoryBarrier(void)
{
	__asm__ volatile ("dmb ish" ::: "memory");
}
700 #elif defined(__i386__) || defined(__x86_64__)
701 #if defined(XNU_KERNEL_PRIVATE)
/* Full memory barrier for i386/x86_64: "mfence" serializes all prior loads
 * and stores with respect to all subsequent ones. The "memory" clobber
 * additionally acts as a compiler barrier. */
static inline void
OSMemoryBarrier(void)
{
	__asm__ volatile ("mfence" ::: "memory");
}
707 #endif /* XNU_KERNEL_PRIVATE */
708 #endif
709
710 #endif /* KERNEL_PRIVATE */
711
712 #if defined(__cplusplus)
713 }
714 #endif
715
716 #endif /* ! _OS_OSATOMIC_H */