/*
 * Source: apple/xnu — libkern/libkern/OSAtomic.h
 * (blob 98e0eb99b145b63895f10000ca9647b6a6e205be)
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35 #ifndef _OS_OSATOMIC_H
36 #define _OS_OSATOMIC_H
37
38 #include <libkern/OSBase.h>
39
40 #if defined(__cplusplus)
41 extern "C" {
42 #endif
43
#ifdef XNU_KERNEL_PRIVATE
/*
 * The macro __SAFE_CAST_PTR() casts one type of pointer to another type, making sure
 * the data the pointer is referencing is the same size. If it is not, it will cause
 * a division by zero compiler warning. This is to work around "SInt32" being defined
 * as "long" on ILP32 and as "int" on LP64, which would require an explicit cast to
 * "SInt32*" when for instance passing an "int*" to OSAddAtomic() - which masks size
 * mismatches.
 * -- var is used, but sizeof does not evaluate the
 * argument, i.e. we're safe against "++" etc. in var --
 */
/* 'var' is fully parenthesized inside sizeof so expression arguments bind correctly. */
#define __SAFE_CAST_PTR(type, var) (((type)(var))+(0/(sizeof(*(var)) == sizeof(*(type)0) ? 1 : 0)))
#else
/* Outside XNU_KERNEL_PRIVATE the macro is a plain cast with no size check. */
#define __SAFE_CAST_PTR(type, var) ((type)(var))
#endif
59
60 /*!
61 * @header
62 *
63 * @abstract
64 * This header declares the OSAtomic group of functions for atomic
65 * reading and updating of values.
66 */
67
#if defined(__i386__) || defined(__x86_64__) || defined(__arm__)

/*!
 * @function OSCompareAndSwap64
 *
 * @abstract
 * 64-bit compare and swap operation.
 *
 * @discussion
 * See OSCompareAndSwap.
 */
extern Boolean OSCompareAndSwap64(
	UInt64            oldValue,
	UInt64            newValue,
	volatile UInt64 * address);
/* Wrapper macro: rejects pointers whose referent is not 64 bits wide (see __SAFE_CAST_PTR). */
#define OSCompareAndSwap64(a, b, c) \
	(OSCompareAndSwap64(a, b, __SAFE_CAST_PTR(volatile UInt64*,c)))

#endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
87
/*!
 * @function OSAddAtomic64
 *
 * @abstract
 * 64-bit atomic add operation.
 *
 * @discussion
 * See OSAddAtomic.
 */
extern SInt64 OSAddAtomic64(
	SInt64            theAmount,
	volatile SInt64 * address);
/* Wrapper macro: rejects pointers whose referent is not 64 bits wide (see __SAFE_CAST_PTR). */
#define OSAddAtomic64(a, b) \
	(OSAddAtomic64(a, __SAFE_CAST_PTR(volatile SInt64*,b)))
102
103 /*!
104 * @function OSIncrementAtomic64
105 *
106 * @abstract
107 * 64-bit increment.
108 *
109 * @discussion
110 * See OSIncrementAtomic.
111 */
112 inline static SInt64 OSIncrementAtomic64(volatile SInt64 * address)
113 {
114 return OSAddAtomic64(1LL, address);
115 }
116
117 /*!
118 * @function OSDecrementAtomic64
119 *
120 * @abstract
121 * 64-bit decrement.
122 *
123 * @discussion
124 * See OSDecrementAtomic.
125 */
126 inline static SInt64 OSDecrementAtomic64(volatile SInt64 * address)
127 {
128 return OSAddAtomic64(-1LL, address);
129 }
130
131 #if XNU_KERNEL_PRIVATE
132 /* Not to be included in headerdoc.
133 *
134 * @function OSAddAtomicLong
135 *
136 * @abstract
137 * 32/64-bit atomic add operation, depending on sizeof(long).
138 *
139 * @discussion
140 * See OSAddAtomic.
141 */
142 extern long OSAddAtomicLong(
143 long theAmount,
144 volatile long * address);
145 #define OSAddAtomicLong(a, b) \
146 (OSAddAtomicLong(a, __SAFE_CAST_PTR(volatile long*,b)))
147
/* Not to be included in headerdoc.
 *
 * @function OSIncrementAtomicLong
 *
 * @abstract
 * 32/64-bit increment, depending on sizeof(long)
 *
 * @discussion
 * Atomically adds one to the long at the given address and returns the
 * previous value. See OSIncrementAtomic.
 */
inline static long OSIncrementAtomicLong(volatile long * address)
{
	const long one = 1L;
	return OSAddAtomicLong(one, address);
}
162
/* Not to be included in headerdoc.
 *
 * @function OSDecrementAtomicLong
 *
 * @abstract
 * 32/64-bit decrement, depending on sizeof(long)
 *
 * @discussion
 * Atomically subtracts one from the long at the given address and
 * returns the previous value. See OSDecrementAtomic.
 */
inline static long OSDecrementAtomicLong(volatile long * address)
{
	const long minus_one = -1L;
	return OSAddAtomicLong(minus_one, address);
}
175 #endif /* XNU_KERNEL_PRIVATE */
176
/*!
 * @function OSCompareAndSwap
 *
 * @abstract
 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSCompareAndSwap function compares the value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 *
 * @param oldValue The value to compare at address.
 * @param newValue The value to write to address if oldValue compares true.
 * @param address The 4-byte aligned address of the data to update atomically.
 * @result true if newValue was written to the address.
 */
extern Boolean OSCompareAndSwap(
	UInt32            oldValue,
	UInt32            newValue,
	volatile UInt32 * address);
/* Wrapper macro: rejects pointers whose referent is not 32 bits wide (see __SAFE_CAST_PTR). */
#define OSCompareAndSwap(a, b, c) \
	(OSCompareAndSwap(a, b, __SAFE_CAST_PTR(volatile UInt32*,c)))

/*!
 * @function OSCompareAndSwapPtr
 *
 * @abstract
 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSCompareAndSwapPtr function compares the pointer-sized value at the specified address with oldValue. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwapPtr returns true if newValue is written to the address; otherwise, it returns false.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param oldValue The pointer value to compare at address.
 * @param newValue The pointer value to write to address if oldValue compares true.
 * @param address The pointer-size aligned address of the data to update atomically.
 * @result true if newValue was written to the address.
 */
extern Boolean OSCompareAndSwapPtr(
	void *            oldValue,
	void *            newValue,
	void * volatile * address);
/* Wrapper macro: rejects pointers whose referent is not pointer-sized (see __SAFE_CAST_PTR). */
#define OSCompareAndSwapPtr(a, b, c) \
	(OSCompareAndSwapPtr(a, b, __SAFE_CAST_PTR(void * volatile *,c)))
221
/*!
 * @function OSAddAtomic
 *
 * @abstract
 * 32-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSAddAtomic function adds the specified amount to the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param amount The amount to add.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the addition
 */
extern SInt32 OSAddAtomic(
	SInt32            amount,
	volatile SInt32 * address);
/* Wrapper macro: rejects pointers whose referent is not 32 bits wide (see __SAFE_CAST_PTR). */
#define OSAddAtomic(a, b) \
	(OSAddAtomic(a, __SAFE_CAST_PTR(volatile SInt32*,b)))

/*!
 * @function OSAddAtomic16
 *
 * @abstract
 * 16-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSAddAtomic16 function adds the specified amount to the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param amount The amount to add. Note: declared SInt32, not SInt16.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the addition
 */
/* Note: no size-checking wrapper macro is provided for the 16-bit variant. */
extern SInt16 OSAddAtomic16(
	SInt32            amount,
	volatile SInt16 * address);

/*!
 * @function OSAddAtomic8
 *
 * @abstract
 * 8-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSAddAtomic8 function adds the specified amount to the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param amount The amount to add. Note: declared SInt32, not SInt8.
 * @param address The address of the value to update atomically.
 * @result The value before the addition.
 */
/* Note: no size-checking wrapper macro is provided for the 8-bit variant. */
extern SInt8 OSAddAtomic8(
	SInt32           amount,
	volatile SInt8 * address);
277
/*!
 * @function OSIncrementAtomic
 *
 * @abstract
 * 32-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSIncrementAtomic function increments the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
 *
 * NOTE(review): unlike the 16/8-bit variants below, this comment does not claim a
 * memory barrier on weakly-ordered architectures — confirm against the implementation.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the increment.
 */
extern SInt32 OSIncrementAtomic(volatile SInt32 * address);
/* Wrapper macro: rejects pointers whose referent is not 32 bits wide (see __SAFE_CAST_PTR). */
#define OSIncrementAtomic(a) \
	(OSIncrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))

/*!
 * @function OSIncrementAtomic16
 *
 * @abstract
 * 16-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSIncrementAtomic16 function increments the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the increment.
 */
extern SInt16 OSIncrementAtomic16(volatile SInt16 * address);

/*!
 * @function OSIncrementAtomic8
 *
 * @abstract
 * 8-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSIncrementAtomic8 function increments the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param address The address of the value to update atomically.
 * @result The value before the increment.
 */
extern SInt8 OSIncrementAtomic8(volatile SInt8 * address);
324
/*!
 * @function OSDecrementAtomic
 *
 * @abstract
 * 32-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSDecrementAtomic function decrements the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the decrement.
 */
extern SInt32 OSDecrementAtomic(volatile SInt32 * address);
/* Wrapper macro: rejects pointers whose referent is not 32 bits wide (see __SAFE_CAST_PTR). */
#define OSDecrementAtomic(a) \
	(OSDecrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))

/*!
 * @function OSDecrementAtomic16
 *
 * @abstract
 * 16-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSDecrementAtomic16 function decrements the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the decrement.
 */
extern SInt16 OSDecrementAtomic16(volatile SInt16 * address);

/*!
 * @function OSDecrementAtomic8
 *
 * @abstract
 * 8-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSDecrementAtomic8 function decrements the value at the specified address by one and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param address The address of the value to update atomically.
 * @result The value before the decrement.
 */
extern SInt8 OSDecrementAtomic8(volatile SInt8 * address);
371
/*!
 * @function OSBitAndAtomic
 *
 * @abstract
 * 32-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitAndAtomic function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param mask The mask to logically and with the value.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation
 */
extern UInt32 OSBitAndAtomic(
	UInt32            mask,
	volatile UInt32 * address);
/* Wrapper macro: rejects pointers whose referent is not 32 bits wide (see __SAFE_CAST_PTR). */
#define OSBitAndAtomic(a, b) \
	(OSBitAndAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))

/*!
 * @function OSBitAndAtomic16
 *
 * @abstract
 * 16-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitAndAtomic16 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param mask The mask to logically and with the value. Note: declared UInt32.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt16 OSBitAndAtomic16(
	UInt32            mask,
	volatile UInt16 * address);

/*!
 * @function OSBitAndAtomic8
 *
 * @abstract
 * 8-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitAndAtomic8 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param mask The mask to logically and with the value. Note: declared UInt32.
 * @param address The address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt8 OSBitAndAtomic8(
	UInt32           mask,
	volatile UInt8 * address);
427
/*!
 * @function OSBitOrAtomic
 *
 * @abstract
 * 32-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitOrAtomic function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param mask The mask to logically or with the value.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt32 OSBitOrAtomic(
	UInt32            mask,
	volatile UInt32 * address);
/* Wrapper macro: rejects pointers whose referent is not 32 bits wide (see __SAFE_CAST_PTR). */
#define OSBitOrAtomic(a, b) \
	(OSBitOrAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))

/*!
 * @function OSBitOrAtomic16
 *
 * @abstract
 * 16-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitOrAtomic16 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param mask The mask to logically or with the value.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt16 OSBitOrAtomic16(
	UInt32            mask,
	volatile UInt16 * address);

/*!
 * @function OSBitOrAtomic8
 *
 * @abstract
 * 8-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitOrAtomic8 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param mask The mask to logically or with the value.
 * @param address The address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt8 OSBitOrAtomic8(
	UInt32           mask,
	volatile UInt8 * address);
483
/*!
 * @function OSBitXorAtomic
 *
 * @abstract
 * 32-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitXorAtomic function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param mask The mask to logically xor with the value.
 * @param address The 4-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt32 OSBitXorAtomic(
	UInt32            mask,
	volatile UInt32 * address);
/* Wrapper macro: rejects pointers whose referent is not 32 bits wide (see __SAFE_CAST_PTR). */
#define OSBitXorAtomic(a, b) \
	(OSBitXorAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))

/*!
 * @function OSBitXorAtomic16
 *
 * @abstract
 * 16-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitXorAtomic16 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param mask The mask to logically xor with the value.
 * @param address The 2-byte aligned address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt16 OSBitXorAtomic16(
	UInt32            mask,
	volatile UInt16 * address);

/*!
 * @function OSBitXorAtomic8
 *
 * @abstract
 * 8-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSBitXorAtomic8 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param mask The mask to logically xor with the value.
 * @param address The address of the value to update atomically.
 * @result The value before the bitwise operation.
 */
extern UInt8 OSBitXorAtomic8(
	UInt32           mask,
	volatile UInt8 * address);
539
/*!
 * @function OSTestAndSet
 *
 * @abstract
 * Bit test and set operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSTestAndSet function sets a single bit in a byte at a specified address. It returns true if the bit was already set, false otherwise.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param bit The bit number in the range 0 through 7.
 * @param startAddress The address of the byte to update atomically.
 * @result true if the bit was already set, false otherwise.
 */
extern Boolean OSTestAndSet(
	UInt32           bit,
	volatile UInt8 * startAddress);

/*!
 * @function OSTestAndClear
 *
 * @abstract
 * Bit test and clear operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
 *
 * @discussion
 * The OSTestAndClear function clears a single bit in a byte at a specified address. It returns true if the bit was already clear, false otherwise.
 *
 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
 * @param bit The bit number in the range 0 through 7.
 * @param startAddress The address of the byte to update atomically.
 * @result true if the bit was already clear, false otherwise.
 */
extern Boolean OSTestAndClear(
	UInt32           bit,
	volatile UInt8 * startAddress);
575
/*!
 * @defined OS_SPINLOCK_INIT
 *
 * @abstract
 * The default value for an OSSpinLock.
 *
 * @discussion
 * The convention is that unlocked is zero, locked is nonzero.
 */
#define OS_SPINLOCK_INIT 0

/*!
 * @typedef OSSpinLock
 *
 * @abstract
 * Data type for a spinlock.
 *
 * @discussion
 * You should always initialize a spinlock to OS_SPINLOCK_INIT before using it.
 */
/* A signed 32-bit word; zero means unlocked (see OS_SPINLOCK_INIT above). */
typedef SInt32 OSSpinLock;
597
#ifdef PRIVATE
/*!
 * @function OSSpinLockTry
 *
 * @abstract
 * Locks a spinlock if it would not block.
 *
 * @discussion
 * Multiprocessor locks used within the shared memory area between the kernel and event system. These must work in both user and kernel mode.
 *
 * @result
 * Returns false if the lock was already held by another thread, true if it took the lock successfully.
 */
extern Boolean OSSpinLockTry(volatile OSSpinLock * lock);

/*!
 * @function OSSpinLockUnlock
 *
 * @abstract
 * Unlocks a spinlock.
 *
 * @discussion
 * Releases a spinlock previously acquired with OSSpinLockTry.
 */
extern void OSSpinLockUnlock(volatile OSSpinLock * lock);
#endif /* PRIVATE */
624
/*!
 * @function OSSynchronizeIO
 *
 * @abstract
 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices.
 *
 * @discussion
 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices. It executes the eieio instruction on PowerPC processors.
 */
static __inline__ void OSSynchronizeIO(void)
{
	/* NOTE(review): empty body — on the architectures this header targets this
	 * compiles to a no-op; the eieio behavior above applies to PowerPC only. */
}
#if defined(XNU_KERNEL_PRIVATE)
#if defined(__i386__) || defined(__x86_64__)
/* Full hardware memory barrier: MFENCE orders all preceding loads and stores
 * before any subsequent ones. The "memory" clobber additionally prevents the
 * compiler from reordering memory accesses across this point. */
static inline void OSMemoryBarrier(void) {
	__asm__ volatile("mfence" ::: "memory");
}
#endif
#endif /* XNU_KERNEL_PRIVATE */
644 #if defined(__cplusplus)
645 }
646 #endif
647
648 #endif /* ! _OS_OSATOMIC_H */