]> git.saurik.com Git - apple/xnu.git/blob - libkern/libkern/OSAtomic.h
d585c4175d7e8bfbe997800678f966fc269719f1
[apple/xnu.git] / libkern / libkern / OSAtomic.h
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35 #ifndef _OS_OSATOMIC_H
36 #define _OS_OSATOMIC_H
37
38 #include <libkern/OSBase.h>
39
40 #if defined(__cplusplus)
41 extern "C" {
42 #endif
43
#ifdef XNU_KERNEL_PRIVATE
/*
 * The macro __SAFE_CAST_PTR() casts one type of pointer to another type, making sure
 * the data the pointer is referencing is the same size. If it is not, it will cause
 * a division by zero compiler warning. This is to work around "SInt32" being defined
 * as "long" on ILP32 and as "int" on LP64, which would require an explicit cast to
 * "SInt32*" when for instance passing an "int*" to OSAddAtomic() - which masks size
 * mismatches.
 * -- var is used, but sizeof does not evaluate the
 * argument, i.e. we're safe against "++" etc. in var --
 * Note: (var) is parenthesized inside sizeof so that an expression argument
 * binds as a whole; the check itself is purely compile-time (sizeof never
 * evaluates, and 0/1 or the 0/0 diagnostic folds away).
 */
#define __SAFE_CAST_PTR(type, var) (((type)(var))+(0/(sizeof(*(var)) == sizeof(*(type)0) ? 1 : 0)))
#else
#define __SAFE_CAST_PTR(type, var) ((type)(var))
#endif
59
60 /*!
61 * @header
62 *
63 * @abstract
64 * This header declares the OSAtomic group of functions for atomic
65 * reading and updating of values.
66 */
67
68 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
69
70 /*!
71 * @function OSCompareAndSwap64
72 *
73 * @abstract
74 * 64-bit compare and swap operation.
75 *
76 * @discussion
77 * See OSCompareAndSwap.
78 */
79 extern Boolean OSCompareAndSwap64(
80 UInt64 oldValue,
81 UInt64 newValue,
82 volatile UInt64 * address);
83 #define OSCompareAndSwap64(a, b, c) \
84 (OSCompareAndSwap64(a, b, __SAFE_CAST_PTR(volatile UInt64*,c)))
85
86 #endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
87
88 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__)
89
90 /*!
91 * @function OSAddAtomic64
92 *
93 * @abstract
94 * 64-bit atomic add operation.
95 *
96 * @discussion
97 * See OSAddAtomic.
98 */
99 extern SInt64 OSAddAtomic64(
100 SInt64 theAmount,
101 volatile SInt64 * address);
102 #define OSAddAtomic64(a, b) \
103 (OSAddAtomic64(a, __SAFE_CAST_PTR(volatile SInt64*,b)))
104
105 /*!
106 * @function OSIncrementAtomic64
107 *
108 * @abstract
109 * 64-bit increment.
110 *
111 * @discussion
112 * See OSIncrementAtomic.
113 */
114 inline static SInt64 OSIncrementAtomic64(volatile SInt64 * address)
115 {
116 return OSAddAtomic64(1LL, address);
117 }
118
119 /*!
120 * @function OSDecrementAtomic64
121 *
122 * @abstract
123 * 64-bit decrement.
124 *
125 * @discussion
126 * See OSDecrementAtomic.
127 */
128 inline static SInt64 OSDecrementAtomic64(volatile SInt64 * address)
129 {
130 return OSAddAtomic64(-1LL, address);
131 }
132
133 #endif /* defined(__i386__) || defined(__x86_64__) || defined(__arm__) */
134
135 #if XNU_KERNEL_PRIVATE
136 /* Not to be included in headerdoc.
137 *
138 * @function OSAddAtomicLong
139 *
140 * @abstract
141 * 32/64-bit atomic add operation, depending on sizeof(long).
142 *
143 * @discussion
144 * See OSAddAtomic.
145 */
146 extern long OSAddAtomicLong(
147 long theAmount,
148 volatile long * address);
149 #define OSAddAtomicLong(a, b) \
150 (OSAddAtomicLong(a, __SAFE_CAST_PTR(volatile long*,b)))
151
/* Not to be included in headerdoc.
 *
 * @function OSIncrementAtomicLong
 *
 * @abstract
 * 32/64-bit increment, depending on sizeof(long)
 *
 * @discussion
 * See OSIncrementAtomic.
 */
inline static long
OSIncrementAtomicLong(volatile long * address)
{
	/* Increment is expressed as an atomic add of one. */
	long step = 1;
	return OSAddAtomicLong(step, address);
}
166
/* Not to be included in headerdoc.
 *
 * @function OSDecrementAtomicLong
 *
 * @abstract
 * 32/64-bit decrement, depending on sizeof(long)
 *
 * @discussion
 * See OSDecrementAtomic.
 */
inline static long
OSDecrementAtomicLong(volatile long * address)
{
	/* Decrement is expressed as an atomic add of minus one. */
	long step = -1;
	return OSAddAtomicLong(step, address);
}
179 #endif /* XNU_KERNEL_PRIVATE */
180
181 /*!
182 * @function OSCompareAndSwap
183 *
184 * @abstract
185 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
186 *
187 * @discussion
188 * The OSCompareAndSwap function compares the value at the specified address with oldVal. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwap returns true if newValue is written to the address; otherwise, it returns false.
189 *
190 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
191 *
192 * @param oldValue The value to compare at address.
193 * @param newValue The value to write to address if oldValue compares true.
194 * @param address The 4-byte aligned address of the data to update atomically.
195 * @result true if newValue was written to the address.
196 */
197 extern Boolean OSCompareAndSwap(
198 UInt32 oldValue,
199 UInt32 newValue,
200 volatile UInt32 * address);
201 #define OSCompareAndSwap(a, b, c) \
202 (OSCompareAndSwap(a, b, __SAFE_CAST_PTR(volatile UInt32*,c)))
203
204 /*!
205 * @function OSCompareAndSwapPtr
206 *
207 * @abstract
208 * Compare and swap operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
209 *
210 * @discussion
211 * The OSCompareAndSwapPtr function compares the pointer-sized value at the specified address with oldVal. The value of newValue is written to the address only if oldValue and the value at the address are equal. OSCompareAndSwapPtr returns true if newValue is written to the address; otherwise, it returns false.
212 *
213 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
214 * @param oldValue The pointer value to compare at address.
215 * @param newValue The pointer value to write to address if oldValue compares true.
216 * @param address The pointer-size aligned address of the data to update atomically.
217 * @result true if newValue was written to the address.
218 */
219 extern Boolean OSCompareAndSwapPtr(
220 void * oldValue,
221 void * newValue,
222 void * volatile * address);
223 #define OSCompareAndSwapPtr(a, b, c) \
224 (OSCompareAndSwapPtr(a, b, __SAFE_CAST_PTR(void * volatile *,c)))
225
226 /*!
227 * @function OSAddAtomic
228 *
229 * @abstract
230 * 32-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
231 *
232 * @discussion
233 * The OSAddAtomic function adds the specified amount to the value at the specified address and returns the original value.
234 *
235 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
236 * @param amount The amount to add.
237 * @param address The 4-byte aligned address of the value to update atomically.
238 * @result The value before the addition
239 */
240 extern SInt32 OSAddAtomic(
241 SInt32 amount,
242 volatile SInt32 * address);
243 #define OSAddAtomic(a, b) \
244 (OSAddAtomic(a, __SAFE_CAST_PTR(volatile SInt32*,b)))
245
246 /*!
247 * @function OSAddAtomic16
248 *
249 * @abstract
250 * 16-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
251 *
252 * @discussion
253 * The OSAddAtomic16 function adds the specified amount to the value at the specified address and returns the original value.
254 *
255 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
256 * @param amount The amount to add.
257 * @param address The 2-byte aligned address of the value to update atomically.
258 * @result The value before the addition
259 */
260 extern SInt16 OSAddAtomic16(
261 SInt32 amount,
262 volatile SInt16 * address);
263
264 /*!
265 * @function OSAddAtomic8
266 *
267 * @abstract
268 * 8-bit add operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
269 *
270 * @discussion
271 * The OSAddAtomic8 function adds the specified amount to the value at the specified address and returns the original value.
272 *
273 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
274 * @param amount The amount to add.
275 * @param address The address of the value to update atomically.
276 * @result The value before the addition.
277 */
278 extern SInt8 OSAddAtomic8(
279 SInt32 amount,
280 volatile SInt8 * address);
281
282 /*!
283 * @function OSIncrementAtomic
284 *
285 * @abstract
286 * 32-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
287 *
288 * @discussion
289 * The OSIncrementAtomic function increments the value at the specified address by one and returns the original value.
290 *
291 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device.
292 * @param address The 4-byte aligned address of the value to update atomically.
293 * @result The value before the increment.
294 */
295 extern SInt32 OSIncrementAtomic(volatile SInt32 * address);
296 #define OSIncrementAtomic(a) \
297 (OSIncrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))
298
299 /*!
300 * @function OSIncrementAtomic16
301 *
302 * @abstract
303 * 16-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
304 *
305 * @discussion
306 * The OSIncrementAtomic16 function increments the value at the specified address by one and returns the original value.
307 *
308 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
309 * @param address The 2-byte aligned address of the value to update atomically.
310 * @result The value before the increment.
311 */
312 extern SInt16 OSIncrementAtomic16(volatile SInt16 * address);
313
314 /*!
315 * @function OSIncrementAtomic8
316 *
317 * @abstract
318 * 8-bit increment operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
319 *
320 * @discussion
321 * The OSIncrementAtomic8 function increments the value at the specified address by one and returns the original value.
322 *
323 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
324 * @param address The address of the value to update atomically.
325 * @result The value before the increment.
326 */
327 extern SInt8 OSIncrementAtomic8(volatile SInt8 * address);
328
329 /*!
330 * @function OSDecrementAtomic
331 *
332 * @abstract
333 * 32-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
334 *
335 * @discussion
336 * The OSDecrementAtomic function decrements the value at the specified address by one and returns the original value.
337 *
338 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
339 * @param address The 4-byte aligned address of the value to update atomically.
340 * @result The value before the decrement.
341 */
342 extern SInt32 OSDecrementAtomic(volatile SInt32 * address);
343 #define OSDecrementAtomic(a) \
344 (OSDecrementAtomic(__SAFE_CAST_PTR(volatile SInt32*,a)))
345
346 /*!
347 * @function OSDecrementAtomic16
348 *
349 * @abstract
350 * 16-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
351 *
352 * @discussion
353 * The OSDecrementAtomic16 function decrements the value at the specified address by one and returns the original value.
354 *
355 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
356 * @param address The 2-byte aligned address of the value to update atomically.
357 * @result The value before the decrement.
358 */
359 extern SInt16 OSDecrementAtomic16(volatile SInt16 * address);
360
361 /*!
362 * @function OSDecrementAtomic8
363 *
364 * @abstract
365 * 8-bit decrement operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
366 *
367 * @discussion
368 * The OSDecrementAtomic8 function decrements the value at the specified address by one and returns the original value.
369 *
370 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
371 * @param address The address of the value to update atomically.
372 * @result The value before the decrement.
373 */
374 extern SInt8 OSDecrementAtomic8(volatile SInt8 * address);
375
376 /*!
377 * @function OSBitAndAtomic
378 *
379 * @abstract
380 * 32-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
381 *
382 * @discussion
383 * The OSBitAndAtomic function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
384 *
385 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
386 * @param mask The mask to logically and with the value.
387 * @param address The 4-byte aligned address of the value to update atomically.
388 * @result The value before the bitwise operation
389 */
390 extern UInt32 OSBitAndAtomic(
391 UInt32 mask,
392 volatile UInt32 * address);
393 #define OSBitAndAtomic(a, b) \
394 (OSBitAndAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
395
396 /*!
397 * @function OSBitAndAtomic16
398 *
399 * @abstract
400 * 16-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
401 *
402 * @discussion
403 * The OSBitAndAtomic16 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
404 *
405 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
406 * @param mask The mask to logically and with the value.
407 * @param address The 2-byte aligned address of the value to update atomically.
408 * @result The value before the bitwise operation.
409 */
410 extern UInt16 OSBitAndAtomic16(
411 UInt32 mask,
412 volatile UInt16 * address);
413
414 /*!
415 * @function OSBitAndAtomic8
416 *
417 * @abstract
418 * 8-bit logical and operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
419 *
420 * @discussion
421 * The OSBitAndAtomic8 function logically ands the bits of the specified mask into the value at the specified address and returns the original value.
422 *
423 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
424 * @param mask The mask to logically and with the value.
425 * @param address The address of the value to update atomically.
426 * @result The value before the bitwise operation.
427 */
428 extern UInt8 OSBitAndAtomic8(
429 UInt32 mask,
430 volatile UInt8 * address);
431
432 /*!
433 * @function OSBitOrAtomic
434 *
435 * @abstract
436 * 32-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
437 *
438 * @discussion
439 * The OSBitOrAtomic function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
440 *
441 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
442 * @param mask The mask to logically or with the value.
443 * @param address The 4-byte aligned address of the value to update atomically.
444 * @result The value before the bitwise operation.
445 */
446 extern UInt32 OSBitOrAtomic(
447 UInt32 mask,
448 volatile UInt32 * address);
449 #define OSBitOrAtomic(a, b) \
450 (OSBitOrAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
451
452 /*!
453 * @function OSBitOrAtomic16
454 *
455 * @abstract
456 * 16-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
457 *
458 * @discussion
459 * The OSBitOrAtomic16 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
460 *
461 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
462 * @param mask The mask to logically or with the value.
463 * @param address The 2-byte aligned address of the value to update atomically.
464 * @result The value before the bitwise operation.
465 */
466 extern UInt16 OSBitOrAtomic16(
467 UInt32 mask,
468 volatile UInt16 * address);
469
470 /*!
471 * @function OSBitOrAtomic8
472 *
473 * @abstract
474 * 8-bit logical or operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
475 *
476 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
477 *
478 * @discussion
479 * The OSBitOrAtomic8 function logically ors the bits of the specified mask into the value at the specified address and returns the original value.
480 * @param mask The mask to logically or with the value.
481 * @param address The address of the value to update atomically.
482 * @result The value before the bitwise operation.
483 */
484 extern UInt8 OSBitOrAtomic8(
485 UInt32 mask,
486 volatile UInt8 * address);
487
488 /*!
489 * @function OSBitXorAtomic
490 *
491 * @abstract
492 * 32-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
493 *
494 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
495 *
496 * @discussion
497 * The OSBitXorAtomic function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
498 * @param mask The mask to logically xor with the value.
499 * @param address The 4-byte aligned address of the value to update atomically.
500 * @result The value before the bitwise operation.
501 */
502 extern UInt32 OSBitXorAtomic(
503 UInt32 mask,
504 volatile UInt32 * address);
505 #define OSBitXorAtomic(a, b) \
506 (OSBitXorAtomic(a, __SAFE_CAST_PTR(volatile UInt32*,b)))
507
508 /*!
509 * @function OSBitXorAtomic16
510 *
511 * @abstract
512 * 16-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
513 *
514 * @discussion
515 * The OSBitXorAtomic16 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
516 *
517 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
518 * @param mask The mask to logically xor with the value.
519 * @param address The 2-byte aligned address of the value to update atomically.
520 * @result The value before the bitwise operation.
521 */
522 extern UInt16 OSBitXorAtomic16(
523 UInt32 mask,
524 volatile UInt16 * address);
525
526 /*!
527 * @function OSBitXorAtomic8
528 *
529 * @abstract
530 * 8-bit logical xor operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
531 *
532 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
533 *
534 * @discussion
535 * The OSBitXorAtomic8 function logically xors the bits of the specified mask into the value at the specified address and returns the original value.
536 * @param mask The mask to logically xor with the value.
537 * @param address The address of the value to update atomically.
538 * @result The value before the bitwise operation.
539 */
540 extern UInt8 OSBitXorAtomic8(
541 UInt32 mask,
542 volatile UInt8 * address);
543
544 /*!
545 * @function OSTestAndSet
546 *
547 * @abstract
548 * Bit test and set operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
549 *
550 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
551 *
552 * @discussion
553 * The OSTestAndSet function sets a single bit in a byte at a specified address. It returns true if the bit was already set, false otherwise.
554 * @param bit The bit number in the range 0 through 7.
555 * @param startAddress The address of the byte to update atomically.
556 * @result true if the bit was already set, false otherwise.
557 */
558 extern Boolean OSTestAndSet(
559 UInt32 bit,
560 volatile UInt8 * startAddress);
561
562 /*!
563 * @function OSTestAndClear
564 *
565 * @abstract
566 * Bit test and clear operation, performed atomically with respect to all devices that participate in the coherency architecture of the platform.
567 *
568 * @discussion
569 * The OSTestAndClear function clears a single bit in a byte at a specified address. It returns true if the bit was already clear, false otherwise.
570 *
571 * This function guarantees atomicity only with main system memory. It is specifically unsuitable for use on noncacheable memory such as that in devices; this function cannot guarantee atomicity, for example, on memory mapped from a PCI device. Additionally, this function incorporates a memory barrier on systems with weakly-ordered memory architectures.
572 * @param bit The bit number in the range 0 through 7.
573 * @param startAddress The address of the byte to update atomically.
574 * @result true if the bit was already clear, false otherwise.
575 */
576 extern Boolean OSTestAndClear(
577 UInt32 bit,
578 volatile UInt8 * startAddress);
579
580 /*!
581 * @defined OS_SPINLOCK_INIT
582 *
583 * @abstract
584 * The default value for an OSSpinLock.
585 *
586 * @discussion
587 * The convention is that unlocked is zero, locked is nonzero.
588 */
589 #define OS_SPINLOCK_INIT 0
590
591 /*!
592 * @typedef OSSpinLock
593 *
594 * @abstract
595 * Data type for a spinlock.
596 *
597 * @discussion
598 * You should always initialize a spinlock to OS_SPINLOCK_INIT before using it.
599 */
600 typedef SInt32 OSSpinLock;
601
602 #ifdef PRIVATE
603 /*!
604 * @function OSSpinLockTry
605 *
606 * @abstract
607 * Locks a spinlock if it would not block.
608 *
609 * @discussion
610 * Multiprocessor locks used within the shared memory area between the kernel and event system. These must work in both user and kernel mode.
611 *
612 * @result
613 * Returns false if the lock was already held by another thread, true if it took the lock successfully.
614 */
615 extern Boolean OSSpinLockTry(volatile OSSpinLock * lock);
616
617 /*!
618 * @function OSSpinLockUnlock
619 *
620 * @abstract
621 * Unlocks a spinlock.
622 *
623 * @discussion
624 * Unlocks a spinlock.
625 */
626 extern void OSSpinLockUnlock(volatile OSSpinLock * lock);
627 #endif /* PRIVATE */
628
/*!
 * @function OSSynchronizeIO
 *
 * @abstract
 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices.
 *
 * @discussion
 * The OSSynchronizeIO routine ensures orderly load and store operations to noncached memory mapped I/O devices. It executes the eieio instruction on PowerPC processors.
 */
static __inline__ void
OSSynchronizeIO(void)
{
	/* Intentionally empty: no instruction is emitted here on the
	 * architectures this header currently builds for. */
}
641 #if defined(XNU_KERNEL_PRIVATE)
642 #if defined(__i386__) || defined(__x86_64__)
/* Full memory barrier for x86: "mfence" orders prior loads and stores against
 * later ones at the hardware level, and the "memory" clobber prevents the
 * compiler from reordering memory accesses across this point. Only defined
 * for i386/x86_64 under XNU_KERNEL_PRIVATE; other configurations get no
 * OSMemoryBarrier from this header. */
643 static inline void OSMemoryBarrier(void) {
644 	__asm__ volatile("mfence" ::: "memory");
645 }
646 #endif
647 #endif /*XNU_KERNEL_PRIVATE */
648 #if defined(__cplusplus)
649 }
650 #endif
651
652 #endif /* ! _OS_OSATOMIC_H */