/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <architecture/i386/asm_help.h>
#include "os/internal_asm.h"

#define ATOMIC_RET_ORIG	0
#define ATOMIC_RET_NEW	1
// compare and exchange 32-bit
// xchg32 <new>, <dst>: on entry %eax holds the expected old value
.macro xchg32
	lock
	cmpxchgl	$0, ($1)
.endmacro

// compare and exchange 64-bit
// xchg64 <dst>: %edx:%eax holds the expected old value, %ecx:%ebx the new
.macro xchg64
	lock
	cmpxchg8b	($0)
.endmacro
// int32_t OSAtomicAdd32(int32_t theAmount, volatile int32_t *theValue);
#define ATOMIC_ARITHMETIC(instr, orig) \
	movl	8(%esp), %ecx	/* load ptr to operand (2nd arg) into ecx */ ;\
	movl	(%ecx), %eax	/* load current value at (%ecx) into eax */ ;\
1:	movl	4(%esp), %edx	/* load 1st arg into edx */ ;\
	instr	%eax, %edx	/* do the operation */ ;\
	xchg32	%edx, %ecx	/* old in %eax, new in %edx, exchange into (%ecx) */ ;\
	jnz	1b		/* go back if we failed to exchange */ ;\
	.if orig == ATOMIC_RET_NEW ;\
	movl	%edx, %eax	/* return new value */ ;\
	.endif
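
/*
 * For reference, a rough C equivalent of the ATOMIC_ARITHMETIC loop above
 * (a sketch, not part of the original source; `op` stands in for the
 * macro's instr argument):
 *
 *	int32_t old, new;
 *	do {
 *		old = *theValue;		// current value
 *		new = old op theAmount;		// apply the operation
 *	} while (!OSAtomicCompareAndSwap32(old, new, theValue));
 *	return new;				// or old, for the Orig variants
 */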
// bool OSAtomicTestAndSet(uint32_t n, volatile void *theAddress);
#define ATOMIC_BIT_OP(instr) \
	movl	4(%esp), %eax	/* load bit number (1st arg) */ ;\
	movl	8(%esp), %edx	/* load base address (2nd arg) */ ;\
	shldl	$3,%edx,%ecx	/* save top 3 bits of address in %ecx */ ;\
	shll	$3,%edx		/* convert byte address to bit address */ ;\
	xorl	$7,%eax		/* bit position is numbered big endian so convert to little endian */ ;\
	addl	%eax,%edx	/* generate bit address */ ;\
	adcl	$0,%ecx		/* handle carry out of lower half of address */ ;\
	movl	%edx,%eax	/* copy lower half of bit address */ ;\
	andl	$31,%eax	/* keep bit offset in range 0..31 */ ;\
	xorl	%eax,%edx	/* 4-byte align address */ ;\
	shrdl	$3,%ecx,%edx	/* restore 32-bit byte address in %edx */ ;\
	lock			;\
	instr	%eax, (%edx)	/* bit operation sets CF to previous value */ ;\
	setc	%al		;\
	movzbl	%al,%eax	// widen in case caller assumes we return an int
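
/*
 * Usage sketch (hypothetical variables): because bit positions are numbered
 * big-endian within each byte, bit 0 is the 0x80 bit of the byte at
 * theAddress, bit 7 is the 0x01 bit of that byte, and bit 8 is the 0x80 bit
 * of the following byte.
 *
 *	#include <libkern/OSAtomic.h>
 *
 *	static volatile uint8_t flags[4];
 *	bool was_set = OSAtomicTestAndSet(0, flags);	// sets flags[0] |= 0x80
 *	// was_set is the bit's previous value, here false on the first call
 */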
// int64_t OSAtomicAdd64(int64_t theAmount, volatile int64_t *theValue);
#define ATOMIC_ADD64() \
	pushl	%ebx		;\
	pushl	%esi		;\
	movl	20(%esp), %esi	/* load ptr to operand */ ;\
	movl	0(%esi), %eax	/* load low half of current value */ ;\
	movl	4(%esi), %edx	/* load high half of current value */ ;\
1:	movl	12(%esp), %ebx	/* load low half of amount */ ;\
	movl	16(%esp), %ecx	/* load high half of amount */ ;\
	addl	%eax, %ebx	/* compute new value in %ecx:%ebx */ ;\
	adcl	%edx, %ecx	;\
	xchg64	%esi		/* exchange into (%esi) if still unchanged */ ;\
	jnz	1b		/* go back if we failed to exchange */ ;\
	movl	%ebx, %eax	/* return new value in %edx:%eax */ ;\
	movl	%ecx, %edx	;\
	popl	%esi		;\
	popl	%ebx
// int64_t OSAtomicIncrement64(volatile int64_t *theValue);
#define ATOMIC_INC64() \
	pushl	%ebx		;\
	pushl	%esi		;\
	movl	12(%esp), %esi	/* load ptr to operand */ ;\
	movl	0(%esi), %eax	/* load low half of current value */ ;\
	movl	4(%esi), %edx	/* load high half of current value */ ;\
1:	movl	%eax, %ebx	;\
	movl	%edx, %ecx	;\
	addl	$1, %ebx	/* compute new value in %ecx:%ebx */ ;\
	adcl	$0, %ecx	;\
	xchg64	%esi		/* exchange into (%esi) if still unchanged */ ;\
	jnz	1b		/* go back if we failed to exchange */ ;\
	movl	%ebx, %eax	/* return new value in %edx:%eax */ ;\
	movl	%ecx, %edx	;\
	popl	%esi		;\
	popl	%ebx

// int64_t OSAtomicDecrement64(volatile int64_t *theValue);
#define ATOMIC_DEC64() \
	pushl	%ebx		;\
	pushl	%esi		;\
	movl	12(%esp), %esi	/* load ptr to operand */ ;\
	movl	0(%esi), %eax	/* load low half of current value */ ;\
	movl	4(%esi), %edx	/* load high half of current value */ ;\
1:	movl	%eax, %ebx	;\
	movl	%edx, %ecx	;\
	subl	$1, %ebx	/* compute new value in %ecx:%ebx */ ;\
	sbbl	$0, %ecx	;\
	xchg64	%esi		/* exchange into (%esi) if still unchanged */ ;\
	jnz	1b		/* go back if we failed to exchange */ ;\
	movl	%ebx, %eax	/* return new value in %edx:%eax */ ;\
	movl	%ecx, %edx	;\
	popl	%esi		;\
	popl	%ebx
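
/*
 * Usage sketch for the 64-bit primitives (the counter variable is a
 * hypothetical example, not part of this file):
 *
 *	#include <libkern/OSAtomic.h>
 *
 *	static volatile int64_t bytes_written = 0;
 *	int64_t total = OSAtomicAdd64(4096, &bytes_written);	// returns the new value
 *	OSAtomicIncrement64(&bytes_written);
 *	OSAtomicDecrement64(&bytes_written);
 */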
OS_ATOMIC_FUNCTION_START(OSAtomicAnd32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Barrier, 2)
	ATOMIC_ARITHMETIC(andl, ATOMIC_RET_NEW)
	ret

OS_ATOMIC_FUNCTION_START(OSAtomicOr32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicOr32Barrier, 2)
	ATOMIC_ARITHMETIC(orl, ATOMIC_RET_NEW)
	ret

OS_ATOMIC_FUNCTION_START(OSAtomicXor32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicXor32Barrier, 2)
	ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_NEW)
	ret

OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Orig, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAnd32OrigBarrier, 2)
	ATOMIC_ARITHMETIC(andl, ATOMIC_RET_ORIG)
	ret

OS_ATOMIC_FUNCTION_START(OSAtomicOr32Orig, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicOr32OrigBarrier, 2)
	ATOMIC_ARITHMETIC(orl, ATOMIC_RET_ORIG)
	ret

OS_ATOMIC_FUNCTION_START(OSAtomicXor32Orig, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicXor32OrigBarrier, 2)
	ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_ORIG)
	ret
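
/*
 * Usage sketch showing the two return conventions (the `flags` word is a
 * hypothetical example):
 *
 *	#include <libkern/OSAtomic.h>
 *
 *	static volatile uint32_t flags = 0x01;
 *	int32_t newval = OSAtomicOr32(0x02, &flags);		// returns 0x03, the new value
 *	int32_t oldval = OSAtomicOr32Orig(0x04, &flags);	// returns 0x03, the original value;
 *								// flags is now 0x07
 */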
// bool OSAtomicCompareAndSwapInt(int oldValue, int newValue, volatile int *theValue);
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtr, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtrBarrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapInt, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapIntBarrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLong, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLongBarrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32Barrier, 2)
	movl	 4(%esp), %eax	// load old value
	movl	 8(%esp), %edx	// load new value
	movl	12(%esp), %ecx	// load ptr to operand
	xchg32	%edx, %ecx	// try to exchange new value into (%ecx)
	sete	%al		// collect success flag
	movzbl	%al,%eax	// widen in case caller assumes we return an int
	ret
// bool OSAtomicCompareAndSwap64(int64_t oldValue, int64_t newValue, volatile int64_t *theValue);
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64Barrier, 2)
	pushl	%ebx		// save non-volatile registers
	pushl	%esi
	movl	12(%esp), %eax	// load in 1st 64-bit parameter
	movl	16(%esp), %edx
	movl	20(%esp), %ebx	// load in 2nd 64-bit parameter
	movl	24(%esp), %ecx
	movl	28(%esp), %esi	// load in destination address
	xchg64	%esi		// compare and swap 64-bit
	sete	%al		// collect success flag
	movzbl	%al,%eax	// widen in case caller assumes we return an int
	popl	%esi
	popl	%ebx
	ret
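
/*
 * Typical compare-and-swap retry loop (a sketch; the `ceiling` update is a
 * hypothetical example, not part of this file):
 *
 *	#include <libkern/OSAtomic.h>
 *
 *	static volatile int32_t ceiling = 0;
 *
 *	void raise_ceiling(int32_t candidate) {
 *		int32_t old;
 *		do {
 *			old = ceiling;
 *			if (candidate <= old)
 *				return;		// nothing to do
 *		} while (!OSAtomicCompareAndSwap32(old, candidate, &ceiling));
 *	}
 */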
OS_ATOMIC_FUNCTION_START(OSAtomicAdd32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAdd32Barrier, 2)

OS_VARIANT_FUNCTION_START(OSAtomicIncrement32, up, 2)
OS_VARIANT_FUNCTION_START(OSAtomicIncrement32Barrier, up, 2)

OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32Barrier, 2)

OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32Barrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAdd64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAdd64Barrier, 2)
	ATOMIC_ADD64()
	ret

OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64Barrier, 2)
	ATOMIC_INC64()
	ret

OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64Barrier, 2)
	ATOMIC_DEC64()
	ret

OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSet, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSetBarrier, 2)
	ATOMIC_BIT_OP(btsl)
	ret

OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClear, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClearBarrier, 2)
	ATOMIC_BIT_OP(btrl)
	ret
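
/*
 * Usage sketch for the 32-bit counter entry points above (the refcount is a
 * hypothetical example):
 *
 *	#include <libkern/OSAtomic.h>
 *
 *	static volatile int32_t refcount = 0;
 *	OSAtomicIncrement32(&refcount);			// ++refcount, atomically
 *	if (OSAtomicDecrement32(&refcount) == 0) {	// returns the new value
 *		// last reference dropped
 *	}
 */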
// These are used in both 32- and 64-bit mode.  We use a fence even on UP
// machines, so this function can be used with nontemporal stores.

OS_ATOMIC_FUNCTION_START(OSMemoryBarrier, 4)
	mfence
	ret
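
/*
 * Usage sketch (hypothetical flag/data pair): OSMemoryBarrier() orders the
 * data store before the flag store, so a consumer that observes `ready` set
 * (with its own barrier before reading) can safely read `data`.
 *
 *	#include <libkern/OSAtomic.h>
 *
 *	static int data;
 *	static volatile int ready;
 *
 *	void publish(int value) {
 *		data = value;
 *		OSMemoryBarrier();	// full fence, also orders nontemporal stores
 *		ready = 1;
 *	}
 */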
/*
 * typedef volatile struct {
 *	void	*opaque1;	<-- ptr to 1st queue element or null
 *	long	 opaque2;	<-- generation count
 * } OSQueueHead;
 *
 * void  OSAtomicEnqueue( OSQueueHead *list, void *new, size_t offset);
 */
OS_ATOMIC_FUNCTION_START(OSAtomicEnqueue, 2)
	pushl	%edi
	pushl	%esi
	pushl	%ebx
	movl	16(%esp),%edi	// %edi == ptr to list head
	movl	20(%esp),%ebx	// %ebx == new
	movl	24(%esp),%esi	// %esi == offset
	movl	(%edi),%eax	// %eax == ptr to 1st element in Q
	movl	4(%edi),%edx	// %edx == current generation count
1:	movl	%eax,(%ebx,%esi)// link to old list head from new element
	movl	%edx,%ecx	// copy generation count
	incl	%ecx		// increment generation count
	xchg64	%edi		// ...push on new element
	jnz	1b		// retry if head changed underneath us
	popl	%ebx
	popl	%esi
	popl	%edi
	ret
/* void* OSAtomicDequeue( OSQueueHead *list, size_t offset); */
OS_ATOMIC_FUNCTION_START(OSAtomicDequeue, 2)
	pushl	%edi
	pushl	%esi
	pushl	%ebx
	movl	16(%esp),%edi	// %edi == ptr to list head
	movl	20(%esp),%esi	// %esi == offset
	movl	(%edi),%eax	// %eax == ptr to 1st element in Q
	movl	4(%edi),%edx	// %edx == current generation count
1:	testl	%eax,%eax	// list empty?
	jz	2f		// yes, return NULL
	movl	(%eax,%esi),%ebx // point to 2nd in Q
	movl	%edx,%ecx	// copy generation count
	incl	%ecx		// increment generation count
	xchg64	%edi		// ...pop off 1st element
	jnz	1b		// retry if head changed underneath us
2:	popl	%ebx
	popl	%esi
	popl	%edi
	ret			// ptr to 1st element in Q still in %eax
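
/*
 * Usage sketch (the node type is a hypothetical example): the offset
 * argument tells the queue code where the link field lives inside each
 * element.
 *
 *	#include <stddef.h>
 *	#include <libkern/OSAtomic.h>
 *
 *	typedef struct node { struct node *link; int payload; } node_t;
 *	static OSQueueHead head = OS_ATOMIC_QUEUE_INIT;
 *
 *	void push(node_t *n) {
 *		OSAtomicEnqueue(&head, n, offsetof(node_t, link));
 *	}
 *	node_t *pop(void) {
 *		return OSAtomicDequeue(&head, offsetof(node_t, link));
 *	}
 */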
/*
 * typedef volatile struct {
 *	void	*opaque1;	<-- ptr to first queue element or null
 *	void	*opaque2;	<-- ptr to last queue element or null
 *	int	 opaque3;	<-- spinlock
 * } OSFifoQueueHead;
 *
 * void  OSAtomicFifoEnqueue( OSFifoQueueHead *list, void *new, size_t offset);
 */
OS_ATOMIC_FUNCTION_START(OSAtomicFifoEnqueue, 2)
	pushl	%edi
	pushl	%esi
	pushl	%ebx
	xorl	%ebx,%ebx	// clear "preemption pending" flag
	movl	16(%esp),%edi	// %edi == ptr to list head
	movl	20(%esp),%esi	// %esi == new
	EXTERN_TO_REG(_commpage_pfz_base,%ecx)
	addl	$(_COMM_TEXT_PFZ_ENQUEUE_OFFSET), %ecx
	movl	24(%esp),%edx	// %edx == offset
	call	*%ecx		// call enqueue code in PFZ
	testl	%ebx,%ebx	// pending preemption?

/* void* OSAtomicFifoDequeue( OSFifoQueueHead *list, size_t offset); */
OS_ATOMIC_FUNCTION_START(OSAtomicFifoDequeue, 2)
	pushl	%edi
	pushl	%esi
	pushl	%ebx
	xorl	%ebx,%ebx	// clear "preemption pending" flag
	movl	16(%esp),%edi	// %edi == ptr to list head
	PICIFY(_commpage_pfz_base)
	movl	(%edx),%ecx
	movl	20(%esp),%edx	// %edx == offset
	addl	$(_COMM_TEXT_PFZ_DEQUEUE_OFFSET), %ecx
	call	*%ecx		// call dequeue code in PFZ
	testl	%ebx,%ebx	// pending preemption?
	pushl	%eax		// save return value across sysenter
	ret			// ptr to 1st element in Q still in %eax
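
/*
 * Usage sketch (same hypothetical node type as above, and assuming the
 * OS_ATOMIC_FIFO_QUEUE_INIT initializer from <libkern/OSAtomic.h>): the
 * FIFO variants take an OSFifoQueueHead and run their critical section in
 * the kernel's preemption-free zone (PFZ).
 *
 *	#include <stddef.h>
 *	#include <libkern/OSAtomic.h>
 *
 *	typedef struct node { struct node *link; int payload; } node_t;
 *	static OSFifoQueueHead fifo = OS_ATOMIC_FIFO_QUEUE_INIT;
 *
 *	OSAtomicFifoEnqueue(&fifo, n, offsetof(node_t, link));	// n: node_t *
 *	node_t *first = OSAtomicFifoDequeue(&fifo, offsetof(node_t, link));
 */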