/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <architecture/i386/asm_help.h>
#include "os/internal_asm.h"

        .text

#define ATOMIC_RET_ORIG  0
#define ATOMIC_RET_NEW   1

// compare and exchange 32-bit
// xchg32 <new> <dst>
.macro xchg32
        lock
        cmpxchgl        $0, ($1)
.endm

// compare and exchange 64-bit
// xchg64 <new> <dst>
.macro xchg64
        lock
        cmpxchg         $0, ($1)
.endm

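// Note: cmpxchg compares %eax (or %rax) with the destination; on a match it
// stores the new value and sets ZF, otherwise it loads the current memory
// value into %eax/%rax and clears ZF.  The callers below rely on this to
// retry (jnz) or to report success (sete).
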
#define ATOMIC_ARITHMETIC(instr, orig) \
        movl    (%rsi), %eax    /* get 2nd arg -> eax */ ;\
1:      movl    %eax, %edx      /* copy value to new reg */ ;\
        instr   %edi, %edx      /* apply instr to %edx with arg2 */ ;\
        xchg32  %edx, %rsi      /* do the compare swap (see macro above) */ ;\
        jnz     1b              /* jump if failed */ ;\
        .if orig == 1           /* to return the new value, overwrite eax */ ;\
        movl    %edx, %eax      /* return the new value */ ;\
        .endif

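/*
 * A minimal C sketch of the loop ATOMIC_ARITHMETIC expands to, shown for
 * the OR case (illustrative only; the real work is the lock cmpxchg):
 *
 *      uint32_t OSAtomicOr32(uint32_t mask, uint32_t *value) {
 *              uint32_t old, new;
 *              do {
 *                      old = *value;
 *                      new = old | mask;
 *              } while (!__sync_bool_compare_and_swap(value, old, new));
 *              return new;     // ATOMIC_RET_ORIG variants return 'old'
 *      }
 */
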
// Used in OSAtomicTestAndSet( uint32_t n, void *value ), assumes ABI parameter locations
// Manpage says bit to test/set is (0x80 >> (n & 7)) of byte (addr + (n >> 3))
#define ATOMIC_BIT_OP(instr) \
        xorl    $7, %edi        /* bit position is numbered big endian so convert to little endian */ ;\
        shlq    $3, %rsi        ;\
        addq    %rdi, %rsi      /* generate bit address */ ;\
        movq    %rsi, %rdi      ;\
        andq    $31, %rdi       /* keep bit offset in range 0..31 */ ;\
        xorq    %rdi, %rsi      /* 4-byte align address */ ;\
        shrq    $3, %rsi        /* get 4-byte aligned address */ ;\
        lock                    /* lock the bit test */ ;\
        instr   %edi, (%rsi)    /* do the bit test, supplied into the macro */ ;\
        setc    %al             ;\
        movzbl  %al, %eax       /* widen in case caller assumes we return an int */

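/*
 * The same bit addressing in C (illustrative and NOT atomic; the macro
 * above does the test-and-modify in a single lock bts/btr):
 *
 *      bool OSAtomicTestAndSet(uint32_t n, void *value) {
 *              uint8_t *byte = (uint8_t *)value + (n >> 3);
 *              uint8_t  mask = 0x80 >> (n & 7);
 *              bool     old  = (*byte & mask) != 0;
 *              *byte |= mask;  // btr-based variants clear the bit instead
 *              return old;
 *      }
 */
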
// uint32_t OSAtomicAnd32( uint32_t mask, uint32_t *value);
OS_ATOMIC_FUNCTION_START(OSAtomicAnd32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Barrier, 2)
        ATOMIC_ARITHMETIC(andl, ATOMIC_RET_NEW)
        ret

// uint32_t OSAtomicOr32( uint32_t mask, uint32_t *value);
OS_ATOMIC_FUNCTION_START(OSAtomicOr32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicOr32Barrier, 2)
        ATOMIC_ARITHMETIC(orl, ATOMIC_RET_NEW)
        ret

// uint32_t OSAtomicXor32( uint32_t mask, uint32_t *value);
OS_ATOMIC_FUNCTION_START(OSAtomicXor32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicXor32Barrier, 2)
        ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_NEW)
        ret

// uint32_t OSAtomicAnd32Orig( uint32_t mask, uint32_t *value);
OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Orig, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAnd32OrigBarrier, 2)
        ATOMIC_ARITHMETIC(andl, ATOMIC_RET_ORIG)
        ret

// uint32_t OSAtomicOr32Orig( uint32_t mask, uint32_t *value);
OS_ATOMIC_FUNCTION_START(OSAtomicOr32Orig, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicOr32OrigBarrier, 2)
        ATOMIC_ARITHMETIC(orl, ATOMIC_RET_ORIG)
        ret

// uint32_t OSAtomicXor32Orig( uint32_t mask, uint32_t *value);
OS_ATOMIC_FUNCTION_START(OSAtomicXor32Orig, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicXor32OrigBarrier, 2)
        ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_ORIG)
        ret

// bool OSAtomicCompareAndSwap32( int32_t old, int32_t new, int32_t *value);
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapInt, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapIntBarrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32Barrier, 2)
        movl    %edi, %eax
        xchg32  %esi, %rdx
        sete    %al
        movzbl  %al, %eax       // widen in case caller assumes we return an int
        ret

// bool OSAtomicCompareAndSwap64( int64_t old, int64_t new, int64_t *value);
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtr, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtrBarrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLong, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLongBarrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64Barrier, 2)
        mov     %rdi, %rax
        xchg64  %rsi, %rdx
        sete    %al
        movzbl  %al, %eax       // widen in case caller assumes we return an int
        ret

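/*
 * Typical caller-side pattern for the CAS entry points above (illustrative):
 *
 *      int32_t old;
 *      do {
 *              old = *value;
 *      } while (!OSAtomicCompareAndSwap32(old, old + 1, value));
 */
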
// int32_t OSAtomicAdd32( int32_t amt, int32_t *value );
OS_ATOMIC_FUNCTION_START(OSAtomicAdd32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAdd32Barrier, 2)
        movl    %edi, %eax      // save amt to add
        lock                    // lock prefix breaks tabs ;)
        xaddl   %edi, (%rsi)    // swap and add value, returns old value in %edi
        addl    %edi, %eax      // add old value to amt as return value
        ret

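// Note: xadd atomically exchanges its operands and stores their sum, so the
// register comes back holding the old memory value; re-adding amt (or the
// inc/dec below) reconstructs the new value these functions return, roughly
// __sync_add_and_fetch(value, amt) in compiler-builtin terms.
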
// int32_t OSAtomicIncrement32(int32_t *value );
OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32Barrier, 2)
        movl    $1, %eax        // load increment
        lock                    // lock prefix breaks tabs ;)
        xaddl   %eax, (%rdi)    // swap and add value, returns old value in %eax
        incl    %eax            // increment old value as return value
        ret

// int32_t OSAtomicDecrement32(int32_t *value );
OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32Barrier, 2)
        movl    $-1, %eax       // load decrement
        lock                    // lock prefix breaks tabs ;)
        xaddl   %eax, (%rdi)    // swap and add value, returns old value in %eax
        decl    %eax            // decrement old value as return value
        ret

// int64_t OSAtomicAdd64( int64_t amt, int64_t *value );
OS_ATOMIC_FUNCTION_START(OSAtomicAdd64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAdd64Barrier, 2)
        movq    %rdi, %rax      // save amt to add
        lock
        xaddq   %rdi, (%rsi)    // swap and add value, returns old value in %rdi
        addq    %rdi, %rax      // add old value to amt as return value
        ret

// int64_t OSAtomicIncrement64(int64_t *value );
OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64Barrier, 2)
        movq    $1, %rax        // load increment
        lock                    // lock prefix breaks tabs ;)
        xaddq   %rax, (%rdi)    // swap and add value, returns old value in %rax
        incq    %rax            // increment old value as return value
        ret

// int64_t OSAtomicDecrement64(int64_t *value );
OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64Barrier, 2)
        movq    $-1, %rax       // load decrement
        lock                    // lock prefix breaks tabs ;)
        xaddq   %rax, (%rdi)    // swap and add value, returns old value in %rax
        decq    %rax            // decrement old value as return value
        ret

// bool OSAtomicTestAndSet( uint32_t n, void *value );
OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSet, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSetBarrier, 2)
        ATOMIC_BIT_OP(btsl)
        ret

// bool OSAtomicTestAndClear( uint32_t n, void *value );
OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClear, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClearBarrier, 2)
        ATOMIC_BIT_OP(btrl)
        ret

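// OSMemoryBarrier is a full barrier: mfence orders all earlier loads and
// stores before all later ones, the strongest fence x86-64 offers.
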
// void OSMemoryBarrier( void );
OS_ATOMIC_FUNCTION_START(OSMemoryBarrier, 2)
        mfence
        ret

/*
 * typedef volatile struct {
 *      void    *opaque1;       <-- ptr to 1st queue element or null
 *      long     opaque2;       <-- generation count
 * } OSQueueHead;
 *
 * void OSAtomicEnqueue( OSQueueHead *list, void *new, size_t offset);
 */
OS_ATOMIC_FUNCTION_START(OSAtomicEnqueue, 2)
        pushq   %rbx            // %rdi == list head, %rsi == new, %rdx == offset
        movq    %rsi, %rbx      // %rbx == new
        movq    %rdx, %rsi      // %rsi == offset
        movq    (%rdi), %rax    // %rax == ptr to 1st element in Q
        movq    8(%rdi), %rdx   // %rdx == current generation count
1:
        movq    %rax, (%rbx,%rsi) // link to old list head from new element
        movq    %rdx, %rcx
        incq    %rcx            // increment generation count
        lock                    // always lock for now...
        cmpxchg16b (%rdi)       // ...push on new element
        jnz     1b
        popq    %rbx
        ret

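/*
 * cmpxchg16b compares %rdx:%rax (old generation, old head) with the 16
 * bytes at (%rdi) and, only if they still match, stores %rcx:%rbx (new
 * generation, new element) as one atomic unit.  A rough C rendering,
 * where dwcas() stands in for that double-width CAS (hypothetical helper):
 *
 *      void OSAtomicEnqueue(OSQueueHead *list, void *new, size_t offset) {
 *              void *head; long gen;
 *              do {
 *                      head = list->opaque1;
 *                      gen  = list->opaque2;
 *                      *(void **)((char *)new + offset) = head;
 *              } while (!dwcas(list, head, gen, new, gen + 1));
 *      }
 */
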
/* void* OSAtomicDequeue( OSQueueHead *list, size_t offset); */
OS_ATOMIC_FUNCTION_START(OSAtomicDequeue, 2)
        pushq   %rbx            // %rdi == list head, %rsi == offset
        movq    (%rdi), %rax    // %rax == ptr to 1st element in Q
        movq    8(%rdi), %rdx   // %rdx == current generation count
1:
        testq   %rax, %rax      // list empty?
        jz      2f              // yes
        movq    (%rax,%rsi), %rbx // point to 2nd in Q
        movq    %rdx, %rcx
        incq    %rcx            // increment generation count
        lock                    // always lock for now...
        cmpxchg16b (%rdi)       // ...pop off 1st element
        jnz     1b
2:
        popq    %rbx
        ret                     // ptr to 1st element in Q still in %rax

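// The generation count exists to defeat the classic ABA problem: between
// reading the head and the cmpxchg16b, another thread could pop the first
// element and push it back, leaving the head pointer equal but the list
// changed.  Because every successful push/pop bumps the count, such an
// interleaving makes the 16-byte compare fail and the loop retries.
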
/*
 * typedef volatile struct {
 *      void    *opaque1;       <-- ptr to first queue element or null
 *      void    *opaque2;       <-- ptr to last queue element or null
 *      int      opaque3;       <-- spinlock
 * } OSFifoQueueHead;
 *
 * void OSAtomicFifoEnqueue( OSFifoQueueHead *list, void *new, size_t offset);
 */
OS_ATOMIC_FUNCTION_START(OSAtomicFifoEnqueue$VARIANT$PFZ, 2)
        pushq   %rbx
        xorl    %ebx, %ebx      // clear "preemption pending" flag
        movq    _commpage_pfz_base(%rip), %rcx
        addq    $(_COMM_TEXT_PFZ_ENQUEUE_OFFSET), %rcx
        call    *%rcx
        testl   %ebx, %ebx      // pending preemption?
        jz      1f
        call    _preempt        // call into the kernel to pfz_exit
1:
        popq    %rbx
        ret


/* void* OSAtomicFifoDequeue( OSFifoQueueHead *list, size_t offset); */
OS_ATOMIC_FUNCTION_START(OSAtomicFifoDequeue$VARIANT$PFZ, 2)
        pushq   %rbx
        xorl    %ebx, %ebx      // clear "preemption pending" flag
        movq    _commpage_pfz_base(%rip), %rcx
        movq    %rsi, %rdx      // move offset to %rdx to be like the Enqueue case
        addq    $(_COMM_TEXT_PFZ_DEQUEUE_OFFSET), %rcx
        call    *%rcx
        testl   %ebx, %ebx      // pending preemption?
        jz      1f
        call    _preempt        // call into the kernel to pfz_exit
1:
        popq    %rbx
        ret                     // ptr to 1st element in Q in %rax

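// Both FIFO variants call into the commpage's preemption-free zone (PFZ),
// kernel-provided code whose execution the scheduler avoids interrupting
// mid-update.  If a preemption had to be deferred while the thread was
// inside the PFZ, that is reported back in %ebx, and the thread pays it
// off with the _preempt call above.
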
// Local Variables:
// tab-width: 8
// End: