/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 * Copyright (c) 2004-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <architecture/i386/asm_help.h>
#include "os/internal_asm.h"

#define ATOMIC_RET_ORIG         0
#define ATOMIC_RET_NEW          1

// compare and exchange 32-bit
// xchg32 <new> <dst>
.macro xchg32
        lock
        cmpxchgl        $0, ($1)
.endm

// compare and exchange 64-bit
// xchg64 <dst>
.macro xchg64
        lock
        cmpxchg8b       ($0)
.endm
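
/*
 * Editor's sketch (not part of the original source): the C-level meaning
 * of the two macros above.  cmpxchgl compares %eax against the destination;
 * cmpxchg8b compares %edx:%eax and takes the replacement from %ecx:%ebx.
 * Both set ZF on success and, on failure, reload the current memory value
 * into the comparand register(s).  Roughly, using a GCC/Clang builtin
 * (helper name hypothetical):
 *
 *      // equivalent of "xchg32 new, dst" with the expected value
 *      // preloaded in %eax; on failure *expected is updated with the
 *      // current memory value, just as cmpxchgl updates %eax
 *      static bool cas32(volatile int32_t *dst, int32_t *expected, int32_t new)
 *      {
 *              return __atomic_compare_exchange_n(dst, expected, new,
 *                  0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 *      }
 */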

// int32_t OSAtomicAdd32(int32_t theAmount, volatile int32_t *theValue);
#define ATOMIC_ARITHMETIC(instr, orig) \
        movl    8(%esp), %ecx   /* load ptr to theValue (2nd arg) into ecx */ ;\
        movl    (%ecx), %eax    /* load current value at (%ecx) into eax */ ;\
1:      movl    4(%esp), %edx   /* load 1st arg into edx */ ;\
        instr   %eax, %edx      /* do the operation */ ;\
        xchg32  %edx, %ecx      /* old in %eax, new in %edx, exchange into (%ecx) */ ;\
        jnz     1b              /* go back if we failed to exchange */ ;\
        .if orig == ATOMIC_RET_NEW ;\
        movl    %edx, %eax      /* return the new value instead of the original */ ;\
        .endif
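
/*
 * Editor's sketch: the loop above in C (function name hypothetical).
 * ATOMIC_RET_ORIG leaves the pre-operation value in %eax; ATOMIC_RET_NEW
 * overwrites it with the result:
 *
 *      int32_t atomic_op32(int32_t arg, volatile int32_t *p)
 *      {
 *              int32_t old, new;
 *              do {
 *                      old = *p;               // %eax
 *                      new = old | arg;        // "instr", orl here
 *              } while (!__sync_bool_compare_and_swap(p, old, new));
 *              return new;     // or "old" for the *Orig entry points
 *      }
 */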

// bool OSAtomicTestAndSet(uint32_t n, volatile void *theAddress);
#define ATOMIC_BIT_OP(instr) \
        movl    4(%esp), %eax   ;\
        movl    8(%esp), %edx   ;\
        shldl   $3,%edx,%ecx    /* save top 3 bits of address in %ecx */ ;\
        shll    $3,%edx         ;\
        xorl    $7,%eax         /* bit position is numbered big endian so convert to little endian */ ;\
        addl    %eax,%edx       /* generate bit address */ ;\
        adcl    $0,%ecx         /* handle carry out of lower half of address */ ;\
        movl    %edx,%eax       /* copy lower half of bit address */ ;\
        andl    $31,%eax        /* keep bit offset in range 0..31 */ ;\
        xorl    %eax,%edx       /* 4-byte align address */ ;\
        shrdl   $3,%ecx,%edx    /* restore 32-bit byte address in %edx */ ;\
        lock                    ;\
        instr   %eax, (%edx)    ;\
        setc    %al             ;\
        movzbl  %al,%eax        // widen in case caller assumes we return an int

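/*
 * Editor's sketch: what the address arithmetic above computes, in C
 * (helper name hypothetical).  Bits are numbered big-endian within each
 * byte, so bit n lives at mask (0x80 >> (n & 7)) of byte n >> 3:
 *
 *      bool test_and_set_bit(uint32_t n, volatile void *addr)
 *      {
 *              volatile uint8_t *byte = (volatile uint8_t *)addr + (n >> 3);
 *              uint8_t mask = 0x80 >> (n & 7);
 *              uint8_t old = __sync_fetch_and_or(byte, mask);
 *              return (old & mask) != 0;       // previous state of the bit
 *      }
 */
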
// int64_t OSAtomicAdd64(int64_t theAmount, volatile int64_t *theValue);
#define ATOMIC_ADD64() \
        pushl   %ebx            ;\
        pushl   %esi            ;\
        movl    20(%esp), %esi  ;\
        movl    0(%esi), %eax   ;\
        movl    4(%esi), %edx   ;\
1:      movl    12(%esp), %ebx  ;\
        movl    16(%esp), %ecx  ;\
        addl    %eax, %ebx      ;\
        adcl    %edx, %ecx      ;\
        xchg64  %esi            ;\
        jnz     1b              ;\
        movl    %ebx, %eax      ;\
        movl    %ecx, %edx      ;\
        popl    %esi            ;\
        popl    %ebx
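
/*
 * Editor's sketch: the 64-bit add loop in C (function name hypothetical).
 * %edx:%eax holds the value last read, %ecx:%ebx the candidate result,
 * matching what cmpxchg8b expects:
 *
 *      int64_t atomic_add64(int64_t amount, volatile int64_t *p)
 *      {
 *              int64_t old, new;
 *              do {
 *                      old = *p;               // %edx:%eax
 *                      new = old + amount;     // addl/adcl pair
 *              } while (!__sync_bool_compare_and_swap(p, old, new));
 *              return new;
 *      }
 */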

// int64_t OSAtomicIncrement64(volatile int64_t *theValue);
#define ATOMIC_INC64() \
        pushl   %ebx            ;\
        pushl   %esi            ;\
        movl    12(%esp), %esi  ;\
        movl    0(%esi), %eax   ;\
        movl    4(%esi), %edx   ;\
1:      movl    $1, %ebx        ;\
        xorl    %ecx, %ecx      /* %ecx:%ebx = 1 */ ;\
        addl    %eax, %ebx      ;\
        adcl    %edx, %ecx      ;\
        xchg64  %esi            ;\
        jnz     1b              ;\
        movl    %ebx, %eax      ;\
        movl    %ecx, %edx      ;\
        popl    %esi            ;\
        popl    %ebx

// int64_t OSAtomicDecrement64(volatile int64_t *theValue);
#define ATOMIC_DEC64() \
        pushl   %ebx            ;\
        pushl   %esi            ;\
        movl    12(%esp), %esi  ;\
        movl    0(%esi), %eax   ;\
        movl    4(%esi), %edx   ;\
1:      movl    $-1, %ebx       ;\
        movl    $-1, %ecx       /* %ecx:%ebx = -1 (64-bit) */ ;\
        addl    %eax, %ebx      ;\
        adcl    %edx, %ecx      ;\
        xchg64  %esi            ;\
        jnz     1b              ;\
        movl    %ebx, %eax      ;\
        movl    %ecx, %edx      ;\
        popl    %esi            ;\
        popl    %ebx

        .text

OS_ATOMIC_FUNCTION_START(OSAtomicAnd32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Barrier, 2)
        ATOMIC_ARITHMETIC(andl, ATOMIC_RET_NEW)
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicOr32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicOr32Barrier, 2)
        ATOMIC_ARITHMETIC(orl, ATOMIC_RET_NEW)
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicXor32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicXor32Barrier, 2)
        ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_NEW)
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicAnd32Orig, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAnd32OrigBarrier, 2)
        ATOMIC_ARITHMETIC(andl, ATOMIC_RET_ORIG)
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicOr32Orig, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicOr32OrigBarrier, 2)
        ATOMIC_ARITHMETIC(orl, ATOMIC_RET_ORIG)
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicXor32Orig, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicXor32OrigBarrier, 2)
        ATOMIC_ARITHMETIC(xorl, ATOMIC_RET_ORIG)
        ret

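/*
 * Caller-side sketch (C, assuming <libkern/OSAtomic.h>): the plain forms
 * return the new value, the *Orig forms the value seen before the update:
 *
 *      volatile uint32_t flags = 0;
 *      uint32_t new = OSAtomicOr32(0x1, &flags);       // new == 0x1
 *      uint32_t old = OSAtomicOr32Orig(0x2, &flags);   // old == 0x1, flags == 0x3
 */
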
// bool OSAtomicCompareAndSwapInt(int oldValue, int newValue, volatile int *theValue);
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtr, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapPtrBarrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapInt, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapIntBarrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLong, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwapLongBarrier, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap32Barrier, 2)
        movl    4(%esp), %eax   // oldValue, the cmpxchg comparand
        movl    8(%esp), %edx   // newValue
        movl    12(%esp), %ecx  // ptr to theValue
        xchg32  %edx, %ecx
        sete    %al             // ZF is set iff the exchange happened
        movzbl  %al,%eax        // widen in case caller assumes we return an int
        ret

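/*
 * Caller-side sketch (C): a typical retry loop over these entry points
 * (variable names illustrative):
 *
 *      volatile int32_t value = 0;
 *      int32_t old, new;
 *      do {
 *              old = value;
 *              new = old * 2 + 1;      // any pure update of the old value
 *      } while (!OSAtomicCompareAndSwap32(old, new, &value));
 */
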
// bool OSAtomicCompareAndSwap64(int64_t oldValue, int64_t newValue, volatile int64_t *theValue);
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicCompareAndSwap64Barrier, 2)
        pushl   %ebx            // save the nonvolatile registers we use as scratch
        pushl   %esi
        movl    12(%esp), %eax  // load 1st 64-bit parameter into %edx:%eax
        movl    16(%esp), %edx
        movl    20(%esp), %ebx  // load 2nd 64-bit parameter into %ecx:%ebx
        movl    24(%esp), %ecx
        movl    28(%esp), %esi  // load destination address
        xchg64  %esi            // compare and swap 64-bit
        sete    %al
        movzbl  %al,%eax        // widen in case caller assumes we return an int
        popl    %esi
        popl    %ebx
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicAdd32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAdd32Barrier, 2)
        movl    4(%esp), %eax   // theAmount
        movl    8(%esp), %edx   // ptr to theValue
        movl    %eax, %ecx      // keep a copy of theAmount
        lock
        xaddl   %eax, (%edx)    // exchange-and-add; %eax gets the old value
        addl    %ecx, %eax      // old + theAmount == the new value we return
        ret

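/*
 * Editor's sketch: lock xadd leaves the pre-add value in %eax, so the
 * trailing addl reconstructs the post-add value OSAtomicAdd32 returns.
 * In C (function name hypothetical):
 *
 *      int32_t atomic_add32(int32_t amount, volatile int32_t *p)
 *      {
 *              // __sync_fetch_and_add returns the old value, like xadd
 *              return __sync_fetch_and_add(p, amount) + amount;
 *      }
 */
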
// "up" (uniprocessor) variant: with a single CPU the xadd is already
// atomic with respect to other threads, so the lock prefix is omitted
OS_VARIANT_FUNCTION_START(OSAtomicIncrement32, up, 2)
OS_VARIANT_FUNCTION_START(OSAtomicIncrement32Barrier, up, 2)
        movl    4(%esp), %ecx
        movl    $1, %eax
        xaddl   %eax, (%ecx)
        incl    %eax
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicIncrement32Barrier, 2)
        movl    4(%esp), %ecx
        movl    $1, %eax
        lock
        xaddl   %eax, (%ecx)
        incl    %eax
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicDecrement32Barrier, 2)
        movl    4(%esp), %ecx
        movl    $-1, %eax
        lock
        xaddl   %eax, (%ecx)
        decl    %eax
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicAdd64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicAdd64Barrier, 2)
        ATOMIC_ADD64()
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicIncrement64Barrier, 2)
        ATOMIC_INC64()
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicDecrement64Barrier, 2)
        ATOMIC_DEC64()
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSet, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicTestAndSetBarrier, 2)
        ATOMIC_BIT_OP(btsl)
        ret

OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClear, 2)
OS_ATOMIC_FUNCTION_START(OSAtomicTestAndClearBarrier, 2)
        ATOMIC_BIT_OP(btrl)
        ret

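/*
 * Caller-side sketch (C): remember the big-endian bit numbering noted in
 * ATOMIC_BIT_OP, so bit 0 is the 0x80 bit of the first byte:
 *
 *      volatile uint32_t word = 0;
 *      bool was_set = OSAtomicTestAndSet(0, &word);
 *      // was_set == false; the low byte of word is now 0x80
 *      was_set = OSAtomicTestAndClear(0, &word);
 *      // was_set == true; word is 0 again
 */
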
// void OSMemoryBarrier(void);
// This is used in both 32- and 64-bit mode.  We use a fence even on UP
// machines, so this function can be used with nontemporal stores.

OS_ATOMIC_FUNCTION_START(OSMemoryBarrier, 4)
        mfence
        ret

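/*
 * Editor's note: mfence orders all earlier loads and stores against all
 * later ones, including nontemporal stores that bypass the normal write
 * ordering.  At the C level this behaves roughly like a sequentially
 * consistent fence:
 *
 *      #include <stdatomic.h>
 *      atomic_thread_fence(memory_order_seq_cst);      // ~ OSMemoryBarrier()
 */
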
/*
 * typedef volatile struct {
 *      void    *opaque1;       <-- ptr to 1st queue element or null
 *      long     opaque2;       <-- generation count
 * } OSQueueHead;
 *
 * void OSAtomicEnqueue( OSQueueHead *list, void *new, size_t offset);
 */
OS_ATOMIC_FUNCTION_START(OSAtomicEnqueue, 2)
        pushl   %edi
        pushl   %esi
        pushl   %ebx
        movl    16(%esp),%edi   // %edi == ptr to list head
        movl    20(%esp),%ebx   // %ebx == new
        movl    24(%esp),%esi   // %esi == offset
        movl    (%edi),%eax     // %eax == ptr to 1st element in Q
        movl    4(%edi),%edx    // %edx == current generation count
1:      movl    %eax,(%ebx,%esi) // link to old list head from new element
        movl    %edx,%ecx
        incl    %ecx            // increment generation count
        xchg64  %edi            // ...push on new element
        jnz     1b
        popl    %ebx
        popl    %esi
        popl    %edi
        ret

/* void* OSAtomicDequeue( OSQueueHead *list, size_t offset); */
OS_ATOMIC_FUNCTION_START(OSAtomicDequeue, 2)
        pushl   %edi
        pushl   %esi
        pushl   %ebx
        movl    16(%esp),%edi   // %edi == ptr to list head
        movl    20(%esp),%esi   // %esi == offset
        movl    (%edi),%eax     // %eax == ptr to 1st element in Q
        movl    4(%edi),%edx    // %edx == current generation count
1:      testl   %eax,%eax       // list empty?
        jz      2f              // yes
        movl    (%eax,%esi),%ebx // point to 2nd in Q
        movl    %edx,%ecx
        incl    %ecx            // increment generation count
        xchg64  %edi            // ...pop off 1st element
        jnz     1b
2:      popl    %ebx
        popl    %esi
        popl    %edi
        ret                     // ptr to 1st element in Q still in %eax

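/*
 * Caller-side sketch (C, assuming <libkern/OSAtomic.h> and <stddef.h>):
 * the link pointer lives inside each element at the byte offset the
 * caller passes in, and the generation count in opaque2 is bumped on
 * every update so cmpxchg8b can reject stale (ABA) heads.  Struct and
 * field names are illustrative:
 *
 *      typedef struct elem {
 *              int     data;
 *              struct elem *link;      // used by OSAtomicEnqueue/Dequeue
 *      } elem_t;
 *
 *      OSQueueHead q = OS_ATOMIC_QUEUE_INIT;
 *      elem_t e = { .data = 42, .link = NULL };
 *      OSAtomicEnqueue(&q, &e, offsetof(elem_t, link));
 *      elem_t *p = OSAtomicDequeue(&q, offsetof(elem_t, link)); // LIFO pop
 */
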
/*
 * typedef volatile struct {
 *      void    *opaque1;       <-- ptr to first queue element or null
 *      void    *opaque2;       <-- ptr to last queue element or null
 *      int      opaque3;       <-- spinlock
 * } OSFifoQueueHead;
 *
 * void OSAtomicFifoEnqueue( OSFifoQueueHead *list, void *new, size_t offset);
 */
OS_ATOMIC_FUNCTION_START(OSAtomicFifoEnqueue, 2)
        pushl   %edi
        pushl   %esi
        pushl   %ebx
        xorl    %ebx,%ebx       // clear "preemption pending" flag
        movl    16(%esp),%edi   // %edi == ptr to list head
        movl    20(%esp),%esi   // %esi == new
        EXTERN_TO_REG(_commpage_pfz_base,%ecx)
        movl    (%ecx), %ecx
        addl    $(_COMM_TEXT_PFZ_ENQUEUE_OFFSET), %ecx
        movl    24(%esp),%edx   // %edx == offset
        call    *%ecx           // enqueue routine in the commpage preemption-free zone
        testl   %ebx,%ebx       // pending preemption?
        jz      1f
        call    _preempt
1:      popl    %ebx
        popl    %esi
        popl    %edi
        ret

/* void* OSAtomicFifoDequeue( OSFifoQueueHead *list, size_t offset); */
OS_ATOMIC_FUNCTION_START(OSAtomicFifoDequeue, 2)
        pushl   %edi
        pushl   %esi
        pushl   %ebx
        xorl    %ebx,%ebx       // clear "preemption pending" flag
        movl    16(%esp),%edi   // %edi == ptr to list head
        PICIFY(_commpage_pfz_base) // PIC address of _commpage_pfz_base in %edx
        movl    (%edx),%ecx
        movl    20(%esp),%edx   // %edx == offset
        addl    $(_COMM_TEXT_PFZ_DEQUEUE_OFFSET), %ecx
        call    *%ecx           // dequeue routine in the commpage preemption-free zone
        testl   %ebx,%ebx       // pending preemption?
        jz      1f
        pushl   %eax            // save return value across sysenter
        call    _preempt
        popl    %eax
1:      popl    %ebx
        popl    %esi
        popl    %edi
        ret                     // ptr to 1st element in Q still in %eax

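/*
 * Caller-side sketch (C): the FIFO variants take the same arguments as
 * the LIFO ones above but preserve insertion order (elem_t as in the
 * earlier sketch):
 *
 *      OSFifoQueueHead fq = OS_ATOMIC_FIFO_QUEUE_INIT;
 *      OSAtomicFifoEnqueue(&fq, &e1, offsetof(elem_t, link));
 *      OSAtomicFifoEnqueue(&fq, &e2, offsetof(elem_t, link));
 *      elem_t *first = OSAtomicFifoDequeue(&fq, offsetof(elem_t, link)); // &e1
 */
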
// Local Variables:
// tab-width: 8
// End: