/*
 * Apple Libc-594.9.5 — arm/sys/OSAtomic.s
 * (retrieved via the git.saurik.com mirror of apple/libc.git)
 */
/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
23
#include <machine/cpu_capabilities.h>
#include <architecture/arm/asm_help.h>
#include <arm/arch.h>

.text

/*
 * Use LDREX/STREX to perform atomic operations.
 * Memory barriers are not needed on a UP system.
 */

#if defined(_ARM_ARCH_6)

/* Implement a generic atomic arithmetic operation:
 * operand is in R0, pointer is in R1. Return new
 * value into R0.
 *
 * Loops on LDREX/STREX until the store-exclusive succeeds.
 * Clobbers r2, r3, ip, and flags; no memory barrier (UP assumption,
 * see note at top of file).
 */
#define ATOMIC_ARITHMETIC(op) \
1:	ldrex	r2, [r1]	/* load existing value and tag memory */ ;\
	op	r3, r2, r0	/* compute new value */ ;\
	strex	ip, r3, [r1]	/* store new value if memory is still tagged */ ;\
	cmp	ip, #0		/* check if the store succeeded */ ;\
	bne	1b		/* if not, try again */ ;\
	mov	r0, r3		/* return new value */
48
/* Same as ATOMIC_ARITHMETIC, but returns the ORIGINAL (pre-operation)
 * value in R0 instead of the new one. Same clobbers: r2, r3, ip, flags. */
#define ATOMIC_ARITHMETIC_ORIG(op) \
1:	ldrex	r2, [r1]	/* load existing value and tag memory */ ;\
	op	r3, r2, r0	/* compute new value */ ;\
	strex	ip, r3, [r1]	/* store new value if memory is still tagged */ ;\
	cmp	ip, #0		/* check if the store succeeded */ ;\
	bne	1b		/* if not, try again */ ;\
	mov	r0, r2		/* return orig value */
56
/* int32_t OSAtomicAdd32(int32_t theAmount, volatile int32_t *theValue)
 * Returns the new value. The Barrier variant is an alias: barriers are
 * unnecessary on a UP system (see note at top of file). */
ENTRY_POINT(_OSAtomicAdd32Barrier)
ENTRY_POINT(_OSAtomicAdd32)
	ATOMIC_ARITHMETIC(add)
	bx	lr
61
/* int32_t OSAtomicOr32(uint32_t theMask, volatile uint32_t *theValue)
 * Returns the new value; Barrier variant is an alias (UP system). */
ENTRY_POINT(_OSAtomicOr32Barrier)
ENTRY_POINT(_OSAtomicOr32)
	ATOMIC_ARITHMETIC(orr)
	bx	lr
66
/* int32_t OSAtomicOr32Orig(uint32_t theMask, volatile uint32_t *theValue)
 * Returns the ORIGINAL value; Barrier variant is an alias (UP system). */
ENTRY_POINT(_OSAtomicOr32OrigBarrier)
ENTRY_POINT(_OSAtomicOr32Orig)
	ATOMIC_ARITHMETIC_ORIG(orr)
	bx	lr
71
/* int32_t OSAtomicAnd32(uint32_t theMask, volatile uint32_t *theValue)
 * Returns the new value; Barrier variant is an alias (UP system). */
ENTRY_POINT(_OSAtomicAnd32Barrier)
ENTRY_POINT(_OSAtomicAnd32)
	ATOMIC_ARITHMETIC(and)
	bx	lr
76
/* int32_t OSAtomicAnd32Orig(uint32_t theMask, volatile uint32_t *theValue)
 * Returns the ORIGINAL value; Barrier variant is an alias (UP system). */
ENTRY_POINT(_OSAtomicAnd32OrigBarrier)
ENTRY_POINT(_OSAtomicAnd32Orig)
	ATOMIC_ARITHMETIC_ORIG(and)
	bx	lr
81
/* int32_t OSAtomicXor32(uint32_t theMask, volatile uint32_t *theValue)
 * Returns the new value; Barrier variant is an alias (UP system). */
ENTRY_POINT(_OSAtomicXor32Barrier)
ENTRY_POINT(_OSAtomicXor32)
	ATOMIC_ARITHMETIC(eor)
	bx	lr
86
/* int32_t OSAtomicXor32Orig(uint32_t theMask, volatile uint32_t *theValue)
 * Returns the ORIGINAL value; Barrier variant is an alias (UP system). */
ENTRY_POINT(_OSAtomicXor32OrigBarrier)
ENTRY_POINT(_OSAtomicXor32Orig)
	ATOMIC_ARITHMETIC_ORIG(eor)
	bx	lr
91
/* bool OSAtomicCompareAndSwap32(int32_t oldValue, int32_t newValue,
 *                               volatile int32_t *theValue)
 * The Int/Long/Ptr variants are aliases: all are 32 bits wide on
 * 32-bit ARM, and barriers are unnecessary on a UP system.
 * In:  r0 = oldValue, r1 = newValue, r2 = pointer.
 * Out: r0 = 1 if the swap was performed, 0 otherwise. Clobbers r3, flags.
 *
 * NOTE(review): the mismatch path returns without clearing the exclusive
 * monitor (CLREX requires ARMv6K); presumably harmless here since the
 * next STREX would simply fail — confirm for newer cores.
 */
ENTRY_POINT(_OSAtomicCompareAndSwap32Barrier)
ENTRY_POINT(_OSAtomicCompareAndSwap32)
ENTRY_POINT(_OSAtomicCompareAndSwapIntBarrier)
ENTRY_POINT(_OSAtomicCompareAndSwapInt)
ENTRY_POINT(_OSAtomicCompareAndSwapLongBarrier)
ENTRY_POINT(_OSAtomicCompareAndSwapLong)
ENTRY_POINT(_OSAtomicCompareAndSwapPtrBarrier)
ENTRY_POINT(_OSAtomicCompareAndSwapPtr)
1:	ldrex	r3, [r2]	// load existing value and tag memory
	teq	r3, r0		// is it the same as oldValue?
	movne	r0, #0		// if not, return 0 immediately
	bxne	lr
	strex	r3, r1, [r2]	// otherwise, try to store new value
	cmp	r3, #0		// check if the store succeeded
	bne	1b		// if not, try again
	mov	r0, #1		// return true
	bx	lr
109
110
/* Implement a generic test-and-bit-op operation:
 * bit to set is in R0, base address is in R1. Return
 * previous value (0 or 1) of the bit in R0.
 *
 * Bit numbering is most-significant-bit-first within each byte:
 * the mask computed below is (0x80 >> (R0 & 7)) << (R0 & ~7 & 31),
 * i.e. bit 0 is the 0x80 bit of the first byte.
 * Clobbers r2, r3, ip, and flags.
 */
#define ATOMIC_BITOP(op) \
	/* Adjust pointer to point at the correct word ;\
	 * R1 = R1 + 4 * (R0 / 32) ;\
	 */ ;\
	mov	r3, r0, lsr #5 ;\
	add	r1, r1, r3, asl #2 ;\
	/* Generate a bit mask for the bit we want to test ;\
	 * R0 = (0x80 >> (R0 & 7)) << (R0 & ~7 & 31) ;\
	 */ ;\
	and	r2, r0, #7 ;\
	mov	r3, #0x80 ;\
	mov	r3, r3, asr r2	/* asr == lsr here: 0x80 is positive */ ;\
	and	r0, r0, #0x18	/* 0x18 == ~7 & 31 */ ;\
	mov	r0, r3, asl r0 ;\
1: ;\
	ldrex	r2, [r1]	/* load existing value and tag memory */ ;\
	op	r3, r2, r0	/* compute new value */ ;\
	strex	ip, r3, [r1]	/* attempt to store new value */ ;\
	cmp	ip, #0		/* check if the store succeeded */ ;\
	bne	1b		/* if not, try again */ ;\
	ands	r0, r2, r0	/* mask off the bit from the old value */ ;\
	movne	r0, #1		/* if non-zero, return exactly 1 */
137
/* bool OSAtomicTestAndSet(uint32_t n, volatile void *theAddress)
 * Atomically sets bit n; returns the bit's previous value (0 or 1).
 * Barrier variant is an alias (UP system). */
ENTRY_POINT(_OSAtomicTestAndSetBarrier)
ENTRY_POINT(_OSAtomicTestAndSet)
	ATOMIC_BITOP(orr)
	bx	lr
142
/* bool OSAtomicTestAndClear(uint32_t n, volatile void *theAddress)
 * Atomically clears bit n; returns the bit's previous value (0 or 1).
 * Barrier variant is an alias (UP system). */
ENTRY_POINT(_OSAtomicTestAndClearBarrier)
ENTRY_POINT(_OSAtomicTestAndClear)
	ATOMIC_BITOP(bic)
	bx	lr
147
/* void OSMemoryBarrier(void)
 * Deliberate no-op: memory barriers are not needed on a UP system
 * (see note at top of file). */
ENTRY_POINT(_OSMemoryBarrier)
	bx	lr
150
151
#if defined(_ARM_ARCH_6K)
/* If we can use LDREXD/STREXD, then we can implement 64-bit atomic operations */
154
/* int64_t OSAtomicAdd64(int64_t theAmount, volatile int64_t *theValue)
 * In:  r0/r1 = amount (low/high words), r2 = pointer.
 * Out: r0/r1 = new value. Uses LDREXD/STREXD (requires ARMv6K).
 * Barrier variant is an alias (UP system). */
ENTRY_POINT(_OSAtomicAdd64Barrier)
ENTRY_POINT(_OSAtomicAdd64)
	// R0,R1 contain the amount to add
	// R2 contains the pointer
	stmfd	sp!, {r4, r5, r8, r9, lr}
1:
	ldrexd	r4, r5, [r2]	// load existing value to R4/R5 and tag memory
	adds	r8, r4, r0	// add lower half of new value into R8 and set carry bit
	adc	r9, r5, r1	// add upper half of new value into R9 with carry
	strexd	r3, r8, r9, [r2]	// store new value if memory is still tagged
	cmp	r3, #0		// check if store succeeded
	bne	1b		// if not, try again
	mov	r0, r8		// return new value
	mov	r1, r9
	ldmfd	sp!, {r4, r5, r8, r9, pc}
170
/* bool OSAtomicCompareAndSwap64(int64_t oldValue, int64_t newValue,
 *                               volatile int64_t *theValue)
 * In:  r0/r1 = old value (low/high), r2/r3 = new value (low/high),
 *      pointer passed on the stack. Uses LDREXD/STREXD (ARMv6K).
 * Out: r0 = 1 if the swap was performed, 0 otherwise.
 * Barrier variant is an alias (UP system).
 *
 * Fixes vs. previous revision:
 *  - LDREXD wrote only R4 in the operand list although R5 is read by
 *    the high-word compare; UAL requires both destinations spelled out.
 *  - STREXD omitted R3 from the source pair, and the success check
 *    compared R3 (the high word of newValue) instead of the STREXD
 *    status flag — any newValue with a nonzero high word caused a
 *    spurious retry after a successful store, and the retry's TEQ then
 *    failed against the just-stored value, returning 0 after having
 *    performed the swap. Status now goes to R4 (dead at that point)
 *    and is the register tested.
 */
ENTRY_POINT(_OSAtomicCompareAndSwap64Barrier)
ENTRY_POINT(_OSAtomicCompareAndSwap64)
	// R0,R1 contains the old value
	// R2,R3 contains the new value
	// the pointer is pushed onto the stack
	ldr	ip, [sp, #0]		// load pointer into IP
	stmfd	sp!, {r4, r5, lr}
1:
	ldrexd	r4, r5, [ip]	// load existing value into R4/R5 and tag memory
	teq	r0, r4			// check low word
	teqeq	r1, r5			// if low words match, check high word
	movne	r0, #0			// if either match fails, return 0
	bne	2f
	strexd	r4, r2, r3, [ip]	// try to store new value; status -> R4
	cmp	r4, #0			// check if the store succeeded
	bne	1b			// if not, try again
	mov	r0, #1			// return true
2:
	ldmfd	sp!, {r4, r5, pc}
190
#endif /* defined(_ARM_ARCH_6K) */

#endif /* defined(_ARM_ARCH_6) */
194
/*
 * void
 * _spin_lock(p)
 * int *p;
 *
 * Lock the lock pointed to by p. Spin (possibly forever) until the next
 * lock is available. On contention, backs off by yielding the CPU via
 * the Mach thread_switch() trap with a 1 ms depress before retrying.
 *
 * NOTE(review): SWP is deprecated from ARMv6 onward; presumably kept
 * here for pre-v6 compatibility — confirm against supported targets.
 */
ENTRY_POINT(_spin_lock)
ENTRY_POINT(__spin_lock)
ENTRY_POINT(_OSSpinLockLock)
L_spin_lock_loop:
	mov	r1, #1
	swp	r2, r1, [r0]	// atomically swap 1 into *p; old value -> r2
	cmp	r2, #0		// was it unlocked?
	bxeq	lr		// yes: we now own it, done
	mov	ip, sp		// NOTE(review): ip (== r12) is overwritten below; looks vestigial — confirm
	stmfd	sp!, {r0, r8}	// preserve lock pointer (and r8) across the trap
	mov	r0, #0		// THREAD_NULL
	mov	r1, #1		// SWITCH_OPTION_DEPRESS
	mov	r2, #1		// timeout (ms)
	mov	r12, #-61	// SYSCALL_THREAD_SWITCH
	swi	0x80		// trap into the kernel to yield
	ldmfd	sp!, {r0, r8}	// restore lock pointer
	b	L_spin_lock_loop
220
/* bool _spin_lock_try(int *p) / OSSpinLockTry
 * Single non-blocking acquisition attempt.
 * Returns nonzero if the lock was acquired, 0 if it was already held. */
ENTRY_POINT(_spin_lock_try)
ENTRY_POINT(__spin_lock_try)
ENTRY_POINT(_OSSpinLockTry)
	mov	r1, #1
	swp	r2, r1, [r0]	// atomically swap 1 into *p; old value -> r2
	bic	r0, r1, r2	// r0 = 1 & ~old: 1 if lock was free, else 0
	bx	lr
228
/*
 * void
 * _spin_unlock(p)
 * int *p;
 *
 * Unlock the lock pointed to by p.
 *
 * A plain store of 0 suffices: no memory barrier is needed on a UP
 * system (see note at top of file).
 */
ENTRY_POINT(_spin_unlock)
ENTRY_POINT(__spin_unlock)
ENTRY_POINT(_OSSpinLockUnlock)
	mov	r1, #0		// 0 == unlocked
	str	r1, [r0]
	bx	lr
242