/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <sys/appleapiopts.h>
#include <ppc/asm.h>                            // EXT, LEXT
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>


/* OSAtomic.h library native implementations. */
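
/*
 * Each routine below is copied into the commpage by the kernel at boot and
 * runs in user mode.  The arithmetic and queue operations all follow the
 * same pattern: load-and-reserve (lwarx/ldarx), compute the new value, then
 * store-conditional (stwcx./stdcx.), retrying if another processor touched
 * the location and the reservation was lost.
 *
 * A minimal C sketch of that pattern, written against the OSAtomicAdd32
 * prototype shown below (illustrative only; "store_conditional" stands in
 * for the stwcx. instruction, which stores only while the reservation from
 * the matching lwarx is still held):
 *
 *      int32_t OSAtomicAdd32( int32_t amt, int32_t *value )
 *      {
 *          int32_t oldval, newval;
 *          do {
 *              oldval = *value;                        // lwarx: load and reserve
 *              newval = oldval + amt;                  // add
 *          } while ( !store_conditional(value, newval) );  // stwcx.: retry if reservation lost
 *          return newval;
 *      }
 */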

        .text
        .align  2

atomic_add32:                                   // int32_t OSAtomicAdd32( int32_t amt, int32_t *value );
1:
        lwarx   r5,0,r4                         // load the current value and reserve
        add     r6,r3,r5                        // compute the new value
        stwcx.  r6,0,r4                         // store it back iff the reservation still holds
        bne--   1b                              // lost the reservation, so retry
        mr      r3,r6                           // return the new value
        blr

COMMPAGE_DESCRIPTOR(atomic_add32,_COMM_PAGE_ATOMIC_ADD32,0,0,kCommPageBoth)


atomic_add64:                                   // int64_t OSAtomicAdd64( int64_t amt, int64_t *value );
1:
        ldarx   r5,0,r4                         // load the current doubleword and reserve
        add     r6,r3,r5                        // compute the new value
        stdcx.  r6,0,r4                         // store it back iff the reservation still holds
        bne--   1b                              // lost the reservation, so retry
        mr      r3,r6                           // return the new value
        blr

COMMPAGE_DESCRIPTOR(atomic_add64,_COMM_PAGE_ATOMIC_ADD64,k64Bit,0,kCommPage64)
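
/*
 * The 64-bit add uses ldarx/stdcx., which require a 64-bit processor
 * (k64Bit), and the routine is placed only in the 64-bit commpage
 * (kCommPage64), so only 64-bit processes reach it.  As with the 32-bit
 * version, the amount arrives in r3, the pointer in r4, and the new value
 * is returned in r3, e.g. (hypothetical client code):
 *
 *      int64_t newtotal = OSAtomicAdd64( 1, &total );
 */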

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This is the no-barrier version */
compare_and_swap32_on32:                        // bool OSAtomicCompareAndSwap32( int32_t old, int32_t new, int32_t *value);
1:
        lwarx   r7,0,r5                         // load the current value and reserve
        cmplw   r7,r3                           // does it still equal "old"?
        bne-    2f                              // no, fail
        stwcx.  r4,0,r5                         // store "new" iff the reservation still holds
        bne-    1b                              // lost the reservation, so retry
        li      r3,1                            // return success
        blr
2:
        li      r3,0                            // return failure
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap32_on32,_COMM_PAGE_COMPARE_AND_SWAP32,0,k64Bit,kCommPageBoth)
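
/*
 * Usage sketch for the prototype above (hypothetical client code; the
 * declaration normally comes from OSAtomic.h):
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      bool OSAtomicCompareAndSwap32( int32_t old, int32_t new, int32_t *value );
 *
 *      // Claim a flag exactly once; true only for the caller that won.
 *      static bool claim( int32_t *flag )
 *      {
 *          return OSAtomicCompareAndSwap32( 0, 1, flag );
 *      }
 *
 * The routine returns 1 in r3 when the store succeeds and 0 when the
 * current value no longer matches "old"; it retries only when the
 * reservation is lost, never on a value mismatch.
 */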


/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This is the no-barrier version */
compare_and_swap32_on64:                        // bool OSAtomicCompareAndSwap32( int32_t old, int32_t new, int32_t *value);
1:
        lwarx   r7,0,r5
        cmplw   r7,r3
        bne--   2f
        stwcx.  r4,0,r5
        bne--   1b
        li      r3,1
        blr
2:
        li      r8,-8                           // on 970, must release reservation
        li      r3,0                            // return failure
        stwcx.  r4,r8,r1                        // store into red zone to release
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap32_on64,_COMM_PAGE_COMPARE_AND_SWAP32,k64Bit,0,kCommPageBoth)
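
/*
 * The failure path above differs from the 32-bit-processor version only in
 * the extra stwcx.: when the compare fails, the reservation taken by the
 * lwarx is still outstanding, and on the 970 it is released explicitly.
 * A stwcx. clears the reservation whether or not it actually stores, and
 * the word 8 bytes below the stack pointer (in the red zone, which a leaf
 * routine may scribble on) is a harmless place to aim it.
 */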


/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This is the no-barrier version */
compare_and_swap64:                             // bool OSAtomicCompareAndSwap64( int64_t old, int64_t new, int64_t *value);
1:
        ldarx   r7,0,r5
        cmpld   r7,r3
        bne--   2f
        stdcx.  r4,0,r5
        bne--   1b
        li      r3,1
        blr
2:
        li      r8,-8                           // on 970, must release reservation
        li      r3,0                            // return failure
        stdcx.  r4,r8,r1                        // store into red zone to release
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap64,_COMM_PAGE_COMPARE_AND_SWAP64,k64Bit,0,kCommPage64)

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This version of compare-and-swap incorporates a memory barrier. */
compare_and_swap32_on32b:                       // bool OSAtomicCompareAndSwapBarrier32( int32_t old, int32_t new, int32_t *value);
        eieio                                   // write barrier, NOP'd on a UP
1:
        lwarx   r7,0,r5
        cmplw   r7,r3
        bne-    2f
        stwcx.  r4,0,r5
        bne-    1b
        isync                                   // read barrier, NOP'd on a UP
        li      r3,1
        blr
2:
        li      r3,0                            // return failure
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap32_on32b,_COMM_PAGE_COMPARE_AND_SWAP32B,0,k64Bit,kCommPageBoth+kCommPageSYNC+kCommPageISYNC)
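
/*
 * The Barrier variants bracket the swap with a write barrier before it
 * (eieio here, lwsync on the 970) and a read barrier after it (isync), so
 * stores made before a successful swap are visible to other processors no
 * later than the swap, and later loads cannot be satisfied speculatively
 * from before it.  On uniprocessors the kernel NOPs these barriers when it
 * copies the routine into the commpage (kCommPageSYNC / kCommPageISYNC).
 *
 * Hypothetical use as a simple spinlock acquire (sketch only):
 *
 *      #include <stdbool.h>
 *      #include <stdint.h>
 *
 *      bool OSAtomicCompareAndSwapBarrier32( int32_t old, int32_t new, int32_t *value );
 *
 *      static void spin_lock( int32_t *lock )
 *      {
 *          while ( !OSAtomicCompareAndSwapBarrier32( 0, 1, lock ) )
 *              ;                               // spin until the lock word flips from 0 to 1
 *      }
 */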


/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This version of compare-and-swap incorporates a memory barrier. */
compare_and_swap32_on64b:                       // bool OSAtomicCompareAndSwapBarrier32( int32_t old, int32_t new, int32_t *value);
        lwsync                                  // write barrier, NOP'd on a UP
1:
        lwarx   r7,0,r5
        cmplw   r7,r3
        bne--   2f
        stwcx.  r4,0,r5
        bne--   1b
        isync                                   // read barrier, NOP'd on a UP
        li      r3,1
        blr
2:
        li      r8,-8                           // on 970, must release reservation
        li      r3,0                            // return failure
        stwcx.  r4,r8,r1                        // store into red zone to release
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap32_on64b,_COMM_PAGE_COMPARE_AND_SWAP32B,k64Bit,0,kCommPageBoth+kCommPageSYNC+kCommPageISYNC)


/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This version of compare-and-swap incorporates a memory barrier. */
compare_and_swap64b:                            // bool OSAtomicCompareAndSwapBarrier64( int64_t old, int64_t new, int64_t *value);
        lwsync                                  // write barrier, NOP'd on a UP
1:
        ldarx   r7,0,r5
        cmpld   r7,r3
        bne--   2f
        stdcx.  r4,0,r5
        bne--   1b
        isync                                   // read barrier, NOP'd on a UP
        li      r3,1
        blr
2:
        li      r8,-8                           // on 970, must release reservation
        li      r3,0                            // return failure
        stdcx.  r4,r8,r1                        // store into red zone to release
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap64b,_COMM_PAGE_COMPARE_AND_SWAP64B,k64Bit,0,kCommPage64+kCommPageSYNC+kCommPageISYNC)


atomic_enqueue32:                               // void OSAtomicEnqueue( void **list, void *new, size_t offset);
1:
        lwarx   r6,0,r3                         // get link to 1st on list
        stwx    r6,r4,r5                        // hang list off new node
        eieio                                   // make sure the "stwx" comes before "stwcx." (nop'd on UP)
        stwcx.  r4,0,r3                         // make new 1st on list
        beqlr++
        b       1b

COMMPAGE_DESCRIPTOR(atomic_enqueue32,_COMM_PAGE_ENQUEUE,0,0,kCommPageSYNC+kCommPage32)
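
/*
 * OSAtomicEnqueue pushes "new" onto a LIFO list: r3 is the list head, r4
 * the new element, and r5 the byte offset of the link field within the
 * element.  The eieio between the stwx and the stwcx. guarantees that the
 * element's link field is visible before the element itself becomes the
 * list head; without it another processor could dequeue the element and
 * follow a stale link.  A C usage sketch for the enqueue/dequeue pair
 * follows the dequeue routines below.
 */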


atomic_enqueue64:                               // void OSAtomicEnqueue( void **list, void *new, size_t offset);
1:
        ldarx   r6,0,r3                         // get link to 1st on list
        stdx    r6,r4,r5                        // hang list off new node
        lwsync                                  // make sure the "stdx" comes before the "stdcx." (nop'd on UP)
        stdcx.  r4,0,r3                         // make new 1st on list
        beqlr++
        b       1b

COMMPAGE_DESCRIPTOR(atomic_enqueue64,_COMM_PAGE_ENQUEUE,k64Bit,0,kCommPageSYNC+kCommPage64)


atomic_dequeue32_on32:                          // void* OSAtomicDequeue( void **list, size_t offset);
        mr      r5,r3                           // move list ptr, since r3 will be the return value
1:
        lwarx   r3,0,r5                         // get 1st in list
        cmpwi   r3,0                            // null?
        beqlr                                   // yes, list empty
        lwzx    r6,r3,r4                        // get 2nd
        stwcx.  r6,0,r5                         // make 2nd first
        bne--   1b
        isync                                   // cancel read-aheads (nop'd on UP)
        blr

COMMPAGE_DESCRIPTOR(atomic_dequeue32_on32,_COMM_PAGE_DEQUEUE,0,k64Bit,kCommPageISYNC+kCommPage32)


atomic_dequeue32_on64:                          // void* OSAtomicDequeue( void **list, size_t offset);
        mr      r5,r3                           // move list ptr, since r3 will be the return value
        li      r7,-8                           // use red zone to release reservation if necessary
1:
        lwarx   r3,0,r5                         // get 1st in list
        cmpwi   r3,0                            // null?
        beq     2f
        lwzx    r6,r3,r4                        // get 2nd
        stwcx.  r6,0,r5                         // make 2nd first
        isync                                   // cancel read-aheads (nop'd on UP)
        beqlr++                                 // return dequeued element in r3
        b       1b                              // retry (lost reservation)
2:
        stwcx.  r0,r7,r1                        // on 970, release reservation using red zone
        blr                                     // return null

COMMPAGE_DESCRIPTOR(atomic_dequeue32_on64,_COMM_PAGE_DEQUEUE,k64Bit,0,kCommPageISYNC+kCommPage32)


atomic_dequeue64:                               // void* OSAtomicDequeue( void **list, size_t offset);
        mr      r5,r3                           // move list ptr, since r3 will be the return value
        li      r7,-8                           // use red zone to release reservation if necessary
1:
        ldarx   r3,0,r5                         // get 1st in list
        cmpdi   r3,0                            // null?
        beq     2f
        ldx     r6,r3,r4                        // get 2nd
        stdcx.  r6,0,r5                         // make 2nd first
        isync                                   // cancel read-aheads (nop'd on UP)
        beqlr++                                 // return dequeued element in r3
        b       1b                              // retry (lost reservation)
2:
        stdcx.  r0,r7,r1                        // on 970, release reservation using red zone
        blr                                     // return null

COMMPAGE_DESCRIPTOR(atomic_dequeue64,_COMM_PAGE_DEQUEUE,k64Bit,0,kCommPageISYNC+kCommPage64)
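
/*
 * Usage sketch for the enqueue/dequeue pair (hypothetical client code,
 * using the prototypes from the comments above; "node", "push", and "pop"
 * are names invented for this example):
 *
 *      #include <stddef.h>
 *
 *      void  OSAtomicEnqueue( void **list, void *new, size_t offset );
 *      void *OSAtomicDequeue( void **list, size_t offset );
 *
 *      typedef struct node {
 *          struct node *link;                  // elements are chained through this field
 *          int          payload;
 *      } node_t;
 *
 *      static void *head = NULL;               // shared LIFO list head
 *
 *      static void push( node_t *n )
 *      {
 *          OSAtomicEnqueue( &head, n, offsetof( node_t, link ) );
 *      }
 *
 *      static node_t *pop( void )              // returns NULL when the list is empty
 *      {
 *          return (node_t *)OSAtomicDequeue( &head, offsetof( node_t, link ) );
 *      }
 */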


memory_barrier_up:                              // void OSMemoryBarrier( void )
        blr                                     // nothing to do on UP

COMMPAGE_DESCRIPTOR(memory_barrier_up,_COMM_PAGE_MEMORY_BARRIER,kUP,0,kCommPageBoth)


memory_barrier_mp32:                            // void OSMemoryBarrier( void )
        isync                                   // we use eieio in preference to sync...
        eieio                                   // ...because it is faster
        blr

COMMPAGE_DESCRIPTOR(memory_barrier_mp32,_COMM_PAGE_MEMORY_BARRIER,0,kUP+k64Bit,kCommPage32)


memory_barrier_mp64:                            // void OSMemoryBarrier( void )
        isync
        lwsync                                  // on 970, lwsync is faster than eieio
        blr

COMMPAGE_DESCRIPTOR(memory_barrier_mp64,_COMM_PAGE_MEMORY_BARRIER,k64Bit,kUP,kCommPageBoth)
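
/*
 * OSMemoryBarrier is published in three flavors: a bare blr for
 * uniprocessors, isync+eieio for 32-bit multiprocessors, and isync+lwsync
 * for the 970, where lwsync is the faster choice.  The kernel selects one
 * variant per machine when it populates the commpage.
 *
 * Hypothetical producer/consumer use (sketch only; "use" is a placeholder):
 *
 *      void OSMemoryBarrier( void );
 *
 *      volatile int    ready_flag = 0;
 *      int             shared_data;
 *
 *      // producer thread
 *      shared_data = 42;
 *      OSMemoryBarrier();                      // make the data visible before the flag
 *      ready_flag = 1;
 *
 *      // consumer thread
 *      while ( !ready_flag )
 *          continue;
 *      OSMemoryBarrier();                      // do not let the data read pass the flag read
 *      use( shared_data );
 */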