/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/appleapiopts.h>
#include <ppc/asm.h>                    // EXT, LEXT
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>


/* OSAtomic.h library native implementations. */
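/*
 * Note on the descriptors: each routine below is followed by a
 * COMMPAGE_DESCRIPTOR naming (roughly) the commpage slot it populates, the
 * _cpu_capabilities bits that must be set and must be clear for the kernel
 * to choose that variant, and flags selecting the 32-bit and/or 64-bit
 * commpage; kCommPageSYNC/kCommPageISYNC mark barrier instructions the
 * kernel may NOP out on uniprocessors (see the "NOP'd on a UP" comments).
 */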

        .text
        .align  2

atomic_add32:                           // int32_t OSAtomicAdd32( int32_t amt, int32_t *value );
1:
        lwarx   r5,0,r4                 // load current value and take reservation
        add     r6,r3,r5                // compute new value
        stwcx.  r6,0,r4                 // store conditionally; fails if reservation was lost
        bne--   1b                      // lost reservation, try again
        mr      r3,r6                   // return new value
        blr

COMMPAGE_DESCRIPTOR(atomic_add32,_COMM_PAGE_ATOMIC_ADD32,0,0,kCommPageBoth)
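
/*
 * Illustrative use (a sketch, not part of this file): user code reaches the
 * commpage copy of the routine above through Libc's OSAtomicAdd32, which
 * returns the new value:
 *
 *      int32_t counter = 0;
 *      int32_t n = OSAtomicAdd32( 1, &counter );      // n == 1, counter == 1
 */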


atomic_add64:                           // int64_t OSAtomicAdd64( int64_t amt, int64_t *value );
1:
        ldarx   r5,0,r4                 // load current value and take reservation
        add     r6,r3,r5                // compute new value
        stdcx.  r6,0,r4                 // store conditionally; fails if reservation was lost
        bne--   1b                      // lost reservation, try again
        mr      r3,r6                   // return new value
        blr

COMMPAGE_DESCRIPTOR(atomic_add64,_COMM_PAGE_ATOMIC_ADD64,k64Bit,0,kCommPage64)

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This is the no-barrier version */
compare_and_swap32_on32:                // bool OSAtomicCompareAndSwap32( int32_t old, int32_t new, int32_t *value);
1:
        lwarx   r7,0,r5                 // load current value and take reservation
        cmplw   r7,r3                   // does it match the expected old value?
        bne-    2f                      // no, fail
        stwcx.  r4,0,r5                 // yes, try to store the new value
        bne-    1b                      // lost reservation, try again
        li      r3,1                    // return success
        blr
2:
        li      r3,0                    // return failure
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap32_on32,_COMM_PAGE_COMPARE_AND_SWAP32,0,k64Bit,kCommPageBoth)
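
/*
 * Illustrative use (a sketch, not part of this file): a typical lock-free
 * update loop built on the compare-and-swap above:
 *
 *      int32_t oldval, newval;
 *      do {
 *              oldval = *value;
 *              newval = oldval + 1;
 *      } while ( !OSAtomicCompareAndSwap32( oldval, newval, value ) );
 */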


/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This is the no-barrier version */
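/* Note on the failure path of the 64-bit-capable variants below: the dummy
 * store-conditional into the caller's red zone (sp-8) exists only to release
 * the reservation taken by the lwarx/ldarx, which the 970 would otherwise
 * leave standing; it clobbers nothing but red-zone scratch space. */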
compare_and_swap32_on64:                // bool OSAtomicCompareAndSwap32( int32_t old, int32_t new, int32_t *value);
1:
        lwarx   r7,0,r5                 // load current value and take reservation
        cmplw   r7,r3                   // does it match the expected old value?
        bne--   2f                      // no, fail
        stwcx.  r4,0,r5                 // yes, try to store the new value
        bne--   1b                      // lost reservation, try again
        li      r3,1                    // return success
        blr
2:
        li      r8,-8                   // on 970, must release reservation
        li      r3,0                    // return failure
        stwcx.  r4,r8,r1                // store into red zone to release
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap32_on64,_COMM_PAGE_COMPARE_AND_SWAP32,k64Bit,0,kCommPageBoth)


/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This is the no-barrier version */
compare_and_swap64:                     // bool OSAtomicCompareAndSwap64( int64_t old, int64_t new, int64_t *value);
1:
        ldarx   r7,0,r5                 // load current value and take reservation
        cmpld   r7,r3                   // does it match the expected old value?
        bne--   2f                      // no, fail
        stdcx.  r4,0,r5                 // yes, try to store the new value
        bne--   1b                      // lost reservation, try again
        li      r3,1                    // return success
        blr
2:
        li      r8,-8                   // on 970, must release reservation
        li      r3,0                    // return failure
        stdcx.  r4,r8,r1                // store into red zone to release
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap64,_COMM_PAGE_COMPARE_AND_SWAP64,k64Bit,0,kCommPage64)

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This version of compare-and-swap incorporates a memory barrier. */
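/* The barrier variants bracket the swap: the eieio (lwsync on 64-bit CPUs)
 * ahead of the loop orders earlier stores before the swap, and the isync
 * after a successful swap keeps later loads from completing early. Both are
 * NOP'd by the kernel on uniprocessors, per kCommPageSYNC/kCommPageISYNC. */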
compare_and_swap32_on32b:               // bool OSAtomicCompareAndSwapBarrier32( int32_t old, int32_t new, int32_t *value);
        eieio                           // write barrier, NOP'd on a UP
1:
        lwarx   r7,0,r5                 // load current value and take reservation
        cmplw   r7,r3                   // does it match the expected old value?
        bne-    2f                      // no, fail
        stwcx.  r4,0,r5                 // yes, try to store the new value
        bne-    1b                      // lost reservation, try again
        isync                           // read barrier, NOP'd on a UP
        li      r3,1                    // return success
        blr
2:
        li      r3,0                    // return failure
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap32_on32b,_COMM_PAGE_COMPARE_AND_SWAP32B,0,k64Bit,kCommPageBoth+kCommPageSYNC+kCommPageISYNC)


/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This version of compare-and-swap incorporates a memory barrier. */
compare_and_swap32_on64b:               // bool OSAtomicCompareAndSwapBarrier32( int32_t old, int32_t new, int32_t *value);
        lwsync                          // write barrier, NOP'd on a UP
1:
        lwarx   r7,0,r5                 // load current value and take reservation
        cmplw   r7,r3                   // does it match the expected old value?
        bne--   2f                      // no, fail
        stwcx.  r4,0,r5                 // yes, try to store the new value
        bne--   1b                      // lost reservation, try again
        isync                           // read barrier, NOP'd on a UP
        li      r3,1                    // return success
        blr
2:
        li      r8,-8                   // on 970, must release reservation
        li      r3,0                    // return failure
        stwcx.  r4,r8,r1                // store into red zone to release
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap32_on64b,_COMM_PAGE_COMPARE_AND_SWAP32B,k64Bit,0,kCommPageBoth+kCommPageSYNC+kCommPageISYNC)


/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This version of compare-and-swap incorporates a memory barrier. */
compare_and_swap64b:                    // bool OSAtomicCompareAndSwapBarrier64( int64_t old, int64_t new, int64_t *value);
        lwsync                          // write barrier, NOP'd on a UP
1:
        ldarx   r7,0,r5                 // load current value and take reservation
        cmpld   r7,r3                   // does it match the expected old value?
        bne--   2f                      // no, fail
        stdcx.  r4,0,r5                 // yes, try to store the new value
        bne--   1b                      // lost reservation, try again
        isync                           // read barrier, NOP'd on a UP
        li      r3,1                    // return success
        blr
2:
        li      r8,-8                   // on 970, must release reservation
        li      r3,0                    // return failure
        stdcx.  r4,r8,r1                // store into red zone to release
        blr

COMMPAGE_DESCRIPTOR(compare_and_swap64b,_COMM_PAGE_COMPARE_AND_SWAP64B,k64Bit,0,kCommPage64+kCommPageSYNC+kCommPageISYNC)

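/*
 * Illustrative use of the enqueue/dequeue routines below (a sketch, not part
 * of this file): 'offset' is the byte offset of the link field inside each
 * element, so elements of any layout can be threaded onto the list.
 *
 *      struct elem { int data; struct elem *link; };   // hypothetical element
 *      void *list = NULL;
 *      struct elem e;
 *      OSAtomicEnqueue( &list, &e, offsetof(struct elem, link) );
 *      struct elem *p = OSAtomicDequeue( &list, offsetof(struct elem, link) );
 */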
atomic_enqueue32:                       // void OSAtomicEnqueue( void **list, void *new, size_t offset);
1:
        lwarx   r6,0,r3                 // get link to 1st on list
        stwx    r6,r4,r5                // hang list off new node
        eieio                           // make sure the "stwx" comes before "stwcx." (nop'd on UP)
        stwcx.  r4,0,r3                 // make new 1st on list
        beqlr++                         // stored successfully, done
        b       1b                      // lost reservation, try again

COMMPAGE_DESCRIPTOR(atomic_enqueue32,_COMM_PAGE_ENQUEUE,0,0,kCommPageSYNC+kCommPage32)


atomic_enqueue64:                       // void OSAtomicEnqueue( void **list, void *new, size_t offset);
1:
        ldarx   r6,0,r3                 // get link to 1st on list
        stdx    r6,r4,r5                // hang list off new node
        lwsync                          // make sure the "stdx" comes before the "stdcx." (nop'd on UP)
        stdcx.  r4,0,r3                 // make new 1st on list
        beqlr++                         // stored successfully, done
        b       1b                      // lost reservation, try again

COMMPAGE_DESCRIPTOR(atomic_enqueue64,_COMM_PAGE_ENQUEUE,k64Bit,0,kCommPageSYNC+kCommPage64)


atomic_dequeue32_on32:                  // void* OSAtomicDequeue( void **list, size_t offset);
        mr      r5,r3                   // move list head ptr so r3 can hold the result
1:
        lwarx   r3,0,r5                 // get 1st in list
        cmpwi   r3,0                    // null?
        beqlr                           // yes, list empty
        lwzx    r6,r3,r4                // get 2nd
        stwcx.  r6,0,r5                 // make 2nd first
        bne--   1b                      // lost reservation, try again
        isync                           // cancel read-aheads (nop'd on UP)
        blr

COMMPAGE_DESCRIPTOR(atomic_dequeue32_on32,_COMM_PAGE_DEQUEUE,0,k64Bit,kCommPageISYNC+kCommPage32)


atomic_dequeue32_on64:                  // void* OSAtomicDequeue( void **list, size_t offset);
        mr      r5,r3                   // move list head ptr so r3 can hold the result
        li      r7,-8                   // use red zone to release reservation if necessary
1:
        lwarx   r3,0,r5                 // get 1st in list
        cmpwi   r3,0                    // null?
        beq     2f                      // yes, list empty
        lwzx    r6,r3,r4                // get 2nd
        stwcx.  r6,0,r5                 // make 2nd first
        isync                           // cancel read-aheads (nop'd on UP)
        beqlr++                         // return dequeued element in r3
        b       1b                      // retry (lost reservation)
2:
        stwcx.  r0,r7,r1                // on 970, release reservation using red zone
        blr                             // return null

COMMPAGE_DESCRIPTOR(atomic_dequeue32_on64,_COMM_PAGE_DEQUEUE,k64Bit,0,kCommPageISYNC+kCommPage32)


atomic_dequeue64:                       // void* OSAtomicDequeue( void **list, size_t offset);
        mr      r5,r3                   // move list head ptr so r3 can hold the result
        li      r7,-8                   // use red zone to release reservation if necessary
1:
        ldarx   r3,0,r5                 // get 1st in list
        cmpdi   r3,0                    // null?
        beq     2f                      // yes, list empty
        ldx     r6,r3,r4                // get 2nd
        stdcx.  r6,0,r5                 // make 2nd first
        isync                           // cancel read-aheads (nop'd on UP)
        beqlr++                         // return dequeued element in r3
        b       1b                      // retry (lost reservation)
2:
        stdcx.  r0,r7,r1                // on 970, release reservation using red zone
        blr                             // return null

COMMPAGE_DESCRIPTOR(atomic_dequeue64,_COMM_PAGE_DEQUEUE,k64Bit,0,kCommPageISYNC+kCommPage64)


memory_barrier_up:                      // void OSMemoryBarrier( void )
        blr                             // nothing to do on UP

COMMPAGE_DESCRIPTOR(memory_barrier_up,_COMM_PAGE_MEMORY_BARRIER,kUP,0,kCommPageBoth)


memory_barrier_mp32:                    // void OSMemoryBarrier( void )
        isync                           // we use eieio in preference to sync...
        eieio                           // ...because it is faster
        blr

COMMPAGE_DESCRIPTOR(memory_barrier_mp32,_COMM_PAGE_MEMORY_BARRIER,0,kUP+k64Bit,kCommPage32)


memory_barrier_mp64:                    // void OSMemoryBarrier( void )
        isync
        lwsync                          // on 970, lwsync is faster than eieio
        blr

COMMPAGE_DESCRIPTOR(memory_barrier_mp64,_COMM_PAGE_MEMORY_BARRIER,k64Bit,kUP,kCommPageBoth)
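
/*
 * Illustrative use (a sketch, not part of this file; the names are
 * placeholders): OSMemoryBarrier() separates a data write from the flag
 * that publishes it:
 *
 *      shared_data = compute();
 *      OSMemoryBarrier();              // make the data visible before the flag
 *      data_ready = 1;
 */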