/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/appleapiopts.h>
#include <ppc/asm.h>                            // EXT, LEXT
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>

/* OSAtomic.h library native implementations. */
atomic_add32:                           // int32_t OSAtomicAdd32( int32_t amt, int32_t *value );
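// A sketch of the missing loop body, assuming the standard lwarx/stwcx.
// retry idiom and the usual argument registers (r3 = amt, r4 = &value,
// result in r3); the choice of r5 as scratch is an assumption, not the
// verbatim original.
1:
        lwarx   r5,0,r4                 // get the current value
        add     r5,r5,r3                // add in the amount
        stwcx.  r5,0,r4                 // attempt the store
        bne--   1b                      // lost reservation, try again
        mr      r3,r5                   // return the new value
        blr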
COMMPAGE_DESCRIPTOR(atomic_add32,_COMM_PAGE_ATOMIC_ADD32,0,0,kCommPageBoth)

atomic_add64:                           // int64_t OSAtomicAdd64( int64_t amt, int64_t *value );
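// 64-bit variant, installed only in the 64-bit commpage (see the descriptor
// below).  As above, a sketch assuming r3 = amt, r4 = &value, and r5 as
// scratch, using the doubleword forms of the reservation instructions.
1:
        ldarx   r5,0,r4                 // get the current value
        add     r5,r5,r3                // add in the amount
        stdcx.  r5,0,r4                 // attempt the store
        bne--   1b                      // lost reservation, try again
        mr      r3,r5                   // return the new value
        blr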
COMMPAGE_DESCRIPTOR(atomic_add64,_COMM_PAGE_ATOMIC_ADD64,k64Bit,0,kCommPage64)

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This is the no-barrier version */
compare_and_swap32_on32:                // bool OSAtomicCompareAndSwap32( int32_t old, int32_t new, int32_t *value);
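// A sketch of the missing compare-and-swap loop, assuming the usual
// lwarx/stwcx. pattern with r3 = old, r4 = new, r5 = &value (per the
// prototype above); r7 as scratch is an assumption.  The failure path at
// label 2 is the surviving code below.
1:
        lwarx   r7,0,r5                 // get the current value
        cmplw   r7,r3                   // does it match "old"?
        bne-    2f                      // no, fail
        stwcx.  r4,0,r5                 // try to store "new"
        bne-    1b                      // lost reservation, try again
        li      r3,1                    // return success
        blr
2: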
        li      r3,0                    // return failure
        blr
COMMPAGE_DESCRIPTOR(compare_and_swap32_on32,_COMM_PAGE_COMPARE_AND_SWAP32,0,k64Bit,kCommPageBoth)

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This is the no-barrier version */
compare_and_swap32_on64:                // bool OSAtomicCompareAndSwap32( int32_t old, int32_t new, int32_t *value);
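// A sketch of the missing compare-and-swap loop (r3 = old, r4 = new,
// r5 = &value; r7 scratch is an assumption); on a mismatch we branch to the
// surviving 970 reservation-release path at label 2 below.
1:
        lwarx   r7,0,r5                 // get the current value
        cmplw   r7,r3                   // does it match "old"?
        bne--   2f                      // no, fail (release reservation below)
        stwcx.  r4,0,r5                 // try to store "new"
        bne--   1b                      // lost reservation, try again
        li      r3,1                    // return success
        blr
2: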
        li      r8,-8                   // on 970, must release reservation
        li      r3,0                    // return failure
        stwcx.  r4,r8,r1                // store into red zone to release
        blr
COMMPAGE_DESCRIPTOR(compare_and_swap32_on64,_COMM_PAGE_COMPARE_AND_SWAP32,k64Bit,0,kCommPageBoth)

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This is the no-barrier version */
compare_and_swap64:                     // bool OSAtomicCompareAndSwap64( int64_t old, int64_t new, int64_t *value);
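// A sketch of the missing compare-and-swap loop using the doubleword forms
// (r3 = old, r4 = new, r5 = &value; r7 scratch is an assumption); on a
// mismatch we branch to the surviving 970 release path at label 2 below.
1:
        ldarx   r7,0,r5                 // get the current value
        cmpld   r7,r3                   // does it match "old"?
        bne--   2f                      // no, fail (release reservation below)
        stdcx.  r4,0,r5                 // try to store "new"
        bne--   1b                      // lost reservation, try again
        li      r3,1                    // return success
        blr
2: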
        li      r8,-8                   // on 970, must release reservation
        li      r3,0                    // return failure
        stdcx.  r4,r8,r1                // store into red zone to release
        blr
COMMPAGE_DESCRIPTOR(compare_and_swap64,_COMM_PAGE_COMPARE_AND_SWAP64,k64Bit,0,kCommPage64)

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This version of compare-and-swap incorporates a memory barrier. */
compare_and_swap32_on32b:               // bool OSAtomicCompareAndSwapBarrier32( int32_t old, int32_t new, int32_t *value);
        eieio                           // write barrier, NOP'd on a UP
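// A sketch of the missing compare-and-swap loop, assuming the same pattern
// as the no-barrier version (r3 = old, r4 = new, r5 = &value; r7 scratch is
// an assumption); the surviving isync below is the read barrier taken on
// the success path.
1:
        lwarx   r7,0,r5                 // get the current value
        cmplw   r7,r3                   // does it match "old"?
        bne-    2f                      // no, fail
        stwcx.  r4,0,r5                 // try to store "new"
        bne-    1b                      // lost reservation, try again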
        isync                           // read barrier, NOP'd on a UP
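// Success return and the label for the failure path (a reconstructed sketch):
        li      r3,1                    // return success
        blr
2: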
        li      r3,0                    // return failure
        blr
COMMPAGE_DESCRIPTOR(compare_and_swap32_on32b,_COMM_PAGE_COMPARE_AND_SWAP32B,0,k64Bit,kCommPageBoth+kCommPageSYNC+kCommPageISYNC)

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This version of compare-and-swap incorporates a memory barrier. */
compare_and_swap32_on64b:               // bool OSAtomicCompareAndSwapBarrier32( int32_t old, int32_t new, int32_t *value);
        lwsync                          // write barrier, NOP'd on a UP
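// A sketch of the missing compare-and-swap loop (r3 = old, r4 = new,
// r5 = &value; r7 scratch is an assumption); a mismatch branches to the
// surviving 970 release path at label 2, and the surviving isync below is
// the read barrier on the success path.
1:
        lwarx   r7,0,r5                 // get the current value
        cmplw   r7,r3                   // does it match "old"?
        bne--   2f                      // no, fail (release reservation below)
        stwcx.  r4,0,r5                 // try to store "new"
        bne--   1b                      // lost reservation, try again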
        isync                           // read barrier, NOP'd on a UP
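// Success return and the label for the failure path (a reconstructed sketch):
        li      r3,1                    // return success
        blr
2: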
        li      r8,-8                   // on 970, must release reservation
        li      r3,0                    // return failure
        stwcx.  r4,r8,r1                // store into red zone to release
        blr
COMMPAGE_DESCRIPTOR(compare_and_swap32_on64b,_COMM_PAGE_COMPARE_AND_SWAP32B,k64Bit,0,kCommPageBoth+kCommPageSYNC+kCommPageISYNC)

/* WARNING: Libc clients assume compare-and-swap preserves r4, r5, and r9-r12! */
/* This version of compare-and-swap incorporates a memory barrier. */
compare_and_swap64b:                    // bool OSAtomicCompareAndSwapBarrier64( int64_t old, int64_t new, int64_t *value);
        lwsync                          // write barrier, NOP'd on a UP
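// A sketch of the missing compare-and-swap loop using the doubleword forms
// (r3 = old, r4 = new, r5 = &value; r7 scratch is an assumption); a
// mismatch branches to the surviving 970 release path at label 2, and the
// surviving isync below is the success-path read barrier.
1:
        ldarx   r7,0,r5                 // get the current value
        cmpld   r7,r3                   // does it match "old"?
        bne--   2f                      // no, fail (release reservation below)
        stdcx.  r4,0,r5                 // try to store "new"
        bne--   1b                      // lost reservation, try again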
        isync                           // read barrier, NOP'd on a UP
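// Success return and the label for the failure path (a reconstructed sketch):
        li      r3,1                    // return success
        blr
2: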
        li      r8,-8                   // on 970, must release reservation
        li      r3,0                    // return failure
        stdcx.  r4,r8,r1                // store into red zone to release
        blr
COMMPAGE_DESCRIPTOR(compare_and_swap64b,_COMM_PAGE_COMPARE_AND_SWAP64B,k64Bit,0,kCommPage64+kCommPageSYNC+kCommPageISYNC)

atomic_enqueue32:                       // void OSAtomicEnqueue( void **list, void *new, size_t offset);
1:
        lwarx   r6,0,r3                 // get link to 1st on list
        stwx    r6,r4,r5                // hang list off new node
        eieio                           // make sure the "stwx" comes before "stwcx." (nop'd on UP)
        stwcx.  r4,0,r3                 // make new 1st on list
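// The tail of the routine is missing; a sketch using the same retry idiom
// as the dequeue routines below:
        beqlr++                         // done if the store succeeded
        b       1b                      // retry (lost reservation)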
COMMPAGE_DESCRIPTOR(atomic_enqueue32,_COMM_PAGE_ENQUEUE,0,0,kCommPageSYNC+kCommPage32)

atomic_enqueue64:                       // void OSAtomicEnqueue( void **list, void *new, size_t offset);
1:
        ldarx   r6,0,r3                 // get link to 1st on list
        stdx    r6,r4,r5                // hang list off new node
        lwsync                          // make sure the "stdx" comes before the "stdcx." (nop'd on UP)
        stdcx.  r4,0,r3                 // make new 1st on list
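// The tail of the routine is missing; a sketch using the same retry idiom
// as the dequeue routines below:
        beqlr++                         // done if the store succeeded
        b       1b                      // retry (lost reservation)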
COMMPAGE_DESCRIPTOR(atomic_enqueue64,_COMM_PAGE_ENQUEUE,k64Bit,0,kCommPageSYNC+kCommPage64)

atomic_dequeue32_on32:                  // void* OSAtomicDequeue( void **list, size_t offset);
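// The entry sequence is missing.  A sketch: staging the list-head pointer
// in r5, so that r3 is free to return the dequeued element, is an
// assumption.
        mr      r5,r3                   // r5 = &head, freeing r3 for the result
1: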
        lwarx   r3,0,r5                 // get 1st in list
        cmpwi   r3,0                    // null?
        beqlr                           // yes, list empty
        lwzx    r6,r3,r4                // get 2nd
        stwcx.  r6,0,r5                 // make 2nd first
        isync                           // cancel read-aheads (nop'd on UP)
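// The tail is missing; a sketch following the 64-bit variant below:
        beqlr++                         // store succeeded, return 1st element in r3
        b       1b                      // retry (lost reservation)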
COMMPAGE_DESCRIPTOR(atomic_dequeue32_on32,_COMM_PAGE_DEQUEUE,0,k64Bit,kCommPageISYNC+kCommPage32)

atomic_dequeue32_on64:                  // void* OSAtomicDequeue( void **list, size_t offset);
        li      r7,-8                   // use red zone to release reservation if necessary
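// Entry staging and the loop label are missing.  A sketch: using r5 for the
// list-head pointer, so that r3 can return the element, is an assumption.
        mr      r5,r3                   // r5 = &head, freeing r3 for the result
1: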
        lwarx   r3,0,r5                 // get 1st in list
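// The empty-list test is missing; a sketch:
        cmpwi   r3,0                    // null?
        beq--   2f                      // yes, list empty: release reservation and return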
        lwzx    r6,r3,r4                // get 2nd
        stwcx.  r6,0,r5                 // make 2nd first
        isync                           // cancel read-aheads (nop'd on UP)
        beqlr++                         // return next element in r3
        b       1b                      // retry (lost reservation)
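// Label for the empty-list exit (reconstructed):
2: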
        stwcx.  r0,r7,r1                // on 970, release reservation using red zone
        blr                             // return NULL
COMMPAGE_DESCRIPTOR(atomic_dequeue32_on64,_COMM_PAGE_DEQUEUE,k64Bit,0,kCommPageISYNC+kCommPage32)

atomic_dequeue64:                       // void* OSAtomicDequeue( void **list, size_t offset);
        li      r7,-8                   // use red zone to release reservation if necessary
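// Entry staging and the loop label are missing.  A sketch, as in the 32-bit
// variant above; the use of r5 for the list-head pointer is an assumption.
        mr      r5,r3                   // r5 = &head, freeing r3 for the result
1: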
        ldarx   r3,0,r5                 // get 1st in list
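// The empty-list test is missing; a sketch:
        cmpdi   r3,0                    // null?
        beq--   2f                      // yes, list empty: release reservation and return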
        ldx     r6,r3,r4                // get 2nd
        stdcx.  r6,0,r5                 // make 2nd first
        isync                           // cancel read-aheads (nop'd on UP)
        beqlr++                         // return next element in r3
        b       1b                      // retry (lost reservation)
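// Label for the empty-list exit (reconstructed):
2: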
        stdcx.  r0,r7,r1                // on 970, release reservation using red zone
        blr                             // return NULL
COMMPAGE_DESCRIPTOR(atomic_dequeue64,_COMM_PAGE_DEQUEUE,k64Bit,0,kCommPageISYNC+kCommPage64)

memory_barrier_up:                      // void OSMemoryBarrier( void )
        blr                             // nothing to do on UP

COMMPAGE_DESCRIPTOR(memory_barrier_up,_COMM_PAGE_MEMORY_BARRIER,kUP,0,kCommPageBoth)
memory_barrier_mp32:                    // void OSMemoryBarrier( void )
        isync                           // we use eieio in preference to sync...
        eieio                           // ...because it is faster
        blr
COMMPAGE_DESCRIPTOR(memory_barrier_mp32,_COMM_PAGE_MEMORY_BARRIER,0,kUP+k64Bit,kCommPage32)

memory_barrier_mp64:                    // void OSMemoryBarrier( void )
        lwsync                          // on 970, lwsync is faster than eieio
        blr
COMMPAGE_DESCRIPTOR(memory_barrier_mp64,_COMM_PAGE_MEMORY_BARRIER,k64Bit,kUP,kCommPageBoth)