diff --git a/osfmk/ppc/commpage/spinlocks.s b/osfmk/ppc/commpage/spinlocks.s
index d619a6e8eba7b2fcf36c5ae4de1e435f3f31d356..480f49050f06ba6b5dc94f9dac03bc8864f1e1c4 100644
--- a/osfmk/ppc/commpage/spinlocks.s
+++ b/osfmk/ppc/commpage/spinlocks.s
@@ -1,16 +1,19 @@
 /*
  * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  * 
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 
 #include <sys/appleapiopts.h>
@@ -30,72 +33,68 @@
 
         .text
         .align 2
-        .globl EXT(spinlock_32_try_mp)
-        .globl EXT(spinlock_32_try_up)
-        .globl EXT(spinlock_32_lock_mp)
-        .globl EXT(spinlock_32_lock_up)
-        .globl EXT(spinlock_32_unlock_mp)
-        .globl EXT(spinlock_32_unlock_up)
-        
-        .globl EXT(spinlock_64_try_mp)
-        .globl EXT(spinlock_64_try_up)
-        .globl EXT(spinlock_64_lock_mp)
-        .globl EXT(spinlock_64_lock_up)
-        .globl EXT(spinlock_64_unlock_mp)
-        .globl EXT(spinlock_64_unlock_up)
-        
-        .globl EXT(spinlock_relinquish)
 
 #define        MP_SPIN_TRIES   1000
 
 
-// The user mode spinlock library.  There are many versions,
-// in order to take advantage of a few special cases:
-//     - no barrier instructions (SYNC,ISYNC) are needed if UP
-//     - 64-bit processors can use LWSYNC instead of SYNC (if MP)
-//     - branch hints appropriate to the processor (+ vs ++ etc)
-//     - potentially custom relinquish strategies (not used at present)
-//     - fixes for errata as necessary
+/* The user mode spinlock library.  There are many versions,
+ * in order to take advantage of a few special cases:
+ *     - no barrier instructions (SYNC,ISYNC) are needed if UP
+ *     - 64-bit processors can use LWSYNC instead of SYNC (if MP)
+ *  - 32-bit processors can use ISYNC/EIEIO instead of SYNC (if MP)
+ *     - branch hints appropriate to the processor (+ vs ++ etc)
+ *     - potentially custom relinquish strategies (not used at present)
+ *     - fixes for errata as necessary
+ *
+ * The convention for lockwords is that 0==free and -1==locked.
+ */ 
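(For readers following along in C: a minimal model of the lockword convention described above, where 0 == free and -1 == locked. The type and function names here are illustrative, not Apple's API; the GCC builtin stands in for the lwarx/stwcx. sequences below.)

    #include <stdint.h>

    /* Illustrative lockword model: 0 means free, -1 means locked. */
    typedef int32_t lockword_t;

    /* One acquire attempt as an atomic compare-and-swap; the builtin
     * plays the role of the lwarx/stwcx. pair in the assembly. */
    static int lockword_try(lockword_t *lw)
    {
        return __sync_bool_compare_and_swap(lw, 0, -1);
    }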
 
 
 spinlock_32_try_mp:
+               mr              r5, r3
+               li              r3, 1
 1:
-        lwarx  r4,0,r3
+        lwarx  r4,0,r5
+               li              r6,-1           // locked == -1
         cmpwi  r4,0
         bne-   2f
-        stwcx. r3,0,r3
+        stwcx. r6,0,r5
         isync                          // cancel speculative execution
         beqlr+
         b              1b
 2:
-        li             r3,0
+        li             r3,0        // we did not get the lock
         blr
 
-    COMMPAGE_DESCRIPTOR(spinlock_32_try_mp,_COMM_PAGE_SPINLOCK_TRY,0,k64Bit+kUP,0)
+       COMMPAGE_DESCRIPTOR(spinlock_32_try_mp,_COMM_PAGE_SPINLOCK_TRY,0,k64Bit+kUP,kCommPage32)
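(The rewritten prologue above — mr r5,r3; li r3,1 — parks the lock address in r5 and preloads the success return value, so the beqlr+ exit returns 1 with no extra instruction; only the failure path reloads r3 with 0. The same contract sketched in C, with an illustrative name rather than the commpage symbol:)

    #include <stdint.h>

    /* Contract of the try entry: returns nonzero iff the lock was taken. */
    static int spinlock_try_model(int32_t *lock)
    {
        int r3 = 1;                            /* li r3,1: assume success */
        if (!__sync_bool_compare_and_swap(lock, 0, -1))
            r3 = 0;                            /* li r3,0: did not get it */
        return r3;
    }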
         
 
 spinlock_32_try_up:
+               mr              r5, r3
+               li              r3, 1
 1:
-        lwarx  r4,0,r3
+        lwarx  r4,0,r5
+               li              r6,-1           // locked == -1
         cmpwi  r4,0
         bne-   2f
-        stwcx. r3,0,r3
+        stwcx. r6,0,r5
         beqlr+
         b              1b
 2:
-        li             r3,0
+        li             r3,0        // we did not get the lock
         blr
 
-    COMMPAGE_DESCRIPTOR(spinlock_32_try_up,_COMM_PAGE_SPINLOCK_TRY,kUP,k64Bit,0)
+    COMMPAGE_DESCRIPTOR(spinlock_32_try_up,_COMM_PAGE_SPINLOCK_TRY,kUP,k64Bit,kCommPage32)
 
 
 spinlock_32_lock_mp:
         li             r5,MP_SPIN_TRIES
 1:
         lwarx  r4,0,r3
+               li              r6,-1           // locked == -1
         cmpwi  r4,0
         bne-   2f
-        stwcx. r3,0,r3
+        stwcx. r6,0,r3
         isync                          // cancel speculative execution
         beqlr+                         // we return void
         b              1b
@@ -104,28 +103,30 @@ spinlock_32_lock_mp:
         bne            1b
         ba             _COMM_PAGE_RELINQUISH
 
-    COMMPAGE_DESCRIPTOR(spinlock_32_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,0,k64Bit+kUP,0)
+    COMMPAGE_DESCRIPTOR(spinlock_32_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,0,k64Bit+kUP,kCommPage32)
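(On MP the lock path spins at most MP_SPIN_TRIES — 1000 — times before branching to _COMM_PAGE_RELINQUISH so the lock holder can run. A sketch of that policy in C, with sched_yield() as a portable stand-in for the relinquish entry:)

    #include <stdint.h>
    #include <sched.h>

    #define MP_SPIN_TRIES 1000

    /* Bounded spin, then yield and start over; a sketch of the policy,
     * not the commpage code itself. */
    static void spinlock_lock_model(int32_t *lock)
    {
        for (;;) {
            int tries = MP_SPIN_TRIES;
            while (tries-- > 0)
                if (__sync_bool_compare_and_swap(lock, 0, -1))
                    return;                    /* got the lock */
            sched_yield();                     /* ~ _COMM_PAGE_RELINQUISH */
        }
    }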
 
 
 spinlock_32_lock_up:
 1:
         lwarx  r4,0,r3
+               li              r6,-1           // locked == -1
         cmpwi  r4,0
         bnea-  _COMM_PAGE_RELINQUISH   // always depress on UP (let lock owner run)
-        stwcx. r3,0,r3
+        stwcx. r6,0,r3
         beqlr+                         // we return void
         b              1b
 
-    COMMPAGE_DESCRIPTOR(spinlock_32_lock_up,_COMM_PAGE_SPINLOCK_LOCK,kUP,k64Bit,0)
+    COMMPAGE_DESCRIPTOR(spinlock_32_lock_up,_COMM_PAGE_SPINLOCK_LOCK,kUP,k64Bit,kCommPage32)
 
 
 spinlock_32_unlock_mp:
         li             r4,0
-        sync                           // complete prior stores before unlock
+        isync                          // complete prior stores before unlock
+               eieio                           // (using isync/eieio is faster than a sync)
         stw            r4,0(r3)
         blr
 
-    COMMPAGE_DESCRIPTOR(spinlock_32_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,0,k64Bit+kUP,0)
+    COMMPAGE_DESCRIPTOR(spinlock_32_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,0,k64Bit+kUP,kCommPage32)
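(Unlock must act as a release: stores made inside the critical section have to be visible before the lockword clears. The change above trades the full sync for isync+eieio on 32-bit MP — and the 64-bit path below uses lwsync — cheaper barriers that still order prior stores. In portable C11 terms this is simply a release store; a sketch, not Apple's code:)

    #include <stdatomic.h>
    #include <stdint.h>

    /* Release the lock: memory_order_release plays the role of the
     * isync/eieio (32-bit) or lwsync (64-bit) barriers used here. */
    static void spinlock_unlock_model(_Atomic int32_t *lock)
    {
        atomic_store_explicit(lock, 0, memory_order_release);
    }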
 
 
 spinlock_32_unlock_up:
@@ -133,51 +134,58 @@ spinlock_32_unlock_up:
         stw            r4,0(r3)
         blr
 
-    COMMPAGE_DESCRIPTOR(spinlock_32_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,kUP,k64Bit,0)
+    COMMPAGE_DESCRIPTOR(spinlock_32_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,kUP,k64Bit,kCommPage32)
 
 
 spinlock_64_try_mp:
+               mr              r5, r3
+               li              r3, 1
 1:
-        lwarx  r4,0,r3
+        lwarx  r4,0,r5
+               li              r6,-1           // locked == -1
         cmpwi  r4,0
         bne--  2f
-        stwcx. r3,0,r3
+        stwcx. r6,0,r5
         isync                          // cancel speculative execution
         beqlr++
         b              1b
 2:
         li             r6,-4
-        stwcx. r3,r6,r1        // clear the pending reservation (using red zone)
-        li             r3,0            // Pass failure
+        stwcx. r5,r6,r1        // clear the pending reservation (using red zone)
+        li             r3,0        // we did not get the lock
         blr
 
-    COMMPAGE_DESCRIPTOR(spinlock_64_try_mp,_COMM_PAGE_SPINLOCK_TRY,k64Bit,kUP,0)
+    COMMPAGE_DESCRIPTOR(spinlock_64_try_mp,_COMM_PAGE_SPINLOCK_TRY,k64Bit,kUP,kCommPageBoth)
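(A failed 64-bit try would otherwise leave the lwarx reservation dangling, so the exit path kills it with a dummy stwcx. into the red zone below the stack pointer, at r1-4. There is no portable C equivalent; a hypothetical PPC-only sketch with inline assembly, using a local scratch word in place of the red zone:)

    #include <stdint.h>

    /* Hypothetical PPC-only helper: a conditional store to a scratch
     * word discards any reservation still held by this CPU. */
    static inline void clear_reservation(void)
    {
        uint32_t scratch;
        __asm__ volatile ("stwcx. %1,0,%0"
                          : : "r" (&scratch), "r" (0) : "cr0", "memory");
    }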
 
 
 spinlock_64_try_up:
+               mr              r5, r3
+               li              r3, 1
 1:
-        lwarx  r4,0,r3
+        lwarx  r4,0,r5
+               li              r6,-1           // locked == -1
         cmpwi  r4,0
         bne--  2f
-        stwcx. r3,0,r3
+        stwcx. r6,0,r5
         beqlr++
         b              1b
 2:
         li             r6,-4
-        stwcx. r3,r6,r1        // clear the pending reservation (using red zone)
-        li             r3,0
+        stwcx. r5,r6,r1        // clear the pending reservation (using red zone)
+        li             r3,0        // we did not get the lock
         blr
 
-    COMMPAGE_DESCRIPTOR(spinlock_64_try_up,_COMM_PAGE_SPINLOCK_TRY,k64Bit+kUP,0,0)
+    COMMPAGE_DESCRIPTOR(spinlock_64_try_up,_COMM_PAGE_SPINLOCK_TRY,k64Bit+kUP,0,kCommPageBoth)
 
 
 spinlock_64_lock_mp:
         li             r5,MP_SPIN_TRIES
 1:
         lwarx  r4,0,r3
+               li              r6,-1           // locked == -1
         cmpwi  r4,0
         bne--  2f
-        stwcx. r3,0,r3
+        stwcx. r6,0,r3
         isync                          // cancel speculative execution
         beqlr++                                // we return void
         b              1b
@@ -188,15 +196,16 @@ spinlock_64_lock_mp:
         bne--  1b                      // mispredict this one (a cheap back-off)
         ba             _COMM_PAGE_RELINQUISH
 
-    COMMPAGE_DESCRIPTOR(spinlock_64_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,k64Bit,kUP,0)
+    COMMPAGE_DESCRIPTOR(spinlock_64_lock_mp,_COMM_PAGE_SPINLOCK_LOCK,k64Bit,kUP,kCommPageBoth)
 
 
 spinlock_64_lock_up:
 1:
         lwarx  r4,0,r3
+               li              r6,-1           // locked == -1
         cmpwi  r4,0
         bne--  2f
-        stwcx. r3,0,r3
+        stwcx. r6,0,r3
         beqlr++                                // we return void
         b              1b
 2:                                                     // always relinquish on UP (let lock owner run)
@@ -204,16 +213,16 @@ spinlock_64_lock_up:
         stwcx. r3,r6,r1        // clear the pending reservation (using red zone)
                ba              _COMM_PAGE_RELINQUISH
 
-    COMMPAGE_DESCRIPTOR(spinlock_64_lock_up,_COMM_PAGE_SPINLOCK_LOCK,k64Bit+kUP,0,0)
+    COMMPAGE_DESCRIPTOR(spinlock_64_lock_up,_COMM_PAGE_SPINLOCK_LOCK,k64Bit+kUP,0,kCommPageBoth)
 
 
 spinlock_64_unlock_mp:
-        li             r4,0
         lwsync                         // complete prior stores before unlock
+        li             r4,0
         stw            r4,0(r3)
         blr
 
-    COMMPAGE_DESCRIPTOR(spinlock_64_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit,kUP,0)
+    COMMPAGE_DESCRIPTOR(spinlock_64_unlock_mp,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit,kUP,kCommPageBoth)
 
 
 spinlock_64_unlock_up:
@@ -221,7 +230,7 @@ spinlock_64_unlock_up:
         stw            r4,0(r3)
         blr
 
-    COMMPAGE_DESCRIPTOR(spinlock_64_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit+kUP,0,0)
+    COMMPAGE_DESCRIPTOR(spinlock_64_unlock_up,_COMM_PAGE_SPINLOCK_UNLOCK,k64Bit+kUP,0,kCommPageBoth)
     
 
 spinlock_relinquish:
@@ -234,5 +243,5 @@ spinlock_relinquish:
         mr             r3,r12
         ba             _COMM_PAGE_SPINLOCK_LOCK
         
-    COMMPAGE_DESCRIPTOR(spinlock_relinquish,_COMM_PAGE_RELINQUISH,0,0,0)
+    COMMPAGE_DESCRIPTOR(spinlock_relinquish,_COMM_PAGE_RELINQUISH,0,0,kCommPageBoth)
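(Finally, the relinquish path restores the lock address from r12, gives up the processor so the lock holder can make progress, and jumps back to _COMM_PAGE_SPINLOCK_LOCK to try again. The same yield-and-retry shape in C, with sched_yield() standing in for the priority-depressing trap the commpage code uses:)

    #include <sched.h>
    #include <stdint.h>

    /* Yield, then retry the lock; illustrative only. */
    static void spinlock_relinquish_model(int32_t *lock)
    {
        do {
            sched_yield();          /* let the lock holder make progress */
        } while (!__sync_bool_compare_and_swap(lock, 0, -1));
    }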