diff --git a/osfmk/i386/i386_lock.s b/osfmk/i386/i386_lock.s
index 0f7bdba3a57ad65b81a968764df74180b58904ff..6b34073c68bc7d3b9b733bf813d26c0d206d3606 100644
--- a/osfmk/i386/i386_lock.s
+++ b/osfmk/i386/i386_lock.s
@@ -54,8 +54,7 @@
  *     When performance isn't the only concern, it's
  *     nice to build stack frames...
  */
-#define        BUILD_STACK_FRAMES   (GPROF || \
-                               ((MACH_LDEBUG) && MACH_KDB))
+#define        BUILD_STACK_FRAMES   (GPROF)
 
 #if    BUILD_STACK_FRAMES
 
@@ -360,7 +359,7 @@ LEAF_ENTRY(hw_lock_init)
 
 
 /*
- *     void hw_lock_byte_init(uint8_t *)
+ *     void hw_lock_byte_init(volatile uint8_t *)
  *
  *     Initialize a hardware byte lock.
  */
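
The prototype comment above now matches the volatile-qualified pointer used by the C callers. For orientation, a minimal C sketch of what a byte-lock initializer of this shape does, assuming the convention that 0 means "unlocked" (the kernel's real routine is the hand-written assembly that follows this comment block, not shown in the hunk; the name is illustrative):

    #include <stdint.h>

    /* Sketch only: clear the byte so the lock starts out free. */
    static inline void
    hw_lock_byte_init_sketch(volatile uint8_t *lock_byte)
    {
            *lock_byte = 0;
    }
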
@@ -454,7 +453,6 @@ LEAF_ENTRY(hw_lock_to)
 
        lfence
        rdtsc                           /* read cyclecount into %edx:%eax */
-       lfence
        addl    %ecx,%eax               /* fetch and timeout */
        adcl    $0,%edx                 /* add carry */
        mov     %edx,%ecx
@@ -464,7 +462,6 @@ LEAF_ENTRY(hw_lock_to)
        push    %r9
        lfence
        rdtsc                           /* read cyclecount into %edx:%eax */
-       lfence
        shlq    $32, %rdx
        orq     %rdx, %rax              /* load 64-bit quantity into %rax */
        addq    %rax, %rsi              /* %rsi is the timeout expiry */
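
This and the neighboring hw_lock_to hunks drop the trailing lfence that used to follow each rdtsc, keeping only the leading one. A hedged C sketch of the deadline setup they implement, with the compiler intrinsics _mm_lfence() and __rdtsc() standing in for the hand-written instruction sequence (the function name is illustrative):

    #include <stdint.h>
    #include <x86intrin.h>          /* _mm_lfence(), __rdtsc() */

    /* Serialize prior loads, read the TSC once, and add the caller's cycle
     * budget to form an absolute expiry (%rsi in the 64-bit assembly path). */
    static inline uint64_t
    spin_deadline(uint64_t timeout_cycles)
    {
            _mm_lfence();
            return __rdtsc() + timeout_cycles;
    }
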
@@ -498,7 +495,6 @@ LEAF_ENTRY(hw_lock_to)
        mov     %edx,%edi               /* Save %edx */
        lfence
        rdtsc                           /* cyclecount into %edx:%eax */
-       lfence
        xchg    %edx,%edi               /* cyclecount into %edi:%eax */
        cmpl    %ecx,%edi               /* compare high-order 32-bits */
        jb      4b                      /* continue spinning if less, or */
@@ -510,7 +506,6 @@ LEAF_ENTRY(hw_lock_to)
 #else
        lfence
        rdtsc                           /* cyclecount into %edx:%eax */
-       lfence
        shlq    $32, %rdx
        orq     %rdx, %rax              /* load 64-bit quantity into %rax */
        cmpq    %rsi, %rax              /* compare to timeout */
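
Inside the spin loop the same pattern repeats: an lfence, a fresh rdtsc, and a comparison against the expiry computed at entry. A hedged sketch of that per-iteration check, again with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>
    #include <x86intrin.h>

    /* Return true once the absolute deadline has passed; this corresponds to
     * the assembly's "cmpq %rsi, %rax" and the branch back to the spin label. */
    static inline bool
    spin_timed_out(uint64_t deadline)
    {
            _mm_lfence();
            return __rdtsc() >= deadline;
    }
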
@@ -708,7 +703,7 @@ Entry(lck_rw_try_lock_shared)
        LOCKSTAT_LABEL(_lck_rw_try_lock_shared_lockstat_patch_point)
        ret
     /* Fall thru when patched, counting on lock pointer in LCK_RW_REGISTER  */
-    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, LCK_RW_REGISTER)
+    LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, LCK_RW_REGISTER)
 #endif
        movl    $1, %eax                        /* return TRUE */
        ret
@@ -784,7 +779,7 @@ Entry(lck_rw_lock_exclusive)
        LOCKSTAT_LABEL(_lck_rw_lock_exclusive_lockstat_patch_point)
        ret
     /* Fall thru when patched, counting on lock pointer in LCK_RW_REGISTER  */
-    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, LCK_RW_REGISTER)
+    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, LCK_RW_REGISTER)
 #endif
        ret
 2:
@@ -828,7 +823,7 @@ Entry(lck_rw_try_lock_exclusive)
        LOCKSTAT_LABEL(_lck_rw_try_lock_exclusive_lockstat_patch_point)
        ret
     /* Fall thru when patched, counting on lock pointer in LCK_RW_REGISTER  */
-    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, LCK_RW_REGISTER)
+    LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, LCK_RW_REGISTER)
 #endif
        movl    $1, %eax                        /* return TRUE */
        ret
@@ -889,7 +884,7 @@ Entry(lck_rw_lock_shared_to_exclusive)
        LOCKSTAT_LABEL(_lck_rw_lock_shared_to_exclusive_lockstat_patch_point)
        ret
     /* Fall thru when patched, counting on lock pointer in LCK_RW_REGISTER  */
-    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, LCK_RW_REGISTER)
+    LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, LCK_RW_REGISTER)
 #endif
        movl    $1, %eax                        /* return success */
        ret
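
The four LOCKSTAT_RECORD hunks above replace copy-pasted probe IDs so that each read-write lock entry point reports its own DTrace lockstat event instead of funneling everything into the shared-acquire probe. A conceptual C sketch of why the distinction matters; the names and the counter table are illustrative, not xnu's actual macro or probe map:

    #include <stdint.h>

    /* Illustrative event IDs, one per acquisition path. */
    enum rw_event {
            RW_LOCK_SHARED_ACQUIRE,
            RW_TRY_LOCK_SHARED_ACQUIRE,
            RW_LOCK_EXCL_ACQUIRE,
            RW_TRY_LOCK_EXCL_ACQUIRE,
            RW_SHARED_TO_EXCL_UPGRADE,
            RW_EVENT_MAX
    };

    static uint64_t rw_event_counts[RW_EVENT_MAX];

    /* Before the fix, the try-lock, exclusive, and upgrade paths all bumped
     * the shared-acquire slot, so per-path statistics were misattributed. */
    static inline void
    rw_event_record(enum rw_event e)
    {
            rw_event_counts[e]++;
    }
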
@@ -1419,14 +1414,14 @@ mutex_interlock_destroyed_str:
  * lck_mtx_convert_spin()
  */
 NONLEAF_ENTRY(lck_mtx_lock_spin_always)
-       LOAD_LMTX_REG(B_ARG0)           /* fetch lock pointer */
-       jmp     Llmls_avoid_check
-
+       LOAD_LMTX_REG(B_ARG0)           /* fetch lock pointer */
+       jmp     Llmls_avoid_check
+       
 NONLEAF_ENTRY(lck_mtx_lock_spin)
        LOAD_LMTX_REG(B_ARG0)           /* fetch lock pointer */
 
        CHECK_PREEMPTION_LEVEL()
-Llmls_avoid_check:     
+Llmls_avoid_check:
        mov     M_STATE(LMTX_REG), LMTX_C_REG32
        test    $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32  /* is the interlock or mutex held */
        jnz     Llmls_slow
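
The fast path above loads the mutex state word once and bails to Llmls_slow if either the interlock or the mutex-held bit is set. A hedged C sketch of that test; the bit values are placeholders, not xnu's M_ILOCKED_MSK/M_MLOCKED_MSK definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define ILOCKED_BIT_SKETCH  0x1u    /* assumed: interlock held */
    #define MLOCKED_BIT_SKETCH  0x2u    /* assumed: mutex held     */

    /* True when neither bit is set and the spin-lock fast path may proceed. */
    static inline bool
    lck_mtx_spin_fast_path_ok(uint32_t m_state)
    {
            return (m_state & (ILOCKED_BIT_SKETCH | MLOCKED_BIT_SKETCH)) == 0;
    }
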
@@ -2265,27 +2260,42 @@ LEAF_ENTRY(bit_unlock)
  * Atomic primitives, prototyped in kern/simple_lock.h
  */
 LEAF_ENTRY(hw_atomic_add)
+#if    MACH_LDEBUG
+       test    $3, %rdi
+       jz      1f
+       ud2
+1:
+#endif 
        movl    %esi, %eax              /* Load addend */
-       lock
-       xaddl   %eax, (%rdi)            /* Atomic exchange and add */
+       lock    xaddl %eax, (%rdi)              /* Atomic exchange and add */
        addl    %esi, %eax              /* Calculate result */
        LEAF_RET
 
 LEAF_ENTRY(hw_atomic_sub)
+#if    MACH_LDEBUG
+       test    $3, %rdi
+       jz      1f
+       ud2
+1:
+#endif 
        negl    %esi
        movl    %esi, %eax
-       lock
-       xaddl   %eax, (%rdi)            /* Atomic exchange and add */
+       lock    xaddl %eax, (%rdi)              /* Atomic exchange and add */
        addl    %esi, %eax              /* Calculate result */
        LEAF_RET
 
 LEAF_ENTRY(hw_atomic_or)
+#if    MACH_LDEBUG
+       test    $3, %rdi
+       jz      1f
+       ud2
+1:
+#endif 
        movl    (%rdi), %eax
 1:
        movl    %esi, %edx              /* Load mask */
        orl     %eax, %edx
-       lock
-       cmpxchgl        %edx, (%rdi)    /* Atomic CAS */
+       lock    cmpxchgl %edx, (%rdi)   /* Atomic CAS */
        jne     1b
        movl    %edx, %eax              /* Result */
        LEAF_RET
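
Each atomic primitive now traps with ud2 under MACH_LDEBUG when handed a pointer that is not 4-byte aligned, and the lock prefix is folded onto the same line as the instruction it modifies. A hedged C rendering of the patched hw_atomic_add, using __sync_fetch_and_add in place of "lock xaddl" and __builtin_trap() in place of ud2:

    #include <stdint.h>

    static inline uint32_t
    hw_atomic_add_sketch(volatile uint32_t *dest, uint32_t delt)
    {
    #if MACH_LDEBUG
            if ((uintptr_t)dest & 3)
                    __builtin_trap();   /* mirrors "test $3, %rdi; ud2" */
    #endif
            /* xadd returns the old value; add the delta back to return the new one. */
            return __sync_fetch_and_add(dest, delt) + delt;
    }
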
@@ -2295,18 +2305,29 @@ LEAF_ENTRY(hw_atomic_or)
  */
 
 LEAF_ENTRY(hw_atomic_or_noret)
+#if    MACH_LDEBUG
+       test    $3, %rdi
+       jz      1f
+       ud2
+1:
+#endif 
        lock
        orl     %esi, (%rdi)            /* Atomic OR */
        LEAF_RET
 
 
 LEAF_ENTRY(hw_atomic_and)
+#if    MACH_LDEBUG
+       test    $3, %rdi
+       jz      1f
+       ud2
+1:
+#endif 
        movl    (%rdi), %eax
 1:
        movl    %esi, %edx              /* Load mask */
        andl    %eax, %edx
-       lock
-       cmpxchgl        %edx, (%rdi)    /* Atomic CAS */
+       lock    cmpxchgl %edx, (%rdi)   /* Atomic CAS */
        jne     1b
        movl    %edx, %eax              /* Result */
        LEAF_RET
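
hw_atomic_and and hw_atomic_or must return the resulting value, so they use a compare-and-swap retry loop rather than a single locked instruction. A hedged sketch of that loop with __sync_val_compare_and_swap standing in for "lock cmpxchgl"; on failure cmpxchg leaves the freshly observed value in %eax, which the sketch mirrors by reusing the CAS return value:

    #include <stdint.h>

    static inline uint32_t
    hw_atomic_and_sketch(volatile uint32_t *dest, uint32_t mask)
    {
            uint32_t oldval = *dest;
            for (;;) {
                    uint32_t newval = oldval & mask;
                    uint32_t seen = __sync_val_compare_and_swap(dest, oldval, newval);
                    if (seen == oldval)
                            return newval;      /* CAS succeeded; return the result */
                    oldval = seen;              /* retry with the value another CPU wrote */
            }
    }
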
@@ -2316,8 +2337,13 @@ LEAF_ENTRY(hw_atomic_and)
  */
 
 LEAF_ENTRY(hw_atomic_and_noret)
-       lock
-       andl    %esi, (%rdi)            /* Atomic OR */
+#if    MACH_LDEBUG
+       test    $3, %rdi
+       jz      1f
+       ud2
+1:
+#endif 
+       lock    andl    %esi, (%rdi)            /* Atomic AND */
        LEAF_RET
 
 #endif /* !__i386__ */
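
The *_noret variants do not have to report the new value, so a single lock-prefixed read-modify-write suffices and no cmpxchg retry loop is needed. A hedged sketch, discarding the value returned by the builtin:

    #include <stdint.h>

    static inline void
    hw_atomic_and_noret_sketch(volatile uint32_t *dest, uint32_t mask)
    {
            (void)__sync_fetch_and_and(dest, mask);     /* lock andl %esi, (%rdi) */
    }
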