diff --git a/osfmk/i386/i386_lock.s b/osfmk/i386/i386_lock.s
index 61355263f6304ec933c85c9923d713f547cf1c20..f54e040a1a78037a95a7da3630c55894fe5a0c27 100644
--- a/osfmk/i386/i386_lock.s
+++ b/osfmk/i386/i386_lock.s
  * register initially, and then either a byte or register-sized
  * word is loaded/stored to the pointer
  */
-/*
- *     void hw_lock_init(hw_lock_t)
- *
- *     Initialize a hardware lock.
- */
-LEAF_ENTRY(hw_lock_init)
-       movq    $0, (%rdi)              /* clear the lock */
-       LEAF_RET
-
 
 /*
  *     void hw_lock_byte_init(volatile uint8_t *)
@@ -264,28 +254,6 @@ LEAF_ENTRY(hw_lock_byte_init)
        movb    $0, (%rdi)              /* clear the lock */
        LEAF_RET
 
-/*
- *     void hw_lock_lock(hw_lock_t)
- *
- *     Acquire lock, spinning until it becomes available.
- *     MACH_RT:  also return with preemption disabled.
- */
-LEAF_ENTRY(hw_lock_lock)
-       mov     %gs:CPU_ACTIVE_THREAD, %rcx     /* get thread pointer */
-       
-       PREEMPTION_DISABLE
-1:
-       mov     (%rdi), %rax
-       test    %rax,%rax               /* lock locked? */
-       jne     3f                      /* branch if so */
-       lock; cmpxchg %rcx,(%rdi)       /* try to acquire the HW lock */
-       jne     3f
-       movl    $1,%eax                 /* In case this was a timeout call */
-       LEAF_RET                        /* if yes, then nothing left to do */
-3:
-       PAUSE                           /* pause for hyper-threading */
-       jmp     1b                      /* try again */
-
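
For readers tracing the removed fast path: hw_lock_lock is a plain test-then-compare-exchange spin. Below is a minimal user-space C sketch of the same structure, assuming GCC/Clang __atomic builtins and _mm_pause; the owner value stored into the lock word and the preemption handling are kernel-specific and left out, and spin_acquire is an illustrative name, not an xnu symbol.

	#include <stdint.h>
	#include <immintrin.h>                  /* _mm_pause */

	/* Spin until the lock word can be swapped from 0 (free) to 'owner'. */
	static void
	spin_acquire(volatile uintptr_t *lock, uintptr_t owner)
	{
		for (;;) {
			uintptr_t expected = 0;
			/* Attempt the atomic swap only when the lock looks free. */
			if (__atomic_load_n(lock, __ATOMIC_RELAXED) == 0 &&
			    __atomic_compare_exchange_n(lock, &expected, owner,
			        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
				return;
			}
			_mm_pause();            /* PAUSE hint for hyper-threading */
		}
	}
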
 /*
  *     void    hw_lock_byte_lock(uint8_t *lock_byte)
  *
@@ -307,93 +275,6 @@ LEAF_ENTRY(hw_lock_byte_lock)
        PAUSE                           /* pause for hyper-threading */
        jmp     1b                      /* try again */
 
-/*
- *     unsigned int hw_lock_to(hw_lock_t, unsigned int)
- *
- *     Acquire lock, spinning until it becomes available or timeout.
- *     MACH_RT:  also return with preemption disabled.
- */
-LEAF_ENTRY(hw_lock_to)
-1:
-       mov     %gs:CPU_ACTIVE_THREAD, %rcx
-
-       /*
-        * Attempt to grab the lock immediately
-        * - fastpath without timeout nonsense.
-        */
-       PREEMPTION_DISABLE
-
-       mov     (%rdi), %rax
-       test    %rax,%rax               /* lock locked? */
-       jne     2f                      /* branch if so */
-       lock; cmpxchg %rcx,(%rdi)       /* try to acquire the HW lock */
-       jne     2f                      /* branch on failure */
-       movl    $1,%eax
-       LEAF_RET
-
-2:
-#define        INNER_LOOP_COUNT        1000
-       /*
-        * Failed to get the lock so set the timeout
-        * and then spin re-checking the lock but pausing
-        * every so many (INNER_LOOP_COUNT) spins to check for timeout.
-        */
-       push    %r9
-       lfence
-       rdtsc                           /* read cyclecount into %edx:%eax */
-       shlq    $32, %rdx
-       orq     %rdx, %rax              /* load 64-bit quantity into %rax */
-       addq    %rax, %rsi              /* %rsi is the timeout expiry */
-       
-4:
-       /*
-        * The inner-loop spin to look for the lock being freed.
-        */
-       mov     $(INNER_LOOP_COUNT),%r9
-5:
-       PAUSE                           /* pause for hyper-threading */
-       mov     (%rdi),%rax             /* spin checking lock value in cache */
-       test    %rax,%rax
-       je      6f                      /* zero => unlocked, try to grab it */
-       decq    %r9                     /* decrement inner loop count */
-       jnz     5b                      /* time to check for timeout? */
-       
-       /*
-        * Here after spinning INNER_LOOP_COUNT times, check for timeout
-        */
-       lfence
-       rdtsc                           /* cyclecount into %edx:%eax */
-       shlq    $32, %rdx
-       orq     %rdx, %rax              /* load 64-bit quantity into %rax */
-       cmpq    %rsi, %rax              /* compare to timeout */
-       jb      4b                      /* continue spinning if less, or */
-       xor     %rax,%rax               /* with 0 return value */
-       pop     %r9
-       LEAF_RET
-
-6:
-       /*
-        * Here to try to grab the lock that now appears to be free
-        * after contention.
-        */
-       mov     %gs:CPU_ACTIVE_THREAD, %rcx
-       lock; cmpxchg %rcx,(%rdi)       /* try to acquire the HW lock */
-       jne     4b                      /* no - spin again */
-       movl    $1,%eax                 /* yes */
-       pop     %r9
-       LEAF_RET
-
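
The timeout variant above reads the TSC once to compute a deadline, then spins in batches of INNER_LOOP_COUNT cheap loads so that the comparatively expensive lfence/rdtsc sequence runs only occasionally. A hedged C sketch of that shape, assuming __rdtsc() from x86intrin.h and the same builtins as the previous sketch (spin_acquire_timeout is an illustrative name):

	#include <stdint.h>
	#include <immintrin.h>                  /* _mm_pause */
	#include <x86intrin.h>                  /* __rdtsc */

	#define SPIN_INNER_LOOP_COUNT   1000

	/* Returns 1 if the lock was acquired, 0 if the TSC deadline expired first. */
	static int
	spin_acquire_timeout(volatile uintptr_t *lock, uintptr_t owner,
	    uint64_t timeout_cycles)
	{
		uintptr_t expected = 0;

		/* Fast path: one attempt before any timeout bookkeeping. */
		if (__atomic_compare_exchange_n(lock, &expected, owner,
		        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
			return 1;
		}

		uint64_t deadline = __rdtsc() + timeout_cycles;
		for (;;) {
			/* Inner loop: spin on plain loads, pausing each iteration. */
			for (int i = 0; i < SPIN_INNER_LOOP_COUNT; i++) {
				_mm_pause();
				if (__atomic_load_n(lock, __ATOMIC_RELAXED) == 0) {
					expected = 0;
					if (__atomic_compare_exchange_n(lock,
					        &expected, owner, 0,
					        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
						return 1;
					}
				}
			}
			/* Only consult the TSC after a full inner loop. */
			if (__rdtsc() >= deadline) {
				return 0;
			}
		}
	}
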
-/*
- *     void hw_lock_unlock(hw_lock_t)
- *
- *     Unconditionally release lock.
- *     MACH_RT:  release preemption level.
- */
-LEAF_ENTRY(hw_lock_unlock)
-       movq $0, (%rdi)         /* clear the lock */
-       PREEMPTION_ENABLE
-       LEAF_RET
-
 /*
  *     void hw_lock_byte_unlock(uint8_t *lock_byte)
  *
@@ -406,41 +287,6 @@ LEAF_ENTRY(hw_lock_byte_unlock)
        PREEMPTION_ENABLE
        LEAF_RET
 
-/*
- *     unsigned int hw_lock_try(hw_lock_t)
- *     MACH_RT:  returns with preemption disabled on success.
- */
-LEAF_ENTRY(hw_lock_try)
-       mov     %gs:CPU_ACTIVE_THREAD, %rcx
-       PREEMPTION_DISABLE
-
-       mov     (%rdi),%rax
-       test    %rax,%rax
-       jne     1f
-       lock; cmpxchg %rcx,(%rdi)       /* try to acquire the HW lock */
-       jne     1f
-       
-       movl    $1,%eax                 /* success */
-       LEAF_RET
-
-1:
-       PREEMPTION_ENABLE               /* failure:  release preemption... */
-       xorl    %eax,%eax               /* ...and return failure */
-       LEAF_RET
-
-/*
- *     unsigned int hw_lock_held(hw_lock_t)
- *     MACH_RT:  doesn't change preemption state.
- *     N.B.  Racy, of course.
- */
-LEAF_ENTRY(hw_lock_held)
-       mov     (%rdi),%rax             /* check lock value */
-       test    %rax,%rax
-       movl    $1,%ecx
-       cmovne  %ecx,%eax               /* 0 => unlocked, 1 => locked */
-       LEAF_RET
-
-
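
hw_lock_try and hw_lock_held above are the non-blocking counterparts: a single CAS attempt (keeping preemption disabled only on success) and a deliberately racy snapshot of the lock word. A minimal sketch under the same assumptions as the earlier sketches, with illustrative names:

	#include <stdint.h>

	/* One attempt, never spins; returns 1 on success, 0 on failure. */
	static int
	spin_try(volatile uintptr_t *lock, uintptr_t owner)
	{
		uintptr_t expected = 0;
		return __atomic_load_n(lock, __ATOMIC_RELAXED) == 0 &&
		    __atomic_compare_exchange_n(lock, &expected, owner,
		        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
	}

	/* Racy by design: the answer can be stale the moment it is returned. */
	static int
	spin_held(volatile uintptr_t *lock)
	{
		return __atomic_load_n(lock, __ATOMIC_RELAXED) != 0;
	}
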
 /*
  * Reader-writer lock fastpaths. These currently exist for the
  * shared lock acquire, the exclusive lock acquire, the shared to
@@ -1711,184 +1557,3 @@ LEAF_ENTRY(preemption_underflow_panic)
        .text
 
 
-LEAF_ENTRY(_disable_preemption)
-#if    MACH_RT
-       PREEMPTION_DISABLE
-#endif /* MACH_RT */
-       LEAF_RET
-
-LEAF_ENTRY(_enable_preemption)
-#if    MACH_RT
-#if    MACH_ASSERT
-       cmpl    $0,%gs:CPU_PREEMPTION_LEVEL
-       jg      1f
-       movl    %gs:CPU_PREEMPTION_LEVEL,%esi
-       ALIGN_STACK()
-       LOAD_STRING_ARG0(_enable_preemption_less_than_zero)
-       CALL_PANIC()
-       hlt
-       .cstring
-_enable_preemption_less_than_zero:
-       .asciz  "_enable_preemption: preemption_level(%d)  < 0!"
-       .text
-1:
-#endif /* MACH_ASSERT */
-       PREEMPTION_ENABLE
-#endif /* MACH_RT */
-       LEAF_RET
-
-LEAF_ENTRY(_enable_preemption_no_check)
-#if    MACH_RT
-#if    MACH_ASSERT
-       cmpl    $0,%gs:CPU_PREEMPTION_LEVEL
-       jg      1f
-       ALIGN_STACK()
-       LOAD_STRING_ARG0(_enable_preemption_no_check_less_than_zero)
-       CALL_PANIC()
-       hlt
-       .cstring
-_enable_preemption_no_check_less_than_zero:
-       .asciz  "_enable_preemption_no_check: preemption_level <= 0!"
-       .text
-1:
-#endif /* MACH_ASSERT */
-       _ENABLE_PREEMPTION_NO_CHECK
-#endif /* MACH_RT */
-       LEAF_RET
-       
-       
-LEAF_ENTRY(_mp_disable_preemption)
-#if    MACH_RT
-       PREEMPTION_DISABLE
-#endif /* MACH_RT */
-       LEAF_RET
-
-LEAF_ENTRY(_mp_enable_preemption)
-#if    MACH_RT
-#if    MACH_ASSERT
-       cmpl    $0,%gs:CPU_PREEMPTION_LEVEL
-       jg      1f
-       movl    %gs:CPU_PREEMPTION_LEVEL,%esi
-       ALIGN_STACK()
-       LOAD_STRING_ARG0(_mp_enable_preemption_less_than_zero)
-       CALL_PANIC()
-       hlt
-       .cstring
-_mp_enable_preemption_less_than_zero:
-       .asciz "_mp_enable_preemption: preemption_level (%d) <= 0!"
-       .text
-1:
-#endif /* MACH_ASSERT */
-       PREEMPTION_ENABLE
-#endif /* MACH_RT */
-       LEAF_RET
-
-LEAF_ENTRY(_mp_enable_preemption_no_check)
-#if    MACH_RT
-#if    MACH_ASSERT
-       cmpl    $0,%gs:CPU_PREEMPTION_LEVEL
-       jg      1f
-       ALIGN_STACK()
-       LOAD_STRING_ARG0(_mp_enable_preemption_no_check_less_than_zero)
-       CALL_PANIC()
-       hlt
-       .cstring
-_mp_enable_preemption_no_check_less_than_zero:
-       .asciz "_mp_enable_preemption_no_check: preemption_level <= 0!"
-       .text
-1:
-#endif /* MACH_ASSERT */
-       _ENABLE_PREEMPTION_NO_CHECK
-#endif /* MACH_RT */
-       LEAF_RET
-       
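
All four wrappers above share one pattern: adjust the per-CPU preemption level, and under MACH_ASSERT panic if the level is already zero or negative before an enable. A hedged C rendering of that assertion, where cpu_preemption_level() and panic() are hypothetical stand-ins for the %gs-relative counter access and CALL_PANIC():

	/* Hypothetical helpers standing in for the per-CPU counter and panic path. */
	extern int  cpu_preemption_level(void);
	extern void panic(const char *fmt, ...);

	static void
	enable_preemption_checked(void)
	{
		int level = cpu_preemption_level();
		if (level <= 0) {
			panic("_enable_preemption: preemption_level(%d)  < 0!", level);
		}
		/* ...otherwise decrement the level (PREEMPTION_ENABLE). */
	}
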
-/*
- * Atomic primitives, prototyped in kern/simple_lock.h
- */
-LEAF_ENTRY(hw_atomic_add)
-#if    MACH_LDEBUG
-       test    $3, %rdi
-       jz      1f
-       ud2
-1:
-#endif 
-       movl    %esi, %eax              /* Load addend */
-       lock    xaddl %eax, (%rdi)      /* Atomic exchange and add */
-       addl    %esi, %eax              /* Calculate result */
-       LEAF_RET
-
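
Note that hw_atomic_add returns the post-add value: lock xaddl leaves the previous contents in %eax, so the addend is added back before returning. In terms of the GCC/Clang builtins that is simply the following (atomic_add_32 is an illustrative name, not the kernel interface):

	#include <stdint.h>

	/* Atomically add 'delta' to *dst and return the new value. */
	static uint32_t
	atomic_add_32(volatile uint32_t *dst, uint32_t delta)
	{
		return __atomic_add_fetch(dst, delta, __ATOMIC_SEQ_CST);
	}
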
-LEAF_ENTRY(hw_atomic_sub)
-#if    MACH_LDEBUG
-       test    $3, %rdi
-       jz      1f
-       ud2
-1:
-#endif 
-       negl    %esi
-       movl    %esi, %eax
-       lock    xaddl %eax, (%rdi)      /* Atomic exchange and add */
-       addl    %esi, %eax              /* Calculate result */
-       LEAF_RET
-
-LEAF_ENTRY(hw_atomic_or)
-#if    MACH_LDEBUG
-       test    $3, %rdi
-       jz      1f
-       ud2
-1:
-#endif 
-       movl    (%rdi), %eax
-1:
-       movl    %esi, %edx              /* Load mask */
-       orl     %eax, %edx
-       lock    cmpxchgl %edx, (%rdi)   /* Atomic CAS */
-       jne     1b
-       movl    %edx, %eax              /* Result */
-       LEAF_RET
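
hw_atomic_or (and hw_atomic_and below) cannot use a single locked instruction because the caller wants the resulting value, so they loop on cmpxchgl until the compare-and-swap succeeds. A hedged C sketch of that loop, again assuming the __atomic builtins (atomic_or_32 is an illustrative name):

	#include <stdint.h>

	/* OR 'mask' into *dst atomically and return the resulting value. */
	static uint32_t
	atomic_or_32(volatile uint32_t *dst, uint32_t mask)
	{
		uint32_t old = __atomic_load_n(dst, __ATOMIC_RELAXED);
		uint32_t result;
		do {
			result = old | mask;
			/* On failure, 'old' is reloaded with the current value. */
		} while (!__atomic_compare_exchange_n(dst, &old, result,
		    0, __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));
		return result;
	}
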
-/*
- * A variant of hw_atomic_or which doesn't return a value.
- * The implementation is thus comparatively more efficient.
- */
-
-LEAF_ENTRY(hw_atomic_or_noret)
-#if    MACH_LDEBUG
-       test    $3, %rdi
-       jz      1f
-       ud2
-1:
-#endif 
-       lock
-       orl     %esi, (%rdi)            /* Atomic OR */
-       LEAF_RET
-
-
-LEAF_ENTRY(hw_atomic_and)
-#if    MACH_LDEBUG
-       test    $3, %rdi
-       jz      1f
-       ud2
-1:
-#endif 
-       movl    (%rdi), %eax
-1:
-       movl    %esi, %edx              /* Load mask */
-       andl    %eax, %edx
-       lock    cmpxchgl %edx, (%rdi)   /* Atomic CAS */
-       jne     1b
-       movl    %edx, %eax              /* Result */
-       LEAF_RET
-/*
- * A variant of hw_atomic_and which doesn't return a value.
- * The implementation is thus comparatively more efficient.
- */
-
-LEAF_ENTRY(hw_atomic_and_noret)
-#if    MACH_LDEBUG
-       test    $3, %rdi
-       jz      1f
-       ud2
-1:
-#endif 
-       lock    andl    %esi, (%rdi)    /* Atomic AND */
-       LEAF_RET
-
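
Because the *_noret variants discard the result, a single locked orl/andl suffices and no cmpxchg loop is needed; with the builtins used in the sketches above that is just __atomic_fetch_or / __atomic_fetch_and with the return value ignored (again illustrative, not the kernel's actual replacement):

	#include <stdint.h>

	static void
	atomic_or_noret_32(volatile uint32_t *dst, uint32_t mask)
	{
		(void)__atomic_fetch_or(dst, mask, __ATOMIC_SEQ_CST);
	}

	static void
	atomic_and_noret_32(volatile uint32_t *dst, uint32_t mask)
	{
		(void)__atomic_fetch_and(dst, mask, __ATOMIC_SEQ_CST);
	}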