diff --git a/osfmk/arm/cpu_data.h b/osfmk/arm/cpu_data.h
index f35121e35bda0f6802d9ca6c846426208d9fade4..7b001d176e6a23f8e10df72cbc14a91e361a1623 100644
--- a/osfmk/arm/cpu_data.h
+++ b/osfmk/arm/cpu_data.h
  */
 /*
  * @OSF_COPYRIGHT@
- * 
+ *
  */
 
-#ifndef        ARM_CPU_DATA
+#ifndef ARM_CPU_DATA
 #define ARM_CPU_DATA
 
 #ifdef  MACH_KERNEL_PRIVATE
 #include <mach/mach_types.h>
 #include <machine/thread.h>
 
+#define current_thread()        current_thread_fast()
 
-#define current_thread()       current_thread_fast()
+static inline __attribute__((const)) thread_t
+current_thread_fast(void)
+{
+#if defined(__arm64__)
+       return (thread_t)(__builtin_arm_rsr64("TPIDR_EL1"));
+#else
+       return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4));  // TPIDRPRW
+#endif
+}
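
Because current_thread_fast() is declared __attribute__((const)), the compiler is free to fold repeated calls into a single register read. A minimal illustrative sketch (not part of this diff; assumes a kernel context where assert() is available):

/* Illustrative only: both calls below may compile to a single mrs of
 * TPIDR_EL1 (arm64) or mrc of TPIDRPRW (arm32), because the compiler
 * may CSE calls to a const function. */
static inline void
current_thread_cse_example(void)
{
	thread_t a = current_thread();
	thread_t b = current_thread();  /* likely folded into the read above */
	assert(a == b);
}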
 
-static inline thread_t current_thread_fast(void) 
+/*
+ * The "volatile" flavor of current_thread() is intended for use by
+ * scheduler code which may need to update the thread pointer in the
+ * course of a context switch.  Any call to current_thread() made
+ * prior to the thread pointer update should be safe to optimize away
+ * as it should be consistent with that thread's state to the extent
+ * the compiler can reason about it.  Likewise, the context switch
+ * path will eventually result in an arbitrary branch to the new
+ * thread's pc, about which the compiler won't be able to reason.
+ * Thus any compile-time optimization of current_thread() calls made
+ * within the new thread should be safely encapsulated in its
+ * register/stack state.  The volatile form therefore exists to cover
+ * the window between the thread pointer update and the branch to
+ * the new pc.
+ */
+static inline thread_t
+current_thread_volatile(void)
 {
-        thread_t        result;
+       /*
+        * The compiler treats the rsr64 intrinsic as const, which can
+        * allow it to eliminate redundant calls; we don't want that
+        * here, so we use volatile asm instead.  The mrc builtin used
+        * for arm32 is already treated as volatile, however.
+        */
 #if defined(__arm64__)
-        __asm__ volatile("mrs %0, TPIDR_EL1" : "=r" (result));
+       thread_t result;
+       __asm__ volatile ("mrs %0, TPIDR_EL1" : "=r" (result));
+       return result;
 #else
-       result = (thread_t)__builtin_arm_mrc(15, 0, 13, 0, 4);  // TPIDRPRW
+       return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4));  // TPIDRPRW
 #endif
-        return result;
 }
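
For contrast, a much-simplified sketch of the window the comment above describes (hypothetical code, not from this change; __builtin_arm_wsr64 is Clang's intrinsic for writing a 64-bit system register):

/* Hypothetical, simplified switch step: once TPIDR_EL1 has been
 * rewritten, only the volatile flavor is guaranteed to re-read the
 * register; a plain current_thread() may legally reuse a value the
 * compiler cached before the update. */
static void
example_set_current_thread(thread_t new_thread)
{
	__builtin_arm_wsr64("TPIDR_EL1", (uint64_t)new_thread);
	assert(current_thread_volatile() == new_thread);  /* forces a fresh mrs */
}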
 
 #if defined(__arm64__)
 
-static inline vm_offset_t exception_stack_pointer(void)
+static inline vm_offset_t
+exception_stack_pointer(void)
 {
        vm_offset_t result = 0;
-       __asm__ volatile(
-               "msr            SPSel, #1  \n"
-               "mov            %0, sp     \n"
-               "msr            SPSel, #0  \n"
-               : "=r" (result));
+       __asm__ volatile (
+                 "msr          SPSel, #1  \n"
+                 "mov          %0, sp     \n"
+                 "msr          SPSel, #0  \n"
+                 : "=r" (result));
 
        return result;
 }
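
A note on the SPSel dance above: at EL1, the banked SP_EL1 register cannot be read with a plain mrs (that encoding is only accessible from EL2 and above), so the routine briefly selects the EL1 stack pointer, copies sp, and switches back. A hypothetical use, assuming an EXCEPTION_STACK_SIZE constant that this diff does not define:

/* Hypothetical helper: check whether an address lies within the
 * current exception stack.  EXCEPTION_STACK_SIZE is assumed for
 * illustration and is not part of this change. */
static inline bool
example_on_exception_stack(vm_offset_t addr)
{
	vm_offset_t top = exception_stack_pointer();

	return (addr <= top) && (addr > (top - EXCEPTION_STACK_SIZE));
}
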
@@ -77,16 +107,16 @@ static inline vm_offset_t exception_stack_pointer(void)
 #endif /* defined(__arm64__) */
 
 #define getCpuDatap()            current_thread()->machine.CpuDatap
-#define current_cpu_datap()     getCpuDatap()
+#define current_cpu_datap()      getCpuDatap()
 
-extern int                                                                     get_preemption_level(void);
-extern void                                                            _enable_preemption_no_check(void);
+extern int                                                                      get_preemption_level(void);
+extern void                                                             _enable_preemption_no_check(void);
 
-#define enable_preemption_no_check()           _enable_preemption_no_check()
-#define mp_disable_preemption()                                _disable_preemption()
-#define mp_enable_preemption()                         _enable_preemption()
-#define mp_enable_preemption_no_check()                _enable_preemption_no_check()
+#define enable_preemption_no_check()            _enable_preemption_no_check()
+#define mp_disable_preemption()                         _disable_preemption()
+#define mp_enable_preemption()                          _enable_preemption()
+#define mp_enable_preemption_no_check()         _enable_preemption_no_check()
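
getCpuDatap() chases current_thread()->machine.CpuDatap, which is only stable against migration while preemption is disabled; an illustrative access pattern (not from this diff):

/* Illustrative sketch: bracket per-CPU accesses with the preemption
 * macros so the thread cannot migrate mid-access.  cpu_number is a
 * field of the arm cpu_data structure in this version of xnu. */
static inline int
example_current_cpu(void)
{
	int cpu;

	mp_disable_preemption();
	cpu = getCpuDatap()->cpu_number;
	mp_enable_preemption();

	return cpu;
}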
 
 #endif  /* MACH_KERNEL_PRIVATE */
 
-#endif /* ARM_CPU_DATA */
+#endif  /* ARM_CPU_DATA */