/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef ARM_CPU_DATA
#define ARM_CPU_DATA

#ifdef MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <pexpert/pexpert.h>
#include <arm/thread.h>
#include <arm/proc_reg.h>

#include <mach/mach_types.h>
#include <machine/thread.h>
#define current_thread()	current_thread_fast()

static inline __attribute__((const)) thread_t
current_thread_fast(void)
{
#if defined(__arm64__)
	return (thread_t)(__builtin_arm_rsr64("TPIDR_EL1"));
#else
	return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4));	// TPIDRPRW
#endif
}
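
/*
 * Illustrative sketch, not part of the original header: because
 * current_thread() expands to current_thread_fast(), fetching the current
 * thread is a single system-register read (TPIDR_EL1 on arm64, TPIDRPRW on
 * arm32) with no memory access.  The helper below is hypothetical and only
 * shows the intended usage pattern.
 */
static inline boolean_t
example_thread_is_current(thread_t thread)	/* hypothetical helper */
{
	return thread == current_thread();
}
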
62 * The "volatile" flavor of current_thread() is intended for use by
63 * scheduler code which may need to update the thread pointer in the
64 * course of a context switch. Any call to current_thread() made
65 * prior to the thread pointer update should be safe to optimize away
66 * as it should be consistent with that thread's state to the extent
67 * the compiler can reason about it. Likewise, the context switch
68 * path will eventually result in an arbitrary branch to the new
69 * thread's pc, about which the compiler won't be able to reason.
70 * Thus any compile-time optimization of current_thread() calls made
71 * within the new thread should be safely encapsulated in its
72 * register/stack state. The volatile form therefore exists to cover
73 * the window between the thread pointer update and the branch to
static inline thread_t
current_thread_volatile(void)
{
	/* The compiler treats rsr64 as const, which can allow
	 * it to eliminate redundant calls, which we don't want here.
	 * Thus we use volatile asm.  The mrc used for arm32 should be
	 * treated as volatile however. */
#if defined(__arm64__)
	thread_t result;
	__asm__ volatile ("mrs %0, TPIDR_EL1" : "=r" (result));
	return result;
#else
	return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4));	// TPIDRPRW
#endif
}
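
/*
 * Illustrative sketch, not part of the original header: the volatile flavor
 * matters to code that rewrites the thread-pointer register, e.g. on the
 * context-switch path.  The hypothetical helper below writes TPIDR_EL1
 * directly and re-reads it; a call to the const-qualified
 * current_thread_fast() could legally be cached across the write, while
 * current_thread_volatile() cannot.
 */
#if defined(__arm64__)
static inline void
example_install_thread_pointer(thread_t new_thread)	/* hypothetical helper */
{
	__asm__ volatile ("msr TPIDR_EL1, %0" :: "r" (new_thread));
	assert(current_thread_volatile() == new_thread);
}
#endif
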
#if defined(__arm64__)

static inline vm_offset_t
exception_stack_pointer(void)
{
	vm_offset_t result = 0;
	__asm__ volatile ("msr SPSel, #1\n\t"
	                  "mov %0, sp\n\t"
	                  "msr SPSel, #0" : "=r" (result));
	return result;
}

#endif /* defined(__arm64__) */
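
/*
 * Illustrative note, assuming the SPSel sequence above: with SPSel = 1 the
 * "sp" name refers to SP_EL1, the stack pointer used when taking exceptions,
 * so the mov captures that value before SPSel is switched back to 0 for
 * normal thread execution.
 */
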
#define getCpuDatap()		current_thread()->machine.CpuDatap
#define current_cpu_datap()	getCpuDatap()

extern int			get_preemption_level(void);

#define mp_disable_preemption()	_disable_preemption()
#define mp_enable_preemption()	_enable_preemption()
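
/*
 * Illustrative sketch, not part of the original header: per-CPU data reached
 * through getCpuDatap() is only stable while the thread cannot migrate, so
 * accesses are normally bracketed by the preemption macros above.  The
 * helper below is hypothetical and only shows the pattern.
 */
static inline void
example_touch_per_cpu_data(void)	/* hypothetical helper */
{
	mp_disable_preemption();
	assert(getCpuDatap() != NULL);	/* stable: no migration while disabled */
	mp_enable_preemption();
}
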
#endif /* MACH_KERNEL_PRIVATE */

#endif /* ARM_CPU_DATA */