/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef ARM_CPU_DATA
#define ARM_CPU_DATA

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>
#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <pexpert/pexpert.h>
#include <arm/thread.h>
#include <arm/proc_reg.h>

#include <mach/mach_types.h>
#include <machine/thread.h>

#define current_thread() current_thread_fast()

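/*
 * current_thread_fast() returns the currently executing thread by reading
 * the per-CPU thread pointer register (TPIDR_EL1 on arm64, TPIDRPRW on
 * arm32). The __attribute__((const)) annotation lets the compiler coalesce
 * redundant reads within a thread's uninterrupted execution.
 */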
static inline __attribute__((const)) thread_t
current_thread_fast(void)
{
#if defined(__arm64__)
	return (thread_t)(__builtin_arm_rsr64("TPIDR_EL1"));
#else
	return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4));	// TPIDRPRW
#endif
}

/*
 * The "volatile" flavor of current_thread() is intended for use by
 * scheduler code which may need to update the thread pointer in the
 * course of a context switch. Any call to current_thread() made
 * prior to the thread pointer update should be safe to optimize away
 * as it should be consistent with that thread's state to the extent
 * the compiler can reason about it. Likewise, the context switch
 * path will eventually result in an arbitrary branch to the new
 * thread's pc, about which the compiler won't be able to reason.
 * Thus any compile-time optimization of current_thread() calls made
 * within the new thread should be safely encapsulated in its
 * register/stack state. The volatile form therefore exists to cover
 * the window between the thread pointer update and the branch to
 * the new pc.
 */
static inline thread_t
current_thread_volatile(void)
{
	/* The compiler treats rsr64 as const, which can allow it to
	 * eliminate redundant calls, which we don't want here.
	 * Thus we use volatile asm. The mrc builtin used for arm32,
	 * however, is already treated as volatile, so it can be used
	 * directly. */
#if defined(__arm64__)
	thread_t result;
	__asm__ volatile ("mrs %0, TPIDR_EL1" : "=r" (result));
	return result;
#else
	return (thread_t)(__builtin_arm_mrc(15, 0, 13, 0, 4));	// TPIDRPRW
#endif
}
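
/*
 * Illustrative sketch (hypothetical, not part of this interface): the
 * volatile form matters in a sequence like the one below, where the
 * thread register is rewritten and then re-read before the branch to
 * the new thread's pc:
 *
 *	old = current_thread();			// compiler may cache this read
 *	... write new thread pointer to TPIDR_EL1 ...
 *	new = current_thread_volatile();	// forced re-read, observes the update
 *
 * With the const-qualified current_thread_fast(), the compiler could
 * legally reuse the earlier read across the update.
 */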

#if defined(__arm64__)

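/*
 * exception_stack_pointer() reads the dedicated exception stack pointer
 * (SP_EL1) by temporarily selecting it via SPSel, then re-selects SP_EL0
 * before returning.
 */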
static inline vm_offset_t
exception_stack_pointer(void)
{
	vm_offset_t result = 0;
	__asm__ volatile (
		"msr SPSel, #1 \n"
		"mov %0, sp \n"
		"msr SPSel, #0 \n"
		: "=r" (result));

	return result;
}

#endif /* defined(__arm64__) */

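/*
 * getCpuDatap() returns the cpu_data_t for the processor the calling
 * thread is currently running on, via the pointer cached in the thread's
 * machine state. Callers that need the result to keep describing the
 * current CPU typically hold preemption disabled across its use.
 */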
#define getCpuDatap()		current_thread()->machine.CpuDatap
#define current_cpu_datap()	getCpuDatap()

extern int get_preemption_level(void);

#define mp_disable_preemption()	_disable_preemption()
#define mp_enable_preemption()	_enable_preemption()
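
/*
 * Illustrative usage sketch (hypothetical caller): pin to the current CPU
 * while touching its per-CPU data.
 *
 *	mp_disable_preemption();
 *	cpu_data_t *cdp = getCpuDatap();
 *	...
 *	mp_enable_preemption();
 */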

#endif /* MACH_KERNEL_PRIVATE */

#endif /* ARM_CPU_DATA */