/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm64/pac_asm.h>
#include <pexpert/arm64/board_config.h>
#include "assym.s"

#if XNU_MONITOR
/* Exit path defines; for controlling PPL -> kernel transitions. */
#define PPL_EXIT_DISPATCH   0 /* This is a clean exit after a PPL request. */
#define PPL_EXIT_PANIC_CALL 1 /* The PPL has called panic. */
#define PPL_EXIT_BAD_CALL   2 /* The PPL request failed. */
#define PPL_EXIT_EXCEPTION  3 /* The PPL took an exception. */
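
/*
 * Illustrative sketch (not part of the original interface): a PPL exit
 * path could dispatch on one of these reason codes, e.g. with the code
 * held in a hypothetical scratch register x15:
 *
 *     cmp  x15, PPL_EXIT_PANIC_CALL
 *     b.eq Lppl_exit_panic         // hypothetical label
 *     cmp  x15, PPL_EXIT_EXCEPTION
 *     b.eq Lppl_exit_exception     // hypothetical label
 */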


#define KERNEL_MODE_ELR    ELR_GL11
#define KERNEL_MODE_FAR    FAR_GL11
#define KERNEL_MODE_ESR    ESR_GL11
#define KERNEL_MODE_SPSR   SPSR_GL11
#define KERNEL_MODE_ASPSR  ASPSR_GL11
#define KERNEL_MODE_VBAR   VBAR_GL11
#define KERNEL_MODE_TPIDR  TPIDR_GL11

#define GUARDED_MODE_ELR   ELR_EL1
#define GUARDED_MODE_FAR   FAR_EL1
#define GUARDED_MODE_ESR   ESR_EL1
#define GUARDED_MODE_SPSR  SPSR_EL1
#define GUARDED_MODE_ASPSR ASPSR_EL1
#define GUARDED_MODE_VBAR  VBAR_EL1
#define GUARDED_MODE_TPIDR TPIDR_EL1
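
/*
 * Since these are plain preprocessor aliases, trap-handling code shared
 * between the two configurations can name the banked registers
 * generically; a minimal sketch (illustrative only, assuming the GL11
 * names resolve to system-register encodings defined elsewhere):
 *
 *     mrs x1, KERNEL_MODE_ELR      // expands to: mrs x1, ELR_GL11
 *     mrs x2, GUARDED_MODE_ELR     // expands to: mrs x2, ELR_EL1
 */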

/*
 * LOAD_PMAP_CPU_DATA
 *
 * Loads the PPL per-CPU data array entry for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 *
 */
.macro LOAD_PMAP_CPU_DATA
	/* Get the CPU ID. */
	mrs $0, MPIDR_EL1
	ubfx $1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH
	adrp $2, EXT(cluster_offsets)@page
	add $2, $2, EXT(cluster_offsets)@pageoff
	ldr $1, [$2, $1, lsl #3]

	and $0, $0, MPIDR_AFF0_MASK
	add $0, $0, $1

	/* Get the PPL CPU data array. */
	adrp $1, EXT(pmap_cpu_data_array)@page
	add $1, $1, EXT(pmap_cpu_data_array)@pageoff

	/*
	 * Sanity check the CPU ID (this is not a panic because this pertains to
	 * the hardware configuration; this should only fail if our
	 * understanding of the hardware is incorrect).
	 */
	cmp $0, MAX_CPUS
	b.hs .

	mov $2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE
	/* Get the PPL per-CPU data. */
	madd $0, $0, $2, $1
.endmacro
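
/*
 * Expressed as pseudocode, the address computed above is:
 *
 *     cpu_id = MPIDR_EL1.Aff0 + cluster_offsets[MPIDR_EL1.Aff1]
 *     arg0   = &pmap_cpu_data_array[0]
 *              + cpu_id * PMAP_CPU_DATA_ARRAY_ENTRY_SIZE
 *
 * with the final madd performing the scale-and-add in one instruction.
 */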

/*
 * GET_PMAP_CPU_DATA
 *
 * Retrieves the PPL per-CPU data for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 *
 */
.macro GET_PMAP_CPU_DATA
	LOAD_PMAP_CPU_DATA $0, $1, $2
.endmacro
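
/*
 * Typical use (illustrative; register choice is the caller's):
 *
 *     GET_PMAP_CPU_DATA x0, x1, x2 // x0 := current CPU's PPL per-CPU data
 *                                  // x1 and x2 are clobbered as scratch
 */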

#endif /* XNU_MONITOR */

/*
 * INIT_SAVED_STATE_FLAVORS
 *
 * Initializes the saved state flavors of a new saved state structure
 * arg0 - saved state pointer
 * arg1 - 32-bit scratch reg
 * arg2 - 32-bit scratch reg
 */
.macro INIT_SAVED_STATE_FLAVORS
	mov $1, ARM_SAVED_STATE64      // Set saved state to 64-bit flavor
	mov $2, ARM_SAVED_STATE64_COUNT
	stp $1, $2, [$0, SS_FLAVOR]
	mov $1, ARM_NEON_SAVED_STATE64 // Set neon state to 64-bit flavor
	str $1, [$0, NS_FLAVOR]
	mov $1, ARM_NEON_SAVED_STATE64_COUNT
	str $1, [$0, NS_COUNT]
.endmacro
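
/*
 * Example invocation (illustrative; actual callers pass their own
 * registers): initialize the flavor fields of a save area at sp, using
 * w0/w1 as 32-bit scratch:
 *
 *     INIT_SAVED_STATE_FLAVORS sp, w0, w1
 */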

/*
 * SPILL_REGISTERS
 *
 * Spills the current set of registers (excluding x0, x1, sp) to the specified
 * save area.
 *
 * On CPUs with PAC, the kernel "A" keys are used to create a thread signature.
 * These keys are deliberately kept loaded into the CPU for later kernel use.
 *
 * arg0 - KERNEL_MODE or HIBERNATE_MODE
 * x0 - Address of the save area
 */
#define KERNEL_MODE    0
#define HIBERNATE_MODE 1

.macro SPILL_REGISTERS mode
	stp x2, x3, [x0, SS64_X2]    // Save remaining GPRs
	stp x4, x5, [x0, SS64_X4]
	stp x6, x7, [x0, SS64_X6]
	stp x8, x9, [x0, SS64_X8]
	stp x10, x11, [x0, SS64_X10]
	stp x12, x13, [x0, SS64_X12]
	stp x14, x15, [x0, SS64_X14]
	stp x16, x17, [x0, SS64_X16]
	stp x18, x19, [x0, SS64_X18]
	stp x20, x21, [x0, SS64_X20]
	stp x22, x23, [x0, SS64_X22]
	stp x24, x25, [x0, SS64_X24]
	stp x26, x27, [x0, SS64_X26]
	stp x28, fp, [x0, SS64_X28]
	str lr, [x0, SS64_LR]

	/* Save arm_neon_saved_state64 */

	stp q0, q1, [x0, NS64_Q0]
	stp q2, q3, [x0, NS64_Q2]
	stp q4, q5, [x0, NS64_Q4]
	stp q6, q7, [x0, NS64_Q6]
	stp q8, q9, [x0, NS64_Q8]
	stp q10, q11, [x0, NS64_Q10]
	stp q12, q13, [x0, NS64_Q12]
	stp q14, q15, [x0, NS64_Q14]
	stp q16, q17, [x0, NS64_Q16]
	stp q18, q19, [x0, NS64_Q18]
	stp q20, q21, [x0, NS64_Q20]
	stp q22, q23, [x0, NS64_Q22]
	stp q24, q25, [x0, NS64_Q24]
	stp q26, q27, [x0, NS64_Q26]
	stp q28, q29, [x0, NS64_Q28]
	stp q30, q31, [x0, NS64_Q30]

	mrs x22, ELR_EL1  // Get exception link register
	mrs x23, SPSR_EL1 // Load CPSR into var reg x23
	mrs x24, FPSR
	mrs x25, FPCR

#if defined(HAS_APPLE_PAC)
.if \mode != HIBERNATE_MODE
	/**
	 * Restore kernel keys if:
	 *
	 * - Entering the kernel from EL0, and
	 * - CPU lacks fast A-key switching (fast A-key switching is
	 *   implemented by reprogramming KERNKey on context switch)
	 */
	.if \mode == KERNEL_MODE
#if HAS_PAC_SLOW_A_KEY_SWITCHING
	IF_PAC_FAST_A_KEY_SWITCHING Lskip_restore_kernel_keys_\@, x21
	and x21, x23, #(PSR64_MODE_EL_MASK)
	cmp x21, #(PSR64_MODE_EL0)
	bne Lskip_restore_kernel_keys_\@

	MOV64 x2, KERNEL_JOP_ID
	mrs x3, TPIDR_EL1
	ldr x3, [x3, ACT_CPUDATAP]
	REPROGRAM_JOP_KEYS Lskip_restore_kernel_keys_\@, x2, x3, x4
	isb sy
Lskip_restore_kernel_keys_\@:
#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
	.endif /* \mode == KERNEL_MODE */

	/* Save x1 and LR to preserve across call */
	mov x21, x1
	mov x20, lr

	/*
	 * Create thread state signature
	 *
	 * Arg0: The ARM context pointer
	 * Arg1: The PC value to sign
	 * Arg2: The CPSR value to sign
	 * Arg3: The LR value to sign
	 * Arg4: The X16 value to sign
	 * Arg5: The X17 value to sign
	 */
	mov x1, x22
	mov w2, w23
	mov x3, x20
	mov x4, x16
	mov x5, x17
	bl _ml_sign_thread_state
	mov lr, x20
	mov x1, x21
.endif
#endif /* defined(HAS_APPLE_PAC) */

	str x22, [x0, SS64_PC]   // Save ELR to PCB
	str w23, [x0, SS64_CPSR] // Save CPSR to PCB
	str w24, [x0, NS64_FPSR]
	str w25, [x0, NS64_FPCR]

	mrs x20, FAR_EL1
	mrs x21, ESR_EL1

	str x20, [x0, SS64_FAR]
	str w21, [x0, SS64_ESR]
.endmacro
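
/*
 * Example invocation (illustrative): with x0 pointing at the save area, a
 * kernel exception path would use
 *
 *     SPILL_REGISTERS KERNEL_MODE
 *
 * whereas a hibernation resume path would pass HIBERNATE_MODE, which skips
 * the PAC key restoration and thread-state signing above.
 */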

.macro DEADLOOP
	b .
.endmacro

// SP0 is expected to already be selected; on entry, x1 must hold the
// current thread pointer (as read from TPIDR_EL1).
.macro SWITCH_TO_KERN_STACK
	ldr x1, [x1, TH_KSTACKPTR] // Load the top of the kernel stack to x1
	mov sp, x1                 // Set the stack pointer to the kernel stack
.endmacro

// SP0 is expected to already be selected; x1 is clobbered as scratch.
.macro SWITCH_TO_INT_STACK
	mrs x1, TPIDR_EL1           // x1 := current thread
	ldr x1, [x1, ACT_CPUDATAP]  // x1 := per-CPU data
	ldr x1, [x1, CPU_ISTACKPTR] // x1 := top of the interrupt stack
	mov sp, x1                  // Set the stack pointer to the interrupt stack
.endmacro
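
/*
 * Example sequence (illustrative): exception entry code typically loads
 * the current thread before switching to its kernel stack:
 *
 *     mrs x1, TPIDR_EL1    // x1 := current thread
 *     SWITCH_TO_KERN_STACK // sp := top of the thread's kernel stack
 */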

/*
 * REENABLE_DAIF
 *
 * Restores the DAIF bits to their original state (well, the AIF bits at least).
 * A set DAIF bit masks the corresponding exception class, so this re-enables
 * (via DAIFClr) the classes whose bits are clear in arg0; IRQ and FIQ are
 * handled as a pair, and the D bit is left untouched.
 * arg0 - DAIF bits (read from the DAIF interface) to restore
 */
.macro REENABLE_DAIF
	/* AIF enable. */
	tst $0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF)
	b.eq 3f

	/* IF enable. */
	tst $0, #(DAIF_IRQF | DAIF_FIQF)
	b.eq 2f

	/* A enable. */
	tst $0, #(DAIF_ASYNCF)
	b.eq 1f

	/* Enable nothing. */
	b 4f

	/* A enable. */
1:
	msr DAIFClr, #(DAIFSC_ASYNCF)
	b 4f

	/* IF enable. */
2:
	msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
	b 4f

	/* AIF enable. */
3:
	msr DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)

	/* Done! */
4:
.endmacro
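
/*
 * Typical pattern (illustrative): capture DAIF, mask exceptions around a
 * critical section, then re-enable only what was enabled before:
 *
 *     mrs x9, DAIF            // save the current exception masks
 *     msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)
 *     ...                     // critical section
 *     REENABLE_DAIF x9
 */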