/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm64/pac_asm.h>
#include <pexpert/arm64/board_config.h>
#include "assym.s"

#if XNU_MONITOR
/* Exit path defines; for controlling PPL -> kernel transitions. */
#define PPL_EXIT_DISPATCH	0 /* This is a clean exit after a PPL request. */
#define PPL_EXIT_PANIC_CALL	1 /* The PPL has called panic. */
#define PPL_EXIT_BAD_CALL	2 /* The PPL request failed. */
#define PPL_EXIT_EXCEPTION	3 /* The PPL took an exception. */

/* Guarded mode trap numbers: these are passed as the genter immediate. */
#define GXF_ENTER_PPL	0

#define KERNEL_MODE_ELR		ELR_GL11
#define KERNEL_MODE_FAR		FAR_GL11
#define KERNEL_MODE_ESR		ESR_GL11
#define KERNEL_MODE_SPSR	SPSR_GL11
#define KERNEL_MODE_ASPSR	ASPSR_GL11
#define KERNEL_MODE_VBAR	VBAR_GL11
#define KERNEL_MODE_TPIDR	TPIDR_GL11

#define GUARDED_MODE_ELR	ELR_EL1
#define GUARDED_MODE_FAR	FAR_EL1
#define GUARDED_MODE_ESR	ESR_EL1
#define GUARDED_MODE_SPSR	SPSR_EL1
#define GUARDED_MODE_ASPSR	ASPSR_EL1
#define GUARDED_MODE_VBAR	VBAR_EL1
#define GUARDED_MODE_TPIDR	TPIDR_EL1

/*
 * LOAD_PMAP_CPU_DATA
 *
 * Loads the PPL per-CPU data array entry for the current CPU.
 * arg0 - Register through which the address of the PPL per-CPU data is returned
 * arg1 - Scratch register
 * arg2 - Scratch register
 */
.macro LOAD_PMAP_CPU_DATA
	/* Get the CPU ID. */
	mrs	$0, MPIDR_EL1
	ubfx	$1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH
	adrp	$2, EXT(cluster_offsets)@page
	add	$2, $2, EXT(cluster_offsets)@pageoff
	ldr	$1, [$2, $1, lsl #3]

	and	$0, $0, MPIDR_AFF0_MASK
	add	$0, $0, $1

	/* Get the PPL CPU data array. */
	adrp	$1, EXT(pmap_cpu_data_array)@page
	add	$1, $1, EXT(pmap_cpu_data_array)@pageoff

	/*
	 * Sanity check the CPU ID (this spins rather than panicking because it
	 * pertains to the hardware configuration; it should only fail if our
	 * understanding of the hardware is incorrect).
	 */
	cmp	$0, MAX_CPUS
	b.hs	.

	mov	$2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE
	/* Get the PPL per-CPU data. */
	madd	$0, $0, $2, $1
.endmacro

/*
 * GET_PMAP_CPU_DATA
 *
 * Retrieves the PPL per-CPU data for the current CPU.
 * arg0 - Register through which the address of the PPL per-CPU data is returned
 * arg1 - Scratch register
 * arg2 - Scratch register
 */
.macro GET_PMAP_CPU_DATA
	LOAD_PMAP_CPU_DATA $0, $1, $2
.endmacro
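
/*
 * Usage sketch (hypothetical call site, not from this file; the register
 * choices are illustrative, and both scratch registers are clobbered):
 *
 *     GET_PMAP_CPU_DATA x0, x1, x2
 *     // x0 now points at this CPU's entry in pmap_cpu_data_array
 */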

#endif /* XNU_MONITOR */

/*
 * INIT_SAVED_STATE_FLAVORS
 *
 * Initializes the saved state flavors of a new saved state structure
 * arg0 - saved state pointer
 * arg1 - 32-bit scratch reg
 * arg2 - 32-bit scratch reg
 */
.macro INIT_SAVED_STATE_FLAVORS
	mov	$1, ARM_SAVED_STATE64		// Set saved state to 64-bit flavor
	mov	$2, ARM_SAVED_STATE64_COUNT
	stp	$1, $2, [$0, SS_FLAVOR]
	mov	$1, ARM_NEON_SAVED_STATE64	// Set neon state to 64-bit flavor
	str	$1, [$0, NS_FLAVOR]
	mov	$1, ARM_NEON_SAVED_STATE64_COUNT
	str	$1, [$0, NS_COUNT]
.endmacro
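
/*
 * Usage sketch (hypothetical; the register choices are illustrative). The
 * flavor and count fields are 32-bit, so the scratch arguments are
 * w-registers:
 *
 *     INIT_SAVED_STATE_FLAVORS x21, w8, w9
 *     // [x21, SS_FLAVOR] and [x21, NS_FLAVOR] now describe 64-bit state
 */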

/*
 * SPILL_REGISTERS
 *
 * Spills the current set of registers (excluding x0, x1, sp) to the specified
 * save area.
 *
 * On CPUs with PAC, the kernel "A" keys are used to create a thread signature.
 * These keys are deliberately kept loaded into the CPU for later kernel use.
 *
 * x0 - Address of the save area
 * mode - Spill mode; thread-state signing is skipped for HIBERNATE_MODE, and
 *        the kernel-key restore applies only to KERNEL_MODE
 */

.macro SPILL_REGISTERS mode
	stp	x2, x3, [x0, SS64_X2]		// Save remaining GPRs
	stp	x4, x5, [x0, SS64_X4]
	stp	x6, x7, [x0, SS64_X6]
	stp	x8, x9, [x0, SS64_X8]
	stp	x10, x11, [x0, SS64_X10]
	stp	x12, x13, [x0, SS64_X12]
	stp	x14, x15, [x0, SS64_X14]
	stp	x16, x17, [x0, SS64_X16]
	stp	x18, x19, [x0, SS64_X18]
	stp	x20, x21, [x0, SS64_X20]
	stp	x22, x23, [x0, SS64_X22]
	stp	x24, x25, [x0, SS64_X24]
	stp	x26, x27, [x0, SS64_X26]
	stp	x28, fp, [x0, SS64_X28]
	str	lr, [x0, SS64_LR]

	/* Save arm_neon_saved_state64 */

	stp	q0, q1, [x0, NS64_Q0]
	stp	q2, q3, [x0, NS64_Q2]
	stp	q4, q5, [x0, NS64_Q4]
	stp	q6, q7, [x0, NS64_Q6]
	stp	q8, q9, [x0, NS64_Q8]
	stp	q10, q11, [x0, NS64_Q10]
	stp	q12, q13, [x0, NS64_Q12]
	stp	q14, q15, [x0, NS64_Q14]
	stp	q16, q17, [x0, NS64_Q16]
	stp	q18, q19, [x0, NS64_Q18]
	stp	q20, q21, [x0, NS64_Q20]
	stp	q22, q23, [x0, NS64_Q22]
	stp	q24, q25, [x0, NS64_Q24]
	stp	q26, q27, [x0, NS64_Q26]
	stp	q28, q29, [x0, NS64_Q28]
	stp	q30, q31, [x0, NS64_Q30]

	mrs	x22, ELR_EL1			// Get exception link register
	mrs	x23, SPSR_EL1			// Load CPSR into var reg x23
	mrs	x24, FPSR
	mrs	x25, FPCR

#if defined(HAS_APPLE_PAC)
.if \mode != HIBERNATE_MODE
	/**
	 * Restore kernel keys if:
	 *
	 * - Entering the kernel from EL0, and
	 * - CPU lacks fast A-key switching (fast A-key switching is
	 *   implemented by reprogramming KERNKey on context switch)
	 */
.if \mode == KERNEL_MODE
#if HAS_PAC_SLOW_A_KEY_SWITCHING
	IF_PAC_FAST_A_KEY_SWITCHING	Lskip_restore_kernel_keys_\@, x21
	and	x21, x23, #(PSR64_MODE_EL_MASK)
	cmp	x21, #(PSR64_MODE_EL0)
	bne	Lskip_restore_kernel_keys_\@

	MOV64	x2, KERNEL_JOP_ID
	mrs	x3, TPIDR_EL1
	ldr	x3, [x3, ACT_CPUDATAP]
	REPROGRAM_JOP_KEYS	Lskip_restore_kernel_keys_\@, x2, x3, x4
	isb	sy
Lskip_restore_kernel_keys_\@:
#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
.endif /* \mode == KERNEL_MODE */

	/* Save x1 and LR to preserve across call */
	mov	x21, x1
	mov	x20, lr

	/*
	 * Create thread state signature
	 *
	 * Arg0: The ARM context pointer
	 * Arg1: The PC value to sign
	 * Arg2: The CPSR value to sign
	 * Arg3: The LR value to sign
	 * Arg4: The X16 value to sign
	 * Arg5: The X17 value to sign
	 */
	mov	x1, x22
	mov	w2, w23
	mov	x3, x20
	mov	x4, x16
	mov	x5, x17
	bl	_ml_sign_thread_state
	mov	lr, x20
	mov	x1, x21
.endif
#endif /* defined(HAS_APPLE_PAC) */

	str	x22, [x0, SS64_PC]		// Save ELR to PCB
	str	w23, [x0, SS64_CPSR]		// Save CPSR to PCB
	str	w24, [x0, NS64_FPSR]
	str	w25, [x0, NS64_FPCR]

	mrs	x20, FAR_EL1
	mrs	x21, ESR_EL1

	str	x20, [x0, SS64_FAR]
	str	w21, [x0, SS64_ESR]
.endmacro
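
/*
 * Usage sketch (hypothetical call site; x0 must already point at the save
 * area). Note that x20-x25 are reused as temporaries after their values have
 * been spilled, so their live contents do not survive the macro:
 *
 *     SPILL_REGISTERS KERNEL_MODE
 */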

.macro DEADLOOP
	b	.
.endmacro

// SP0 is expected to already be selected; x1 must hold the current thread pointer
.macro SWITCH_TO_KERN_STACK
	ldr	x1, [x1, TH_KSTACKPTR]		// Load the top of the kernel stack to x1
	mov	sp, x1				// Set the stack pointer to the kernel stack
.endmacro
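
/*
 * Usage sketch (hypothetical; x1 is clobbered). TPIDR_EL1 holds the thread
 * pointer, as the interrupt-stack macro below also assumes:
 *
 *     mrs x1, TPIDR_EL1
 *     SWITCH_TO_KERN_STACK
 */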

// SP0 is expected to already be selected
.macro SWITCH_TO_INT_STACK
	mrs	x1, TPIDR_EL1
	ldr	x1, [x1, ACT_CPUDATAP]
	ldr	x1, [x1, CPU_ISTACKPTR]
	mov	sp, x1				// Set the stack pointer to the interrupt stack
.endmacro
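
/*
 * Usage sketch (hypothetical; SP0 must already be the active stack pointer,
 * and x1 is clobbered):
 *
 *     msr SPSel, #0
 *     SWITCH_TO_INT_STACK
 */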

/*
 * REENABLE_DAIF
 *
 * Restores the DAIF bits to their original state (well, the AIF bits at least).
 * arg0 - DAIF bits (read from the DAIF interface) to restore
 */
.macro REENABLE_DAIF
	/* AIF enable. */
	tst	$0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF)
	b.eq	3f

	/* IF enable. */
	tst	$0, #(DAIF_IRQF | DAIF_FIQF)
	b.eq	2f

	/* A enable. */
	tst	$0, #(DAIF_ASYNCF)
	b.eq	1f

	/* Enable nothing. */
	b	4f

	/* A enable. */
1:
	msr	DAIFClr, #(DAIFSC_ASYNCF)
	b	4f

	/* IF enable. */
2:
	msr	DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
	b	4f

	/* AIF enable. */
3:
	msr	DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)

	/* Done! */
4:
.endmacro

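/*
 * Usage sketch (hypothetical pairing; the argument is a DAIF value captured
 * before masking, and the DAIFSC_* masks match the ones the macro itself
 * uses with DAIFClr):
 *
 *     mrs x20, DAIF			// capture the current mask state
 *     msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)
 *     ...				// critical section
 *     REENABLE_DAIF x20		// unmask whatever was unmasked before
 */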