/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <pexpert/arm64/board_config.h>
#include "assym.s"

#if XNU_MONITOR
/* Exit path defines; for controlling PPL -> kernel transitions. */
#define PPL_EXIT_DISPATCH	0 /* This is a clean exit after a PPL request. */
#define PPL_EXIT_PANIC_CALL	1 /* The PPL has called panic. */
#define PPL_EXIT_BAD_CALL	2 /* The PPL request failed. */
#define PPL_EXIT_EXCEPTION	3 /* The PPL took an exception. */

#define KERNEL_MODE_ELR		ELR_GL11
#define KERNEL_MODE_FAR		FAR_GL11
#define KERNEL_MODE_ESR		ESR_GL11
#define KERNEL_MODE_SPSR	SPSR_GL11
#define KERNEL_MODE_ASPSR	ASPSR_GL11
#define KERNEL_MODE_VBAR	VBAR_GL11
#define KERNEL_MODE_TPIDR	TPIDR_GL11

#define GUARDED_MODE_ELR	ELR_EL1
#define GUARDED_MODE_FAR	FAR_EL1
#define GUARDED_MODE_ESR	ESR_EL1
#define GUARDED_MODE_SPSR	SPSR_EL1
#define GUARDED_MODE_ASPSR	ASPSR_EL1
#define GUARDED_MODE_VBAR	VBAR_EL1
#define GUARDED_MODE_TPIDR	TPIDR_EL1

/*
 * LOAD_PMAP_CPU_DATA
 *
 * Loads the PPL per-CPU data array entry for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 */
.macro LOAD_PMAP_CPU_DATA
	/* Get the CPU ID: Aff1 is the cluster ID, Aff0 the core within the cluster. */
	mrs		$0, MPIDR_EL1
	ubfx		$1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH
	adrp		$2, EXT(cluster_offsets)@page
	add		$2, $2, EXT(cluster_offsets)@pageoff
	ldr		$1, [$2, $1, lsl #3]		// Load this cluster's CPU-number base (8-byte entries)

	and		$0, $0, MPIDR_AFF0_MASK
	add		$0, $0, $1			// Flat CPU number = cluster base + core index

	/* Get the PPL CPU data array. */
	adrp		$1, EXT(pmap_cpu_data_array)@page
	add		$1, $1, EXT(pmap_cpu_data_array)@pageoff

	/*
	 * Sanity check the CPU ID (this is not a panic because this pertains to
	 * the hardware configuration; this should only fail if our understanding
	 * of the hardware is incorrect).
	 */
	cmp		$0, MAX_CPUS
	b.hs		.				// Spin here rather than index out of bounds

	mov		$2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE
	/* Get the PPL per-CPU data. */
	madd		$0, $0, $2, $1			// entry = array + cpu * entry_size
.endmacro

/*
 * GET_PMAP_CPU_DATA
 *
 * Retrieves the PPL per-CPU data for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 */
.macro GET_PMAP_CPU_DATA
	LOAD_PMAP_CPU_DATA $0, $1, $2
.endmacro
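
/*
 * Illustrative use only (not a call site from this file; the register
 * choices are arbitrary scratch picks):
 *
 *     GET_PMAP_CPU_DATA	x0, x1, x2	// x0 := this CPU's pmap_cpu_data entry
 */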

#endif /* XNU_MONITOR */

/*
 * INIT_SAVED_STATE_FLAVORS
 *
 * Initializes the saved state flavors of a new saved state structure.
 * arg0 - saved state pointer
 * arg1 - 32-bit scratch reg
 * arg2 - 32-bit scratch reg
 */
.macro INIT_SAVED_STATE_FLAVORS
	mov		$1, ARM_SAVED_STATE64		// Set saved state to 64-bit flavor
	mov		$2, ARM_SAVED_STATE64_COUNT
	stp		$1, $2, [$0, SS_FLAVOR]
	mov		$1, ARM_NEON_SAVED_STATE64	// Set NEON state to 64-bit flavor
	str		$1, [$0, NS_FLAVOR]
	mov		$1, ARM_NEON_SAVED_STATE64_COUNT
	str		$1, [$0, NS_COUNT]
.endmacro

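/*
 * Illustrative use (a sketch; the scratch registers here are arbitrary
 * 32-bit picks, not mandated by this file):
 *
 *     INIT_SAVED_STATE_FLAVORS	sp, w20, w21	// sp points at the new saved state
 */
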
/*
 * SPILL_REGISTERS
 *
 * Spills the current set of registers (excluding x0, x1, sp) to the specified
 * save area.
 *
 * On CPUs with PAC, the kernel "A" keys are used to create a thread signature.
 * These keys are deliberately kept loaded into the CPU for later kernel use.
 *
 * arg0 - KERNEL_MODE or HIBERNATE_MODE
 * x0 - Address of the save area
 */
#define KERNEL_MODE	0
#define HIBERNATE_MODE	1

.macro SPILL_REGISTERS mode
	stp		x2, x3, [x0, SS64_X2]		// Save remaining GPRs
	stp		x4, x5, [x0, SS64_X4]
	stp		x6, x7, [x0, SS64_X6]
	stp		x8, x9, [x0, SS64_X8]
	stp		x10, x11, [x0, SS64_X10]
	stp		x12, x13, [x0, SS64_X12]
	stp		x14, x15, [x0, SS64_X14]
	stp		x16, x17, [x0, SS64_X16]
	stp		x18, x19, [x0, SS64_X18]
	stp		x20, x21, [x0, SS64_X20]
	stp		x22, x23, [x0, SS64_X22]
	stp		x24, x25, [x0, SS64_X24]
	stp		x26, x27, [x0, SS64_X26]
	stp		x28, fp, [x0, SS64_X28]
	str		lr, [x0, SS64_LR]

	/* Save arm_neon_saved_state64 */

	stp		q0, q1, [x0, NS64_Q0]
	stp		q2, q3, [x0, NS64_Q2]
	stp		q4, q5, [x0, NS64_Q4]
	stp		q6, q7, [x0, NS64_Q6]
	stp		q8, q9, [x0, NS64_Q8]
	stp		q10, q11, [x0, NS64_Q10]
	stp		q12, q13, [x0, NS64_Q12]
	stp		q14, q15, [x0, NS64_Q14]
	stp		q16, q17, [x0, NS64_Q16]
	stp		q18, q19, [x0, NS64_Q18]
	stp		q20, q21, [x0, NS64_Q20]
	stp		q22, q23, [x0, NS64_Q22]
	stp		q24, q25, [x0, NS64_Q24]
	stp		q26, q27, [x0, NS64_Q26]
	stp		q28, q29, [x0, NS64_Q28]
	stp		q30, q31, [x0, NS64_Q30]

	mrs		x22, ELR_EL1			// Get exception link register
	mrs		x23, SPSR_EL1			// Load CPSR into var reg x23
	mrs		x24, FPSR
	mrs		x25, FPCR

#if defined(HAS_APPLE_PAC)
	.if \mode != HIBERNATE_MODE
	/**
	 * Restore kernel keys if:
	 *
	 * - Entering the kernel from EL0, and
	 * - CPU lacks fast A-key switching (fast A-key switching is
	 *   implemented by reprogramming KERNKey on context switch)
	 */
	.if \mode == KERNEL_MODE
#if HAS_PAC_SLOW_A_KEY_SWITCHING
	IF_PAC_FAST_A_KEY_SWITCHING	Lskip_restore_kernel_keys_\@, x21
	and		x21, x23, #(PSR64_MODE_EL_MASK)
	cmp		x21, #(PSR64_MODE_EL0)
	bne		Lskip_restore_kernel_keys_\@

	MOV64		x2, KERNEL_JOP_ID
	mrs		x3, TPIDR_EL1
	ldr		x3, [x3, ACT_CPUDATAP]
	REPROGRAM_JOP_KEYS	Lskip_restore_kernel_keys_\@, x2, x3, x4
	isb		sy
Lskip_restore_kernel_keys_\@:
#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
	.endif /* \mode == KERNEL_MODE */

	/* Save x1 and LR to preserve across call */
	mov		x21, x1
	mov		x20, lr

	/*
	 * Create thread state signature
	 *
	 * Arg0: The ARM context pointer
	 * Arg1: The PC value to sign
	 * Arg2: The CPSR value to sign
	 * Arg3: The LR value to sign
	 * Arg4: The X16 value to sign
	 * Arg5: The X17 value to sign
	 */
	mov		x1, x22
	mov		w2, w23
	mov		x3, x20
	mov		x4, x16
	mov		x5, x17
	bl		_ml_sign_thread_state
	mov		lr, x20
	mov		x1, x21
	.endif
#endif /* defined(HAS_APPLE_PAC) */

	str		x22, [x0, SS64_PC]		// Save ELR to PCB
	str		w23, [x0, SS64_CPSR]		// Save CPSR to PCB
	str		w24, [x0, NS64_FPSR]
	str		w25, [x0, NS64_FPCR]

	mrs		x20, FAR_EL1
	mrs		x21, ESR_EL1

	str		x20, [x0, SS64_FAR]
	str		w21, [x0, SS64_ESR]
.endmacro
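
/*
 * Illustrative use (a sketch, not a call site from this file): an exception
 * vector that has already stashed x0/x1 (this macro deliberately skips x0,
 * x1, and sp) and pointed x0 at the save area can spill the rest with:
 *
 *     SPILL_REGISTERS	KERNEL_MODE
 */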

.macro DEADLOOP
	b		.
.endmacro

// SP0 is expected to already be selected
.macro SWITCH_TO_KERN_STACK
	ldr		x1, [x1, TH_KSTACKPTR]		// Load the top of the kernel stack to x1
	mov		sp, x1				// Set the stack pointer to the kernel stack
.endmacro
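
/*
 * Illustrative use (a sketch; assumes the xnu convention that TPIDR_EL1
 * holds the current thread pointer):
 *
 *     mrs		x1, TPIDR_EL1		// x1 := current thread
 *     SWITCH_TO_KERN_STACK			// sp := top of that thread's kernel stack
 */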

// SP0 is expected to already be selected
.macro SWITCH_TO_INT_STACK
	mrs		x1, TPIDR_EL1
	ldr		x1, [x1, ACT_CPUDATAP]
	ldr		x1, [x1, CPU_ISTACKPTR]
	mov		sp, x1				// Set the stack pointer to the interrupt stack
.endmacro

/*
 * REENABLE_DAIF
 *
 * Restores the DAIF bits to their original state (well, the AIF bits at least).
 * arg0 - DAIF bits (read from the DAIF interface) to restore
 */
.macro REENABLE_DAIF
	/* AIF enable. */
	tst		$0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF)
	b.eq		3f

	/* IF enable. */
	tst		$0, #(DAIF_IRQF | DAIF_FIQF)
	b.eq		2f

	/* A enable. */
	tst		$0, #(DAIF_ASYNCF)
	b.eq		1f

	/* Enable nothing. */
	b		4f

	/* A enable. */
1:
	msr		DAIFClr, #(DAIFSC_ASYNCF)
	b		4f

	/* IF enable. */
2:
	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
	b		4f

	/* AIF enable. */
3:
	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)

	/* Done! */
4:
.endmacro
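
/*
 * Typical save/restore pattern (an illustrative sketch, not a call site
 * from this file):
 *
 *     mrs		x9, DAIF				// capture current interrupt masks
 *     msr		DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// mask IRQ/FIQ
 *     ...						// critical section
 *     REENABLE_DAIF	x9				// re-enable only what was enabled before
 */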