/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm64/pac_asm.h>
#include <pexpert/arm64/board_config.h>
#if XNU_MONITOR
/* Exit path defines; for controlling PPL -> kernel transitions. */
#define PPL_EXIT_DISPATCH	0 /* This is a clean exit after a PPL request. */
#define PPL_EXIT_PANIC_CALL	1 /* The PPL has called panic. */
#define PPL_EXIT_BAD_CALL	2 /* The PPL request failed. */
#define PPL_EXIT_EXCEPTION	3 /* The PPL took an exception. */
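/*
 * Illustrative sketch (not from the original source): a kernel-side exit
 * handler might dispatch on these codes roughly as follows, assuming the
 * exit reason has been left in x15 and that the labels are hypothetical:
 *
 *     cmp		x15, PPL_EXIT_DISPATCH
 *     b.eq		Lppl_dispatch_exit			// hypothetical label
 *     cmp		x15, PPL_EXIT_PANIC_CALL
 *     b.eq		Lppl_panic_exit				// hypothetical label
 *     // ... remaining codes handled similarly; unknown codes would panic
 */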
/* Guarded mode trap numbers: these are passed as the genter immediate. */
#define GXF_ENTER_PPL	0
#define KERNEL_MODE_ELR		ELR_GL11
#define KERNEL_MODE_FAR		FAR_GL11
#define KERNEL_MODE_ESR		ESR_GL11
#define KERNEL_MODE_SPSR	SPSR_GL11
#define KERNEL_MODE_ASPSR	ASPSR_GL11
#define KERNEL_MODE_VBAR	VBAR_GL11
#define KERNEL_MODE_TPIDR	TPIDR_GL11
#define GUARDED_MODE_ELR	ELR_EL1
#define GUARDED_MODE_FAR	FAR_EL1
#define GUARDED_MODE_ESR	ESR_EL1
#define GUARDED_MODE_SPSR	SPSR_EL1
#define GUARDED_MODE_ASPSR	ASPSR_EL1
#define GUARDED_MODE_VBAR	VBAR_EL1
#define GUARDED_MODE_TPIDR	TPIDR_EL1
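/*
 * Illustrative sketch (not from the original source): with these aliases,
 * shared exception-handling code can name the "kernel mode" exception state
 * without caring whether it is running under GXF (where the kernel's state
 * lives in the GL11 registers). A handler might capture the fault state like
 * so, with arbitrary register choices:
 *
 *     mrs		x20, KERNEL_MODE_FAR		// FAR_GL11 when guarded mode is live
 *     mrs		x21, KERNEL_MODE_ESR		// ESR_GL11 when guarded mode is live
 */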
/*
 * LOAD_PMAP_CPU_DATA
 *
 * Loads the PPL per-CPU data array entry for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 */
.macro LOAD_PMAP_CPU_DATA
	/* Get the CPU ID. */
	mrs		$0, MPIDR_EL1
	ubfx	$1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH
	adrp	$2, EXT(cluster_offsets)@page
	add		$2, $2, EXT(cluster_offsets)@pageoff
	ldr		$1, [$2, $1, lsl #3]			// Logical CPU offset for this cluster

	and		$0, $0, MPIDR_AFF0_MASK
	add		$0, $0, $1						// CPU ID = AFF0 + cluster offset

	/* Get the PPL CPU data array. */
	adrp	$1, EXT(pmap_cpu_data_array)@page
	add		$1, $1, EXT(pmap_cpu_data_array)@pageoff

	/*
	 * Sanity check the CPU ID (this is not a panic because this pertains to
	 * the hardware configuration; this should only fail if our
	 * understanding of the hardware is incorrect).
	 */
	cmp		$0, MAX_CPUS
	b.hs	.

	mov		$2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE
	/* Get the PPL per-CPU data. */
	madd	$0, $0, $2, $1					// Entry = array base + (CPU ID * entry size)
.endmacro
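/*
 * Illustrative note (not from the original source): the final madd computes
 * entry = &pmap_cpu_data_array[0] + cpu_id * PMAP_CPU_DATA_ARRAY_ENTRY_SIZE;
 * e.g. for CPU 3 and a hypothetical 64-byte entry, the macro would return
 * base + 192. A caller with x0/x1/x2 free would write:
 *
 *     LOAD_PMAP_CPU_DATA	x0, x1, x2		// x0 <- this CPU's PPL data entry
 */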
/*
 * GET_PMAP_CPU_DATA
 *
 * Retrieves the PPL per-CPU data for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 */
.macro GET_PMAP_CPU_DATA
	LOAD_PMAP_CPU_DATA $0, $1, $2
.endmacro
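/*
 * Illustrative usage (not from the original source): PPL code that needs its
 * per-CPU data can invoke this with any three free registers, e.g.:
 *
 *     GET_PMAP_CPU_DATA	x12, x13, x14	// x12 <- PPL per-CPU data pointer
 */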
#endif /* XNU_MONITOR */
/*
 * INIT_SAVED_STATE_FLAVORS
 *
 * Initializes the saved state flavors of a new saved state structure
 * arg0 - saved state pointer
 * arg1 - 32-bit scratch reg
 * arg2 - 32-bit scratch reg
 */
.macro INIT_SAVED_STATE_FLAVORS
	mov		$1, ARM_SAVED_STATE64			// Set saved state to 64-bit flavor
	mov		$2, ARM_SAVED_STATE64_COUNT
	stp		$1, $2, [$0, SS_FLAVOR]
	mov		$1, ARM_NEON_SAVED_STATE64		// Set neon state to 64-bit flavor
	str		$1, [$0, NS_FLAVOR]
	mov		$1, ARM_NEON_SAVED_STATE64_COUNT
	str		$1, [$0, NS_COUNT]
.endmacro
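/*
 * Illustrative usage (not from the original source): code that carves a new
 * saved-state structure out of a stack or allocation might initialize its
 * flavor fields with the structure pointer in x21 and two 32-bit scratch
 * registers (the flavor/count fields are 32 bits wide):
 *
 *     INIT_SAVED_STATE_FLAVORS	x21, w8, w9
 */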
/*
 * SPILL_REGISTERS
 *
 * Spills the current set of registers (excluding x0, x1, sp) to the specified
 * save area.
 *
 * On CPUs with PAC, the kernel "A" keys are used to create a thread signature.
 * These keys are deliberately kept loaded into the CPU for later kernel use.
 *
 * x0 - Address of the save area
 */
.macro SPILL_REGISTERS	mode
	stp		x2, x3, [x0, SS64_X2]			// Save remaining GPRs
	stp		x4, x5, [x0, SS64_X4]
	stp		x6, x7, [x0, SS64_X6]
	stp		x8, x9, [x0, SS64_X8]
	stp		x10, x11, [x0, SS64_X10]
	stp		x12, x13, [x0, SS64_X12]
	stp		x14, x15, [x0, SS64_X14]
	stp		x16, x17, [x0, SS64_X16]
	stp		x18, x19, [x0, SS64_X18]
	stp		x20, x21, [x0, SS64_X20]
	stp		x22, x23, [x0, SS64_X22]
	stp		x24, x25, [x0, SS64_X24]
	stp		x26, x27, [x0, SS64_X26]
	stp		x28, fp, [x0, SS64_X28]
	str		lr, [x0, SS64_LR]
	/* Save arm_neon_saved_state64 */

	stp		q0, q1, [x0, NS64_Q0]
	stp		q2, q3, [x0, NS64_Q2]
	stp		q4, q5, [x0, NS64_Q4]
	stp		q6, q7, [x0, NS64_Q6]
	stp		q8, q9, [x0, NS64_Q8]
	stp		q10, q11, [x0, NS64_Q10]
	stp		q12, q13, [x0, NS64_Q12]
	stp		q14, q15, [x0, NS64_Q14]
	stp		q16, q17, [x0, NS64_Q16]
	stp		q18, q19, [x0, NS64_Q18]
	stp		q20, q21, [x0, NS64_Q20]
	stp		q22, q23, [x0, NS64_Q22]
	stp		q24, q25, [x0, NS64_Q24]
	stp		q26, q27, [x0, NS64_Q26]
	stp		q28, q29, [x0, NS64_Q28]
	stp		q30, q31, [x0, NS64_Q30]
	mrs		x22, ELR_EL1					// Get exception link register
	mrs		x23, SPSR_EL1					// Load CPSR into var reg x23
	mrs		x24, FPSR
	mrs		x25, FPCR
#if defined(HAS_APPLE_PAC)
.if \mode != HIBERNATE_MODE
	/**
	 * Restore kernel keys if:
	 *
	 * - Entering the kernel from EL0, and
	 * - CPU lacks fast A-key switching (fast A-key switching is
	 *   implemented by reprogramming KERNKey on context switch)
	 */
.if \mode == KERNEL_MODE
#if HAS_PAC_SLOW_A_KEY_SWITCHING
	IF_PAC_FAST_A_KEY_SWITCHING	Lskip_restore_kernel_keys_\@, x21
	and		x21, x23, #(PSR64_MODE_EL_MASK)
	cmp		x21, #(PSR64_MODE_EL0)
	bne		Lskip_restore_kernel_keys_\@

	MOV64	x2, KERNEL_JOP_ID
	mrs		x3, TPIDR_EL1					// Current thread
	ldr		x3, [x3, ACT_CPUDATAP]
	REPROGRAM_JOP_KEYS	Lskip_restore_kernel_keys_\@, x2, x3, x4
Lskip_restore_kernel_keys_\@:
#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
.endif /* \mode == KERNEL_MODE */
	/* Save x1 and LR to preserve across call */
	mov		x19, x1
	mov		x20, lr

	/*
	 * Create thread state signature
	 *
	 * Arg0: The ARM context pointer
	 * Arg1: The PC value to sign
	 * Arg2: The CPSR value to sign
	 * Arg3: The LR value to sign
	 * Arg4: The X16 value to sign
	 * Arg5: The X17 value to sign
	 */
	mov		x1, x22							// PC (ELR captured above)
	mov		w2, w23							// CPSR
	mov		x3, x20							// LR
	mov		x4, x16
	mov		x5, x17
	bl		_ml_sign_thread_state

	/* Restore x1 and LR */
	mov		lr, x20
	mov		x1, x19

.endif /* \mode != HIBERNATE_MODE */
#endif /* defined(HAS_APPLE_PAC) */
	str		x22, [x0, SS64_PC]				// Save ELR to PCB
	str		w23, [x0, SS64_CPSR]			// Save CPSR to PCB
	str		w24, [x0, NS64_FPSR]
	str		w25, [x0, NS64_FPCR]
	mrs		x20, FAR_EL1
	mrs		x21, ESR_EL1
	str		x20, [x0, SS64_FAR]
	str		w21, [x0, SS64_ESR]
.endmacro
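/*
 * Illustrative usage (not from the original source): an exception vector that
 * has already saved x0/x1 and pointed x0 at the save area would spill the
 * remaining machine state with, e.g.:
 *
 *     SPILL_REGISTERS	KERNEL_MODE
 */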
// SP0 is expected to already be selected
.macro SWITCH_TO_KERN_STACK
	ldr		x1, [x1, TH_KSTACKPTR]			// Load the top of the kernel stack to x1
	mov		sp, x1							// Set the stack pointer to the kernel stack
.endmacro
// SP0 is expected to already be selected
.macro SWITCH_TO_INT_STACK
	mrs		x1, TPIDR_EL1					// Load the current thread
	ldr		x1, [x1, ACT_CPUDATAP]
	ldr		x1, [x1, CPU_ISTACKPTR]
	mov		sp, x1							// Set the stack pointer to the interrupt stack
.endmacro
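/*
 * Illustrative usage (not from the original source): a synchronous exception
 * path might move onto the thread's kernel stack with the current thread
 * pointer staged in x1 beforehand (both macros clobber x1):
 *
 *     mrs		x1, TPIDR_EL1				// current thread
 *     SWITCH_TO_KERN_STACK					// sp <- top of kernel stack
 *
 * whereas an interrupt path would use SWITCH_TO_INT_STACK to land on the
 * per-CPU interrupt stack instead.
 */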
/*
 * REENABLE_DAIF
 *
 * Restores the DAIF bits to their original state (well, the AIF bits at least).
 * arg0 - DAIF bits (read from the DAIF interface) to restore
 */
.macro REENABLE_DAIF
	/* AIF enabled. */
	tst		$0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF)
	beq		3f

	/* IF enabled. */
	tst		$0, #(DAIF_IRQF | DAIF_FIQF)
	beq		2f

	/* A enabled. */
	tst		$0, #(DAIF_ASYNCF)
	beq		1f

	/* Enable nothing. */
	b		4f

	/* A enabled. */
1:
	msr		DAIFClr, #(DAIFSC_ASYNCF)
	b		4f

	/* IF enabled. */
2:
	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
	b		4f

	/* AIF enabled. */
3:
	msr		DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)
4:
.endmacro
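/*
 * Illustrative usage (not from the original source): a caller that masks
 * IRQ/FIQ around a critical section can restore the previous enables by
 * capturing the DAIF bits first:
 *
 *     mrs		x9, DAIF					// save current masks
 *     msr		DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)
 *     // ... critical section ...
 *     REENABLE_DAIF	x9
 */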