/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm64/pac_asm.h>
#include <pexpert/arm64/board_config.h>

#if XNU_MONITOR
/* Exit path defines; for controlling PPL -> kernel transitions. */
#define PPL_EXIT_DISPATCH   0 /* This is a clean exit after a PPL request. */
#define PPL_EXIT_PANIC_CALL 1 /* The PPL has called panic. */
#define PPL_EXIT_BAD_CALL   2 /* The PPL request failed. */
#define PPL_EXIT_EXCEPTION  3 /* The PPL took an exception. */

#define KERNEL_MODE_ELR    ELR_GL11
#define KERNEL_MODE_FAR    FAR_GL11
#define KERNEL_MODE_ESR    ESR_GL11
#define KERNEL_MODE_SPSR   SPSR_GL11
#define KERNEL_MODE_ASPSR  ASPSR_GL11
#define KERNEL_MODE_VBAR   VBAR_GL11
#define KERNEL_MODE_TPIDR  TPIDR_GL11

#define GUARDED_MODE_ELR   ELR_EL1
#define GUARDED_MODE_FAR   FAR_EL1
#define GUARDED_MODE_ESR   ESR_EL1
#define GUARDED_MODE_SPSR  SPSR_EL1
#define GUARDED_MODE_ASPSR ASPSR_EL1
#define GUARDED_MODE_VBAR  VBAR_EL1
#define GUARDED_MODE_TPIDR TPIDR_EL1

/*
 * LOAD_PMAP_CPU_DATA
 *
 * Loads the PPL per-CPU data array entry for the current CPU.
 *   arg0 - Address of the PPL per-CPU data is returned through this
 *   arg1 - Scratch register
 *   arg2 - Scratch register
 */
.macro LOAD_PMAP_CPU_DATA
	/* Get the CPU ID. */
	mrs	$0, MPIDR_EL1
	ubfx	$1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH
	adrp	$2, EXT(cluster_offsets)@page
	add	$2, $2, EXT(cluster_offsets)@pageoff
	ldr	$1, [$2, $1, lsl #3]

	and	$0, $0, MPIDR_AFF0_MASK
	add	$0, $0, $1

	/* Get the PPL CPU data array. */
	adrp	$1, EXT(pmap_cpu_data_array)@page
	add	$1, $1, EXT(pmap_cpu_data_array)@pageoff

	/*
	 * Sanity check the CPU ID (this is not a panic because this pertains to
	 * the hardware configuration; this should only fail if our
	 * understanding of the hardware is incorrect).
	 */
	cmp	$0, MAX_CPUS
	b.hs	.

	mov	$2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE
	/* Get the PPL per-CPU data. */
	madd	$0, $0, $2, $1
.endmacro
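
/*
 * Illustrative expansion (example added for clarity; the register choice
 * is arbitrary, not taken from any caller):
 *
 *     LOAD_PMAP_CPU_DATA	x0, x1, x2
 *
 * leaves x0 pointing at this CPU's entry in pmap_cpu_data_array and
 * clobbers x1/x2.
 */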

/*
 * GET_PMAP_CPU_DATA
 *
 * Retrieves the PPL per-CPU data for the current CPU.
 *   arg0 - Address of the PPL per-CPU data is returned through this
 *   arg1 - Scratch register
 *   arg2 - Scratch register
 */
.macro GET_PMAP_CPU_DATA
	LOAD_PMAP_CPU_DATA $0, $1, $2
.endmacro
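
/*
 * Illustrative use (hypothetical registers): any three dead GPRs work,
 * e.g.
 *
 *     GET_PMAP_CPU_DATA	x12, x13, x14
 */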

#endif /* XNU_MONITOR */

/*
 * INIT_SAVED_STATE_FLAVORS
 *
 * Initializes the saved state flavors of a new saved state structure.
 *   arg0 - saved state pointer
 *   arg1 - 32-bit scratch reg
 *   arg2 - 32-bit scratch reg
 */
.macro INIT_SAVED_STATE_FLAVORS
	mov	$1, ARM_SAVED_STATE64			// Set saved state to 64-bit flavor
	mov	$2, ARM_SAVED_STATE64_COUNT
	stp	$1, $2, [$0, SS_FLAVOR]
	mov	$1, ARM_NEON_SAVED_STATE64		// Set neon state to 64-bit flavor
	str	$1, [$0, NS_FLAVOR]
	mov	$1, ARM_NEON_SAVED_STATE64_COUNT
	str	$1, [$0, NS_COUNT]
.endmacro
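
/*
 * Illustrative use (hypothetical register choice, not from this file):
 *
 *     INIT_SAVED_STATE_FLAVORS	x25, w2, w3
 *
 * where x25 points at a freshly initialized arm_context_t and w2/w3 are
 * 32-bit scratch registers the caller can spare.
 */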

/*
 * SPILL_REGISTERS
 *
 * Spills the current set of registers (excluding x0, x1, sp) to the specified
 * save area.
 *
 * On CPUs with PAC, the kernel "A" keys are used to create a thread signature.
 * These keys are deliberately kept loaded into the CPU for later kernel use.
 *
 *   arg0 - KERNEL_MODE or HIBERNATE_MODE
 *   x0 - Address of the save area
 */
#define KERNEL_MODE	0
#define HIBERNATE_MODE	1

.macro SPILL_REGISTERS	mode
	stp	x2, x3, [x0, SS64_X2]			// Save remaining GPRs
	stp	x4, x5, [x0, SS64_X4]
	stp	x6, x7, [x0, SS64_X6]
	stp	x8, x9, [x0, SS64_X8]
	stp	x10, x11, [x0, SS64_X10]
	stp	x12, x13, [x0, SS64_X12]
	stp	x14, x15, [x0, SS64_X14]
	stp	x16, x17, [x0, SS64_X16]
	stp	x18, x19, [x0, SS64_X18]
	stp	x20, x21, [x0, SS64_X20]
	stp	x22, x23, [x0, SS64_X22]
	stp	x24, x25, [x0, SS64_X24]
	stp	x26, x27, [x0, SS64_X26]
	stp	x28, fp, [x0, SS64_X28]
	str	lr, [x0, SS64_LR]

	/* Save arm_neon_saved_state64 */

	stp	q0, q1, [x0, NS64_Q0]
	stp	q2, q3, [x0, NS64_Q2]
	stp	q4, q5, [x0, NS64_Q4]
	stp	q6, q7, [x0, NS64_Q6]
	stp	q8, q9, [x0, NS64_Q8]
	stp	q10, q11, [x0, NS64_Q10]
	stp	q12, q13, [x0, NS64_Q12]
	stp	q14, q15, [x0, NS64_Q14]
	stp	q16, q17, [x0, NS64_Q16]
	stp	q18, q19, [x0, NS64_Q18]
	stp	q20, q21, [x0, NS64_Q20]
	stp	q22, q23, [x0, NS64_Q22]
	stp	q24, q25, [x0, NS64_Q24]
	stp	q26, q27, [x0, NS64_Q26]
	stp	q28, q29, [x0, NS64_Q28]
	stp	q30, q31, [x0, NS64_Q30]

	mrs	x22, ELR_EL1				// Get exception link register
	mrs	x23, SPSR_EL1				// Load CPSR into var reg x23
	mrs	x24, FPSR
	mrs	x25, FPCR

#if defined(HAS_APPLE_PAC)
.if \mode != HIBERNATE_MODE
	/*
	 * Restore kernel keys if:
	 *
	 * - Entering the kernel from EL0, and
	 * - CPU lacks fast A-key switching (fast A-key switching is
	 *   implemented by reprogramming KERNKey on context switch)
	 */
.if \mode == KERNEL_MODE
#if HAS_PAC_SLOW_A_KEY_SWITCHING
	IF_PAC_FAST_A_KEY_SWITCHING	Lskip_restore_kernel_keys_\@, x21
	and	x21, x23, #(PSR64_MODE_EL_MASK)
	cmp	x21, #(PSR64_MODE_EL0)
	bne	Lskip_restore_kernel_keys_\@

	MOV64	x2, KERNEL_JOP_ID
	mrs	x3, TPIDR_EL1
	ldr	x3, [x3, ACT_CPUDATAP]
	REPROGRAM_JOP_KEYS	Lskip_restore_kernel_keys_\@, x2, x3, x4

Lskip_restore_kernel_keys_\@:
#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
.endif /* \mode == KERNEL_MODE */

	/* Save x1 and LR to preserve across call */
	mov	x21, x1
	mov	x20, lr

	/*
	 * Create thread state signature
	 *
	 * Arg0: The ARM context pointer
	 * Arg1: The PC value to sign
	 * Arg2: The CPSR value to sign
	 * Arg3: The LR value to sign
	 * Arg4: The X16 value to sign
	 * Arg5: The X17 value to sign
	 */
	mov	x1, x22
	mov	w2, w23
	mov	x3, x20
	mov	x4, x16
	mov	x5, x17
	bl	_ml_sign_thread_state

	/* Restore x1 and LR */
	mov	lr, x20
	mov	x1, x21
.endif /* \mode != HIBERNATE_MODE */
#endif /* defined(HAS_APPLE_PAC) */

	str	x22, [x0, SS64_PC]			// Save ELR to PCB
	str	w23, [x0, SS64_CPSR]			// Save CPSR to PCB
	str	w24, [x0, NS64_FPSR]
	str	w25, [x0, NS64_FPCR]

	mrs	x20, FAR_EL1
	mrs	x21, ESR_EL1

	str	x20, [x0, SS64_FAR]
	str	w21, [x0, SS64_ESR]
.endmacro
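
/*
 * Illustrative use (hypothetical; the exception-vector code that points
 * x0 at the save area lives outside this header):
 *
 *     // x0 = address of an arm_context_t save area; x1 is preserved
 *     SPILL_REGISTERS	KERNEL_MODE
 */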

// SP0 is expected to already be selected
.macro SWITCH_TO_KERN_STACK
	ldr	x1, [x1, TH_KSTACKPTR]			// Load the top of the kernel stack to x1
	mov	sp, x1					// Set the stack pointer to the kernel stack
.endmacro
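
/*
 * Illustrative use (hypothetical): the caller loads x1 with the current
 * thread pointer first, e.g.
 *
 *     mrs	x1, TPIDR_EL1			// x1 = current thread
 *     SWITCH_TO_KERN_STACK			// sp = top of its kernel stack
 */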

// SP0 is expected to already be selected
.macro SWITCH_TO_INT_STACK
	mrs	x1, TPIDR_EL1
	ldr	x1, [x1, ACT_CPUDATAP]
	ldr	x1, [x1, CPU_ISTACKPTR]
	mov	sp, x1					// Set the stack pointer to the interrupt stack
.endmacro
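
/*
 * Illustrative use (hypothetical): unlike SWITCH_TO_KERN_STACK, this
 * macro fetches the thread pointer itself, so x1 only needs to be dead:
 *
 *     SWITCH_TO_INT_STACK			// sp = this CPU's interrupt stack
 */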

/*
 * REENABLE_DAIF
 *
 * Restores the DAIF bits to their original state (well, the AIF bits at least).
 *   arg0 - DAIF bits (read from the DAIF interface) to restore
 */
.macro REENABLE_DAIF
	/* AIF enable. */
	tst	$0, #(DAIF_IRQF | DAIF_FIQF | DAIF_ASYNCF)
	b.eq	3f

	/* IF enable. */
	tst	$0, #(DAIF_IRQF | DAIF_FIQF)
	b.eq	2f

	/* A enable. */
	tst	$0, #(DAIF_ASYNCF)
	b.eq	1f

	/* Enable nothing. */
	b	4f

	/* A enable. */
1:
	msr	DAIFClr, #(DAIFSC_ASYNCF)
	b	4f

	/* IF enable. */
2:
	msr	DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF)
	b	4f

	/* AIF enable. */
3:
	msr	DAIFClr, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)

	/* Done! */
4:
.endmacro
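
/*
 * Illustrative use (hypothetical register choice):
 *
 *     mrs	x9, DAIF				// capture current masks
 *     msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// mask IRQ/FIQ
 *     ...						// critical section
 *     REENABLE_DAIF	x9			// restore only what was enabled
 */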