/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/exception_asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/pac_asm.h>
#include <arm64/proc_reg.h>
#include <arm/pmap.h>
#include <pexpert/arm64/board_config.h>
#include <sys/errno.h>
#include "assym.s"


#if defined(HAS_APPLE_PAC)

.macro SET_KERN_KEY	dst, apctl_el1
	orr	\dst, \apctl_el1, #APCTL_EL1_KernKeyEn
.endmacro

.macro CLEAR_KERN_KEY	dst, apctl_el1
	and	\dst, \apctl_el1, #~APCTL_EL1_KernKeyEn
.endmacro

/*
 * uint64_t ml_enable_user_jop_key(uint64_t user_jop_key)
 */
	.align 2
	.globl EXT(ml_enable_user_jop_key)
LEXT(ml_enable_user_jop_key)
	mov	x1, x0
	mrs	x2, TPIDR_EL1
	ldr	x2, [x2, ACT_CPUDATAP]
	ldr	x0, [x2, CPU_JOP_KEY]

	cmp	x0, x1
	b.eq	Lskip_program_el0_jop_key
	/*
	 * We can safely write to the JOP key registers without updating
	 * current_cpu_datap()->jop_key. The complementary
	 * ml_disable_user_jop_key() call will put back the old value. Interrupts
	 * are also disabled, so nothing else will read this field in the meantime.
	 */
	SET_JOP_KEY_REGISTERS	x1, x2
Lskip_program_el0_jop_key:

	/*
	 * if (cpu has APCTL_EL1.UserKeyEn) {
	 *	set APCTL_EL1.KernKeyEn		// KERNKey is mixed into EL0 keys
	 * } else {
	 *	clear APCTL_EL1.KernKeyEn	// KERNKey is not mixed into EL0 keys
	 * }
	 */
	mrs	x1, ARM64_REG_APCTL_EL1
#if defined(APPLEFIRESTORM)
	SET_KERN_KEY	x2, x1
	CLEAR_KERN_KEY	x3, x1
	tst	x1, #(APCTL_EL1_UserKeyEn)
	csel	x1, x2, x3, ne
#elif defined(HAS_APCTL_EL1_USERKEYEN)
	SET_KERN_KEY	x1, x1
#else
	CLEAR_KERN_KEY	x1, x1
#endif
	msr	ARM64_REG_APCTL_EL1, x1
	isb
	ret

/*
 * void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state)
 */
	.align 2
	.globl EXT(ml_disable_user_jop_key)
LEXT(ml_disable_user_jop_key)
	cmp	x0, x1
	b.eq	Lskip_program_prev_jop_key
	SET_JOP_KEY_REGISTERS	x1, x2
Lskip_program_prev_jop_key:

	/*
	 * if (cpu has APCTL_EL1.UserKeyEn) {
	 *	clear APCTL_EL1.KernKeyEn	// KERNKey is not mixed into EL1 keys
	 * } else {
	 *	set APCTL_EL1.KernKeyEn		// KERNKey is mixed into EL1 keys
	 * }
	 */
	mrs	x1, ARM64_REG_APCTL_EL1
#if defined(APPLEFIRESTORM)
	CLEAR_KERN_KEY	x2, x1
	SET_KERN_KEY	x3, x1
	tst	x1, #(APCTL_EL1_UserKeyEn)
	csel	x1, x2, x3, ne
#elif defined(HAS_APCTL_EL1_USERKEYEN)
	CLEAR_KERN_KEY	x1, x1
#else
	SET_KERN_KEY	x1, x1
#endif
	msr	ARM64_REG_APCTL_EL1, x1
	isb
	ret
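
/*
 * Illustrative pairing of the two routines above (not part of the original
 * source; the caller-side variable names are assumptions). A caller that has
 * interrupts masked does roughly:
 *
 *	uint64_t saved_state = ml_enable_user_jop_key(user_jop_key);
 *	...run with the user JOP key and KernKeyEn configured for EL0...
 *	ml_disable_user_jop_key(user_jop_key, saved_state);
 *
 * i.e. the value returned here is simply handed back to the disable call.
 */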

#endif /* defined(HAS_APPLE_PAC) */

#if HAS_BP_RET

/*
 * void set_bp_ret(void)
 * Helper function to enable branch predictor state retention
 * across ACC sleep
 */

	.align 2
	.globl EXT(set_bp_ret)
LEXT(set_bp_ret)
	// Load bpret boot-arg
	adrp	x14, EXT(bp_ret)@page
	add	x14, x14, EXT(bp_ret)@pageoff
	ldr	w14, [x14]

	mrs	x13, ARM64_REG_ACC_CFG
	and	x13, x13, (~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift))
	and	x14, x14, #(ARM64_REG_ACC_CFG_bpSlp_mask)
	orr	x13, x13, x14, lsl #(ARM64_REG_ACC_CFG_bpSlp_shift)
	msr	ARM64_REG_ACC_CFG, x13

	ret
#endif // HAS_BP_RET

#if HAS_NEX_PG
	.align 2
	.globl EXT(set_nex_pg)
LEXT(set_nex_pg)
	mrs	x14, MPIDR_EL1
	// Skip if this isn't a p-core; NEX powergating isn't available for e-cores
	and	x14, x14, #(MPIDR_PNE)
	cbz	x14, Lnex_pg_done

	// Set the SEG-recommended value of 12 additional reset cycles
	HID_INSERT_BITS	ARM64_REG_HID13, ARM64_REG_HID13_RstCyc_mask, ARM64_REG_HID13_RstCyc_val, x13
	HID_SET_BITS	ARM64_REG_HID14, ARM64_REG_HID14_NexPwgEn, x13

Lnex_pg_done:
	ret

#endif // HAS_NEX_PG

/* uint32_t get_fpscr(void):
 * Returns (FPSR | FPCR).
 */
	.align 2
	.globl EXT(get_fpscr)
LEXT(get_fpscr)
#if __ARM_VFP__
	mrs	x1, FPSR		// Grab FPSR
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	orr	x0, x4, x5
	and	x1, x1, x0		// Be paranoid, and clear bits we expect to
					// be clear
	mrs	x2, FPCR		// Grab FPCR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	orr	x0, x4, x5
	and	x2, x2, x0		// Be paranoid, and clear bits we expect to
					// be clear
	orr	x0, x1, x2		// OR them to get FPSCR equivalent state
#else
	mov	x0, #0
#endif
	ret
	.align 2
	.globl EXT(set_fpscr)
/* void set_fpscr(uint32_t value):
 * Set the FPCR and FPSR registers, based on the given value; a
 * noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
 * and FPCR are not responsible for condition codes.
 */
LEXT(set_fpscr)
#if __ARM_VFP__
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	orr	x1, x4, x5
	and	x1, x1, x0		// Clear the bits that don't apply to FPSR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	orr	x2, x4, x5
	and	x2, x2, x0		// Clear the bits that don't apply to FPCR
	msr	FPSR, x1		// Write FPSR
	msr	FPCR, x2		// Write FPCR
	dsb	ish			// FPCR requires synchronization
#endif
	ret
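
/*
 * Taken together, the two routines above give callers an AArch32-style FPSCR
 * view of the AArch64 state; informally:
 *
 *	get_fpscr()      ~= (FPSR & FPSR_MASK) | (FPCR & FPCR_MASK)
 *	set_fpscr(value) ~= { FPSR = value & FPSR_MASK; FPCR = value & FPCR_MASK; }
 */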

/*
 * void update_mdscr(unsigned long clear, unsigned long set)
 * Clears and sets the specified bits in MDSCR_EL1.
 *
 * Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
 * self-hosted user mode debug. Any checks before the MSR can be skipped with ROP,
 * so we need to put the checks after the MSR, where they can't be skipped. That
 * still leaves a small window if a breakpoint is set on the instruction
 * immediately after the MSR. To handle that, we also do a check and then set of
 * the breakpoint control registers. This allows us to guarantee that a given
 * core will never have both KDE set and a breakpoint targeting EL1.
 *
 * If KDE gets set, unset it and then panic.
 */
	.align 2
	.globl EXT(update_mdscr)
LEXT(update_mdscr)
	mov	x4, #0
	mrs	x2, MDSCR_EL1
	bic	x2, x2, x0
	orr	x2, x2, x1
1:
	bic	x2, x2, #0x2000
	msr	MDSCR_EL1, x2
#if defined(CONFIG_KERNEL_INTEGRITY)
	/*
	 * verify KDE didn't get set (including via ROP)
	 * If set, clear it and then panic
	 */
	ands	x3, x2, #0x2000
	orr	x4, x4, x3
	bne	1b
	cmp	x4, xzr
	b.ne	Lupdate_mdscr_panic
#endif
	ret

Lupdate_mdscr_panic:
	adrp	x0, Lupdate_mdscr_panic_str@page
	add	x0, x0, Lupdate_mdscr_panic_str@pageoff
	b	EXT(panic)
	b	.

Lupdate_mdscr_panic_str:
	.asciz "MDSCR.KDE was set"
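
/*
 * Illustrative call sites (values are examples only, not taken from this file):
 *
 *	update_mdscr(0, 0x8000);	// set MDSCR_EL1.MDE to enable debug exceptions
 *	update_mdscr(0x8000, 0);	// clear it again
 *
 * Bit 13 (KDE) is deliberately forced off by the loop above no matter what the
 * caller passes in.
 */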


/*
 * Set MMU Translation Table Base Alternate
 */
	.text
	.align 2
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
	dsb	sy
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_ttbr1)
	mov	lr, x1
#else
#if defined(HAS_VMSA_LOCK)
#if DEBUG || DEVELOPMENT
	mrs	x1, ARM64_REG_VMSA_LOCK_EL1
	and	x1, x1, #(VMSA_LOCK_TTBR1_EL1)
	cbnz	x1, L_set_locked_reg_panic
#endif /* DEBUG || DEVELOPMENT */
#endif /* defined(HAS_VMSA_LOCK) */
	msr	TTBR1_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	isb	sy
	ret

#if XNU_MONITOR
	.section __PPLTEXT,__text,regular,pure_instructions
#else
	.text
#endif
	.align 2
	.globl EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
#if __ARM_KERNEL_PROTECT__
	/* All EL1-mode ASIDs are odd. */
	orr	x0, x0, #(1 << TTBR_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */
	dsb	ish
	msr	TTBR0_EL1, x0
	isb	sy
	ret


#if XNU_MONITOR
	.text
	.align 2
	.globl EXT(ml_get_ppl_cpu_data)
LEXT(ml_get_ppl_cpu_data)
	LOAD_PMAP_CPU_DATA	x0, x1, x2
	ret
#endif

/*
 * set AUX control register
 */
	.text
	.align 2
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	msr	ACTLR_EL1, x0
	// Synchronize system
	isb	sy
	ret

#if __ARM_KERNEL_PROTECT__
	.text
	.align 2
	.globl EXT(set_vbar_el1)
LEXT(set_vbar_el1)
#if defined(KERNEL_INTEGRITY_KTRR)
	b	EXT(pinst_set_vbar)
#else
	msr	VBAR_EL1, x0
	ret
#endif
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_VMSA_LOCK)
	.text
	.align 2
	.globl EXT(vmsa_lock)
LEXT(vmsa_lock)
	isb	sy
	mov	x1, #(VMSA_LOCK_SCTLR_M_BIT)
#if __ARM_MIXED_PAGE_SIZE__
	mov	x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_VBAR_EL1)
#else
	mov	x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_TCR_EL1 | VMSA_LOCK_VBAR_EL1)
#endif
	orr	x0, x0, x1
	msr	ARM64_REG_VMSA_LOCK_EL1, x0
	isb	sy
	ret
#endif /* defined(HAS_VMSA_LOCK) */

/*
 * set translation control register
 */
	.text
	.align 2
	.globl EXT(set_tcr)
LEXT(set_tcr)
#if defined(APPLE_ARM64_ARCH_FAMILY)
#if DEBUG || DEVELOPMENT
	// Assert that T0Z is always equal to T1Z
	eor	x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
	and	x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
	cbnz	x1, L_set_tcr_panic
#endif /* DEBUG || DEVELOPMENT */
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_tcr)
	mov	lr, x1
#else
#if defined(HAS_VMSA_LOCK)
#if DEBUG || DEVELOPMENT
	// assert TCR unlocked
	mrs	x1, ARM64_REG_VMSA_LOCK_EL1
	and	x1, x1, #(VMSA_LOCK_TCR_EL1)
	cbnz	x1, L_set_locked_reg_panic
#endif /* DEBUG || DEVELOPMENT */
#endif /* defined(HAS_VMSA_LOCK) */
	msr	TCR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	isb	sy
	ret

#if DEBUG || DEVELOPMENT
L_set_tcr_panic:
	PUSH_FRAME
	sub	sp, sp, #16
	str	x0, [sp]
	adr	x0, L_set_tcr_panic_str
	BRANCH_EXTERN panic

L_set_locked_reg_panic:
	PUSH_FRAME
	sub	sp, sp, #16
	str	x0, [sp]
	adr	x0, L_set_locked_reg_panic_str
	BRANCH_EXTERN panic
	b	.

L_set_tcr_panic_str:
	.asciz	"set_tcr: t0sz, t1sz not equal (%llx)\n"


L_set_locked_reg_panic_str:
	.asciz	"attempt to set locked register: (%llx)\n"
#endif /* DEBUG || DEVELOPMENT */

/*
 * MMU kernel virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop)
LEXT(mmu_kvtop)
	mrs	x2, DAIF			// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ
	at	s1e1r, x0			// Translation Stage 1 EL1
	isb	sy
	mrs	x1, PAR_EL1			// Read result
	msr	DAIF, x2			// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_invalid	// Test Translation not valid
	bfm	x1, x0, #0, #11			// Add page offset
	and	x0, x1, #0x0000ffffffffffff	// Clear non-address bits
	ret
L_mmu_kvtop_invalid:
	mov	x0, #0				// Return invalid
	ret

/*
 * MMU user virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_uvtop)
LEXT(mmu_uvtop)
	lsr	x8, x0, #56			// Extract top byte
	cbnz	x8, L_mmu_uvtop_invalid		// Tagged pointers are invalid
	mrs	x2, DAIF			// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ
	at	s1e0r, x0			// Translation Stage 1 EL0
	isb	sy
	mrs	x1, PAR_EL1			// Read result
	msr	DAIF, x2			// Restore interrupt state
	tbnz	x1, #0, L_mmu_uvtop_invalid	// Test Translation not valid
	bfm	x1, x0, #0, #11			// Add page offset
	and	x0, x1, #0x0000ffffffffffff	// Clear non-address bits
	ret
L_mmu_uvtop_invalid:
	mov	x0, #0				// Return invalid
	ret

/*
 * MMU kernel virtual to physical address preflight write access
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs	x2, DAIF			// Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)	// Disable IRQ
	at	s1e1w, x0			// Translation Stage 1 EL1
	mrs	x1, PAR_EL1			// Read result
	msr	DAIF, x2			// Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_wpreflight_invalid	// Test Translation not valid
	bfm	x1, x0, #0, #11			// Add page offset
	and	x0, x1, #0x0000ffffffffffff	// Clear non-address bits
	ret
L_mmu_kvtop_wpreflight_invalid:
	mov	x0, #0				// Return invalid
	ret
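
/*
 * The three helpers above share the same shape; informally (field names per
 * the ARM ARM, masks approximate):
 *
 *	PAR_EL1 = AT(<stage-1 translation for the requested access>, va);
 *	if (PAR_EL1 & 1)			// F bit: translation faulted
 *		return 0;
 *	return (PAR_EL1 & PA_BITS) | (va & 0xFFF);	// PA plus page offset
 */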

/*
 * SET_RECOVERY_HANDLER
 *
 * Sets up a page fault recovery handler. This macro clobbers x16 and x17.
 *
 *	label - recovery label
 *	tpidr - persisted thread pointer
 *	old_handler - persisted recovery handler
 *	label_in_adr_range - whether \label is within 1 MB of PC
 */
.macro SET_RECOVERY_HANDLER	label, tpidr=x16, old_handler=x10, label_in_adr_range=0
	// Note: x16 and x17 are designated for use as temporaries in
	// interruptible PAC routines. DO NOT CHANGE THESE REGISTER ASSIGNMENTS.
.if \label_in_adr_range==1		// Load the recovery handler address
	adr	x17, \label
.else
	adrp	x17, \label@page
	add	x17, x17, \label@pageoff
.endif
#if defined(HAS_APPLE_PAC)
	mrs	x16, TPIDR_EL1
	add	x16, x16, TH_RECOVER
	movk	x16, #PAC_DISCRIMINATOR_RECOVER, lsl 48
	pacia	x17, x16			// Sign with IAKey + blended discriminator
#endif

	mrs	\tpidr, TPIDR_EL1		// Load thread pointer
	ldr	\old_handler, [\tpidr, TH_RECOVER]	// Save previous recovery handler
	str	x17, [\tpidr, TH_RECOVER]	// Set new signed recovery handler
.endmacro

/*
 * CLEAR_RECOVERY_HANDLER
 *
 * Clears page fault handler set by SET_RECOVERY_HANDLER
 *
 *	tpidr - thread pointer saved by SET_RECOVERY_HANDLER
 *	old_handler - old recovery handler saved by SET_RECOVERY_HANDLER
 */
.macro CLEAR_RECOVERY_HANDLER	tpidr=x16, old_handler=x10
	str	\old_handler, [\tpidr, TH_RECOVER]	// Restore the previous recovery handler
.endmacro
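
/*
 * Usage pattern (as the copyio routines below do it):
 *
 *	SET_RECOVERY_HANDLER	copyio_error
 *	...loads/stores that may fault on user addresses...
 *	CLEAR_RECOVERY_HANDLER
 *
 * A fault on the user access between the two macros transfers control to
 * copyio_error rather than panicking.
 */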


	.text
	.align 2
copyio_error:
	CLEAR_RECOVERY_HANDLER
	mov	x0, #EFAULT			// Return an EFAULT error
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _bcopyin(const char *src, char *dst, vm_size_t len)
 */
	.text
	.align 2
	.globl EXT(_bcopyin)
LEXT(_bcopyin)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER	copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	cmp	x2, #16
	b.lt	2f
	sub	x2, x2, #16
1:
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	subs	x2, x2, #16
	b.ge	1b
	/* Fixup the len and test for completion */
	adds	x2, x2, #16
	b.eq	3f
2:	/* Bytewise */
	subs	x2, x2, #1
	ldrb	w3, [x0], #1
	strb	w3, [x1], #1
	b.hi	2b
3:
	CLEAR_RECOVERY_HANDLER
	mov	x0, #0
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic32(const char *src, uint32_t *dst)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic32)
LEXT(_copyin_atomic32)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER	copyio_error
	ldr	w8, [x0]
	str	w8, [x1]
	mov	x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic32_wait_if_equals)
LEXT(_copyin_atomic32_wait_if_equals)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER	copyio_error
	ldxr	w8, [x0]
	cmp	w8, w1
	mov	x0, ESTALE
	b.ne	1f
	mov	x0, #0
	wfe
1:
	clrex
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic64(const char *src, uint64_t *dst)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic64)
LEXT(_copyin_atomic64)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER	copyio_error
	ldr	x8, [x0]
	str	x8, [x1]
	mov	x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * int _copyout_atomic32(uint32_t value, char *dst)
 */
	.text
	.align 2
	.globl EXT(_copyout_atomic32)
LEXT(_copyout_atomic32)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER	copyio_error
	str	w0, [x1]
	mov	x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyout_atomic64(uint64_t value, char *dst)
 */
	.text
	.align 2
	.globl EXT(_copyout_atomic64)
LEXT(_copyout_atomic64)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER	copyio_error
	str	x0, [x1]
	mov	x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * int _bcopyout(const char *src, char *dst, vm_size_t len)
 */
	.text
	.align 2
	.globl EXT(_bcopyout)
LEXT(_bcopyout)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER	copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	cmp	x2, #16
	b.lt	2f
	sub	x2, x2, #16
1:
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	subs	x2, x2, #16
	b.ge	1b
	/* Fixup the len and test for completion */
	adds	x2, x2, #16
	b.eq	3f
2:	/* Bytewise */
	subs	x2, x2, #1
	ldrb	w3, [x0], #1
	strb	w3, [x1], #1
	b.hi	2b
3:
	CLEAR_RECOVERY_HANDLER
	mov	x0, #0
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _bcopyinstr(
 *	const user_addr_t user_addr,
 *	char *kernel_addr,
 *	vm_size_t max,
 *	vm_size_t *actual)
 */
	.text
	.align 2
	.globl EXT(_bcopyinstr)
LEXT(_bcopyinstr)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER	Lcopyinstr_error, label_in_adr_range=1
	mov	x4, #0				// x4 - total bytes copied
Lcopyinstr_loop:
	ldrb	w5, [x0], #1			// Load a byte from the user source
	strb	w5, [x1], #1			// Store a byte to the kernel dest
	add	x4, x4, #1			// Increment bytes copied
	cbz	x5, Lcopyinstr_done		// If this byte is null, we're done
	cmp	x4, x2				// If we're out of space, return an error
	b.ne	Lcopyinstr_loop
Lcopyinstr_too_long:
	mov	x5, #ENAMETOOLONG		// Set current byte to error code for later return
Lcopyinstr_done:
	str	x4, [x3]			// Return number of bytes copied
	mov	x0, x5				// Set error code (0 on success, ENAMETOOLONG on failure)
	b	Lcopyinstr_exit
Lcopyinstr_error:
	mov	x0, #EFAULT			// Return EFAULT on error
Lcopyinstr_exit:
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG
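
/*
 * Informal summary of _bcopyinstr as implemented above:
 *	- copies bytes from user_addr to kernel_addr until a NUL byte has been
 *	  copied or max bytes have been transferred;
 *	- returns 0 on success, ENAMETOOLONG if no NUL was seen within max bytes,
 *	  and EFAULT if the user access faulted;
 *	- on the non-fault paths, *actual is set to the number of bytes copied,
 *	  including the terminating NUL when one was found.
 */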

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
 *
 *	Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
 *	either user or kernel memory, or 8 bytes (AArch32) from user only.
 *
 *	x0 : address of frame to copy.
 *	x1 : kernel address at which to store data.
 *	w2 : whether to copy an AArch32 or AArch64 frame.
 *	x3 : temp
 *	x5 : temp (kernel virtual base)
 *	x9 : temp
 *	x10 : old recovery function (set by SET_RECOVERY_HANDLER)
 *	x12, x13 : backtrace data
 *	x16 : thread pointer (set by SET_RECOVERY_HANDLER)
 *
 */
	.text
	.align 2
	.globl EXT(copyinframe)
LEXT(copyinframe)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER	copyio_error
	cbnz	w2, Lcopyinframe64		// Check frame size
	adrp	x5, EXT(gVirtBase)@page		// For 32-bit frame, make sure we're not trying to copy from kernel
	add	x5, x5, EXT(gVirtBase)@pageoff
	ldr	x5, [x5]
	cmp	x5, x0				// See if address is in kernel virtual range
	b.hi	Lcopyinframe32			// If below kernel virtual range, proceed.
	mov	w0, #EFAULT			// Should never have a 32-bit frame in kernel virtual range
	b	Lcopyinframe_done

Lcopyinframe32:
	ldr	x12, [x0]			// Copy 8 bytes
	str	x12, [x1]
	mov	w0, #0				// Success
	b	Lcopyinframe_done

Lcopyinframe64:
	mov	x3, VM_MIN_KERNEL_ADDRESS	// Check if kernel address
	orr	x9, x0, TBI_MASK		// Hide tags in address comparison
	cmp	x9, x3				// If in kernel address range, skip tag test
	b.hs	Lcopyinframe_valid
	tst	x0, TBI_MASK			// Detect tagged pointers
	b.eq	Lcopyinframe_valid
	mov	w0, #EFAULT			// Tagged address, fail
	b	Lcopyinframe_done
Lcopyinframe_valid:
	ldp	x12, x13, [x0]			// Copy 16 bytes
	stp	x12, x13, [x1]
	mov	w0, #0				// Success

Lcopyinframe_done:
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.text
	.align 2
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
	PANIC_UNIMPLEMENTED

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 *	Set debug registers to match the current thread state
 *	(NULL to disable). Assume 6 breakpoints and 2
 *	watchpoints, since that has been the case in all cores
 *	thus far.
 */
	.text
	.align 2
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
	PANIC_UNIMPLEMENTED

#if defined(APPLE_ARM64_ARCH_FAMILY)
/*
 * Note: still have to ISB before executing wfi!
 */
	.text
	.align 2
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
	PUSH_FRAME

#if defined(APPLETYPHOON)
	// <rdar://problem/15827409>
	HID_SET_BITS	ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x9
	dsb	sy
	isb	sy
#endif

#if HAS_CLUSTER
	cbnz	x0, 1f				// Skip if deep_sleep == true
	// Mask FIQ and IRQ to avoid spurious wakeups
	mrs	x9, ARM64_REG_CYC_OVRD
	and	x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	mov	x10, #(ARM64_REG_CYC_OVRD_irq_disable | ARM64_REG_CYC_OVRD_fiq_disable)
	orr	x9, x9, x10
	msr	ARM64_REG_CYC_OVRD, x9
	isb
1:
#endif

	cbz	x0, 1f				// Skip if deep_sleep == false
#if __ARM_GLOBAL_SLEEP_BIT__
	// Enable deep sleep
	mrs	x1, ARM64_REG_ACC_OVRD
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
#if HAS_RETENTION_STATE
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_disPioOnWfiCpu)
#endif
	msr	ARM64_REG_ACC_OVRD, x1


#else
	// Enable deep sleep
	mov	x1, ARM64_REG_CYC_CFG_deepSleep
	msr	ARM64_REG_CYC_CFG, x1
#endif

1:
	// Set "OK to power down" (<rdar://problem/12390433>)
	mrs	x9, ARM64_REG_CYC_OVRD
	orr	x9, x9, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
#if HAS_RETENTION_STATE
	orr	x9, x9, #(ARM64_REG_CYC_OVRD_disWfiRetn)
#endif
	msr	ARM64_REG_CYC_OVRD, x9

#if defined(APPLEMONSOON) || defined(APPLEVORTEX)
	ARM64_IS_PCORE	x9
	cbz	x9, Lwfi_inst			// skip if not p-core

	/* <rdar://problem/32512947>: Flush the GUPS prefetcher prior to
	 * wfi. A Skye HW bug can cause the GUPS prefetcher on p-cores
	 * to be left with valid entries that fail to drain if a
	 * subsequent wfi is issued. This can prevent the core from
	 * power-gating. For the idle case that is recoverable, but
	 * for the deep-sleep (S2R) case in which cores MUST power-gate,
	 * it can lead to a hang. This can be prevented by disabling
	 * and re-enabling GUPS, which forces the prefetch queue to
	 * drain. This should be done as close to wfi as possible, i.e.
	 * at the very end of arm64_prepare_for_sleep(). */
#if defined(APPLEVORTEX)
	/* <rdar://problem/32821461>: Cyprus A0/A1 parts have a similar
	 * bug in the HSP prefetcher that can be worked around through
	 * the same method mentioned above for Skye. */
	mrs	x9, MIDR_EL1
	EXEC_COREALL_REVLO	CPU_VERSION_B0, x9, x10
#endif
	mrs	x9, ARM64_REG_HID10
	orr	x9, x9, #(ARM64_REG_HID10_DisHwpGups)
	msr	ARM64_REG_HID10, x9
	isb	sy
	and	x9, x9, #(~(ARM64_REG_HID10_DisHwpGups))
	msr	ARM64_REG_HID10, x9
	isb	sy
#endif
	EXEC_END

Lwfi_inst:
	dsb	sy
	isb	sy
	wfi
	b	Lwfi_inst

/*
 * Force WFI to use clock gating only
 *
 */
	.text
	.align 2
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	ARM64_STACK_PROLOG
	PUSH_FRAME

	mrs	x0, ARM64_REG_CYC_OVRD
	orr	x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
	msr	ARM64_REG_CYC_OVRD, x0

	POP_FRAME
	ARM64_STACK_EPILOG


#if HAS_RETENTION_STATE
	.text
	.align 2
	.globl EXT(arm64_retention_wfi)
LEXT(arm64_retention_wfi)
	wfi
	cbz	lr, Lwfi_retention	// If lr is 0, we entered retention state and lost all GPRs except sp and pc
	ret				// Otherwise just return to cpu_idle()
Lwfi_retention:
	mov	x0, #1
	bl	EXT(ClearIdlePop)
	mov	x0, #0
	bl	EXT(cpu_idle_exit)	// cpu_idle_exit(from_reset = FALSE)
	b	.			// cpu_idle_exit() should never return
#endif

#if defined(APPLETYPHOON)

	.text
	.align 2
	.globl EXT(typhoon_prepare_for_wfi)

LEXT(typhoon_prepare_for_wfi)
	PUSH_FRAME

	// <rdar://problem/15827409>
	HID_SET_BITS	ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0
	dsb	sy
	isb	sy

	POP_FRAME
	ret


	.text
	.align 2
	.globl EXT(typhoon_return_from_wfi)
LEXT(typhoon_return_from_wfi)
	PUSH_FRAME

	// <rdar://problem/15827409>
	HID_CLEAR_BITS	ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0
	dsb	sy
	isb	sy

	POP_FRAME
	ret
#endif

#ifdef APPLETYPHOON

#define HID0_DEFEATURES_1	0x0000a0c000064010ULL
#define HID1_DEFEATURES_1	0x000000004005bf20ULL
#define HID2_DEFEATURES_1	0x0000000000102074ULL
#define HID3_DEFEATURES_1	0x0000000000400003ULL
#define HID4_DEFEATURES_1	0x83ff00e100000268ULL
#define HID7_DEFEATURES_1	0x000000000000000eULL

#define HID0_DEFEATURES_2	0x0000a1c000020010ULL
#define HID1_DEFEATURES_2	0x000000000005d720ULL
#define HID2_DEFEATURES_2	0x0000000000002074ULL
#define HID3_DEFEATURES_2	0x0000000000400001ULL
#define HID4_DEFEATURES_2	0x8390000200000208ULL
#define HID7_DEFEATURES_2	0x0000000000000000ULL

/*
	arg0 = target register
	arg1 = 64-bit constant
*/
.macro LOAD_UINT64
	movz	$0, #(($1 >> 48) & 0xffff), lsl #48
	movk	$0, #(($1 >> 32) & 0xffff), lsl #32
	movk	$0, #(($1 >> 16) & 0xffff), lsl #16
	movk	$0, #(($1) & 0xffff)
.endmacro
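
/*
 * Example expansion (illustrative): "LOAD_UINT64 x1, HID0_DEFEATURES_1"
 * materializes the full 64-bit constant in x1 via one movz plus three movk
 * instructions, 16 bits at a time, so it never touches memory or any other
 * register.
 */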

	.text
	.align 2
	.globl EXT(cpu_defeatures_set)
LEXT(cpu_defeatures_set)
	PUSH_FRAME
	cmp	x0, #2
	b.eq	cpu_defeatures_set_2
	cmp	x0, #1
	b.ne	cpu_defeatures_set_ret
	LOAD_UINT64	x1, HID0_DEFEATURES_1
	mrs	x0, ARM64_REG_HID0
	orr	x0, x0, x1
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64	x1, HID1_DEFEATURES_1
	mrs	x0, ARM64_REG_HID1
	orr	x0, x0, x1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64	x1, HID2_DEFEATURES_1
	mrs	x0, ARM64_REG_HID2
	orr	x0, x0, x1
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64	x1, HID3_DEFEATURES_1
	mrs	x0, ARM64_REG_HID3
	orr	x0, x0, x1
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64	x1, HID4_DEFEATURES_1
	mrs	x0, ARM64_REG_HID4
	orr	x0, x0, x1
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64	x1, HID7_DEFEATURES_1
	mrs	x0, ARM64_REG_HID7
	orr	x0, x0, x1
	msr	ARM64_REG_HID7, x0
	dsb	sy
	isb	sy
	b	cpu_defeatures_set_ret
cpu_defeatures_set_2:
	LOAD_UINT64	x1, HID0_DEFEATURES_2
	mrs	x0, ARM64_REG_HID0
	orr	x0, x0, x1
	msr	ARM64_REG_HID0, x0
	LOAD_UINT64	x1, HID1_DEFEATURES_2
	mrs	x0, ARM64_REG_HID1
	orr	x0, x0, x1
	msr	ARM64_REG_HID1, x0
	LOAD_UINT64	x1, HID2_DEFEATURES_2
	mrs	x0, ARM64_REG_HID2
	orr	x0, x0, x1
	msr	ARM64_REG_HID2, x0
	LOAD_UINT64	x1, HID3_DEFEATURES_2
	mrs	x0, ARM64_REG_HID3
	orr	x0, x0, x1
	msr	ARM64_REG_HID3, x0
	LOAD_UINT64	x1, HID4_DEFEATURES_2
	mrs	x0, ARM64_REG_HID4
	orr	x0, x0, x1
	msr	ARM64_REG_HID4, x0
	LOAD_UINT64	x1, HID7_DEFEATURES_2
	mrs	x0, ARM64_REG_HID7
	orr	x0, x0, x1
	msr	ARM64_REG_HID7, x0
	dsb	sy
	isb	sy
	b	cpu_defeatures_set_ret
cpu_defeatures_set_ret:
	POP_FRAME
	ret
#endif

#else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
	.text
	.align 2
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
	PUSH_FRAME
Lwfi_inst:
	dsb	sy
	isb	sy
	wfi
	b	Lwfi_inst

/*
 * Force WFI to use clock gating only
 * Note: for non-Apple device, do nothing.
 */
	.text
	.align 2
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	PUSH_FRAME
	nop
	POP_FRAME
	ret

#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */

/*
 * void arm64_replace_bootstack(cpu_data_t *cpu_data)
 *
 * This must be called from a kernel thread context running on the boot CPU,
 * after setting up new exception stacks in per-CPU data. That will guarantee
 * that the stack(s) we're trying to replace aren't currently in use. For
 * KTRR-protected devices, this must also be called prior to VM prot finalization
 * and lockdown, as updating SP1 requires a sensitive instruction.
 */
	.text
	.align 2
	.globl EXT(arm64_replace_bootstack)
LEXT(arm64_replace_bootstack)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	// Set the exception stack pointer
	ldr	x0, [x0, CPU_EXCEPSTACK_TOP]
	mrs	x4, DAIF			// Load current DAIF; use x4 as pinst may trash x1-x3
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF)	// Disable IRQ/FIQ/serror
	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov	x1, lr
	bl	EXT(pinst_spsel_1)
	mov	lr, x1
#else
	msr	SPSel, #1
#endif
	mov	sp, x0
	msr	SPSel, #0
	msr	DAIF, x4			// Restore interrupt state
	POP_FRAME
	ARM64_STACK_EPILOG

#ifdef MONITOR
/*
 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *			      uintptr_t arg2, uintptr_t arg3)
 *
 * Call the EL3 monitor with 4 arguments in registers
 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
 * registers are preserved, temporary registers are not. Parameters and results are passed in
 * the usual manner.
 */
	.text
	.align 2
	.globl EXT(monitor_call)
LEXT(monitor_call)
	smc	0x11
	ret
#endif

#ifdef HAS_APPLE_PAC
/*
 * SIGN_THREAD_STATE
 *
 * Macro that signs thread state.
 * $0 - Offset in arm_saved_state to store JOPHASH value.
 */
.macro SIGN_THREAD_STATE
	pacga	x1, x1, x0		/* PC hash (gkey + &arm_saved_state) */
	/*
	 * Mask off the carry flag so we don't need to re-sign when that flag is
	 * touched by the system call return path.
	 */
	bic	x2, x2, PSR_CF
	pacga	x1, x2, x1		/* SPSR hash (gkey + pc hash) */
	pacga	x1, x3, x1		/* LR hash (gkey + spsr hash) */
	pacga	x1, x4, x1		/* X16 hash (gkey + lr hash) */
	pacga	x1, x5, x1		/* X17 hash (gkey + x16 hash) */
	str	x1, [x0, $0]
#if DEBUG || DEVELOPMENT
	mrs	x1, DAIF
	tbz	x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic
#endif /* DEBUG || DEVELOPMENT */
.endmacro

/*
 * CHECK_SIGNED_STATE
 *
 * Macro that checks signed thread state.
 * $0 - Offset in arm_saved_state to read the JOPHASH value from.
 * $1 - Label to jump to when check is unsuccessful.
 */
f427ee49 | 1183 | .macro CHECK_SIGNED_STATE |
cb323159 A |
1184 | pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */ |
1185 | /* | |
1186 | * Mask off the carry flag so we don't need to re-sign when that flag is | |
1187 | * touched by the system call return path. | |
1188 | */ | |
1189 | bic x2, x2, PSR_CF | |
1190 | pacga x1, x2, x1 /* SPSR hash (gkey + pc hash) */ | |
1191 | pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */ | |
1192 | pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */ | |
1193 | pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */ | |
f427ee49 | 1194 | ldr x2, [x0, $0] |
cb323159 | 1195 | cmp x1, x2 |
f427ee49 A |
1196 | b.ne $1 |
1197 | #if DEBUG || DEVELOPMENT | |
1198 | mrs x1, DAIF | |
1199 | tbz x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic | |
1200 | #endif /* DEBUG || DEVELOPMENT */ | |
1201 | .endmacro | |
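
/*
 * Both macros above compute the same value: a chained PACGA MAC over
 * (&arm_saved_state, pc, cpsr-with-C-cleared, lr, x16, x17), each link feeding
 * the previous hash back in as the modifier. SIGN_THREAD_STATE stores the
 * result at the given offset; CHECK_SIGNED_STATE recomputes it and branches to
 * the supplied label on a mismatch.
 */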

/**
 * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc,
 *			     uint32_t cpsr, uint64_t lr, uint64_t x16,
 *			     uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_sign_thread_state)
LEXT(ml_sign_thread_state)
	SIGN_THREAD_STATE SS64_JOPHASH
	ret

/**
 * void ml_sign_kernel_thread_state(arm_kernel_saved_state *ss, uint64_t pc,
 *				    uint32_t cpsr, uint64_t lr, uint64_t x16,
 *				    uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_sign_kernel_thread_state)
LEXT(ml_sign_kernel_thread_state)
	SIGN_THREAD_STATE SS64_KERNEL_JOPHASH
	ret

/**
 * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc,
 *			      uint32_t cpsr, uint64_t lr, uint64_t x16,
 *			      uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_check_signed_state)
LEXT(ml_check_signed_state)
	CHECK_SIGNED_STATE SS64_JOPHASH, Lcheck_hash_panic
	ret
Lcheck_hash_panic:
	/*
	 * ml_check_signed_state normally doesn't set up a stack frame, since it
	 * needs to work in the face of attackers that can modify the stack.
	 * However we lazily create one in the panic path: at this point we're
	 * *only* using the stack frame for unwinding purposes, and without one
	 * we'd be missing information about the caller.
	 */
	ARM64_STACK_PROLOG
	PUSH_FRAME
	mov	x1, x0
	adr	x0, Lcheck_hash_str
	CALL_EXTERN panic_with_thread_kernel_state

/**
 * void ml_check_kernel_signed_state(arm_kernel_saved_state *ss, uint64_t pc,
 *				     uint32_t cpsr, uint64_t lr, uint64_t x16,
 *				     uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_check_kernel_signed_state)
LEXT(ml_check_kernel_signed_state)
	CHECK_SIGNED_STATE SS64_KERNEL_JOPHASH, Lcheck_kernel_hash_panic
	ret
Lcheck_kernel_hash_panic:
	ARM64_STACK_PROLOG
	PUSH_FRAME
	adr	x0, Lcheck_hash_str
	CALL_EXTERN panic

Lcheck_hash_str:
	.asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)"

#if DEBUG || DEVELOPMENT
Lintr_enabled_panic:
	ARM64_STACK_PROLOG
	PUSH_FRAME
	adr	x0, Lintr_enabled_str
	CALL_EXTERN panic
Lintr_enabled_str:
	/*
	 * Please see the "Signing spilled register state" section of doc/pac.md
	 * for an explanation of why this is bad and how it should be fixed.
	 */
	.asciz "Signed thread state manipulated with interrupts enabled"
#endif /* DEBUG || DEVELOPMENT */

/**
 * void ml_auth_thread_state_invalid_cpsr(arm_saved_state_t *ss)
 *
 * Panics due to an invalid CPSR value in ss.
 */
	.text
	.align 2
	.globl EXT(ml_auth_thread_state_invalid_cpsr)
LEXT(ml_auth_thread_state_invalid_cpsr)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	mov	x1, x0
	adr	x0, Linvalid_cpsr_str
	CALL_EXTERN panic_with_thread_kernel_state

Linvalid_cpsr_str:
	.asciz "Thread state corruption detected (PE mode == 0)"
#endif /* HAS_APPLE_PAC */

	.text
	.align 2
	.globl EXT(fill32_dczva)
LEXT(fill32_dczva)
0:
	dc	zva, x0
	add	x0, x0, #64
	subs	x1, x1, #64
	b.hi	0b
	ret
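
/*
 * fill32_dczva(addr, len) zeroes len bytes at addr using "dc zva". The loop
 * assumes (not enforced here) that addr is 64-byte aligned, len is a non-zero
 * multiple of 64, and DCZID_EL0 reports a 64-byte ZVA block size.
 */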

	.text
	.align 2
	.globl EXT(fill32_nt)
LEXT(fill32_nt)
	dup.4s	v0, w2
0:
	stnp	q0, q0, [x0]
	stnp	q0, q0, [x0, #0x20]
	stnp	q0, q0, [x0, #0x40]
	stnp	q0, q0, [x0, #0x60]
	add	x0, x0, #128
	subs	x1, x1, #128
	b.hi	0b
	ret
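
/*
 * fill32_nt(addr, len, value) splats the 32-bit value across a vector and
 * stores it with non-temporal pairs, 128 bytes per iteration. It assumes (not
 * enforced here) that len is a non-zero multiple of 128; the non-temporal hint
 * keeps large fills from displacing useful cache contents.
 */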

/* vim: set sw=4 ts=4: */