/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/proc_reg.h>
#include <arm/pmap.h>
#include <pexpert/arm64/board_config.h>
#include <sys/errno.h>
#include "assym.s"

#if defined(HAS_APPLE_PAC)
/*
 * void
 * ml_set_kernelkey_enabled(boolean_t enable)
 *
 * Toggle pointer auth kernel domain key diversification. Implemented in
 * assembly to prevent compiler reordering.
 *
 */

	.align 2
	.globl EXT(ml_set_kernelkey_enabled)
LEXT(ml_set_kernelkey_enabled)
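	// x2 = APCTL_EL1 with KernKeyEn set, x1 = APCTL_EL1 with KernKeyEn cleared;
	// csel picks the cleared value when 'enable' is zero, the set value otherwise.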
	mrs x1, ARM64_REG_APCTL_EL1
	orr x2, x1, #APCTL_EL1_KernKeyEn
	and x1, x1, #~APCTL_EL1_KernKeyEn
	cmp w0, #0
	csel x1, x1, x2, eq
	msr ARM64_REG_APCTL_EL1, x1
	isb
	ret

#endif /* defined(HAS_APPLE_PAC) */

#if HAS_BP_RET

/*
 * void set_bp_ret(void)
 * Helper function to enable branch predictor state retention
 * across ACC sleep
 */

	.align 2
	.globl EXT(set_bp_ret)
LEXT(set_bp_ret)
	// Load bpret boot-arg
	adrp x14, EXT(bp_ret)@page
	add x14, x14, EXT(bp_ret)@pageoff
	ldr w14, [x14]

	mrs x13, ARM64_REG_ACC_CFG
	and x13, x13, (~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift))
	and x14, x14, #(ARM64_REG_ACC_CFG_bpSlp_mask)
	orr x13, x13, x14, lsl #(ARM64_REG_ACC_CFG_bpSlp_shift)
	msr ARM64_REG_ACC_CFG, x13

	ret
#endif // HAS_BP_RET

#if HAS_NEX_PG
	.align 2
	.globl EXT(set_nex_pg)
LEXT(set_nex_pg)
	mrs x14, MPIDR_EL1
	// Skip if this isn't a p-core; NEX powergating isn't available for e-cores
	and x14, x14, #(MPIDR_PNE)
	cbz x14, Lnex_pg_done

	// Set the SEG-recommended value of 12 additional reset cycles
	mrs x14, ARM64_REG_HID13
	and x14, x14, (~ARM64_REG_HID13_RstCyc_mask)
	orr x14, x14, ARM64_REG_HID13_RstCyc_val
	msr ARM64_REG_HID13, x14

	// Load nexpg boot-arg
	adrp x14, EXT(nex_pg)@page
	add x14, x14, EXT(nex_pg)@pageoff
	ldr w14, [x14]

	mrs x13, ARM64_REG_HID14
	and x13, x13, (~ARM64_REG_HID14_NexPwgEn)
	cbz w14, Lset_nex_pg
	orr x13, x13, ARM64_REG_HID14_NexPwgEn
Lset_nex_pg:
	msr ARM64_REG_HID14, x13

Lnex_pg_done:
	ret

#endif // HAS_NEX_PG

/* uint32_t get_fpscr(void):
 * Returns (FPSR | FPCR).
 */
	.align 2
	.globl EXT(get_fpscr)
LEXT(get_fpscr)
#if __ARM_VFP__
	mrs x1, FPSR // Grab FPSR
	mov x4, #(FPSR_MASK & 0xFFFF)
	mov x5, #(FPSR_MASK & 0xFFFF0000)
	orr x0, x4, x5
	and x1, x1, x0 // Be paranoid, and clear bits we expect to
		       // be clear
	mrs x2, FPCR // Grab FPCR
	mov x4, #(FPCR_MASK & 0xFFFF)
	mov x5, #(FPCR_MASK & 0xFFFF0000)
	orr x0, x4, x5
	and x2, x2, x0 // Be paranoid, and clear bits we expect to
		       // be clear
	orr x0, x1, x2 // OR them to get FPSCR equivalent state
#else
	mov x0, #0
#endif
	ret
	.align 2
	.globl EXT(set_fpscr)
/* void set_fpscr(uint32_t value):
 * Set the FPCR and FPSR registers, based on the given value; a
 * noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
 * and FPCR are not responsible for condition codes.
 */
LEXT(set_fpscr)
#if __ARM_VFP__
	mov x4, #(FPSR_MASK & 0xFFFF)
	mov x5, #(FPSR_MASK & 0xFFFF0000)
	orr x1, x4, x5
	and x1, x1, x0 // Clear the bits that don't apply to FPSR
	mov x4, #(FPCR_MASK & 0xFFFF)
	mov x5, #(FPCR_MASK & 0xFFFF0000)
	orr x2, x4, x5
	and x2, x2, x0 // Clear the bits that don't apply to FPCR
	msr FPSR, x1 // Write FPSR
	msr FPCR, x2 // Write FPCR
	dsb ish // FPCR requires synchronization
#endif
	ret

/*
 * void update_mdscr(unsigned long clear, unsigned long set)
 * Clears and sets the specified bits in MDSCR_EL1.
 *
 * Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
 * self-hosted user mode debug. Any checks before the MSR can be skipped with ROP,
 * so we need to put the checks after the MSR where they can't be skipped. That
 * still leaves a small window if a breakpoint is set on the instruction
 * immediately after the MSR. To handle that, we also check and then set
 * the breakpoint control registers. This allows us to guarantee that a given
 * core will never have both KDE set and a breakpoint targeting EL1.
 *
 * If KDE gets set, unset it and then panic.
 */
	.align 2
	.globl EXT(update_mdscr)
LEXT(update_mdscr)
	mov x4, #0
	mrs x2, MDSCR_EL1
	bic x2, x2, x0
	orr x2, x2, x1
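	// Bit 13 (0x2000) is MDSCR_EL1.KDE.  The loop below always clears it
	// before the MSR, and panics if it is ever observed set in x2 (e.g. when
	// this sequence is entered mid-stream via ROP).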
1:
	bic x2, x2, #0x2000
	msr MDSCR_EL1, x2
#if defined(CONFIG_KERNEL_INTEGRITY)
	/*
	 * verify KDE didn't get set (including via ROP)
	 * If set, clear it and then panic
	 */
	ands x3, x2, #0x2000
	orr x4, x4, x3
	bne 1b
	cmp x4, xzr
	b.ne Lupdate_mdscr_panic
#endif
	ret

Lupdate_mdscr_panic:
	adrp x0, Lupdate_mdscr_panic_str@page
	add x0, x0, Lupdate_mdscr_panic_str@pageoff
	b EXT(panic)
	b .

Lupdate_mdscr_panic_str:
	.asciz "MDSCR.KDE was set"


/*
 * Set MMU Translation Table Base Alternate
 */
	.text
	.align 2
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
	dsb sy
#if defined(KERNEL_INTEGRITY_KTRR)
	mov x1, lr
	bl EXT(pinst_set_ttbr1)
	mov lr, x1
#else
#if defined(HAS_VMSA_LOCK)
	mrs x1, ARM64_REG_VMSA_LOCK_EL1
	and x1, x1, #(VMSA_LOCK_TTBR1_EL1)
	cbnz x1, L_set_locked_reg_panic
#endif /* defined(HAS_VMSA_LOCK) */
	msr TTBR1_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	isb sy
	ret

#if XNU_MONITOR
	.section __PPLTEXT,__text,regular,pure_instructions
#else
	.text
#endif
	.align 2
	.globl EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
#if __ARM_KERNEL_PROTECT__
	/* All EL1-mode ASIDs are odd. */
	orr x0, x0, #(1 << TTBR_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */
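	// Ensure prior page table updates are visible before switching TTBR0,
	// then synchronize the context after the write.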
	dsb ish
	msr TTBR0_EL1, x0
	isb sy
	ret

/*
 * set AUX control register
 */
	.text
	.align 2
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	msr ACTLR_EL1, x0
	// Synchronize system
	isb sy
	ret

#if __ARM_KERNEL_PROTECT__
	.text
	.align 2
	.globl EXT(set_vbar_el1)
LEXT(set_vbar_el1)
#if defined(KERNEL_INTEGRITY_KTRR)
	b EXT(pinst_set_vbar)
#else
	msr VBAR_EL1, x0
	ret
#endif
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_VMSA_LOCK)
	.text
	.align 2
	.globl EXT(vmsa_lock)
LEXT(vmsa_lock)
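	// Lock TTBR1_EL1, TCR_EL1, VBAR_EL1, and the SCTLR M-bit against further
	// modification via the implementation-specific VMSA lock register.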
	isb sy
	mov x1, #(VMSA_LOCK_SCTLR_M_BIT)
	mov x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_TCR_EL1 | VMSA_LOCK_VBAR_EL1)
	orr x0, x0, x1
	msr ARM64_REG_VMSA_LOCK_EL1, x0
	isb sy
	ret
#endif /* defined(HAS_VMSA_LOCK) */

/*
 * set translation control register
 */
	.text
	.align 2
	.globl EXT(set_tcr)
LEXT(set_tcr)
#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Assert that T0SZ is always equal to T1SZ
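	// XOR x0 with itself shifted right by (T1SZ_SHIFT - T0SZ_SHIFT): after
	// masking the T0SZ field, the result is nonzero exactly when T0SZ != T1SZ.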
	eor x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
	and x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
	cbnz x1, L_set_tcr_panic
#if defined(KERNEL_INTEGRITY_KTRR)
	mov x1, lr
	bl EXT(pinst_set_tcr)
	mov lr, x1
#else
#if defined(HAS_VMSA_LOCK)
	// assert TCR unlocked
	mrs x1, ARM64_REG_VMSA_LOCK_EL1
	and x1, x1, #(VMSA_LOCK_TCR_EL1)
	cbnz x1, L_set_locked_reg_panic
#endif /* defined(HAS_VMSA_LOCK) */
	msr TCR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	isb sy
	ret

L_set_tcr_panic:
	PUSH_FRAME
	sub sp, sp, #16
	str x0, [sp]
	adr x0, L_set_tcr_panic_str
	BRANCH_EXTERN panic

L_set_locked_reg_panic:
	PUSH_FRAME
	sub sp, sp, #16
	str x0, [sp]
	adr x0, L_set_locked_reg_panic_str
	BRANCH_EXTERN panic
	b .

L_set_tcr_panic_str:
	.asciz "set_tcr: t0sz, t1sz not equal (%llx)\n"


L_set_locked_reg_panic_str:
	.asciz "attempt to set locked register: (%llx)\n"
#else
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov x1, lr
	bl EXT(pinst_set_tcr)
	mov lr, x1
#else
	msr TCR_EL1, x0
#endif
	isb sy
	ret
#endif // defined(APPLE_ARM64_ARCH_FAMILY)

/*
 * MMU kernel virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop)
LEXT(mmu_kvtop)
	mrs x2, DAIF // Load current DAIF
	msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
	at s1e1r, x0 // Translation Stage 1 EL1
	mrs x1, PAR_EL1 // Read result
	msr DAIF, x2 // Restore interrupt state
	tbnz x1, #0, L_mmu_kvtop_invalid // Test Translation not valid
	bfm x1, x0, #0, #11 // Add page offset
	and x0, x1, #0x0000ffffffffffff // Clear non-address bits
	ret
L_mmu_kvtop_invalid:
	mov x0, #0 // Return invalid
	ret

/*
 * MMU user virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_uvtop)
LEXT(mmu_uvtop)
	lsr x8, x0, #56 // Extract top byte
	cbnz x8, L_mmu_uvtop_invalid // Tagged pointers are invalid
	mrs x2, DAIF // Load current DAIF
	msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
	at s1e0r, x0 // Translation Stage 1 EL0
	mrs x1, PAR_EL1 // Read result
	msr DAIF, x2 // Restore interrupt state
	tbnz x1, #0, L_mmu_uvtop_invalid // Test Translation not valid
	bfm x1, x0, #0, #11 // Add page offset
	and x0, x1, #0x0000ffffffffffff // Clear non-address bits
	ret
L_mmu_uvtop_invalid:
	mov x0, #0 // Return invalid
	ret

/*
 * MMU kernel virtual to physical address preflight write access
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs x2, DAIF // Load current DAIF
	msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
	at s1e1w, x0 // Translation Stage 1 EL1
	mrs x1, PAR_EL1 // Read result
	msr DAIF, x2 // Restore interrupt state
	tbnz x1, #0, L_mmu_kvtop_wpreflight_invalid // Test Translation not valid
	bfm x1, x0, #0, #11 // Add page offset
	and x0, x1, #0x0000ffffffffffff // Clear non-address bits
	ret
L_mmu_kvtop_wpreflight_invalid:
	mov x0, #0 // Return invalid
	ret

/*
 * SET_RECOVERY_HANDLER
 *
 * Sets up a page fault recovery handler
 *
 * arg0 - persisted thread pointer
 * arg1 - persisted recovery handler
 * arg2 - scratch reg
 * arg3 - recovery label
 */
.macro SET_RECOVERY_HANDLER
	mrs $0, TPIDR_EL1 // Load thread pointer
	adrp $2, $3@page // Load the recovery handler address
	add $2, $2, $3@pageoff
#if defined(HAS_APPLE_PAC)
	add $1, $0, TH_RECOVER
	movk $1, #PAC_DISCRIMINATOR_RECOVER, lsl 48
	pacia $2, $1 // Sign with IAKey + blended discriminator
#endif

	ldr $1, [$0, TH_RECOVER] // Save previous recovery handler
	str $2, [$0, TH_RECOVER] // Set new signed recovery handler
.endmacro

/*
 * CLEAR_RECOVERY_HANDLER
 *
 * Clears page fault handler set by SET_RECOVERY_HANDLER
 *
 * arg0 - thread pointer saved by SET_RECOVERY_HANDLER
 * arg1 - old recovery handler saved by SET_RECOVERY_HANDLER
 */
.macro CLEAR_RECOVERY_HANDLER
	str $1, [$0, TH_RECOVER] // Restore the previous recovery handler
.endmacro


	.text
	.align 2
copyio_error:
	CLEAR_RECOVERY_HANDLER x10, x11
	mov x0, #EFAULT // Return an EFAULT error
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _bcopyin(const char *src, char *dst, vm_size_t len)
 */
	.text
	.align 2
	.globl EXT(_bcopyin)
LEXT(_bcopyin)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	cmp x2, #16
	b.lt 2f
	sub x2, x2, #16
1:
	/* 16 bytes at a time */
	ldp x3, x4, [x0], #16
	stp x3, x4, [x1], #16
	subs x2, x2, #16
	b.ge 1b
	/* Fixup the len and test for completion */
	adds x2, x2, #16
	b.eq 3f
2:	/* Bytewise */
	subs x2, x2, #1
	ldrb w3, [x0], #1
	strb w3, [x1], #1
	b.hi 2b
3:
	CLEAR_RECOVERY_HANDLER x10, x11
	mov x0, #0
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic32(const char *src, uint32_t *dst)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic32)
LEXT(_copyin_atomic32)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	ldr w8, [x0]
	str w8, [x1]
	mov x0, #0
	CLEAR_RECOVERY_HANDLER x10, x11
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic32_wait_if_equals)
LEXT(_copyin_atomic32_wait_if_equals)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
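	// ldxr arms the exclusive monitor for 'src'; if the loaded value still
	// equals 'value', wfe sleeps until a store to that location (or another
	// event) clears the monitor.  Otherwise return ESTALE without waiting.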
	ldxr w8, [x0]
	cmp w8, w1
	mov x0, ESTALE
	b.ne 1f
	mov x0, #0
	wfe
1:
	clrex
	CLEAR_RECOVERY_HANDLER x10, x11
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic64(const char *src, uint64_t *dst)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic64)
LEXT(_copyin_atomic64)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	ldr x8, [x0]
	str x8, [x1]
	mov x0, #0
	CLEAR_RECOVERY_HANDLER x10, x11
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * int _copyout_atomic32(uint32_t value, char *dst)
 */
	.text
	.align 2
	.globl EXT(_copyout_atomic32)
LEXT(_copyout_atomic32)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	str w0, [x1]
	mov x0, #0
	CLEAR_RECOVERY_HANDLER x10, x11
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyout_atomic64(uint64_t value, char *dst)
 */
	.text
	.align 2
	.globl EXT(_copyout_atomic64)
LEXT(_copyout_atomic64)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	str x0, [x1]
	mov x0, #0
	CLEAR_RECOVERY_HANDLER x10, x11
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * int _bcopyout(const char *src, char *dst, vm_size_t len)
 */
	.text
	.align 2
	.globl EXT(_bcopyout)
LEXT(_bcopyout)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	cmp x2, #16
	b.lt 2f
	sub x2, x2, #16
1:
	/* 16 bytes at a time */
	ldp x3, x4, [x0], #16
	stp x3, x4, [x1], #16
	subs x2, x2, #16
	b.ge 1b
	/* Fixup the len and test for completion */
	adds x2, x2, #16
	b.eq 3f
2:	/* Bytewise */
	subs x2, x2, #1
	ldrb w3, [x0], #1
	strb w3, [x1], #1
	b.hi 2b
3:
	CLEAR_RECOVERY_HANDLER x10, x11
	mov x0, #0
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _bcopyinstr(
 *	const user_addr_t user_addr,
 *	char *kernel_addr,
 *	vm_size_t max,
 *	vm_size_t *actual)
 */
	.text
	.align 2
	.globl EXT(_bcopyinstr)
LEXT(_bcopyinstr)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	adr x4, Lcopyinstr_error // Get address for recover
	mrs x10, TPIDR_EL1 // Get thread pointer
	ldr x11, [x10, TH_RECOVER] // Save previous recover

#if defined(HAS_APPLE_PAC)
	add x5, x10, TH_RECOVER // Sign new pointer with IAKey + blended discriminator
	movk x5, #PAC_DISCRIMINATOR_RECOVER, lsl 48
	pacia x4, x5
#endif
	str x4, [x10, TH_RECOVER] // Store new recover

	mov x4, #0 // x4 - total bytes copied
Lcopyinstr_loop:
	ldrb w5, [x0], #1 // Load a byte from the user source
	strb w5, [x1], #1 // Store a byte to the kernel dest
	add x4, x4, #1 // Increment bytes copied
	cbz x5, Lcopyinstr_done // If this byte is null, we're done
	cmp x4, x2 // If we're out of space, return an error
	b.ne Lcopyinstr_loop
Lcopyinstr_too_long:
	mov x5, #ENAMETOOLONG // Set current byte to error code for later return
Lcopyinstr_done:
	str x4, [x3] // Return number of bytes copied
	mov x0, x5 // Set error code (0 on success, ENAMETOOLONG on failure)
	b Lcopyinstr_exit
Lcopyinstr_error:
	mov x0, #EFAULT // Return EFAULT on error
Lcopyinstr_exit:
	str x11, [x10, TH_RECOVER] // Restore old recover
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
 *
 * Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
 * either user or kernel memory, or 8 bytes (AArch32) from user only.
 *
 * x0 : address of frame to copy.
 * x1 : kernel address at which to store data.
 * w2 : whether to copy an AArch32 or AArch64 frame.
 * x3 : temp
 * x5 : temp (kernel virtual base)
 * x9 : temp
 * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 * x11 : old recovery function (set by SET_RECOVERY_HANDLER)
 * x12, x13 : backtrace data
 *
 */
	.text
	.align 2
	.globl EXT(copyinframe)
LEXT(copyinframe)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
	cbnz w2, Lcopyinframe64 // Check frame size
	adrp x5, EXT(gVirtBase)@page // For 32-bit frame, make sure we're not trying to copy from kernel
	add x5, x5, EXT(gVirtBase)@pageoff
	ldr x5, [x5]
	cmp x5, x0 // See if address is in kernel virtual range
	b.hi Lcopyinframe32 // If below kernel virtual range, proceed.
	mov w0, #EFAULT // Should never have a 32-bit frame in kernel virtual range
	b Lcopyinframe_done

Lcopyinframe32:
	ldr x12, [x0] // Copy 8 bytes
	str x12, [x1]
	mov w0, #0 // Success
	b Lcopyinframe_done

Lcopyinframe64:
	mov x3, VM_MIN_KERNEL_ADDRESS // Check if kernel address
	orr x9, x0, TBI_MASK // Hide tags in address comparison
	cmp x9, x3 // If in kernel address range, skip tag test
	b.hs Lcopyinframe_valid
	tst x0, TBI_MASK // Detect tagged pointers
	b.eq Lcopyinframe_valid
	mov w0, #EFAULT // Tagged address, fail
	b Lcopyinframe_done
Lcopyinframe_valid:
	ldp x12, x13, [x0] // Copy 16 bytes
	stp x12, x13, [x1]
	mov w0, #0 // Success

Lcopyinframe_done:
	CLEAR_RECOVERY_HANDLER x10, x11
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.text
	.align 2
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
	PANIC_UNIMPLEMENTED

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 * Set debug registers to match the current thread state
 * (NULL to disable). Assume 6 breakpoints and 2
 * watchpoints, since that has been the case in all cores
 * thus far.
 */
	.text
	.align 2
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
	PANIC_UNIMPLEMENTED

#if defined(APPLE_ARM64_ARCH_FAMILY)
/*
 * Note: still have to ISB before executing wfi!
 */
	.text
	.align 2
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
	PUSH_FRAME

#if defined(APPLETYPHOON)
	// <rdar://problem/15827409>
	mrs x0, ARM64_REG_HID2 // Read HID2
	orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
	msr ARM64_REG_HID2, x0 // Write HID2
	dsb sy
	isb sy
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
	// Enable deep sleep
	mrs x1, ARM64_REG_ACC_OVRD
	orr x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
	and x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
	orr x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
	and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
	orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
	and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
	orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
	and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
	orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
#if HAS_RETENTION_STATE
	orr x1, x1, #(ARM64_REG_ACC_OVRD_disPioOnWfiCpu)
#endif
	msr ARM64_REG_ACC_OVRD, x1


#else
	// Enable deep sleep
	mov x1, ARM64_REG_CYC_CFG_deepSleep
	msr ARM64_REG_CYC_CFG, x1
#endif
	// Set "OK to power down" (<rdar://problem/12390433>)
	mrs x0, ARM64_REG_CYC_OVRD
	orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
#if HAS_RETENTION_STATE
	orr x0, x0, #(ARM64_REG_CYC_OVRD_disWfiRetn)
#endif
	msr ARM64_REG_CYC_OVRD, x0

#if defined(APPLEMONSOON) || defined(APPLEVORTEX)
	ARM64_IS_PCORE x0
	cbz x0, Lwfi_inst // skip if not p-core

	/* <rdar://problem/32512947>: Flush the GUPS prefetcher prior to
	 * wfi. A Skye HW bug can cause the GUPS prefetcher on p-cores
	 * to be left with valid entries that fail to drain if a
	 * subsequent wfi is issued. This can prevent the core from
	 * power-gating. For the idle case that is recoverable, but
	 * for the deep-sleep (S2R) case in which cores MUST power-gate,
	 * it can lead to a hang. This can be prevented by disabling
	 * and re-enabling GUPS, which forces the prefetch queue to
	 * drain. This should be done as close to wfi as possible, i.e.
	 * at the very end of arm64_prepare_for_sleep(). */
#if defined(APPLEVORTEX)
	/* <rdar://problem/32821461>: Cyprus A0/A1 parts have a similar
	 * bug in the HSP prefetcher that can be worked around through
	 * the same method mentioned above for Skye. */
	SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x0, VORTEX_CPU_VERSION_B0, Lwfi_inst
#endif
	mrs x0, ARM64_REG_HID10
	orr x0, x0, #(ARM64_REG_HID10_DisHwpGups)
	msr ARM64_REG_HID10, x0
	isb sy
	and x0, x0, #(~(ARM64_REG_HID10_DisHwpGups))
	msr ARM64_REG_HID10, x0
	isb sy
#endif
Lwfi_inst:
	dsb sy
	isb sy
	wfi
	b Lwfi_inst

/*
 * Force WFI to use clock gating only
 *
 */
	.text
	.align 2
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	ARM64_STACK_PROLOG
	PUSH_FRAME

	mrs x0, ARM64_REG_CYC_OVRD
	orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
	msr ARM64_REG_CYC_OVRD, x0

	POP_FRAME
	ARM64_STACK_EPILOG


#if HAS_RETENTION_STATE
	.text
	.align 2
	.globl EXT(arm64_retention_wfi)
LEXT(arm64_retention_wfi)
	wfi
	cbz lr, Lwfi_retention // If lr is 0, we entered retention state and lost all GPRs except sp and pc
	ret // Otherwise just return to cpu_idle()
Lwfi_retention:
	mov x0, #1
	bl EXT(ClearIdlePop)
	mov x0, #0
	bl EXT(cpu_idle_exit) // cpu_idle_exit(from_reset = FALSE)
	b . // cpu_idle_exit() should never return
#endif

#if defined(APPLETYPHOON)

	.text
	.align 2
	.globl EXT(typhoon_prepare_for_wfi)

LEXT(typhoon_prepare_for_wfi)
	PUSH_FRAME

	// <rdar://problem/15827409>
	mrs x0, ARM64_REG_HID2 // Read HID2
	orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
	msr ARM64_REG_HID2, x0 // Write HID2
	dsb sy
	isb sy

	POP_FRAME
	ret


	.text
	.align 2
	.globl EXT(typhoon_return_from_wfi)
LEXT(typhoon_return_from_wfi)
	PUSH_FRAME

	// <rdar://problem/15827409>
	mrs x0, ARM64_REG_HID2 // Read HID2
	mov x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch)
	bic x0, x0, x1 // Clear HID.DisableMTLBPrefetch
	msr ARM64_REG_HID2, x0 // Write HID2
	dsb sy
	isb sy

	POP_FRAME
	ret
#endif

#ifdef APPLETYPHOON

#define HID0_DEFEATURES_1 0x0000a0c000064010ULL
#define HID1_DEFEATURES_1 0x000000004005bf20ULL
#define HID2_DEFEATURES_1 0x0000000000102074ULL
#define HID3_DEFEATURES_1 0x0000000000400003ULL
#define HID4_DEFEATURES_1 0x83ff00e100000268ULL
#define HID7_DEFEATURES_1 0x000000000000000eULL

#define HID0_DEFEATURES_2 0x0000a1c000020010ULL
#define HID1_DEFEATURES_2 0x000000000005d720ULL
#define HID2_DEFEATURES_2 0x0000000000002074ULL
#define HID3_DEFEATURES_2 0x0000000000400001ULL
#define HID4_DEFEATURES_2 0x8390000200000208ULL
#define HID7_DEFEATURES_2 0x0000000000000000ULL

/*
 * arg0 = target register
 * arg1 = 64-bit constant
 */
.macro LOAD_UINT64
	movz $0, #(($1 >> 48) & 0xffff), lsl #48
	movk $0, #(($1 >> 32) & 0xffff), lsl #32
	movk $0, #(($1 >> 16) & 0xffff), lsl #16
	movk $0, #(($1) & 0xffff)
.endmacro

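/*
 * cpu_defeatures_set: apply canned defeature set 1 or 2 (selected by x0) by
 * OR-ing the HID*_DEFEATURES_* constants above into ARM64_REG_HID0..HID7.
 */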
	.text
	.align 2
	.globl EXT(cpu_defeatures_set)
LEXT(cpu_defeatures_set)
	PUSH_FRAME
	cmp x0, #2
	b.eq cpu_defeatures_set_2
	cmp x0, #1
	b.ne cpu_defeatures_set_ret
	LOAD_UINT64 x1, HID0_DEFEATURES_1
	mrs x0, ARM64_REG_HID0
	orr x0, x0, x1
	msr ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_1
	mrs x0, ARM64_REG_HID1
	orr x0, x0, x1
	msr ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_1
	mrs x0, ARM64_REG_HID2
	orr x0, x0, x1
	msr ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_1
	mrs x0, ARM64_REG_HID3
	orr x0, x0, x1
	msr ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_1
	mrs x0, ARM64_REG_HID4
	orr x0, x0, x1
	msr ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_1
	mrs x0, ARM64_REG_HID7
	orr x0, x0, x1
	msr ARM64_REG_HID7, x0
	dsb sy
	isb sy
	b cpu_defeatures_set_ret
cpu_defeatures_set_2:
	LOAD_UINT64 x1, HID0_DEFEATURES_2
	mrs x0, ARM64_REG_HID0
	orr x0, x0, x1
	msr ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_2
	mrs x0, ARM64_REG_HID1
	orr x0, x0, x1
	msr ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_2
	mrs x0, ARM64_REG_HID2
	orr x0, x0, x1
	msr ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_2
	mrs x0, ARM64_REG_HID3
	orr x0, x0, x1
	msr ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_2
	mrs x0, ARM64_REG_HID4
	orr x0, x0, x1
	msr ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_2
	mrs x0, ARM64_REG_HID7
	orr x0, x0, x1
	msr ARM64_REG_HID7, x0
	dsb sy
	isb sy
	b cpu_defeatures_set_ret
cpu_defeatures_set_ret:
	POP_FRAME
	ret
#endif

#else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
	.text
	.align 2
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
	PUSH_FRAME
Lwfi_inst:
	dsb sy
	isb sy
	wfi
	b Lwfi_inst

/*
 * Force WFI to use clock gating only
 * Note: for non-Apple devices, do nothing.
 */
	.text
	.align 2
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	PUSH_FRAME
	nop
	POP_FRAME
	ret

#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */

/*
 * void arm64_replace_bootstack(cpu_data_t *cpu_data)
 *
 * This must be called from a kernel thread context running on the boot CPU,
 * after setting up new exception stacks in per-CPU data. That will guarantee
 * that the stack(s) we're trying to replace aren't currently in use. For
 * KTRR-protected devices, this must also be called prior to VM prot finalization
 * and lockdown, as updating SP1 requires a sensitive instruction.
 */
	.text
	.align 2
	.globl EXT(arm64_replace_bootstack)
LEXT(arm64_replace_bootstack)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	// Set the exception stack pointer
	ldr x0, [x0, CPU_EXCEPSTACK_TOP]
	mrs x4, DAIF // Load current DAIF; use x4 as pinst may trash x1-x3
	msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) // Disable IRQ/FIQ/serror
	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov x1, lr
	bl EXT(pinst_spsel_1)
	mov lr, x1
#else
	msr SPSel, #1
#endif
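	// SPSel is 1 at this point, so the write to sp below targets SP_EL1;
	// switch back to SP_EL0 afterwards.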
	mov sp, x0
	msr SPSel, #0
	msr DAIF, x4 // Restore interrupt state
	POP_FRAME
	ARM64_STACK_EPILOG

#ifdef MONITOR
/*
 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *                            uintptr_t arg2, uintptr_t arg3)
 *
 * Call the EL3 monitor with 4 arguments in registers
 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
 * registers are preserved, temporary registers are not. Parameters and results are passed in
 * the usual manner.
 */
	.text
	.align 2
	.globl EXT(monitor_call)
LEXT(monitor_call)
	smc 0x11
	ret
#endif

#ifdef HAS_APPLE_PAC
/**
 * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc,
 *                           uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                           uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_sign_thread_state)
LEXT(ml_sign_thread_state)
	pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
	/*
	 * Mask off the carry flag so we don't need to re-sign when that flag is
	 * touched by the system call return path.
	 */
	bic x2, x2, PSR_CF
	pacga x1, x2, x1 /* SPSR hash (gkey + pc hash) */
	pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */
	pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */
	pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */
	str x1, [x0, SS64_JOPHASH]
	ret

/**
 * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc,
 *                            uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                            uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_check_signed_state)
LEXT(ml_check_signed_state)
	pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
	/*
	 * Mask off the carry flag so we don't need to re-sign when that flag is
	 * touched by the system call return path.
	 */
	bic x2, x2, PSR_CF
	pacga x1, x2, x1 /* SPSR hash (gkey + pc hash) */
	pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */
	pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */
	pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */
	ldr x2, [x0, SS64_JOPHASH]
	cmp x1, x2
	b.ne Lcheck_hash_panic
	ret
Lcheck_hash_panic:
	mov x1, x0
	adr x0, Lcheck_hash_str
	CALL_EXTERN panic_with_thread_kernel_state
Lcheck_hash_str:
	.asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)"
#endif /* HAS_APPLE_PAC */

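/*
 * fill32_dczva: zero x1 bytes starting at x0 using DC ZVA, 64 bytes per
 * iteration. Assumes a 64-byte ZVA block size and that the caller passes a
 * 64-byte aligned address and a multiple-of-64 byte count.
 */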
	.text
	.align 2
	.globl EXT(fill32_dczva)
LEXT(fill32_dczva)
0:
	dc zva, x0
	add x0, x0, #64
	subs x1, x1, #64
	b.hi 0b
	ret

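/*
 * fill32_nt: fill x1 bytes starting at x0 with the 32-bit pattern in w2,
 * using non-temporal stores, 128 bytes per iteration. The caller is assumed
 * to pass a suitably aligned address and a multiple-of-128 byte count.
 */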
	.text
	.align 2
	.globl EXT(fill32_nt)
LEXT(fill32_nt)
	dup.4s v0, w2
0:
	stnp q0, q0, [x0]
	stnp q0, q0, [x0, #0x20]
	stnp q0, q0, [x0, #0x40]
	stnp q0, q0, [x0, #0x60]
	add x0, x0, #128
	subs x1, x1, #128
	b.hi 0b
	ret

/* vim: set sw=4 ts=4: */