1 /*
2 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <machine/asm.h>
30 #include <arm64/machine_machdep.h>
31 #include <arm64/proc_reg.h>
32 #include <arm/pmap.h>
33 #include <pexpert/arm64/board_config.h>
34 #include <sys/errno.h>
35 #include "assym.s"
36
37
38 #if defined(HAS_APPLE_PAC)
39 /*
40 * void
41 * ml_set_kernelkey_enabled(boolean_t enable)
42 *
43 * Toggle pointer auth kernel domain key diversification. Written in assembly to prevent compiler reordering.
44 *
45 */
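/*
 * Illustrative C sketch of the read-modify-write performed below. The
 * read_apctl_el1/write_apctl_el1 accessors are placeholders, not real xnu
 * helpers; only APCTL_EL1_KernKeyEn comes from proc_reg.h.
 *
 *	uint64_t apctl = read_apctl_el1();      // mrs  x1, ARM64_REG_APCTL_EL1
 *	if (enable)
 *		apctl |= APCTL_EL1_KernKeyEn;   // csel picks the orr result
 *	else
 *		apctl &= ~APCTL_EL1_KernKeyEn;  // csel picks the and result
 *	write_apctl_el1(apctl);                 // msr  ARM64_REG_APCTL_EL1, x1
 *	__asm__ volatile ("isb");               // isb
 */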
46
47 .align 2
48 .globl EXT(ml_set_kernelkey_enabled)
49 LEXT(ml_set_kernelkey_enabled)
50 mrs x1, ARM64_REG_APCTL_EL1
51 orr x2, x1, #APCTL_EL1_KernKeyEn
52 and x1, x1, #~APCTL_EL1_KernKeyEn
53 cmp w0, #0
54 csel x1, x1, x2, eq
55 msr ARM64_REG_APCTL_EL1, x1
56 isb
57 ret
58
59 #endif /* defined(HAS_APPLE_PAC) */
60
61
62
63 /* uint32_t get_fpscr(void):
64 * Returns (FPSR | FPCR).
65 */
66 .align 2
67 .globl EXT(get_fpscr)
68 LEXT(get_fpscr)
69 #if __ARM_VFP__
70 mrs x1, FPSR // Grab FPSR
71 mov x4, #(FPSR_MASK & 0xFFFF)
72 mov x5, #(FPSR_MASK & 0xFFFF0000)
73 orr x0, x4, x5
74 and x1, x1, x0 // Be paranoid, and clear bits we expect to
75 // be clear
76 mrs x2, FPCR // Grab FPCR
77 mov x4, #(FPCR_MASK & 0xFFFF)
78 mov x5, #(FPCR_MASK & 0xFFFF0000)
79 orr x0, x4, x5
80 and x2, x2, x0 // Be paranoid, and clear bits we expect to
81 // be clear
82 orr x0, x1, x2 // OR them to get FPSCR equivalent state
83 #else
84 mov x0, #0
85 #endif
86 ret
87 .align 2
88 .globl EXT(set_fpscr)
89 /* void set_fpscr(uint32_t value):
90 * Set the FPCR and FPSR registers, based on the given value; a
91 * noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
92 * and FPCR are not responsible for condition codes.
93 */
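/*
 * Hypothetical caller-side sketch of how get_fpscr() (above) and set_fpscr()
 * (below) pair up: the merged FPSR|FPCR image returned by one is split back
 * into the two registers by the other. FPCR_DN is used purely as an example
 * bit name and is not defined in this file.
 *
 *	uint32_t saved = get_fpscr();   // capture FPSR | FPCR
 *	set_fpscr(saved | FPCR_DN);     // e.g. temporarily force default-NaN mode
 *	...                             // FP work
 *	set_fpscr(saved);               // restore the original state
 */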
94 LEXT(set_fpscr)
95 #if __ARM_VFP__
96 mov x4, #(FPSR_MASK & 0xFFFF)
97 mov x5, #(FPSR_MASK & 0xFFFF0000)
98 orr x1, x4, x5
99 and x1, x1, x0 // Clear the bits that don't apply to FPSR
100 mov x4, #(FPCR_MASK & 0xFFFF)
101 mov x5, #(FPCR_MASK & 0xFFFF0000)
102 orr x2, x4, x5
103 and x2, x2, x0 // Clear the bits that don't apply to FPCR
104 msr FPSR, x1 // Write FPSR
105 msr FPCR, x2 // Write FPCR
106 isb // FPCR requires synchronization
107 #endif
108 ret
109
110 /*
111 * void update_mdscr(unsigned long clear, unsigned long set)
112 * Clears and sets the specified bits in MDSCR_EL1.
113 *
114 * Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
115 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
116 * self-hosted user mode debug. Any checks before the MSR can be skipped with ROP,
117 * so we need to put the checks after the MSR where they can't be skipped. That
118 * still leaves a small window if a breakpoint is set on the instruction
119 * immediately after the MSR. To handle that, we also do a check and then set of
120 * the breakpoint control registers. This allows us to guarantee that a given
121 * core will never have both KDE set and a breakpoint targeting EL1.
122 *
123 * If KDE gets set, unset it and then panic
124 */
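/*
 * Caller-side sketch (illustrative) of the contract implemented below: the
 * routine computes new = (MDSCR_EL1 & ~clear) | set, forces KDE (bit 13) off
 * before every write, and (with CONFIG_KERNEL_INTEGRITY) panics if KDE is
 * ever observed set. The MDSCR_MDE macro name is a placeholder, not taken
 * from this file.
 *
 *	#define MDSCR_MDE (1ULL << 15)     // monitor debug enable (example bit)
 *
 *	update_mdscr(0, MDSCR_MDE);        // set a debug bit; KDE stays clear
 *	...
 *	update_mdscr(MDSCR_MDE, 0);        // clear it again on the way out
 */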
125 .align 2
126 .globl EXT(update_mdscr)
127 LEXT(update_mdscr)
128 mov x4, #0
129 mrs x2, MDSCR_EL1
130 bic x2, x2, x0
131 orr x2, x2, x1
132 1:
133 bic x2, x2, #0x2000
134 msr MDSCR_EL1, x2
135 #if defined(CONFIG_KERNEL_INTEGRITY)
136 /*
137 * verify KDE didn't get set (including via ROP)
138 * If set, clear it and then panic
139 */
140 ands x3, x2, #0x2000
141 orr x4, x4, x3
142 bne 1b
143 cmp x4, xzr
144 b.ne Lupdate_mdscr_panic
145 #endif
146 ret
147
148 Lupdate_mdscr_panic:
149 adrp x0, Lupdate_mdscr_panic_str@page
150 add x0, x0, Lupdate_mdscr_panic_str@pageoff
151 b EXT(panic)
152 b .
153
154 Lupdate_mdscr_panic_str:
155 .asciz "MDSCR.KDE was set"
156
157
158 /*
159 * Set MMU Translation Table Base Alternate
160 */
161 .text
162 .align 2
163 .globl EXT(set_mmu_ttb_alternate)
164 LEXT(set_mmu_ttb_alternate)
165 dsb sy
166 #if defined(KERNEL_INTEGRITY_KTRR)
167 mov x1, lr
168 bl EXT(pinst_set_ttbr1)
169 mov lr, x1
170 #else
171 msr TTBR1_EL1, x0
172 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
173 isb sy
174 ret
175
176 .text
177 .align 2
178 .globl EXT(set_mmu_ttb)
179 LEXT(set_mmu_ttb)
180 #if __ARM_KERNEL_PROTECT__
181 /* All EL1-mode ASIDs are odd. */
182 orr x0, x0, #(1 << TTBR_ASID_SHIFT)
183 #endif /* __ARM_KERNEL_PROTECT__ */
184 dsb ish
185 msr TTBR0_EL1, x0
186 isb sy
187 ret
188
189 /*
190 * set AUX control register
191 */
192 .text
193 .align 2
194 .globl EXT(set_aux_control)
195 LEXT(set_aux_control)
196 msr ACTLR_EL1, x0
197 // Synchronize system
198 isb sy
199 ret
200
201 #if __ARM_KERNEL_PROTECT__
202 .text
203 .align 2
204 .globl EXT(set_vbar_el1)
205 LEXT(set_vbar_el1)
206 #if defined(KERNEL_INTEGRITY_KTRR)
207 b EXT(pinst_set_vbar)
208 #else
209 msr VBAR_EL1, x0
210 ret
211 #endif
212 #endif /* __ARM_KERNEL_PROTECT__ */
213
214
215 /*
216 * set translation control register
217 */
218 .text
219 .align 2
220 .globl EXT(set_tcr)
221 LEXT(set_tcr)
222 #if defined(APPLE_ARM64_ARCH_FAMILY)
223 // Assert that T0SZ is always equal to T1SZ
224 eor x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
225 and x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
226 cbnz x1, L_set_tcr_panic
227 #if defined(KERNEL_INTEGRITY_KTRR)
228 mov x1, lr
229 bl EXT(pinst_set_tcr)
230 mov lr, x1
231 #else
232 msr TCR_EL1, x0
233 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
234 isb sy
235 ret
236
237 L_set_tcr_panic:
238 PUSH_FRAME
239 sub sp, sp, #16
240 str x0, [sp]
241 adr x0, L_set_tcr_panic_str
242 BRANCH_EXTERN panic
243
244 L_set_locked_reg_panic:
245 PUSH_FRAME
246 sub sp, sp, #16
247 str x0, [sp]
248 adr x0, L_set_locked_reg_panic_str
249 BRANCH_EXTERN panic
250 b .
251
252 L_set_tcr_panic_str:
253 .asciz "set_tcr: t0sz, t1sz not equal (%llx)\n"
254
255
256 L_set_locked_reg_panic_str:
257 .asciz "attempt to set locked register: (%llx)\n"
258 #else
259 #if defined(KERNEL_INTEGRITY_KTRR)
260 mov x1, lr
261 bl EXT(pinst_set_tcr)
262 mov lr, x1
263 #else
264 msr TCR_EL1, x0
265 #endif
266 isb sy
267 ret
268 #endif // defined(APPLE_ARM64_ARCH_FAMILY)
269
270 /*
271 * MMU kernel virtual to physical address translation
272 */
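/*
 * Usage sketch in C (prototype approximate): mmu_kvtop() returns 0 when the
 * address does not translate; otherwise it returns the physical address with
 * the page offset folded back in, i.e. roughly
 * (PAR_EL1.PA & ~0xFFFULL) | (va & 0xFFFULL), masked to 48 bits.
 *
 *	uint64_t pa = mmu_kvtop((vm_offset_t)ptr);
 *	if (pa == 0) {
 *		// not mapped: PAR_EL1.F was set by the AT instruction
 *	} else {
 *		// pa is the physical address backing ptr
 *	}
 */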
273 .text
274 .align 2
275 .globl EXT(mmu_kvtop)
276 LEXT(mmu_kvtop)
277 mrs x2, DAIF // Load current DAIF
278 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
279 at s1e1r, x0 // Translation Stage 1 EL1
280 mrs x1, PAR_EL1 // Read result
281 msr DAIF, x2 // Restore interrupt state
282 tbnz x1, #0, L_mmu_kvtop_invalid // Test Translation not valid
283 bfm x1, x0, #0, #11 // Add page offset
284 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
285 ret
286 L_mmu_kvtop_invalid:
287 mov x0, #0 // Return invalid
288 ret
289
290 /*
291 * MMU user virtual to physical address translation
292 */
293 .text
294 .align 2
295 .globl EXT(mmu_uvtop)
296 LEXT(mmu_uvtop)
297 lsr x8, x0, #56 // Extract top byte
298 cbnz x8, L_mmu_uvtop_invalid // Tagged pointers are invalid
299 mrs x2, DAIF // Load current DAIF
300 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
301 at s1e0r, x0 // Translation Stage 1 EL0
302 mrs x1, PAR_EL1 // Read result
303 msr DAIF, x2 // Restore interrupt state
304 tbnz x1, #0, L_mmu_uvtop_invalid // Test Translation not valid
305 bfm x1, x0, #0, #11 // Add page offset
306 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
307 ret
308 L_mmu_uvtop_invalid:
309 mov x0, #0 // Return invalid
310 ret
311
312 /*
313 * MMU kernel virtual to physical address preflight write access
314 */
315 .text
316 .align 2
317 .globl EXT(mmu_kvtop_wpreflight)
318 LEXT(mmu_kvtop_wpreflight)
319 mrs x2, DAIF // Load current DAIF
320 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
321 at s1e1w, x0 // Translation Stage 1 EL1
322 mrs x1, PAR_EL1 // Read result
323 msr DAIF, x2 // Restore interrupt state
324 tbnz x1, #0, L_mmu_kvtop_wpreflight_invalid // Test Translation not valid
325 bfm x1, x0, #0, #11 // Add page offset
326 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
327 ret
328 L_mmu_kvtop_wpreflight_invalid:
329 mov x0, #0 // Return invalid
330 ret
331
332 /*
333 * SET_RECOVERY_HANDLER
334 *
335 * Sets up a page fault recovery handler
336 *
337 * arg0 - persisted thread pointer
338 * arg1 - persisted recovery handler
339 * arg2 - scratch reg
340 * arg3 - recovery label
341 */
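/*
 * C-level sketch (illustrative) of what this macro does to the current
 * thread. The helpers sign_with_iakey() and blend() are placeholders; the
 * TH_RECOVER offset and PAC_DISCRIMINATOR_RECOVER are the real inputs used
 * below.
 *
 *	thread_t  th      = current_thread();            // mrs  TPIDR_EL1
 *	uintptr_t handler = (uintptr_t)&&recovery_label; // adrp/add
 *	#if defined(HAS_APPLE_PAC)
 *	// discriminator = &th->recover with PAC_DISCRIMINATOR_RECOVER in bits 63:48
 *	handler = sign_with_iakey(handler, blend(&th->recover));
 *	#endif
 *	uintptr_t saved   = th->recover;                 // ldr  (old handler, arg1)
 *	th->recover       = handler;                     // str  (install new one)
 *	// ... faultable accesses ...
 *	th->recover       = saved;                       // CLEAR_RECOVERY_HANDLER
 */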
342 .macro SET_RECOVERY_HANDLER
343 mrs $0, TPIDR_EL1 // Load thread pointer
344 adrp $2, $3@page // Load the recovery handler address
345 add $2, $2, $3@pageoff
346 #if defined(HAS_APPLE_PAC)
347 add $1, $0, TH_RECOVER
348 movk $1, #PAC_DISCRIMINATOR_RECOVER, lsl 48
349 pacia $2, $1 // Sign with IAKey + blended discriminator
350 #endif
351
352 ldr $1, [$0, TH_RECOVER] // Save previous recovery handler
353 str $2, [$0, TH_RECOVER] // Set new signed recovery handler
354 .endmacro
355
356 /*
357 * CLEAR_RECOVERY_HANDLER
358 *
359 * Clears page fault handler set by SET_RECOVERY_HANDLER
360 *
361 * arg0 - thread pointer saved by SET_RECOVERY_HANDLER
362 * arg1 - old recovery handler saved by SET_RECOVERY_HANDLER
363 */
364 .macro CLEAR_RECOVERY_HANDLER
365 str $1, [$0, TH_RECOVER] // Restore the previous recovery handler
366 .endmacro
367
368
369 .text
370 .align 2
371 copyio_error:
372 CLEAR_RECOVERY_HANDLER x10, x11
373 mov x0, #EFAULT // Return an EFAULT error
374 POP_FRAME
375 ARM64_STACK_EPILOG
376
377 /*
378 * int _bcopyin(const char *src, char *dst, vm_size_t len)
379 */
380 .text
381 .align 2
382 .globl EXT(_bcopyin)
383 LEXT(_bcopyin)
384 ARM64_STACK_PROLOG
385 PUSH_FRAME
386 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
387 /* If len is less than 16 bytes, just do a bytewise copy */
388 cmp x2, #16
389 b.lt 2f
390 sub x2, x2, #16
391 1:
392 /* 16 bytes at a time */
393 ldp x3, x4, [x0], #16
394 stp x3, x4, [x1], #16
395 subs x2, x2, #16
396 b.ge 1b
397 /* Fixup the len and test for completion */
398 adds x2, x2, #16
399 b.eq 3f
400 2: /* Bytewise */
401 subs x2, x2, #1
402 ldrb w3, [x0], #1
403 strb w3, [x1], #1
404 b.hi 2b
405 3:
406 CLEAR_RECOVERY_HANDLER x10, x11
407 mov x0, #0
408 POP_FRAME
409 ARM64_STACK_EPILOG
410
411 /*
412 * int _copyin_atomic32(const char *src, uint32_t *dst)
413 */
414 .text
415 .align 2
416 .globl EXT(_copyin_atomic32)
417 LEXT(_copyin_atomic32)
418 ARM64_STACK_PROLOG
419 PUSH_FRAME
420 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
421 ldr w8, [x0]
422 str w8, [x1]
423 mov x0, #0
424 CLEAR_RECOVERY_HANDLER x10, x11
425 POP_FRAME
426 ARM64_STACK_EPILOG
427
428 /*
429 * int _copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
430 */
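/*
 * Behavioral sketch in C (illustrative): the routine loads *src exclusively;
 * if it still equals `value` it waits for an event and returns 0, otherwise
 * it returns ESTALE immediately. A fault on the user address returns EFAULT
 * via the recovery handler.
 *
 *	int rc = _copyin_atomic32_wait_if_equals(uaddr, expected);
 *	if (rc == ESTALE) {
 *		// value already changed; re-read it and retry
 *	} else if (rc == 0) {
 *		// woke from wfe (possibly spuriously); re-check the value
 *	}
 */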
431 .text
432 .align 2
433 .globl EXT(_copyin_atomic32_wait_if_equals)
434 LEXT(_copyin_atomic32_wait_if_equals)
435 ARM64_STACK_PROLOG
436 PUSH_FRAME
437 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
438 ldxr w8, [x0]
439 cmp w8, w1
440 mov x0, ESTALE
441 b.ne 1f
442 mov x0, #0
443 wfe
444 1:
445 clrex
446 CLEAR_RECOVERY_HANDLER x10, x11
447 POP_FRAME
448 ARM64_STACK_EPILOG
449
450 /*
451 * int _copyin_atomic64(const char *src, uint64_t *dst)
452 */
453 .text
454 .align 2
455 .globl EXT(_copyin_atomic64)
456 LEXT(_copyin_atomic64)
457 ARM64_STACK_PROLOG
458 PUSH_FRAME
459 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
460 ldr x8, [x0]
461 str x8, [x1]
462 mov x0, #0
463 CLEAR_RECOVERY_HANDLER x10, x11
464 POP_FRAME
465 ARM64_STACK_EPILOG
466
467
468 /*
469 * int _copyout_atomic32(uint32_t value, char *dst)
470 */
471 .text
472 .align 2
473 .globl EXT(_copyout_atomic32)
474 LEXT(_copyout_atomic32)
475 ARM64_STACK_PROLOG
476 PUSH_FRAME
477 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
478 str w0, [x1]
479 mov x0, #0
480 CLEAR_RECOVERY_HANDLER x10, x11
481 POP_FRAME
482 ARM64_STACK_EPILOG
483
484 /*
485 * int _copyout_atomic64(uint64_t value, char *dst)
486 */
487 .text
488 .align 2
489 .globl EXT(_copyout_atomic64)
490 LEXT(_copyout_atomic64)
491 ARM64_STACK_PROLOG
492 PUSH_FRAME
493 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
494 str x0, [x1]
495 mov x0, #0
496 CLEAR_RECOVERY_HANDLER x10, x11
497 POP_FRAME
498 ARM64_STACK_EPILOG
499
500
501 /*
502 * int _bcopyout(const char *src, char *dst, vm_size_t len)
503 */
504 .text
505 .align 2
506 .globl EXT(_bcopyout)
507 LEXT(_bcopyout)
508 ARM64_STACK_PROLOG
509 PUSH_FRAME
510 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
511 /* If len is less than 16 bytes, just do a bytewise copy */
512 cmp x2, #16
513 b.lt 2f
514 sub x2, x2, #16
515 1:
516 /* 16 bytes at a time */
517 ldp x3, x4, [x0], #16
518 stp x3, x4, [x1], #16
519 subs x2, x2, #16
520 b.ge 1b
521 /* Fixup the len and test for completion */
522 adds x2, x2, #16
523 b.eq 3f
524 2: /* Bytewise */
525 subs x2, x2, #1
526 ldrb w3, [x0], #1
527 strb w3, [x1], #1
528 b.hi 2b
529 3:
530 CLEAR_RECOVERY_HANDLER x10, x11
531 mov x0, #0
532 POP_FRAME
533 ARM64_STACK_EPILOG
534
535 /*
536 * int _bcopyinstr(
537 * const user_addr_t user_addr,
538 * char *kernel_addr,
539 * vm_size_t max,
540 * vm_size_t *actual)
541 */
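/*
 * Caller-side sketch (illustrative): at most `max` bytes are copied,
 * including the terminating NUL; the count copied is stored through `actual`
 * (except on fault). Returns 0 on success, ENAMETOOLONG if no NUL was found
 * within `max` bytes, or EFAULT if the user address faulted.
 *
 *	char      name[MAXPATHLEN];
 *	vm_size_t copied = 0;
 *	int err = _bcopyinstr(user_path, name, sizeof(name), &copied);
 *	if (err == 0) {
 *		// name[] now holds a NUL-terminated string of `copied` bytes
 *	}
 */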
542 .text
543 .align 2
544 .globl EXT(_bcopyinstr)
545 LEXT(_bcopyinstr)
546 ARM64_STACK_PROLOG
547 PUSH_FRAME
548 adr x4, Lcopyinstr_error // Get address for recover
549 mrs x10, TPIDR_EL1 // Get thread pointer
550 ldr x11, [x10, TH_RECOVER] // Save previous recover
551
552 #if defined(HAS_APPLE_PAC)
553 add x5, x10, TH_RECOVER // Sign new pointer with IAKey + blended discriminator
554 movk x5, #PAC_DISCRIMINATOR_RECOVER, lsl 48
555 pacia x4, x5
556 #endif
557 str x4, [x10, TH_RECOVER] // Store new recover
558
559 mov x4, #0 // x4 - total bytes copied
560 Lcopyinstr_loop:
561 ldrb w5, [x0], #1 // Load a byte from the user source
562 strb w5, [x1], #1 // Store a byte to the kernel dest
563 add x4, x4, #1 // Increment bytes copied
564 cbz x5, Lcopyinstr_done // If this byte is null, we're done
565 cmp x4, x2 // If we're out of space, return an error
566 b.ne Lcopyinstr_loop
567 Lcopyinstr_too_long:
568 mov x5, #ENAMETOOLONG // Set current byte to error code for later return
569 Lcopyinstr_done:
570 str x4, [x3] // Return number of bytes copied
571 mov x0, x5 // Set error code (0 on success, ENAMETOOLONG on failure)
572 b Lcopyinstr_exit
573 Lcopyinstr_error:
574 mov x0, #EFAULT // Return EFAULT on error
575 Lcopyinstr_exit:
576 str x11, [x10, TH_RECOVER] // Restore old recover
577 POP_FRAME
578 ARM64_STACK_EPILOG
579
580 /*
581 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
582 *
583 * Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
584 * either user or kernel memory, or 8 bytes (AArch32) from user only.
585 *
586 * x0 : address of frame to copy.
587 * x1 : kernel address at which to store data.
588 * w2 : whether to copy an AArch32 or AArch64 frame.
589 * x3 : temp
590 * x5 : temp (kernel virtual base)
591 * x9 : temp
592 * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
593 * x11 : old recovery function (set by SET_RECOVERY_HANDLER)
594 * x12, x13 : backtrace data
595 *
596 */
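/*
 * Illustrative use in C: walking a frame-pointer chain while collecting a
 * backtrace. The struct mirrors the fixed {fp, lr} pair described above
 * (for AArch32 frames only the first 8 bytes, two 32-bit words, are filled);
 * names are approximate.
 *
 *	struct frame {
 *		uint64_t fp;     // saved frame pointer
 *		uint64_t lr;     // saved return address
 *	} frame;
 *
 *	while (fp != 0 && depth < max_depth) {
 *		if (copyinframe(fp, (char *)&frame, is64bit) != 0)
 *			break;                   // EFAULT or rejected address
 *		backtrace[depth++] = frame.lr;
 *		fp = frame.fp;
 *	}
 */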
597 .text
598 .align 2
599 .globl EXT(copyinframe)
600 LEXT(copyinframe)
601 ARM64_STACK_PROLOG
602 PUSH_FRAME
603 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
604 cbnz w2, Lcopyinframe64 // Check frame size
605 adrp x5, EXT(gVirtBase)@page // For 32-bit frame, make sure we're not trying to copy from kernel
606 add x5, x5, EXT(gVirtBase)@pageoff
607 ldr x5, [x5]
608 cmp x5, x0 // See if address is in kernel virtual range
609 b.hi Lcopyinframe32 // If below kernel virtual range, proceed.
610 mov w0, #EFAULT // Should never have a 32-bit frame in kernel virtual range
611 b Lcopyinframe_done
612
613 Lcopyinframe32:
614 ldr x12, [x0] // Copy 8 bytes
615 str x12, [x1]
616 mov w0, #0 // Success
617 b Lcopyinframe_done
618
619 Lcopyinframe64:
620 mov x3, VM_MIN_KERNEL_ADDRESS // Check if kernel address
621 orr x9, x0, TBI_MASK // Hide tags in address comparison
622 cmp x9, x3 // If in kernel address range, skip tag test
623 b.hs Lcopyinframe_valid
624 tst x0, TBI_MASK // Detect tagged pointers
625 b.eq Lcopyinframe_valid
626 mov w0, #EFAULT // Tagged address, fail
627 b Lcopyinframe_done
628 Lcopyinframe_valid:
629 ldp x12, x13, [x0] // Copy 16 bytes
630 stp x12, x13, [x1]
631 mov w0, #0 // Success
632
633 Lcopyinframe_done:
634 CLEAR_RECOVERY_HANDLER x10, x11
635 POP_FRAME
636 ARM64_STACK_EPILOG
637
638
639 /*
640 * uint32_t arm_debug_read_dscr(void)
641 */
642 .text
643 .align 2
644 .globl EXT(arm_debug_read_dscr)
645 LEXT(arm_debug_read_dscr)
646 PANIC_UNIMPLEMENTED
647
648 /*
649 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
650 *
651 * Set debug registers to match the current thread state
652 * (NULL to disable). Assume 6 breakpoints and 2
653 * watchpoints, since that has been the case in all cores
654 * thus far.
655 */
656 .text
657 .align 2
658 .globl EXT(arm_debug_set_cp14)
659 LEXT(arm_debug_set_cp14)
660 PANIC_UNIMPLEMENTED
661
662 #if defined(APPLE_ARM64_ARCH_FAMILY)
663 /*
664 * Note: still have to ISB before executing wfi!
665 */
666 .text
667 .align 2
668 .globl EXT(arm64_prepare_for_sleep)
669 LEXT(arm64_prepare_for_sleep)
670 PUSH_FRAME
671
672 #if defined(APPLETYPHOON)
673 // <rdar://problem/15827409>
674 mrs x0, ARM64_REG_HID2 // Read HID2
675 orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
676 msr ARM64_REG_HID2, x0 // Write HID2
677 dsb sy
678 isb sy
679 #endif
680
681 #if __ARM_GLOBAL_SLEEP_BIT__
682 // Enable deep sleep
683 mrs x1, ARM64_REG_ACC_OVRD
684 orr x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
685 and x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
686 orr x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
687 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
688 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
689 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
690 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
691 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
692 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
693 msr ARM64_REG_ACC_OVRD, x1
694
695
696 #else
697 // Enable deep sleep
698 mov x1, ARM64_REG_CYC_CFG_deepSleep
699 msr ARM64_REG_CYC_CFG, x1
700 #endif
701 // Set "OK to power down" (<rdar://problem/12390433>)
702 mrs x0, ARM64_REG_CYC_OVRD
703 orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
704 msr ARM64_REG_CYC_OVRD, x0
705
706 #if defined(APPLEMONSOON)
707 ARM64_IS_PCORE x0
708 cbz x0, Lwfi_inst // skip if not p-core
709
710 /* <rdar://problem/32512947>: Flush the GUPS prefetcher prior to
711 * wfi. A Skye HW bug can cause the GUPS prefetcher on p-cores
712 * to be left with valid entries that fail to drain if a
713 * subsequent wfi is issued. This can prevent the core from
714 * power-gating. For the idle case that is recoverable, but
715 * for the deep-sleep (S2R) case in which cores MUST power-gate,
716 * it can lead to a hang. This can be prevented by disabling
717 * and re-enabling GUPS, which forces the prefetch queue to
718 * drain. This should be done as close to wfi as possible, i.e.
719 * at the very end of arm64_prepare_for_sleep(). */
720 mrs x0, ARM64_REG_HID10
721 orr x0, x0, #(ARM64_REG_HID10_DisHwpGups)
722 msr ARM64_REG_HID10, x0
723 isb sy
724 and x0, x0, #(~(ARM64_REG_HID10_DisHwpGups))
725 msr ARM64_REG_HID10, x0
726 isb sy
727 #endif
728 Lwfi_inst:
729 dsb sy
730 isb sy
731 wfi
732 b Lwfi_inst
733
734 /*
735 * Force WFI to use clock gating only
736 *
737 */
738 .text
739 .align 2
740 .globl EXT(arm64_force_wfi_clock_gate)
741 LEXT(arm64_force_wfi_clock_gate)
742 ARM64_STACK_PROLOG
743 PUSH_FRAME
744
745 mrs x0, ARM64_REG_CYC_OVRD
746 orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
747 msr ARM64_REG_CYC_OVRD, x0
748
749 POP_FRAME
750 ARM64_STACK_EPILOG
751
752
753
754 #if defined(APPLETYPHOON)
755
756 .text
757 .align 2
758 .globl EXT(typhoon_prepare_for_wfi)
759
760 LEXT(typhoon_prepare_for_wfi)
761 PUSH_FRAME
762
763 // <rdar://problem/15827409>
764 mrs x0, ARM64_REG_HID2 // Read HID2
765 orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
766 msr ARM64_REG_HID2, x0 // Write HID2
767 dsb sy
768 isb sy
769
770 POP_FRAME
771 ret
772
773
774 .text
775 .align 2
776 .globl EXT(typhoon_return_from_wfi)
777 LEXT(typhoon_return_from_wfi)
778 PUSH_FRAME
779
780 // <rdar://problem/15827409>
781 mrs x0, ARM64_REG_HID2 // Read HID2
782 mov x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch) //
783 bic x0, x0, x1 // Clear HID.DisableMTLBPrefetch
784 msr ARM64_REG_HID2, x0 // Write HID2
785 dsb sy
786 isb sy
787
788 POP_FRAME
789 ret
790 #endif
791
792 #ifdef APPLETYPHOON
793
794 #define HID0_DEFEATURES_1 0x0000a0c000064010ULL
795 #define HID1_DEFEATURES_1 0x000000004005bf20ULL
796 #define HID2_DEFEATURES_1 0x0000000000102074ULL
797 #define HID3_DEFEATURES_1 0x0000000000400003ULL
798 #define HID4_DEFEATURES_1 0x83ff00e100000268ULL
799 #define HID7_DEFEATURES_1 0x000000000000000eULL
800
801 #define HID0_DEFEATURES_2 0x0000a1c000020010ULL
802 #define HID1_DEFEATURES_2 0x000000000005d720ULL
803 #define HID2_DEFEATURES_2 0x0000000000002074ULL
804 #define HID3_DEFEATURES_2 0x0000000000400001ULL
805 #define HID4_DEFEATURES_2 0x8390000200000208ULL
806 #define HID7_DEFEATURES_2 0x0000000000000000ULL
807
808 /*
809 arg0 = target register
810 arg1 = 64-bit constant
811 */
812 .macro LOAD_UINT64
813 movz $0, #(($1 >> 48) & 0xffff), lsl #48
814 movk $0, #(($1 >> 32) & 0xffff), lsl #32
815 movk $0, #(($1 >> 16) & 0xffff), lsl #16
816 movk $0, #(($1) & 0xffff)
817 .endmacro
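/*
 * Worked example of the 16-bit chunking LOAD_UINT64 performs, shown in C for
 * clarity (the macro emits one movz plus three movk instructions):
 *
 *	uint64_t v  = 0x0000a0c000064010ULL;    // HID0_DEFEATURES_1
 *	uint16_t c3 = (v >> 48) & 0xffff;       // 0x0000 -> movz $0, #c3, lsl #48
 *	uint16_t c2 = (v >> 32) & 0xffff;       // 0xa0c0 -> movk $0, #c2, lsl #32
 *	uint16_t c1 = (v >> 16) & 0xffff;       // 0x0006 -> movk $0, #c1, lsl #16
 *	uint16_t c0 = (v >>  0) & 0xffff;       // 0x4010 -> movk $0, #c0
 *	// ((uint64_t)c3 << 48) | ((uint64_t)c2 << 32) | ((uint64_t)c1 << 16) | c0 == v
 */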
818
819 .text
820 .align 2
821 .globl EXT(cpu_defeatures_set)
822 LEXT(cpu_defeatures_set)
823 PUSH_FRAME
824 cmp x0, #2
825 b.eq cpu_defeatures_set_2
826 cmp x0, #1
827 b.ne cpu_defeatures_set_ret
828 LOAD_UINT64 x1, HID0_DEFEATURES_1
829 mrs x0, ARM64_REG_HID0
830 orr x0, x0, x1
831 msr ARM64_REG_HID0, x0
832 LOAD_UINT64 x1, HID1_DEFEATURES_1
833 mrs x0, ARM64_REG_HID1
834 orr x0, x0, x1
835 msr ARM64_REG_HID1, x0
836 LOAD_UINT64 x1, HID2_DEFEATURES_1
837 mrs x0, ARM64_REG_HID2
838 orr x0, x0, x1
839 msr ARM64_REG_HID2, x0
840 LOAD_UINT64 x1, HID3_DEFEATURES_1
841 mrs x0, ARM64_REG_HID3
842 orr x0, x0, x1
843 msr ARM64_REG_HID3, x0
844 LOAD_UINT64 x1, HID4_DEFEATURES_1
845 mrs x0, ARM64_REG_HID4
846 orr x0, x0, x1
847 msr ARM64_REG_HID4, x0
848 LOAD_UINT64 x1, HID7_DEFEATURES_1
849 mrs x0, ARM64_REG_HID7
850 orr x0, x0, x1
851 msr ARM64_REG_HID7, x0
852 dsb sy
853 isb sy
854 b cpu_defeatures_set_ret
855 cpu_defeatures_set_2:
856 LOAD_UINT64 x1, HID0_DEFEATURES_2
857 mrs x0, ARM64_REG_HID0
858 orr x0, x0, x1
859 msr ARM64_REG_HID0, x0
860 LOAD_UINT64 x1, HID1_DEFEATURES_2
861 mrs x0, ARM64_REG_HID1
862 orr x0, x0, x1
863 msr ARM64_REG_HID1, x0
864 LOAD_UINT64 x1, HID2_DEFEATURES_2
865 mrs x0, ARM64_REG_HID2
866 orr x0, x0, x1
867 msr ARM64_REG_HID2, x0
868 LOAD_UINT64 x1, HID3_DEFEATURES_2
869 mrs x0, ARM64_REG_HID3
870 orr x0, x0, x1
871 msr ARM64_REG_HID3, x0
872 LOAD_UINT64 x1, HID4_DEFEATURES_2
873 mrs x0, ARM64_REG_HID4
874 orr x0, x0, x1
875 msr ARM64_REG_HID4, x0
876 LOAD_UINT64 x1, HID7_DEFEATURES_2
877 mrs x0, ARM64_REG_HID7
878 orr x0, x0, x1
879 msr ARM64_REG_HID7, x0
880 dsb sy
881 isb sy
882 b cpu_defeatures_set_ret
883 cpu_defeatures_set_ret:
884 POP_FRAME
885 ret
886 #endif
887
888 #else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
889 .text
890 .align 2
891 .globl EXT(arm64_prepare_for_sleep)
892 LEXT(arm64_prepare_for_sleep)
893 PUSH_FRAME
894 Lwfi_inst:
895 dsb sy
896 isb sy
897 wfi
898 b Lwfi_inst
899
900 /*
901 * Force WFI to use clock gating only
902 * Note: for non-Apple devices, do nothing.
903 */
904 .text
905 .align 2
906 .globl EXT(arm64_force_wfi_clock_gate)
907 LEXT(arm64_force_wfi_clock_gate)
908 PUSH_FRAME
909 nop
910 POP_FRAME
911
912 #endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
913
914 /*
915 * void arm64_replace_bootstack(cpu_data_t *cpu_data)
916 *
917 * This must be called from a kernel thread context running on the boot CPU,
918 * after setting up new exception stacks in per-CPU data. That will guarantee
919 * that the stack(s) we're trying to replace aren't currently in use. For
920 * KTRR-protected devices, this must also be called prior to VM prot finalization
921 * and lockdown, as updating SP1 requires a sensitive instruction.
922 */
923 .text
924 .align 2
925 .globl EXT(arm64_replace_bootstack)
926 LEXT(arm64_replace_bootstack)
927 ARM64_STACK_PROLOG
928 PUSH_FRAME
929 // Set the exception stack pointer
930 ldr x0, [x0, CPU_EXCEPSTACK_TOP]
931 mrs x4, DAIF // Load current DAIF; use x4 as pinst may trash x1-x3
932 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) // Disable IRQ/FIQ/serror
933 // Set SP_EL1 to exception stack
934 #if defined(KERNEL_INTEGRITY_KTRR)
935 mov x1, lr
936 bl EXT(pinst_spsel_1)
937 mov lr, x1
938 #else
939 msr SPSel, #1
940 #endif
941 mov sp, x0
942 msr SPSel, #0
943 msr DAIF, x4 // Restore interrupt state
944 POP_FRAME
945 ARM64_STACK_EPILOG
946
947 #ifdef MONITOR
948 /*
949 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
950 uintptr_t arg2, uintptr_t arg3)
951 *
952 * Call the EL3 monitor with 4 arguments in registers
953 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
954 * registers are preserved, temporary registers are not. Parameters and results are passed in
955 * the usual manner.
956 */
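/*
 * Caller-side sketch in C. The call number and arguments are illustrative
 * placeholders; the contract taken from the comment above is simply "up to
 * four uintptr_t arguments in, one unsigned long result out, standard
 * procedure-call ABI".
 *
 *	unsigned long result;
 *	result = monitor_call(MONITOR_EXAMPLE_CALL,   // hypothetical call number
 *	                      (uintptr_t)arg_pa, 0, 0);
 */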
957 .text
958 .align 2
959 .globl EXT(monitor_call)
960 LEXT(monitor_call)
961 smc 0x11
962 ret
963 #endif
964
965 #ifdef HAS_APPLE_PAC
966 /**
967 * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc,
968 * uint32_t cpsr, uint64_t lr, uint64_t x16,
969 * uint64_t x17)
970 */
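/*
 * The hash below is a chain of PACGA operations; in C-like pseudocode, with
 * pacga() standing in for the instruction and the field name approximate
 * (SS64_JOPHASH):
 *
 *	h = pacga(pc,  (uint64_t)ss);      // seeded with the saved-state pointer
 *	h = pacga(cpsr & ~PSR_CF, h);      // carry flag excluded, see below
 *	h = pacga(lr,  h);
 *	h = pacga(x16, h);
 *	h = pacga(x17, h);
 *	ss->jophash = h;
 *
 * ml_check_signed_state() recomputes the same chain and panics on a mismatch.
 */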
971 .text
972 .align 2
973 .globl EXT(ml_sign_thread_state)
974 LEXT(ml_sign_thread_state)
975 pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
976 /*
977 * Mask off the carry flag so we don't need to re-sign when that flag is
978 * touched by the system call return path.
979 */
980 bic x2, x2, PSR_CF
981 pacga x1, x2, x1 /* SPSR hash (gkey + pc hash) */
982 pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */
983 pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */
984 pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */
985 str x1, [x0, SS64_JOPHASH]
986 ret
987
988 /**
989 * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc,
990 * uint32_t cpsr, uint64_t lr, uint64_t x16,
991 * uint64_t x17)
992 */
993 .text
994 .align 2
995 .globl EXT(ml_check_signed_state)
996 LEXT(ml_check_signed_state)
997 pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
998 /*
999 * Mask off the carry flag so we don't need to re-sign when that flag is
1000 * touched by the system call return path.
1001 */
1002 bic x2, x2, PSR_CF
1003 pacga x1, x2, x1 /* SPSR hash (gkey + pc hash) */
1004 pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */
1005 pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */
1006 pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */
1007 ldr x2, [x0, SS64_JOPHASH]
1008 cmp x1, x2
1009 b.ne Lcheck_hash_panic
1010 ret
1011 Lcheck_hash_panic:
1012 mov x1, x0
1013 adr x0, Lcheck_hash_str
1014 CALL_EXTERN panic_with_thread_kernel_state
1015 Lcheck_hash_str:
1016 .asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)"
1017 #endif /* HAS_APPLE_PAC */
1018
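/*
 * fill32_dczva zeroes a buffer using DC ZVA, one 64-byte block per iteration.
 * Approximate C prototype (illustrative; argument names are not from this
 * file):
 *
 *	void fill32_dczva(vm_offset_t addr, vm_size_t length);
 *
 * Assumes a 64-byte ZVA granule, that addr is 64-byte aligned, and that
 * length is a non-zero multiple of 64; only usable when the fill pattern is
 * zero.
 */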
1019 .text
1020 .align 2
1021 .globl EXT(fill32_dczva)
1022 LEXT(fill32_dczva)
1023 0:
1024 dc zva, x0
1025 add x0, x0, #64
1026 subs x1, x1, #64
1027 b.hi 0b
1028 ret
1029
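/*
 * fill32_nt fills a buffer with a 32-bit pattern using non-temporal
 * (cache-bypassing) stores, 128 bytes per iteration. Approximate C prototype
 * (illustrative; argument names are not from this file):
 *
 *	void fill32_nt(vm_offset_t addr, vm_size_t length, uint32_t pattern);
 *
 * The pattern is replicated across a 128-bit vector (dup.4s); addr is assumed
 * to be suitably aligned and length a non-zero multiple of 128 bytes.
 */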
1030 .text
1031 .align 2
1032 .globl EXT(fill32_nt)
1033 LEXT(fill32_nt)
1034 dup.4s v0, w2
1035 0:
1036 stnp q0, q0, [x0]
1037 stnp q0, q0, [x0, #0x20]
1038 stnp q0, q0, [x0, #0x40]
1039 stnp q0, q0, [x0, #0x60]
1040 add x0, x0, #128
1041 subs x1, x1, #128
1042 b.hi 0b
1043 ret
1044
1045 /* vim: set sw=4 ts=4: */