1 /*
2 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <machine/asm.h>
30 #include <arm64/exception_asm.h>
31 #include <arm64/machine_machdep.h>
32 #include <arm64/pac_asm.h>
33 #include <arm64/proc_reg.h>
34 #include <arm/pmap.h>
35 #include <pexpert/arm64/board_config.h>
36 #include <sys/errno.h>
37 #include "assym.s"
38
39
40 #if defined(HAS_APPLE_PAC)
41
42 .macro SET_KERN_KEY dst, apctl_el1
43 orr \dst, \apctl_el1, #APCTL_EL1_KernKeyEn
44 .endmacro
45
46 .macro CLEAR_KERN_KEY dst, apctl_el1
47 and \dst, \apctl_el1, #~APCTL_EL1_KernKeyEn
48 .endmacro
49
50 /*
51 * uint64_t ml_enable_user_jop_key(uint64_t user_jop_key)
52 */
53 .align 2
54 .globl EXT(ml_enable_user_jop_key)
55 LEXT(ml_enable_user_jop_key)
56 mov x1, x0
57 mrs x2, TPIDR_EL1
58 ldr x2, [x2, ACT_CPUDATAP]
59 ldr x0, [x2, CPU_JOP_KEY]
60
61 cmp x0, x1
62 b.eq Lskip_program_el0_jop_key
63 /*
64 * We can safely write to the JOP key registers without updating
65 * current_cpu_datap()->jop_key. The complementary
66 * ml_disable_user_jop_key() call will put back the old value. Interrupts
67 * are also disabled, so nothing else will read this field in the meantime.
68 */
69 SET_JOP_KEY_REGISTERS x1, x2
70 Lskip_program_el0_jop_key:
71
72 /*
73 * if (cpu has APCTL_EL1.UserKeyEn) {
74 * set APCTL_EL1.KernKeyEn // KERNKey is mixed into EL0 keys
75 * } else {
76 * clear APCTL_EL1.KernKeyEn // KERNKey is not mixed into EL0 keys
77 * }
78 */
79 mrs x1, ARM64_REG_APCTL_EL1
80 #if defined(HAS_APCTL_EL1_USERKEYEN)
81 SET_KERN_KEY x1, x1
82 #else
83 CLEAR_KERN_KEY x1, x1
84 #endif
85 msr ARM64_REG_APCTL_EL1, x1
86 isb
87 ret
88
89 /*
90 * void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state)
91 */
92 .align 2
93 .globl EXT(ml_disable_user_jop_key)
94 LEXT(ml_disable_user_jop_key)
95 cmp x0, x1
96 b.eq Lskip_program_prev_jop_key
97 SET_JOP_KEY_REGISTERS x1, x2
98 Lskip_program_prev_jop_key:
99
100 /*
101 * if (cpu has APCTL_EL1.UserKeyEn) {
102 * clear APCTL_EL1.KernKeyEn // KERNKey is not mixed into EL1 keys
103 * } else {
104 * set APCTL_EL1.KernKeyEn // KERNKey is mixed into EL1 keys
105 * }
106 */
107 mrs x1, ARM64_REG_APCTL_EL1
108 #if defined(HAS_APCTL_EL1_USERKEYEN)
109 CLEAR_KERN_KEY x1, x1
110 #else
111 SET_KERN_KEY x1, x1
112 #endif
113 msr ARM64_REG_APCTL_EL1, x1
114 isb
115 ret
116
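/*
 * Illustrative pairing of the two routines above (a sketch, not a copy of an
 * actual call site; the x19/x20 register choices are arbitrary). The caller
 * must already have interrupts disabled, per the comment in
 * ml_enable_user_jop_key():
 *
 *     mov     x0, x19                          // x19 = thread's user JOP key
 *     bl      EXT(ml_enable_user_jop_key)
 *     mov     x20, x0                          // keep the returned saved state
 *     ...                                      // touch user PAC-signed state here
 *     mov     x0, x19                          // same user JOP key
 *     mov     x1, x20                          // saved state from the enable call
 *     bl      EXT(ml_disable_user_jop_key)
 */
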
117 #endif /* defined(HAS_APPLE_PAC) */
118
119 #if HAS_BP_RET
120
121 /*
122 * void set_bp_ret(void)
123 * Helper function to enable branch predictor state retention
124 * across ACC sleep
125 */
126
127 .align 2
128 .globl EXT(set_bp_ret)
129 LEXT(set_bp_ret)
130 // Load bpret boot-arg
131 adrp x14, EXT(bp_ret)@page
132 add x14, x14, EXT(bp_ret)@pageoff
133 ldr w14, [x14]
134
135 mrs x13, ARM64_REG_ACC_CFG
136 and x13, x13, (~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift))
137 and x14, x14, #(ARM64_REG_ACC_CFG_bpSlp_mask)
138 orr x13, x13, x14, lsl #(ARM64_REG_ACC_CFG_bpSlp_shift)
139 msr ARM64_REG_ACC_CFG, x13
140
141 ret
142 #endif // HAS_BP_RET
143
144 #if HAS_NEX_PG
145 .align 2
146 .globl EXT(set_nex_pg)
147 LEXT(set_nex_pg)
148 mrs x14, MPIDR_EL1
149 // Skip if this isn't a p-core; NEX powergating isn't available for e-cores
150 and x14, x14, #(MPIDR_PNE)
151 cbz x14, Lnex_pg_done
152
153 // Set the SEG-recommended value of 12 additional reset cycles
154 HID_INSERT_BITS ARM64_REG_HID13, ARM64_REG_HID13_RstCyc_mask, ARM64_REG_HID13_RstCyc_val, x13
155 HID_SET_BITS ARM64_REG_HID14, ARM64_REG_HID14_NexPwgEn, x13
156
157 Lnex_pg_done:
158 ret
159
160 #endif // HAS_NEX_PG
161
162 /* uint32_t get_fpscr(void):
163 * Returns (FPSR | FPCR).
164 */
165 .align 2
166 .globl EXT(get_fpscr)
167 LEXT(get_fpscr)
168 #if __ARM_VFP__
169 mrs x1, FPSR // Grab FPSR
170 mov x4, #(FPSR_MASK & 0xFFFF)
171 mov x5, #(FPSR_MASK & 0xFFFF0000)
172 orr x0, x4, x5
173 and x1, x1, x0 // Be paranoid, and clear bits we expect to
174 // be clear
175 mrs x2, FPCR // Grab FPCR
176 mov x4, #(FPCR_MASK & 0xFFFF)
177 mov x5, #(FPCR_MASK & 0xFFFF0000)
178 orr x0, x4, x5
179 and x2, x2, x0 // Be paranoid, and clear bits we expect to
180 // be clear
181 orr x0, x1, x2 // OR them to get FPSCR equivalent state
182 #else
183 mov x0, #0
184 #endif
185 ret
186 .align 2
187 .globl EXT(set_fpscr)
188 /* void set_fpscr(uint32_t value):
189 * Set the FPCR and FPSR registers, based on the given value; a
190 * noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
191 * and FPCR are not responsible for condition codes.
192 */
193 LEXT(set_fpscr)
194 #if __ARM_VFP__
195 mov x4, #(FPSR_MASK & 0xFFFF)
196 mov x5, #(FPSR_MASK & 0xFFFF0000)
197 orr x1, x4, x5
198 and x1, x1, x0 // Clear the bits that don't apply to FPSR
199 mov x4, #(FPCR_MASK & 0xFFFF)
200 mov x5, #(FPCR_MASK & 0xFFFF0000)
201 orr x2, x4, x5
202 and x2, x2, x0 // Clear the bits that don't apply to FPCR
203 msr FPSR, x1 // Write FPSR
204 msr FPCR, x2 // Write FPCR
205 dsb ish // FPCR requires synchronization
206 #endif
207 ret
208
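/*
 * Read-modify-write sketch using the pair above (illustrative only; FPCR.DN,
 * bit 25, is chosen purely as an example):
 *
 *     bl      EXT(get_fpscr)                   // w0 = merged FPSR | FPCR view
 *     orr     w0, w0, #(1 << 25)               // e.g. set FPCR.DN (default NaN)
 *     bl      EXT(set_fpscr)                   // value is split back into FPSR/FPCR
 */
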
209 /*
210 * void update_mdscr(unsigned long clear, unsigned long set)
211 * Clears and sets the specified bits in MDSCR_EL1.
212 *
213 * Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
214 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
215 * self-hosted user mode debug. Any checks before the MSR can be skipped with ROP,
216 * so we need to put the checks after the MSR where they can't be skipped. That
217 * still leaves a small window if a breakpoint is set on the instruction
218 * immediately after the MSR. To handle that, we also do a check and then set of
219 * the breakpoint control registers. This allows us to guarantee that a given
220 * core will never have both KDE set and a breakpoint targeting EL1.
221 *
222 * If KDE gets set, unset it and then panic
223 */
224 .align 2
225 .globl EXT(update_mdscr)
226 LEXT(update_mdscr)
227 mov x4, #0
228 mrs x2, MDSCR_EL1
229 bic x2, x2, x0
230 orr x2, x2, x1
231 1:
232 bic x2, x2, #0x2000
233 msr MDSCR_EL1, x2
234 #if defined(CONFIG_KERNEL_INTEGRITY)
235 /*
236 * verify KDE didn't get set (including via ROP)
237 * If set, clear it and then panic
238 */
239 ands x3, x2, #0x2000
240 orr x4, x4, x3
241 bne 1b
242 cmp x4, xzr
243 b.ne Lupdate_mdscr_panic
244 #endif
245 ret
246
247 Lupdate_mdscr_panic:
248 adrp x0, Lupdate_mdscr_panic_str@page
249 add x0, x0, Lupdate_mdscr_panic_str@pageoff
250 b EXT(panic)
251 b .
252
253 Lupdate_mdscr_panic_str:
254 .asciz "MDSCR.KDE was set"
255
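/*
 * Illustrative call sketch for update_mdscr() (hypothetical values; MDSCR_EL1.SS
 * is bit 0):
 *
 *     mov     x0, #0                           // clear: nothing
 *     mov     x1, #1                           // set: MDSCR_EL1.SS (single step)
 *     bl      EXT(update_mdscr)
 *
 * Bit 13 (KDE) can never be set through this path: the loop above strips it on
 * every pass, and CONFIG_KERNEL_INTEGRITY builds panic if it is ever observed.
 */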
256
257 /*
258 * Set MMU Translation Table Base Alternate
259 */
260 .text
261 .align 2
262 .globl EXT(set_mmu_ttb_alternate)
263 LEXT(set_mmu_ttb_alternate)
264 dsb sy
265 #if defined(KERNEL_INTEGRITY_KTRR)
266 mov x1, lr
267 bl EXT(pinst_set_ttbr1)
268 mov lr, x1
269 #else
270 #if defined(HAS_VMSA_LOCK)
271 #if DEBUG || DEVELOPMENT
272 mrs x1, ARM64_REG_VMSA_LOCK_EL1
273 and x1, x1, #(VMSA_LOCK_TTBR1_EL1)
274 cbnz x1, L_set_locked_reg_panic
275 #endif /* DEBUG || DEVELOPMENT */
276 #endif /* defined(HAS_VMSA_LOCK) */
277 msr TTBR1_EL1, x0
278 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
279 isb sy
280 ret
281
282 #if XNU_MONITOR
283 .section __PPLTEXT,__text,regular,pure_instructions
284 #else
285 .text
286 #endif
287 .align 2
288 .globl EXT(set_mmu_ttb)
289 LEXT(set_mmu_ttb)
290 #if __ARM_KERNEL_PROTECT__
291 /* All EL1-mode ASIDs are odd. */
292 orr x0, x0, #(1 << TTBR_ASID_SHIFT)
293 #endif /* __ARM_KERNEL_PROTECT__ */
294 dsb ish
295 msr TTBR0_EL1, x0
296 isb sy
297 ret
298
299
300 #if XNU_MONITOR
301 .text
302 .align 2
303 .globl EXT(ml_get_ppl_cpu_data)
304 LEXT(ml_get_ppl_cpu_data)
305 LOAD_PMAP_CPU_DATA x0, x1, x2
306 ret
307 #endif
308
309 /*
310 * set AUX control register
311 */
312 .text
313 .align 2
314 .globl EXT(set_aux_control)
315 LEXT(set_aux_control)
316 msr ACTLR_EL1, x0
317 // Synchronize system
318 isb sy
319 ret
320
321 #if __ARM_KERNEL_PROTECT__
322 .text
323 .align 2
324 .globl EXT(set_vbar_el1)
325 LEXT(set_vbar_el1)
326 #if defined(KERNEL_INTEGRITY_KTRR)
327 b EXT(pinst_set_vbar)
328 #else
329 msr VBAR_EL1, x0
330 ret
331 #endif
332 #endif /* __ARM_KERNEL_PROTECT__ */
333
334 #if defined(HAS_VMSA_LOCK)
335 .text
336 .align 2
337 .globl EXT(vmsa_lock)
338 LEXT(vmsa_lock)
339 isb sy
340 mov x1, #(VMSA_LOCK_SCTLR_M_BIT)
341 #if __ARM_MIXED_PAGE_SIZE__
342 mov x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_VBAR_EL1)
343 #else
344 mov x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_TCR_EL1 | VMSA_LOCK_VBAR_EL1)
345 #endif
346 orr x0, x0, x1
347 msr ARM64_REG_VMSA_LOCK_EL1, x0
348 isb sy
349 ret
350 #endif /* defined(HAS_VMSA_LOCK) */
351
352 /*
353 * set translation control register
354 */
355 .text
356 .align 2
357 .globl EXT(set_tcr)
358 LEXT(set_tcr)
359 #if defined(APPLE_ARM64_ARCH_FAMILY)
360 #if DEBUG || DEVELOPMENT
361 // Assert that T0SZ is always equal to T1SZ
362 eor x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
363 and x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
364 cbnz x1, L_set_tcr_panic
365 #endif /* DEBUG || DEVELOPMENT */
366 #endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
367 #if defined(KERNEL_INTEGRITY_KTRR)
368 mov x1, lr
369 bl EXT(pinst_set_tcr)
370 mov lr, x1
371 #else
372 #if defined(HAS_VMSA_LOCK)
373 #if DEBUG || DEVELOPMENT
374 // assert TCR unlocked
375 mrs x1, ARM64_REG_VMSA_LOCK_EL1
376 and x1, x1, #(VMSA_LOCK_TCR_EL1)
377 cbnz x1, L_set_locked_reg_panic
378 #endif /* DEBUG || DEVELOPMENT */
379 #endif /* defined(HAS_VMSA_LOCK) */
380 msr TCR_EL1, x0
381 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
382 isb sy
383 ret
384
385 #if DEBUG || DEVELOPMENT
386 L_set_tcr_panic:
387 PUSH_FRAME
388 sub sp, sp, #16
389 str x0, [sp]
390 adr x0, L_set_tcr_panic_str
391 BRANCH_EXTERN panic
392
393 L_set_locked_reg_panic:
394 PUSH_FRAME
395 sub sp, sp, #16
396 str x0, [sp]
397 adr x0, L_set_locked_reg_panic_str
398 BRANCH_EXTERN panic
399 b .
400
401 L_set_tcr_panic_str:
402 .asciz "set_tcr: t0sz, t1sz not equal (%llx)\n"
403
404
405 L_set_locked_reg_panic_str:
406 .asciz "attempt to set locked register: (%llx)\n"
407 #endif /* DEBUG || DEVELOPMENT */
408
409 /*
410 * MMU kernel virtual to physical address translation
411 */
412 .text
413 .align 2
414 .globl EXT(mmu_kvtop)
415 LEXT(mmu_kvtop)
416 mrs x2, DAIF // Load current DAIF
417 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ/FIQ
418 at s1e1r, x0 // Translation Stage 1 EL1
419 isb sy
420 mrs x1, PAR_EL1 // Read result
421 msr DAIF, x2 // Restore interrupt state
422 tbnz x1, #0, L_mmu_kvtop_invalid // Test Translation not valid
423 bfm x1, x0, #0, #11 // Add page offset
424 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
425 ret
426 L_mmu_kvtop_invalid:
427 mov x0, #0 // Return invalid
428 ret
429
430 /*
431 * MMU user virtual to physical address translation
432 */
433 .text
434 .align 2
435 .globl EXT(mmu_uvtop)
436 LEXT(mmu_uvtop)
437 lsr x8, x0, #56 // Extract top byte
438 cbnz x8, L_mmu_uvtop_invalid // Tagged pointers are invalid
439 mrs x2, DAIF // Load current DAIF
440 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ/FIQ
441 at s1e0r, x0 // Translation Stage 1 EL0
442 isb sy
443 mrs x1, PAR_EL1 // Read result
444 msr DAIF, x2 // Restore interrupt state
445 tbnz x1, #0, L_mmu_uvtop_invalid // Test Translation not valid
446 bfm x1, x0, #0, #11 // Add page offset
447 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
448 ret
449 L_mmu_uvtop_invalid:
450 mov x0, #0 // Return invalid
451 ret
452
453 /*
454 * MMU kernel virtual to physical address preflight write access
455 */
456 .text
457 .align 2
458 .globl EXT(mmu_kvtop_wpreflight)
459 LEXT(mmu_kvtop_wpreflight)
460 mrs x2, DAIF // Load current DAIF
461 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ/FIQ
462 at s1e1w, x0 // Translation Stage 1 EL1
463 mrs x1, PAR_EL1 // Read result
464 msr DAIF, x2 // Restore interrupt state
465 tbnz x1, #0, L_mmu_kvtop_wpreflight_invalid // Test Translation not valid
466 bfm x1, x0, #0, #11 // Add page offset
467 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
468 ret
469 L_mmu_kvtop_wpreflight_invalid:
470 mov x0, #0 // Return invalid
471 ret
472
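/*
 * How the three translation helpers above decode PAR_EL1 (a sketch of the
 * fields actually used, not a full description of the register):
 *   bit  0       F  - 1 if the AT instruction faulted (the L_*_invalid paths)
 *   bits 47:12   PA - physical address of the translated page
 * The "bfm x1, x0, #0, #11" then merges the low 12 bits of the input VA (the
 * page offset) into the result before the final address mask.
 */
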
473 /*
474 * SET_RECOVERY_HANDLER
475 *
476 * Sets up a page fault recovery handler. This macro clobbers x16 and x17.
477 *
478 * label - recovery label
479 * tpidr - persisted thread pointer
480 * old_handler - persisted recovery handler
481 * label_in_adr_range - whether \label is within 1 MB of PC
482 */
483 .macro SET_RECOVERY_HANDLER label, tpidr=x16, old_handler=x10, label_in_adr_range=0
484 // Note: x16 and x17 are designated for use as temporaries in
485 // interruptible PAC routines. DO NOT CHANGE THESE REGISTER ASSIGNMENTS.
486 .if \label_in_adr_range==1 // Load the recovery handler address
487 adr x17, \label
488 .else
489 adrp x17, \label@page
490 add x17, x17, \label@pageoff
491 .endif
492 #if defined(HAS_APPLE_PAC)
493 mrs x16, TPIDR_EL1
494 add x16, x16, TH_RECOVER
495 movk x16, #PAC_DISCRIMINATOR_RECOVER, lsl 48
496 pacia x17, x16 // Sign with IAKey + blended discriminator
497 #endif
498
499 mrs \tpidr, TPIDR_EL1 // Load thread pointer
500 ldr \old_handler, [\tpidr, TH_RECOVER] // Save previous recovery handler
501 str x17, [\tpidr, TH_RECOVER] // Set new signed recovery handler
502 .endmacro
503
504 /*
505 * CLEAR_RECOVERY_HANDLER
506 *
507 * Clears page fault handler set by SET_RECOVERY_HANDLER
508 *
509 * tpidr - thread pointer saved by SET_RECOVERY_HANDLER
510 * old_handler - old recovery handler saved by SET_RECOVERY_HANDLER
511 */
512 .macro CLEAR_RECOVERY_HANDLER tpidr=x16, old_handler=x10
513 str \old_handler, [\tpidr, TH_RECOVER] // Restore the previous recovery handler
514 .endmacro
515
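/*
 * Typical shape of a user of the two macros above (a sketch; Lmy_fault is a
 * hypothetical label, and _bcopyin below is a real example). The handler must
 * be cleared on every exit path, including the fault path:
 *
 *     SET_RECOVERY_HANDLER    Lmy_fault
 *     ldr     x3, [x0]                         // may fault on a user address
 *     CLEAR_RECOVERY_HANDLER
 */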
516
517 .text
518 .align 2
519 copyio_error:
520 CLEAR_RECOVERY_HANDLER
521 mov x0, #EFAULT // Return an EFAULT error
522 POP_FRAME
523 ARM64_STACK_EPILOG
524
525 /*
526 * int _bcopyin(const char *src, char *dst, vm_size_t len)
527 */
528 .text
529 .align 2
530 .globl EXT(_bcopyin)
531 LEXT(_bcopyin)
532 ARM64_STACK_PROLOG
533 PUSH_FRAME
534 SET_RECOVERY_HANDLER copyio_error
535 /* If len is less than 16 bytes, just do a bytewise copy */
536 cmp x2, #16
537 b.lt 2f
538 sub x2, x2, #16
539 1:
540 /* 16 bytes at a time */
541 ldp x3, x4, [x0], #16
542 stp x3, x4, [x1], #16
543 subs x2, x2, #16
544 b.ge 1b
545 /* Fixup the len and test for completion */
546 adds x2, x2, #16
547 b.eq 3f
548 2: /* Bytewise */
549 subs x2, x2, #1
550 ldrb w3, [x0], #1
551 strb w3, [x1], #1
552 b.hi 2b
553 3:
554 CLEAR_RECOVERY_HANDLER
555 mov x0, #0
556 POP_FRAME
557 ARM64_STACK_EPILOG
558
559 /*
560 * int _copyin_atomic32(const char *src, uint32_t *dst)
561 */
562 .text
563 .align 2
564 .globl EXT(_copyin_atomic32)
565 LEXT(_copyin_atomic32)
566 ARM64_STACK_PROLOG
567 PUSH_FRAME
568 SET_RECOVERY_HANDLER copyio_error
569 ldr w8, [x0]
570 str w8, [x1]
571 mov x0, #0
572 CLEAR_RECOVERY_HANDLER
573 POP_FRAME
574 ARM64_STACK_EPILOG
575
576 /*
577 * int _copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
578 */
579 .text
580 .align 2
581 .globl EXT(_copyin_atomic32_wait_if_equals)
582 LEXT(_copyin_atomic32_wait_if_equals)
583 ARM64_STACK_PROLOG
584 PUSH_FRAME
585 SET_RECOVERY_HANDLER copyio_error
586 ldxr w8, [x0]
587 cmp w8, w1
588 mov x0, ESTALE
589 b.ne 1f
590 mov x0, #0
591 wfe
592 1:
593 clrex
594 CLEAR_RECOVERY_HANDLER
595 POP_FRAME
596 ARM64_STACK_EPILOG
597
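/*
 * Note on _copyin_atomic32_wait_if_equals above: the load-exclusive arms the
 * exclusive monitor for the user address, so the wfe is woken when another
 * agent writes that location (clearing the monitor generates an event), when
 * an interrupt is pended, or spuriously; the caller is expected to re-check
 * the value and loop as needed.
 */
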
598 /*
599 * int _copyin_atomic64(const char *src, uint64_t *dst)
600 */
601 .text
602 .align 2
603 .globl EXT(_copyin_atomic64)
604 LEXT(_copyin_atomic64)
605 ARM64_STACK_PROLOG
606 PUSH_FRAME
607 SET_RECOVERY_HANDLER copyio_error
608 ldr x8, [x0]
609 str x8, [x1]
610 mov x0, #0
611 CLEAR_RECOVERY_HANDLER
612 POP_FRAME
613 ARM64_STACK_EPILOG
614
615
616 /*
617 * int _copyout_atomic32(uint32_t value, char *dst)
618 */
619 .text
620 .align 2
621 .globl EXT(_copyout_atomic32)
622 LEXT(_copyout_atomic32)
623 ARM64_STACK_PROLOG
624 PUSH_FRAME
625 SET_RECOVERY_HANDLER copyio_error
626 str w0, [x1]
627 mov x0, #0
628 CLEAR_RECOVERY_HANDLER
629 POP_FRAME
630 ARM64_STACK_EPILOG
631
632 /*
633 * int _copyout_atomic64(uint64_t value, char *dst)
634 */
635 .text
636 .align 2
637 .globl EXT(_copyout_atomic64)
638 LEXT(_copyout_atomic64)
639 ARM64_STACK_PROLOG
640 PUSH_FRAME
641 SET_RECOVERY_HANDLER copyio_error
642 str x0, [x1]
643 mov x0, #0
644 CLEAR_RECOVERY_HANDLER
645 POP_FRAME
646 ARM64_STACK_EPILOG
647
648
649 /*
650 * int _bcopyout(const char *src, char *dst, vm_size_t len)
651 */
652 .text
653 .align 2
654 .globl EXT(_bcopyout)
655 LEXT(_bcopyout)
656 ARM64_STACK_PROLOG
657 PUSH_FRAME
658 SET_RECOVERY_HANDLER copyio_error
659 /* If len is less than 16 bytes, just do a bytewise copy */
660 cmp x2, #16
661 b.lt 2f
662 sub x2, x2, #16
663 1:
664 /* 16 bytes at a time */
665 ldp x3, x4, [x0], #16
666 stp x3, x4, [x1], #16
667 subs x2, x2, #16
668 b.ge 1b
669 /* Fixup the len and test for completion */
670 adds x2, x2, #16
671 b.eq 3f
672 2: /* Bytewise */
673 subs x2, x2, #1
674 ldrb w3, [x0], #1
675 strb w3, [x1], #1
676 b.hi 2b
677 3:
678 CLEAR_RECOVERY_HANDLER
679 mov x0, #0
680 POP_FRAME
681 ARM64_STACK_EPILOG
682
683 /*
684 * int _bcopyinstr(
685 * const user_addr_t user_addr,
686 * char *kernel_addr,
687 * vm_size_t max,
688 * vm_size_t *actual)
689 */
690 .text
691 .align 2
692 .globl EXT(_bcopyinstr)
693 LEXT(_bcopyinstr)
694 ARM64_STACK_PROLOG
695 PUSH_FRAME
696 SET_RECOVERY_HANDLER Lcopyinstr_error, label_in_adr_range=1
697 mov x4, #0 // x4 - total bytes copied
698 Lcopyinstr_loop:
699 ldrb w5, [x0], #1 // Load a byte from the user source
700 strb w5, [x1], #1 // Store a byte to the kernel dest
701 add x4, x4, #1 // Increment bytes copied
702 cbz x5, Lcopyinstr_done // If this byte is null, we're done
703 cmp x4, x2 // If we're out of space, return an error
704 b.ne Lcopyinstr_loop
705 Lcopyinstr_too_long:
706 mov x5, #ENAMETOOLONG // Set current byte to error code for later return
707 Lcopyinstr_done:
708 str x4, [x3] // Return number of bytes copied
709 mov x0, x5 // Set error code (0 on success, ENAMETOOLONG on failure)
710 b Lcopyinstr_exit
711 Lcopyinstr_error:
712 mov x0, #EFAULT // Return EFAULT on error
713 Lcopyinstr_exit:
714 CLEAR_RECOVERY_HANDLER
715 POP_FRAME
716 ARM64_STACK_EPILOG
717
718 /*
719 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
720 *
721 * Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
722 * either user or kernel memory, or 8 bytes (AArch32) from user only.
723 *
724 * x0 : address of frame to copy.
725 * x1 : kernel address at which to store data.
726 * w2 : whether to copy an AArch32 or AArch64 frame.
727 * x3 : temp
728 * x5 : temp (kernel virtual base)
729 * x9 : temp
730 * x10 : old recovery function (set by SET_RECOVERY_HANDLER)
731 * x12, x13 : backtrace data
732 * x16 : thread pointer (set by SET_RECOVERY_HANDLER)
733 *
734 */
735 .text
736 .align 2
737 .globl EXT(copyinframe)
738 LEXT(copyinframe)
739 ARM64_STACK_PROLOG
740 PUSH_FRAME
741 SET_RECOVERY_HANDLER copyio_error
742 cbnz w2, Lcopyinframe64 // Check frame size
743 adrp x5, EXT(gVirtBase)@page // For 32-bit frame, make sure we're not trying to copy from kernel
744 add x5, x5, EXT(gVirtBase)@pageoff
745 ldr x5, [x5]
746 cmp x5, x0 // See if address is in kernel virtual range
747 b.hi Lcopyinframe32 // If below kernel virtual range, proceed.
748 mov w0, #EFAULT // Should never have a 32-bit frame in kernel virtual range
749 b Lcopyinframe_done
750
751 Lcopyinframe32:
752 ldr x12, [x0] // Copy 8 bytes
753 str x12, [x1]
754 mov w0, #0 // Success
755 b Lcopyinframe_done
756
757 Lcopyinframe64:
758 mov x3, VM_MIN_KERNEL_ADDRESS // Check if kernel address
759 orr x9, x0, TBI_MASK // Hide tags in address comparison
760 cmp x9, x3 // If in kernel address range, skip tag test
761 b.hs Lcopyinframe_valid
762 tst x0, TBI_MASK // Detect tagged pointers
763 b.eq Lcopyinframe_valid
764 mov w0, #EFAULT // Tagged address, fail
765 b Lcopyinframe_done
766 Lcopyinframe_valid:
767 ldp x12, x13, [x0] // Copy 16 bytes
768 stp x12, x13, [x1]
769 mov w0, #0 // Success
770
771 Lcopyinframe_done:
772 CLEAR_RECOVERY_HANDLER
773 POP_FRAME
774 ARM64_STACK_EPILOG
775
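/*
 * Note on the 64-bit path of copyinframe above: or-ing TBI_MASK into the
 * candidate address before the range compare lets kernel pointers pass the
 * check regardless of their top byte, while any remaining (user) pointer with
 * a non-zero tag byte is rejected by the tst/b.eq pair.
 */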
776
777 /*
778 * uint32_t arm_debug_read_dscr(void)
779 */
780 .text
781 .align 2
782 .globl EXT(arm_debug_read_dscr)
783 LEXT(arm_debug_read_dscr)
784 PANIC_UNIMPLEMENTED
785
786 /*
787 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
788 *
789 * Set debug registers to match the current thread state
790 * (NULL to disable). Assume 6 breakpoints and 2
791 * watchpoints, since that has been the case in all cores
792 * thus far.
793 */
794 .text
795 .align 2
796 .globl EXT(arm_debug_set_cp14)
797 LEXT(arm_debug_set_cp14)
798 PANIC_UNIMPLEMENTED
799
800 #if defined(APPLE_ARM64_ARCH_FAMILY)
801 /*
802 * Note: still have to ISB before executing wfi!
803 */
804 .text
805 .align 2
806 .globl EXT(arm64_prepare_for_sleep)
807 LEXT(arm64_prepare_for_sleep)
808 PUSH_FRAME
809
810 #if defined(APPLETYPHOON)
811 // <rdar://problem/15827409>
812 HID_SET_BITS ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x9
813 dsb sy
814 isb sy
815 #endif
816
817 #if HAS_CLUSTER
818 cbnz x0, 1f // Skip if deep_sleep == true
819 // Mask FIQ and IRQ to avoid spurious wakeups
820 mrs x9, ARM64_REG_CYC_OVRD
821 and x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
822 mov x10, #(ARM64_REG_CYC_OVRD_irq_disable | ARM64_REG_CYC_OVRD_fiq_disable)
823 orr x9, x9, x10
824 msr ARM64_REG_CYC_OVRD, x9
825 isb
826 1:
827 #endif
828
829 cbz x0, 1f // Skip if deep_sleep == false
830 #if __ARM_GLOBAL_SLEEP_BIT__
831 // Enable deep sleep
832 mrs x1, ARM64_REG_ACC_OVRD
833 orr x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
834 and x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
835 orr x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
836 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
837 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
838 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
839 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
840 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
841 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
842 #if HAS_RETENTION_STATE
843 orr x1, x1, #(ARM64_REG_ACC_OVRD_disPioOnWfiCpu)
844 #endif
845 msr ARM64_REG_ACC_OVRD, x1
846
847
848 #else
849 // Enable deep sleep
850 mov x1, ARM64_REG_CYC_CFG_deepSleep
851 msr ARM64_REG_CYC_CFG, x1
852 #endif
853
854 1:
855 // Set "OK to power down" (<rdar://problem/12390433>)
856 mrs x9, ARM64_REG_CYC_OVRD
857 orr x9, x9, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
858 #if HAS_RETENTION_STATE
859 orr x9, x9, #(ARM64_REG_CYC_OVRD_disWfiRetn)
860 #endif
861 msr ARM64_REG_CYC_OVRD, x9
862
863 #if defined(APPLEMONSOON) || defined(APPLEVORTEX)
864 ARM64_IS_PCORE x9
865 cbz x9, Lwfi_inst // skip if not p-core
866
867 /* <rdar://problem/32512947>: Flush the GUPS prefetcher prior to
868 * wfi. A Skye HW bug can cause the GUPS prefetcher on p-cores
869 * to be left with valid entries that fail to drain if a
870 * subsequent wfi is issued. This can prevent the core from
871 * power-gating. For the idle case that is recoverable, but
872 * for the deep-sleep (S2R) case in which cores MUST power-gate,
873 * it can lead to a hang. This can be prevented by disabling
874 * and re-enabling GUPS, which forces the prefetch queue to
875 * drain. This should be done as close to wfi as possible, i.e.
876 * at the very end of arm64_prepare_for_sleep(). */
877 #if defined(APPLEVORTEX)
878 /* <rdar://problem/32821461>: Cyprus A0/A1 parts have a similar
879 * bug in the HSP prefetcher that can be worked around through
880 * the same method mentioned above for Skye. */
881 mrs x9, MIDR_EL1
882 EXEC_COREALL_REVLO CPU_VERSION_B0, x9, x10
883 #endif
884 mrs x9, ARM64_REG_HID10
885 orr x9, x9, #(ARM64_REG_HID10_DisHwpGups)
886 msr ARM64_REG_HID10, x9
887 isb sy
888 and x9, x9, #(~(ARM64_REG_HID10_DisHwpGups))
889 msr ARM64_REG_HID10, x9
890 isb sy
891 #endif
892 EXEC_END
893
894 Lwfi_inst:
895 dsb sy
896 isb sy
897 wfi
898 b Lwfi_inst
899
900 /*
901 * Force WFI to use clock gating only
902 *
903 */
904 .text
905 .align 2
906 .globl EXT(arm64_force_wfi_clock_gate)
907 LEXT(arm64_force_wfi_clock_gate)
908 ARM64_STACK_PROLOG
909 PUSH_FRAME
910
911 mrs x0, ARM64_REG_CYC_OVRD
912 orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
913 msr ARM64_REG_CYC_OVRD, x0
914
915 POP_FRAME
916 ARM64_STACK_EPILOG
917
918
919 #if HAS_RETENTION_STATE
920 .text
921 .align 2
922 .globl EXT(arm64_retention_wfi)
923 LEXT(arm64_retention_wfi)
924 wfi
925 cbz lr, Lwfi_retention // If lr is 0, we entered retention state and lost all GPRs except sp and pc
926 ret // Otherwise just return to cpu_idle()
927 Lwfi_retention:
928 mov x0, #1
929 bl EXT(ClearIdlePop)
930 mov x0, #0
931 bl EXT(cpu_idle_exit) // cpu_idle_exit(from_reset = FALSE)
932 b . // cpu_idle_exit() should never return
933 #endif
934
935 #if defined(APPLETYPHOON)
936
937 .text
938 .align 2
939 .globl EXT(typhoon_prepare_for_wfi)
940
941 LEXT(typhoon_prepare_for_wfi)
942 PUSH_FRAME
943
944 // <rdar://problem/15827409>
945 HID_SET_BITS ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0
946 dsb sy
947 isb sy
948
949 POP_FRAME
950 ret
951
952
953 .text
954 .align 2
955 .globl EXT(typhoon_return_from_wfi)
956 LEXT(typhoon_return_from_wfi)
957 PUSH_FRAME
958
959 // <rdar://problem/15827409>
960 HID_CLEAR_BITS ARM64_REG_HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0
961 dsb sy
962 isb sy
963
964 POP_FRAME
965 ret
966 #endif
967
968 #ifdef APPLETYPHOON
969
970 #define HID0_DEFEATURES_1 0x0000a0c000064010ULL
971 #define HID1_DEFEATURES_1 0x000000004005bf20ULL
972 #define HID2_DEFEATURES_1 0x0000000000102074ULL
973 #define HID3_DEFEATURES_1 0x0000000000400003ULL
974 #define HID4_DEFEATURES_1 0x83ff00e100000268ULL
975 #define HID7_DEFEATURES_1 0x000000000000000eULL
976
977 #define HID0_DEFEATURES_2 0x0000a1c000020010ULL
978 #define HID1_DEFEATURES_2 0x000000000005d720ULL
979 #define HID2_DEFEATURES_2 0x0000000000002074ULL
980 #define HID3_DEFEATURES_2 0x0000000000400001ULL
981 #define HID4_DEFEATURES_2 0x8390000200000208ULL
982 #define HID7_DEFEATURES_2 0x0000000000000000ULL
983
984 /*
985 arg0 = target register
986 arg1 = 64-bit constant
987 */
988 .macro LOAD_UINT64
989 movz $0, #(($1 >> 48) & 0xffff), lsl #48
990 movk $0, #(($1 >> 32) & 0xffff), lsl #32
991 movk $0, #(($1 >> 16) & 0xffff), lsl #16
992 movk $0, #(($1) & 0xffff)
993 .endmacro
994
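/*
 * Example expansion of the macro above (illustrative):
 *
 *     LOAD_UINT64 x1, 0x0000a0c000064010
 *
 * becomes
 *
 *     movz    x1, #0x0000, lsl #48
 *     movk    x1, #0xa0c0, lsl #32
 *     movk    x1, #0x0006, lsl #16
 *     movk    x1, #0x4010
 */
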
995 .text
996 .align 2
997 .globl EXT(cpu_defeatures_set)
998 LEXT(cpu_defeatures_set)
999 PUSH_FRAME
1000 cmp x0, #2
1001 b.eq cpu_defeatures_set_2
1002 cmp x0, #1
1003 b.ne cpu_defeatures_set_ret
1004 LOAD_UINT64 x1, HID0_DEFEATURES_1
1005 mrs x0, ARM64_REG_HID0
1006 orr x0, x0, x1
1007 msr ARM64_REG_HID0, x0
1008 LOAD_UINT64 x1, HID1_DEFEATURES_1
1009 mrs x0, ARM64_REG_HID1
1010 orr x0, x0, x1
1011 msr ARM64_REG_HID1, x0
1012 LOAD_UINT64 x1, HID2_DEFEATURES_1
1013 mrs x0, ARM64_REG_HID2
1014 orr x0, x0, x1
1015 msr ARM64_REG_HID2, x0
1016 LOAD_UINT64 x1, HID3_DEFEATURES_1
1017 mrs x0, ARM64_REG_HID3
1018 orr x0, x0, x1
1019 msr ARM64_REG_HID3, x0
1020 LOAD_UINT64 x1, HID4_DEFEATURES_1
1021 mrs x0, ARM64_REG_HID4
1022 orr x0, x0, x1
1023 msr ARM64_REG_HID4, x0
1024 LOAD_UINT64 x1, HID7_DEFEATURES_1
1025 mrs x0, ARM64_REG_HID7
1026 orr x0, x0, x1
1027 msr ARM64_REG_HID7, x0
1028 dsb sy
1029 isb sy
1030 b cpu_defeatures_set_ret
1031 cpu_defeatures_set_2:
1032 LOAD_UINT64 x1, HID0_DEFEATURES_2
1033 mrs x0, ARM64_REG_HID0
1034 orr x0, x0, x1
1035 msr ARM64_REG_HID0, x0
1036 LOAD_UINT64 x1, HID1_DEFEATURES_2
1037 mrs x0, ARM64_REG_HID1
1038 orr x0, x0, x1
1039 msr ARM64_REG_HID1, x0
1040 LOAD_UINT64 x1, HID2_DEFEATURES_2
1041 mrs x0, ARM64_REG_HID2
1042 orr x0, x0, x1
1043 msr ARM64_REG_HID2, x0
1044 LOAD_UINT64 x1, HID3_DEFEATURES_2
1045 mrs x0, ARM64_REG_HID3
1046 orr x0, x0, x1
1047 msr ARM64_REG_HID3, x0
1048 LOAD_UINT64 x1, HID4_DEFEATURES_2
1049 mrs x0, ARM64_REG_HID4
1050 orr x0, x0, x1
1051 msr ARM64_REG_HID4, x0
1052 LOAD_UINT64 x1, HID7_DEFEATURES_2
1053 mrs x0, ARM64_REG_HID7
1054 orr x0, x0, x1
1055 msr ARM64_REG_HID7, x0
1056 dsb sy
1057 isb sy
1058 b cpu_defeatures_set_ret
1059 cpu_defeatures_set_ret:
1060 POP_FRAME
1061 ret
1062 #endif
1063
1064 #else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
1065 .text
1066 .align 2
1067 .globl EXT(arm64_prepare_for_sleep)
1068 LEXT(arm64_prepare_for_sleep)
1069 PUSH_FRAME
1070 Lwfi_inst:
1071 dsb sy
1072 isb sy
1073 wfi
1074 b Lwfi_inst
1075
1076 /*
1077 * Force WFI to use clock gating only
1078 * Note: for non-Apple devices, do nothing.
1079 */
1080 .text
1081 .align 2
1082 .globl EXT(arm64_force_wfi_clock_gate)
1083 LEXT(arm64_force_wfi_clock_gate)
1084 PUSH_FRAME
1085 nop
1086 POP_FRAME
1087 ret
1088 #endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
1089
1090 /*
1091 * void arm64_replace_bootstack(cpu_data_t *cpu_data)
1092 *
1093 * This must be called from a kernel thread context running on the boot CPU,
1094 * after setting up new exception stacks in per-CPU data. That will guarantee
1095 * that the stack(s) we're trying to replace aren't currently in use. For
1096 * KTRR-protected devices, this must also be called prior to VM prot finalization
1097 * and lockdown, as updating SP1 requires a sensitive instruction.
1098 */
1099 .text
1100 .align 2
1101 .globl EXT(arm64_replace_bootstack)
1102 LEXT(arm64_replace_bootstack)
1103 ARM64_STACK_PROLOG
1104 PUSH_FRAME
1105 // Set the exception stack pointer
1106 ldr x0, [x0, CPU_EXCEPSTACK_TOP]
1107 mrs x4, DAIF // Load current DAIF; use x4 as pinst may trash x1-x3
1108 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) // Disable IRQ/FIQ/serror
1109 // Set SP_EL1 to exception stack
1110 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
1111 mov x1, lr
1112 bl EXT(pinst_spsel_1)
1113 mov lr, x1
1114 #else
1115 msr SPSel, #1
1116 #endif
1117 mov sp, x0
1118 msr SPSel, #0
1119 msr DAIF, x4 // Restore interrupt state
1120 POP_FRAME
1121 ARM64_STACK_EPILOG
1122
1123 #ifdef MONITOR
1124 /*
1125 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
1126 * uintptr_t arg2, uintptr_t arg3)
1127 *
1128 * Call the EL3 monitor with 4 arguments in registers
1129 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
1130 * registers are preserved, temporary registers are not. Parameters and results are passed in
1131 * the usual manner.
1132 */
1133 .text
1134 .align 2
1135 .globl EXT(monitor_call)
1136 LEXT(monitor_call)
1137 smc 0x11
1138 ret
1139 #endif
1140
1141 #ifdef HAS_APPLE_PAC
1142 /*
1143 * SIGN_THREAD_STATE
1144 *
1145 * Macro that signs thread state.
1146 * $0 - Offset in arm_saved_state to store JOPHASH value.
1147 */
1148 .macro SIGN_THREAD_STATE
1149 pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
1150 /*
1151 * Mask off the carry flag so we don't need to re-sign when that flag is
1152 * touched by the system call return path.
1153 */
1154 bic x2, x2, PSR_CF
1155 pacga x1, x2, x1 /* SPSR hash (gkey + pc hash) */
1156 pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */
1157 pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */
1158 pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */
1159 str x1, [x0, $0]
1160 #if DEBUG || DEVELOPMENT
1161 mrs x1, DAIF
1162 tbz x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic
1163 #endif /* DEBUG || DEVELOPMENT */
1164 .endmacro
1165
1166 /*
1167 * CHECK_SIGNED_STATE
1168 *
1169 * Macro that checks signed thread state.
1170 * $0 - Offset in arm_saved_state to read the JOPHASH value from.
1171 * $1 - Label to jump to when check is unsuccessful.
1172 */
1173 .macro CHECK_SIGNED_STATE
1174 pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
1175 /*
1176 * Mask off the carry flag so we don't need to re-sign when that flag is
1177 * touched by the system call return path.
1178 */
1179 bic x2, x2, PSR_CF
1180 pacga x1, x2, x1 /* SPSR hash (gkey + pc hash) */
1181 pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */
1182 pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */
1183 pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */
1184 ldr x2, [x0, $0]
1185 cmp x1, x2
1186 b.ne $1
1187 #if DEBUG || DEVELOPMENT
1188 mrs x1, DAIF
1189 tbz x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic
1190 #endif /* DEBUG || DEVELOPMENT */
1191 .endmacro
1192
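/*
 * Both macros above compute the same chained hash over the spilled state
 * (pseudocode sketch):
 *
 *     h = pacga(pc,             &arm_saved_state)
 *     h = pacga(cpsr & ~PSR_CF, h)
 *     h = pacga(lr,             h)
 *     h = pacga(x16,            h)
 *     h = pacga(x17,            h)
 *
 * so tampering with any one field changes the stored/compared value.
 */
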
1193 /**
1194 * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc,
1195 * uint32_t cpsr, uint64_t lr, uint64_t x16,
1196 * uint64_t x17)
1197 */
1198 .text
1199 .align 2
1200 .globl EXT(ml_sign_thread_state)
1201 LEXT(ml_sign_thread_state)
1202 SIGN_THREAD_STATE SS64_JOPHASH
1203 ret
1204
1205 /**
1206 * void ml_sign_kernel_thread_state(arm_kernel_saved_state *ss, uint64_t pc,
1207 * uint32_t cpsr, uint64_t lr, uint64_t x16,
1208 * uint64_t x17)
1209 */
1210 .text
1211 .align 2
1212 .globl EXT(ml_sign_kernel_thread_state)
1213 LEXT(ml_sign_kernel_thread_state)
1214 SIGN_THREAD_STATE SS64_KERNEL_JOPHASH
1215 ret
1216
1217 /**
1218 * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc,
1219 * uint32_t cpsr, uint64_t lr, uint64_t x16,
1220 * uint64_t x17)
1221 */
1222 .text
1223 .align 2
1224 .globl EXT(ml_check_signed_state)
1225 LEXT(ml_check_signed_state)
1226 CHECK_SIGNED_STATE SS64_JOPHASH, Lcheck_hash_panic
1227 ret
1228 Lcheck_hash_panic:
1229 /*
1230 * ml_check_signed_state normally doesn't set up a stack frame, since it
1231 * needs to work in the face of attackers that can modify the stack.
1232 * However we lazily create one in the panic path: at this point we're
1233 * *only* using the stack frame for unwinding purposes, and without one
1234 * we'd be missing information about the caller.
1235 */
1236 ARM64_STACK_PROLOG
1237 PUSH_FRAME
1238 mov x1, x0
1239 adr x0, Lcheck_hash_str
1240 CALL_EXTERN panic_with_thread_kernel_state
1241
1242 /**
1243 * void ml_check_kernel_signed_state(arm_kernel_saved_state *ss, uint64_t pc,
1244 * uint32_t cpsr, uint64_t lr, uint64_t x16,
1245 * uint64_t x17)
1246 */
1247 .text
1248 .align 2
1249 .globl EXT(ml_check_kernel_signed_state)
1250 LEXT(ml_check_kernel_signed_state)
1251 CHECK_SIGNED_STATE SS64_KERNEL_JOPHASH, Lcheck_kernel_hash_panic
1252 ret
1253 Lcheck_kernel_hash_panic:
1254 ARM64_STACK_PROLOG
1255 PUSH_FRAME
1256 adr x0, Lcheck_hash_str
1257 CALL_EXTERN panic
1258
1259 Lcheck_hash_str:
1260 .asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)"
1261
1262 #if DEBUG || DEVELOPMENT
1263 Lintr_enabled_panic:
1264 ARM64_STACK_PROLOG
1265 PUSH_FRAME
1266 adr x0, Lintr_enabled_str
1267 CALL_EXTERN panic
1268 Lintr_enabled_str:
1269 /*
1270 * Please see the "Signing spilled register state" section of doc/pac.md
1271 * for an explanation of why this is bad and how it should be fixed.
1272 */
1273 .asciz "Signed thread state manipulated with interrupts enabled"
1274 #endif /* DEBUG || DEVELOPMENT */
1275
1276 /**
1277 * void ml_auth_thread_state_invalid_cpsr(arm_saved_state_t *ss)
1278 *
1279 * Panics due to an invalid CPSR value in ss.
1280 */
1281 .text
1282 .align 2
1283 .globl EXT(ml_auth_thread_state_invalid_cpsr)
1284 LEXT(ml_auth_thread_state_invalid_cpsr)
1285 ARM64_STACK_PROLOG
1286 PUSH_FRAME
1287 mov x1, x0
1288 adr x0, Linvalid_cpsr_str
1289 CALL_EXTERN panic_with_thread_kernel_state
1290
1291 Linvalid_cpsr_str:
1292 .asciz "Thread state corruption detected (PE mode == 0)"
1293 #endif /* HAS_APPLE_PAC */
1294
1295 .text
1296 .align 2
1297 .globl EXT(fill32_dczva)
1298 LEXT(fill32_dczva)
1299 0:
1300 dc zva, x0
1301 add x0, x0, #64
1302 subs x1, x1, #64
1303 b.hi 0b
1304 ret
1305
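/*
 * Caveats for fill32_dczva above (descriptive, not new requirements): dc zva
 * zeroes one ZVA block per iteration (64 bytes here, per DCZID_EL0), so the
 * caller must pass a 64-byte-aligned pointer in x0 and a non-zero multiple of
 * 64 in x1.
 */
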
1306 .text
1307 .align 2
1308 .globl EXT(fill32_nt)
1309 LEXT(fill32_nt)
1310 dup.4s v0, w2
1311 0:
1312 stnp q0, q0, [x0]
1313 stnp q0, q0, [x0, #0x20]
1314 stnp q0, q0, [x0, #0x40]
1315 stnp q0, q0, [x0, #0x60]
1316 add x0, x0, #128
1317 subs x1, x1, #128
1318 b.hi 0b
1319 ret
1320
1321 /* vim: set sw=4 ts=4: */