/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/exception_asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/proc_reg.h>
#include <arm/pmap.h>
#include <pexpert/arm64/board_config.h>
#include <sys/errno.h>
#include "assym.s"


#if defined(HAS_APPLE_PAC)


.macro LOAD_CPU_JOP_KEY dst, tmp
	mrs	\tmp, TPIDR_EL1
	ldr	\tmp, [\tmp, ACT_CPUDATAP]
	ldr	\dst, [\tmp, CPU_JOP_KEY]
.endmacro

/*
 * uint64_t ml_enable_user_jop_key(uint64_t user_jop_key)
 */
	.align 2
	.globl EXT(ml_enable_user_jop_key)
LEXT(ml_enable_user_jop_key)

/*
 * void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state)
 */
	.align 2
	.globl EXT(ml_disable_user_jop_key)
LEXT(ml_disable_user_jop_key)

#endif /* defined(HAS_APPLE_PAC) */

#if HAS_BP_RET

/*
 * void set_bp_ret(void)
 * Helper function to enable branch predictor state retention
 * across ACC sleep
 */

	.align 2
	.globl EXT(set_bp_ret)
LEXT(set_bp_ret)
	// Load bpret boot-arg
	adrp	x14, EXT(bp_ret)@page
	add	x14, x14, EXT(bp_ret)@pageoff
	ldr	w14, [x14]

	mrs	x13, CPU_CFG
	and	x13, x13, (~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift))
	and	x14, x14, #(ARM64_REG_ACC_CFG_bpSlp_mask)
	orr	x13, x13, x14, lsl #(ARM64_REG_ACC_CFG_bpSlp_shift)
	msr	CPU_CFG, x13

	ret
#endif // HAS_BP_RET
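
/*
 * Illustrative sketch (not part of the build): the read-modify-write pattern
 * used by set_bp_ret above, expressed in C. bp_ret_rmw(), read_cfg() and
 * write_cfg() are hypothetical stand-ins for the MRS/MSR accesses to the
 * implementation-defined CPU_CFG register; the mask/shift parameters mirror
 * the ARM64_REG_ACC_CFG_bpSlp_* constants used by the assembly.
 *
 *	static void bp_ret_rmw(uint64_t boot_arg, uint64_t mask, unsigned shift)
 *	{
 *		uint64_t cfg = read_cfg();              // mrs  x13, CPU_CFG
 *		cfg &= ~(mask << shift);                // clear the bpSlp field
 *		cfg |= (boot_arg & mask) << shift;      // insert the boot-arg value
 *		write_cfg(cfg);                         // msr  CPU_CFG, x13
 *	}
 */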

#if HAS_NEX_PG
	.align 2
	.globl EXT(set_nex_pg)
LEXT(set_nex_pg)
	mrs	x14, MPIDR_EL1
	// Skip if this isn't a p-core; NEX powergating isn't available for e-cores
	and	x14, x14, #(MPIDR_PNE)
	cbz	x14, Lnex_pg_done

	// Set the SEG-recommended value of 12 additional reset cycles
	HID_INSERT_BITS	HID13, ARM64_REG_HID13_RstCyc_mask, ARM64_REG_HID13_RstCyc_val, x13
	HID_SET_BITS	HID14, ARM64_REG_HID14_NexPwgEn, x13

Lnex_pg_done:
	ret

#endif // HAS_NEX_PG

/* uint32_t get_fpscr(void):
 * Returns (FPSR | FPCR).
 */
	.align 2
	.globl EXT(get_fpscr)
LEXT(get_fpscr)
#if __ARM_VFP__
	mrs	x1, FPSR // Grab FPSR
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	orr	x0, x4, x5
	and	x1, x1, x0 // Be paranoid, and clear bits we expect to
			   // be clear
	mrs	x2, FPCR // Grab FPCR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	orr	x0, x4, x5
	and	x2, x2, x0 // Be paranoid, and clear bits we expect to
			   // be clear
	orr	x0, x1, x2 // OR them to get FPSCR equivalent state
#else
	mov	x0, #0
#endif
	ret
	.align 2
	.globl EXT(set_fpscr)
/* void set_fpscr(uint32_t value):
 * Set the FPCR and FPSR registers based on the given value; note that
 * unlike 32-bit mode, in 64-bit mode FPSR and FPCR do not hold the
 * condition codes.
 */
LEXT(set_fpscr)
#if __ARM_VFP__
	mov	x4, #(FPSR_MASK & 0xFFFF)
	mov	x5, #(FPSR_MASK & 0xFFFF0000)
	orr	x1, x4, x5
	and	x1, x1, x0 // Clear the bits that don't apply to FPSR
	mov	x4, #(FPCR_MASK & 0xFFFF)
	mov	x5, #(FPCR_MASK & 0xFFFF0000)
	orr	x2, x4, x5
	and	x2, x2, x0 // Clear the bits that don't apply to FPCR
	msr	FPSR, x1 // Write FPSR
	msr	FPCR, x2 // Write FPCR
	dsb	ish // FPCR requires synchronization
#endif
	ret
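
/*
 * Illustrative sketch (not part of the build): how the two routines above
 * pack FPSR and FPCR into a single FPSCR-style word and unpack it again.
 * FPSR_MASK and FPCR_MASK are the same masks the assembly uses and are
 * assumed to select disjoint bits.
 *
 *	static uint32_t get_fpscr_model(uint32_t fpsr, uint32_t fpcr)
 *	{
 *		return (fpsr & FPSR_MASK) | (fpcr & FPCR_MASK);
 *	}
 *
 *	static void set_fpscr_model(uint32_t value, uint32_t *fpsr, uint32_t *fpcr)
 *	{
 *		*fpsr = value & FPSR_MASK;      // bits that belong to FPSR
 *		*fpcr = value & FPCR_MASK;      // bits that belong to FPCR
 *	}
 */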

/*
 * void update_mdscr(unsigned long clear, unsigned long set)
 * Clears and sets the specified bits in MDSCR_EL1.
 *
 * Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
 * self-hosted user mode debug. Any checks before the MSR can be skipped with ROP,
 * so we need to put the checks after the MSR where they can't be skipped. That
 * still leaves a small window if a breakpoint is set on the instruction
 * immediately after the MSR. To handle that, we also check and then set the
 * breakpoint control registers. This allows us to guarantee that a given
 * core will never have both KDE set and a breakpoint targeting EL1.
 *
 * If KDE gets set, unset it and then panic.
 */
	.align 2
	.globl EXT(update_mdscr)
LEXT(update_mdscr)
	mov	x4, #0
	mrs	x2, MDSCR_EL1
	bic	x2, x2, x0
	orr	x2, x2, x1
1:
	bic	x2, x2, #0x2000
	msr	MDSCR_EL1, x2
#if defined(CONFIG_KERNEL_INTEGRITY)
	/*
	 * verify KDE didn't get set (including via ROP)
	 * If set, clear it and then panic
	 */
	ands	x3, x2, #0x2000
	orr	x4, x4, x3
	bne	1b
	cmp	x4, xzr
	b.ne	Lupdate_mdscr_panic
#endif
	ret

Lupdate_mdscr_panic:
	adrp	x0, Lupdate_mdscr_panic_str@page
	add	x0, x0, Lupdate_mdscr_panic_str@pageoff
	b	EXT(panic)
	b	.

Lupdate_mdscr_panic_str:
	.asciz "MDSCR.KDE was set"
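
/*
 * Illustrative sketch (not part of the build): the check-after-write pattern
 * described above, in C. MDSCR_KDE, read_mdscr() and write_mdscr() are
 * hypothetical stand-ins (the assembly uses bit 0x2000 and MRS/MSR on
 * MDSCR_EL1). The point is that the KDE test sits after the register write,
 * so a ROP chain that jumps straight to the write cannot skip it.
 *
 *	static void update_mdscr_model(unsigned long clear, unsigned long set)
 *	{
 *		unsigned long seen = 0;
 *		unsigned long v = read_mdscr();
 *		v = (v & ~clear) | set;
 *		do {
 *			v &= ~MDSCR_KDE;                // never allow KDE
 *			write_mdscr(v);
 *			seen |= v & MDSCR_KDE;          // check *after* the write
 *		} while (v & MDSCR_KDE);
 *		if (seen)
 *			panic("MDSCR.KDE was set");
 *	}
 */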


/*
 * Set MMU Translation Table Base Alternate
 */
	.text
	.align 2
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
	dsb	sy
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_ttbr1)
	mov	lr, x1
#else
#if defined(HAS_VMSA_LOCK)
#if DEBUG || DEVELOPMENT
	mrs	x1, VMSA_LOCK_EL1
	and	x1, x1, #(VMSA_LOCK_TTBR1_EL1)
	cbnz	x1, L_set_locked_reg_panic
#endif /* DEBUG || DEVELOPMENT */
#endif /* defined(HAS_VMSA_LOCK) */
	msr	TTBR1_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	isb	sy
	ret

#if XNU_MONITOR
	.section __PPLTEXT,__text,regular,pure_instructions
#else
	.text
#endif
	.align 2
	.globl EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
#if __ARM_KERNEL_PROTECT__
	/* All EL1-mode ASIDs are odd. */
	orr	x0, x0, #(1 << TTBR_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */
	dsb	ish
	msr	TTBR0_EL1, x0
	isb	sy
	ret


#if XNU_MONITOR
	.text
	.align 2
	.globl EXT(ml_get_ppl_cpu_data)
LEXT(ml_get_ppl_cpu_data)
	LOAD_PMAP_CPU_DATA x0, x1, x2
	ret
#endif

/*
 * set AUX control register
 */
	.text
	.align 2
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	msr	ACTLR_EL1, x0
	// Synchronize system
	isb	sy
	ret

#if __ARM_KERNEL_PROTECT__
	.text
	.align 2
	.globl EXT(set_vbar_el1)
LEXT(set_vbar_el1)
#if defined(KERNEL_INTEGRITY_KTRR)
	b	EXT(pinst_set_vbar)
#else
	msr	VBAR_EL1, x0
	ret
#endif
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_VMSA_LOCK)
	.text
	.align 2
	.globl EXT(vmsa_lock)
LEXT(vmsa_lock)
	isb	sy
	mov	x1, #(VMSA_LOCK_SCTLR_M_BIT)
#if __ARM_MIXED_PAGE_SIZE__
	mov	x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_VBAR_EL1)
#else
	mov	x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_TCR_EL1 | VMSA_LOCK_VBAR_EL1)
#endif
	orr	x0, x0, x1
	msr	VMSA_LOCK_EL1, x0
	isb	sy
	ret
#endif /* defined(HAS_VMSA_LOCK) */

/*
 * set translation control register
 */
	.text
	.align 2
	.globl EXT(set_tcr)
LEXT(set_tcr)
#if defined(APPLE_ARM64_ARCH_FAMILY)
#if DEBUG || DEVELOPMENT
	// Assert that T0SZ is always equal to T1SZ
	eor	x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
	and	x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
	cbnz	x1, L_set_tcr_panic
#endif /* DEBUG || DEVELOPMENT */
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_tcr)
	mov	lr, x1
#else
#if defined(HAS_VMSA_LOCK)
#if DEBUG || DEVELOPMENT
	// assert TCR unlocked
	mrs	x1, VMSA_LOCK_EL1
	and	x1, x1, #(VMSA_LOCK_TCR_EL1)
	cbnz	x1, L_set_locked_reg_panic
#endif /* DEBUG || DEVELOPMENT */
#endif /* defined(HAS_VMSA_LOCK) */
	msr	TCR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	isb	sy
	ret

#if DEBUG || DEVELOPMENT
L_set_tcr_panic:
	PUSH_FRAME
	sub	sp, sp, #16
	str	x0, [sp]
	adr	x0, L_set_tcr_panic_str
	BRANCH_EXTERN panic

L_set_locked_reg_panic:
	PUSH_FRAME
	sub	sp, sp, #16
	str	x0, [sp]
	adr	x0, L_set_locked_reg_panic_str
	BRANCH_EXTERN panic
	b	.

L_set_tcr_panic_str:
	.asciz "set_tcr: t0sz, t1sz not equal (%llx)\n"


L_set_locked_reg_panic_str:
	.asciz "attempt to set locked register: (%llx)\n"
#endif /* DEBUG || DEVELOPMENT */

/*
 * MMU kernel virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop)
LEXT(mmu_kvtop)
	mrs	x2, DAIF // Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ and FIQ
	at	s1e1r, x0 // Translation Stage 1 EL1
	isb	sy
	mrs	x1, PAR_EL1 // Read result
	msr	DAIF, x2 // Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_invalid // Test Translation not valid
	bfm	x1, x0, #0, #11 // Add page offset
	and	x0, x1, #0x0000ffffffffffff // Clear non-address bits
	ret
L_mmu_kvtop_invalid:
	mov	x0, #0 // Return invalid
	ret

/*
 * MMU user virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_uvtop)
LEXT(mmu_uvtop)
	lsr	x8, x0, #56 // Extract top byte
	cbnz	x8, L_mmu_uvtop_invalid // Tagged pointers are invalid
	mrs	x2, DAIF // Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ and FIQ
	at	s1e0r, x0 // Translation Stage 1 EL0
	isb	sy
	mrs	x1, PAR_EL1 // Read result
	msr	DAIF, x2 // Restore interrupt state
	tbnz	x1, #0, L_mmu_uvtop_invalid // Test Translation not valid
	bfm	x1, x0, #0, #11 // Add page offset
	and	x0, x1, #0x0000ffffffffffff // Clear non-address bits
	ret
L_mmu_uvtop_invalid:
	mov	x0, #0 // Return invalid
	ret

/*
 * MMU kernel virtual to physical address preflight write access
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs	x2, DAIF // Load current DAIF
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ and FIQ
	at	s1e1w, x0 // Translation Stage 1 EL1
	mrs	x1, PAR_EL1 // Read result
	msr	DAIF, x2 // Restore interrupt state
	tbnz	x1, #0, L_mmu_kvtop_wpreflight_invalid // Test Translation not valid
	bfm	x1, x0, #0, #11 // Add page offset
	and	x0, x1, #0x0000ffffffffffff // Clear non-address bits
	ret
L_mmu_kvtop_wpreflight_invalid:
	mov	x0, #0 // Return invalid
	ret
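
/*
 * Illustrative sketch (not part of the build): how a caller typically uses
 * the translation helpers above. A return value of 0 means the address has
 * no valid translation; otherwise the result is the physical address,
 * including the page offset. The prototype shown here is assumed from the
 * comments above, and is_mapped() is a hypothetical helper.
 *
 *	extern uint64_t mmu_kvtop(uintptr_t kva);
 *
 *	static bool is_mapped(uintptr_t kva)
 *	{
 *		return mmu_kvtop(kva) != 0;     // 0 => translation was invalid
 *	}
 */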

/*
 * SET_RECOVERY_HANDLER
 *
 * Sets up a page fault recovery handler. This macro clobbers x16 and x17.
 *
 * label - recovery label
 * tpidr - persisted thread pointer
 * old_handler - persisted recovery handler
 * label_in_adr_range - whether \label is within 1 MB of PC
 */
.macro SET_RECOVERY_HANDLER label, tpidr=x16, old_handler=x10, label_in_adr_range=0
	// Note: x16 and x17 are designated for use as temporaries in
	// interruptible PAC routines. DO NOT CHANGE THESE REGISTER ASSIGNMENTS.
.if \label_in_adr_range==1 // Load the recovery handler address
	adr	x17, \label
.else
	adrp	x17, \label@page
	add	x17, x17, \label@pageoff
.endif
#if defined(HAS_APPLE_PAC)
	mrs	x16, TPIDR_EL1
	add	x16, x16, TH_RECOVER
	movk	x16, #PAC_DISCRIMINATOR_RECOVER, lsl 48
	pacia	x17, x16 // Sign with IAKey + blended discriminator
#endif

	mrs	\tpidr, TPIDR_EL1 // Load thread pointer
	ldr	\old_handler, [\tpidr, TH_RECOVER] // Save previous recovery handler
	str	x17, [\tpidr, TH_RECOVER] // Set new signed recovery handler
.endmacro

/*
 * CLEAR_RECOVERY_HANDLER
 *
 * Clears page fault handler set by SET_RECOVERY_HANDLER
 *
 * tpidr - thread pointer saved by SET_RECOVERY_HANDLER
 * old_handler - old recovery handler saved by SET_RECOVERY_HANDLER
 */
.macro CLEAR_RECOVERY_HANDLER tpidr=x16, old_handler=x10
	str	\old_handler, [\tpidr, TH_RECOVER] // Restore the previous recovery handler
.endmacro


	.text
	.align 2
copyio_error:
	CLEAR_RECOVERY_HANDLER
	mov	x0, #EFAULT // Return an EFAULT error
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _bcopyin(const char *src, char *dst, vm_size_t len)
 */
	.text
	.align 2
	.globl EXT(_bcopyin)
LEXT(_bcopyin)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	cmp	x2, #16
	b.lt	2f
	sub	x2, x2, #16
1:
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	subs	x2, x2, #16
	b.ge	1b
	/* Fixup the len and test for completion */
	adds	x2, x2, #16
	b.eq	3f
2: /* Bytewise */
	subs	x2, x2, #1
	ldrb	w3, [x0], #1
	strb	w3, [x1], #1
	b.hi	2b
3:
	CLEAR_RECOVERY_HANDLER
	mov	x0, #0
	POP_FRAME
	ARM64_STACK_EPILOG
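
/*
 * Illustrative sketch (not part of the build): a C model of the copy loop
 * used by _bcopyin and _bcopyout above -- 16-byte chunks while at least 16
 * bytes remain, then a bytewise tail. memcpy() stands in for the paired
 * 8-byte ldp/stp; the fault-recovery (EFAULT via copyio_error) path is
 * omitted.
 *
 *	static void bcopy_model(const char *src, char *dst, size_t len)
 *	{
 *		while (len >= 16) {             // ldp/stp pair
 *			memcpy(dst, src, 16);
 *			src += 16; dst += 16; len -= 16;
 *		}
 *		while (len > 0) {               // ldrb/strb tail
 *			*dst++ = *src++;
 *			len--;
 *		}
 *	}
 */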

/*
 * int _copyin_atomic32(const char *src, uint32_t *dst)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic32)
LEXT(_copyin_atomic32)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	ldr	w8, [x0]
	str	w8, [x1]
	mov	x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic32_wait_if_equals)
LEXT(_copyin_atomic32_wait_if_equals)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	ldxr	w8, [x0]
	cmp	w8, w1
	mov	x0, ESTALE
	b.ne	1f
	mov	x0, #0
	wfe
1:
	clrex
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG
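
/*
 * Illustrative sketch (not part of the build): the contract of
 * _copyin_atomic32_wait_if_equals above. The load uses ldxr so that the
 * following wfe is woken when the monitored location is written (or on any
 * other event); it returns 0 after waiting while *src still held 'value',
 * ESTALE if the value had already changed, and EFAULT on a user fault via
 * the recovery handler. wait_for_change() is a hypothetical caller.
 *
 *	static void wait_for_change(const char *uaddr, uint32_t old)
 *	{
 *		while (_copyin_atomic32_wait_if_equals(uaddr, old) == 0) {
 *			// woken by an event; keep waiting until the value changes
 *		}
 *	}
 */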

/*
 * int _copyin_atomic64(const char *src, uint64_t *dst)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic64)
LEXT(_copyin_atomic64)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	ldr	x8, [x0]
	str	x8, [x1]
	mov	x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * int _copyout_atomic32(uint32_t value, char *dst)
 */
	.text
	.align 2
	.globl EXT(_copyout_atomic32)
LEXT(_copyout_atomic32)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	str	w0, [x1]
	mov	x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyout_atomic64(uint64_t value, char *dst)
 */
	.text
	.align 2
	.globl EXT(_copyout_atomic64)
LEXT(_copyout_atomic64)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	str	x0, [x1]
	mov	x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * int _bcopyout(const char *src, char *dst, vm_size_t len)
 */
	.text
	.align 2
	.globl EXT(_bcopyout)
LEXT(_bcopyout)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	cmp	x2, #16
	b.lt	2f
	sub	x2, x2, #16
1:
	/* 16 bytes at a time */
	ldp	x3, x4, [x0], #16
	stp	x3, x4, [x1], #16
	subs	x2, x2, #16
	b.ge	1b
	/* Fixup the len and test for completion */
	adds	x2, x2, #16
	b.eq	3f
2: /* Bytewise */
	subs	x2, x2, #1
	ldrb	w3, [x0], #1
	strb	w3, [x1], #1
	b.hi	2b
3:
	CLEAR_RECOVERY_HANDLER
	mov	x0, #0
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _bcopyinstr(
 *	const user_addr_t user_addr,
 *	char *kernel_addr,
 *	vm_size_t max,
 *	vm_size_t *actual)
 */
	.text
	.align 2
	.globl EXT(_bcopyinstr)
LEXT(_bcopyinstr)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER Lcopyinstr_error, label_in_adr_range=1
	mov	x4, #0 // x4 - total bytes copied
Lcopyinstr_loop:
	ldrb	w5, [x0], #1 // Load a byte from the user source
	strb	w5, [x1], #1 // Store a byte to the kernel dest
	add	x4, x4, #1 // Increment bytes copied
	cbz	x5, Lcopyinstr_done // If this byte is null, we're done
	cmp	x4, x2 // If we're out of space, return an error
	b.ne	Lcopyinstr_loop
Lcopyinstr_too_long:
	mov	x5, #ENAMETOOLONG // Set current byte to error code for later return
Lcopyinstr_done:
	str	x4, [x3] // Return number of bytes copied
	mov	x0, x5 // Set error code (0 on success, ENAMETOOLONG on failure)
	b	Lcopyinstr_exit
Lcopyinstr_error:
	mov	x0, #EFAULT // Return EFAULT on error
Lcopyinstr_exit:
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG
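
/*
 * Illustrative sketch (not part of the build): the semantics of _bcopyinstr
 * above, with the user-fault (EFAULT) path omitted. It copies bytes up to
 * and including the NUL terminator, reports the number of bytes copied
 * through 'actual', and returns ENAMETOOLONG when 'max' bytes have been
 * written without seeing a NUL.
 *
 *	static int bcopyinstr_model(const char *src, char *dst,
 *	    size_t max, size_t *actual)
 *	{
 *		size_t n = 0;
 *		while (n < max) {
 *			char c = src[n];
 *			dst[n] = c;
 *			n++;
 *			if (c == '\0') {
 *				*actual = n;    // includes the NUL
 *				return 0;
 *			}
 *		}
 *		*actual = n;
 *		return ENAMETOOLONG;
 *	}
 */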

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
 *
 * Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
 * either user or kernel memory, or 8 bytes (AArch32) from user only.
 *
 * x0 : address of frame to copy.
 * x1 : kernel address at which to store data.
 * w2 : whether to copy an AArch32 or AArch64 frame.
 * x3 : temp
 * x5 : temp (kernel virtual base)
 * x9 : temp
 * x10 : old recovery function (set by SET_RECOVERY_HANDLER)
 * x12, x13 : backtrace data
 * x16 : thread pointer (set by SET_RECOVERY_HANDLER)
 *
 */
	.text
	.align 2
	.globl EXT(copyinframe)
LEXT(copyinframe)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	cbnz	w2, Lcopyinframe64 // Check frame size
	adrp	x5, EXT(gVirtBase)@page // For 32-bit frame, make sure we're not trying to copy from kernel
	add	x5, x5, EXT(gVirtBase)@pageoff
	ldr	x5, [x5]
	cmp	x5, x0 // See if address is in kernel virtual range
	b.hi	Lcopyinframe32 // If below kernel virtual range, proceed.
	mov	w0, #EFAULT // Should never have a 32-bit frame in kernel virtual range
	b	Lcopyinframe_done

Lcopyinframe32:
	ldr	x12, [x0] // Copy 8 bytes
	str	x12, [x1]
	mov	w0, #0 // Success
	b	Lcopyinframe_done

Lcopyinframe64:
	mov	x3, VM_MIN_KERNEL_ADDRESS // Check if kernel address
	orr	x9, x0, TBI_MASK // Hide tags in address comparison
	cmp	x9, x3 // If in kernel address range, skip tag test
	b.hs	Lcopyinframe_valid
	tst	x0, TBI_MASK // Detect tagged pointers
	b.eq	Lcopyinframe_valid
	mov	w0, #EFAULT // Tagged address, fail
	b	Lcopyinframe_done
Lcopyinframe_valid:
	ldp	x12, x13, [x0] // Copy 16 bytes
	stp	x12, x13, [x1]
	mov	w0, #0 // Success

Lcopyinframe_done:
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG
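
/*
 * Illustrative sketch (not part of the build): how a backtrace walker might
 * use copyinframe above. The struct layout and next_frame() helper are
 * hypothetical; for a 64-bit frame the routine copies the {fp, lr} pair and
 * returns 0 on success, EFAULT otherwise.
 *
 *	struct frame64 { uint64_t fp; uint64_t lr; };
 *
 *	static int next_frame(vm_address_t fp, struct frame64 *out)
 *	{
 *		return copyinframe(fp, (char *)out, true);  // 0 => *out is valid
 *	}
 */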


/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.text
	.align 2
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
	PANIC_UNIMPLEMENTED

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 * Set debug registers to match the current thread state
 * (NULL to disable). Assume 6 breakpoints and 2
 * watchpoints, since that has been the case in all cores
 * thus far.
 */
	.text
	.align 2
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
	PANIC_UNIMPLEMENTED

#if defined(APPLE_ARM64_ARCH_FAMILY)
/*
 * Note: still have to ISB before executing wfi!
 */
	.text
	.align 2
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
	PUSH_FRAME

#if defined(APPLETYPHOON)
	// <rdar://problem/15827409>
	HID_SET_BITS HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x9
	dsb	sy
	isb	sy
#endif

#if HAS_CLUSTER
	cbnz	x0, 1f // Skip if deep_sleep == true
	// Mask FIQ and IRQ to avoid spurious wakeups
	mrs	x9, CPU_OVRD
	and	x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	mov	x10, #(ARM64_REG_CYC_OVRD_irq_disable | ARM64_REG_CYC_OVRD_fiq_disable)
	orr	x9, x9, x10
	msr	CPU_OVRD, x9
	isb
1:
#endif

	cbz	x0, 1f // Skip if deep_sleep == false
#if __ARM_GLOBAL_SLEEP_BIT__
	// Enable deep sleep
	mrs	x1, ACC_OVRD
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
#if HAS_RETENTION_STATE
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_disPioOnWfiCpu)
#endif
	msr	ACC_OVRD, x1

#if defined(APPLEMONSOON)
	// Skye has an ACC_OVRD register for EBLK and PBLK. Same bitfield layout for these bits
	mrs	x1, EBLK_OVRD
	orr	x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
	and	x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
	orr	x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
	msr	EBLK_OVRD, x1

#endif

#else
#if defined(APPLETYPHOON) || defined(APPLETWISTER)
	// Enable deep sleep
	mov	x1, ARM64_REG_CYC_CFG_deepSleep
	msr	CPU_CFG, x1
#endif
#endif

1:
	// Set "OK to power down" (<rdar://problem/12390433>)
	mrs	x9, CPU_OVRD
	orr	x9, x9, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
#if HAS_RETENTION_STATE
	orr	x9, x9, #(ARM64_REG_CYC_OVRD_disWfiRetn)
#endif
	msr	CPU_OVRD, x9

#if defined(APPLEMONSOON) || defined(APPLEVORTEX)
	ARM64_IS_PCORE x9
	cbz	x9, Lwfi_inst // skip if not p-core

	/* <rdar://problem/32512947>: Flush the GUPS prefetcher prior to
	 * wfi. A Skye HW bug can cause the GUPS prefetcher on p-cores
	 * to be left with valid entries that fail to drain if a
	 * subsequent wfi is issued. This can prevent the core from
	 * power-gating. For the idle case that is recoverable, but
	 * for the deep-sleep (S2R) case in which cores MUST power-gate,
	 * it can lead to a hang. This can be prevented by disabling
	 * and re-enabling GUPS, which forces the prefetch queue to
	 * drain. This should be done as close to wfi as possible, i.e.
	 * at the very end of arm64_prepare_for_sleep(). */
#if defined(APPLEVORTEX)
	/* <rdar://problem/32821461>: Cyprus A0/A1 parts have a similar
	 * bug in the HSP prefetcher that can be worked around through
	 * the same method mentioned above for Skye. */
	mrs	x9, MIDR_EL1
	EXEC_COREALL_REVLO CPU_VERSION_B0, x9, x10
#endif
	mrs	x9, HID10
	orr	x9, x9, #(ARM64_REG_HID10_DisHwpGups)
	msr	HID10, x9
	isb	sy
	and	x9, x9, #(~(ARM64_REG_HID10_DisHwpGups))
	msr	HID10, x9
	isb	sy
#endif
	EXEC_END

Lwfi_inst:
	dsb	sy
	isb	sy
	wfi
	b	Lwfi_inst

/*
 * Force WFI to use clock gating only
 */
	.text
	.align 2
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	ARM64_STACK_PROLOG
	PUSH_FRAME

	mrs	x0, CPU_OVRD
	orr	x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
	msr	CPU_OVRD, x0

	POP_FRAME
	ARM64_STACK_EPILOG


#if HAS_RETENTION_STATE
	.text
	.align 2
	.globl EXT(arm64_retention_wfi)
LEXT(arm64_retention_wfi)
	wfi
	cbz	lr, Lwfi_retention // If lr is 0, we entered retention state and lost all GPRs except sp and pc
	ret // Otherwise just return to cpu_idle()
Lwfi_retention:
	mov	x0, #1
	bl	EXT(ClearIdlePop)
	mov	x0, #0
	bl	EXT(cpu_idle_exit) // cpu_idle_exit(from_reset = FALSE)
	b	. // cpu_idle_exit() should never return
#endif

#if defined(APPLETYPHOON)

	.text
	.align 2
	.globl EXT(typhoon_prepare_for_wfi)

LEXT(typhoon_prepare_for_wfi)
	PUSH_FRAME

	// <rdar://problem/15827409>
	HID_SET_BITS HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0
	dsb	sy
	isb	sy

	POP_FRAME
	ret


	.text
	.align 2
	.globl EXT(typhoon_return_from_wfi)
LEXT(typhoon_return_from_wfi)
	PUSH_FRAME

	// <rdar://problem/15827409>
	HID_CLEAR_BITS HID2, ARM64_REG_HID2_disMMUmtlbPrefetch, x0
	dsb	sy
	isb	sy

	POP_FRAME
	ret
#endif

#ifdef APPLETYPHOON

#define HID0_DEFEATURES_1 0x0000a0c000064010ULL
#define HID1_DEFEATURES_1 0x000000004005bf20ULL
#define HID2_DEFEATURES_1 0x0000000000102074ULL
#define HID3_DEFEATURES_1 0x0000000000400003ULL
#define HID4_DEFEATURES_1 0x83ff00e100000268ULL
#define HID7_DEFEATURES_1 0x000000000000000eULL

#define HID0_DEFEATURES_2 0x0000a1c000020010ULL
#define HID1_DEFEATURES_2 0x000000000005d720ULL
#define HID2_DEFEATURES_2 0x0000000000002074ULL
#define HID3_DEFEATURES_2 0x0000000000400001ULL
#define HID4_DEFEATURES_2 0x8390000200000208ULL
#define HID7_DEFEATURES_2 0x0000000000000000ULL

/*
 * arg0 = target register
 * arg1 = 64-bit constant
 */
.macro LOAD_UINT64
	movz	$0, #(($1 >> 48) & 0xffff), lsl #48
	movk	$0, #(($1 >> 32) & 0xffff), lsl #32
	movk	$0, #(($1 >> 16) & 0xffff), lsl #16
	movk	$0, #(($1) & 0xffff)
.endmacro
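
/*
 * Illustrative sketch (not part of the build): what LOAD_UINT64 does,
 * expressed in C. The 64-bit constant is split into four 16-bit halfwords;
 * movz writes the top halfword and zeroes the rest, and each movk merges one
 * more halfword without disturbing the others.
 *
 *	static uint64_t load_uint64_model(uint64_t c)
 *	{
 *		uint64_t r;
 *		r  = ((c >> 48) & 0xffff) << 48;        // movz ..., lsl #48
 *		r |= ((c >> 32) & 0xffff) << 32;        // movk ..., lsl #32
 *		r |= ((c >> 16) & 0xffff) << 16;        // movk ..., lsl #16
 *		r |=  (c        & 0xffff);              // movk
 *		return r;                               // r == c
 *	}
 */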

	.text
	.align 2
	.globl EXT(cpu_defeatures_set)
LEXT(cpu_defeatures_set)
	PUSH_FRAME
	cmp	x0, #2
	b.eq	cpu_defeatures_set_2
	cmp	x0, #1
	b.ne	cpu_defeatures_set_ret
	LOAD_UINT64 x1, HID0_DEFEATURES_1
	mrs	x0, HID0
	orr	x0, x0, x1
	msr	HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_1
	mrs	x0, HID1
	orr	x0, x0, x1
	msr	HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_1
	mrs	x0, HID2
	orr	x0, x0, x1
	msr	HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_1
	mrs	x0, HID3
	orr	x0, x0, x1
	msr	HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_1
	mrs	x0, S3_0_C15_C4_0
	orr	x0, x0, x1
	msr	S3_0_C15_C4_0, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_1
	mrs	x0, HID7
	orr	x0, x0, x1
	msr	HID7, x0
	dsb	sy
	isb	sy
	b	cpu_defeatures_set_ret
cpu_defeatures_set_2:
	LOAD_UINT64 x1, HID0_DEFEATURES_2
	mrs	x0, HID0
	orr	x0, x0, x1
	msr	HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_2
	mrs	x0, HID1
	orr	x0, x0, x1
	msr	HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_2
	mrs	x0, HID2
	orr	x0, x0, x1
	msr	HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_2
	mrs	x0, HID3
	orr	x0, x0, x1
	msr	HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_2
	mrs	x0, S3_0_C15_C4_0
	orr	x0, x0, x1
	msr	S3_0_C15_C4_0, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_2
	mrs	x0, HID7
	orr	x0, x0, x1
	msr	HID7, x0
	dsb	sy
	isb	sy
	b	cpu_defeatures_set_ret
cpu_defeatures_set_ret:
	POP_FRAME
	ret
#endif

#else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
	.text
	.align 2
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
	PUSH_FRAME
Lwfi_inst:
	dsb	sy
	isb	sy
	wfi
	b	Lwfi_inst

/*
 * Force WFI to use clock gating only
 * Note: for non-Apple devices, do nothing.
 */
	.text
	.align 2
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	PUSH_FRAME
	nop
	POP_FRAME
	ret

#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */

/*
 * void arm64_replace_bootstack(cpu_data_t *cpu_data)
 *
 * This must be called from a kernel thread context running on the boot CPU,
 * after setting up new exception stacks in per-CPU data. That will guarantee
 * that the stack(s) we're trying to replace aren't currently in use. For
 * KTRR-protected devices, this must also be called prior to VM prot finalization
 * and lockdown, as updating SP1 requires a sensitive instruction.
 */
	.text
	.align 2
	.globl EXT(arm64_replace_bootstack)
LEXT(arm64_replace_bootstack)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	// Set the exception stack pointer
	ldr	x0, [x0, CPU_EXCEPSTACK_TOP]
	mrs	x4, DAIF // Load current DAIF; use x4 as pinst may trash x1-x3
	msr	DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) // Disable IRQ/FIQ/serror
	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov	x1, lr
	bl	EXT(pinst_spsel_1)
	mov	lr, x1
#else
	msr	SPSel, #1
#endif
	mov	sp, x0
	msr	SPSel, #0
	msr	DAIF, x4 // Restore interrupt state
	POP_FRAME
	ARM64_STACK_EPILOG

#ifdef MONITOR
/*
 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *                            uintptr_t arg2, uintptr_t arg3)
 *
 * Call the EL3 monitor with 4 arguments in registers
 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
 * registers are preserved, temporary registers are not. Parameters and results are passed in
 * the usual manner.
 */
	.text
	.align 2
	.globl EXT(monitor_call)
LEXT(monitor_call)
	smc	0x11
	ret
#endif
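
/*
 * Illustrative sketch (not part of the build): calling into the EL3 monitor
 * through the wrapper above. MONITOR_EXAMPLE_CALL and the argument values
 * are hypothetical; per the ABI described above, the result comes back as
 * for a normal C function call.
 *
 *	extern unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *	    uintptr_t arg2, uintptr_t arg3);
 *
 *	unsigned long r = monitor_call(MONITOR_EXAMPLE_CALL, 0, 0, 0);
 */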

#ifdef HAS_APPLE_PAC
/*
 * SIGN_THREAD_STATE
 *
 * Macro that signs thread state.
 * $0 - Offset in arm_saved_state to store JOPHASH value.
 */
.macro SIGN_THREAD_STATE
	pacga	x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
	/*
	 * Mask off the carry flag so we don't need to re-sign when that flag is
	 * touched by the system call return path.
	 */
	bic	x2, x2, PSR_CF
	pacga	x1, x2, x1 /* SPSR hash (gkey + pc hash) */
	pacga	x1, x3, x1 /* LR Hash (gkey + spsr hash) */
	pacga	x1, x4, x1 /* X16 hash (gkey + lr hash) */
	pacga	x1, x5, x1 /* X17 hash (gkey + x16 hash) */
	str	x1, [x0, $0]
#if DEBUG || DEVELOPMENT
	mrs	x1, DAIF
	tbz	x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic
#endif /* DEBUG || DEVELOPMENT */
.endmacro

/*
 * CHECK_SIGNED_STATE
 *
 * Macro that checks signed thread state.
 * $0 - Offset in arm_saved_state to read the JOPHASH value from.
 * $1 - Label to jump to when check is unsuccessful.
 */
.macro CHECK_SIGNED_STATE
	pacga	x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
	/*
	 * Mask off the carry flag so we don't need to re-sign when that flag is
	 * touched by the system call return path.
	 */
	bic	x2, x2, PSR_CF
	pacga	x1, x2, x1 /* SPSR hash (gkey + pc hash) */
	pacga	x1, x3, x1 /* LR Hash (gkey + spsr hash) */
	pacga	x1, x4, x1 /* X16 hash (gkey + lr hash) */
	pacga	x1, x5, x1 /* X17 hash (gkey + x16 hash) */
	ldr	x2, [x0, $0]
	cmp	x1, x2
	b.ne	$1
#if DEBUG || DEVELOPMENT
	mrs	x1, DAIF
	tbz	x1, #DAIF_IRQF_SHIFT, Lintr_enabled_panic
#endif /* DEBUG || DEVELOPMENT */
.endmacro
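
/*
 * Illustrative sketch (not part of the build): the hash chain computed by
 * the two macros above, in C-like pseudocode. pacga() stands in for the
 * PACGA instruction (it is not a real C function here); each link folds the
 * previous hash into the next, so no field can be replaced independently.
 *
 *	uint64_t hash;
 *	hash = pacga(pc,             (uint64_t)ss);  // gkey + &arm_saved_state
 *	hash = pacga(cpsr & ~PSR_CF, hash);          // carry flag is ignored
 *	hash = pacga(lr,             hash);
 *	hash = pacga(x16,            hash);
 *	hash = pacga(x17,            hash);
 *	// SIGN_THREAD_STATE stores 'hash' at the JOPHASH offset;
 *	// CHECK_SIGNED_STATE recomputes it, compares against the stored value,
 *	// and branches to the panic label on mismatch.
 */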

/**
 * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc,
 *                           uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                           uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_sign_thread_state)
LEXT(ml_sign_thread_state)
	SIGN_THREAD_STATE SS64_JOPHASH
	ret

/**
 * void ml_sign_kernel_thread_state(arm_kernel_saved_state *ss, uint64_t pc,
 *                                  uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                                  uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_sign_kernel_thread_state)
LEXT(ml_sign_kernel_thread_state)
	SIGN_THREAD_STATE SS64_KERNEL_JOPHASH
	ret

/**
 * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc,
 *                            uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                            uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_check_signed_state)
LEXT(ml_check_signed_state)
	CHECK_SIGNED_STATE SS64_JOPHASH, Lcheck_hash_panic
	ret
Lcheck_hash_panic:
	/*
	 * ml_check_signed_state normally doesn't set up a stack frame, since it
	 * needs to work in the face of attackers that can modify the stack.
	 * However we lazily create one in the panic path: at this point we're
	 * *only* using the stack frame for unwinding purposes, and without one
	 * we'd be missing information about the caller.
	 */
	ARM64_STACK_PROLOG
	PUSH_FRAME
	mov	x1, x0
	adr	x0, Lcheck_hash_str
	CALL_EXTERN panic_with_thread_kernel_state

/**
 * void ml_check_kernel_signed_state(arm_kernel_saved_state *ss, uint64_t pc,
 *                                   uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                                   uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_check_kernel_signed_state)
LEXT(ml_check_kernel_signed_state)
	CHECK_SIGNED_STATE SS64_KERNEL_JOPHASH, Lcheck_kernel_hash_panic
	ret
Lcheck_kernel_hash_panic:
	ARM64_STACK_PROLOG
	PUSH_FRAME
	adr	x0, Lcheck_hash_str
	CALL_EXTERN panic

Lcheck_hash_str:
	.asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)"

#if DEBUG || DEVELOPMENT
Lintr_enabled_panic:
	ARM64_STACK_PROLOG
	PUSH_FRAME
	adr	x0, Lintr_enabled_str
	CALL_EXTERN panic
Lintr_enabled_str:
	/*
	 * Please see the "Signing spilled register state" section of doc/pac.md
	 * for an explanation of why this is bad and how it should be fixed.
	 */
	.asciz "Signed thread state manipulated with interrupts enabled"
#endif /* DEBUG || DEVELOPMENT */

/**
 * void ml_auth_thread_state_invalid_cpsr(arm_saved_state_t *ss)
 *
 * Panics due to an invalid CPSR value in ss.
 */
	.text
	.align 2
	.globl EXT(ml_auth_thread_state_invalid_cpsr)
LEXT(ml_auth_thread_state_invalid_cpsr)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	mov	x1, x0
	adr	x0, Linvalid_cpsr_str
	CALL_EXTERN panic_with_thread_kernel_state

Linvalid_cpsr_str:
	.asciz "Thread state corruption detected (PE mode == 0)"
#endif /* HAS_APPLE_PAC */

	.text
	.align 2
	.globl EXT(fill32_dczva)
LEXT(fill32_dczva)
0:
	dc	zva, x0
	add	x0, x0, #64
	subs	x1, x1, #64
	b.hi	0b
	ret

	.text
	.align 2
	.globl EXT(fill32_nt)
LEXT(fill32_nt)
	dup.4s	v0, w2
0:
	stnp	q0, q0, [x0]
	stnp	q0, q0, [x0, #0x20]
	stnp	q0, q0, [x0, #0x40]
	stnp	q0, q0, [x0, #0x60]
	add	x0, x0, #128
	subs	x1, x1, #128
	b.hi	0b
	ret
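
/*
 * Illustrative sketch (not part of the build): the effect of the two fill
 * routines above, as a C model. fill32_dczva zeroes the buffer with DC ZVA
 * in 64-byte steps and assumes 64-byte alignment and a length that is a
 * multiple of 64; fill32_nt stores a replicated 32-bit pattern with
 * non-temporal 128-byte bursts and assumes a multiple of 128.
 *
 *	static void fill32_model(uint32_t *dst, size_t len_bytes, uint32_t val)
 *	{
 *		for (size_t i = 0; i < len_bytes / sizeof(uint32_t); i++)
 *			dst[i] = val;           // fill32_dczva is the val == 0 case
 *	}
 */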

/* vim: set sw=4 ts=4: */