1 /*
2 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <machine/asm.h>
30 #include <arm64/machine_machdep.h>
31 #include <arm64/proc_reg.h>
32 #include <arm/pmap.h>
33 #include <pexpert/arm64/board_config.h>
34 #include <sys/errno.h>
35 #include "assym.s"
36
37
38 /* uint32_t get_fpscr(void):
39 * Returns (FPSR | FPCR).
40 */
41 .align 2
42 .globl EXT(get_fpscr)
43 LEXT(get_fpscr)
44 #if __ARM_VFP__
45 mrs x1, FPSR // Grab FPSR
46 mov x4, #(FPSR_MASK & 0xFFFF)
47 mov x5, #(FPSR_MASK & 0xFFFF0000)
48 orr x0, x4, x5
49 and x1, x1, x0 // Be paranoid, and clear bits we expect to
50 // be clear
51 mrs x2, FPCR // Grab FPCR
52 mov x4, #(FPCR_MASK & 0xFFFF)
53 mov x5, #(FPCR_MASK & 0xFFFF0000)
54 orr x0, x4, x5
55 and x2, x2, x0 // Be paranoid, and clear bits we expect to
56 // be clear
57 orr x0, x1, x2 // OR them to get FPSCR equivalent state
58 #else
59 mov x0, #0
60 #endif
61 ret
62 .align 2
63 .globl EXT(set_fpscr)
64 /* void set_fpscr(uint32_t value):
65  * Set the FPCR and FPSR registers based on the given value; note that
66  * unlike in 32-bit mode, in 64-bit mode the FPSR and FPCR are not
67  * responsible for the condition codes.
68 */
69 LEXT(set_fpscr)
70 #if __ARM_VFP__
71 mov x4, #(FPSR_MASK & 0xFFFF)
72 mov x5, #(FPSR_MASK & 0xFFFF0000)
73 orr x1, x4, x5
74 and x1, x1, x0 // Clear the bits that don't apply to FPSR
75 mov x4, #(FPCR_MASK & 0xFFFF)
76 mov x5, #(FPCR_MASK & 0xFFFF0000)
77 orr x2, x4, x5
78 and x2, x2, x0 // Clear the bits that don't apply to FPCR
79         msr     FPSR, x1                        // Write FPSR
80         msr     FPCR, x2                        // Write FPCR
81 dsb ish // FPCR requires synchronization
82 #endif
83 ret
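/*
 * Illustrative only: a hedged C-level sketch (not part of the build) of the
 * FPSCR split implemented by get_fpscr()/set_fpscr() above, treating FPSR and
 * FPCR as plain variables:
 *
 *	uint32_t get_fpscr(void) {
 *		return (FPSR & FPSR_MASK) | (FPCR & FPCR_MASK);
 *	}
 *	void set_fpscr(uint32_t v) {
 *		FPSR = v & FPSR_MASK;
 *		FPCR = v & FPCR_MASK;
 *	}
 */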
84
85 #if __ARM_KERNEL_PROTECT__
86 /*
87 * __ARM_KERNEL_PROTECT__ adds two complications to TLB management:
88 *
89 * 1. As each pmap has two ASIDs, every TLB operation that targets an ASID must
90 * target both ASIDs for the pmap that owns the target ASID.
91 *
92 * 2. Any TLB operation targeting the kernel_pmap ASID (ASID 0) must target all
93 * ASIDs (as kernel_pmap mappings may be referenced while using an ASID that
94 * belongs to another pmap). We expect these routines to be called with the
95  * EL0 ASID for the target, not the EL1 ASID.
96 */
97 #endif /* __ARM_KERNEL_PROTECT__ */
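/*
 * Illustrative only: a hedged sketch of the two-ASID pattern described above,
 * as used by the ASID-targeted routines below.  The TLBI operand carries the
 * ASID in its upper bits; the EL1 companion ASID is the EL0 ASID with its low
 * bit set (tlbi_vae1is() is a hypothetical stand-in for the instruction):
 *
 *	void flush_both_asids(uint64_t op) {
 *		tlbi_vae1is(op & ~(1ULL << TLBI_ASID_SHIFT));	// EL0 ASID
 *		tlbi_vae1is(op |  (1ULL << TLBI_ASID_SHIFT));	// EL1 ASID
 *	}
 *
 * An operand whose ASID field is 0 is treated as a kernel request and falls
 * back to an all-ASID flush instead.
 */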
98
99 /*
100 * void flush_mmu_tlb(void)
101 *
102 * Flush all TLBs
103 */
104 .text
105 .align 2
106 .globl EXT(flush_mmu_tlb)
107 LEXT(flush_mmu_tlb)
108 tlbi vmalle1is
109 dsb ish
110 isb sy
111 ret
112
113 /*
114 * void flush_core_tlb(void)
115 *
116 * Flush core TLB
117 */
118 .text
119 .align 2
120 .globl EXT(flush_core_tlb)
121 LEXT(flush_core_tlb)
122 tlbi vmalle1
123 dsb ish
124 isb sy
125 ret
126
127 /*
128 * void flush_mmu_tlb_allentries(uint64_t, uint64_t)
129 *
130 * Flush TLB entries
131 */
132 .text
133 .align 2
134 .globl EXT(flush_mmu_tlb_allentries)
135 LEXT(flush_mmu_tlb_allentries)
136 #if __ARM_16K_PG__
137 and x0, x0, #~0x3
138
139 /*
140 * The code below is not necessarily correct. From an overview of
141 * the client code, the expected contract for TLB flushes is that
142 * we will expand from an "address, length" pair to "start address,
143 * end address" in the course of a TLB flush. This suggests that
144 * a flush for "X, X+4" is actually only asking for a flush of a
145 * single 16KB page. At the same time, we'd like to be prepared
146 * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
147 * number to a 16KB page boundary. This should deal correctly with
148 * unaligned inputs.
149 *
150  * If our expectations about client behavior are wrong, however, this
151 * will lead to occasional TLB corruption on platforms with 16KB
152 * pages.
153 */
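/*
 * Worked example (illustrative): with 16KB pages the loop below steps by
 * ARM_PGBYTES / 4096 = 4, since the TLBI operand counts 4KB pages.  For a
 * request covering one 16KB page, start X (a multiple of 4) and end X + 4,
 * the rounding leaves both values unchanged and the loop issues exactly one
 * "tlbi vaae1is"; a sloppy end of X + 3 is rounded up to X + 4 and behaves
 * identically.
 */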
154 add x1, x1, #0x3
155 and x1, x1, #~0x3
156 #endif
157 Lflush_mmu_tlb_allentries_loop:
158 tlbi vaae1is, x0
159 add x0, x0, #(ARM_PGBYTES / 4096) // Units are 4KB pages, as defined by the ISA
160 cmp x0, x1
161 b.lt Lflush_mmu_tlb_allentries_loop
162 dsb ish
163 isb sy
164 ret
165
166 /*
167 * void flush_mmu_tlb_entry(uint64_t)
168 *
169 * Flush TLB entry
170 */
171 .text
172 .align 2
173 .globl EXT(flush_mmu_tlb_entry)
174 LEXT(flush_mmu_tlb_entry)
175 #if __ARM_KERNEL_PROTECT__
176 /*
177 * If we are flushing ASID 0, this is a kernel operation. With this
178 * ASID scheme, this means we should flush all ASIDs.
179 */
180 lsr x2, x0, #TLBI_ASID_SHIFT
181 cmp x2, #0
182 b.eq Lflush_mmu_tlb_entry_globally
183
184 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
185 tlbi vae1is, x0
186 orr x0, x0, #(1 << TLBI_ASID_SHIFT)
187 #endif /* __ARM_KERNEL_PROTECT__ */
188 tlbi vae1is, x0
189 dsb ish
190 isb sy
191 ret
192 #if __ARM_KERNEL_PROTECT__
193 Lflush_mmu_tlb_entry_globally:
194 tlbi vaae1is, x0
195 dsb ish
196 isb sy
197 ret
198 #endif /* __ARM_KERNEL_PROTECT__ */
199
200 /*
201 * void flush_mmu_tlb_entries(uint64_t, uint64_t)
202 *
203 * Flush TLB entries
204 */
205 .text
206 .align 2
207 .globl EXT(flush_mmu_tlb_entries)
208 LEXT(flush_mmu_tlb_entries)
209 #if __ARM_16K_PG__
210 and x0, x0, #~0x3
211
212 /*
213 * The code below is not necessarily correct. From an overview of
214 * the client code, the expected contract for TLB flushes is that
215 * we will expand from an "address, length" pair to "start address,
216 * end address" in the course of a TLB flush. This suggests that
217 * a flush for "X, X+4" is actually only asking for a flush of a
218 * single 16KB page. At the same time, we'd like to be prepared
219 * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
220 * number to a 16KB page boundary. This should deal correctly with
221 * unaligned inputs.
222 *
223  * If our expectations about client behavior are wrong, however, this
224 * will lead to occasional TLB corruption on platforms with 16KB
225 * pages.
226 */
227 add x1, x1, #0x3
228 and x1, x1, #~0x3
229 #endif /* __ARM_16K_PG__ */
230 #if __ARM_KERNEL_PROTECT__
231 /*
232 * If we are flushing ASID 0, this is a kernel operation. With this
233 * ASID scheme, this means we should flush all ASIDs.
234 */
235 lsr x2, x0, #TLBI_ASID_SHIFT
236 cmp x2, #0
237 b.eq Lflush_mmu_tlb_entries_globally_loop
238
239 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
240 #endif /* __ARM_KERNEL_PROTECT__ */
241 Lflush_mmu_tlb_entries_loop:
242 tlbi vae1is, x0
243 #if __ARM_KERNEL_PROTECT__
244 orr x0, x0, #(1 << TLBI_ASID_SHIFT)
245 tlbi vae1is, x0
246 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
247 #endif /* __ARM_KERNEL_PROTECT__ */
248         add     x0, x0, #(ARM_PGBYTES / 4096)   // Units are 4KB pages, as defined by the ISA
249 cmp x0, x1
250 b.lt Lflush_mmu_tlb_entries_loop
251 dsb ish
252 isb sy
253 ret
254 #if __ARM_KERNEL_PROTECT__
255 Lflush_mmu_tlb_entries_globally_loop:
256 tlbi vaae1is, x0
257         add     x0, x0, #(ARM_PGBYTES / 4096)   // Units are 4KB pages, as defined by the ISA
258 cmp x0, x1
259 b.lt Lflush_mmu_tlb_entries_globally_loop
260 dsb ish
261 isb sy
262 ret
263 #endif /* __ARM_KERNEL_PROTECT__ */
264
265 /*
266 * void flush_mmu_tlb_asid(uint64_t)
267 *
268  * Flush TLB entries for the requested ASID
269 */
270 .text
271 .align 2
272 .globl EXT(flush_mmu_tlb_asid)
273 LEXT(flush_mmu_tlb_asid)
274 #if __ARM_KERNEL_PROTECT__
275 /*
276 * If we are flushing ASID 0, this is a kernel operation. With this
277 * ASID scheme, this means we should flush all ASIDs.
278 */
279 lsr x1, x0, #TLBI_ASID_SHIFT
280 cmp x1, #0
281 b.eq Lflush_mmu_tlb_globally
282
283 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
284 tlbi aside1is, x0
285 orr x0, x0, #(1 << TLBI_ASID_SHIFT)
286 #endif /* __ARM_KERNEL_PROTECT__ */
287 tlbi aside1is, x0
288 dsb ish
289 isb sy
290 ret
291 #if __ARM_KERNEL_PROTECT__
292 Lflush_mmu_tlb_globally:
293 tlbi vmalle1is
294 dsb ish
295 isb sy
296 ret
297 #endif /* __ARM_KERNEL_PROTECT__ */
298
299 /*
300 * void flush_core_tlb_asid(uint64_t)
301 *
302  * Flush this core's TLB entries for the requested ASID
303 */
304 .text
305 .align 2
306 .globl EXT(flush_core_tlb_asid)
307 LEXT(flush_core_tlb_asid)
308 #if __ARM_KERNEL_PROTECT__
309 /*
310 * If we are flushing ASID 0, this is a kernel operation. With this
311 * ASID scheme, this means we should flush all ASIDs.
312 */
313 lsr x1, x0, #TLBI_ASID_SHIFT
314 cmp x1, #0
315 b.eq Lflush_core_tlb_asid_globally
316
317 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
318 tlbi aside1, x0
319 orr x0, x0, #(1 << TLBI_ASID_SHIFT)
320 #endif /* __ARM_KERNEL_PROTECT__ */
321 tlbi aside1, x0
322 dsb ish
323 isb sy
324 ret
325 #if __ARM_KERNEL_PROTECT__
326 Lflush_core_tlb_asid_globally:
327 tlbi vmalle1
328 dsb ish
329 isb sy
330 ret
331 #endif /* __ARM_KERNEL_PROTECT__ */
332
333 /*
334 * Set MMU Translation Table Base Alternate
335 */
336 .text
337 .align 2
338 .globl EXT(set_mmu_ttb_alternate)
339 LEXT(set_mmu_ttb_alternate)
340 dsb sy
341 #if defined(KERNEL_INTEGRITY_KTRR)
342 mov x1, lr
343 bl EXT(pinst_set_ttbr1)
344 mov lr, x1
345 #else
346 msr TTBR1_EL1, x0
347 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
348 isb sy
349 ret
350
351 /*
352 * set AUX control register
353 */
354 .text
355 .align 2
356 .globl EXT(set_aux_control)
357 LEXT(set_aux_control)
358 msr ACTLR_EL1, x0
359 // Synchronize system
360 dsb sy
361 isb sy
362 ret
363
364 #if __ARM_KERNEL_PROTECT__
365 .text
366 .align 2
367 .globl EXT(set_vbar_el1)
368 LEXT(set_vbar_el1)
369 #if defined(KERNEL_INTEGRITY_KTRR)
370 b EXT(pinst_set_vbar)
371 #else
372 msr VBAR_EL1, x0
373 ret
374 #endif
375 #endif /* __ARM_KERNEL_PROTECT__ */
376
377
378 /*
379 * set translation control register
380 */
381 .text
382 .align 2
383 .globl EXT(set_tcr)
384 LEXT(set_tcr)
385 #if defined(APPLE_ARM64_ARCH_FAMILY)
386         // Assert that T0SZ is always equal to T1SZ
387 eor x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
388 and x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
389 cbnz x1, L_set_tcr_panic
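	/*
	 * Illustrative only: the three instructions above implement, roughly,
	 *
	 *	if ((tcr ^ (tcr >> (TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)))
	 *	        & (TCR_TSZ_MASK << TCR_T0SZ_SHIFT))
	 *		panic("set_tcr: t0sz, t1sz not equal (%llx)", tcr);
	 *
	 * i.e. the T0SZ and T1SZ fields of the new TCR value must match.
	 */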
390 #if defined(KERNEL_INTEGRITY_KTRR)
391 mov x1, lr
392 bl _pinst_set_tcr
393 mov lr, x1
394 #else
395 msr TCR_EL1, x0
396 #endif /* defined(KERNEL_INTEGRITY_KTRR) */
397 isb sy
398 ret
399
400 L_set_tcr_panic:
401 PUSH_FRAME
402 sub sp, sp, #16
403 str x0, [sp]
404 adr x0, L_set_tcr_panic_str
405 BRANCH_EXTERN panic
406
407 L_set_locked_reg_panic:
408 PUSH_FRAME
409 sub sp, sp, #16
410 str x0, [sp]
411 adr x0, L_set_locked_reg_panic_str
412 BRANCH_EXTERN panic
413 b .
414
415 L_set_tcr_panic_str:
416 .asciz "set_tcr: t0sz, t1sz not equal (%llx)\n"
417
418
419 L_set_locked_reg_panic_str:
420 .asciz "attempt to set locked register: (%llx)\n"
421 #else
422 #if defined(KERNEL_INTEGRITY_KTRR)
423 mov x1, lr
424 bl _pinst_set_tcr
425 mov lr, x1
426 #else
427 msr TCR_EL1, x0
428 #endif
429 isb sy
430 ret
431 #endif // defined(APPLE_ARM64_ARCH_FAMILY)
432
433 /*
434 * MMU kernel virtual to physical address translation
435 */
436 .text
437 .align 2
438 .globl EXT(mmu_kvtop)
439 LEXT(mmu_kvtop)
440 mrs x2, DAIF // Load current DAIF
441         msr     DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)   // Disable IRQ and FIQ
442 at s1e1r, x0 // Translation Stage 1 EL1
443 mrs x1, PAR_EL1 // Read result
444 msr DAIF, x2 // Restore interrupt state
445 tbnz x1, #0, L_mmu_kvtop_invalid // Test Translation not valid
446 bfm x1, x0, #0, #11 // Add page offset
447 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
448 ret
449 L_mmu_kvtop_invalid:
450 mov x0, xzr // Return invalid
451 ret
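/*
 * Illustrative only: mmu_kvtop() above (and mmu_uvtop()/mmu_kvtop_wpreflight()
 * below) all follow the same AT/PAR_EL1 pattern; a hedged C-level sketch, with
 * at_s1e1r(), read_par_el1(), read_daif(), write_daif() and disable_irq_fiq()
 * as hypothetical helpers for the corresponding instructions:
 *
 *	uint64_t mmu_kvtop(uint64_t va) {
 *		uint64_t daif = read_daif();
 *		disable_irq_fiq();
 *		at_s1e1r(va);				// stage 1, EL1, read access
 *		uint64_t par = read_par_el1();
 *		write_daif(daif);
 *		if (par & 1)				// PAR_EL1.F: translation failed
 *			return 0;
 *		return (par & 0x0000fffffffff000ULL) | (va & 0xfffULL);
 *	}
 */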
452
453 /*
454 * MMU user virtual to physical address translation
455 */
456 .text
457 .align 2
458 .globl EXT(mmu_uvtop)
459 LEXT(mmu_uvtop)
460 lsr x8, x0, #56 // Extract top byte
461 cbnz x8, L_mmu_uvtop_invalid // Tagged pointers are invalid
462 mrs x2, DAIF // Load current DAIF
463         msr     DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)   // Disable IRQ and FIQ
464 at s1e0r, x0 // Translation Stage 1 EL0
465 mrs x1, PAR_EL1 // Read result
466 msr DAIF, x2 // Restore interrupt state
467 tbnz x1, #0, L_mmu_uvtop_invalid // Test Translation not valid
468 bfm x1, x0, #0, #11 // Add page offset
469 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
470 ret
471 L_mmu_uvtop_invalid:
472 mov x0, xzr // Return invalid
473 ret
474
475 /*
476 * MMU kernel virtual to physical address preflight write access
477 */
478 .text
479 .align 2
480 .globl EXT(mmu_kvtop_wpreflight)
481 LEXT(mmu_kvtop_wpreflight)
482 mrs x2, DAIF // Load current DAIF
483         msr     DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF)   // Disable IRQ and FIQ
484 at s1e1w, x0 // Translation Stage 1 EL1
485 mrs x1, PAR_EL1 // Read result
486 msr DAIF, x2 // Restore interrupt state
487 tbnz x1, #0, L_mmu_kvtop_wpreflight_invalid // Test Translation not valid
488 bfm x1, x0, #0, #11 // Add page offset
489 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
490 ret
491 L_mmu_kvtop_wpreflight_invalid:
492 mov x0, xzr // Return invalid
493 ret
494
495 /*
496 * SET_RECOVERY_HANDLER
497 *
498 * Sets up a page fault recovery handler
499 *
500  * arg0 - register in which to preserve the thread pointer
501  * arg1 - register in which to preserve the previous recovery handler
502  * arg2 - scratch register
503  * arg3 - recovery label
504 */
505 .macro SET_RECOVERY_HANDLER
506 mrs $0, TPIDR_EL1 // Load thread pointer
507 ldr $1, [$0, TH_RECOVER] // Save previous recovery handler
508 adrp $2, $3@page // Load the recovery handler address
509 add $2, $2, $3@pageoff
510 str $2, [$0, TH_RECOVER] // Set new recovery handler
511 .endmacro
512
513 /*
514 * CLEAR_RECOVERY_HANDLER
515 *
516 * Clears page fault handler set by SET_RECOVERY_HANDLER
517 *
518 * arg0 - thread pointer saved by SET_RECOVERY_HANDLER
519 * arg1 - old recovery handler saved by SET_RECOVERY_HANDLER
520 */
521 .macro CLEAR_RECOVERY_HANDLER
522 str $1, [$0, TH_RECOVER] // Restore the previous recovery handler
523 .endmacro
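/*
 * Typical usage of the two macros above, as in the copyio routines below
 * (a fault between SET and CLEAR vectors to the recovery label, which must
 * itself CLEAR_RECOVERY_HANDLER and POP_FRAME):
 *
 *	PUSH_FRAME
 *	SET_RECOVERY_HANDLER	x10, x11, x3, copyio_error
 *	// ...loads/stores that may fault...
 *	CLEAR_RECOVERY_HANDLER	x10, x11
 *	POP_FRAME
 *	ret
 */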
524
525
526 .text
527 .align 2
528 copyio_error:
529 CLEAR_RECOVERY_HANDLER x10, x11
530 mov x0, #EFAULT // Return an EFAULT error
531 POP_FRAME
532 ret
533
534 /*
535 * int _bcopyin(const char *src, char *dst, vm_size_t len)
536 */
537 .text
538 .align 2
539 .globl EXT(_bcopyin)
540 LEXT(_bcopyin)
541 PUSH_FRAME
542 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
543 /* If len is less than 16 bytes, just do a bytewise copy */
544 cmp x2, #16
545 b.lt 2f
546 sub x2, x2, #16
547 1:
548 /* 16 bytes at a time */
549 ldp x3, x4, [x0], #16
550 stp x3, x4, [x1], #16
551 subs x2, x2, #16
552 b.ge 1b
553 /* Fixup the len and test for completion */
554 adds x2, x2, #16
555 b.eq 3f
556 2: /* Bytewise */
557 subs x2, x2, #1
558 ldrb w3, [x0], #1
559 strb w3, [x1], #1
560 b.hi 2b
561 3:
562 CLEAR_RECOVERY_HANDLER x10, x11
563 mov x0, xzr
564 POP_FRAME
565 ret
566
567 /*
568 * int _copyin_word(const char *src, uint64_t *dst, vm_size_t len)
569 */
570 .text
571 .align 2
572 .globl EXT(_copyin_word)
573 LEXT(_copyin_word)
574 PUSH_FRAME
575 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
576 cmp x2, #4
577 b.eq L_copyin_word_4
578 cmp x2, #8
579 b.eq L_copyin_word_8
580 mov x0, EINVAL
581 b L_copying_exit
582 L_copyin_word_4:
583 ldr w8, [x0]
584 b L_copyin_word_store
585 L_copyin_word_8:
586 ldr x8, [x0]
587 L_copyin_word_store:
588 str x8, [x1]
589 mov x0, xzr
590 CLEAR_RECOVERY_HANDLER x10, x11
591 L_copying_exit:
592 POP_FRAME
593 ret
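/*
 * Illustrative only: a hedged C-level sketch of _copyin_word() above, with the
 * user address treated as a directly dereferenceable pointer and faults
 * handled by the recovery handler (which returns EFAULT):
 *
 *	int _copyin_word(const char *src, uint64_t *dst, vm_size_t len) {
 *		switch (len) {
 *		case 4:  *dst = *(const uint32_t *)src; return 0;	// zero-extended
 *		case 8:  *dst = *(const uint64_t *)src; return 0;
 *		default: return EINVAL;
 *		}
 *	}
 */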
594
595
596 /*
597 * int _bcopyout(const char *src, char *dst, vm_size_t len)
598 */
599 .text
600 .align 2
601 .globl EXT(_bcopyout)
602 LEXT(_bcopyout)
603 PUSH_FRAME
604 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
605 /* If len is less than 16 bytes, just do a bytewise copy */
606 cmp x2, #16
607 b.lt 2f
608 sub x2, x2, #16
609 1:
610 /* 16 bytes at a time */
611 ldp x3, x4, [x0], #16
612 stp x3, x4, [x1], #16
613 subs x2, x2, #16
614 b.ge 1b
615 /* Fixup the len and test for completion */
616 adds x2, x2, #16
617 b.eq 3f
618 2: /* Bytewise */
619 subs x2, x2, #1
620 ldrb w3, [x0], #1
621 strb w3, [x1], #1
622 b.hi 2b
623 3:
624 CLEAR_RECOVERY_HANDLER x10, x11
625 mov x0, xzr
626 POP_FRAME
627 ret
628
629 /*
630 * int _bcopyinstr(
631 * const user_addr_t user_addr,
632 * char *kernel_addr,
633 * vm_size_t max,
634 * vm_size_t *actual)
635 */
636 .text
637 .align 2
638 .globl EXT(_bcopyinstr)
639 LEXT(_bcopyinstr)
640 PUSH_FRAME
641 adr x4, Lcopyinstr_error // Get address for recover
642 mrs x10, TPIDR_EL1 // Get thread pointer
643 ldr x11, [x10, TH_RECOVER] // Save previous recover
644 str x4, [x10, TH_RECOVER] // Store new recover
645 mov x4, xzr // x4 - total bytes copied
646 Lcopyinstr_loop:
647 ldrb w5, [x0], #1 // Load a byte from the user source
648 strb w5, [x1], #1 // Store a byte to the kernel dest
649 add x4, x4, #1 // Increment bytes copied
650 cbz x5, Lcopyinstr_done // If this byte is null, we're done
651 cmp x4, x2 // If we're out of space, return an error
652 b.ne Lcopyinstr_loop
653 Lcopyinstr_too_long:
654 mov x5, #ENAMETOOLONG // Set current byte to error code for later return
655 Lcopyinstr_done:
656 str x4, [x3] // Return number of bytes copied
657 mov x0, x5 // Set error code (0 on success, ENAMETOOLONG on failure)
658 b Lcopyinstr_exit
659 Lcopyinstr_error:
660 mov x0, #EFAULT // Return EFAULT on error
661 Lcopyinstr_exit:
662 str x11, [x10, TH_RECOVER] // Restore old recover
663 POP_FRAME
664 ret
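/*
 * Illustrative only: a hedged C-level sketch of the copy loop above, with the
 * user address treated as a plain pointer and the fault path (EFAULT via
 * Lcopyinstr_error) omitted:
 *
 *	int _bcopyinstr(const char *src, char *dst, vm_size_t max, vm_size_t *actual) {
 *		vm_size_t copied = 0;
 *		int err = ENAMETOOLONG;
 *		do {
 *			char c = src[copied];
 *			dst[copied++] = c;
 *			if (c == '\0') { err = 0; break; }
 *		} while (copied != max);
 *		*actual = copied;	// includes the NUL terminator on success
 *		return err;
 *	}
 */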
665
666 /*
667 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
668 *
669 * Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
670 * either user or kernel memory, or 8 bytes (AArch32) from user only.
671 *
672 * x0 : address of frame to copy.
673 * x1 : kernel address at which to store data.
674 * w2 : whether to copy an AArch32 or AArch64 frame.
675 * x3 : temp
676 * x5 : temp (kernel virtual base)
677 * x9 : temp
678 * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
679 * x11 : old recovery function (set by SET_RECOVERY_HANDLER)
680 * x12, x13 : backtrace data
681 *
682 */
683 .text
684 .align 2
685 .globl EXT(copyinframe)
686 LEXT(copyinframe)
687 PUSH_FRAME
688 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
689 cbnz w2, Lcopyinframe64 // Check frame size
690 adrp x5, EXT(gVirtBase)@page // For 32-bit frame, make sure we're not trying to copy from kernel
691 add x5, x5, EXT(gVirtBase)@pageoff
692 ldr x5, [x5]
693 cmp x5, x0 // See if address is in kernel virtual range
694 b.hi Lcopyinframe32 // If below kernel virtual range, proceed.
695 mov w0, #EFAULT // Should never have a 32-bit frame in kernel virtual range
696 b Lcopyinframe_done
697
698 Lcopyinframe32:
699 ldr x12, [x0] // Copy 8 bytes
700 str x12, [x1]
701 mov w0, #0 // Success
702 b Lcopyinframe_done
703
704 Lcopyinframe64:
705 mov x3, VM_MIN_KERNEL_ADDRESS // Check if kernel address
706 orr x9, x0, TBI_MASK // Hide tags in address comparison
707 cmp x9, x3 // If in kernel address range, skip tag test
708 b.hs Lcopyinframe_valid
709 tst x0, TBI_MASK // Detect tagged pointers
710 b.eq Lcopyinframe_valid
711 mov w0, #EFAULT // Tagged address, fail
712 b Lcopyinframe_done
713 Lcopyinframe_valid:
714 ldp x12, x13, [x0] // Copy 16 bytes
715 stp x12, x13, [x1]
716 mov w0, #0 // Success
717
718 Lcopyinframe_done:
719 CLEAR_RECOVERY_HANDLER x10, x11
720 POP_FRAME
721 ret
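/*
 * Illustrative only: the 64-bit validity check above accepts either a kernel
 * address or an untagged user address; roughly,
 *
 *	if ((frame | TBI_MASK) < VM_MIN_KERNEL_ADDRESS && (frame & TBI_MASK) != 0)
 *		return EFAULT;		// tagged user pointer
 *	memcpy(kernel_addr, (const void *)frame, 16);
 *	return 0;
 *
 * ORing in TBI_MASK before the comparison keeps tagged kernel addresses inside
 * the kernel range, so only tagged user pointers are rejected.
 */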
722
723
724 /*
725 * int _emulate_swp(user_addr_t addr, uint32_t newval, uint32_t *oldval)
726 *
727  * Securely emulates the swp instruction removed from ARMv8.
728 * Returns true on success.
729 * Returns false if the user address is not user accessible.
730 *
731 * x0 : address to swap
732 * x1 : new value to store
733 * x2 : address to save old value
734 * x3 : scratch reg
735 * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
736 * x11 : old recovery handler (set by SET_RECOVERY_HANDLER)
737 * x12 : interrupt state
738 * x13 : return value
739 */
740 .text
741 .align 2
742 .globl EXT(_emulate_swp)
743 LEXT(_emulate_swp)
744 PUSH_FRAME
745 SET_RECOVERY_HANDLER x10, x11, x3, swp_error
746
747 // Perform swap
748 Lswp_try:
749 ldxr w3, [x0] // Load data at target address
750 stxr w4, w1, [x0] // Store new value to target address
751 cbnz w4, Lswp_try // Retry if store failed
752 str w3, [x2] // Save old value
753 mov x13, #1 // Set successful return value
754
755 Lswp_exit:
756 mov x0, x13 // Set return value
757 CLEAR_RECOVERY_HANDLER x10, x11
758 POP_FRAME
759 ret
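/*
 * Illustrative only: ignoring the fault path (swp_error returns false), the
 * LL/SC loop above is an atomic exchange; a hedged C11 sketch, with the user
 * address treated as a directly dereferenceable pointer:
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	int _emulate_swp(_Atomic uint32_t *addr, uint32_t newval, uint32_t *oldval) {
 *		*oldval = atomic_exchange_explicit(addr, newval, memory_order_relaxed);
 *		return 1;
 *	}
 */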
760
761 /*
762 * int _emulate_swpb(user_addr_t addr, uint32_t newval, uint32_t *oldval)
763 *
764  * Securely emulates the swpb instruction removed from ARMv8.
765 * Returns true on success.
766 * Returns false if the user address is not user accessible.
767 *
768 * x0 : address to swap
769 * x1 : new value to store
770 * x2 : address to save old value
771 * x3 : scratch reg
772 * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
773 * x11 : old recovery handler (set by SET_RECOVERY_HANDLER)
774 * x12 : interrupt state
775 * x13 : return value
776 */
777 .text
778 .align 2
779 .globl EXT(_emulate_swpb)
780 LEXT(_emulate_swpb)
781 PUSH_FRAME
782 SET_RECOVERY_HANDLER x10, x11, x3, swp_error
783
784 // Perform swap
785 Lswpb_try:
786 ldxrb w3, [x0] // Load data at target address
787 stxrb w4, w1, [x0] // Store new value to target address
788         cbnz    w4, Lswpb_try                   // Retry if store failed
789 str w3, [x2] // Save old value
790 mov x13, #1 // Set successful return value
791
792 Lswpb_exit:
793 mov x0, x13 // Set return value
794 CLEAR_RECOVERY_HANDLER x10, x11
795 POP_FRAME
796 ret
797
798 .text
799 .align 2
800 swp_error:
801 mov x0, xzr // Return false
802 CLEAR_RECOVERY_HANDLER x10, x11
803 POP_FRAME
804 ret
805
806 /*
807 * uint32_t arm_debug_read_dscr(void)
808 */
809 .text
810 .align 2
811 .globl EXT(arm_debug_read_dscr)
812 LEXT(arm_debug_read_dscr)
813 PANIC_UNIMPLEMENTED
814
815 /*
816 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
817 *
818 * Set debug registers to match the current thread state
819 * (NULL to disable). Assume 6 breakpoints and 2
820 * watchpoints, since that has been the case in all cores
821 * thus far.
822 */
823 .text
824 .align 2
825 .globl EXT(arm_debug_set_cp14)
826 LEXT(arm_debug_set_cp14)
827 PANIC_UNIMPLEMENTED
828
829
830 #if defined(APPLE_ARM64_ARCH_FAMILY)
831 /*
832 * Note: still have to ISB before executing wfi!
833 */
834 .text
835 .align 2
836 .globl EXT(arm64_prepare_for_sleep)
837 LEXT(arm64_prepare_for_sleep)
838 PUSH_FRAME
839
840 #if defined(APPLECYCLONE) || defined(APPLETYPHOON)
841 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
842 mrs x0, ARM64_REG_HID2 // Read HID2
843 orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
844 msr ARM64_REG_HID2, x0 // Write HID2
845 dsb sy
846 isb sy
847 #endif
848
849 #if __ARM_GLOBAL_SLEEP_BIT__
850 // Enable deep sleep
851 mrs x1, ARM64_REG_ACC_OVRD
852 orr x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
853 and x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
854 orr x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
855 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
856 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
857 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
858 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
859 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
860 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
861 msr ARM64_REG_ACC_OVRD, x1
862
863
864 #else
865 // Enable deep sleep
866 mov x1, ARM64_REG_CYC_CFG_deepSleep
867 msr ARM64_REG_CYC_CFG, x1
868 #endif
869 // Set "OK to power down" (<rdar://problem/12390433>)
870 mrs x0, ARM64_REG_CYC_OVRD
871 orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
872 msr ARM64_REG_CYC_OVRD, x0
873
874 Lwfi_inst:
875 dsb sy
876 isb sy
877 wfi
878 b Lwfi_inst
879
880 /*
881 * Force WFI to use clock gating only
882 *
883 */
884 .text
885 .align 2
886 .globl EXT(arm64_force_wfi_clock_gate)
887 LEXT(arm64_force_wfi_clock_gate)
888 PUSH_FRAME
889
890 mrs x0, ARM64_REG_CYC_OVRD
891 orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
892 msr ARM64_REG_CYC_OVRD, x0
893
894 POP_FRAME
895 ret
896
897
898
899 #if defined(APPLECYCLONE) || defined(APPLETYPHOON)
900
901 .text
902 .align 2
903 .globl EXT(cyclone_typhoon_prepare_for_wfi)
904
905 LEXT(cyclone_typhoon_prepare_for_wfi)
906 PUSH_FRAME
907
908 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
909 mrs x0, ARM64_REG_HID2 // Read HID2
910 orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
911 msr ARM64_REG_HID2, x0 // Write HID2
912 dsb sy
913 isb sy
914
915 POP_FRAME
916 ret
917
918
919 .text
920 .align 2
921 .globl EXT(cyclone_typhoon_return_from_wfi)
922 LEXT(cyclone_typhoon_return_from_wfi)
923 PUSH_FRAME
924
925 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
926 mrs x0, ARM64_REG_HID2 // Read HID2
927         mov     x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch)
928         bic     x0, x0, x1                      // Clear HID.DisableMTLBPrefetch
929 msr ARM64_REG_HID2, x0 // Write HID2
930 dsb sy
931 isb sy
932
933 POP_FRAME
934 ret
935 #endif
936
937 #ifdef APPLETYPHOON
938
939 #define HID0_DEFEATURES_1 0x0000a0c000064010ULL
940 #define HID1_DEFEATURES_1 0x000000004005bf20ULL
941 #define HID2_DEFEATURES_1 0x0000000000102074ULL
942 #define HID3_DEFEATURES_1 0x0000000000400003ULL
943 #define HID4_DEFEATURES_1 0x83ff00e100000268ULL
944 #define HID7_DEFEATURES_1 0x000000000000000eULL
945
946 #define HID0_DEFEATURES_2 0x0000a1c000020010ULL
947 #define HID1_DEFEATURES_2 0x000000000005d720ULL
948 #define HID2_DEFEATURES_2 0x0000000000002074ULL
949 #define HID3_DEFEATURES_2 0x0000000000400001ULL
950 #define HID4_DEFEATURES_2 0x8390000200000208ULL
951 #define HID7_DEFEATURES_2 0x0000000000000000ULL
952
953 /*
954 arg0 = target register
955 arg1 = 64-bit constant
956 */
957 .macro LOAD_UINT64
958 movz $0, #(($1 >> 48) & 0xffff), lsl #48
959 movk $0, #(($1 >> 32) & 0xffff), lsl #32
960 movk $0, #(($1 >> 16) & 0xffff), lsl #16
961 movk $0, #(($1) & 0xffff)
962 .endmacro
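/*
 * Worked example (illustrative): LOAD_UINT64 x1, 0x83ff00e100000268 expands to
 *
 *	movz	x1, #0x83ff, lsl #48
 *	movk	x1, #0x00e1, lsl #32
 *	movk	x1, #0x0000, lsl #16
 *	movk	x1, #0x0268
 *
 * building the 64-bit constant 16 bits at a time.
 */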
963
964 .text
965 .align 2
966 .globl EXT(cpu_defeatures_set)
967 LEXT(cpu_defeatures_set)
968 PUSH_FRAME
969 cmp x0, #2
970 b.eq cpu_defeatures_set_2
971 cmp x0, #1
972 b.ne cpu_defeatures_set_ret
973 LOAD_UINT64 x1, HID0_DEFEATURES_1
974 mrs x0, ARM64_REG_HID0
975 orr x0, x0, x1
976 msr ARM64_REG_HID0, x0
977 LOAD_UINT64 x1, HID1_DEFEATURES_1
978 mrs x0, ARM64_REG_HID1
979 orr x0, x0, x1
980 msr ARM64_REG_HID1, x0
981 LOAD_UINT64 x1, HID2_DEFEATURES_1
982 mrs x0, ARM64_REG_HID2
983 orr x0, x0, x1
984 msr ARM64_REG_HID2, x0
985 LOAD_UINT64 x1, HID3_DEFEATURES_1
986 mrs x0, ARM64_REG_HID3
987 orr x0, x0, x1
988 msr ARM64_REG_HID3, x0
989 LOAD_UINT64 x1, HID4_DEFEATURES_1
990 mrs x0, ARM64_REG_HID4
991 orr x0, x0, x1
992 msr ARM64_REG_HID4, x0
993 LOAD_UINT64 x1, HID7_DEFEATURES_1
994 mrs x0, ARM64_REG_HID7
995 orr x0, x0, x1
996 msr ARM64_REG_HID7, x0
997 dsb sy
998 isb sy
999 b cpu_defeatures_set_ret
1000 cpu_defeatures_set_2:
1001 LOAD_UINT64 x1, HID0_DEFEATURES_2
1002 mrs x0, ARM64_REG_HID0
1003 orr x0, x0, x1
1004 msr ARM64_REG_HID0, x0
1005 LOAD_UINT64 x1, HID1_DEFEATURES_2
1006 mrs x0, ARM64_REG_HID1
1007 orr x0, x0, x1
1008 msr ARM64_REG_HID1, x0
1009 LOAD_UINT64 x1, HID2_DEFEATURES_2
1010 mrs x0, ARM64_REG_HID2
1011 orr x0, x0, x1
1012 msr ARM64_REG_HID2, x0
1013 LOAD_UINT64 x1, HID3_DEFEATURES_2
1014 mrs x0, ARM64_REG_HID3
1015 orr x0, x0, x1
1016 msr ARM64_REG_HID3, x0
1017 LOAD_UINT64 x1, HID4_DEFEATURES_2
1018 mrs x0, ARM64_REG_HID4
1019 orr x0, x0, x1
1020 msr ARM64_REG_HID4, x0
1021 LOAD_UINT64 x1, HID7_DEFEATURES_2
1022 mrs x0, ARM64_REG_HID7
1023 orr x0, x0, x1
1024 msr ARM64_REG_HID7, x0
1025 dsb sy
1026 isb sy
1027 b cpu_defeatures_set_ret
1028 cpu_defeatures_set_ret:
1029 POP_FRAME
1030 ret
1031 #endif
1032
1033 #endif
1034
1035 #ifdef MONITOR
1036 /*
1037 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
1038  *                            uintptr_t arg2, uintptr_t arg3)
1039 *
1040 * Call the EL3 monitor with 4 arguments in registers
1041 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
1042 * registers are preserved, temporary registers are not. Parameters and results are passed in
1043 * the usual manner.
1044 */
1045 .text
1046 .align 2
1047 .globl EXT(monitor_call)
1048 LEXT(monitor_call)
1049 smc 0x11
1050 ret
1051 #endif
1052
1053 /* vim: set sw=4 ts=4: */