1/*
2 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <machine/asm.h>
30#include <arm64/machine_machdep.h>
31#include <arm64/proc_reg.h>
32#include <arm/pmap.h>
33#include <pexpert/arm64/board_config.h>
34#include <sys/errno.h>
35#include "assym.s"
36
37
  38
39/* uint32_t get_fpscr(void):
40 * Returns (FPSR | FPCR).
41 */
42 .align 2
43 .globl EXT(get_fpscr)
44LEXT(get_fpscr)
45#if __ARM_VFP__
46 mrs x1, FPSR // Grab FPSR
47 mov x4, #(FPSR_MASK & 0xFFFF)
48 mov x5, #(FPSR_MASK & 0xFFFF0000)
49 orr x0, x4, x5
50 and x1, x1, x0 // Be paranoid, and clear bits we expect to
51 // be clear
52 mrs x2, FPCR // Grab FPCR
53 mov x4, #(FPCR_MASK & 0xFFFF)
54 mov x5, #(FPCR_MASK & 0xFFFF0000)
55 orr x0, x4, x5
56 and x2, x2, x0 // Be paranoid, and clear bits we expect to
57 // be clear
58 orr x0, x1, x2 // OR them to get FPSCR equivalent state
59#else
60 mov x0, #0
61#endif
62 ret
63 .align 2
64 .globl EXT(set_fpscr)
65/* void set_fpscr(uint32_t value):
66 * Set the FPCR and FPSR registers, based on the given value; a
67 * noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
68 * and FPCR are not responsible for condition codes.
69 */
70LEXT(set_fpscr)
71#if __ARM_VFP__
72 mov x4, #(FPSR_MASK & 0xFFFF)
73 mov x5, #(FPSR_MASK & 0xFFFF0000)
74 orr x1, x4, x5
75 and x1, x1, x0 // Clear the bits that don't apply to FPSR
76 mov x4, #(FPCR_MASK & 0xFFFF)
77 mov x5, #(FPCR_MASK & 0xFFFF0000)
78 orr x2, x4, x5
79 and x2, x2, x0 // Clear the bits that don't apply to FPCR
 80 msr FPSR, x1 // Write FPSR
 81 msr FPCR, x2 // Write FPCR
82 dsb ish // FPCR requires synchronization
83#endif
84 ret
85
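/*
 * Illustrative sketch (hypothetical helper, assuming FPSR_MASK and FPCR_MASK
 * as defined in arm64/proc_reg.h): get_fpscr() above ORs the sanitized FPSR
 * and FPCR into a single 32-bit value, mirroring the AArch32 FPSCR view, and
 * set_fpscr() splits that value back out with the same masks.
 *
 *	static inline uint32_t
 *	fpscr_merge_sketch(uint32_t fpsr, uint32_t fpcr)
 *	{
 *		return (fpsr & FPSR_MASK) | (fpcr & FPCR_MASK);
 *	}
 */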
86/*
87 * void update_mdscr(unsigned long clear, unsigned long set)
88 * Clears and sets the specified bits in MDSCR_EL1.
89 *
90 * Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
91 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
 92 * self-hosted user mode debug. Any checks before the MSR can be skipped with ROP,
 93 * so we need to put the checks after the MSR where they can't be skipped. That
 94 * still leaves a small window if a breakpoint is set on the instruction
 95 * immediately after the MSR. To handle that, we also do a check and then set of
96 * the breakpoint control registers. This allows us to guarantee that a given
97 * core will never have both KDE set and a breakpoint targeting EL1.
98 *
99 * If KDE gets set, unset it and then panic
100 */
101 .align 2
102 .globl EXT(update_mdscr)
103LEXT(update_mdscr)
104 mov x4, #0
105 mrs x2, MDSCR_EL1
106 bic x2, x2, x0
107 orr x2, x2, x1
1081:
109 bic x2, x2, #0x2000
110 msr MDSCR_EL1, x2
111#if defined(CONFIG_KERNEL_INTEGRITY)
112 /*
113 * verify KDE didn't get set (including via ROP)
114 * If set, clear it and then panic
115 */
116 ands x3, x2, #0x2000
117 orr x4, x4, x3
118 bne 1b
119 cmp x4, xzr
120 b.ne Lupdate_mdscr_panic
121#endif
122 ret
123
124Lupdate_mdscr_panic:
125 adrp x0, Lupdate_mdscr_panic_str@page
126 add x0, x0, Lupdate_mdscr_panic_str@pageoff
127 b EXT(panic)
128 b .
129
130Lupdate_mdscr_panic_str:
131 .asciz "MDSCR.KDE was set"
132
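/*
 * Illustrative sketch (hypothetical helper, assuming MDSCR.KDE is bit 13,
 * i.e. the 0x2000 cleared above): the register update performed by
 * update_mdscr() is new = (old & ~clear) | set, with KDE stripped
 * unconditionally before the write.
 *
 *	#define MDSCR_KDE_SKETCH	(1ULL << 13)	// 0x2000, as used above
 *
 *	static inline uint64_t
 *	mdscr_next_sketch(uint64_t old, uint64_t clear, uint64_t set)
 *	{
 *		return ((old & ~clear) | set) & ~MDSCR_KDE_SKETCH;
 *	}
 */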
133
134#if __ARM_KERNEL_PROTECT__
135/*
136 * __ARM_KERNEL_PROTECT__ adds two complications to TLB management:
137 *
138 * 1. As each pmap has two ASIDs, every TLB operation that targets an ASID must
139 * target both ASIDs for the pmap that owns the target ASID.
140 *
141 * 2. Any TLB operation targeting the kernel_pmap ASID (ASID 0) must target all
142 * ASIDs (as kernel_pmap mappings may be referenced while using an ASID that
143 * belongs to another pmap). We expect these routines to be called with the
144 * EL0 ASID for the target; not the EL1 ASID.
145 */
146#endif /* __ARM_KERNEL_PROTECT__ */
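/*
 * Illustrative sketch (assumption inferred from the TLBI macros below and
 * from set_mmu_ttb): under __ARM_KERNEL_PROTECT__ a pmap's EL0 ASID is even
 * and its EL1 companion is the same value with the low bit set, so any
 * ASID-targeted flush must cover both.
 *
 *	static inline uint16_t
 *	el1_asid_for_sketch(uint16_t el0_asid)
 *	{
 *		return el0_asid | 1;	// "All EL1-mode ASIDs are odd"
 *	}
 */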
 147
148.macro SYNC_TLB_FLUSH
149 dsb ish
150 isb sy
151.endmacro
152
153
154/*
155 * void sync_tlb_flush(void)
156 *
157 * Synchronize one or more prior TLB flush operations
158 */
159 .text
160 .align 2
161 .globl EXT(sync_tlb_flush)
162LEXT(sync_tlb_flush)
163 SYNC_TLB_FLUSH
164 ret
165
166
167.macro FLUSH_MMU_TLB
168 tlbi vmalle1is
169.endmacro
170/*
171 * void flush_mmu_tlb_async(void)
172 *
173 * Flush all TLBs, don't wait for completion
174 */
175 .text
176 .align 2
177 .globl EXT(flush_mmu_tlb_async)
178LEXT(flush_mmu_tlb_async)
179 FLUSH_MMU_TLB
180 ret
181
182/*
183 * void flush_mmu_tlb(void)
184 *
185 * Flush all TLBs
186 */
187 .text
188 .align 2
189 .globl EXT(flush_mmu_tlb)
190LEXT(flush_mmu_tlb)
191 FLUSH_MMU_TLB
192 SYNC_TLB_FLUSH
193 ret
194
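/*
 * Illustrative sketch (hypothetical caller): the *_async variants in this
 * file let a caller batch several invalidates and pay for the DSB/ISB in
 * sync_tlb_flush() once, equivalent to calling the combined routine above
 * for each operation.
 *
 *	void
 *	flush_two_asids_sketch(uint64_t asid_op_a, uint64_t asid_op_b)
 *	{
 *		flush_mmu_tlb_asid_async(asid_op_a);
 *		flush_mmu_tlb_asid_async(asid_op_b);
 *		sync_tlb_flush();	// one barrier covers both invalidates
 *	}
 */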
195.macro FLUSH_CORE_TLB
196 tlbi vmalle1
197.endmacro
198
 199/*
 200 * void flush_core_tlb_async(void)
 201 *
 202 * Flush local core TLB, don't wait for completion
203 */
204 .text
205 .align 2
206 .globl EXT(flush_core_tlb_async)
207LEXT(flush_core_tlb_async)
208 FLUSH_CORE_TLB
209 ret
210
211/*
 212 * void flush_core_tlb(void)
 213 *
 214 * Flush local core TLB
215 */
216 .text
217 .align 2
218 .globl EXT(flush_core_tlb)
219LEXT(flush_core_tlb)
220 FLUSH_CORE_TLB
221 SYNC_TLB_FLUSH
222 ret
223
224.macro FLUSH_MMU_TLB_ALLENTRIES
225#if __ARM_16K_PG__
226 and x0, x0, #~0x3
227
228 /*
229 * The code below is not necessarily correct. From an overview of
230 * the client code, the expected contract for TLB flushes is that
231 * we will expand from an "address, length" pair to "start address,
232 * end address" in the course of a TLB flush. This suggests that
233 * a flush for "X, X+4" is actually only asking for a flush of a
234 * single 16KB page. At the same time, we'd like to be prepared
235 * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
236 * number to a 16KB page boundary. This should deal correctly with
237 * unaligned inputs.
238 *
 239 * If our expectations about client behavior are wrong, however, this
240 * will lead to occasional TLB corruption on platforms with 16KB
241 * pages.
242 */
243 add x1, x1, #0x3
244 and x1, x1, #~0x3
245#endif
 2461: // Lflush_mmu_tlb_allentries_loop:
247 tlbi vaae1is, x0
248 add x0, x0, #(ARM_PGBYTES / 4096) // Units are 4KB pages, as defined by the ISA
249 cmp x0, x1
250 b.lt 1b // Lflush_mmu_tlb_allentries_loop
251.endmacro
252
253/*
 254 * void flush_mmu_tlb_allentries_async(uint64_t, uint64_t)
 255 *
 256 * Flush TLB entries, don't wait for completion
257 */
258 .text
259 .align 2
260 .globl EXT(flush_mmu_tlb_allentries_async)
261LEXT(flush_mmu_tlb_allentries_async)
262 FLUSH_MMU_TLB_ALLENTRIES
263 ret
264
265/*
266 * void flush_mmu_tlb_allentries(uint64_t, uint64_t)
267 *
268 * Flush TLB entries
269 */
270 .globl EXT(flush_mmu_tlb_allentries)
271LEXT(flush_mmu_tlb_allentries)
272 FLUSH_MMU_TLB_ALLENTRIES
273 SYNC_TLB_FLUSH
274 ret
275
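/*
 * Illustrative sketch (hypothetical helper): the two operands above are 4KB
 * page numbers. On 16KB-page configurations the macro rounds them to 16KB
 * boundaries exactly as below, then steps by ARM_PGBYTES / 4096 (i.e. 4)
 * per TLBI.
 *
 *	static inline void
 *	align_16k_range_sketch(uint64_t *start, uint64_t *end)
 *	{
 *		*start &= ~0x3ULL;			// round start down to a 16KB page
 *		*end = (*end + 0x3ULL) & ~0x3ULL;	// round a ragged end up
 *	}
 */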
276.macro FLUSH_MMU_TLB_ENTRY
277#if __ARM_KERNEL_PROTECT__
278 /*
279 * If we are flushing ASID 0, this is a kernel operation. With this
280 * ASID scheme, this means we should flush all ASIDs.
281 */
282 lsr x2, x0, #TLBI_ASID_SHIFT
283 cmp x2, #0
 284 b.eq 1f // Lflush_mmu_tlb_entry_globally
285
286 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
287 tlbi vae1is, x0
288 orr x0, x0, #(1 << TLBI_ASID_SHIFT)
289#endif /* __ARM_KERNEL_PROTECT__ */
 290 tlbi vae1is, x0
 291#if __ARM_KERNEL_PROTECT__
292 b 2f // Lflush_mmu_tlb_entry_done
2931: // Lflush_mmu_tlb_entry_globally:
 294 tlbi vaae1is, x0
 2952: // Lflush_mmu_tlb_entry_done
 296#endif /* __ARM_KERNEL_PROTECT__ */
297.endmacro
298/*
299 * void flush_mmu_tlb_entry_async(uint64_t)
300 *
301 * Flush TLB entry, don't wait for completion
302 */
303 .text
304 .align 2
305 .globl EXT(flush_mmu_tlb_entry_async)
306LEXT(flush_mmu_tlb_entry_async)
307 FLUSH_MMU_TLB_ENTRY
308 ret
309
310/*
 311 * void flush_mmu_tlb_entry(uint64_t)
 312 *
 313 * Flush TLB entry
314 */
315 .text
316 .align 2
317 .globl EXT(flush_mmu_tlb_entry)
318LEXT(flush_mmu_tlb_entry)
319 FLUSH_MMU_TLB_ENTRY
320 SYNC_TLB_FLUSH
321 ret
322
323.macro FLUSH_MMU_TLB_ENTRIES
324#if __ARM_16K_PG__
325 and x0, x0, #~0x3
326
327 /*
328 * The code below is not necessarily correct. From an overview of
329 * the client code, the expected contract for TLB flushes is that
330 * we will expand from an "address, length" pair to "start address,
331 * end address" in the course of a TLB flush. This suggests that
332 * a flush for "X, X+4" is actually only asking for a flush of a
333 * single 16KB page. At the same time, we'd like to be prepared
334 * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
335 * number to a 16KB page boundary. This should deal correctly with
336 * unaligned inputs.
337 *
 338 * If our expectations about client behavior are wrong, however, this
339 * will lead to occasional TLB corruption on platforms with 16KB
340 * pages.
341 */
342 add x1, x1, #0x3
343 and x1, x1, #~0x3
 344#endif /* __ARM_16K_PG__ */
345#if __ARM_KERNEL_PROTECT__
346 /*
347 * If we are flushing ASID 0, this is a kernel operation. With this
348 * ASID scheme, this means we should flush all ASIDs.
349 */
350 lsr x2, x0, #TLBI_ASID_SHIFT
351 cmp x2, #0
 352 b.eq 2f // Lflush_mmu_tlb_entries_globally_loop
 353
354 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
355#endif /* __ARM_KERNEL_PROTECT__ */
 3561: // Lflush_mmu_tlb_entries_loop
 357 tlbi vae1is, x0
358#if __ARM_KERNEL_PROTECT__
359 orr x0, x0, #(1 << TLBI_ASID_SHIFT)
360 tlbi vae1is, x0
361 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
362#endif /* __ARM_KERNEL_PROTECT__ */
363 add x0, x0, #(ARM_PGBYTES / 4096) // Units are pages
364 cmp x0, x1
 365 b.lt 1b // Lflush_mmu_tlb_entries_loop
 366#if __ARM_KERNEL_PROTECT__
367 b 3f // Lflush_mmu_tlb_entries_done
3682: // Lflush_mmu_tlb_entries_globally_loop:
 369 tlbi vaae1is, x0
370 add x0, x0, #(ARM_PGBYTES / 4096) // Units are pages
371 cmp x0, x1
372 b.lt 2b // Lflush_mmu_tlb_entries_globally_loop
3733: // Lflush_mmu_tlb_entries_done
 374#endif /* __ARM_KERNEL_PROTECT__ */
 375.endmacro
376
377/*
 378 * void flush_mmu_tlb_entries_async(uint64_t, uint64_t)
 379 *
 380 * Flush TLB entries, don't wait for completion
381 */
382 .text
383 .align 2
384 .globl EXT(flush_mmu_tlb_entries_async)
385LEXT(flush_mmu_tlb_entries_async)
386 FLUSH_MMU_TLB_ENTRIES
387 ret
388
389/*
390 * void flush_mmu_tlb_entries(uint64_t, uint64_t)
391 *
392 * Flush TLB entries
393 */
394 .text
395 .align 2
396 .globl EXT(flush_mmu_tlb_entries)
397LEXT(flush_mmu_tlb_entries)
398 FLUSH_MMU_TLB_ENTRIES
399 SYNC_TLB_FLUSH
400 ret
401
402.macro FLUSH_MMU_TLB_ASID
403#if __ARM_KERNEL_PROTECT__
404 /*
405 * If we are flushing ASID 0, this is a kernel operation. With this
406 * ASID scheme, this means we should flush all ASIDs.
407 */
408 lsr x1, x0, #TLBI_ASID_SHIFT
409 cmp x1, #0
 410 b.eq 1f // Lflush_mmu_tlb_globally
411
412 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
413 tlbi aside1is, x0
414 orr x0, x0, #(1 << TLBI_ASID_SHIFT)
415#endif /* __ARM_KERNEL_PROTECT__ */
 416 tlbi aside1is, x0
 417#if __ARM_KERNEL_PROTECT__
418 b 2f // Lflush_mmu_tlb_asid_done
4191: // Lflush_mmu_tlb_globally:
 420 tlbi vmalle1is
 4212: // Lflush_mmu_tlb_asid_done:
 422#endif /* __ARM_KERNEL_PROTECT__ */
 423.endmacro
424
425/*
 426 * void flush_mmu_tlb_asid_async(uint64_t)
 427 *
 428 * Flush TLB entries for requested asid, don't wait for completion
429 */
430 .text
431 .align 2
432 .globl EXT(flush_mmu_tlb_asid_async)
433LEXT(flush_mmu_tlb_asid_async)
434 FLUSH_MMU_TLB_ASID
435 ret
436
437/*
438 * void flush_mmu_tlb_asid(uint64_t)
439 *
 440 * Flush TLB entries for requested asid
441 */
442 .text
443 .align 2
444 .globl EXT(flush_mmu_tlb_asid)
445LEXT(flush_mmu_tlb_asid)
446 FLUSH_MMU_TLB_ASID
447 SYNC_TLB_FLUSH
448 ret
449
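/*
 * Illustrative sketch (hypothetical helper, assuming TLBI_ASID_SHIFT from
 * arm64/proc_reg.h): the argument to the *_asid routines is a TLBI operand
 * with the ASID in the upper field of the register, as the macros above
 * expect.
 *
 *	static inline uint64_t
 *	tlbi_asid_operand_sketch(uint16_t asid)
 *	{
 *		return (uint64_t)asid << TLBI_ASID_SHIFT;
 *	}
 *
 *	// e.g. flush_mmu_tlb_asid(tlbi_asid_operand_sketch(asid));
 */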
450.macro FLUSH_CORE_TLB_ASID
451#if __ARM_KERNEL_PROTECT__
452 /*
453 * If we are flushing ASID 0, this is a kernel operation. With this
454 * ASID scheme, this means we should flush all ASIDs.
455 */
456 lsr x1, x0, #TLBI_ASID_SHIFT
457 cmp x1, #0
 458 b.eq 1f // Lflush_core_tlb_asid_globally
459
460 bic x0, x0, #(1 << TLBI_ASID_SHIFT)
461 tlbi aside1, x0
462 orr x0, x0, #(1 << TLBI_ASID_SHIFT)
463#endif /* __ARM_KERNEL_PROTECT__ */
 464 tlbi aside1, x0
 465#if __ARM_KERNEL_PROTECT__
466 b 2f // Lflush_core_tlb_asid_done
4671: // Lflush_core_tlb_asid_globally:
 468 tlbi vmalle1
 4692: // Lflush_core_tlb_asid_done:
 470#endif /* __ARM_KERNEL_PROTECT__ */
471.endmacro
472
473/*
474 * void flush_core_tlb_asid_async(uint64_t)
475 *
476 * Flush TLB entries for core for requested asid, don't wait for completion
477 */
478 .text
479 .align 2
480 .globl EXT(flush_core_tlb_asid_async)
481LEXT(flush_core_tlb_asid_async)
482 FLUSH_CORE_TLB_ASID
483 ret
484/*
485 * void flush_core_tlb_asid(uint64_t)
486 *
487 * Flush TLB entries for core for requested asid
488 */
489 .text
490 .align 2
491 .globl EXT(flush_core_tlb_asid)
492LEXT(flush_core_tlb_asid)
493 FLUSH_CORE_TLB_ASID
494 SYNC_TLB_FLUSH
495 ret
496
497/*
498 * Set MMU Translation Table Base Alternate
499 */
500 .text
501 .align 2
502 .globl EXT(set_mmu_ttb_alternate)
503LEXT(set_mmu_ttb_alternate)
504 dsb sy
505#if defined(KERNEL_INTEGRITY_KTRR)
506 mov x1, lr
507 bl EXT(pinst_set_ttbr1)
508 mov lr, x1
509#else
510 msr TTBR1_EL1, x0
511#endif /* defined(KERNEL_INTEGRITY_KTRR) */
512 isb sy
513 ret
514
515 .text
516 .align 2
517 .globl EXT(set_mmu_ttb)
518LEXT(set_mmu_ttb)
519#if __ARM_KERNEL_PROTECT__
520 /* All EL1-mode ASIDs are odd. */
521 orr x0, x0, #(1 << TTBR_ASID_SHIFT)
522#endif /* __ARM_KERNEL_PROTECT__ */
523 dsb ish
524 msr TTBR0_EL1, x0
525 isb sy
526 ret
527
528/*
529 * set AUX control register
530 */
531 .text
532 .align 2
533 .globl EXT(set_aux_control)
534LEXT(set_aux_control)
535 msr ACTLR_EL1, x0
536 // Synchronize system
537 dsb sy
538 isb sy
539 ret
540
541#if __ARM_KERNEL_PROTECT__
542 .text
543 .align 2
544 .globl EXT(set_vbar_el1)
545LEXT(set_vbar_el1)
546#if defined(KERNEL_INTEGRITY_KTRR)
547 b EXT(pinst_set_vbar)
548#else
549 msr VBAR_EL1, x0
550 ret
551#endif
552#endif /* __ARM_KERNEL_PROTECT__ */
553
554
555/*
556 * set translation control register
557 */
558 .text
559 .align 2
560 .globl EXT(set_tcr)
561LEXT(set_tcr)
562#if defined(APPLE_ARM64_ARCH_FAMILY)
 563 // Assert that T0SZ is always equal to T1SZ
564 eor x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
565 and x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
566 cbnz x1, L_set_tcr_panic
567#if defined(KERNEL_INTEGRITY_KTRR)
568 mov x1, lr
569 bl _pinst_set_tcr
570 mov lr, x1
571#else
572 msr TCR_EL1, x0
 573#endif /* defined(KERNEL_INTEGRITY_KTRR) */
574 isb sy
575 ret
576
577L_set_tcr_panic:
578 PUSH_FRAME
579 sub sp, sp, #16
580 str x0, [sp]
581 adr x0, L_set_tcr_panic_str
582 BRANCH_EXTERN panic
583
584L_set_locked_reg_panic:
585 PUSH_FRAME
586 sub sp, sp, #16
587 str x0, [sp]
588 adr x0, L_set_locked_reg_panic_str
589 BRANCH_EXTERN panic
590 b .
591
592L_set_tcr_panic_str:
593 .asciz "set_tcr: t0sz, t1sz not equal (%llx)\n"
594
595
596L_set_locked_reg_panic_str:
597 .asciz "attempt to set locked register: (%llx)\n"
598#else
599#if defined(KERNEL_INTEGRITY_KTRR)
600 mov x1, lr
601 bl _pinst_set_tcr
602 mov lr, x1
603#else
604 msr TCR_EL1, x0
605#endif
606 isb sy
607 ret
608#endif // defined(APPLE_ARM64_ARCH_FAMILY)
609
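/*
 * Illustrative sketch (hypothetical helper, assuming the TCR_* constants from
 * arm64/proc_reg.h): the shift/XOR assertion in the APPLE_ARM64_ARCH_FAMILY
 * path above is equivalent to comparing the two size fields directly.
 *
 *	static inline int
 *	tcr_tsz_fields_match_sketch(uint64_t tcr)
 *	{
 *		uint64_t t0sz = (tcr >> TCR_T0SZ_SHIFT) & TCR_TSZ_MASK;
 *		uint64_t t1sz = (tcr >> TCR_T1SZ_SHIFT) & TCR_TSZ_MASK;
 *		return t0sz == t1sz;	// set_tcr() panics when these differ
 *	}
 */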
610/*
611 * MMU kernel virtual to physical address translation
612 */
613 .text
614 .align 2
615 .globl EXT(mmu_kvtop)
616LEXT(mmu_kvtop)
617 mrs x2, DAIF // Load current DAIF
618 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
619 at s1e1r, x0 // Translation Stage 1 EL1
620 mrs x1, PAR_EL1 // Read result
621 msr DAIF, x2 // Restore interrupt state
622 tbnz x1, #0, L_mmu_kvtop_invalid // Test Translation not valid
623 bfm x1, x0, #0, #11 // Add page offset
624 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
625 ret
626L_mmu_kvtop_invalid:
 627 mov x0, #0 // Return invalid
628 ret
629
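/*
 * Illustrative sketch (hypothetical helper): the PAR_EL1 handling above in C.
 * Bit 0 of PAR_EL1 is the fault flag; on success the physical address is the
 * PAR frame bits [47:12] combined with the low 12 bits of the input VA.
 *
 *	static inline uint64_t
 *	par_to_pa_sketch(uint64_t par, uint64_t va)
 *	{
 *		if (par & 1) {
 *			return 0;	// translation was not valid
 *		}
 *		return (par & 0x0000fffffffff000ULL) | (va & 0xfffULL);
 *	}
 */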
630/*
631 * MMU user virtual to physical address translation
632 */
633 .text
634 .align 2
635 .globl EXT(mmu_uvtop)
636LEXT(mmu_uvtop)
637 lsr x8, x0, #56 // Extract top byte
638 cbnz x8, L_mmu_uvtop_invalid // Tagged pointers are invalid
639 mrs x2, DAIF // Load current DAIF
640 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
641 at s1e0r, x0 // Translation Stage 1 EL0
642 mrs x1, PAR_EL1 // Read result
643 msr DAIF, x2 // Restore interrupt state
644 tbnz x1, #0, L_mmu_uvtop_invalid // Test Translation not valid
645 bfm x1, x0, #0, #11 // Add page offset
646 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
647 ret
648L_mmu_uvtop_invalid:
 649 mov x0, #0 // Return invalid
650 ret
651
652/*
653 * MMU kernel virtual to physical address preflight write access
654 */
655 .text
656 .align 2
657 .globl EXT(mmu_kvtop_wpreflight)
658LEXT(mmu_kvtop_wpreflight)
659 mrs x2, DAIF // Load current DAIF
660 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
661 at s1e1w, x0 // Translation Stage 1 EL1
662 mrs x1, PAR_EL1 // Read result
663 msr DAIF, x2 // Restore interrupt state
664 tbnz x1, #0, L_mmu_kvtop_wpreflight_invalid // Test Translation not valid
665 bfm x1, x0, #0, #11 // Add page offset
666 and x0, x1, #0x0000ffffffffffff // Clear non-address bits
667 ret
668L_mmu_kvtop_wpreflight_invalid:
 669 mov x0, #0 // Return invalid
670 ret
671
672/*
673 * SET_RECOVERY_HANDLER
674 *
675 * Sets up a page fault recovery handler
676 *
677 * arg0 - persisted thread pointer
678 * arg1 - persisted recovery handler
679 * arg2 - scratch reg
680 * arg3 - recovery label
681 */
682.macro SET_RECOVERY_HANDLER
683 mrs $0, TPIDR_EL1 // Load thread pointer
684 ldr $1, [$0, TH_RECOVER] // Save previous recovery handler
685 adrp $2, $3@page // Load the recovery handler address
686 add $2, $2, $3@pageoff
687 str $2, [$0, TH_RECOVER] // Set new recovery handler
688.endmacro
689
690/*
691 * CLEAR_RECOVERY_HANDLER
692 *
693 * Clears page fault handler set by SET_RECOVERY_HANDLER
694 *
695 * arg0 - thread pointer saved by SET_RECOVERY_HANDLER
696 * arg1 - old recovery handler saved by SET_RECOVERY_HANDLER
697 */
698.macro CLEAR_RECOVERY_HANDLER
699 str $1, [$0, TH_RECOVER] // Restore the previous recovery handler
700.endmacro
701
702
703 .text
704 .align 2
705copyio_error:
706 CLEAR_RECOVERY_HANDLER x10, x11
707 mov x0, #EFAULT // Return an EFAULT error
708 POP_FRAME
 709 ARM64_STACK_EPILOG
710
711/*
712 * int _bcopyin(const char *src, char *dst, vm_size_t len)
713 */
714 .text
715 .align 2
716 .globl EXT(_bcopyin)
717LEXT(_bcopyin)
 718 ARM64_STACK_PROLOG
719 PUSH_FRAME
720 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
721 /* If len is less than 16 bytes, just do a bytewise copy */
722 cmp x2, #16
723 b.lt 2f
724 sub x2, x2, #16
7251:
726 /* 16 bytes at a time */
727 ldp x3, x4, [x0], #16
728 stp x3, x4, [x1], #16
729 subs x2, x2, #16
730 b.ge 1b
731 /* Fixup the len and test for completion */
732 adds x2, x2, #16
733 b.eq 3f
7342: /* Bytewise */
735 subs x2, x2, #1
736 ldrb w3, [x0], #1
737 strb w3, [x1], #1
738 b.hi 2b
7393:
740 CLEAR_RECOVERY_HANDLER x10, x11
 741 mov x0, #0
 742 POP_FRAME
 743 ARM64_STACK_EPILOG
744
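/*
 * Illustrative sketch (hypothetical helper): the copy strategy used by
 * _bcopyin/_bcopyout above, expressed in C. The real routines run with the
 * recovery handler installed, so a faulting user access unwinds to
 * copyio_error instead of panicking.
 *
 *	static void
 *	copy_16_then_bytewise_sketch(const char *src, char *dst, unsigned long len)
 *	{
 *		while (len >= 16) {
 *			__builtin_memcpy(dst, src, 16);	// ldp/stp pair above
 *			src += 16; dst += 16; len -= 16;
 *		}
 *		while (len-- > 0) {
 *			*dst++ = *src++;		// ldrb/strb tail
 *		}
 *	}
 */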
745/*
746 * int _copyin_word(const char *src, uint64_t *dst, vm_size_t len)
747 */
748 .text
749 .align 2
750 .globl EXT(_copyin_word)
751LEXT(_copyin_word)
 752 ARM64_STACK_PROLOG
753 PUSH_FRAME
754 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
755 cmp x2, #4
756 b.eq L_copyin_word_4
757 cmp x2, #8
758 b.eq L_copyin_word_8
759 mov x0, EINVAL
760 b L_copying_exit
761L_copyin_word_4:
762 ldr w8, [x0]
763 b L_copyin_word_store
764L_copyin_word_8:
765 ldr x8, [x0]
766L_copyin_word_store:
767 str x8, [x1]
 768 mov x0, #0
769 CLEAR_RECOVERY_HANDLER x10, x11
770L_copying_exit:
771 POP_FRAME
772 ARM64_STACK_EPILOG
773
774
775
776/*
777 * int _bcopyout(const char *src, char *dst, vm_size_t len)
778 */
779 .text
780 .align 2
781 .globl EXT(_bcopyout)
782LEXT(_bcopyout)
 783 ARM64_STACK_PROLOG
784 PUSH_FRAME
785 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
786 /* If len is less than 16 bytes, just do a bytewise copy */
787 cmp x2, #16
788 b.lt 2f
789 sub x2, x2, #16
7901:
791 /* 16 bytes at a time */
792 ldp x3, x4, [x0], #16
793 stp x3, x4, [x1], #16
794 subs x2, x2, #16
795 b.ge 1b
796 /* Fixup the len and test for completion */
797 adds x2, x2, #16
798 b.eq 3f
7992: /* Bytewise */
800 subs x2, x2, #1
801 ldrb w3, [x0], #1
802 strb w3, [x1], #1
803 b.hi 2b
8043:
805 CLEAR_RECOVERY_HANDLER x10, x11
 806 mov x0, #0
 807 POP_FRAME
 808 ARM64_STACK_EPILOG
809
810/*
811 * int _bcopyinstr(
812 * const user_addr_t user_addr,
813 * char *kernel_addr,
814 * vm_size_t max,
815 * vm_size_t *actual)
816 */
817 .text
818 .align 2
819 .globl EXT(_bcopyinstr)
820LEXT(_bcopyinstr)
 821 ARM64_STACK_PROLOG
822 PUSH_FRAME
823 adr x4, Lcopyinstr_error // Get address for recover
824 mrs x10, TPIDR_EL1 // Get thread pointer
825 ldr x11, [x10, TH_RECOVER] // Save previous recover
826 str x4, [x10, TH_RECOVER] // Store new recover
 827 mov x4, #0 // x4 - total bytes copied
828Lcopyinstr_loop:
829 ldrb w5, [x0], #1 // Load a byte from the user source
830 strb w5, [x1], #1 // Store a byte to the kernel dest
831 add x4, x4, #1 // Increment bytes copied
832 cbz x5, Lcopyinstr_done // If this byte is null, we're done
833 cmp x4, x2 // If we're out of space, return an error
834 b.ne Lcopyinstr_loop
835Lcopyinstr_too_long:
836 mov x5, #ENAMETOOLONG // Set current byte to error code for later return
837Lcopyinstr_done:
838 str x4, [x3] // Return number of bytes copied
839 mov x0, x5 // Set error code (0 on success, ENAMETOOLONG on failure)
840 b Lcopyinstr_exit
841Lcopyinstr_error:
842 mov x0, #EFAULT // Return EFAULT on error
843Lcopyinstr_exit:
844 str x11, [x10, TH_RECOVER] // Restore old recover
845 POP_FRAME
 846 ARM64_STACK_EPILOG
847
848/*
849 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
850 *
851 * Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
852 * either user or kernel memory, or 8 bytes (AArch32) from user only.
853 *
854 * x0 : address of frame to copy.
855 * x1 : kernel address at which to store data.
856 * w2 : whether to copy an AArch32 or AArch64 frame.
857 * x3 : temp
858 * x5 : temp (kernel virtual base)
859 * x9 : temp
860 * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
861 * x11 : old recovery function (set by SET_RECOVERY_HANDLER)
862 * x12, x13 : backtrace data
863 *
864 */
865 .text
866 .align 2
867 .globl EXT(copyinframe)
868LEXT(copyinframe)
 869 ARM64_STACK_PROLOG
870 PUSH_FRAME
871 SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
872 cbnz w2, Lcopyinframe64 // Check frame size
873 adrp x5, EXT(gVirtBase)@page // For 32-bit frame, make sure we're not trying to copy from kernel
874 add x5, x5, EXT(gVirtBase)@pageoff
875 ldr x5, [x5]
876 cmp x5, x0 // See if address is in kernel virtual range
877 b.hi Lcopyinframe32 // If below kernel virtual range, proceed.
878 mov w0, #EFAULT // Should never have a 32-bit frame in kernel virtual range
879 b Lcopyinframe_done
880
881Lcopyinframe32:
882 ldr x12, [x0] // Copy 8 bytes
883 str x12, [x1]
884 mov w0, #0 // Success
885 b Lcopyinframe_done
886
887Lcopyinframe64:
888 mov x3, VM_MIN_KERNEL_ADDRESS // Check if kernel address
889 orr x9, x0, TBI_MASK // Hide tags in address comparison
890 cmp x9, x3 // If in kernel address range, skip tag test
891 b.hs Lcopyinframe_valid
892 tst x0, TBI_MASK // Detect tagged pointers
893 b.eq Lcopyinframe_valid
894 mov w0, #EFAULT // Tagged address, fail
895 b Lcopyinframe_done
896Lcopyinframe_valid:
897 ldp x12, x13, [x0] // Copy 16 bytes
898 stp x12, x13, [x1]
899 mov w0, #0 // Success
900
901Lcopyinframe_done:
902 CLEAR_RECOVERY_HANDLER x10, x11
903 POP_FRAME
 904 ARM64_STACK_EPILOG
 905
906
907/*
908 * uint32_t arm_debug_read_dscr(void)
909 */
910 .text
911 .align 2
912 .globl EXT(arm_debug_read_dscr)
913LEXT(arm_debug_read_dscr)
914 PANIC_UNIMPLEMENTED
915
916/*
917 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
918 *
919 * Set debug registers to match the current thread state
920 * (NULL to disable). Assume 6 breakpoints and 2
921 * watchpoints, since that has been the case in all cores
922 * thus far.
923 */
924 .text
925 .align 2
926 .globl EXT(arm_debug_set_cp14)
927LEXT(arm_debug_set_cp14)
928 PANIC_UNIMPLEMENTED
929
930#if defined(APPLE_ARM64_ARCH_FAMILY)
931/*
932 * Note: still have to ISB before executing wfi!
933 */
934 .text
935 .align 2
936 .globl EXT(arm64_prepare_for_sleep)
937LEXT(arm64_prepare_for_sleep)
938 PUSH_FRAME
939
940#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
941 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
942 mrs x0, ARM64_REG_HID2 // Read HID2
943 orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
944 msr ARM64_REG_HID2, x0 // Write HID2
945 dsb sy
946 isb sy
947#endif
948
949#if __ARM_GLOBAL_SLEEP_BIT__
950 // Enable deep sleep
951 mrs x1, ARM64_REG_ACC_OVRD
952 orr x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
953 and x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
954 orr x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
955 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
956 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
957 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
958 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
959 and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
960 orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
961 msr ARM64_REG_ACC_OVRD, x1
962
963
964#else
965 // Enable deep sleep
966 mov x1, ARM64_REG_CYC_CFG_deepSleep
967 msr ARM64_REG_CYC_CFG, x1
968#endif
969 // Set "OK to power down" (<rdar://problem/12390433>)
970 mrs x0, ARM64_REG_CYC_OVRD
971 orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
972 msr ARM64_REG_CYC_OVRD, x0
973
974#if defined(APPLEMONSOON)
975 ARM64_IS_PCORE x0
976 cbz x0, Lwfi_inst // skip if not p-core
977
978 /* <rdar://problem/32512947>: Flush the GUPS prefetcher prior to
979 * wfi. A Skye HW bug can cause the GUPS prefetcher on p-cores
980 * to be left with valid entries that fail to drain if a
981 * subsequent wfi is issued. This can prevent the core from
982 * power-gating. For the idle case that is recoverable, but
983 * for the deep-sleep (S2R) case in which cores MUST power-gate,
984 * it can lead to a hang. This can be prevented by disabling
985 * and re-enabling GUPS, which forces the prefetch queue to
986 * drain. This should be done as close to wfi as possible, i.e.
987 * at the very end of arm64_prepare_for_sleep(). */
988 mrs x0, ARM64_REG_HID10
989 orr x0, x0, #(ARM64_REG_HID10_DisHwpGups)
990 msr ARM64_REG_HID10, x0
991 isb sy
992 and x0, x0, #(~(ARM64_REG_HID10_DisHwpGups))
993 msr ARM64_REG_HID10, x0
994 isb sy
995#endif
996Lwfi_inst:
997 dsb sy
998 isb sy
999 wfi
1000 b Lwfi_inst
1001
1002/*
1003 * Force WFI to use clock gating only
1004 *
1005 */
1006 .text
1007 .align 2
1008 .globl EXT(arm64_force_wfi_clock_gate)
1009LEXT(arm64_force_wfi_clock_gate)
 1010 ARM64_STACK_PROLOG
1011 PUSH_FRAME
1012
1013 mrs x0, ARM64_REG_CYC_OVRD
1014 orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
1015 msr ARM64_REG_CYC_OVRD, x0
1016
1017 POP_FRAME
 1018 ARM64_STACK_EPILOG
1019
1020
1021
1022#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
1023
1024 .text
1025 .align 2
1026 .globl EXT(cyclone_typhoon_prepare_for_wfi)
1027
1028LEXT(cyclone_typhoon_prepare_for_wfi)
1029 PUSH_FRAME
1030
1031 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
1032 mrs x0, ARM64_REG_HID2 // Read HID2
1033 orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
1034 msr ARM64_REG_HID2, x0 // Write HID2
1035 dsb sy
1036 isb sy
1037
1038 POP_FRAME
1039 ret
1040
1041
1042 .text
1043 .align 2
1044 .globl EXT(cyclone_typhoon_return_from_wfi)
1045LEXT(cyclone_typhoon_return_from_wfi)
1046 PUSH_FRAME
1047
1048 // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
1049 mrs x0, ARM64_REG_HID2 // Read HID2
1050 mov x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch) //
 1051 bic x0, x0, x1 // Clear HID.DisableMTLBPrefetch
1052 msr ARM64_REG_HID2, x0 // Write HID2
1053 dsb sy
1054 isb sy
1055
1056 POP_FRAME
1057 ret
1058#endif
1059
1060#ifdef APPLETYPHOON
1061
1062#define HID0_DEFEATURES_1 0x0000a0c000064010ULL
1063#define HID1_DEFEATURES_1 0x000000004005bf20ULL
1064#define HID2_DEFEATURES_1 0x0000000000102074ULL
1065#define HID3_DEFEATURES_1 0x0000000000400003ULL
1066#define HID4_DEFEATURES_1 0x83ff00e100000268ULL
1067#define HID7_DEFEATURES_1 0x000000000000000eULL
1068
1069#define HID0_DEFEATURES_2 0x0000a1c000020010ULL
1070#define HID1_DEFEATURES_2 0x000000000005d720ULL
1071#define HID2_DEFEATURES_2 0x0000000000002074ULL
1072#define HID3_DEFEATURES_2 0x0000000000400001ULL
1073#define HID4_DEFEATURES_2 0x8390000200000208ULL
1074#define HID7_DEFEATURES_2 0x0000000000000000ULL
1075
1076/*
1077 arg0 = target register
1078 arg1 = 64-bit constant
1079*/
1080.macro LOAD_UINT64
1081 movz $0, #(($1 >> 48) & 0xffff), lsl #48
1082 movk $0, #(($1 >> 32) & 0xffff), lsl #32
1083 movk $0, #(($1 >> 16) & 0xffff), lsl #16
1084 movk $0, #(($1) & 0xffff)
1085.endmacro
1086
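/*
 * Illustrative sketch (hypothetical helper): LOAD_UINT64 materializes a
 * 64-bit constant 16 bits at a time (movz for bits [63:48], then movk for
 * each lower halfword), i.e. the same composition as:
 *
 *	static inline uint64_t
 *	compose_u64_sketch(uint16_t hi, uint16_t mh, uint16_t ml, uint16_t lo)
 *	{
 *		return ((uint64_t)hi << 48) | ((uint64_t)mh << 32) |
 *		       ((uint64_t)ml << 16) | (uint64_t)lo;
 *	}
 */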
1087 .text
1088 .align 2
1089 .globl EXT(cpu_defeatures_set)
1090LEXT(cpu_defeatures_set)
1091 PUSH_FRAME
1092 cmp x0, #2
1093 b.eq cpu_defeatures_set_2
1094 cmp x0, #1
1095 b.ne cpu_defeatures_set_ret
1096 LOAD_UINT64 x1, HID0_DEFEATURES_1
1097 mrs x0, ARM64_REG_HID0
1098 orr x0, x0, x1
1099 msr ARM64_REG_HID0, x0
1100 LOAD_UINT64 x1, HID1_DEFEATURES_1
1101 mrs x0, ARM64_REG_HID1
1102 orr x0, x0, x1
1103 msr ARM64_REG_HID1, x0
1104 LOAD_UINT64 x1, HID2_DEFEATURES_1
1105 mrs x0, ARM64_REG_HID2
1106 orr x0, x0, x1
1107 msr ARM64_REG_HID2, x0
1108 LOAD_UINT64 x1, HID3_DEFEATURES_1
1109 mrs x0, ARM64_REG_HID3
1110 orr x0, x0, x1
1111 msr ARM64_REG_HID3, x0
1112 LOAD_UINT64 x1, HID4_DEFEATURES_1
1113 mrs x0, ARM64_REG_HID4
1114 orr x0, x0, x1
1115 msr ARM64_REG_HID4, x0
1116 LOAD_UINT64 x1, HID7_DEFEATURES_1
1117 mrs x0, ARM64_REG_HID7
1118 orr x0, x0, x1
1119 msr ARM64_REG_HID7, x0
1120 dsb sy
1121 isb sy
1122 b cpu_defeatures_set_ret
1123cpu_defeatures_set_2:
1124 LOAD_UINT64 x1, HID0_DEFEATURES_2
1125 mrs x0, ARM64_REG_HID0
1126 orr x0, x0, x1
1127 msr ARM64_REG_HID0, x0
1128 LOAD_UINT64 x1, HID1_DEFEATURES_2
1129 mrs x0, ARM64_REG_HID1
1130 orr x0, x0, x1
1131 msr ARM64_REG_HID1, x0
1132 LOAD_UINT64 x1, HID2_DEFEATURES_2
1133 mrs x0, ARM64_REG_HID2
1134 orr x0, x0, x1
1135 msr ARM64_REG_HID2, x0
1136 LOAD_UINT64 x1, HID3_DEFEATURES_2
1137 mrs x0, ARM64_REG_HID3
1138 orr x0, x0, x1
1139 msr ARM64_REG_HID3, x0
1140 LOAD_UINT64 x1, HID4_DEFEATURES_2
1141 mrs x0, ARM64_REG_HID4
1142 orr x0, x0, x1
1143 msr ARM64_REG_HID4, x0
1144 LOAD_UINT64 x1, HID7_DEFEATURES_2
1145 mrs x0, ARM64_REG_HID7
1146 orr x0, x0, x1
1147 msr ARM64_REG_HID7, x0
1148 dsb sy
1149 isb sy
1150 b cpu_defeatures_set_ret
1151cpu_defeatures_set_ret:
1152 POP_FRAME
1153 ret
1154#endif
1155
1156#else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
1157 .text
1158 .align 2
1159 .globl EXT(arm64_prepare_for_sleep)
1160LEXT(arm64_prepare_for_sleep)
1161 PUSH_FRAME
1162Lwfi_inst:
1163 dsb sy
1164 isb sy
1165 wfi
1166 b Lwfi_inst
1167
1168/*
1169 * Force WFI to use clock gating only
 1170 * Note: for non-Apple devices, do nothing.
1171 */
1172 .text
1173 .align 2
1174 .globl EXT(arm64_force_wfi_clock_gate)
1175LEXT(arm64_force_wfi_clock_gate)
1176 PUSH_FRAME
1177 nop
1178 POP_FRAME
1179
1180#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */
1181
1182/*
1183 * void arm64_replace_bootstack(cpu_data_t *cpu_data)
1184 *
1185 * This must be called from a kernel thread context running on the boot CPU,
1186 * after setting up new exception stacks in per-CPU data. That will guarantee
1187 * that the stack(s) we're trying to replace aren't currently in use. For
1188 * KTRR-protected devices, this must also be called prior to VM prot finalization
1189 * and lockdown, as updating SP1 requires a sensitive instruction.
1190 */
1191 .text
1192 .align 2
1193 .globl EXT(arm64_replace_bootstack)
1194LEXT(arm64_replace_bootstack)
1195 ARM64_STACK_PROLOG
1196 PUSH_FRAME
1197 // Set the exception stack pointer
1198 ldr x0, [x0, CPU_EXCEPSTACK_TOP]
1199 mrs x4, DAIF // Load current DAIF; use x4 as pinst may trash x1-x3
1200 msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) // Disable IRQ/FIQ/serror
1201 // Set SP_EL1 to exception stack
1202#if defined(KERNEL_INTEGRITY_KTRR)
1203 mov x1, lr
1204 bl _pinst_spsel_1
1205 mov lr, x1
1206#else
1207 msr SPSel, #1
 1208#endif
1209 mov sp, x0
1210 msr SPSel, #0
1211 msr DAIF, x4 // Restore interrupt state
1212 POP_FRAME
1213 ARM64_STACK_EPILOG
1214
1215#ifdef MONITOR
1216/*
1217 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
1218 uintptr_t arg2, uintptr_t arg3)
1219 *
1220 * Call the EL3 monitor with 4 arguments in registers
1221 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
1222 * registers are preserved, temporary registers are not. Parameters and results are passed in
1223 * the usual manner.
1224 */
1225 .text
1226 .align 2
1227 .globl EXT(monitor_call)
1228LEXT(monitor_call)
1229 smc 0x11
1230 ret
1231#endif
1232
 1233
 1234/* vim: set sw=4 ts=4: */