/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/proc_reg.h>
#include <arm/pmap.h>
#include <pexpert/arm64/board_config.h>
#include <sys/errno.h>
#include "assym.s"


/* uint32_t get_fpscr(void):
 * Returns (FPSR | FPCR).
 */
    .align 2
    .globl EXT(get_fpscr)
LEXT(get_fpscr)
#if __ARM_VFP__
    mrs x1, FPSR // Grab FPSR
    mov x4, #(FPSR_MASK & 0xFFFF)
    mov x5, #(FPSR_MASK & 0xFFFF0000)
    orr x0, x4, x5
    and x1, x1, x0 // Be paranoid, and clear bits we expect to
                   // be clear
    mrs x2, FPCR // Grab FPCR
    mov x4, #(FPCR_MASK & 0xFFFF)
    mov x5, #(FPCR_MASK & 0xFFFF0000)
    orr x0, x4, x5
    and x2, x2, x0 // Be paranoid, and clear bits we expect to
                   // be clear
    orr x0, x1, x2 // OR them to get FPSCR equivalent state
#else
    mov x0, #0
#endif
    ret
    .align 2
    .globl EXT(set_fpscr)
/* void set_fpscr(uint32_t value):
 * Set the FPCR and FPSR registers, based on the given value; a
 * noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
 * and FPCR are not responsible for condition codes.
 */
LEXT(set_fpscr)
#if __ARM_VFP__
    mov x4, #(FPSR_MASK & 0xFFFF)
    mov x5, #(FPSR_MASK & 0xFFFF0000)
    orr x1, x4, x5
    and x1, x1, x0 // Clear the bits that don't apply to FPSR
    mov x4, #(FPCR_MASK & 0xFFFF)
    mov x5, #(FPCR_MASK & 0xFFFF0000)
    orr x2, x4, x5
    and x2, x2, x0 // Clear the bits that don't apply to FPCR
    msr FPSR, x1 // Write FPSR
    msr FPCR, x2 // Write FPCR
    dsb ish // FPCR requires synchronization
#endif
    ret

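/*
 * A minimal C model of the FPSCR split performed above, assuming only that
 * FPSR_MASK and FPCR_MASK (used above) select the status and control bits
 * respectively:
 *
 *     static inline uint32_t fpscr_to_fpsr(uint32_t fpscr)
 *     {
 *         return fpscr & FPSR_MASK;    // status bits only
 *     }
 *
 *     static inline uint32_t fpscr_to_fpcr(uint32_t fpscr)
 *     {
 *         return fpscr & FPCR_MASK;    // control bits only
 *     }
 *
 * get_fpscr() ORs the two masked registers back together; set_fpscr()
 * applies exactly this masking before writing each register.
 */
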
#if (__ARM_VFP__ >= 3)
    .align 2
    .globl EXT(get_mvfr0)
LEXT(get_mvfr0)
    mrs x0, MVFR0_EL1
    ret

    .globl EXT(get_mvfr1)
LEXT(get_mvfr1)
    mrs x0, MVFR1_EL1
    ret

#endif

/*
 * void flush_mmu_tlb(void)
 *
 * Flush all TLBs
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb)
LEXT(flush_mmu_tlb)
    tlbi vmalle1is
    dsb ish
    isb sy
    ret

/*
 * void flush_core_tlb(void)
 *
 * Flush core TLB
 */
    .text
    .align 2
    .globl EXT(flush_core_tlb)
LEXT(flush_core_tlb)
    tlbi vmalle1
    dsb ish
    isb sy
    ret

/*
 * void flush_mmu_tlb_allentries(uint64_t, uint64_t)
 *
 * Flush TLB entries
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_allentries)
LEXT(flush_mmu_tlb_allentries)
#if __ARM_16K_PG__
    and x0, x0, #~0x3

    /*
     * The code below is not necessarily correct. From an overview of
     * the client code, the expected contract for TLB flushes is that
     * we will expand from an "address, length" pair to "start address,
     * end address" in the course of a TLB flush. This suggests that
     * a flush for "X, X+4" is actually only asking for a flush of a
     * single 16KB page. At the same time, we'd like to be prepared
     * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
     * number to a 16KB page boundary. This should deal correctly with
     * unaligned inputs.
     *
     * If our expectations about client behavior are wrong, however, this
     * will lead to occasional TLB corruption on platforms with 16KB
     * pages.
     */
    add x1, x1, #0x3
    and x1, x1, #~0x3
#endif

1:
    tlbi vaae1is, x0
    add x0, x0, #(ARM_PGBYTES / 4096) // Units are 4KB pages, as defined by the ISA
    cmp x0, x1
    b.lt 1b
    dsb ish
    isb sy
    ret

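/*
 * A rough C model of the rounding and the loop above, under the same
 * assumption the comment makes about the "start, end" calling convention
 * (both arguments are in units of 4KB pages); tlbi_vaae1is() is a
 * hypothetical stand-in for the inline TLBI instruction:
 *
 *     static void flush_allentries_model(uint64_t start, uint64_t end)
 *     {
 *         start &= ~0x3ULL;                // round down to a 16KB boundary
 *         end = (end + 0x3ULL) & ~0x3ULL;  // round up to a 16KB boundary
 *         for (uint64_t page = start; page < end; page += ARM_PGBYTES / 4096) {
 *             tlbi_vaae1is(page);
 *         }
 *     }
 *
 * On 4KB-page configurations the rounding is skipped and the loop simply
 * steps one page at a time.
 */
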
/*
 * void flush_mmu_tlb_entry(uint64_t)
 *
 * Flush TLB entry
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_entry)
LEXT(flush_mmu_tlb_entry)
    tlbi vae1is, x0
    dsb ish
    isb sy
    ret

/*
 * void flush_mmu_tlb_entries(uint64_t, uint64_t)
 *
 * Flush TLB entries
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_entries)
LEXT(flush_mmu_tlb_entries)
#if __ARM_16K_PG__
    and x0, x0, #~0x3

    /*
     * The code below is not necessarily correct. From an overview of
     * the client code, the expected contract for TLB flushes is that
     * we will expand from an "address, length" pair to "start address,
     * end address" in the course of a TLB flush. This suggests that
     * a flush for "X, X+4" is actually only asking for a flush of a
     * single 16KB page. At the same time, we'd like to be prepared
     * for bad inputs (X, X+3), so add 3 and then truncate the 4KB page
     * number to a 16KB page boundary. This should deal correctly with
     * unaligned inputs.
     *
     * If our expectations about client behavior are wrong, however, this
     * will lead to occasional TLB corruption on platforms with 16KB
     * pages.
     */
    add x1, x1, #0x3
    and x1, x1, #~0x3
#endif

1:
    tlbi vae1is, x0
    add x0, x0, #(ARM_PGBYTES / 4096) // Units are pages
    cmp x0, x1
    b.lt 1b
    dsb ish
    isb sy
    ret

/*
 * void flush_mmu_tlb_asid(uint64_t)
 *
 * Flush TLB entries for requested asid
 */
    .text
    .align 2
    .globl EXT(flush_mmu_tlb_asid)
LEXT(flush_mmu_tlb_asid)
    tlbi aside1is, x0
    dsb ish
    isb sy
    ret

/*
 * void flush_core_tlb_asid(uint64_t)
 *
 * Flush TLB entries for core for requested asid
 */
    .text
    .align 2
    .globl EXT(flush_core_tlb_asid)
LEXT(flush_core_tlb_asid)
    tlbi aside1, x0
    dsb ish
    isb sy
    ret

/*
 * Set MMU Translation Table Base Alternate
 */
    .text
    .align 2
    .globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
    dsb sy
#if defined(KERNEL_INTEGRITY_KTRR)
    mov x1, lr
    bl EXT(pinst_set_ttbr1)
    mov lr, x1
#else
    msr TTBR1_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
    isb sy
    ret

/*
 * set AUX control register
 */
    .text
    .align 2
    .globl EXT(set_aux_control)
LEXT(set_aux_control)
    msr ACTLR_EL1, x0
    // Synchronize system
    dsb sy
    isb sy
    ret

#if (DEVELOPMENT || DEBUG)
/*
 * set MMU control register
 */
    .text
    .align 2
    .globl EXT(set_mmu_control)
LEXT(set_mmu_control)
    msr SCTLR_EL1, x0
    dsb sy
    isb sy
    ret
#endif


/*
 * set translation control register
 */
    .text
    .align 2
    .globl EXT(set_tcr)
LEXT(set_tcr)
#if defined(APPLE_ARM64_ARCH_FAMILY)
    // Assert that T0SZ is always equal to T1SZ
    eor x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
    and x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
    cbnz x1, L_set_tcr_panic
#if defined(KERNEL_INTEGRITY_KTRR)
    mov x1, lr
    bl _pinst_set_tcr
    mov lr, x1
#else
    msr TCR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
    isb sy
    ret

L_set_tcr_panic:
    PUSH_FRAME
    sub sp, sp, #16
    str x0, [sp]
    adr x0, L_set_tcr_panic_str
    BRANCH_EXTERN panic

L_set_locked_reg_panic:
    PUSH_FRAME
    sub sp, sp, #16
    str x0, [sp]
    adr x0, L_set_locked_reg_panic_str
    BRANCH_EXTERN panic
    b .

L_set_tcr_panic_str:
    .asciz "set_tcr: t0sz, t1sz not equal (%llx)\n"


L_set_locked_reg_panic_str:
    .asciz "attempt to set locked register: (%llx)\n"
#else
#if defined(KERNEL_INTEGRITY_KTRR)
    mov x1, lr
    bl _pinst_set_tcr
    mov lr, x1
#else
    msr TCR_EL1, x0
#endif
    isb sy
    ret
#endif // defined(APPLE_ARM64_ARCH_FAMILY)

/*
 * MMU kernel virtual to physical address translation
 */
    .text
    .align 2
    .globl EXT(mmu_kvtop)
LEXT(mmu_kvtop)
    mrs x2, DAIF // Load current DAIF
    msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
    at s1e1r, x0 // Translation Stage 1 EL1
    mrs x1, PAR_EL1 // Read result
    msr DAIF, x2 // Restore interrupt state
    tbnz x1, #0, L_mmu_kvtop_invalid // Test Translation not valid
    bfm x1, x0, #0, #11 // Add page offset
    and x0, x1, #0x0000ffffffffffff // Clear non-address bits
    ret
L_mmu_kvtop_invalid:
    mov x0, xzr // Return invalid
    ret

/*
 * MMU user virtual to physical address translation
 */
    .text
    .align 2
    .globl EXT(mmu_uvtop)
LEXT(mmu_uvtop)
    lsr x8, x0, #56 // Extract top byte
    cbnz x8, L_mmu_uvtop_invalid // Tagged pointers are invalid
    mrs x2, DAIF // Load current DAIF
    msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
    at s1e0r, x0 // Translation Stage 1 EL0
    mrs x1, PAR_EL1 // Read result
    msr DAIF, x2 // Restore interrupt state
    tbnz x1, #0, L_mmu_uvtop_invalid // Test Translation not valid
    bfm x1, x0, #0, #11 // Add page offset
    and x0, x1, #0x0000ffffffffffff // Clear non-address bits
    ret
L_mmu_uvtop_invalid:
    mov x0, xzr // Return invalid
    ret

/*
 * MMU kernel virtual to physical address preflight write access
 */
    .text
    .align 2
    .globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
    mrs x2, DAIF // Load current DAIF
    msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
    at s1e1w, x0 // Translation Stage 1 EL1
    mrs x1, PAR_EL1 // Read result
    msr DAIF, x2 // Restore interrupt state
    tbnz x1, #0, L_mmu_kvtop_wpreflight_invalid // Test Translation not valid
    bfm x1, x0, #0, #11 // Add page offset
    and x0, x1, #0x0000ffffffffffff // Clear non-address bits
    ret
L_mmu_kvtop_wpreflight_invalid:
    mov x0, xzr // Return invalid
    ret

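/*
 * mmu_kvtop, mmu_uvtop and mmu_kvtop_wpreflight all follow the same pattern:
 * issue an AT (address translate) instruction, read PAR_EL1, and test bit 0
 * for failure. A sketch of the PAR_EL1 decode they perform, assuming the
 * standard PAR layout (fault flag in bit 0, output address in bits 47:12):
 *
 *     static uint64_t par_to_pa(uint64_t par, uint64_t va)
 *     {
 *         if (par & 1) {
 *             return 0;                               // translation failed
 *         }
 *         uint64_t pa = par & 0x0000fffffffff000ULL;  // PA[47:12]
 *         return pa | (va & 0xfffULL);                // splice in the page offset
 *     }
 *
 * The assembly performs the same splice with a single bfm plus a mask.
 */
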
/*
 * SET_RECOVERY_HANDLER
 *
 * Sets up a page fault recovery handler
 *
 * arg0 - persisted thread pointer
 * arg1 - persisted recovery handler
 * arg2 - scratch reg
 * arg3 - recovery label
 */
.macro SET_RECOVERY_HANDLER
    mrs $0, TPIDR_EL1 // Load thread pointer
    ldr $1, [$0, TH_RECOVER] // Save previous recovery handler
    adrp $2, $3@page // Load the recovery handler address
    add $2, $2, $3@pageoff
    str $2, [$0, TH_RECOVER] // Set new recovery handler
.endmacro

/*
 * CLEAR_RECOVERY_HANDLER
 *
 * Clears page fault handler set by SET_RECOVERY_HANDLER
 *
 * arg0 - thread pointer saved by SET_RECOVERY_HANDLER
 * arg1 - old recovery handler saved by SET_RECOVERY_HANDLER
 */
.macro CLEAR_RECOVERY_HANDLER
    str $1, [$0, TH_RECOVER] // Restore the previous recovery handler
.endmacro


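/*
 * The two macros above implement a save/set/restore protocol on the
 * per-thread recovery pointer (the field TH_RECOVER refers to). A sketch of
 * the same idea in C, using C-level names for what TPIDR_EL1 and TH_RECOVER
 * stand for:
 *
 *     void *old = current_thread()->recover;     // SET_RECOVERY_HANDLER
 *     current_thread()->recover = (void *)copyio_error;
 *     ...                                        // code that may fault on user memory
 *     current_thread()->recover = old;           // CLEAR_RECOVERY_HANDLER
 *
 * The assembly keeps the thread pointer in TPIDR_EL1 and the old handler in
 * a scratch register that must stay live across the guarded region.
 */
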
    .text
    .align 2
copyio_error:
    CLEAR_RECOVERY_HANDLER x10, x11
    mov x0, #EFAULT // Return an EFAULT error
    POP_FRAME
    ret

/*
 * int _bcopyin(const char *src, char *dst, vm_size_t len)
 */
    .text
    .align 2
    .globl EXT(_bcopyin)
LEXT(_bcopyin)
    PUSH_FRAME
    SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
    /* If len is less than 16 bytes, just do a bytewise copy */
    cmp x2, #16
    b.lt 2f
    sub x2, x2, #16
1:
    /* 16 bytes at a time */
    ldp x3, x4, [x0], #16
    stp x3, x4, [x1], #16
    subs x2, x2, #16
    b.ge 1b
    /* Fixup the len and test for completion */
    adds x2, x2, #16
    b.eq 3f
2: /* Bytewise */
    subs x2, x2, #1
    ldrb w3, [x0], #1
    strb w3, [x1], #1
    b.hi 2b
3:
    CLEAR_RECOVERY_HANDLER x10, x11
    mov x0, xzr
    POP_FRAME
    ret

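/*
 * _bcopyin and _bcopyout share the same strategy; only the direction of the
 * user access differs. A C approximation of the loop structure, ignoring
 * the fault-recovery plumbing (a faulting access lands in copyio_error and
 * returns EFAULT):
 *
 *     static void bcopy_model(const char *src, char *dst, size_t len)
 *     {
 *         while (len >= 16) {          // ldp/stp: 16 bytes at a time
 *             memcpy(dst, src, 16);
 *             src += 16; dst += 16; len -= 16;
 *         }
 *         while (len > 0) {            // ldrb/strb: bytewise tail
 *             *dst++ = *src++;
 *             len--;
 *         }
 *     }
 */
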
/*
 * int _copyin_word(const char *src, uint64_t *dst, vm_size_t len)
 */
    .text
    .align 2
    .globl EXT(_copyin_word)
LEXT(_copyin_word)
    PUSH_FRAME
    SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
    cmp x2, #4
    b.eq L_copyin_word_4
    cmp x2, #8
    b.eq L_copyin_word_8
    mov x0, EINVAL
    b L_copying_exit
L_copyin_word_4:
    ldr w8, [x0]
    b L_copyin_word_store
L_copyin_word_8:
    ldr x8, [x0]
L_copyin_word_store:
    str x8, [x1]
    mov x0, xzr
    CLEAR_RECOVERY_HANDLER x10, x11
L_copying_exit:
    POP_FRAME
    ret


/*
 * int _bcopyout(const char *src, char *dst, vm_size_t len)
 */
    .text
    .align 2
    .globl EXT(_bcopyout)
LEXT(_bcopyout)
    PUSH_FRAME
    SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
    /* If len is less than 16 bytes, just do a bytewise copy */
    cmp x2, #16
    b.lt 2f
    sub x2, x2, #16
1:
    /* 16 bytes at a time */
    ldp x3, x4, [x0], #16
    stp x3, x4, [x1], #16
    subs x2, x2, #16
    b.ge 1b
    /* Fixup the len and test for completion */
    adds x2, x2, #16
    b.eq 3f
2: /* Bytewise */
    subs x2, x2, #1
    ldrb w3, [x0], #1
    strb w3, [x1], #1
    b.hi 2b
3:
    CLEAR_RECOVERY_HANDLER x10, x11
    mov x0, xzr
    POP_FRAME
    ret

/*
 * int _bcopyinstr(
 *     const user_addr_t user_addr,
 *     char *kernel_addr,
 *     vm_size_t max,
 *     vm_size_t *actual)
 */
    .text
    .align 2
    .globl EXT(_bcopyinstr)
LEXT(_bcopyinstr)
    PUSH_FRAME
    adr x4, Lcopyinstr_error // Get address for recover
    mrs x10, TPIDR_EL1 // Get thread pointer
    ldr x11, [x10, TH_RECOVER] // Save previous recover
    str x4, [x10, TH_RECOVER] // Store new recover
    mov x4, xzr // x4 - total bytes copied
Lcopyinstr_loop:
    ldrb w5, [x0], #1 // Load a byte from the user source
    strb w5, [x1], #1 // Store a byte to the kernel dest
    add x4, x4, #1 // Increment bytes copied
    cbz x5, Lcopyinstr_done // If this byte is null, we're done
    cmp x4, x2 // If we're out of space, return an error
    b.ne Lcopyinstr_loop
Lcopyinstr_too_long:
    mov x5, #ENAMETOOLONG // Set current byte to error code for later return
Lcopyinstr_done:
    str x4, [x3] // Return number of bytes copied
    mov x0, x5 // Set error code (0 on success, ENAMETOOLONG on failure)
    b Lcopyinstr_exit
Lcopyinstr_error:
    mov x0, #EFAULT // Return EFAULT on error
Lcopyinstr_exit:
    str x11, [x10, TH_RECOVER] // Restore old recover
    POP_FRAME
    ret

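/*
 * _bcopyinstr copies a NUL-terminated string of at most 'max' bytes and
 * always reports how many bytes it consumed. A C approximation, again
 * ignoring the fault-recovery path (a faulting user access returns EFAULT):
 *
 *     static int bcopyinstr_model(const char *user, char *kern,
 *         size_t max, size_t *actual)
 *     {
 *         size_t copied = 0;
 *         int err = ENAMETOOLONG;      // assume overflow until a NUL is seen
 *         while (copied < max) {
 *             char c = user[copied];
 *             kern[copied++] = c;
 *             if (c == '\0') {
 *                 err = 0;
 *                 break;
 *             }
 *         }
 *         *actual = copied;
 *         return err;
 *     }
 */
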
/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
 *
 * Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
 * either user or kernel memory, or 8 bytes (AArch32) from user only.
 *
 * x0 : address of frame to copy.
 * x1 : kernel address at which to store data.
 * w2 : whether to copy an AArch32 or AArch64 frame.
 * x3 : temp
 * x5 : temp (kernel virtual base)
 * x9 : temp
 * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 * x11 : old recovery function (set by SET_RECOVERY_HANDLER)
 * x12, x13 : backtrace data
 *
 */
    .text
    .align 2
    .globl EXT(copyinframe)
LEXT(copyinframe)
    PUSH_FRAME
    SET_RECOVERY_HANDLER x10, x11, x3, copyio_error
    cbnz w2, Lcopyinframe64 // Check frame size
    adrp x5, EXT(gVirtBase)@page // For 32-bit frame, make sure we're not trying to copy from kernel
    add x5, x5, EXT(gVirtBase)@pageoff
    ldr x5, [x5]
    cmp x5, x0 // See if address is in kernel virtual range
    b.hi Lcopyinframe32 // If below kernel virtual range, proceed.
    mov w0, #EFAULT // Should never have a 32-bit frame in kernel virtual range
    b Lcopyinframe_done

Lcopyinframe32:
    ldr x12, [x0] // Copy 8 bytes
    str x12, [x1]
    mov w0, #0 // Success
    b Lcopyinframe_done

Lcopyinframe64:
    mov x3, VM_MIN_KERNEL_ADDRESS // Check if kernel address
    orr x9, x0, TBI_MASK // Hide tags in address comparison
    cmp x9, x3 // If in kernel address range, skip tag test
    b.hs Lcopyinframe_valid
    tst x0, TBI_MASK // Detect tagged pointers
    b.eq Lcopyinframe_valid
    mov w0, #EFAULT // Tagged address, fail
    b Lcopyinframe_done
Lcopyinframe_valid:
    ldp x12, x13, [x0] // Copy 16 bytes
    stp x12, x13, [x1]
    mov w0, #0 // Success

Lcopyinframe_done:
    CLEAR_RECOVERY_HANDLER x10, x11
    POP_FRAME
    ret


/*
 * int _emulate_swp(user_addr_t addr, uint32_t newval, uint32_t *oldval)
 *
 * Securely emulates the swp instruction removed from armv8.
 * Returns true on success.
 * Returns false if the user address is not user accessible.
 *
 * x0 : address to swap
 * x1 : new value to store
 * x2 : address to save old value
 * x3 : scratch reg
 * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 * x11 : old recovery handler (set by SET_RECOVERY_HANDLER)
 * x12 : interrupt state
 * x13 : return value
 */
    .text
    .align 2
    .globl EXT(_emulate_swp)
LEXT(_emulate_swp)
    PUSH_FRAME
    SET_RECOVERY_HANDLER x10, x11, x3, swp_error

    // Perform swap
Lswp_try:
    ldxr w3, [x0] // Load data at target address
    stxr w4, w1, [x0] // Store new value to target address
    cbnz w4, Lswp_try // Retry if store failed
    str w3, [x2] // Save old value
    mov x13, #1 // Set successful return value

Lswp_exit:
    mov x0, x13 // Set return value
    CLEAR_RECOVERY_HANDLER x10, x11
    POP_FRAME
    ret

/*
 * int _emulate_swpb(user_addr_t addr, uint32_t newval, uint32_t *oldval)
 *
 * Securely emulates the swpb instruction removed from armv8.
 * Returns true on success.
 * Returns false if the user address is not user accessible.
 *
 * x0 : address to swap
 * x1 : new value to store
 * x2 : address to save old value
 * x3 : scratch reg
 * x10 : thread pointer (set by SET_RECOVERY_HANDLER)
 * x11 : old recovery handler (set by SET_RECOVERY_HANDLER)
 * x12 : interrupt state
 * x13 : return value
 */
    .text
    .align 2
    .globl EXT(_emulate_swpb)
LEXT(_emulate_swpb)
    PUSH_FRAME
    SET_RECOVERY_HANDLER x10, x11, x3, swp_error

    // Perform swap
Lswpb_try:
    ldxrb w3, [x0] // Load data at target address
    stxrb w4, w1, [x0] // Store new value to target address
    cbnz w4, Lswpb_try // Retry if store failed
    str w3, [x2] // Save old value
    mov x13, #1 // Set successful return value

Lswpb_exit:
    mov x0, x13 // Set return value
    CLEAR_RECOVERY_HANDLER x10, x11
    POP_FRAME
    ret

    .text
    .align 2
swp_error:
    mov x0, xzr // Return false
    CLEAR_RECOVERY_HANDLER x10, x11
    POP_FRAME
    ret

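/*
 * The swp/swpb emulation above is a load-exclusive/store-exclusive retry
 * loop, i.e. an atomic exchange. A C sketch of the equivalent operation
 * using the compiler builtin (hand-written assembly is used here so that a
 * fault on the user address lands in swp_error and returns false):
 *
 *     static int emulate_swp_model(uint32_t *addr, uint32_t newval,
 *         uint32_t *oldval)
 *     {
 *         *oldval = __atomic_exchange_n(addr, newval, __ATOMIC_RELAXED);
 *         return 1;                    // success; a fault would return 0
 *     }
 */
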
/*
 * uint32_t arm_debug_read_dscr(void)
 */
    .text
    .align 2
    .globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
    PANIC_UNIMPLEMENTED

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 * Set debug registers to match the current thread state
 * (NULL to disable). Assume 6 breakpoints and 2
 * watchpoints, since that has been the case in all cores
 * thus far.
 */
    .text
    .align 2
    .globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
    PANIC_UNIMPLEMENTED


#if defined(APPLE_ARM64_ARCH_FAMILY)
/*
 * Note: still have to ISB before executing wfi!
 */
    .text
    .align 2
    .globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
    PUSH_FRAME

#if defined(APPLECYCLONE) || defined(APPLETYPHOON)
    // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
    mrs x0, ARM64_REG_HID2 // Read HID2
    orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
    msr ARM64_REG_HID2, x0 // Write HID2
    dsb sy
    isb sy
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
    // Enable deep sleep
    mrs x1, ARM64_REG_ACC_OVRD
    orr x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
    and x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
    orr x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
    and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
    orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
    and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
    orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
    and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
    orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
    msr ARM64_REG_ACC_OVRD, x1


#else
    // Enable deep sleep
    mov x1, ARM64_REG_CYC_CFG_deepSleep
    msr ARM64_REG_CYC_CFG, x1
#endif
    // Set "OK to power down" (<rdar://problem/12390433>)
    mrs x0, ARM64_REG_CYC_OVRD
    orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
    msr ARM64_REG_CYC_OVRD, x0

Lwfi_inst:
    dsb sy
    isb sy
    wfi
    b Lwfi_inst

/*
 * Force WFI to use clock gating only
 *
 */
    .text
    .align 2
    .globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
    PUSH_FRAME

    mrs x0, ARM64_REG_CYC_OVRD
    orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
    msr ARM64_REG_CYC_OVRD, x0

    POP_FRAME
    ret



#if defined(APPLECYCLONE) || defined(APPLETYPHOON)

    .text
    .align 2
    .globl EXT(cyclone_typhoon_prepare_for_wfi)

LEXT(cyclone_typhoon_prepare_for_wfi)
    PUSH_FRAME

    // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
    mrs x0, ARM64_REG_HID2 // Read HID2
    orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
    msr ARM64_REG_HID2, x0 // Write HID2
    dsb sy
    isb sy

    POP_FRAME
    ret


    .text
    .align 2
    .globl EXT(cyclone_typhoon_return_from_wfi)
LEXT(cyclone_typhoon_return_from_wfi)
    PUSH_FRAME

    // <rdar://problem/15827409> CPU1 Stuck in WFIWT Because of MMU Prefetch
    mrs x0, ARM64_REG_HID2 // Read HID2
    mov x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch) //
    bic x0, x0, x1 // Clear HID.DisableMTLBPrefetch
    msr ARM64_REG_HID2, x0 // Write HID2
    dsb sy
    isb sy

    POP_FRAME
    ret
#endif

#ifdef APPLETYPHOON

#define HID0_DEFEATURES_1 0x0000a0c000064010ULL
#define HID1_DEFEATURES_1 0x000000004005bf20ULL
#define HID2_DEFEATURES_1 0x0000000000102074ULL
#define HID3_DEFEATURES_1 0x0000000000400003ULL
#define HID4_DEFEATURES_1 0x83ff00e100000268ULL
#define HID7_DEFEATURES_1 0x000000000000000eULL

#define HID0_DEFEATURES_2 0x0000a1c000020010ULL
#define HID1_DEFEATURES_2 0x000000000005d720ULL
#define HID2_DEFEATURES_2 0x0000000000002074ULL
#define HID3_DEFEATURES_2 0x0000000000400001ULL
#define HID4_DEFEATURES_2 0x8390000200000208ULL
#define HID7_DEFEATURES_2 0x0000000000000000ULL

/*
    arg0 = target register
    arg1 = 64-bit constant
*/
.macro LOAD_UINT64
    movz $0, #(($1 >> 48) & 0xffff), lsl #48
    movk $0, #(($1 >> 32) & 0xffff), lsl #32
    movk $0, #(($1 >> 16) & 0xffff), lsl #16
    movk $0, #(($1) & 0xffff)
.endmacro

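/*
 * LOAD_UINT64 materializes an arbitrary 64-bit immediate from four 16-bit
 * chunks, the standard movz/movk idiom. The equivalent computation as a C
 * model:
 *
 *     static uint64_t load_uint64_model(uint64_t value)
 *     {
 *         uint64_t r = ((value >> 48) & 0xffff) << 48;    // movz ..., lsl #48
 *         r |= ((value >> 32) & 0xffff) << 32;            // movk ..., lsl #32
 *         r |= ((value >> 16) & 0xffff) << 16;            // movk ..., lsl #16
 *         r |= value & 0xffff;                            // movk
 *         return r;                                       // always equals value
 *     }
 */
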
    .text
    .align 2
    .globl EXT(cpu_defeatures_set)
LEXT(cpu_defeatures_set)
    PUSH_FRAME
    cmp x0, #2
    b.eq cpu_defeatures_set_2
    cmp x0, #1
    b.ne cpu_defeatures_set_ret
    LOAD_UINT64 x1, HID0_DEFEATURES_1
    mrs x0, ARM64_REG_HID0
    orr x0, x0, x1
    msr ARM64_REG_HID0, x0
    LOAD_UINT64 x1, HID1_DEFEATURES_1
    mrs x0, ARM64_REG_HID1
    orr x0, x0, x1
    msr ARM64_REG_HID1, x0
    LOAD_UINT64 x1, HID2_DEFEATURES_1
    mrs x0, ARM64_REG_HID2
    orr x0, x0, x1
    msr ARM64_REG_HID2, x0
    LOAD_UINT64 x1, HID3_DEFEATURES_1
    mrs x0, ARM64_REG_HID3
    orr x0, x0, x1
    msr ARM64_REG_HID3, x0
    LOAD_UINT64 x1, HID4_DEFEATURES_1
    mrs x0, ARM64_REG_HID4
    orr x0, x0, x1
    msr ARM64_REG_HID4, x0
    LOAD_UINT64 x1, HID7_DEFEATURES_1
    mrs x0, ARM64_REG_HID7
    orr x0, x0, x1
    msr ARM64_REG_HID7, x0
    dsb sy
    isb sy
    b cpu_defeatures_set_ret
cpu_defeatures_set_2:
    LOAD_UINT64 x1, HID0_DEFEATURES_2
    mrs x0, ARM64_REG_HID0
    orr x0, x0, x1
    msr ARM64_REG_HID0, x0
    LOAD_UINT64 x1, HID1_DEFEATURES_2
    mrs x0, ARM64_REG_HID1
    orr x0, x0, x1
    msr ARM64_REG_HID1, x0
    LOAD_UINT64 x1, HID2_DEFEATURES_2
    mrs x0, ARM64_REG_HID2
    orr x0, x0, x1
    msr ARM64_REG_HID2, x0
    LOAD_UINT64 x1, HID3_DEFEATURES_2
    mrs x0, ARM64_REG_HID3
    orr x0, x0, x1
    msr ARM64_REG_HID3, x0
    LOAD_UINT64 x1, HID4_DEFEATURES_2
    mrs x0, ARM64_REG_HID4
    orr x0, x0, x1
    msr ARM64_REG_HID4, x0
    LOAD_UINT64 x1, HID7_DEFEATURES_2
    mrs x0, ARM64_REG_HID7
    orr x0, x0, x1
    msr ARM64_REG_HID7, x0
    dsb sy
    isb sy
    b cpu_defeatures_set_ret
cpu_defeatures_set_ret:
    POP_FRAME
    ret
#endif

#endif

#ifdef MONITOR
/*
 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *                            uintptr_t arg2, uintptr_t arg3)
 *
 * Call the EL3 monitor with 4 arguments in registers
 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
 * registers are preserved, temporary registers are not. Parameters and results are passed in
 * the usual manner.
 */
    .text
    .align 2
    .globl EXT(monitor_call)
LEXT(monitor_call)
    smc 0x11
    ret
#endif

/* vim: set sw=4 ts=4: */