/*
 * Copyright (c) 2007-2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/proc_reg.h>
#include <arm/pmap.h>
#include <pexpert/arm64/board_config.h>
#include <sys/errno.h>
#include "assym.s"


#if defined(HAS_APPLE_PAC)
/*
 * void
 * ml_set_kernelkey_enabled(boolean_t enable)
 *
 * Toggle pointer auth kernel domain key diversification. Assembly to prevent compiler reordering.
 *
 */

	.align 2
	.globl EXT(ml_set_kernelkey_enabled)
LEXT(ml_set_kernelkey_enabled)
	mrs x1, ARM64_REG_APCTL_EL1
	orr x2, x1, #APCTL_EL1_KernKeyEn
	and x1, x1, #~APCTL_EL1_KernKeyEn
	cmp w0, #0
	csel x1, x1, x2, eq
	msr ARM64_REG_APCTL_EL1, x1
	isb
	ret

#endif /* defined(HAS_APPLE_PAC) */
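
/*
 * For reference, the sequence above is equivalent to the following C-style
 * sketch (illustrative only; <MRS>/<MSR>/<ISB> are pseudo-intrinsics, not real
 * kernel helpers). It is kept in assembly so the compiler cannot reorder
 * anything around the read-modify-write of APCTL_EL1:
 *
 *	uint64_t apctl = <MRS ARM64_REG_APCTL_EL1>;
 *	apctl = enable ? (apctl | APCTL_EL1_KernKeyEn)
 *	               : (apctl & ~APCTL_EL1_KernKeyEn);
 *	<MSR ARM64_REG_APCTL_EL1> = apctl;
 *	<ISB>;
 */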

#if HAS_BP_RET

/*
 * void set_bp_ret(void)
 * Helper function to enable branch predictor state retention
 * across ACC sleep
 */

	.align 2
	.globl EXT(set_bp_ret)
LEXT(set_bp_ret)
	// Load bpret boot-arg
	adrp x14, EXT(bp_ret)@page
	add x14, x14, EXT(bp_ret)@pageoff
	ldr w14, [x14]

	mrs x13, ARM64_REG_ACC_CFG
	and x13, x13, (~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift))
	and x14, x14, #(ARM64_REG_ACC_CFG_bpSlp_mask)
	orr x13, x13, x14, lsl #(ARM64_REG_ACC_CFG_bpSlp_shift)
	msr ARM64_REG_ACC_CFG, x13

	ret
#endif // HAS_BP_RET
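
/*
 * The field update above, as an illustrative C-style sketch (bp_ret holds the
 * raw "bpret" boot-arg value):
 *
 *	acc_cfg &= ~(ARM64_REG_ACC_CFG_bpSlp_mask << ARM64_REG_ACC_CFG_bpSlp_shift);
 *	acc_cfg |= (bp_ret & ARM64_REG_ACC_CFG_bpSlp_mask) << ARM64_REG_ACC_CFG_bpSlp_shift;
 */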

#if HAS_NEX_PG
	.align 2
	.globl EXT(set_nex_pg)
LEXT(set_nex_pg)
	mrs x14, MPIDR_EL1
	// Skip if this isn't a p-core; NEX powergating isn't available for e-cores
	and x14, x14, #(MPIDR_PNE)
	cbz x14, Lnex_pg_done

	// Set the SEG-recommended value of 12 additional reset cycles
	mrs x14, ARM64_REG_HID13
	and x14, x14, (~ARM64_REG_HID13_RstCyc_mask)
	orr x14, x14, ARM64_REG_HID13_RstCyc_val
	msr ARM64_REG_HID13, x14

	// Load nexpg boot-arg
	adrp x14, EXT(nex_pg)@page
	add x14, x14, EXT(nex_pg)@pageoff
	ldr w14, [x14]

	mrs x13, ARM64_REG_HID14
	and x13, x13, (~ARM64_REG_HID14_NexPwgEn)
	cbz w14, Lset_nex_pg
	orr x13, x13, ARM64_REG_HID14_NexPwgEn
Lset_nex_pg:
	msr ARM64_REG_HID14, x13

Lnex_pg_done:
	ret

#endif // HAS_NEX_PG

/* uint32_t get_fpscr(void):
 * Returns (FPSR | FPCR).
 */
	.align 2
	.globl EXT(get_fpscr)
LEXT(get_fpscr)
#if __ARM_VFP__
	mrs x1, FPSR // Grab FPSR
	mov x4, #(FPSR_MASK & 0xFFFF)
	mov x5, #(FPSR_MASK & 0xFFFF0000)
	orr x0, x4, x5
	and x1, x1, x0 // Be paranoid, and clear bits we expect to
	               // be clear
	mrs x2, FPCR // Grab FPCR
	mov x4, #(FPCR_MASK & 0xFFFF)
	mov x5, #(FPCR_MASK & 0xFFFF0000)
	orr x0, x4, x5
	and x2, x2, x0 // Be paranoid, and clear bits we expect to
	               // be clear
	orr x0, x1, x2 // OR them to get FPSCR equivalent state
#else
	mov x0, #0
#endif
	ret
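
/*
 * Roughly equivalent C for get_fpscr (illustrative sketch; <MRS ...> stands in
 * for a system-register read):
 *
 *	return (uint32_t)((<MRS FPSR> & FPSR_MASK) | (<MRS FPCR> & FPCR_MASK));
 */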
	.align 2
	.globl EXT(set_fpscr)
/* void set_fpscr(uint32_t value):
 * Set the FPCR and FPSR registers, based on the given value; a
 * noteworthy point is that unlike 32-bit mode, 64-bit mode FPSR
 * and FPCR are not responsible for condition codes.
 */
LEXT(set_fpscr)
#if __ARM_VFP__
	mov x4, #(FPSR_MASK & 0xFFFF)
	mov x5, #(FPSR_MASK & 0xFFFF0000)
	orr x1, x4, x5
	and x1, x1, x0 // Clear the bits that don't apply to FPSR
	mov x4, #(FPCR_MASK & 0xFFFF)
	mov x5, #(FPCR_MASK & 0xFFFF0000)
	orr x2, x4, x5
	and x2, x2, x0 // Clear the bits that don't apply to FPCR
	msr FPSR, x1 // Write FPSR
	msr FPCR, x2 // Write FPCR
	dsb ish // FPCR requires synchronization
#endif
	ret
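
/*
 * Roughly equivalent C for set_fpscr (illustrative sketch):
 *
 *	<MSR FPSR> = value & FPSR_MASK;
 *	<MSR FPCR> = value & FPCR_MASK;
 */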

/*
 * void update_mdscr(unsigned long clear, unsigned long set)
 *	Clears and sets the specified bits in MDSCR_EL1.
 *
 * Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
 * self-hosted user mode debug. Any checks before the MRS can be skipped with ROP,
 * so we need to put the checks after the MRS where they can't be skipped. That
 * still leaves a small window if a breakpoint is set on the instruction
 * immediately after the MRS. To handle that, we also do a check and then set of
 * the breakpoint control registers. This allows us to guarantee that a given
 * core will never have both KDE set and a breakpoint targeting EL1.
 *
 * If KDE gets set, unset it and then panic
 */
	.align 2
	.globl EXT(update_mdscr)
LEXT(update_mdscr)
	mov x4, #0
	mrs x2, MDSCR_EL1
	bic x2, x2, x0
	orr x2, x2, x1
1:
	bic x2, x2, #0x2000
	msr MDSCR_EL1, x2
#if defined(CONFIG_KERNEL_INTEGRITY)
	/*
	 * verify KDE didn't get set (including via ROP)
	 * If set, clear it and then panic
	 */
	ands x3, x2, #0x2000
	orr x4, x4, x3
	bne 1b
	cmp x4, xzr
	b.ne Lupdate_mdscr_panic
#endif
	ret

Lupdate_mdscr_panic:
	adrp x0, Lupdate_mdscr_panic_str@page
	add x0, x0, Lupdate_mdscr_panic_str@pageoff
	b EXT(panic)
	b .

Lupdate_mdscr_panic_str:
	.asciz "MDSCR.KDE was set"

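
/*
 * Caller-visible behavior of update_mdscr, as an illustrative C-style sketch
 * (not the implementation; 0x2000 is the MDSCR_EL1.KDE bit):
 *
 *	mdscr = (<MRS MDSCR_EL1> & ~clear) | set;
 *	mdscr &= ~0x2000;		// never allow KDE to be set
 *	<MSR MDSCR_EL1> = mdscr;
 *	// With CONFIG_KERNEL_INTEGRITY the routine additionally re-checks for
 *	// KDE and panics ("MDSCR.KDE was set") if it was ever observed set.
 */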

/*
 * Set MMU Translation Table Base Alternate
 */
	.text
	.align 2
	.globl EXT(set_mmu_ttb_alternate)
LEXT(set_mmu_ttb_alternate)
	dsb sy
#if defined(KERNEL_INTEGRITY_KTRR)
	mov x1, lr
	bl EXT(pinst_set_ttbr1)
	mov lr, x1
#else
#if defined(HAS_VMSA_LOCK)
	mrs x1, ARM64_REG_VMSA_LOCK_EL1
	and x1, x1, #(VMSA_LOCK_TTBR1_EL1)
	cbnz x1, L_set_locked_reg_panic
#endif /* defined(HAS_VMSA_LOCK) */
	msr TTBR1_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	isb sy
	ret

#if XNU_MONITOR
	.section __PPLTEXT,__text,regular,pure_instructions
#else
	.text
#endif
	.align 2
	.globl EXT(set_mmu_ttb)
LEXT(set_mmu_ttb)
#if __ARM_KERNEL_PROTECT__
	/* All EL1-mode ASIDs are odd. */
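	/*
	 * (Under __ARM_KERNEL_PROTECT__ each address space is presumed to use a
	 * pair of ASIDs, with the odd one reserved for EL1; setting the low bit
	 * of the ASID field below selects that EL1 variant.)
	 */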
	orr x0, x0, #(1 << TTBR_ASID_SHIFT)
#endif /* __ARM_KERNEL_PROTECT__ */
	dsb ish
	msr TTBR0_EL1, x0
	isb sy
	ret

/*
 * set AUX control register
 */
	.text
	.align 2
	.globl EXT(set_aux_control)
LEXT(set_aux_control)
	msr ACTLR_EL1, x0
	// Synchronize system
	isb sy
	ret

#if __ARM_KERNEL_PROTECT__
	.text
	.align 2
	.globl EXT(set_vbar_el1)
LEXT(set_vbar_el1)
#if defined(KERNEL_INTEGRITY_KTRR)
	b EXT(pinst_set_vbar)
#else
	msr VBAR_EL1, x0
	ret
#endif
#endif /* __ARM_KERNEL_PROTECT__ */

#if defined(HAS_VMSA_LOCK)
	.text
	.align 2
	.globl EXT(vmsa_lock)
LEXT(vmsa_lock)
	isb sy
	mov x1, #(VMSA_LOCK_SCTLR_M_BIT)
	mov x0, #(VMSA_LOCK_TTBR1_EL1 | VMSA_LOCK_TCR_EL1 | VMSA_LOCK_VBAR_EL1)
	orr x0, x0, x1
	msr ARM64_REG_VMSA_LOCK_EL1, x0
	isb sy
	ret
#endif /* defined(HAS_VMSA_LOCK) */

/*
 * set translation control register
 */
	.text
	.align 2
	.globl EXT(set_tcr)
LEXT(set_tcr)
#if defined(APPLE_ARM64_ARCH_FAMILY)
	// Assert that T0SZ is always equal to T1SZ
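	// (The eor/lsr below shifts the T1SZ field down onto the T0SZ position
	// and XORs the two; after masking with TCR_TSZ_MASK the result is
	// nonzero only if the fields differ.)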
	eor x1, x0, x0, lsr #(TCR_T1SZ_SHIFT - TCR_T0SZ_SHIFT)
	and x1, x1, #(TCR_TSZ_MASK << TCR_T0SZ_SHIFT)
	cbnz x1, L_set_tcr_panic
#if defined(KERNEL_INTEGRITY_KTRR)
	mov x1, lr
	bl EXT(pinst_set_tcr)
	mov lr, x1
#else
#if defined(HAS_VMSA_LOCK)
	// assert TCR unlocked
	mrs x1, ARM64_REG_VMSA_LOCK_EL1
	and x1, x1, #(VMSA_LOCK_TCR_EL1)
	cbnz x1, L_set_locked_reg_panic
#endif /* defined(HAS_VMSA_LOCK) */
	msr TCR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
	isb sy
	ret

L_set_tcr_panic:
	PUSH_FRAME
	sub sp, sp, #16
	str x0, [sp]
	adr x0, L_set_tcr_panic_str
	BRANCH_EXTERN panic

L_set_locked_reg_panic:
	PUSH_FRAME
	sub sp, sp, #16
	str x0, [sp]
	adr x0, L_set_locked_reg_panic_str
	BRANCH_EXTERN panic
	b .

L_set_tcr_panic_str:
	.asciz "set_tcr: t0sz, t1sz not equal (%llx)\n"


L_set_locked_reg_panic_str:
	.asciz "attempt to set locked register: (%llx)\n"
#else
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov x1, lr
	bl EXT(pinst_set_tcr)
	mov lr, x1
#else
	msr TCR_EL1, x0
#endif
	isb sy
	ret
#endif // defined(APPLE_ARM64_ARCH_FAMILY)

/*
 * MMU kernel virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop)
LEXT(mmu_kvtop)
	mrs x2, DAIF // Load current DAIF
	msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
	at s1e1r, x0 // Translation Stage 1 EL1
	mrs x1, PAR_EL1 // Read result
	msr DAIF, x2 // Restore interrupt state
	tbnz x1, #0, L_mmu_kvtop_invalid // Test Translation not valid
	bfm x1, x0, #0, #11 // Add page offset
	and x0, x1, #0x0000ffffffffffff // Clear non-address bits
	ret
L_mmu_kvtop_invalid:
	mov x0, #0 // Return invalid
	ret

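
/*
 * mmu_kvtop above and the two variants below share one pattern, shown here as
 * an illustrative C-style sketch (<AT>/<MRS> are pseudo-ops, masks approximate):
 *
 *	<AT s1e1r|s1e0r|s1e1w> (addr);		// stage-1 walk for the chosen regime
 *	par = <MRS PAR_EL1>;
 *	if (par & 1)				// PAR_EL1.F => translation failed
 *		return 0;
 *	return (par & 0x0000fffffffff000ULL) | (addr & 0x0fffULL);
 */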
/*
 * MMU user virtual to physical address translation
 */
	.text
	.align 2
	.globl EXT(mmu_uvtop)
LEXT(mmu_uvtop)
	lsr x8, x0, #56 // Extract top byte
	cbnz x8, L_mmu_uvtop_invalid // Tagged pointers are invalid
	mrs x2, DAIF // Load current DAIF
	msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
	at s1e0r, x0 // Translation Stage 1 EL0
	mrs x1, PAR_EL1 // Read result
	msr DAIF, x2 // Restore interrupt state
	tbnz x1, #0, L_mmu_uvtop_invalid // Test Translation not valid
	bfm x1, x0, #0, #11 // Add page offset
	and x0, x1, #0x0000ffffffffffff // Clear non-address bits
	ret
L_mmu_uvtop_invalid:
	mov x0, #0 // Return invalid
	ret

/*
 * MMU kernel virtual to physical address preflight write access
 */
	.text
	.align 2
	.globl EXT(mmu_kvtop_wpreflight)
LEXT(mmu_kvtop_wpreflight)
	mrs x2, DAIF // Load current DAIF
	msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF) // Disable IRQ
	at s1e1w, x0 // Translation Stage 1 EL1
	mrs x1, PAR_EL1 // Read result
	msr DAIF, x2 // Restore interrupt state
	tbnz x1, #0, L_mmu_kvtop_wpreflight_invalid // Test Translation not valid
	bfm x1, x0, #0, #11 // Add page offset
	and x0, x1, #0x0000ffffffffffff // Clear non-address bits
	ret
L_mmu_kvtop_wpreflight_invalid:
	mov x0, #0 // Return invalid
	ret

/*
 * SET_RECOVERY_HANDLER
 *
 * Sets up a page fault recovery handler. This macro clobbers x16 and x17.
 *
 *	label - recovery label
 *	tpidr - persisted thread pointer
 *	old_handler - persisted recovery handler
 *	label_in_adr_range - whether \label is within 1 MB of PC
 */
.macro SET_RECOVERY_HANDLER label, tpidr=x16, old_handler=x10, label_in_adr_range=0
	// Note: x16 and x17 are designated for use as temporaries in
	// interruptible PAC routines. DO NOT CHANGE THESE REGISTER ASSIGNMENTS.
.if \label_in_adr_range==1 // Load the recovery handler address
	adr x17, \label
.else
	adrp x17, \label@page
	add x17, x17, \label@pageoff
.endif
#if defined(HAS_APPLE_PAC)
	mrs x16, TPIDR_EL1
	add x16, x16, TH_RECOVER
	movk x16, #PAC_DISCRIMINATOR_RECOVER, lsl 48
	pacia x17, x16 // Sign with IAKey + blended discriminator
#endif

	mrs \tpidr, TPIDR_EL1 // Load thread pointer
	ldr \old_handler, [\tpidr, TH_RECOVER] // Save previous recovery handler
	str x17, [\tpidr, TH_RECOVER] // Set new signed recovery handler
.endmacro

/*
 * CLEAR_RECOVERY_HANDLER
 *
 * Clears page fault handler set by SET_RECOVERY_HANDLER
 *
 *	tpidr - thread pointer saved by SET_RECOVERY_HANDLER
 *	old_handler - old recovery handler saved by SET_RECOVERY_HANDLER
 */
.macro CLEAR_RECOVERY_HANDLER tpidr=x16, old_handler=x10
	str \old_handler, [\tpidr, TH_RECOVER] // Restore the previous recovery handler
.endmacro
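
/*
 * Typical usage, mirroring the copyio routines below (illustrative sketch):
 *
 *	SET_RECOVERY_HANDLER copyio_error
 *	// ...loads/stores that may fault on user or kernel addresses...
 *	CLEAR_RECOVERY_HANDLER
 *
 * A fault taken inside the bracketed region resumes at the recovery label,
 * which is expected to restore the previous handler (as copyio_error does)
 * and return an error to the caller.
 */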


	.text
	.align 2
copyio_error:
	CLEAR_RECOVERY_HANDLER
	mov x0, #EFAULT // Return an EFAULT error
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _bcopyin(const char *src, char *dst, vm_size_t len)
 */
	.text
	.align 2
	.globl EXT(_bcopyin)
LEXT(_bcopyin)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	cmp x2, #16
	b.lt 2f
	sub x2, x2, #16
1:
	/* 16 bytes at a time */
	ldp x3, x4, [x0], #16
	stp x3, x4, [x1], #16
	subs x2, x2, #16
	b.ge 1b
	/* Fixup the len and test for completion */
	adds x2, x2, #16
	b.eq 3f
2: /* Bytewise */
	subs x2, x2, #1
	ldrb w3, [x0], #1
	strb w3, [x1], #1
	b.hi 2b
3:
	CLEAR_RECOVERY_HANDLER
	mov x0, #0
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic32(const char *src, uint32_t *dst)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic32)
LEXT(_copyin_atomic32)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	ldr w8, [x0]
	str w8, [x1]
	mov x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic32_wait_if_equals(const char *src, uint32_t value)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic32_wait_if_equals)
LEXT(_copyin_atomic32_wait_if_equals)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
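	// The load-exclusive below also arms this CPU's exclusive monitor for the
	// user address; a store to that location by another observer clears the
	// monitor and generates a wake-up event for the wfe, so when the current
	// value still equals the expected one the routine can sleep until the
	// value is likely to have changed.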
	ldxr w8, [x0]
	cmp w8, w1
	mov x0, ESTALE
	b.ne 1f
	mov x0, #0
	wfe
1:
	clrex
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyin_atomic64(const char *src, uint64_t *dst)
 */
	.text
	.align 2
	.globl EXT(_copyin_atomic64)
LEXT(_copyin_atomic64)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	ldr x8, [x0]
	str x8, [x1]
	mov x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * int _copyout_atomic32(uint32_t value, char *dst)
 */
	.text
	.align 2
	.globl EXT(_copyout_atomic32)
LEXT(_copyout_atomic32)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	str w0, [x1]
	mov x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _copyout_atomic64(uint64_t value, char *dst)
 */
	.text
	.align 2
	.globl EXT(_copyout_atomic64)
LEXT(_copyout_atomic64)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	str x0, [x1]
	mov x0, #0
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * int _bcopyout(const char *src, char *dst, vm_size_t len)
 */
	.text
	.align 2
	.globl EXT(_bcopyout)
LEXT(_bcopyout)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	/* If len is less than 16 bytes, just do a bytewise copy */
	cmp x2, #16
	b.lt 2f
	sub x2, x2, #16
1:
	/* 16 bytes at a time */
	ldp x3, x4, [x0], #16
	stp x3, x4, [x1], #16
	subs x2, x2, #16
	b.ge 1b
	/* Fixup the len and test for completion */
	adds x2, x2, #16
	b.eq 3f
2: /* Bytewise */
	subs x2, x2, #1
	ldrb w3, [x0], #1
	strb w3, [x1], #1
	b.hi 2b
3:
	CLEAR_RECOVERY_HANDLER
	mov x0, #0
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int _bcopyinstr(
 *	const user_addr_t user_addr,
 *	char *kernel_addr,
 *	vm_size_t max,
 *	vm_size_t *actual)
 */
	.text
	.align 2
	.globl EXT(_bcopyinstr)
LEXT(_bcopyinstr)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER Lcopyinstr_error, label_in_adr_range=1
	mov x4, #0 // x4 - total bytes copied
Lcopyinstr_loop:
	ldrb w5, [x0], #1 // Load a byte from the user source
	strb w5, [x1], #1 // Store a byte to the kernel dest
	add x4, x4, #1 // Increment bytes copied
	cbz x5, Lcopyinstr_done // If this byte is null, we're done
	cmp x4, x2 // If we're out of space, return an error
	b.ne Lcopyinstr_loop
Lcopyinstr_too_long:
	mov x5, #ENAMETOOLONG // Set current byte to error code for later return
Lcopyinstr_done:
	str x4, [x3] // Return number of bytes copied
	mov x0, x5 // Set error code (0 on success, ENAMETOOLONG on failure)
	b Lcopyinstr_exit
Lcopyinstr_error:
	mov x0, #EFAULT // Return EFAULT on error
Lcopyinstr_exit:
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG

/*
 * int copyinframe(const vm_address_t frame_addr, char *kernel_addr, bool is64bit)
 *
 *	Safely copy sixteen bytes (the fixed top of an ARM64 frame) from
 *	either user or kernel memory, or 8 bytes (AArch32) from user only.
 *
 *	x0 : address of frame to copy.
 *	x1 : kernel address at which to store data.
 *	w2 : whether to copy an AArch32 or AArch64 frame.
 *	x3 : temp
 *	x5 : temp (kernel virtual base)
 *	x9 : temp
 *	x10 : old recovery function (set by SET_RECOVERY_HANDLER)
 *	x12, x13 : backtrace data
 *	x16 : thread pointer (set by SET_RECOVERY_HANDLER)
 *
 */
	.text
	.align 2
	.globl EXT(copyinframe)
LEXT(copyinframe)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	SET_RECOVERY_HANDLER copyio_error
	cbnz w2, Lcopyinframe64 // Check frame size
	adrp x5, EXT(gVirtBase)@page // For 32-bit frame, make sure we're not trying to copy from kernel
	add x5, x5, EXT(gVirtBase)@pageoff
	ldr x5, [x5]
	cmp x5, x0 // See if address is in kernel virtual range
	b.hi Lcopyinframe32 // If below kernel virtual range, proceed.
	mov w0, #EFAULT // Should never have a 32-bit frame in kernel virtual range
	b Lcopyinframe_done

Lcopyinframe32:
	ldr x12, [x0] // Copy 8 bytes
	str x12, [x1]
	mov w0, #0 // Success
	b Lcopyinframe_done

Lcopyinframe64:
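	// The checks below treat the frame address as a kernel pointer if forcing
	// all TBI tag bits on still lands it in the kernel range; otherwise any
	// nonzero tag byte is rejected with EFAULT before the 16-byte copy.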
	mov x3, VM_MIN_KERNEL_ADDRESS // Check if kernel address
	orr x9, x0, TBI_MASK // Hide tags in address comparison
	cmp x9, x3 // If in kernel address range, skip tag test
	b.hs Lcopyinframe_valid
	tst x0, TBI_MASK // Detect tagged pointers
	b.eq Lcopyinframe_valid
	mov w0, #EFAULT // Tagged address, fail
	b Lcopyinframe_done
Lcopyinframe_valid:
	ldp x12, x13, [x0] // Copy 16 bytes
	stp x12, x13, [x1]
	mov w0, #0 // Success

Lcopyinframe_done:
	CLEAR_RECOVERY_HANDLER
	POP_FRAME
	ARM64_STACK_EPILOG


/*
 * uint32_t arm_debug_read_dscr(void)
 */
	.text
	.align 2
	.globl EXT(arm_debug_read_dscr)
LEXT(arm_debug_read_dscr)
	PANIC_UNIMPLEMENTED

/*
 * void arm_debug_set_cp14(arm_debug_state_t *debug_state)
 *
 *	Set debug registers to match the current thread state
 *	(NULL to disable). Assume 6 breakpoints and 2
 *	watchpoints, since that has been the case in all cores
 *	thus far.
 */
	.text
	.align 2
	.globl EXT(arm_debug_set_cp14)
LEXT(arm_debug_set_cp14)
	PANIC_UNIMPLEMENTED

#if defined(APPLE_ARM64_ARCH_FAMILY)
/*
 * Note: still have to ISB before executing wfi!
 */
	.text
	.align 2
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
	PUSH_FRAME

#if defined(APPLETYPHOON)
	// <rdar://problem/15827409>
	mrs x0, ARM64_REG_HID2 // Read HID2
	orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
	msr ARM64_REG_HID2, x0 // Write HID2
	dsb sy
	isb sy
#endif

#if __ARM_GLOBAL_SLEEP_BIT__
	// Enable deep sleep
	mrs x1, ARM64_REG_ACC_OVRD
	orr x1, x1, #(ARM64_REG_ACC_OVRD_enDeepSleep)
	and x1, x1, #(~(ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_mask))
	orr x1, x1, #( ARM64_REG_ACC_OVRD_disL2Flush4AccSlp_deepsleep)
	and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnSRM_mask))
	orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnSRM_deepsleep)
	and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2TrDnLnk_mask))
	orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2TrDnLnk_deepsleep)
	and x1, x1, #(~(ARM64_REG_ACC_OVRD_ok2PwrDnCPM_mask))
	orr x1, x1, #( ARM64_REG_ACC_OVRD_ok2PwrDnCPM_deepsleep)
#if HAS_RETENTION_STATE
	orr x1, x1, #(ARM64_REG_ACC_OVRD_disPioOnWfiCpu)
#endif
	msr ARM64_REG_ACC_OVRD, x1


#else
	// Enable deep sleep
	mov x1, ARM64_REG_CYC_CFG_deepSleep
	msr ARM64_REG_CYC_CFG, x1
#endif
	// Set "OK to power down" (<rdar://problem/12390433>)
	mrs x0, ARM64_REG_CYC_OVRD
	orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_down)
#if HAS_RETENTION_STATE
	orr x0, x0, #(ARM64_REG_CYC_OVRD_disWfiRetn)
#endif
	msr ARM64_REG_CYC_OVRD, x0

#if defined(APPLEMONSOON) || defined(APPLEVORTEX)
	ARM64_IS_PCORE x0
	cbz x0, Lwfi_inst // skip if not p-core

	/* <rdar://problem/32512947>: Flush the GUPS prefetcher prior to
	 * wfi. A Skye HW bug can cause the GUPS prefetcher on p-cores
	 * to be left with valid entries that fail to drain if a
	 * subsequent wfi is issued. This can prevent the core from
	 * power-gating. For the idle case that is recoverable, but
	 * for the deep-sleep (S2R) case in which cores MUST power-gate,
	 * it can lead to a hang. This can be prevented by disabling
	 * and re-enabling GUPS, which forces the prefetch queue to
	 * drain. This should be done as close to wfi as possible, i.e.
	 * at the very end of arm64_prepare_for_sleep(). */
#if defined(APPLEVORTEX)
	/* <rdar://problem/32821461>: Cyprus A0/A1 parts have a similar
	 * bug in the HSP prefetcher that can be worked around through
	 * the same method mentioned above for Skye. */
	SKIP_IF_CPU_VERSION_GREATER_OR_EQUAL x0, VORTEX_CPU_VERSION_B0, Lwfi_inst
#endif
	mrs x0, ARM64_REG_HID10
	orr x0, x0, #(ARM64_REG_HID10_DisHwpGups)
	msr ARM64_REG_HID10, x0
	isb sy
	and x0, x0, #(~(ARM64_REG_HID10_DisHwpGups))
	msr ARM64_REG_HID10, x0
	isb sy
#endif
Lwfi_inst:
	dsb sy
	isb sy
	wfi
	b Lwfi_inst

/*
 * Force WFI to use clock gating only
 *
 */
	.text
	.align 2
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	ARM64_STACK_PROLOG
	PUSH_FRAME

	mrs x0, ARM64_REG_CYC_OVRD
	orr x0, x0, #(ARM64_REG_CYC_OVRD_ok2pwrdn_force_up)
	msr ARM64_REG_CYC_OVRD, x0

	POP_FRAME
	ARM64_STACK_EPILOG


#if HAS_RETENTION_STATE
	.text
	.align 2
	.globl EXT(arm64_retention_wfi)
LEXT(arm64_retention_wfi)
	wfi
	cbz lr, Lwfi_retention // If lr is 0, we entered retention state and lost all GPRs except sp and pc
	ret // Otherwise just return to cpu_idle()
Lwfi_retention:
	mov x0, #1
	bl EXT(ClearIdlePop)
	mov x0, #0
	bl EXT(cpu_idle_exit) // cpu_idle_exit(from_reset = FALSE)
	b . // cpu_idle_exit() should never return
#endif

#if defined(APPLETYPHOON)

	.text
	.align 2
	.globl EXT(typhoon_prepare_for_wfi)

LEXT(typhoon_prepare_for_wfi)
	PUSH_FRAME

	// <rdar://problem/15827409>
	mrs x0, ARM64_REG_HID2 // Read HID2
	orr x0, x0, #(ARM64_REG_HID2_disMMUmtlbPrefetch) // Set HID.DisableMTLBPrefetch
	msr ARM64_REG_HID2, x0 // Write HID2
	dsb sy
	isb sy

	POP_FRAME
	ret


	.text
	.align 2
	.globl EXT(typhoon_return_from_wfi)
LEXT(typhoon_return_from_wfi)
	PUSH_FRAME

	// <rdar://problem/15827409>
	mrs x0, ARM64_REG_HID2 // Read HID2
	mov x1, #(ARM64_REG_HID2_disMMUmtlbPrefetch) //
	bic x0, x0, x1 // Clear HID.DisableMTLBPrefetch
	msr ARM64_REG_HID2, x0 // Write HID2
	dsb sy
	isb sy

	POP_FRAME
	ret
#endif

#ifdef APPLETYPHOON

#define HID0_DEFEATURES_1 0x0000a0c000064010ULL
#define HID1_DEFEATURES_1 0x000000004005bf20ULL
#define HID2_DEFEATURES_1 0x0000000000102074ULL
#define HID3_DEFEATURES_1 0x0000000000400003ULL
#define HID4_DEFEATURES_1 0x83ff00e100000268ULL
#define HID7_DEFEATURES_1 0x000000000000000eULL

#define HID0_DEFEATURES_2 0x0000a1c000020010ULL
#define HID1_DEFEATURES_2 0x000000000005d720ULL
#define HID2_DEFEATURES_2 0x0000000000002074ULL
#define HID3_DEFEATURES_2 0x0000000000400001ULL
#define HID4_DEFEATURES_2 0x8390000200000208ULL
#define HID7_DEFEATURES_2 0x0000000000000000ULL

/*
	arg0 = target register
	arg1 = 64-bit constant
*/
.macro LOAD_UINT64
	movz $0, #(($1 >> 48) & 0xffff), lsl #48
	movk $0, #(($1 >> 32) & 0xffff), lsl #32
	movk $0, #(($1 >> 16) & 0xffff), lsl #16
	movk $0, #(($1) & 0xffff)
.endmacro
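
/*
 * Example expansion (illustrative): "LOAD_UINT64 x1, HID0_DEFEATURES_1" emits
 * one movz plus three movk instructions that materialize the 64-bit constant
 * in x1, sixteen bits at a time from the most significant halfword down.
 */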

	.text
	.align 2
	.globl EXT(cpu_defeatures_set)
LEXT(cpu_defeatures_set)
	PUSH_FRAME
	cmp x0, #2
	b.eq cpu_defeatures_set_2
	cmp x0, #1
	b.ne cpu_defeatures_set_ret
	LOAD_UINT64 x1, HID0_DEFEATURES_1
	mrs x0, ARM64_REG_HID0
	orr x0, x0, x1
	msr ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_1
	mrs x0, ARM64_REG_HID1
	orr x0, x0, x1
	msr ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_1
	mrs x0, ARM64_REG_HID2
	orr x0, x0, x1
	msr ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_1
	mrs x0, ARM64_REG_HID3
	orr x0, x0, x1
	msr ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_1
	mrs x0, ARM64_REG_HID4
	orr x0, x0, x1
	msr ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_1
	mrs x0, ARM64_REG_HID7
	orr x0, x0, x1
	msr ARM64_REG_HID7, x0
	dsb sy
	isb sy
	b cpu_defeatures_set_ret
cpu_defeatures_set_2:
	LOAD_UINT64 x1, HID0_DEFEATURES_2
	mrs x0, ARM64_REG_HID0
	orr x0, x0, x1
	msr ARM64_REG_HID0, x0
	LOAD_UINT64 x1, HID1_DEFEATURES_2
	mrs x0, ARM64_REG_HID1
	orr x0, x0, x1
	msr ARM64_REG_HID1, x0
	LOAD_UINT64 x1, HID2_DEFEATURES_2
	mrs x0, ARM64_REG_HID2
	orr x0, x0, x1
	msr ARM64_REG_HID2, x0
	LOAD_UINT64 x1, HID3_DEFEATURES_2
	mrs x0, ARM64_REG_HID3
	orr x0, x0, x1
	msr ARM64_REG_HID3, x0
	LOAD_UINT64 x1, HID4_DEFEATURES_2
	mrs x0, ARM64_REG_HID4
	orr x0, x0, x1
	msr ARM64_REG_HID4, x0
	LOAD_UINT64 x1, HID7_DEFEATURES_2
	mrs x0, ARM64_REG_HID7
	orr x0, x0, x1
	msr ARM64_REG_HID7, x0
	dsb sy
	isb sy
	b cpu_defeatures_set_ret
cpu_defeatures_set_ret:
	POP_FRAME
	ret
#endif

#else /* !defined(APPLE_ARM64_ARCH_FAMILY) */
	.text
	.align 2
	.globl EXT(arm64_prepare_for_sleep)
LEXT(arm64_prepare_for_sleep)
	PUSH_FRAME
Lwfi_inst:
	dsb sy
	isb sy
	wfi
	b Lwfi_inst

/*
 * Force WFI to use clock gating only
 * Note: for non-Apple device, do nothing.
 */
	.text
	.align 2
	.globl EXT(arm64_force_wfi_clock_gate)
LEXT(arm64_force_wfi_clock_gate)
	PUSH_FRAME
	nop
	POP_FRAME

#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */

/*
 * void arm64_replace_bootstack(cpu_data_t *cpu_data)
 *
 * This must be called from a kernel thread context running on the boot CPU,
 * after setting up new exception stacks in per-CPU data. That will guarantee
 * that the stack(s) we're trying to replace aren't currently in use. For
 * KTRR-protected devices, this must also be called prior to VM prot finalization
 * and lockdown, as updating SP1 requires a sensitive instruction.
 */
	.text
	.align 2
	.globl EXT(arm64_replace_bootstack)
LEXT(arm64_replace_bootstack)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	// Set the exception stack pointer
	ldr x0, [x0, CPU_EXCEPSTACK_TOP]
	mrs x4, DAIF // Load current DAIF; use x4 as pinst may trash x1-x3
	msr DAIFSet, #(DAIFSC_IRQF | DAIFSC_FIQF | DAIFSC_ASYNCF) // Disable IRQ/FIQ/serror
	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov x1, lr
	bl EXT(pinst_spsel_1)
	mov lr, x1
#else
	msr SPSel, #1
#endif
	mov sp, x0
	msr SPSel, #0
	msr DAIF, x4 // Restore interrupt state
	POP_FRAME
	ARM64_STACK_EPILOG

#ifdef MONITOR
/*
 * unsigned long monitor_call(uintptr_t callnum, uintptr_t arg1,
 *                            uintptr_t arg2, uintptr_t arg3)
 *
 * Call the EL3 monitor with 4 arguments in registers
 * The monitor interface maintains the same ABI as the C function call standard. Callee-saved
 * registers are preserved, temporary registers are not. Parameters and results are passed in
 * the usual manner.
 */
	.text
	.align 2
	.globl EXT(monitor_call)
LEXT(monitor_call)
	smc 0x11
	ret
#endif

#ifdef HAS_APPLE_PAC
/**
 * void ml_sign_thread_state(arm_saved_state_t *ss, uint64_t pc,
 *                           uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                           uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_sign_thread_state)
LEXT(ml_sign_thread_state)
	pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
	/*
	 * Mask off the carry flag so we don't need to re-sign when that flag is
	 * touched by the system call return path.
	 */
	bic x2, x2, PSR_CF
	pacga x1, x2, x1 /* SPSR hash (gkey + pc hash) */
	pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */
	pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */
	pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */
	str x1, [x0, SS64_JOPHASH]
	ret

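
/*
 * The chained pacga operations above amount to a keyed MAC over
 * (pc, cpsr, lr, x16, x17), rooted at the saved-state pointer and stored at
 * offset SS64_JOPHASH; ml_check_signed_state below recomputes the same chain
 * and panics on a mismatch.
 */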
/**
 * void ml_check_signed_state(arm_saved_state_t *ss, uint64_t pc,
 *                            uint32_t cpsr, uint64_t lr, uint64_t x16,
 *                            uint64_t x17)
 */
	.text
	.align 2
	.globl EXT(ml_check_signed_state)
LEXT(ml_check_signed_state)
	pacga x1, x1, x0 /* PC hash (gkey + &arm_saved_state) */
	/*
	 * Mask off the carry flag so we don't need to re-sign when that flag is
	 * touched by the system call return path.
	 */
	bic x2, x2, PSR_CF
	pacga x1, x2, x1 /* SPSR hash (gkey + pc hash) */
	pacga x1, x3, x1 /* LR Hash (gkey + spsr hash) */
	pacga x1, x4, x1 /* X16 hash (gkey + lr hash) */
	pacga x1, x5, x1 /* X17 hash (gkey + x16 hash) */
	ldr x2, [x0, SS64_JOPHASH]
	cmp x1, x2
	b.ne Lcheck_hash_panic
	ret
Lcheck_hash_panic:
	mov x1, x0
	adr x0, Lcheck_hash_str
	CALL_EXTERN panic_with_thread_kernel_state
Lcheck_hash_str:
	.asciz "JOP Hash Mismatch Detected (PC, CPSR, or LR corruption)"

/**
 * void ml_auth_thread_state_invalid_cpsr(arm_saved_state_t *ss)
 *
 * Panics due to an invalid CPSR value in ss.
 */
	.text
	.align 2
	.globl EXT(ml_auth_thread_state_invalid_cpsr)
LEXT(ml_auth_thread_state_invalid_cpsr)
	ARM64_STACK_PROLOG
	PUSH_FRAME
	mov x1, x0
	adr x0, Linvalid_cpsr_str
	CALL_EXTERN panic_with_thread_kernel_state

Linvalid_cpsr_str:
	.asciz "Thread state corruption detected (PE mode == 0)"
#endif /* HAS_APPLE_PAC */

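
/*
 * fill32_dczva: zero x1 bytes at x0 using DC ZVA, 64 bytes per iteration.
 * This presumably relies on the caller passing a 64-byte-aligned buffer and
 * length, and on the platform ZVA block size being 64 bytes.
 */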
	.text
	.align 2
	.globl EXT(fill32_dczva)
LEXT(fill32_dczva)
0:
	dc zva, x0
	add x0, x0, #64
	subs x1, x1, #64
	b.hi 0b
	ret

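
/*
 * fill32_nt: fill x1 bytes at x0 with the 32-bit pattern in w2 using
 * non-temporal pair stores, 128 bytes per iteration; the caller presumably
 * guarantees suitable alignment and a 128-byte-multiple length.
 */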
	.text
	.align 2
	.globl EXT(fill32_nt)
LEXT(fill32_nt)
	dup.4s v0, w2
0:
	stnp q0, q0, [x0]
	stnp q0, q0, [x0, #0x20]
	stnp q0, q0, [x0, #0x40]
	stnp q0, q0, [x0, #0x60]
	add x0, x0, #128
	subs x1, x1, #128
	b.hi 0b
	ret

/* vim: set sw=4 ts=4: */