/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

extern int debug_task;
extern bool need_wa_rdar_55577508;

/* zone for debug_state area */
ZONE_DECLARE(ads_zone, "arm debug state", sizeof(arm_debug_state_t), ZC_NONE);
ZONE_DECLARE(user_ss_zone, "user save state", sizeof(arm_context_t), ZC_NONE);

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
        pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}

static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
        /*
         * We build with -fno-strict-aliasing, so the load through temporaries
         * is required so that this generates a single load / store pair.
         */
        cpu_data_t *datap = old->machine.CpuDatap;
        vm_offset_t base = old->machine.pcpu_data_base;

        /* TODO: Should this be ordered? */

        old->machine.CpuDatap = NULL;
        old->machine.pcpu_data_base = 0;

        new->machine.CpuDatap = datap;
        new->machine.pcpu_data_base = base;
}

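/*
 * Illustrative (non-compiled) sketch of the codegen concern above: without
 * the temporaries, a direct field-to-field copy such as the hypothetical
 * variant below may be emitted as interleaved loads and stores rather than
 * a single ldp/stp pair, because under -fno-strict-aliasing the compiler
 * must assume the stores through `new` could alias the loads through `old`.
 */
#if 0
static inline void
machine_thread_switch_cpu_data_naive(thread_t old, thread_t new)
{
        /* Potentially four independent memory ops instead of ldp + stp. */
        new->machine.CpuDatap = old->machine.CpuDatap;
        new->machine.pcpu_data_base = old->machine.pcpu_data_base;
        old->machine.CpuDatap = NULL;
        old->machine.pcpu_data_base = 0;
}
#endif
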
/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
        thread_t retval;
        pmap_t new_pmap;

#if __ARM_PAN_AVAILABLE__
        if (__improbable(__builtin_arm_rsr("pan") == 0)) {
                panic("context switch with PAN disabled");
        }
#endif

#define machine_switch_context_kprintf(x...) \
        /* kprintf("machine_switch_context: " x) */

        if (old == new) {
                panic("machine_switch_context");
        }

        kpc_off_cpu(old);

        new_pmap = new->map->pmap;
        if (old->map->pmap != new_pmap) {
                pmap_switch(new_pmap);
        } else {
                /*
                 * If the thread is preempted while performing cache or TLB maintenance,
                 * it may be migrated to a different CPU between the completion of the relevant
                 * maintenance instruction and the synchronizing DSB. ARM requires that the
                 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
                 * in order to guarantee completion of the instruction and visibility of its effects.
                 * Issue DSB here to enforce that guarantee. We only do this for the case in which
                 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
                 * TTBR0. Note also that cache maintenance may be performed in userspace, so we
                 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
                 * a pending kernel TLB or cache maintenance instruction.
                 */
                __builtin_arm_dsb(DSB_ISH);
        }

        machine_thread_switch_cpu_data(old, new);

        machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);

        retval = Switch_context(old, continuation, new);
        assert(retval != NULL);

        return retval;
}

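/*
 * Illustrative (non-compiled) sketch of the hazard the DSB above closes.
 * A thread performing TLB maintenance can be preempted between the TLBI
 * and its completing DSB; if it then migrates, a DSB issued on the new
 * CPU does not complete a TLBI issued on the old one, so the
 * context-switch path issues the DSB on the departing thread's behalf.
 */
#if 0
static void
example_tlb_invalidate_page(uint64_t va)
{
        /* Post the invalidate; VA[55:12] goes in the operand's low bits. */
        __asm__ volatile ("tlbi vaae1is, %0" : : "r" (va >> 12));
        /*
         * Preemption and migration at this point would strand the TLBI
         * without its DSB, were it not for the DSB in the else-branch of
         * machine_switch_context() / machine_stack_handoff().
         */
        __builtin_arm_dsb(DSB_ISH);
        __builtin_arm_isb(ISB_SY);
}
#endif
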
boolean_t
machine_thread_on_core(thread_t thread)
{
        return thread->machine.CpuDatap != NULL;
}

/*
 * Routine: machine_thread_create
 *
 */
kern_return_t
machine_thread_create(thread_t thread,
    task_t task)
{
        arm_context_t *thread_user_ss = NULL;
        kern_return_t result = KERN_SUCCESS;

#define machine_thread_create_kprintf(x...) \
        /* kprintf("machine_thread_create: " x) */

        machine_thread_create_kprintf("thread = %x\n", thread);

        if (current_thread() != thread) {
                thread->machine.CpuDatap = (cpu_data_t *)0;
                // setting this offset will cause trying to use it to panic
                thread->machine.pcpu_data_base = (vm_offset_t)VM_MIN_KERNEL_ADDRESS;
        }
        thread->machine.preemption_count = 0;
        thread->machine.cthread_self = 0;
        thread->machine.kpcb = NULL;
        thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
        thread->machine.rop_pid = task->rop_pid;
        thread->machine.jop_pid = task->jop_pid;
        thread->machine.disable_user_jop = task->disable_user_jop;
#endif

        if (task != kernel_task) {
                /* If this isn't a kernel thread, we'll have userspace state. */
                thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);

                if (!thread->machine.contextData) {
                        result = KERN_FAILURE;
                        goto done;
                }

                thread->machine.upcb = &thread->machine.contextData->ss;
                thread->machine.uNeon = &thread->machine.contextData->ns;

                if (task_has_64Bit_data(task)) {
                        thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
                        thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
                        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
                        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
                } else {
                        thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
                        thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
                        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
                        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
                }
        } else {
                thread->machine.upcb = NULL;
                thread->machine.uNeon = NULL;
                thread->machine.contextData = NULL;
        }

        bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
        result = machine_thread_state_initialize(thread);

done:
        if (result != KERN_SUCCESS) {
                thread_user_ss = thread->machine.contextData;

                if (thread_user_ss) {
                        thread->machine.upcb = NULL;
                        thread->machine.uNeon = NULL;
                        thread->machine.contextData = NULL;
                        zfree(user_ss_zone, thread_user_ss);
                }
        }

        return result;
}

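/*
 * Userspace-side sketch (non-compiled; this file is kernel code) of how the
 * flavor/count pairs initialized above surface through the Mach thread-state
 * API: a caller passes a flavor and a count, and the kernel validates them
 * against the saved-state headers set up in machine_thread_create().
 */
#if 0
#include <mach/mach.h>
#include <mach/thread_status.h>

kern_return_t
example_read_thread_state(thread_act_t thread)
{
        arm_thread_state64_t state;
        mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;

        /* Fails with KERN_INVALID_ARGUMENT if flavor/count don't match. */
        return thread_get_state(thread, ARM_THREAD_STATE64,
                   (thread_state_t)&state, &count);
}
#endif
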
/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(thread_t thread)
{
        arm_context_t *thread_user_ss;

        if (thread->machine.contextData) {
                /* Disassociate the user save state from the thread before we free it. */
                thread_user_ss = thread->machine.contextData;
                thread->machine.upcb = NULL;
                thread->machine.uNeon = NULL;
                thread->machine.contextData = NULL;

                zfree(user_ss_zone, thread_user_ss);
        }

        if (thread->machine.DebugData != NULL) {
                if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
                        arm_debug_set(NULL);
                }

                zfree(ads_zone, thread->machine.DebugData);
        }
}

/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
}

/*
 * Routine: machine_thread_template_init
 *
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
        /* Nothing to do on this platform. */
}

/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr(void)
{
        return get_saved_state_pc(current_thread()->machine.upcb);
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
        vm_offset_t stack;

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
            (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

        stack = thread->kernel_stack;
        thread->kernel_stack = 0;
        thread->machine.kstackptr = 0;

        return stack;
}

/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
        struct arm_kernel_context *context;
        struct arm_kernel_saved_state *savestate;
        struct arm_kernel_neon_saved_state *neon_savestate;
        uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
        /* kprintf("machine_stack_attach: " x) */

        KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
            (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

        thread->kernel_stack = stack;
        thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
        thread_initialize_kernel_state(thread);

        machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

        current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
        context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
        savestate = &context->ss;
        savestate->fp = 0;
        savestate->sp = thread->machine.kstackptr;

        /*
         * The PC and CPSR of the kernel stack saved state are never used by context switch
         * code, and should never be used on exception return either. We're going to poison
         * these values to ensure they never get copied to the exception frame and used to
         * hijack control flow or privilege level on exception return.
         */

        const uint32_t default_cpsr = PSR64_KERNEL_POISON;
#if defined(HAS_APPLE_PAC)
        /* Sign the initial kernel stack saved state */
        boolean_t intr = ml_set_interrupts_enabled(FALSE);
        asm volatile (
                "mov x0, %[ss]"                         "\n"

                "mov x1, xzr"                           "\n"
                "str x1, [x0, %[SS64_PC]]"              "\n"

                "mov x2, %[default_cpsr_lo]"            "\n"
                "movk x2, %[default_cpsr_hi], lsl #16"  "\n"
                "str w2, [x0, %[SS64_CPSR]]"            "\n"

                "adrp x3, _thread_continue@page"        "\n"
                "add x3, x3, _thread_continue@pageoff"  "\n"
                "str x3, [x0, %[SS64_LR]]"              "\n"

                "mov x4, xzr"                           "\n"
                "mov x5, xzr"                           "\n"
                "stp x4, x5, [x0, %[SS64_X16]]"         "\n"

                "mov x6, lr"                            "\n"
                "bl _ml_sign_kernel_thread_state"       "\n"
                "mov lr, x6"                            "\n"
                :
                : [ss] "r"(&context->ss),
                  [default_cpsr_lo] "M"(default_cpsr & 0xFFFF),
                  [default_cpsr_hi] "M"(default_cpsr >> 16),
                  [SS64_X16] "i"(offsetof(struct arm_kernel_saved_state, x[0])),
                  [SS64_PC] "i"(offsetof(struct arm_kernel_saved_state, pc)),
                  [SS64_CPSR] "i"(offsetof(struct arm_kernel_saved_state, cpsr)),
                  [SS64_LR] "i"(offsetof(struct arm_kernel_saved_state, lr))
                : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
        );
        ml_set_interrupts_enabled(intr);
#else
        savestate->lr = (uintptr_t)thread_continue;
        savestate->cpsr = default_cpsr;
        savestate->pc = 0;
#endif /* defined(HAS_APPLE_PAC) */
        neon_savestate = &context->ns;
        neon_savestate->fpcr = FPCR_DEFAULT;
        machine_stack_attach_kprintf("thread = %p lr = %llx sp = %llx\n", thread, savestate->lr, savestate->sp);
}

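/*
 * Resulting kernel stack layout, inferred from the arithmetic above (exact
 * field placement inside thread_kernel_state is an assumption):
 *
 *      stack + kernel_stack_size   -> top of the stack allocation
 *        struct thread_kernel_state   (holds the arm_kernel_context `machine`)
 *      kstackptr                   -> initial SP; the stack grows down
 *      ...
 *      stack                       -> base of the allocation
 */
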
/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
        vm_offset_t stack;
        pmap_t new_pmap;

#if __ARM_PAN_AVAILABLE__
        if (__improbable(__builtin_arm_rsr("pan") == 0)) {
                panic("stack handoff with PAN disabled");
        }
#endif

        kpc_off_cpu(old);

        stack = machine_stack_detach(old);
        new->kernel_stack = stack;
        new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
        if (stack == old->reserved_stack) {
                assert(new->reserved_stack);
                old->reserved_stack = new->reserved_stack;
                new->reserved_stack = stack;
        }

        new_pmap = new->map->pmap;
        if (old->map->pmap != new_pmap) {
                pmap_switch(new_pmap);
        } else {
                /*
                 * If the thread is preempted while performing cache or TLB maintenance,
                 * it may be migrated to a different CPU between the completion of the relevant
                 * maintenance instruction and the synchronizing DSB. ARM requires that the
                 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
                 * in order to guarantee completion of the instruction and visibility of its effects.
                 * Issue DSB here to enforce that guarantee. We only do this for the case in which
                 * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
                 * TTBR0. Note also that cache maintenance may be performed in userspace, so we
                 * cannot further limit this operation e.g. by setting a per-thread flag to indicate
                 * a pending kernel TLB or cache maintenance instruction.
                 */
                __builtin_arm_dsb(DSB_ISH);
        }

        machine_thread_switch_cpu_data(old, new);

        machine_set_current_thread(new);
        thread_initialize_kernel_state(new);
}

/*
 * Routine: call_continuation
 *
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
        /* kprintf("call_continuation_kprintf:" x) */

        call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
        Call_continuation(continuation, parameter, wresult, enable_interrupts);
}

#define SET_DBGBCRn(n, value, accum) \
        __asm__ volatile( \
                "msr DBGBCR" #n "_EL1, %[val]\n" \
                "orr %[result], %[result], %[val]\n" \
                : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
        __asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
        __asm__ volatile( \
                "msr DBGWCR" #n "_EL1, %[val]\n" \
                "orr %[result], %[result], %[val]\n" \
                : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
        __asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))

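/*
 * For reference, SET_DBGBCRn(0, v, a) expands to roughly:
 *
 *     __asm__ volatile ("msr DBGBCR0_EL1, %[val]\n"
 *                       "orr %[result], %[result], %[val]\n"
 *                       : [result] "+r"(a) : [val] "r"(v));
 *
 * i.e. each control word is both written to its system register and OR'd
 * into the accumulator, so the callers below can decide with a single
 * compare whether any breakpoint or watchpoint is enabled.
 */
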
void
arm_debug_set32(arm_debug_state_t *debug_state)
{
        struct cpu_data * cpu_data_ptr;
        arm_debug_info_t * debug_info = arm_debug_info();
        boolean_t intr;
        arm_debug_state_t off_state;
        uint64_t all_ctrls = 0;

        intr = ml_set_interrupts_enabled(FALSE);
        cpu_data_ptr = getCpuDatap();

        // Set current user debug
        cpu_data_ptr->cpu_user_debug = debug_state;

        if (NULL == debug_state) {
                bzero(&off_state, sizeof(off_state));
                debug_state = &off_state;
        }

        switch (debug_info->num_breakpoint_pairs) {
        case 16:
                SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
                SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
                OS_FALLTHROUGH;
        case 15:
                SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
                SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
                OS_FALLTHROUGH;
        case 14:
                SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
                SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
                OS_FALLTHROUGH;
        case 13:
                SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
                SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
                OS_FALLTHROUGH;
        case 12:
                SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
                SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
                OS_FALLTHROUGH;
        case 11:
                SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
                SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
                OS_FALLTHROUGH;
        case 10:
                SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
                SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
                OS_FALLTHROUGH;
        case 9:
                SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
                SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
                OS_FALLTHROUGH;
        case 8:
                SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
                SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
                OS_FALLTHROUGH;
        case 7:
                SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
                SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
                OS_FALLTHROUGH;
        case 6:
                SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
                SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
                OS_FALLTHROUGH;
        case 5:
                SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
                SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
                OS_FALLTHROUGH;
        case 4:
                SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
                SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
                OS_FALLTHROUGH;
        case 3:
                SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
                SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
                OS_FALLTHROUGH;
        case 2:
                SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
                SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
                OS_FALLTHROUGH;
        case 1:
                SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
                SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
                OS_FALLTHROUGH;
        default:
                break;
        }

        switch (debug_info->num_watchpoint_pairs) {
        case 16:
                SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
                SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
                OS_FALLTHROUGH;
        case 15:
                SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
                SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
                OS_FALLTHROUGH;
        case 14:
                SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
                SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
                OS_FALLTHROUGH;
        case 13:
                SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
                SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
                OS_FALLTHROUGH;
        case 12:
                SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
                SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
                OS_FALLTHROUGH;
        case 11:
                SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
                SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
                OS_FALLTHROUGH;
        case 10:
                SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
                SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
                OS_FALLTHROUGH;
        case 9:
                SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
                SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
                OS_FALLTHROUGH;
        case 8:
                SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
                SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
                OS_FALLTHROUGH;
        case 7:
                SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
                SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
                OS_FALLTHROUGH;
        case 6:
                SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
                SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
                OS_FALLTHROUGH;
        case 5:
                SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
                SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
                OS_FALLTHROUGH;
        case 4:
                SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
                SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
                OS_FALLTHROUGH;
        case 3:
                SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
                SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
                OS_FALLTHROUGH;
        case 2:
                SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
                SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
                OS_FALLTHROUGH;
        case 1:
                SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
                SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
                OS_FALLTHROUGH;
        default:
                break;
        }

#if defined(CONFIG_KERNEL_INTEGRITY)
        if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
                panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
        }
#endif

        /*
         * Breakpoint/Watchpoint Enable
         */
        if (all_ctrls != 0) {
                update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
        } else {
                update_mdscr(0x8000, 0);
        }

        /*
         * Software debug single step enable
         */
        if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
                update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

                mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
        } else {
                update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
                // Workaround for radar 20619637
                __builtin_arm_isb(ISB_SY);
#endif
        }

        (void) ml_set_interrupts_enabled(intr);
}

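/*
 * MDSCR_EL1 bits manipulated above (names per the ARMv8 ARM; the raw masks
 * 0x8000 and 0x1 correspond to):
 *
 *     MDSCR_EL1.SS  = bit 0   -- software step enable
 *     MDSCR_EL1.MDE = bit 15  -- monitor debug events (brk/watchpoints)
 *
 * update_mdscr(clear, set) clears then sets those bits, so
 * update_mdscr(0x8000, 1) disables MDE while enabling single step, and
 * update_mdscr(0x1, 0) simply turns single step back off.
 */
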
void
arm_debug_set64(arm_debug_state_t *debug_state)
{
        struct cpu_data * cpu_data_ptr;
        arm_debug_info_t * debug_info = arm_debug_info();
        boolean_t intr;
        arm_debug_state_t off_state;
        uint64_t all_ctrls = 0;

        intr = ml_set_interrupts_enabled(FALSE);
        cpu_data_ptr = getCpuDatap();

        // Set current user debug
        cpu_data_ptr->cpu_user_debug = debug_state;

        if (NULL == debug_state) {
                bzero(&off_state, sizeof(off_state));
                debug_state = &off_state;
        }

        switch (debug_info->num_breakpoint_pairs) {
        case 16:
                SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
                SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
                OS_FALLTHROUGH;
        case 15:
                SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
                SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
                OS_FALLTHROUGH;
        case 14:
                SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
                SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
                OS_FALLTHROUGH;
        case 13:
                SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
                SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
                OS_FALLTHROUGH;
        case 12:
                SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
                SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
                OS_FALLTHROUGH;
        case 11:
                SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
                SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
                OS_FALLTHROUGH;
        case 10:
                SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
                SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
                OS_FALLTHROUGH;
        case 9:
                SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
                SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
                OS_FALLTHROUGH;
        case 8:
                SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
                SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
                OS_FALLTHROUGH;
        case 7:
                SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
                SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
                OS_FALLTHROUGH;
        case 6:
                SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
                SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
                OS_FALLTHROUGH;
        case 5:
                SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
                SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
                OS_FALLTHROUGH;
        case 4:
                SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
                SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
                OS_FALLTHROUGH;
        case 3:
                SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
                SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
                OS_FALLTHROUGH;
        case 2:
                SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
                SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
                OS_FALLTHROUGH;
        case 1:
                SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
                SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
                OS_FALLTHROUGH;
        default:
                break;
        }

        switch (debug_info->num_watchpoint_pairs) {
        case 16:
                SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
                SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
                OS_FALLTHROUGH;
        case 15:
                SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
                SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
                OS_FALLTHROUGH;
        case 14:
                SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
                SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
                OS_FALLTHROUGH;
        case 13:
                SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
                SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
                OS_FALLTHROUGH;
        case 12:
                SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
                SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
                OS_FALLTHROUGH;
        case 11:
                SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
                SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
                OS_FALLTHROUGH;
        case 10:
                SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
                SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
                OS_FALLTHROUGH;
        case 9:
                SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
                SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
                OS_FALLTHROUGH;
        case 8:
                SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
                SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
                OS_FALLTHROUGH;
        case 7:
                SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
                SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
                OS_FALLTHROUGH;
        case 6:
                SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
                SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
                OS_FALLTHROUGH;
        case 5:
                SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
                SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
                OS_FALLTHROUGH;
        case 4:
                SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
                SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
                OS_FALLTHROUGH;
        case 3:
                SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
                SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
                OS_FALLTHROUGH;
        case 2:
                SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
                SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
                OS_FALLTHROUGH;
        case 1:
                SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
                SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
                OS_FALLTHROUGH;
        default:
                break;
        }

#if defined(CONFIG_KERNEL_INTEGRITY)
        if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
                panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
        }
#endif

        /*
         * Breakpoint/Watchpoint Enable
         */
        if (all_ctrls != 0) {
                update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
        } else {
                update_mdscr(0x8000, 0);
        }

        /*
         * Software debug single step enable
         */
        if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
                update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

                mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
        } else {
                update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
                // Workaround for radar 20619637
                __builtin_arm_isb(ISB_SY);
#endif
        }

        (void) ml_set_interrupts_enabled(intr);
}

void
arm_debug_set(arm_debug_state_t *debug_state)
{
        if (debug_state) {
                switch (debug_state->dsh.flavor) {
                case ARM_DEBUG_STATE32:
                        arm_debug_set32(debug_state);
                        break;
                case ARM_DEBUG_STATE64:
                        arm_debug_set64(debug_state);
                        break;
                default:
                        panic("arm_debug_set");
                        break;
                }
        } else {
                if (thread_is_64bit_data(current_thread())) {
                        arm_debug_set64(debug_state);
                } else {
                        arm_debug_set32(debug_state);
                }
        }
}

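/*
 * Userspace-side sketch (non-compiled; this is the caller's view of the
 * machinery above) of arming one hardware breakpoint via the Mach API.
 * The __-prefixed field names are the UNIX03 userspace spelling of
 * arm_debug_state64_t, and the BCR bit layout (E = bit 0, PMC = bits 2:1,
 * BAS = bits 8:5) follows the ARMv8 ARM.
 */
#if 0
#include <mach/mach.h>
#include <mach/thread_status.h>

kern_return_t
example_set_hw_breakpoint(thread_act_t thread, uint64_t addr)
{
        arm_debug_state64_t dbg = { 0 };

        dbg.__bvr[0] = addr;
        /* E=1 (enable), PMC=0b10 (EL0 only), BAS=0xF (A64 instruction). */
        dbg.__bcr[0] = (0xFULL << 5) | (0x2ULL << 1) | 1ULL;

        /* debug_state_is_valid64() below rejects kernel-range addresses. */
        return thread_set_state(thread, ARM_DEBUG_STATE64,
                   (thread_state_t)&dbg, ARM_DEBUG_STATE64_COUNT);
}
#endif
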
#define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
        arm_debug_info_t *debug_info = arm_debug_info();
        uint32_t i;
        for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
                if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
                        return FALSE;
                }
        }

        for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
                if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
                        return FALSE;
                }
        }
        return TRUE;
}

boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
        arm_debug_info_t *debug_info = arm_debug_info();
        uint32_t i;
        for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
                if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
                        return FALSE;
                }
        }

        for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
                if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
                        return FALSE;
                }
        }
        return TRUE;
}

boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
        arm_debug_info_t *debug_info = arm_debug_info();
        uint32_t i;
        for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
                if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
                        return FALSE;
                }
        }

        for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
                if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
                        return FALSE;
                }
        }
        return TRUE;
}

/*
 * Duplicate one arm_debug_state_t to another. "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,
    arm_legacy_debug_state_t * target,
    __unused boolean_t all)
{
        bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}

void
copy_debug_state32(arm_debug_state32_t * src,
    arm_debug_state32_t * target,
    __unused boolean_t all)
{
        bcopy(src, target, sizeof(arm_debug_state32_t));
}

void
copy_debug_state64(arm_debug_state64_t * src,
    arm_debug_state64_t * target,
    __unused boolean_t all)
{
        bcopy(src, target, sizeof(arm_debug_state64_t));
}

kern_return_t
machine_thread_set_tsd_base(thread_t thread,
    mach_vm_offset_t tsd_base)
{
        if (thread->task == kernel_task) {
                return KERN_INVALID_ARGUMENT;
        }

        if (tsd_base & MACHDEP_CPUNUM_MASK) {
                return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_addr(thread)) {
                if (tsd_base > vm_map_max(thread->map)) {
                        tsd_base = 0ULL;
                }
        } else {
                if (tsd_base > UINT32_MAX) {
                        tsd_base = 0ULL;
                }
        }

        thread->machine.cthread_self = tsd_base;

        /* For current thread, make the TSD base active immediately */
        if (thread == current_thread()) {
                uint64_t cpunum, tpidrro_el0;

                mp_disable_preemption();
                tpidrro_el0 = get_tpidrro();
                cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
                set_tpidrro(tsd_base | cpunum);
                mp_enable_preemption();
        }

        return KERN_SUCCESS;
}

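/*
 * Sketch (non-compiled) of recovering the two fields packed into
 * TPIDRRO_EL0 above: the TSD base occupies the high bits and the CPU
 * number the low bits, which is why the function rejects a tsd_base
 * that overlaps MACHDEP_CPUNUM_MASK.
 */
#if 0
static inline void
example_decode_tpidrro(uint64_t *tsd_base_out, uint64_t *cpunum_out)
{
        uint64_t tpidrro_el0 = get_tpidrro();

        *cpunum_out = tpidrro_el0 & MACHDEP_CPUNUM_MASK;
        *tsd_base_out = tpidrro_el0 & ~MACHDEP_CPUNUM_MASK;
}
#endif
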
void
machine_tecs(__unused thread_t thr)
{
}

int
machine_csv(__unused cpuvn_e cve)
{
        return 0;
}

#if __ARM_ARCH_8_5__
void
arm_context_switch_requires_sync(void)
{
        current_cpu_datap()->sync_on_cswitch = 1;
}
#endif

#if __has_feature(ptrauth_calls)
boolean_t
arm_user_jop_disabled(void)
{
        return FALSE;
}
#endif /* __has_feature(ptrauth_calls) */