/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/machine.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>
extern int debug_task;
extern bool need_wa_rdar_55577508;

/* zone for debug_state area */
ZONE_DECLARE(ads_zone, "arm debug state", sizeof(arm_debug_state_t), ZC_NONE);
ZONE_DECLARE(user_ss_zone, "user save state", sizeof(arm_context_t), ZC_NONE);
/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
    pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}
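
/*
 * Routine: machine_thread_switch_cpu_data
 *
 * Hands the per-CPU pointers (CpuDatap and pcpu_data_base) from the outgoing
 * thread to the incoming thread during a context switch or stack handoff, and
 * clears them on the outgoing thread so a descheduled thread is never reported
 * as being on core (see machine_thread_on_core below).
 */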
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
    /*
     * We build with -fno-strict-aliasing, so the load through temporaries
     * is required so that this generates a single load / store pair.
     */
    cpu_data_t *datap = old->machine.CpuDatap;
    vm_offset_t base = old->machine.pcpu_data_base;

    /* TODO: Should this be ordered? */
    old->machine.CpuDatap = NULL;
    old->machine.pcpu_data_base = 0;

    new->machine.CpuDatap = datap;
    new->machine.pcpu_data_base = base;
}
/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
    thread_t retval;
    pmap_t new_pmap;

#if __ARM_PAN_AVAILABLE__
    if (__improbable(__builtin_arm_rsr("pan") == 0)) {
        panic("context switch with PAN disabled");
    }
#endif

#define machine_switch_context_kprintf(x...) \
    /* kprintf("machine_switch_context: " x) */

    if (old == new) {
        panic("machine_switch_context");
    }

    new_pmap = new->map->pmap;
    if (old->map->pmap != new_pmap) {
        pmap_switch(new_pmap);
    } else {
        /*
         * If the thread is preempted while performing cache or TLB maintenance,
         * it may be migrated to a different CPU between the completion of the relevant
         * maintenance instruction and the synchronizing DSB.  ARM requires that the
         * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
         * in order to guarantee completion of the instruction and visibility of its effects.
         * Issue DSB here to enforce that guarantee.  We only do this for the case in which
         * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
         * TTBR0.  Note also that cache maintenance may be performed in userspace, so we
         * cannot further limit this operation e.g. by setting a per-thread flag to indicate
         * a pending kernel TLB or cache maintenance instruction.
         */
        __builtin_arm_dsb(DSB_ISH);
    }

    machine_thread_switch_cpu_data(old, new);

    machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);

    retval = Switch_context(old, continuation, new);
    assert(retval != NULL);

    return retval;
}
boolean_t
machine_thread_on_core(thread_t thread)
{
    return thread->machine.CpuDatap != NULL;
}
/*
 * Routine: machine_thread_create
 *
 */
kern_return_t
machine_thread_create(thread_t thread,
    task_t task)
{
    arm_context_t *thread_user_ss = NULL;
    kern_return_t result = KERN_SUCCESS;

#define machine_thread_create_kprintf(x...) \
    /* kprintf("machine_thread_create: " x) */

    machine_thread_create_kprintf("thread = %x\n", thread);

    if (current_thread() != thread) {
        thread->machine.CpuDatap = (cpu_data_t *)0;
        // setting this offset will cause trying to use it to panic
        thread->machine.pcpu_data_base = (vm_offset_t)VM_MIN_KERNEL_ADDRESS;
    }
    thread->machine.preemption_count = 0;
    thread->machine.cthread_self = 0;
    thread->machine.kpcb = NULL;
    thread->machine.exception_trace_code = 0;
#if defined(HAS_APPLE_PAC)
    thread->machine.rop_pid = task->rop_pid;
    thread->machine.jop_pid = task->jop_pid;
    thread->machine.disable_user_jop = task->disable_user_jop;
#endif

    if (task != kernel_task) {
        /* If this isn't a kernel thread, we'll have userspace state. */
        thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);

        if (!thread->machine.contextData) {
            result = KERN_FAILURE;
            goto done;
        }

        thread->machine.upcb = &thread->machine.contextData->ss;
        thread->machine.uNeon = &thread->machine.contextData->ns;

        if (task_has_64Bit_data(task)) {
            thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
            thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
            thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
            thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
        } else {
            thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
            thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
            thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
            thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
        }
    } else {
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
    }

    bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
    result = machine_thread_state_initialize(thread);

done:
    if (result != KERN_SUCCESS) {
        thread_user_ss = thread->machine.contextData;

        if (thread_user_ss) {
            thread->machine.upcb = NULL;
            thread->machine.uNeon = NULL;
            thread->machine.contextData = NULL;
            zfree(user_ss_zone, thread_user_ss);
        }
    }

    return result;
}
/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(thread_t thread)
{
    arm_context_t *thread_user_ss;

    if (thread->machine.contextData) {
        /* Disassociate the user save state from the thread before we free it. */
        thread_user_ss = thread->machine.contextData;
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;

        zfree(user_ss_zone, thread_user_ss);
    }

    if (thread->machine.DebugData != NULL) {
        if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
            arm_debug_set(NULL);
        }

        zfree(ads_zone, thread->machine.DebugData);
    }
}
/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
}

/*
 * Routine: machine_thread_template_init
 *
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
    /* Nothing to do on this platform. */
}

/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr()
{
    return get_saved_state_pc(current_thread()->machine.upcb);
}
/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
        (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    thread->machine.kstackptr = 0;

    return stack;
}
/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(thread_t thread,
    vm_offset_t stack)
{
    struct arm_kernel_context *context;
    struct arm_kernel_saved_state *savestate;
    struct arm_kernel_neon_saved_state *neon_savestate;
    uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
    /* kprintf("machine_stack_attach: " x) */

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
        (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

    thread->kernel_stack = stack;
    thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    thread_initialize_kernel_state(thread);

    machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

    current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
    context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
    savestate = &context->ss;
    savestate->fp = 0;
    savestate->sp = thread->machine.kstackptr;

    /*
     * The PC and CPSR of the kernel stack saved state are never used by context switch
     * code, and should never be used on exception return either.  We're going to poison
     * these values to ensure they never get copied to the exception frame and used to
     * hijack control flow or privilege level on exception return.
     */
    const uint32_t default_cpsr = PSR64_KERNEL_POISON;
#if defined(HAS_APPLE_PAC)
    /* Sign the initial kernel stack saved state */
    boolean_t intr = ml_set_interrupts_enabled(FALSE);
    asm volatile (
        "mov    x0, %[ss]"                              "\n"

        "mov    x1, xzr"                                "\n"
        "str    x1, [x0, %[SS64_PC]]"                   "\n"

        "mov    x2, %[default_cpsr_lo]"                 "\n"
        "movk   x2, %[default_cpsr_hi], lsl #16"        "\n"
        "str    w2, [x0, %[SS64_CPSR]]"                 "\n"

        "adrp   x3, _thread_continue@page"              "\n"
        "add    x3, x3, _thread_continue@pageoff"       "\n"
        "str    x3, [x0, %[SS64_LR]]"                   "\n"

        "mov    x4, xzr"                                "\n"
        "mov    x5, xzr"                                "\n"
        "stp    x4, x5, [x0, %[SS64_X16]]"              "\n"

        "mov    x6, lr"                                 "\n"
        "bl     _ml_sign_kernel_thread_state"           "\n"
        "mov    lr, x6"                                 "\n"
        :
        : [ss]              "r"(&context->ss),
          [default_cpsr_lo] "M"(default_cpsr & 0xFFFF),
          [default_cpsr_hi] "M"(default_cpsr >> 16),
          [SS64_X16]        "i"(offsetof(struct arm_kernel_saved_state, x[0])),
          [SS64_PC]         "i"(offsetof(struct arm_kernel_saved_state, pc)),
          [SS64_CPSR]       "i"(offsetof(struct arm_kernel_saved_state, cpsr)),
          [SS64_LR]         "i"(offsetof(struct arm_kernel_saved_state, lr))
        : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
    );
    ml_set_interrupts_enabled(intr);
#else
    savestate->lr = (uintptr_t)thread_continue;
    savestate->cpsr = default_cpsr;
    savestate->pc = 0;
#endif /* defined(HAS_APPLE_PAC) */
    neon_savestate = &context->ns;
    neon_savestate->fpcr = FPCR_DEFAULT;
    machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}
/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(thread_t old,
    thread_t new)
{
    vm_offset_t stack;
    pmap_t new_pmap;

#if __ARM_PAN_AVAILABLE__
    if (__improbable(__builtin_arm_rsr("pan") == 0)) {
        panic("stack handoff with PAN disabled");
    }
#endif

    stack = machine_stack_detach(old);
    new->kernel_stack = stack;
    new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }

    new_pmap = new->map->pmap;
    if (old->map->pmap != new_pmap) {
        pmap_switch(new_pmap);
    } else {
        /*
         * If the thread is preempted while performing cache or TLB maintenance,
         * it may be migrated to a different CPU between the completion of the relevant
         * maintenance instruction and the synchronizing DSB.  ARM requires that the
         * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
         * in order to guarantee completion of the instruction and visibility of its effects.
         * Issue DSB here to enforce that guarantee.  We only do this for the case in which
         * the pmap isn't changing, as we expect pmap_switch() to issue DSB when it updates
         * TTBR0.  Note also that cache maintenance may be performed in userspace, so we
         * cannot further limit this operation e.g. by setting a per-thread flag to indicate
         * a pending kernel TLB or cache maintenance instruction.
         */
        __builtin_arm_dsb(DSB_ISH);
    }

    machine_thread_switch_cpu_data(old, new);

    machine_set_current_thread(new);
    thread_initialize_kernel_state(new);
}
/*
 * Routine: call_continuation
 *
 */
void
call_continuation(thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult,
    boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
    /* kprintf("call_continuation_kprintf:" x) */

    call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
    Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
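
/*
 * The SET_DBGBVRn/SET_DBGBCRn and SET_DBGWVRn/SET_DBGWCRn macros below write a
 * single hardware breakpoint or watchpoint register via MSR.  The control
 * (-CRn) variants additionally OR the control value into the caller-supplied
 * accumulator, so that after programming every pair the caller can tell
 * whether any breakpoint/watchpoint was enabled at all, and whether any of
 * them requested privileged-mode debug.
 */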
#define SET_DBGBCRn(n, value, accum) \
    __asm__ volatile( \
        "msr DBGBCR" #n "_EL1, %[val]\n" \
        "orr %[result], %[result], %[val]\n" \
        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
    __asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
    __asm__ volatile( \
        "msr DBGWCR" #n "_EL1, %[val]\n" \
        "orr %[result], %[result], %[val]\n" \
        : [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
    __asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
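
/*
 * As a sketch of how the token pasting works, SET_DBGBCRn(0, value, all_ctrls)
 * expands to inline asm roughly equivalent to:
 *
 *     __asm__ volatile(
 *         "msr DBGBCR0_EL1, %[val]\n"
 *         "orr %[result], %[result], %[val]\n"
 *         : [result] "+r"(all_ctrls) : [val] "r"((value)));
 */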
void
arm_debug_set32(arm_debug_state_t *debug_state)
{
    struct cpu_data *  cpu_data_ptr;
    arm_debug_info_t * debug_info = arm_debug_info();
    boolean_t          intr;
    arm_debug_state_t  off_state;
    uint64_t           all_ctrls = 0;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    // Set current user debug
    cpu_data_ptr->cpu_user_debug = debug_state;

    if (NULL == debug_state) {
        bzero(&off_state, sizeof(off_state));
        debug_state = &off_state;
    }

    switch (debug_info->num_breakpoint_pairs) {
    case 16:
        SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
        SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
        OS_FALLTHROUGH;
    case 15:
        SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
        SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
        OS_FALLTHROUGH;
    case 14:
        SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
        SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
        OS_FALLTHROUGH;
    case 13:
        SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
        SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
        OS_FALLTHROUGH;
    case 12:
        SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
        SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
        OS_FALLTHROUGH;
    case 11:
        SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
        SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
        OS_FALLTHROUGH;
    case 10:
        SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
        SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
        OS_FALLTHROUGH;
    case 9:
        SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
        SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
        OS_FALLTHROUGH;
    case 8:
        SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
        SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
        OS_FALLTHROUGH;
    case 7:
        SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
        SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
        OS_FALLTHROUGH;
    case 6:
        SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
        SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
        OS_FALLTHROUGH;
    case 5:
        SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
        SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
        OS_FALLTHROUGH;
    case 4:
        SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
        SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
        OS_FALLTHROUGH;
    case 3:
        SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
        SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
        OS_FALLTHROUGH;
    case 2:
        SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
        SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
        OS_FALLTHROUGH;
    case 1:
        SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
        SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
        break;
    default:
        break;
    }

    switch (debug_info->num_watchpoint_pairs) {
    case 16:
        SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
        SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
        OS_FALLTHROUGH;
    case 15:
        SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
        SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
        OS_FALLTHROUGH;
    case 14:
        SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
        SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
        OS_FALLTHROUGH;
    case 13:
        SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
        SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
        OS_FALLTHROUGH;
    case 12:
        SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
        SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
        OS_FALLTHROUGH;
    case 11:
        SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
        SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
        OS_FALLTHROUGH;
    case 10:
        SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
        SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
        OS_FALLTHROUGH;
    case 9:
        SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
        SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
        OS_FALLTHROUGH;
    case 8:
        SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
        SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
        OS_FALLTHROUGH;
    case 7:
        SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
        SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
        OS_FALLTHROUGH;
    case 6:
        SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
        SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
        OS_FALLTHROUGH;
    case 5:
        SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
        SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
        OS_FALLTHROUGH;
    case 4:
        SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
        SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
        OS_FALLTHROUGH;
    case 3:
        SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
        SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
        OS_FALLTHROUGH;
    case 2:
        SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
        SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
        OS_FALLTHROUGH;
    case 1:
        SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
        SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
        break;
    default:
        break;
    }

#if defined(CONFIG_KERNEL_INTEGRITY)
    if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
        panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
    }
#endif

    /*
     * Breakpoint/Watchpoint Enable
     */
    if (all_ctrls != 0) {
        update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
    } else {
        update_mdscr(0x8000, 0);
    }

    /*
     * Software debug single step enable
     */
    if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
        update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

        mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
    } else {
        update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
        // Workaround for radar 20619637
        __builtin_arm_isb(ISB_SY);
#endif
    }

    (void) ml_set_interrupts_enabled(intr);
}
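
/*
 * arm_debug_set64 mirrors arm_debug_set32 above, but sources the breakpoint,
 * watchpoint, and MDSCR values from the 64-bit (ds64) view of the debug state.
 */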
void
arm_debug_set64(arm_debug_state_t *debug_state)
{
    struct cpu_data *  cpu_data_ptr;
    arm_debug_info_t * debug_info = arm_debug_info();
    boolean_t          intr;
    arm_debug_state_t  off_state;
    uint64_t           all_ctrls = 0;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    // Set current user debug
    cpu_data_ptr->cpu_user_debug = debug_state;

    if (NULL == debug_state) {
        bzero(&off_state, sizeof(off_state));
        debug_state = &off_state;
    }

    switch (debug_info->num_breakpoint_pairs) {
    case 16:
        SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
        SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
        OS_FALLTHROUGH;
    case 15:
        SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
        SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
        OS_FALLTHROUGH;
    case 14:
        SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
        SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
        OS_FALLTHROUGH;
    case 13:
        SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
        SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
        OS_FALLTHROUGH;
    case 12:
        SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
        SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
        OS_FALLTHROUGH;
    case 11:
        SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
        SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
        OS_FALLTHROUGH;
    case 10:
        SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
        SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
        OS_FALLTHROUGH;
    case 9:
        SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
        SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
        OS_FALLTHROUGH;
    case 8:
        SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
        SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
        OS_FALLTHROUGH;
    case 7:
        SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
        SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
        OS_FALLTHROUGH;
    case 6:
        SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
        SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
        OS_FALLTHROUGH;
    case 5:
        SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
        SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
        OS_FALLTHROUGH;
    case 4:
        SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
        SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
        OS_FALLTHROUGH;
    case 3:
        SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
        SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
        OS_FALLTHROUGH;
    case 2:
        SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
        SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
        OS_FALLTHROUGH;
    case 1:
        SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
        SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
        break;
    default:
        break;
    }

    switch (debug_info->num_watchpoint_pairs) {
    case 16:
        SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
        SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
        OS_FALLTHROUGH;
    case 15:
        SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
        SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
        OS_FALLTHROUGH;
    case 14:
        SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
        SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
        OS_FALLTHROUGH;
    case 13:
        SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
        SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
        OS_FALLTHROUGH;
    case 12:
        SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
        SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
        OS_FALLTHROUGH;
    case 11:
        SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
        SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
        OS_FALLTHROUGH;
    case 10:
        SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
        SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
        OS_FALLTHROUGH;
    case 9:
        SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
        SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
        OS_FALLTHROUGH;
    case 8:
        SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
        SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
        OS_FALLTHROUGH;
    case 7:
        SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
        SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
        OS_FALLTHROUGH;
    case 6:
        SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
        SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
        OS_FALLTHROUGH;
    case 5:
        SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
        SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
        OS_FALLTHROUGH;
    case 4:
        SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
        SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
        OS_FALLTHROUGH;
    case 3:
        SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
        SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
        OS_FALLTHROUGH;
    case 2:
        SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
        SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
        OS_FALLTHROUGH;
    case 1:
        SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
        SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
        break;
    default:
        break;
    }

#if defined(CONFIG_KERNEL_INTEGRITY)
    if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
        panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
    }
#endif

    /*
     * Breakpoint/Watchpoint Enable
     */
    if (all_ctrls != 0) {
        update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
    } else {
        update_mdscr(0x8000, 0);
    }

    /*
     * Software debug single step enable
     */
    if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
        update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

        mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
    } else {
        update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
        // Workaround for radar 20619637
        __builtin_arm_isb(ISB_SY);
#endif
    }

    (void) ml_set_interrupts_enabled(intr);
}
void
arm_debug_set(arm_debug_state_t *debug_state)
{
    if (debug_state) {
        switch (debug_state->dsh.flavor) {
        case ARM_DEBUG_STATE32:
            arm_debug_set32(debug_state);
            break;
        case ARM_DEBUG_STATE64:
            arm_debug_set64(debug_state);
            break;
        default:
            panic("arm_debug_set");
            break;
        }
    } else {
        if (thread_is_64bit_data(current_thread())) {
            arm_debug_set64(debug_state);
        } else {
            arm_debug_set32(debug_state);
        }
    }
}
#define VM_MAX_ADDRESS32          ((vm_address_t) 0x80000000)
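
/*
 * Validate an incoming user debug state: any breakpoint or watchpoint whose
 * control register is non-zero must have a value (address) register inside the
 * user address range, so userspace cannot plant hardware breakpoints or
 * watchpoints on kernel addresses.
 */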
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
            return FALSE;
        }
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
            return FALSE;
        }
    }
    return TRUE;
}
boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i]) {
            return FALSE;
        }
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i]) {
            return FALSE;
        }
    }
    return TRUE;
}
boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i]) {
            return FALSE;
        }
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i]) {
            return FALSE;
        }
    }
    return TRUE;
}
/*
 * Duplicate one arm_debug_state_t to another.  "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,
    arm_legacy_debug_state_t * target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}

void
copy_debug_state32(arm_debug_state32_t * src,
    arm_debug_state32_t * target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_debug_state32_t));
}

void
copy_debug_state64(arm_debug_state64_t * src,
    arm_debug_state64_t * target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_debug_state64_t));
}
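
/*
 * Routine: machine_thread_set_tsd_base
 *
 * Records the user TSD (thread-specific data) base in machine.cthread_self.
 * For the current thread the new base is also published to userspace
 * immediately by rewriting TPIDRRO_EL0, preserving the low bits that encode
 * the CPU number (MACHDEP_CPUNUM_MASK) alongside the new base.
 */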
kern_return_t
machine_thread_set_tsd_base(thread_t thread,
    mach_vm_offset_t tsd_base)
{
    if (thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    if (tsd_base & MACHDEP_CPUNUM_MASK) {
        return KERN_INVALID_ARGUMENT;
    }

    if (thread_is_64bit_addr(thread)) {
        if (tsd_base > vm_map_max(thread->map)) {
            tsd_base = 0ULL;
        }
    } else {
        if (tsd_base > UINT32_MAX) {
            tsd_base = 0ULL;
        }
    }

    thread->machine.cthread_self = tsd_base;

    /* For current thread, make the TSD base active immediately */
    if (thread == current_thread()) {
        uint64_t cpunum, tpidrro_el0;

        mp_disable_preemption();
        tpidrro_el0 = get_tpidrro();
        cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
        set_tpidrro(tsd_base | cpunum);
        mp_enable_preemption();
    }

    return KERN_SUCCESS;
}
void
machine_tecs(__unused thread_t thr)
{
}
int
machine_csv(__unused cpuvn_e cve)
{
    return 0;
}
#if __ARM_ARCH_8_5__
void
arm_context_switch_requires_sync()
{
    current_cpu_datap()->sync_on_cswitch = 1;
}
#endif /* __ARM_ARCH_8_5__ */
#if __has_feature(ptrauth_calls)
bool
arm_user_jop_disabled(void)
{
    return false;
}
#endif /* __has_feature(ptrauth_calls) */