2 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
43 #include <kern/machine.h>
44 #include <kern/kalloc.h>
48 #include <kern/monotonic.h>
49 #endif /* MONOTONIC */
51 #include <machine/atomic.h>
52 #include <arm64/proc_reg.h>
53 #include <arm64/machine_machdep.h>
54 #include <arm/cpu_data_internal.h>
55 #include <arm/machdep_call.h>
56 #include <arm/misc_protos.h>
57 #include <arm/cpuid.h>
59 #include <vm/vm_map.h>
60 #include <vm/vm_protos.h>
62 #include <sys/kdebug.h>
64 #define USER_SS_ZONE_ALLOC_SIZE (0x4000)
/* Nonzero enables extra task-debugging behavior; defined elsewhere in the kernel. */
66 extern int debug_task
;
/* Zone backing per-thread hardware debug state (arm_debug_state_t) allocations. */
68 zone_t ads_zone
; /* zone for debug_state area */
/* Zone backing per-thread user register save areas (arm_context_t). */
69 zone_t user_ss_zone
; /* zone for user arm_context_t allocations */
72 * Routine: consider_machine_collect
/*
 * VM-pressure hook called when the system wants machine-level memory
 * reclaimed. NOTE(review): the function body is not visible in this
 * chunk (source lines are missing) -- presumably a no-op on this
 * platform; confirm against the full file.
 */
76 consider_machine_collect(void)
82 * Routine: consider_machine_adjust
/*
 * Scheduler/VM adjustment hook. NOTE(review): the function body is not
 * visible in this chunk (source lines are missing) -- presumably a
 * no-op on this platform; confirm against the full file.
 */
86 consider_machine_adjust(void)
92 * Routine: machine_switch_context
/*
 * Switches execution from thread `old` to thread `new`: swaps the pmap
 * if the address spaces differ, updates per-CPU/thread bookkeeping, and
 * performs the low-level register switch via Switch_context().
 * NOTE(review): several source lines are missing from this chunk
 * (signature tail, local declarations, the guard around the panic, and
 * the return statement) -- confirm control flow against the full file.
 */
96 machine_switch_context(thread_t old
,
97 thread_continue_t continuation
,
102 cpu_data_t
* cpu_data_ptr
;
104 #define machine_switch_context_kprintf(x...) \
105 /* kprintf("machine_switch_context: " x) */
107 cpu_data_ptr
= getCpuDatap();
/* Presumably reached only when old == new (the guard line is missing
 * from this chunk): switching a thread to itself is fatal. TODO confirm. */
109 panic("machine_switch_context");
/* Switch address spaces when the incoming thread uses a different pmap. */
115 new_pmap
= new->map
->pmap
;
116 if (old
->map
->pmap
!= new_pmap
)
117 pmap_switch(new_pmap
);
/* Associate the incoming thread with this CPU's data area. */
120 new->machine
.CpuDatap
= cpu_data_ptr
;
122 /* TODO: Should this be ordered? */
123 old
->machine
.machine_thread_flags
&= ~MACHINE_THREAD_FLAGS_ON_CPU
;
124 new->machine
.machine_thread_flags
|= MACHINE_THREAD_FLAGS_ON_CPU
;
126 machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old
, continuation
, new);
/* Low-level register/stack switch; returns the thread we switched from. */
128 retval
= Switch_context(old
, continuation
, new);
129 assert(retval
!= NULL
);
/*
 * Returns nonzero when `thread` is currently executing on some CPU, as
 * tracked by the MACHINE_THREAD_FLAGS_ON_CPU bit maintained by the
 * context-switch paths above.
 */
135 machine_thread_on_core(thread_t thread
)
137 return thread
->machine
.machine_thread_flags
& MACHINE_THREAD_FLAGS_ON_CPU
;
141 * Routine: machine_thread_create
/*
 * Initializes the machine-dependent portion of a new thread: clears
 * per-thread CPU bookkeeping, copies PAC-related task settings when
 * HAS_APPLE_PAC is built, and (for non-kernel tasks) allocates and
 * wires up the userspace register save area from user_ss_zone.
 * Returns KERN_SUCCESS, or KERN_FAILURE when the save-area allocation
 * fails. NOTE(review): this chunk is missing lines (the `task`
 * parameter, braces, early-exit paths, the final return) -- confirm
 * against the full file.
 */
145 machine_thread_create(thread_t thread
,
148 arm_context_t
*thread_user_ss
= NULL
;
149 kern_return_t result
= KERN_SUCCESS
;
151 #define machine_thread_create_kprintf(x...) \
152 /* kprintf("machine_thread_create: " x) */
154 machine_thread_create_kprintf("thread = %x\n", thread
);
/* Only reset CPU bookkeeping when initializing some other thread;
 * the current thread's fields are live. */
156 if (current_thread() != thread
) {
157 thread
->machine
.CpuDatap
= (cpu_data_t
*)0;
159 thread
->machine
.preemption_count
= 0;
160 thread
->machine
.cthread_self
= 0;
161 #if defined(HAS_APPLE_PAC)
/* Inherit pointer-authentication settings from the owning task. */
162 thread
->machine
.rop_pid
= task
->rop_pid
;
163 thread
->machine
.disable_user_jop
= task
->disable_user_jop
;
167 if (task
!= kernel_task
) {
168 /* If this isn't a kernel thread, we'll have userspace state. */
169 thread
->machine
.contextData
= (arm_context_t
*)zalloc(user_ss_zone
);
171 if (!thread
->machine
.contextData
) {
172 result
= KERN_FAILURE
;
/* Point the user PCB and NEON state into the freshly allocated
 * context area. */
176 thread
->machine
.upcb
= &thread
->machine
.contextData
->ss
;
177 thread
->machine
.uNeon
= &thread
->machine
.contextData
->ns
;
/* Tag the save areas with the flavor matching the task's data width. */
179 if (task_has_64Bit_data(task
)) {
180 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE64
;
181 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE64_COUNT
;
182 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE64
;
183 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE64_COUNT
;
185 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE32
;
186 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE32_COUNT
;
187 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
188 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
/* Kernel threads carry no userspace state. */
192 thread
->machine
.upcb
= NULL
;
193 thread
->machine
.uNeon
= NULL
;
194 thread
->machine
.contextData
= NULL
;
198 bzero(&thread
->machine
.perfctrl_state
, sizeof(thread
->machine
.perfctrl_state
));
199 result
= machine_thread_state_initialize(thread
);
/* On failure, detach and free the user save state so the thread is
 * left with no dangling pointers into the zone element. */
202 if (result
!= KERN_SUCCESS
) {
203 thread_user_ss
= thread
->machine
.contextData
;
205 if (thread_user_ss
) {
206 thread
->machine
.upcb
= NULL
;
207 thread
->machine
.uNeon
= NULL
;
208 thread
->machine
.contextData
= NULL
;
209 zfree(user_ss_zone
, thread_user_ss
);
217 * Routine: machine_thread_destroy
/*
 * Tears down the machine-dependent portion of a thread: frees the
 * userspace register save area (after first unlinking it from the
 * thread) and frees any hardware debug state, clearing the per-CPU
 * cached debug pointer first when this thread's state is active.
 * NOTE(review): braces and some cleanup lines are missing from this
 * chunk -- confirm against the full file.
 */
221 machine_thread_destroy(thread_t thread
)
223 arm_context_t
*thread_user_ss
;
225 if (thread
->machine
.contextData
) {
226 /* Disassociate the user save state from the thread before we free it. */
227 thread_user_ss
= thread
->machine
.contextData
;
228 thread
->machine
.upcb
= NULL
;
229 thread
->machine
.uNeon
= NULL
;
230 thread
->machine
.contextData
= NULL
;
233 zfree(user_ss_zone
, thread_user_ss
);
236 if (thread
->machine
.DebugData
!= NULL
) {
/* If this thread's debug state is the one currently loaded on this
 * CPU, it must be deactivated before freeing (the deactivation line
 * is missing from this chunk -- TODO confirm). */
237 if (thread
->machine
.DebugData
== getCpuDatap()->cpu_user_debug
) {
241 zfree(ads_zone
, thread
->machine
.DebugData
);
247 * Routine: machine_thread_init
/*
 * One-time initialization: creates the zones used for per-thread debug
 * state (ads_zone) and userspace register save areas (user_ss_zone).
 * NOTE(review): the trailing zinit() arguments (zone names) are missing
 * from this chunk -- confirm against the full file.
 */
251 machine_thread_init(void)
253 ads_zone
= zinit(sizeof(arm_debug_state_t
),
254 THREAD_CHUNK
* (sizeof(arm_debug_state_t
)),
255 THREAD_CHUNK
* (sizeof(arm_debug_state_t
)),
259 * Create a zone for the user save state. At the time this zone was created,
260 * the user save state was 848 bytes, and the matching kalloc zone was 1024
261 * bytes, which would result in significant amounts of wasted space if we
262 * simply used kalloc to allocate the user saved state.
264 * 0x4000 has been chosen as the allocation size, as it results in 272 bytes
265 * of wasted space per chunk, which should correspond to 19 allocations.
267 user_ss_zone
= zinit(sizeof(arm_context_t
),
268 CONFIG_THREAD_MAX
* (sizeof(arm_context_t
)),
269 USER_SS_ZONE_ALLOC_SIZE
,
275 * Routine: machine_thread_template_init
/*
 * Per-platform hook for initializing a thread template; intentionally
 * empty on arm64.
 */
279 machine_thread_template_init(thread_t __unused thr_template
)
281 /* Nothing to do on this platform. */
285 * Routine: get_useraddr
/*
 * Returns the user-mode program counter saved in the current thread's
 * user PCB (the address the thread will resume at in userspace).
 */
291 return (get_saved_state_pc(current_thread()->machine
.upcb
));
295 * Routine: machine_stack_detach
/*
 * Detaches and returns the kernel stack from `thread`, zeroing the
 * thread's stack pointers. Emits a scheduler tracepoint first.
 * NOTE(review): the declaration of `stack` and the return statement are
 * missing from this chunk -- confirm against the full file.
 */
299 machine_stack_detach(thread_t thread
)
303 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_STACK_DETACH
),
304 (uintptr_t)thread_tid(thread
), thread
->priority
, thread
->sched_pri
, 0, 0);
306 stack
= thread
->kernel_stack
;
307 thread
->kernel_stack
= 0;
308 thread
->machine
.kstackptr
= 0;
315 * Routine: machine_stack_attach
/*
 * Attaches kernel stack `stack` to `thread` and primes the saved state
 * at the top of the stack so that a context switch into this thread
 * resumes at thread_continue(). With HAS_APPLE_PAC the initial saved
 * state (pc/cpsr/lr/x16..x17) is written and signed atomically in
 * assembly with interrupts disabled, so a partially-initialized frame
 * is never observable. NOTE(review): this chunk is missing lines
 * (the `stack` parameter, `current_el` declaration, the asm statement
 * head, and the #else separating the non-PAC path) -- confirm ordering
 * against the full file before editing.
 */
319 machine_stack_attach(thread_t thread
,
322 struct arm_context
*context
;
323 struct arm_saved_state64
*savestate
;
326 #define machine_stack_attach_kprintf(x...) \
327 /* kprintf("machine_stack_attach: " x) */
329 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_STACK_ATTACH
),
330 (uintptr_t)thread_tid(thread
), thread
->priority
, thread
->sched_pri
, 0, 0);
/* The kernel-state area lives at the top of the stack. */
332 thread
->kernel_stack
= stack
;
333 thread
->machine
.kstackptr
= stack
+ kernel_stack_size
- sizeof(struct thread_kernel_state
);
334 thread_initialize_kernel_state(thread
);
336 machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t
)thread
->machine
.kstackptr
);
338 current_el
= (uint32_t) __builtin_arm_rsr64("CurrentEL");
339 context
= &((thread_kernel_state_t
) thread
->machine
.kstackptr
)->machine
;
340 savestate
= saved_state64(&context
->ss
);
342 savestate
->sp
= thread
->machine
.kstackptr
;
343 #if defined(HAS_APPLE_PAC)
344 /* Sign the initial kernel stack saved state */
345 const uint32_t default_cpsr
= PSR64_KERNEL_DEFAULT
& ~PSR64_MODE_EL_MASK
;
346 boolean_t intr
= ml_set_interrupts_enabled(FALSE
);
351 "str x1, [x0, %[SS64_PC]]" "\n"
353 "mov x2, %[default_cpsr_lo]" "\n"
354 "movk x2, %[default_cpsr_hi], lsl #16" "\n"
355 "mrs x3, CurrentEL" "\n"
356 "orr w2, w2, w3" "\n"
357 "str w2, [x0, %[SS64_CPSR]]" "\n"
359 "adrp x3, _thread_continue@page" "\n"
360 "add x3, x3, _thread_continue@pageoff" "\n"
361 "str x3, [x0, %[SS64_LR]]" "\n"
365 "stp x4, x5, [x0, %[SS64_X16]]" "\n"
368 "bl _ml_sign_thread_state" "\n"
371 : [ss
] "r"(&context
->ss
),
372 [default_cpsr_lo
] "M"(default_cpsr
& 0xFFFF),
373 [default_cpsr_hi
] "M"(default_cpsr
>> 16),
374 [SS64_X16
] "i"(offsetof(struct arm_saved_state
, ss_64
.x
[16])),
375 [SS64_PC
] "i"(offsetof(struct arm_saved_state
, ss_64
.pc
)),
376 [SS64_CPSR
] "i"(offsetof(struct arm_saved_state
, ss_64
.cpsr
)),
377 [SS64_LR
] "i"(offsetof(struct arm_saved_state
, ss_64
.lr
))
378 : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
380 ml_set_interrupts_enabled(intr
);
/* NOTE(review): these direct lr/cpsr stores look like the non-PAC
 * (#else) path whose separator line is missing from this chunk --
 * confirm against the full file. */
382 savestate
->lr
= (uintptr_t)thread_continue
;
383 savestate
->cpsr
= (PSR64_KERNEL_DEFAULT
& ~PSR64_MODE_EL_MASK
) | current_el
;
384 #endif /* defined(HAS_APPLE_PAC) */
385 machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread
, savestate
->lr
, savestate
->sp
);
390 * Routine: machine_stack_handoff
/*
 * Hands the current kernel stack directly from `old` to `new` without a
 * full context switch: moves the stack (preserving reserved-stack
 * ownership), switches pmaps if needed, updates on-CPU flags, and makes
 * `new` the current thread. NOTE(review): the `new` parameter,
 * declarations of `stack`/`new_pmap`, and braces are missing from this
 * chunk -- confirm against the full file.
 */
394 machine_stack_handoff(thread_t old
,
399 cpu_data_t
* cpu_data_ptr
;
403 stack
= machine_stack_detach(old
);
404 cpu_data_ptr
= getCpuDatap();
405 new->kernel_stack
= stack
;
406 new->machine
.kstackptr
= stack
+ kernel_stack_size
- sizeof(struct thread_kernel_state
);
/* If the old thread was running on its reserved stack, transfer
 * reserved-stack ownership along with the stack itself. */
407 if (stack
== old
->reserved_stack
) {
408 assert(new->reserved_stack
);
409 old
->reserved_stack
= new->reserved_stack
;
410 new->reserved_stack
= stack
;
/* Switch address spaces when the incoming thread uses a different pmap. */
415 new_pmap
= new->map
->pmap
;
416 if (old
->map
->pmap
!= new_pmap
)
417 pmap_switch(new_pmap
);
420 new->machine
.CpuDatap
= cpu_data_ptr
;
422 /* TODO: Should this be ordered? */
423 old
->machine
.machine_thread_flags
&= ~MACHINE_THREAD_FLAGS_ON_CPU
;
424 new->machine
.machine_thread_flags
|= MACHINE_THREAD_FLAGS_ON_CPU
;
426 machine_set_current_thread(new);
427 thread_initialize_kernel_state(new);
434 * Routine: call_continuation
/*
 * Invokes `continuation` for the current thread on a fresh kernel stack
 * frame via the assembly trampoline Call_continuation(), passing along
 * the wait result and whether to re-enable interrupts. NOTE(review):
 * the `parameter` argument's declaration line is missing from this
 * chunk -- confirm against the full file.
 */
438 call_continuation(thread_continue_t continuation
,
440 wait_result_t wresult
,
441 boolean_t enable_interrupts
)
443 #define call_continuation_kprintf(x...) \
444 /* kprintf("call_continuation_kprintf:" x) */
446 call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation
, current_thread()->machine
.kstackptr
);
447 Call_continuation(continuation
, parameter
, wresult
, enable_interrupts
);
/*
 * Helpers for programming the hardware debug registers. The *CRn
 * (control) variants also OR the written value into `accum` so the
 * caller can later inspect the union of all control bits (e.g. to
 * reject privileged-mode breakpoints). NOTE(review): the asm-statement
 * head lines of the control macros are missing from this chunk.
 */
450 #define SET_DBGBCRn(n, value, accum) \
452 "msr DBGBCR" #n "_EL1, %[val]\n" \
453 "orr %[result], %[result], %[val]\n" \
454 : [result] "+r"(accum) : [val] "r"((value)))
/* Write breakpoint value register n. */
456 #define SET_DBGBVRn(n, value) \
457 __asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))
459 #define SET_DBGWCRn(n, value, accum) \
461 "msr DBGWCR" #n "_EL1, %[val]\n" \
462 "orr %[result], %[result], %[val]\n" \
463 : [result] "+r"(accum) : [val] "r"((value)))
/* Write watchpoint value register n. */
465 #define SET_DBGWVRn(n, value) \
466 __asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
/*
 * Loads a 32-bit-flavor debug state (breakpoint/watchpoint value and
 * control registers, MDSCR single-step bit) into the CPU's hardware
 * debug registers, with interrupts disabled. A NULL `debug_state`
 * programs an all-zero state, disabling hardware debug for this CPU.
 * The switches below rely on case fall-through (case labels are missing
 * from this chunk) to program only the registers the CPU implements,
 * per arm_debug_info(). NOTE(review): braces, case labels, `set_mde`
 * updates, and break statements are missing from this chunk -- confirm
 * against the full file.
 */
468 void arm_debug_set32(arm_debug_state_t
*debug_state
)
470 struct cpu_data
* cpu_data_ptr
;
471 arm_debug_info_t
* debug_info
= arm_debug_info();
472 boolean_t intr
, set_mde
= 0;
473 arm_debug_state_t off_state
;
475 uint64_t all_ctrls
= 0;
477 intr
= ml_set_interrupts_enabled(FALSE
);
478 cpu_data_ptr
= getCpuDatap();
480 // Set current user debug
481 cpu_data_ptr
->cpu_user_debug
= debug_state
;
/* NULL means "turn hardware debug off": substitute a zeroed state. */
483 if (NULL
== debug_state
) {
484 bzero(&off_state
, sizeof(off_state
));
485 debug_state
= &off_state
;
/* Program breakpoint pairs from the highest implemented index down. */
488 switch (debug_info
->num_breakpoint_pairs
) {
490 SET_DBGBVRn(15, (uint64_t)debug_state
->uds
.ds32
.bvr
[15]);
491 SET_DBGBCRn(15, (uint64_t)debug_state
->uds
.ds32
.bcr
[15], all_ctrls
);
493 SET_DBGBVRn(14, (uint64_t)debug_state
->uds
.ds32
.bvr
[14]);
494 SET_DBGBCRn(14, (uint64_t)debug_state
->uds
.ds32
.bcr
[14], all_ctrls
);
496 SET_DBGBVRn(13, (uint64_t)debug_state
->uds
.ds32
.bvr
[13]);
497 SET_DBGBCRn(13, (uint64_t)debug_state
->uds
.ds32
.bcr
[13], all_ctrls
);
499 SET_DBGBVRn(12, (uint64_t)debug_state
->uds
.ds32
.bvr
[12]);
500 SET_DBGBCRn(12, (uint64_t)debug_state
->uds
.ds32
.bcr
[12], all_ctrls
);
502 SET_DBGBVRn(11, (uint64_t)debug_state
->uds
.ds32
.bvr
[11]);
503 SET_DBGBCRn(11, (uint64_t)debug_state
->uds
.ds32
.bcr
[11], all_ctrls
);
505 SET_DBGBVRn(10, (uint64_t)debug_state
->uds
.ds32
.bvr
[10]);
506 SET_DBGBCRn(10, (uint64_t)debug_state
->uds
.ds32
.bcr
[10], all_ctrls
);
508 SET_DBGBVRn(9, (uint64_t)debug_state
->uds
.ds32
.bvr
[9]);
509 SET_DBGBCRn(9, (uint64_t)debug_state
->uds
.ds32
.bcr
[9], all_ctrls
);
511 SET_DBGBVRn(8, (uint64_t)debug_state
->uds
.ds32
.bvr
[8]);
512 SET_DBGBCRn(8, (uint64_t)debug_state
->uds
.ds32
.bcr
[8], all_ctrls
);
514 SET_DBGBVRn(7, (uint64_t)debug_state
->uds
.ds32
.bvr
[7]);
515 SET_DBGBCRn(7, (uint64_t)debug_state
->uds
.ds32
.bcr
[7], all_ctrls
);
517 SET_DBGBVRn(6, (uint64_t)debug_state
->uds
.ds32
.bvr
[6]);
518 SET_DBGBCRn(6, (uint64_t)debug_state
->uds
.ds32
.bcr
[6], all_ctrls
);
520 SET_DBGBVRn(5, (uint64_t)debug_state
->uds
.ds32
.bvr
[5]);
521 SET_DBGBCRn(5, (uint64_t)debug_state
->uds
.ds32
.bcr
[5], all_ctrls
);
523 SET_DBGBVRn(4, (uint64_t)debug_state
->uds
.ds32
.bvr
[4]);
524 SET_DBGBCRn(4, (uint64_t)debug_state
->uds
.ds32
.bcr
[4], all_ctrls
);
526 SET_DBGBVRn(3, (uint64_t)debug_state
->uds
.ds32
.bvr
[3]);
527 SET_DBGBCRn(3, (uint64_t)debug_state
->uds
.ds32
.bcr
[3], all_ctrls
);
529 SET_DBGBVRn(2, (uint64_t)debug_state
->uds
.ds32
.bvr
[2]);
530 SET_DBGBCRn(2, (uint64_t)debug_state
->uds
.ds32
.bcr
[2], all_ctrls
);
532 SET_DBGBVRn(1, (uint64_t)debug_state
->uds
.ds32
.bvr
[1]);
533 SET_DBGBCRn(1, (uint64_t)debug_state
->uds
.ds32
.bcr
[1], all_ctrls
);
535 SET_DBGBVRn(0, (uint64_t)debug_state
->uds
.ds32
.bvr
[0]);
536 SET_DBGBCRn(0, (uint64_t)debug_state
->uds
.ds32
.bcr
[0], all_ctrls
);
/* Program watchpoint pairs, same fall-through scheme. */
541 switch (debug_info
->num_watchpoint_pairs
) {
543 SET_DBGWVRn(15, (uint64_t)debug_state
->uds
.ds32
.wvr
[15]);
544 SET_DBGWCRn(15, (uint64_t)debug_state
->uds
.ds32
.wcr
[15], all_ctrls
);
546 SET_DBGWVRn(14, (uint64_t)debug_state
->uds
.ds32
.wvr
[14]);
547 SET_DBGWCRn(14, (uint64_t)debug_state
->uds
.ds32
.wcr
[14], all_ctrls
);
549 SET_DBGWVRn(13, (uint64_t)debug_state
->uds
.ds32
.wvr
[13]);
550 SET_DBGWCRn(13, (uint64_t)debug_state
->uds
.ds32
.wcr
[13], all_ctrls
);
552 SET_DBGWVRn(12, (uint64_t)debug_state
->uds
.ds32
.wvr
[12]);
553 SET_DBGWCRn(12, (uint64_t)debug_state
->uds
.ds32
.wcr
[12], all_ctrls
);
555 SET_DBGWVRn(11, (uint64_t)debug_state
->uds
.ds32
.wvr
[11]);
556 SET_DBGWCRn(11, (uint64_t)debug_state
->uds
.ds32
.wcr
[11], all_ctrls
);
558 SET_DBGWVRn(10, (uint64_t)debug_state
->uds
.ds32
.wvr
[10]);
559 SET_DBGWCRn(10, (uint64_t)debug_state
->uds
.ds32
.wcr
[10], all_ctrls
);
561 SET_DBGWVRn(9, (uint64_t)debug_state
->uds
.ds32
.wvr
[9]);
562 SET_DBGWCRn(9, (uint64_t)debug_state
->uds
.ds32
.wcr
[9], all_ctrls
);
564 SET_DBGWVRn(8, (uint64_t)debug_state
->uds
.ds32
.wvr
[8]);
565 SET_DBGWCRn(8, (uint64_t)debug_state
->uds
.ds32
.wcr
[8], all_ctrls
);
567 SET_DBGWVRn(7, (uint64_t)debug_state
->uds
.ds32
.wvr
[7]);
568 SET_DBGWCRn(7, (uint64_t)debug_state
->uds
.ds32
.wcr
[7], all_ctrls
);
570 SET_DBGWVRn(6, (uint64_t)debug_state
->uds
.ds32
.wvr
[6]);
571 SET_DBGWCRn(6, (uint64_t)debug_state
->uds
.ds32
.wcr
[6], all_ctrls
);
573 SET_DBGWVRn(5, (uint64_t)debug_state
->uds
.ds32
.wvr
[5]);
574 SET_DBGWCRn(5, (uint64_t)debug_state
->uds
.ds32
.wcr
[5], all_ctrls
);
576 SET_DBGWVRn(4, (uint64_t)debug_state
->uds
.ds32
.wvr
[4]);
577 SET_DBGWCRn(4, (uint64_t)debug_state
->uds
.ds32
.wcr
[4], all_ctrls
);
579 SET_DBGWVRn(3, (uint64_t)debug_state
->uds
.ds32
.wvr
[3]);
580 SET_DBGWCRn(3, (uint64_t)debug_state
->uds
.ds32
.wcr
[3], all_ctrls
);
582 SET_DBGWVRn(2, (uint64_t)debug_state
->uds
.ds32
.wvr
[2]);
583 SET_DBGWCRn(2, (uint64_t)debug_state
->uds
.ds32
.wcr
[2], all_ctrls
);
585 SET_DBGWVRn(1, (uint64_t)debug_state
->uds
.ds32
.wvr
[1]);
586 SET_DBGWCRn(1, (uint64_t)debug_state
->uds
.ds32
.wcr
[1], all_ctrls
);
588 SET_DBGWVRn(0, (uint64_t)debug_state
->uds
.ds32
.wvr
[0]);
589 SET_DBGWCRn(0, (uint64_t)debug_state
->uds
.ds32
.wcr
[0], all_ctrls
);
/* Reject any control value requesting privileged-mode (self-hosted
 * kernel) debug when kernel integrity is enforced. */
594 #if defined(CONFIG_KERNEL_INTEGRITY)
595 if ((all_ctrls
& (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED
| ARM_DBG_CR_HIGHER_MODE_ENABLE
)) != 0) {
596 panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls
);
/* Scan for any enabled breakpoint/watchpoint so MDE is only set when
 * needed (the set_mde assignments are missing from this chunk). */
600 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
601 if (0 != debug_state
->uds
.ds32
.bcr
[i
]) {
607 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
608 if (0 != debug_state
->uds
.ds32
.wcr
[i
]) {
615 * Breakpoint/Watchpoint Enable
618 update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
620 update_mdscr(0x8000, 0);
624 * Software debug single step enable
626 if (debug_state
->uds
.ds32
.mdscr_el1
& 0x1) {
627 update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)
/* Arm the PSTATE.SS bit in the user saved state so the next ERET
 * single-steps one instruction. */
629 mask_saved_state_cpsr(current_thread()->machine
.upcb
, PSR64_SS
, 0);
632 update_mdscr(0x1, 0);
634 #if SINGLE_STEP_RETIRE_ERRATA
635 // Workaround for radar 20619637
636 __builtin_arm_isb(ISB_SY
);
640 (void) ml_set_interrupts_enabled(intr
);
/*
 * 64-bit-flavor counterpart of arm_debug_set32(): loads a ds64 debug
 * state into the hardware debug registers with interrupts disabled.
 * A NULL `debug_state` programs an all-zero state, disabling hardware
 * debug for this CPU. Same fall-through switch structure as the 32-bit
 * version; bvr/wvr values are already 64-bit so only the control
 * registers are cast. NOTE(review): braces, case labels, `set_mde`
 * updates, and break statements are missing from this chunk -- confirm
 * against the full file.
 */
645 void arm_debug_set64(arm_debug_state_t
*debug_state
)
647 struct cpu_data
* cpu_data_ptr
;
648 arm_debug_info_t
* debug_info
= arm_debug_info();
649 boolean_t intr
, set_mde
= 0;
650 arm_debug_state_t off_state
;
652 uint64_t all_ctrls
= 0;
654 intr
= ml_set_interrupts_enabled(FALSE
);
655 cpu_data_ptr
= getCpuDatap();
657 // Set current user debug
658 cpu_data_ptr
->cpu_user_debug
= debug_state
;
/* NULL means "turn hardware debug off": substitute a zeroed state. */
660 if (NULL
== debug_state
) {
661 bzero(&off_state
, sizeof(off_state
));
662 debug_state
= &off_state
;
/* Program breakpoint pairs from the highest implemented index down. */
665 switch (debug_info
->num_breakpoint_pairs
) {
667 SET_DBGBVRn(15, debug_state
->uds
.ds64
.bvr
[15]);
668 SET_DBGBCRn(15, (uint64_t)debug_state
->uds
.ds64
.bcr
[15], all_ctrls
);
670 SET_DBGBVRn(14, debug_state
->uds
.ds64
.bvr
[14]);
671 SET_DBGBCRn(14, (uint64_t)debug_state
->uds
.ds64
.bcr
[14], all_ctrls
);
673 SET_DBGBVRn(13, debug_state
->uds
.ds64
.bvr
[13]);
674 SET_DBGBCRn(13, (uint64_t)debug_state
->uds
.ds64
.bcr
[13], all_ctrls
);
676 SET_DBGBVRn(12, debug_state
->uds
.ds64
.bvr
[12]);
677 SET_DBGBCRn(12, (uint64_t)debug_state
->uds
.ds64
.bcr
[12], all_ctrls
);
679 SET_DBGBVRn(11, debug_state
->uds
.ds64
.bvr
[11]);
680 SET_DBGBCRn(11, (uint64_t)debug_state
->uds
.ds64
.bcr
[11], all_ctrls
);
682 SET_DBGBVRn(10, debug_state
->uds
.ds64
.bvr
[10]);
683 SET_DBGBCRn(10, (uint64_t)debug_state
->uds
.ds64
.bcr
[10], all_ctrls
);
685 SET_DBGBVRn(9, debug_state
->uds
.ds64
.bvr
[9]);
686 SET_DBGBCRn(9, (uint64_t)debug_state
->uds
.ds64
.bcr
[9], all_ctrls
);
688 SET_DBGBVRn(8, debug_state
->uds
.ds64
.bvr
[8]);
689 SET_DBGBCRn(8, (uint64_t)debug_state
->uds
.ds64
.bcr
[8], all_ctrls
);
691 SET_DBGBVRn(7, debug_state
->uds
.ds64
.bvr
[7]);
692 SET_DBGBCRn(7, (uint64_t)debug_state
->uds
.ds64
.bcr
[7], all_ctrls
);
694 SET_DBGBVRn(6, debug_state
->uds
.ds64
.bvr
[6]);
695 SET_DBGBCRn(6, (uint64_t)debug_state
->uds
.ds64
.bcr
[6], all_ctrls
);
697 SET_DBGBVRn(5, debug_state
->uds
.ds64
.bvr
[5]);
698 SET_DBGBCRn(5, (uint64_t)debug_state
->uds
.ds64
.bcr
[5], all_ctrls
);
700 SET_DBGBVRn(4, debug_state
->uds
.ds64
.bvr
[4]);
701 SET_DBGBCRn(4, (uint64_t)debug_state
->uds
.ds64
.bcr
[4], all_ctrls
);
703 SET_DBGBVRn(3, debug_state
->uds
.ds64
.bvr
[3]);
704 SET_DBGBCRn(3, (uint64_t)debug_state
->uds
.ds64
.bcr
[3], all_ctrls
);
706 SET_DBGBVRn(2, debug_state
->uds
.ds64
.bvr
[2]);
707 SET_DBGBCRn(2, (uint64_t)debug_state
->uds
.ds64
.bcr
[2], all_ctrls
);
709 SET_DBGBVRn(1, debug_state
->uds
.ds64
.bvr
[1]);
710 SET_DBGBCRn(1, (uint64_t)debug_state
->uds
.ds64
.bcr
[1], all_ctrls
);
712 SET_DBGBVRn(0, debug_state
->uds
.ds64
.bvr
[0]);
713 SET_DBGBCRn(0, (uint64_t)debug_state
->uds
.ds64
.bcr
[0], all_ctrls
);
/* Program watchpoint pairs, same fall-through scheme. */
718 switch (debug_info
->num_watchpoint_pairs
) {
720 SET_DBGWVRn(15, debug_state
->uds
.ds64
.wvr
[15]);
721 SET_DBGWCRn(15, (uint64_t)debug_state
->uds
.ds64
.wcr
[15], all_ctrls
);
723 SET_DBGWVRn(14, debug_state
->uds
.ds64
.wvr
[14]);
724 SET_DBGWCRn(14, (uint64_t)debug_state
->uds
.ds64
.wcr
[14], all_ctrls
);
726 SET_DBGWVRn(13, debug_state
->uds
.ds64
.wvr
[13]);
727 SET_DBGWCRn(13, (uint64_t)debug_state
->uds
.ds64
.wcr
[13], all_ctrls
);
729 SET_DBGWVRn(12, debug_state
->uds
.ds64
.wvr
[12]);
730 SET_DBGWCRn(12, (uint64_t)debug_state
->uds
.ds64
.wcr
[12], all_ctrls
);
732 SET_DBGWVRn(11, debug_state
->uds
.ds64
.wvr
[11]);
733 SET_DBGWCRn(11, (uint64_t)debug_state
->uds
.ds64
.wcr
[11], all_ctrls
);
735 SET_DBGWVRn(10, debug_state
->uds
.ds64
.wvr
[10]);
736 SET_DBGWCRn(10, (uint64_t)debug_state
->uds
.ds64
.wcr
[10], all_ctrls
);
738 SET_DBGWVRn(9, debug_state
->uds
.ds64
.wvr
[9]);
739 SET_DBGWCRn(9, (uint64_t)debug_state
->uds
.ds64
.wcr
[9], all_ctrls
);
741 SET_DBGWVRn(8, debug_state
->uds
.ds64
.wvr
[8]);
742 SET_DBGWCRn(8, (uint64_t)debug_state
->uds
.ds64
.wcr
[8], all_ctrls
);
744 SET_DBGWVRn(7, debug_state
->uds
.ds64
.wvr
[7]);
745 SET_DBGWCRn(7, (uint64_t)debug_state
->uds
.ds64
.wcr
[7], all_ctrls
);
747 SET_DBGWVRn(6, debug_state
->uds
.ds64
.wvr
[6]);
748 SET_DBGWCRn(6, (uint64_t)debug_state
->uds
.ds64
.wcr
[6], all_ctrls
);
750 SET_DBGWVRn(5, debug_state
->uds
.ds64
.wvr
[5]);
751 SET_DBGWCRn(5, (uint64_t)debug_state
->uds
.ds64
.wcr
[5], all_ctrls
);
753 SET_DBGWVRn(4, debug_state
->uds
.ds64
.wvr
[4]);
754 SET_DBGWCRn(4, (uint64_t)debug_state
->uds
.ds64
.wcr
[4], all_ctrls
);
756 SET_DBGWVRn(3, debug_state
->uds
.ds64
.wvr
[3]);
757 SET_DBGWCRn(3, (uint64_t)debug_state
->uds
.ds64
.wcr
[3], all_ctrls
);
759 SET_DBGWVRn(2, debug_state
->uds
.ds64
.wvr
[2]);
760 SET_DBGWCRn(2, (uint64_t)debug_state
->uds
.ds64
.wcr
[2], all_ctrls
);
762 SET_DBGWVRn(1, debug_state
->uds
.ds64
.wvr
[1]);
763 SET_DBGWCRn(1, (uint64_t)debug_state
->uds
.ds64
.wcr
[1], all_ctrls
);
765 SET_DBGWVRn(0, debug_state
->uds
.ds64
.wvr
[0]);
766 SET_DBGWCRn(0, (uint64_t)debug_state
->uds
.ds64
.wcr
[0], all_ctrls
);
/* Reject any control value requesting privileged-mode (self-hosted
 * kernel) debug when kernel integrity is enforced. */
771 #if defined(CONFIG_KERNEL_INTEGRITY)
772 if ((all_ctrls
& (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED
| ARM_DBG_CR_HIGHER_MODE_ENABLE
)) != 0) {
773 panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls
);
/* Scan for any enabled breakpoint/watchpoint so MDE is only set when
 * needed (the set_mde assignments are missing from this chunk). */
777 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
778 if (0 != debug_state
->uds
.ds64
.bcr
[i
]) {
784 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
785 if (0 != debug_state
->uds
.ds64
.wcr
[i
]) {
792 * Breakpoint/Watchpoint Enable
795 update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
799 * Software debug single step enable
801 if (debug_state
->uds
.ds64
.mdscr_el1
& 0x1) {
803 update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)
/* Arm the PSTATE.SS bit in the user saved state so the next ERET
 * single-steps one instruction. */
805 mask_saved_state_cpsr(current_thread()->machine
.upcb
, PSR64_SS
, 0);
808 update_mdscr(0x1, 0);
810 #if SINGLE_STEP_RETIRE_ERRATA
811 // Workaround for radar 20619637
812 __builtin_arm_isb(ISB_SY
);
816 (void) ml_set_interrupts_enabled(intr
);
/*
 * Flavor-dispatching wrapper: routes `debug_state` to the 32- or 64-bit
 * loader based on its header flavor, panicking on an unknown flavor.
 * The trailing branch handles a NULL `debug_state` (guard line missing
 * from this chunk -- TODO confirm) by choosing the flavor from the
 * current thread's data width.
 */
821 void arm_debug_set(arm_debug_state_t
*debug_state
)
824 switch (debug_state
->dsh
.flavor
) {
825 case ARM_DEBUG_STATE32
:
826 arm_debug_set32(debug_state
);
828 case ARM_DEBUG_STATE64
:
829 arm_debug_set64(debug_state
);
832 panic("arm_debug_set");
836 if (thread_is_64bit_data(current_thread()))
837 arm_debug_set64(debug_state
);
839 arm_debug_set32(debug_state
);
/* Upper bound of the 32-bit user address space, used to validate
 * user-supplied breakpoint/watchpoint addresses below. */
843 #define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
/*
 * Validates a legacy debug state: every enabled breakpoint (bcr != 0)
 * and watchpoint (wcr != 0) must target an address below the 32-bit
 * user VM limit. NOTE(review): the `return FALSE`/`return TRUE` lines
 * and the declaration of `i` are missing from this chunk.
 */
845 debug_legacy_state_is_valid(arm_legacy_debug_state_t
*debug_state
)
847 arm_debug_info_t
*debug_info
= arm_debug_info();
849 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
850 if (0 != debug_state
->bcr
[i
] && VM_MAX_ADDRESS32
<= debug_state
->bvr
[i
])
854 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
855 if (0 != debug_state
->wcr
[i
] && VM_MAX_ADDRESS32
<= debug_state
->wvr
[i
])
/*
 * Validates a 32-bit debug state: every enabled breakpoint/watchpoint
 * must target an address below the 32-bit user VM limit.
 * NOTE(review): the return statements and the declaration of `i` are
 * missing from this chunk.
 */
862 debug_state_is_valid32(arm_debug_state32_t
*debug_state
)
864 arm_debug_info_t
*debug_info
= arm_debug_info();
866 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
867 if (0 != debug_state
->bcr
[i
] && VM_MAX_ADDRESS32
<= debug_state
->bvr
[i
])
871 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
872 if (0 != debug_state
->wcr
[i
] && VM_MAX_ADDRESS32
<= debug_state
->wvr
[i
])
/*
 * Validates a 64-bit debug state: every enabled breakpoint/watchpoint
 * must target an address below MACH_VM_MAX_ADDRESS (the 64-bit user VM
 * limit). NOTE(review): the return statements and the declaration of
 * `i` are missing from this chunk.
 */
879 debug_state_is_valid64(arm_debug_state64_t
*debug_state
)
881 arm_debug_info_t
*debug_info
= arm_debug_info();
883 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
884 if (0 != debug_state
->bcr
[i
] && MACH_VM_MAX_ADDRESS
<= debug_state
->bvr
[i
])
888 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
889 if (0 != debug_state
->wcr
[i
] && MACH_VM_MAX_ADDRESS
<= debug_state
->wvr
[i
])
896 * Duplicate one arm_debug_state_t to another. "all" parameter
897 * is ignored in the case of ARM -- Is this the right assumption?
/* Copies an entire legacy debug state from `src` to `target`. */
900 copy_legacy_debug_state(arm_legacy_debug_state_t
* src
,
901 arm_legacy_debug_state_t
* target
,
902 __unused boolean_t all
)
904 bcopy(src
, target
, sizeof(arm_legacy_debug_state_t
));
/* Copies an entire 32-bit debug state from `src` to `target`;
 * `all` is ignored on ARM (always a full copy). */
908 copy_debug_state32(arm_debug_state32_t
* src
,
909 arm_debug_state32_t
* target
,
910 __unused boolean_t all
)
912 bcopy(src
, target
, sizeof(arm_debug_state32_t
));
/* Copies an entire 64-bit debug state from `src` to `target`;
 * `all` is ignored on ARM (always a full copy). */
916 copy_debug_state64(arm_debug_state64_t
* src
,
917 arm_debug_state64_t
* target
,
918 __unused boolean_t all
)
920 bcopy(src
, target
, sizeof(arm_debug_state64_t
));
/*
 * Sets the thread's TSD (thread-local storage) base address. Rejects
 * kernel-task threads, bases with the low CPU-number bits set (those
 * bits of TPIDRRO_EL0 are reserved for the CPU number), and bases
 * beyond the task's addressable range. If `thread` is the current
 * thread, the new base is written to TPIDRRO_EL0 immediately,
 * preserving the embedded CPU number. NOTE(review): some returns,
 * braces, and the final return are missing from this chunk -- confirm
 * against the full file.
 */
924 machine_thread_set_tsd_base(thread_t thread
,
925 mach_vm_offset_t tsd_base
)
927 if (thread
->task
== kernel_task
) {
928 return KERN_INVALID_ARGUMENT
;
/* Low bits are reserved for the CPU number in TPIDRRO_EL0. */
931 if (tsd_base
& MACHDEP_CPUNUM_MASK
) {
932 return KERN_INVALID_ARGUMENT
;
/* Range-check against the task's address-space width; out-of-range
 * bases are presumably zeroed (the assignment lines are missing from
 * this chunk -- TODO confirm). */
935 if (thread_is_64bit_addr(thread
)) {
936 if (tsd_base
> vm_map_max(thread
->map
))
939 if (tsd_base
> UINT32_MAX
)
943 thread
->machine
.cthread_self
= tsd_base
;
945 /* For current thread, make the TSD base active immediately */
946 if (thread
== current_thread()) {
947 uint64_t cpunum
, tpidrro_el0
;
/* Disable preemption so the CPU number read below stays valid while
 * we rewrite TPIDRRO_EL0. */
949 mp_disable_preemption();
950 tpidrro_el0
= get_tpidrro();
951 cpunum
= tpidrro_el0
& (MACHDEP_CPUNUM_MASK
);
952 set_tpidrro(tsd_base
| cpunum
);
953 mp_enable_preemption();
961 machine_tecs(__unused thread_t thr
)
966 machine_csv(__unused cpuvn_e cve
)