2 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
43 #include <kern/machine.h>
44 #include <kern/kalloc.h>
48 #include <kern/monotonic.h>
49 #endif /* MONOTONIC */
51 #include <machine/atomic.h>
52 #include <arm64/proc_reg.h>
53 #include <arm64/machine_machdep.h>
54 #include <arm/cpu_data_internal.h>
55 #include <arm/machdep_call.h>
56 #include <arm/misc_protos.h>
57 #include <arm/cpuid.h>
59 #include <vm/vm_map.h>
60 #include <vm/vm_protos.h>
62 #include <sys/kdebug.h>
65 #define USER_SS_ZONE_ALLOC_SIZE (0x4000)
67 extern int debug_task
;
69 zone_t ads_zone
; /* zone for debug_state area */
70 zone_t user_ss_zone
; /* zone for user arm_context_t allocations */
/*
 * Routine: consider_machine_collect
 * Purpose: Machine-dependent garbage-collection hook; asks the pmap
 *          layer to reclaim whatever it can.
 *          NOTE(review): body was dropped by the extraction; pmap_gc()
 *          reconstructed from the canonical implementation — confirm.
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}
/*
 * Routine: consider_machine_adjust
 * Purpose: Machine-dependent adjustment hook; nothing to do on this
 *          architecture, so the body is intentionally empty.
 */
void
consider_machine_adjust(void)
{
}
92 * Routine: machine_switch_context
96 machine_switch_context(
98 thread_continue_t continuation
,
103 cpu_data_t
*cpu_data_ptr
;
105 #define machine_switch_context_kprintf(x...) /* kprintf("machine_switch_con
108 cpu_data_ptr
= getCpuDatap();
110 panic("machine_switch_context");
115 new_pmap
= new->map
->pmap
;
116 if (old
->map
->pmap
!= new_pmap
)
117 pmap_switch(new_pmap
);
119 new->machine
.CpuDatap
= cpu_data_ptr
;
121 machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old
, continuation
, new);
123 retval
= Switch_context(old
, continuation
, new);
124 assert(retval
!= NULL
);
130 * Routine: machine_thread_create
134 machine_thread_create(
138 arm_context_t
*thread_user_ss
= NULL
;
139 kern_return_t result
= KERN_SUCCESS
;
141 #define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */
143 machine_thread_create_kprintf("thread = %x\n", thread
);
145 if (current_thread() != thread
) {
146 thread
->machine
.CpuDatap
= (cpu_data_t
*)0;
148 thread
->machine
.preemption_count
= 0;
149 thread
->machine
.cthread_self
= 0;
150 thread
->machine
.cthread_data
= 0;
153 if (task
!= kernel_task
) {
154 /* If this isn't a kernel thread, we'll have userspace state. */
155 thread
->machine
.contextData
= (arm_context_t
*)zalloc(user_ss_zone
);
157 if (!thread
->machine
.contextData
) {
161 thread
->machine
.upcb
= &thread
->machine
.contextData
->ss
;
162 thread
->machine
.uNeon
= &thread
->machine
.contextData
->ns
;
164 if (task_has_64Bit_data(task
)) {
165 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE64
;
166 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE64_COUNT
;
167 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE64
;
168 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE64_COUNT
;
170 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE32
;
171 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE32_COUNT
;
172 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
173 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
176 thread
->machine
.upcb
= NULL
;
177 thread
->machine
.uNeon
= NULL
;
178 thread
->machine
.contextData
= NULL
;
181 bzero(&thread
->machine
.perfctrl_state
, sizeof(thread
->machine
.perfctrl_state
));
183 result
= machine_thread_state_initialize(thread
);
185 if (result
!= KERN_SUCCESS
) {
186 thread_user_ss
= thread
->machine
.contextData
;
187 thread
->machine
.upcb
= NULL
;
188 thread
->machine
.uNeon
= NULL
;
189 thread
->machine
.contextData
= NULL
;
190 zfree(user_ss_zone
, thread_user_ss
);
197 * Routine: machine_thread_destroy
201 machine_thread_destroy(
204 arm_context_t
*thread_user_ss
;
206 if (thread
->machine
.contextData
) {
207 /* Disassociate the user save state from the thread before we free it. */
208 thread_user_ss
= thread
->machine
.contextData
;
209 thread
->machine
.upcb
= NULL
;
210 thread
->machine
.uNeon
= NULL
;
211 thread
->machine
.contextData
= NULL
;
212 zfree(user_ss_zone
, thread_user_ss
);
215 if (thread
->machine
.DebugData
!= NULL
) {
216 if (thread
->machine
.DebugData
== getCpuDatap()->cpu_user_debug
) {
220 zfree(ads_zone
, thread
->machine
.DebugData
);
226 * Routine: machine_thread_init
230 machine_thread_init(void)
232 ads_zone
= zinit(sizeof(arm_debug_state_t
),
233 THREAD_CHUNK
* (sizeof(arm_debug_state_t
)),
234 THREAD_CHUNK
* (sizeof(arm_debug_state_t
)),
238 * Create a zone for the user save state. At the time this zone was created,
239 * the user save state was 848 bytes, and the matching kalloc zone was 1024
240 * bytes, which would result in significant amounts of wasted space if we
241 * simply used kalloc to allocate the user saved state.
243 * 0x4000 has been chosen as the allocation size, as it results in 272 bytes
244 * of wasted space per chunk, which should correspond to 19 allocations.
246 user_ss_zone
= zinit(sizeof(arm_context_t
),
247 CONFIG_THREAD_MAX
* (sizeof(arm_context_t
)),
248 USER_SS_ZONE_ALLOC_SIZE
,
254 * Routine: get_useraddr
260 return (get_saved_state_pc(current_thread()->machine
.upcb
));
264 * Routine: machine_stack_detach
268 machine_stack_detach(
273 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_STACK_DETACH
),
274 (uintptr_t)thread_tid(thread
), thread
->priority
, thread
->sched_pri
, 0, 0);
276 stack
= thread
->kernel_stack
;
277 thread
->kernel_stack
= 0;
278 thread
->machine
.kstackptr
= 0;
285 * Routine: machine_stack_attach
289 machine_stack_attach(
293 struct arm_context
*context
;
294 struct arm_saved_state64
*savestate
;
296 #define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */
298 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED
, MACH_STACK_ATTACH
),
299 (uintptr_t)thread_tid(thread
), thread
->priority
, thread
->sched_pri
, 0, 0);
301 thread
->kernel_stack
= stack
;
302 thread
->machine
.kstackptr
= stack
+ kernel_stack_size
- sizeof(struct thread_kernel_state
);
303 thread_initialize_kernel_state(thread
);
305 machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t
)thread
->machine
.kstackptr
);
307 context
= &((thread_kernel_state_t
) thread
->machine
.kstackptr
)->machine
;
308 savestate
= saved_state64(&context
->ss
);
310 savestate
->lr
= (uintptr_t)thread_continue
;
311 savestate
->sp
= thread
->machine
.kstackptr
;
312 savestate
->cpsr
= PSR64_KERNEL_DEFAULT
;
313 machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread
, savestate
->lr
, savestate
->sp
);
318 * Routine: machine_stack_handoff
322 machine_stack_handoff(
328 cpu_data_t
*cpu_data_ptr
;
332 stack
= machine_stack_detach(old
);
333 cpu_data_ptr
= getCpuDatap();
334 new->kernel_stack
= stack
;
335 new->machine
.kstackptr
= stack
+ kernel_stack_size
- sizeof(struct thread_kernel_state
);
336 if (stack
== old
->reserved_stack
) {
337 assert(new->reserved_stack
);
338 old
->reserved_stack
= new->reserved_stack
;
339 new->reserved_stack
= stack
;
343 new_pmap
= new->map
->pmap
;
344 if (old
->map
->pmap
!= new_pmap
)
345 pmap_switch(new_pmap
);
347 new->machine
.CpuDatap
= cpu_data_ptr
;
348 machine_set_current_thread(new);
349 thread_initialize_kernel_state(new);
356 * Routine: call_continuation
361 thread_continue_t continuation
,
363 wait_result_t wresult
,
364 boolean_t enable_interrupts
)
366 #define call_continuation_kprintf(x...) /* kprintf("call_continuation_kprintf:" x) */
368 call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation
, current_thread()->machine
.kstackptr
);
369 Call_continuation(continuation
, parameter
, wresult
, enable_interrupts
);
/*
 * Helpers to program one hardware breakpoint/watchpoint register pair.
 * The *CRn (control) variants also OR the written value into 'accum'
 * so the caller can inspect the union of all control bits afterwards
 * (used for the CONFIG_KERNEL_INTEGRITY privileged-debug check).
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGBCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGWCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
390 void arm_debug_set32(arm_debug_state_t
*debug_state
)
392 struct cpu_data
*cpu_data_ptr
;
393 arm_debug_info_t
*debug_info
= arm_debug_info();
394 boolean_t intr
, set_mde
= 0;
395 arm_debug_state_t off_state
;
397 uint64_t all_ctrls
= 0;
399 intr
= ml_set_interrupts_enabled(FALSE
);
400 cpu_data_ptr
= getCpuDatap();
402 // Set current user debug
403 cpu_data_ptr
->cpu_user_debug
= debug_state
;
405 if (NULL
== debug_state
) {
406 bzero(&off_state
, sizeof(off_state
));
407 debug_state
= &off_state
;
410 switch (debug_info
->num_breakpoint_pairs
) {
412 SET_DBGBVRn(15, (uint64_t)debug_state
->uds
.ds32
.bvr
[15]);
413 SET_DBGBCRn(15, (uint64_t)debug_state
->uds
.ds32
.bcr
[15], all_ctrls
);
415 SET_DBGBVRn(14, (uint64_t)debug_state
->uds
.ds32
.bvr
[14]);
416 SET_DBGBCRn(14, (uint64_t)debug_state
->uds
.ds32
.bcr
[14], all_ctrls
);
418 SET_DBGBVRn(13, (uint64_t)debug_state
->uds
.ds32
.bvr
[13]);
419 SET_DBGBCRn(13, (uint64_t)debug_state
->uds
.ds32
.bcr
[13], all_ctrls
);
421 SET_DBGBVRn(12, (uint64_t)debug_state
->uds
.ds32
.bvr
[12]);
422 SET_DBGBCRn(12, (uint64_t)debug_state
->uds
.ds32
.bcr
[12], all_ctrls
);
424 SET_DBGBVRn(11, (uint64_t)debug_state
->uds
.ds32
.bvr
[11]);
425 SET_DBGBCRn(11, (uint64_t)debug_state
->uds
.ds32
.bcr
[11], all_ctrls
);
427 SET_DBGBVRn(10, (uint64_t)debug_state
->uds
.ds32
.bvr
[10]);
428 SET_DBGBCRn(10, (uint64_t)debug_state
->uds
.ds32
.bcr
[10], all_ctrls
);
430 SET_DBGBVRn(9, (uint64_t)debug_state
->uds
.ds32
.bvr
[9]);
431 SET_DBGBCRn(9, (uint64_t)debug_state
->uds
.ds32
.bcr
[9], all_ctrls
);
433 SET_DBGBVRn(8, (uint64_t)debug_state
->uds
.ds32
.bvr
[8]);
434 SET_DBGBCRn(8, (uint64_t)debug_state
->uds
.ds32
.bcr
[8], all_ctrls
);
436 SET_DBGBVRn(7, (uint64_t)debug_state
->uds
.ds32
.bvr
[7]);
437 SET_DBGBCRn(7, (uint64_t)debug_state
->uds
.ds32
.bcr
[7], all_ctrls
);
439 SET_DBGBVRn(6, (uint64_t)debug_state
->uds
.ds32
.bvr
[6]);
440 SET_DBGBCRn(6, (uint64_t)debug_state
->uds
.ds32
.bcr
[6], all_ctrls
);
442 SET_DBGBVRn(5, (uint64_t)debug_state
->uds
.ds32
.bvr
[5]);
443 SET_DBGBCRn(5, (uint64_t)debug_state
->uds
.ds32
.bcr
[5], all_ctrls
);
445 SET_DBGBVRn(4, (uint64_t)debug_state
->uds
.ds32
.bvr
[4]);
446 SET_DBGBCRn(4, (uint64_t)debug_state
->uds
.ds32
.bcr
[4], all_ctrls
);
448 SET_DBGBVRn(3, (uint64_t)debug_state
->uds
.ds32
.bvr
[3]);
449 SET_DBGBCRn(3, (uint64_t)debug_state
->uds
.ds32
.bcr
[3], all_ctrls
);
451 SET_DBGBVRn(2, (uint64_t)debug_state
->uds
.ds32
.bvr
[2]);
452 SET_DBGBCRn(2, (uint64_t)debug_state
->uds
.ds32
.bcr
[2], all_ctrls
);
454 SET_DBGBVRn(1, (uint64_t)debug_state
->uds
.ds32
.bvr
[1]);
455 SET_DBGBCRn(1, (uint64_t)debug_state
->uds
.ds32
.bcr
[1], all_ctrls
);
457 SET_DBGBVRn(0, (uint64_t)debug_state
->uds
.ds32
.bvr
[0]);
458 SET_DBGBCRn(0, (uint64_t)debug_state
->uds
.ds32
.bcr
[0], all_ctrls
);
463 switch (debug_info
->num_watchpoint_pairs
) {
465 SET_DBGWVRn(15, (uint64_t)debug_state
->uds
.ds32
.wvr
[15]);
466 SET_DBGWCRn(15, (uint64_t)debug_state
->uds
.ds32
.wcr
[15], all_ctrls
);
468 SET_DBGWVRn(14, (uint64_t)debug_state
->uds
.ds32
.wvr
[14]);
469 SET_DBGWCRn(14, (uint64_t)debug_state
->uds
.ds32
.wcr
[14], all_ctrls
);
471 SET_DBGWVRn(13, (uint64_t)debug_state
->uds
.ds32
.wvr
[13]);
472 SET_DBGWCRn(13, (uint64_t)debug_state
->uds
.ds32
.wcr
[13], all_ctrls
);
474 SET_DBGWVRn(12, (uint64_t)debug_state
->uds
.ds32
.wvr
[12]);
475 SET_DBGWCRn(12, (uint64_t)debug_state
->uds
.ds32
.wcr
[12], all_ctrls
);
477 SET_DBGWVRn(11, (uint64_t)debug_state
->uds
.ds32
.wvr
[11]);
478 SET_DBGWCRn(11, (uint64_t)debug_state
->uds
.ds32
.wcr
[11], all_ctrls
);
480 SET_DBGWVRn(10, (uint64_t)debug_state
->uds
.ds32
.wvr
[10]);
481 SET_DBGWCRn(10, (uint64_t)debug_state
->uds
.ds32
.wcr
[10], all_ctrls
);
483 SET_DBGWVRn(9, (uint64_t)debug_state
->uds
.ds32
.wvr
[9]);
484 SET_DBGWCRn(9, (uint64_t)debug_state
->uds
.ds32
.wcr
[9], all_ctrls
);
486 SET_DBGWVRn(8, (uint64_t)debug_state
->uds
.ds32
.wvr
[8]);
487 SET_DBGWCRn(8, (uint64_t)debug_state
->uds
.ds32
.wcr
[8], all_ctrls
);
489 SET_DBGWVRn(7, (uint64_t)debug_state
->uds
.ds32
.wvr
[7]);
490 SET_DBGWCRn(7, (uint64_t)debug_state
->uds
.ds32
.wcr
[7], all_ctrls
);
492 SET_DBGWVRn(6, (uint64_t)debug_state
->uds
.ds32
.wvr
[6]);
493 SET_DBGWCRn(6, (uint64_t)debug_state
->uds
.ds32
.wcr
[6], all_ctrls
);
495 SET_DBGWVRn(5, (uint64_t)debug_state
->uds
.ds32
.wvr
[5]);
496 SET_DBGWCRn(5, (uint64_t)debug_state
->uds
.ds32
.wcr
[5], all_ctrls
);
498 SET_DBGWVRn(4, (uint64_t)debug_state
->uds
.ds32
.wvr
[4]);
499 SET_DBGWCRn(4, (uint64_t)debug_state
->uds
.ds32
.wcr
[4], all_ctrls
);
501 SET_DBGWVRn(3, (uint64_t)debug_state
->uds
.ds32
.wvr
[3]);
502 SET_DBGWCRn(3, (uint64_t)debug_state
->uds
.ds32
.wcr
[3], all_ctrls
);
504 SET_DBGWVRn(2, (uint64_t)debug_state
->uds
.ds32
.wvr
[2]);
505 SET_DBGWCRn(2, (uint64_t)debug_state
->uds
.ds32
.wcr
[2], all_ctrls
);
507 SET_DBGWVRn(1, (uint64_t)debug_state
->uds
.ds32
.wvr
[1]);
508 SET_DBGWCRn(1, (uint64_t)debug_state
->uds
.ds32
.wcr
[1], all_ctrls
);
510 SET_DBGWVRn(0, (uint64_t)debug_state
->uds
.ds32
.wvr
[0]);
511 SET_DBGWCRn(0, (uint64_t)debug_state
->uds
.ds32
.wcr
[0], all_ctrls
);
516 #if defined(CONFIG_KERNEL_INTEGRITY)
517 if ((all_ctrls
& (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED
| ARM_DBG_CR_HIGHER_MODE_ENABLE
)) != 0) {
518 panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls
);
522 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
523 if (0 != debug_state
->uds
.ds32
.bcr
[i
]) {
529 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
530 if (0 != debug_state
->uds
.ds32
.wcr
[i
]) {
537 * Breakpoint/Watchpoint Enable
540 update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
542 update_mdscr(0x8000, 0);
546 * Software debug single step enable
548 if (debug_state
->uds
.ds32
.mdscr_el1
& 0x1) {
549 update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)
551 set_saved_state_cpsr((current_thread()->machine
.upcb
),
552 get_saved_state_cpsr((current_thread()->machine
.upcb
)) | PSR64_SS
);
556 update_mdscr(0x1, 0);
558 #if SINGLE_STEP_RETIRE_ERRATA
559 // Workaround for radar 20619637
560 __builtin_arm_isb(ISB_SY
);
564 (void) ml_set_interrupts_enabled(intr
);
569 void arm_debug_set64(arm_debug_state_t
*debug_state
)
571 struct cpu_data
*cpu_data_ptr
;
572 arm_debug_info_t
*debug_info
= arm_debug_info();
573 boolean_t intr
, set_mde
= 0;
574 arm_debug_state_t off_state
;
576 uint64_t all_ctrls
= 0;
578 intr
= ml_set_interrupts_enabled(FALSE
);
579 cpu_data_ptr
= getCpuDatap();
581 // Set current user debug
582 cpu_data_ptr
->cpu_user_debug
= debug_state
;
584 if (NULL
== debug_state
) {
585 bzero(&off_state
, sizeof(off_state
));
586 debug_state
= &off_state
;
589 switch (debug_info
->num_breakpoint_pairs
) {
591 SET_DBGBVRn(15, debug_state
->uds
.ds64
.bvr
[15]);
592 SET_DBGBCRn(15, (uint64_t)debug_state
->uds
.ds64
.bcr
[15], all_ctrls
);
594 SET_DBGBVRn(14, debug_state
->uds
.ds64
.bvr
[14]);
595 SET_DBGBCRn(14, (uint64_t)debug_state
->uds
.ds64
.bcr
[14], all_ctrls
);
597 SET_DBGBVRn(13, debug_state
->uds
.ds64
.bvr
[13]);
598 SET_DBGBCRn(13, (uint64_t)debug_state
->uds
.ds64
.bcr
[13], all_ctrls
);
600 SET_DBGBVRn(12, debug_state
->uds
.ds64
.bvr
[12]);
601 SET_DBGBCRn(12, (uint64_t)debug_state
->uds
.ds64
.bcr
[12], all_ctrls
);
603 SET_DBGBVRn(11, debug_state
->uds
.ds64
.bvr
[11]);
604 SET_DBGBCRn(11, (uint64_t)debug_state
->uds
.ds64
.bcr
[11], all_ctrls
);
606 SET_DBGBVRn(10, debug_state
->uds
.ds64
.bvr
[10]);
607 SET_DBGBCRn(10, (uint64_t)debug_state
->uds
.ds64
.bcr
[10], all_ctrls
);
609 SET_DBGBVRn(9, debug_state
->uds
.ds64
.bvr
[9]);
610 SET_DBGBCRn(9, (uint64_t)debug_state
->uds
.ds64
.bcr
[9], all_ctrls
);
612 SET_DBGBVRn(8, debug_state
->uds
.ds64
.bvr
[8]);
613 SET_DBGBCRn(8, (uint64_t)debug_state
->uds
.ds64
.bcr
[8], all_ctrls
);
615 SET_DBGBVRn(7, debug_state
->uds
.ds64
.bvr
[7]);
616 SET_DBGBCRn(7, (uint64_t)debug_state
->uds
.ds64
.bcr
[7], all_ctrls
);
618 SET_DBGBVRn(6, debug_state
->uds
.ds64
.bvr
[6]);
619 SET_DBGBCRn(6, (uint64_t)debug_state
->uds
.ds64
.bcr
[6], all_ctrls
);
621 SET_DBGBVRn(5, debug_state
->uds
.ds64
.bvr
[5]);
622 SET_DBGBCRn(5, (uint64_t)debug_state
->uds
.ds64
.bcr
[5], all_ctrls
);
624 SET_DBGBVRn(4, debug_state
->uds
.ds64
.bvr
[4]);
625 SET_DBGBCRn(4, (uint64_t)debug_state
->uds
.ds64
.bcr
[4], all_ctrls
);
627 SET_DBGBVRn(3, debug_state
->uds
.ds64
.bvr
[3]);
628 SET_DBGBCRn(3, (uint64_t)debug_state
->uds
.ds64
.bcr
[3], all_ctrls
);
630 SET_DBGBVRn(2, debug_state
->uds
.ds64
.bvr
[2]);
631 SET_DBGBCRn(2, (uint64_t)debug_state
->uds
.ds64
.bcr
[2], all_ctrls
);
633 SET_DBGBVRn(1, debug_state
->uds
.ds64
.bvr
[1]);
634 SET_DBGBCRn(1, (uint64_t)debug_state
->uds
.ds64
.bcr
[1], all_ctrls
);
636 SET_DBGBVRn(0, debug_state
->uds
.ds64
.bvr
[0]);
637 SET_DBGBCRn(0, (uint64_t)debug_state
->uds
.ds64
.bcr
[0], all_ctrls
);
642 switch (debug_info
->num_watchpoint_pairs
) {
644 SET_DBGWVRn(15, debug_state
->uds
.ds64
.wvr
[15]);
645 SET_DBGWCRn(15, (uint64_t)debug_state
->uds
.ds64
.wcr
[15], all_ctrls
);
647 SET_DBGWVRn(14, debug_state
->uds
.ds64
.wvr
[14]);
648 SET_DBGWCRn(14, (uint64_t)debug_state
->uds
.ds64
.wcr
[14], all_ctrls
);
650 SET_DBGWVRn(13, debug_state
->uds
.ds64
.wvr
[13]);
651 SET_DBGWCRn(13, (uint64_t)debug_state
->uds
.ds64
.wcr
[13], all_ctrls
);
653 SET_DBGWVRn(12, debug_state
->uds
.ds64
.wvr
[12]);
654 SET_DBGWCRn(12, (uint64_t)debug_state
->uds
.ds64
.wcr
[12], all_ctrls
);
656 SET_DBGWVRn(11, debug_state
->uds
.ds64
.wvr
[11]);
657 SET_DBGWCRn(11, (uint64_t)debug_state
->uds
.ds64
.wcr
[11], all_ctrls
);
659 SET_DBGWVRn(10, debug_state
->uds
.ds64
.wvr
[10]);
660 SET_DBGWCRn(10, (uint64_t)debug_state
->uds
.ds64
.wcr
[10], all_ctrls
);
662 SET_DBGWVRn(9, debug_state
->uds
.ds64
.wvr
[9]);
663 SET_DBGWCRn(9, (uint64_t)debug_state
->uds
.ds64
.wcr
[9], all_ctrls
);
665 SET_DBGWVRn(8, debug_state
->uds
.ds64
.wvr
[8]);
666 SET_DBGWCRn(8, (uint64_t)debug_state
->uds
.ds64
.wcr
[8], all_ctrls
);
668 SET_DBGWVRn(7, debug_state
->uds
.ds64
.wvr
[7]);
669 SET_DBGWCRn(7, (uint64_t)debug_state
->uds
.ds64
.wcr
[7], all_ctrls
);
671 SET_DBGWVRn(6, debug_state
->uds
.ds64
.wvr
[6]);
672 SET_DBGWCRn(6, (uint64_t)debug_state
->uds
.ds64
.wcr
[6], all_ctrls
);
674 SET_DBGWVRn(5, debug_state
->uds
.ds64
.wvr
[5]);
675 SET_DBGWCRn(5, (uint64_t)debug_state
->uds
.ds64
.wcr
[5], all_ctrls
);
677 SET_DBGWVRn(4, debug_state
->uds
.ds64
.wvr
[4]);
678 SET_DBGWCRn(4, (uint64_t)debug_state
->uds
.ds64
.wcr
[4], all_ctrls
);
680 SET_DBGWVRn(3, debug_state
->uds
.ds64
.wvr
[3]);
681 SET_DBGWCRn(3, (uint64_t)debug_state
->uds
.ds64
.wcr
[3], all_ctrls
);
683 SET_DBGWVRn(2, debug_state
->uds
.ds64
.wvr
[2]);
684 SET_DBGWCRn(2, (uint64_t)debug_state
->uds
.ds64
.wcr
[2], all_ctrls
);
686 SET_DBGWVRn(1, debug_state
->uds
.ds64
.wvr
[1]);
687 SET_DBGWCRn(1, (uint64_t)debug_state
->uds
.ds64
.wcr
[1], all_ctrls
);
689 SET_DBGWVRn(0, debug_state
->uds
.ds64
.wvr
[0]);
690 SET_DBGWCRn(0, (uint64_t)debug_state
->uds
.ds64
.wcr
[0], all_ctrls
);
695 #if defined(CONFIG_KERNEL_INTEGRITY)
696 if ((all_ctrls
& (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED
| ARM_DBG_CR_HIGHER_MODE_ENABLE
)) != 0) {
697 panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls
);
701 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
702 if (0 != debug_state
->uds
.ds64
.bcr
[i
]) {
708 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
709 if (0 != debug_state
->uds
.ds64
.wcr
[i
]) {
716 * Breakpoint/Watchpoint Enable
719 update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
723 * Software debug single step enable
725 if (debug_state
->uds
.ds64
.mdscr_el1
& 0x1) {
727 update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)
729 set_saved_state_cpsr((current_thread()->machine
.upcb
),
730 get_saved_state_cpsr((current_thread()->machine
.upcb
)) | PSR64_SS
);
734 update_mdscr(0x1, 0);
736 #if SINGLE_STEP_RETIRE_ERRATA
737 // Workaround for radar 20619637
738 __builtin_arm_isb(ISB_SY
);
742 (void) ml_set_interrupts_enabled(intr
);
747 void arm_debug_set(arm_debug_state_t
*debug_state
)
750 switch (debug_state
->dsh
.flavor
) {
751 case ARM_DEBUG_STATE32
:
752 arm_debug_set32(debug_state
);
754 case ARM_DEBUG_STATE64
:
755 arm_debug_set64(debug_state
);
758 panic("arm_debug_set");
762 if (thread_is_64bit_data(current_thread()))
763 arm_debug_set64(debug_state
);
765 arm_debug_set32(debug_state
);
769 #define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
771 debug_legacy_state_is_valid(arm_legacy_debug_state_t
*debug_state
)
773 arm_debug_info_t
*debug_info
= arm_debug_info();
775 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
776 if (0 != debug_state
->bcr
[i
] && VM_MAX_ADDRESS32
<= debug_state
->bvr
[i
])
780 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
781 if (0 != debug_state
->wcr
[i
] && VM_MAX_ADDRESS32
<= debug_state
->wvr
[i
])
788 debug_state_is_valid32(arm_debug_state32_t
*debug_state
)
790 arm_debug_info_t
*debug_info
= arm_debug_info();
792 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
793 if (0 != debug_state
->bcr
[i
] && VM_MAX_ADDRESS32
<= debug_state
->bvr
[i
])
797 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
798 if (0 != debug_state
->wcr
[i
] && VM_MAX_ADDRESS32
<= debug_state
->wvr
[i
])
805 debug_state_is_valid64(arm_debug_state64_t
*debug_state
)
807 arm_debug_info_t
*debug_info
= arm_debug_info();
809 for (i
= 0; i
< debug_info
->num_breakpoint_pairs
; i
++) {
810 if (0 != debug_state
->bcr
[i
] && MACH_VM_MAX_ADDRESS
<= debug_state
->bvr
[i
])
814 for (i
= 0; i
< debug_info
->num_watchpoint_pairs
; i
++) {
815 if (0 != debug_state
->wcr
[i
] && MACH_VM_MAX_ADDRESS
<= debug_state
->wvr
[i
])
822 * Duplicate one arm_debug_state_t to another. "all" parameter
823 * is ignored in the case of ARM -- Is this the right assumption?
826 copy_legacy_debug_state(
827 arm_legacy_debug_state_t
*src
,
828 arm_legacy_debug_state_t
*target
,
829 __unused boolean_t all
)
831 bcopy(src
, target
, sizeof(arm_legacy_debug_state_t
));
836 arm_debug_state32_t
*src
,
837 arm_debug_state32_t
*target
,
838 __unused boolean_t all
)
840 bcopy(src
, target
, sizeof(arm_debug_state32_t
));
845 arm_debug_state64_t
*src
,
846 arm_debug_state64_t
*target
,
847 __unused boolean_t all
)
849 bcopy(src
, target
, sizeof(arm_debug_state64_t
));
853 machine_thread_set_tsd_base(
855 mach_vm_offset_t tsd_base
)
858 if (thread
->task
== kernel_task
) {
859 return KERN_INVALID_ARGUMENT
;
862 if (tsd_base
& MACHDEP_CPUNUM_MASK
) {
863 return KERN_INVALID_ARGUMENT
;
866 if (thread_is_64bit_addr(thread
)) {
867 if (tsd_base
> vm_map_max(thread
->map
))
870 if (tsd_base
> UINT32_MAX
)
874 thread
->machine
.cthread_self
= tsd_base
;
876 /* For current thread, make the TSD base active immediately */
877 if (thread
== current_thread()) {
878 uint64_t cpunum
, tpidrro_el0
;
880 mp_disable_preemption();
881 tpidrro_el0
= get_tpidrro();
882 cpunum
= tpidrro_el0
& (MACHDEP_CPUNUM_MASK
);
883 set_tpidrro(tsd_base
| cpunum
);
884 mp_enable_preemption();