/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/kalloc.h>
#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>
#define USER_SS_ZONE_ALLOC_SIZE (0x4000)
extern int debug_task;

zone_t ads_zone;     /* zone for debug_state area */
zone_t user_ss_zone; /* zone for user arm_context_t allocations */
/*
 * Routine: consider_machine_collect
 */
void
consider_machine_collect(void)
/*
 * Routine: consider_machine_adjust
 */
void
consider_machine_adjust(void)
/*
 * Routine: machine_switch_context
 */
thread_t
machine_switch_context(
    thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
    thread_t retval;
    pmap_t new_pmap;
    cpu_data_t *cpu_data_ptr;
#define machine_switch_context_kprintf(x...) /* kprintf("machine_switch_context: " x) */
    cpu_data_ptr = getCpuDatap();
    if (old == new)
        panic("machine_switch_context");
    new_pmap = new->map->pmap;
    if (old->map->pmap != new_pmap)
        pmap_switch(new_pmap);

    new->machine.CpuDatap = cpu_data_ptr;
    machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);
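
    /*
     * Switch_context performs the actual register save/restore for the
     * switch to `new'; the assert below just checks that it reported a
     * valid previous thread.
     */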
    retval = Switch_context(old, continuation, new);
    assert(retval != NULL);

    return retval;
}
/*
 * Routine: machine_thread_create
 */
kern_return_t
machine_thread_create(
    thread_t thread,
    task_t task)
{
    arm_context_t *thread_user_ss = NULL;
    kern_return_t result = KERN_SUCCESS;
#define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */
    machine_thread_create_kprintf("thread = %x\n", thread);

    if (current_thread() != thread) {
        thread->machine.CpuDatap = (cpu_data_t *)0;
    }
    thread->machine.preemption_count = 0;
    thread->machine.cthread_self = 0;
    thread->machine.cthread_data = 0;

    if (task != kernel_task) {
        /* If this isn't a kernel thread, we'll have userspace state. */
        thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);
        if (!thread->machine.contextData) {
            return KERN_FAILURE;
        }
        thread->machine.upcb = &thread->machine.contextData->ss;
        thread->machine.uNeon = &thread->machine.contextData->ns;
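        /*
         * upcb and uNeon point into the arm_context_t allocated above, so a
         * thread's integer and NEON user save areas live in a single
         * user_ss_zone element.
         */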
        if (task_has_64BitAddr(task)) {
            thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
            thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
            thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
            thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
        } else {
            thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
            thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
            thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
            thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
        }
    } else {
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
    }
    bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));

    result = machine_thread_state_initialize(thread);

    if (result != KERN_SUCCESS) {
        thread_user_ss = thread->machine.contextData;
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
        zfree(user_ss_zone, thread_user_ss);
    }

    return result;
}
/*
 * Routine: machine_thread_destroy
 */
void
machine_thread_destroy(
    thread_t thread)
{
    arm_context_t *thread_user_ss;
    if (thread->machine.contextData) {
        /* Disassociate the user save state from the thread before we free it. */
        thread_user_ss = thread->machine.contextData;
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
        zfree(user_ss_zone, thread_user_ss);
    }
    if (thread->machine.DebugData != NULL) {
        if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
            arm_debug_set(NULL);
        }

        zfree(ads_zone, thread->machine.DebugData);
    }
}
/*
 * Routine: machine_thread_init
 */
void
machine_thread_init(void)
{
    ads_zone = zinit(sizeof(arm_debug_state_t),
                     THREAD_CHUNK * (sizeof(arm_debug_state_t)),
                     THREAD_CHUNK * (sizeof(arm_debug_state_t)),
    /*
     * Create a zone for the user save state. At the time this zone was created,
     * the user save state was 848 bytes, and the matching kalloc zone was 1024
     * bytes, which would result in significant amounts of wasted space if we
     * simply used kalloc to allocate the user saved state.
     *
     * 0x4000 has been chosen as the allocation size, as it results in 272 bytes
     * of wasted space per chunk, which should correspond to 19 allocations.
     */
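    /*
     * Illustrative arithmetic for the sizing described above (assuming the
     * 848-byte save state quoted there):
     *
     *     0x4000 / 848    -> 19 allocations per chunk
     *     19 * 848        =  16112 bytes used
     *     0x4000 - 16112  =  272 bytes wasted per chunk
     *
     * whereas a kalloc-backed allocation would round each 848-byte save state
     * up to a 1024-byte element, wasting 176 bytes per thread.
     */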
    user_ss_zone = zinit(sizeof(arm_context_t),
                         CONFIG_THREAD_MAX * (sizeof(arm_context_t)),
                         USER_SS_ZONE_ALLOC_SIZE,
/*
 * Routine: get_useraddr
 */
user_addr_t
get_useraddr()
{
    return (get_saved_state_pc(current_thread()->machine.upcb));
}
/*
 * Routine: machine_stack_detach
 */
vm_offset_t
machine_stack_detach(
    thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
                 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    thread->machine.kstackptr = 0;

    return (stack);
}
/*
 * Routine: machine_stack_attach
 */
void
machine_stack_attach(
    thread_t thread,
    vm_offset_t stack)
{
    struct arm_context *context;
    struct arm_saved_state64 *savestate;
#define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */
    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
                 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
    thread->kernel_stack = stack;
    thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    thread_initialize_kernel_state(thread);

    machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

    context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
    savestate = saved_state64(&context->ss);
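
    /*
     * The fields set below form the initial saved state for this kernel
     * stack: lr points at thread_continue, sp at kstackptr, and cpsr holds
     * the default kernel PSR, so the first switch onto this stack resumes
     * in thread_continue.
     */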
    savestate->lr = (uintptr_t)thread_continue;
    savestate->sp = thread->machine.kstackptr;
    savestate->cpsr = PSR64_KERNEL_DEFAULT;
    machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}
/*
 * Routine: machine_stack_handoff
 */
void
machine_stack_handoff(
    thread_t old,
    thread_t new)
{
    cpu_data_t *cpu_data_ptr;
    stack = machine_stack_detach(old);
    cpu_data_ptr = getCpuDatap();
    new->kernel_stack = stack;
    new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }
    new_pmap = new->map->pmap;
    if (old->map->pmap != new_pmap)
        pmap_switch(new_pmap);

    new->machine.CpuDatap = cpu_data_ptr;
    machine_set_current_thread(new);
    thread_initialize_kernel_state(new);
}
/*
 * Routine: call_continuation
 */
void
call_continuation(
    thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult)
{
#define call_continuation_kprintf(x...) /* kprintf("call_continuation_kprintf:" x) */
    call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
    Call_continuation(continuation, parameter, wresult, current_thread()->machine.kstackptr);
}
void arm_debug_set32(arm_debug_state_t *debug_state)
{
    struct cpu_data *cpu_data_ptr;
    arm_debug_info_t *debug_info = arm_debug_info();
    volatile uint64_t state;
    boolean_t intr, set_mde = 0;
    arm_debug_state_t off_state;
    uint32_t i;
    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    // Set current user debug
    cpu_data_ptr->cpu_user_debug = debug_state;
    if (NULL == debug_state) {
        bzero(&off_state, sizeof(off_state));
        debug_state = &off_state;
    }
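
    /*
     * The switch below programs one DBGBVRn_EL1/DBGBCRn_EL1 pair per
     * hardware breakpoint reported by debug_info, so register pairs the CPU
     * does not implement are never written; the watchpoint switch further
     * down does the same for DBGWVRn_EL1/DBGWCRn_EL1.
     */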
    switch (debug_info->num_breakpoint_pairs) {
    case 16:
        __asm__ volatile("msr DBGBVR15_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[15]));
        __asm__ volatile("msr DBGBCR15_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[15]));
    case 15:
        __asm__ volatile("msr DBGBVR14_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[14]));
        __asm__ volatile("msr DBGBCR14_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[14]));
    case 14:
        __asm__ volatile("msr DBGBVR13_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[13]));
        __asm__ volatile("msr DBGBCR13_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[13]));
    case 13:
        __asm__ volatile("msr DBGBVR12_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[12]));
        __asm__ volatile("msr DBGBCR12_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[12]));
    case 12:
        __asm__ volatile("msr DBGBVR11_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[11]));
        __asm__ volatile("msr DBGBCR11_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[11]));
    case 11:
        __asm__ volatile("msr DBGBVR10_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[10]));
        __asm__ volatile("msr DBGBCR10_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[10]));
    case 10:
        __asm__ volatile("msr DBGBVR9_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[9]));
        __asm__ volatile("msr DBGBCR9_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[9]));
    case 9:
        __asm__ volatile("msr DBGBVR8_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[8]));
        __asm__ volatile("msr DBGBCR8_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[8]));
    case 8:
        __asm__ volatile("msr DBGBVR7_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[7]));
        __asm__ volatile("msr DBGBCR7_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[7]));
    case 7:
        __asm__ volatile("msr DBGBVR6_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[6]));
        __asm__ volatile("msr DBGBCR6_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[6]));
    case 6:
        __asm__ volatile("msr DBGBVR5_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[5]));
        __asm__ volatile("msr DBGBCR5_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[5]));
    case 5:
        __asm__ volatile("msr DBGBVR4_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[4]));
        __asm__ volatile("msr DBGBCR4_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[4]));
    case 4:
        __asm__ volatile("msr DBGBVR3_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[3]));
        __asm__ volatile("msr DBGBCR3_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[3]));
    case 3:
        __asm__ volatile("msr DBGBVR2_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[2]));
        __asm__ volatile("msr DBGBCR2_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[2]));
    case 2:
        __asm__ volatile("msr DBGBVR1_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[1]));
        __asm__ volatile("msr DBGBCR1_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[1]));
    case 1:
        __asm__ volatile("msr DBGBVR0_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[0]));
        __asm__ volatile("msr DBGBCR0_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[0]));
    default:
        break;
    }

    switch (debug_info->num_watchpoint_pairs) {
    case 16:
        __asm__ volatile("msr DBGWVR15_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[15]));
        __asm__ volatile("msr DBGWCR15_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[15]));
    case 15:
        __asm__ volatile("msr DBGWVR14_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[14]));
        __asm__ volatile("msr DBGWCR14_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[14]));
    case 14:
        __asm__ volatile("msr DBGWVR13_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[13]));
        __asm__ volatile("msr DBGWCR13_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[13]));
    case 13:
        __asm__ volatile("msr DBGWVR12_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[12]));
        __asm__ volatile("msr DBGWCR12_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[12]));
    case 12:
        __asm__ volatile("msr DBGWVR11_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[11]));
        __asm__ volatile("msr DBGWCR11_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[11]));
    case 11:
        __asm__ volatile("msr DBGWVR10_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[10]));
        __asm__ volatile("msr DBGWCR10_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[10]));
    case 10:
        __asm__ volatile("msr DBGWVR9_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[9]));
        __asm__ volatile("msr DBGWCR9_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[9]));
    case 9:
        __asm__ volatile("msr DBGWVR8_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[8]));
        __asm__ volatile("msr DBGWCR8_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[8]));
    case 8:
        __asm__ volatile("msr DBGWVR7_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[7]));
        __asm__ volatile("msr DBGWCR7_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[7]));
    case 7:
        __asm__ volatile("msr DBGWVR6_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[6]));
        __asm__ volatile("msr DBGWCR6_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[6]));
    case 6:
        __asm__ volatile("msr DBGWVR5_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[5]));
        __asm__ volatile("msr DBGWCR5_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[5]));
    case 5:
        __asm__ volatile("msr DBGWVR4_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[4]));
        __asm__ volatile("msr DBGWCR4_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[4]));
    case 4:
        __asm__ volatile("msr DBGWVR3_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[3]));
        __asm__ volatile("msr DBGWCR3_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[3]));
    case 3:
        __asm__ volatile("msr DBGWVR2_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[2]));
        __asm__ volatile("msr DBGWCR2_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[2]));
    case 2:
        __asm__ volatile("msr DBGWVR1_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[1]));
        __asm__ volatile("msr DBGWCR1_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[1]));
    case 1:
        __asm__ volatile("msr DBGWVR0_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[0]));
        __asm__ volatile("msr DBGWCR0_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[0]));
    default:
        break;
    }
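
    /*
     * Decide whether the debug enable (MDE) bit is needed at all: set_mde is
     * raised if any breakpoint or watchpoint control register in the new
     * state is non-zero.
     */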
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->uds.ds32.bcr[i]) {
            set_mde = 1;
            break;
        }
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->uds.ds32.wcr[i]) {
            set_mde = 1;
            break;
        }
    }
    /*
     * Breakpoint/Watchpoint Enable
     */
    if (set_mde) {
        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state |= 0x8000; // MDSCR_EL1[MDE]
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));
    } else {
        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state &= ~0x8000;
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));
    }
    /*
     * Software debug single step enable
     */
    if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state = (state & ~0x8000) | 0x1; // ~MDE | SS : no brk/watch while single stepping (which we've set)
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

        set_saved_state_cpsr((current_thread()->machine.upcb),
            get_saved_state_cpsr((current_thread()->machine.upcb)) | PSR64_SS);
    } else {
        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state &= ~0x1;
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

#if SINGLE_STEP_RETIRE_ERRATA
        // Workaround for radar 20619637
        __builtin_arm_isb(ISB_SY);
#endif
    }
    (void) ml_set_interrupts_enabled(intr);
}
void arm_debug_set64(arm_debug_state_t *debug_state)
{
    struct cpu_data *cpu_data_ptr;
    arm_debug_info_t *debug_info = arm_debug_info();
    volatile uint64_t state;
    boolean_t intr, set_mde = 0;
    arm_debug_state_t off_state;
    uint32_t i;
    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    // Set current user debug
    cpu_data_ptr->cpu_user_debug = debug_state;
    if (NULL == debug_state) {
        bzero(&off_state, sizeof(off_state));
        debug_state = &off_state;
    }
    switch (debug_info->num_breakpoint_pairs) {
    case 16:
        __asm__ volatile("msr DBGBVR15_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[15]));
        __asm__ volatile("msr DBGBCR15_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[15]));
    case 15:
        __asm__ volatile("msr DBGBVR14_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[14]));
        __asm__ volatile("msr DBGBCR14_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[14]));
    case 14:
        __asm__ volatile("msr DBGBVR13_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[13]));
        __asm__ volatile("msr DBGBCR13_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[13]));
    case 13:
        __asm__ volatile("msr DBGBVR12_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[12]));
        __asm__ volatile("msr DBGBCR12_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[12]));
    case 12:
        __asm__ volatile("msr DBGBVR11_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[11]));
        __asm__ volatile("msr DBGBCR11_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[11]));
    case 11:
        __asm__ volatile("msr DBGBVR10_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[10]));
        __asm__ volatile("msr DBGBCR10_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[10]));
    case 10:
        __asm__ volatile("msr DBGBVR9_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[9]));
        __asm__ volatile("msr DBGBCR9_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[9]));
    case 9:
        __asm__ volatile("msr DBGBVR8_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[8]));
        __asm__ volatile("msr DBGBCR8_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[8]));
    case 8:
        __asm__ volatile("msr DBGBVR7_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[7]));
        __asm__ volatile("msr DBGBCR7_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[7]));
    case 7:
        __asm__ volatile("msr DBGBVR6_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[6]));
        __asm__ volatile("msr DBGBCR6_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[6]));
    case 6:
        __asm__ volatile("msr DBGBVR5_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[5]));
        __asm__ volatile("msr DBGBCR5_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[5]));
    case 5:
        __asm__ volatile("msr DBGBVR4_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[4]));
        __asm__ volatile("msr DBGBCR4_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[4]));
    case 4:
        __asm__ volatile("msr DBGBVR3_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[3]));
        __asm__ volatile("msr DBGBCR3_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[3]));
    case 3:
        __asm__ volatile("msr DBGBVR2_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[2]));
        __asm__ volatile("msr DBGBCR2_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[2]));
    case 2:
        __asm__ volatile("msr DBGBVR1_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[1]));
        __asm__ volatile("msr DBGBCR1_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[1]));
    case 1:
        __asm__ volatile("msr DBGBVR0_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[0]));
        __asm__ volatile("msr DBGBCR0_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[0]));
    default:
        break;
    }

    switch (debug_info->num_watchpoint_pairs) {
    case 16:
        __asm__ volatile("msr DBGWVR15_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[15]));
        __asm__ volatile("msr DBGWCR15_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[15]));
    case 15:
        __asm__ volatile("msr DBGWVR14_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[14]));
        __asm__ volatile("msr DBGWCR14_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[14]));
    case 14:
        __asm__ volatile("msr DBGWVR13_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[13]));
        __asm__ volatile("msr DBGWCR13_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[13]));
    case 13:
        __asm__ volatile("msr DBGWVR12_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[12]));
        __asm__ volatile("msr DBGWCR12_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[12]));
    case 12:
        __asm__ volatile("msr DBGWVR11_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[11]));
        __asm__ volatile("msr DBGWCR11_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[11]));
    case 11:
        __asm__ volatile("msr DBGWVR10_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[10]));
        __asm__ volatile("msr DBGWCR10_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[10]));
    case 10:
        __asm__ volatile("msr DBGWVR9_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[9]));
        __asm__ volatile("msr DBGWCR9_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[9]));
    case 9:
        __asm__ volatile("msr DBGWVR8_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[8]));
        __asm__ volatile("msr DBGWCR8_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[8]));
    case 8:
        __asm__ volatile("msr DBGWVR7_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[7]));
        __asm__ volatile("msr DBGWCR7_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[7]));
    case 7:
        __asm__ volatile("msr DBGWVR6_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[6]));
        __asm__ volatile("msr DBGWCR6_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[6]));
    case 6:
        __asm__ volatile("msr DBGWVR5_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[5]));
        __asm__ volatile("msr DBGWCR5_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[5]));
    case 5:
        __asm__ volatile("msr DBGWVR4_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[4]));
        __asm__ volatile("msr DBGWCR4_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[4]));
    case 4:
        __asm__ volatile("msr DBGWVR3_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[3]));
        __asm__ volatile("msr DBGWCR3_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[3]));
    case 3:
        __asm__ volatile("msr DBGWVR2_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[2]));
        __asm__ volatile("msr DBGWCR2_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[2]));
    case 2:
        __asm__ volatile("msr DBGWVR1_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[1]));
        __asm__ volatile("msr DBGWCR1_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[1]));
    case 1:
        __asm__ volatile("msr DBGWVR0_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[0]));
        __asm__ volatile("msr DBGWCR0_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[0]));
    default:
        break;
    }

    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->uds.ds64.bcr[i]) {
            set_mde = 1;
            break;
        }
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->uds.ds64.wcr[i]) {
            set_mde = 1;
            break;
        }
    }
    /*
     * Breakpoint/Watchpoint Enable
     */
    if (set_mde) {
        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state |= 0x8000; // MDSCR_EL1[MDE]
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));
    }
    /*
     * Software debug single step enable
     */
    if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state = (state & ~0x8000) | 0x1; // ~MDE | SS : no brk/watch while single stepping (which we've set)
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

        set_saved_state_cpsr((current_thread()->machine.upcb),
            get_saved_state_cpsr((current_thread()->machine.upcb)) | PSR64_SS);
    } else {
        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state &= ~0x1;
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

#if SINGLE_STEP_RETIRE_ERRATA
        // Workaround for radar 20619637
        __builtin_arm_isb(ISB_SY);
#endif
    }
    (void) ml_set_interrupts_enabled(intr);
}
void arm_debug_set(arm_debug_state_t *debug_state)
{
    if (debug_state) {
        switch (debug_state->dsh.flavor) {
        case ARM_DEBUG_STATE32:
            arm_debug_set32(debug_state);
            break;
        case ARM_DEBUG_STATE64:
            arm_debug_set64(debug_state);
            break;
        default:
            panic("arm_debug_set");
        }
    } else {
        if (thread_is_64bit(current_thread()))
            arm_debug_set64(debug_state);
        else
            arm_debug_set32(debug_state);
    }
}
#define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
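
/*
 * The *_is_valid helpers below reject debug state whose enabled breakpoint or
 * watchpoint value registers point outside the user-addressable range:
 * VM_MAX_ADDRESS32 for the 32-bit flavors, MACH_VM_MAX_ADDRESS for the 64-bit
 * flavor.
 */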
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
            return FALSE;
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
            return FALSE;
    }

    return TRUE;
}
boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
            return FALSE;
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
            return FALSE;
    }

    return TRUE;
}
boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i])
            return FALSE;
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i])
            return FALSE;
    }

    return TRUE;
}
/*
 * Duplicate one arm_debug_state_t to another. The "all" parameter
 * is ignored in the case of ARM -- is this the right assumption?
 */
void
copy_legacy_debug_state(
    arm_legacy_debug_state_t *src,
    arm_legacy_debug_state_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}
void
copy_debug_state32(
    arm_debug_state32_t *src,
    arm_debug_state32_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_debug_state32_t));
}
void
copy_debug_state64(
    arm_debug_state64_t *src,
    arm_debug_state64_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_debug_state64_t));
}
kern_return_t
machine_thread_set_tsd_base(
    thread_t thread,
    mach_vm_offset_t tsd_base)
{
    if (thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    if (tsd_base & MACHDEP_CPUNUM_MASK) {
        return KERN_INVALID_ARGUMENT;
    }
    if (thread_is_64bit(thread)) {
        if (tsd_base > vm_map_max(thread->map))
            tsd_base = 0ULL;
    } else {
        if (tsd_base > UINT32_MAX)
            tsd_base = 0ULL;
    }
    thread->machine.cthread_self = tsd_base;
    /* For current thread, make the TSD base active immediately */
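    /*
     * TPIDRRO_EL0 packs the TSD base together with the CPU number in its
     * MACHDEP_CPUNUM_MASK low bits, which is why an unaligned tsd_base was
     * rejected above. A userspace reader would recover the base roughly as in
     * this illustrative sketch (not part of this file; exact userspace
     * constants may differ):
     *
     *     uint64_t tpidrro;
     *     __asm__ volatile("mrs %0, TPIDRRO_EL0" : "=r"(tpidrro));
     *     void *tsd = (void *)(tpidrro & ~MACHDEP_CPUNUM_MASK);
     */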
    if (thread == current_thread()) {
        uint64_t cpunum, tpidrro_el0;

        mp_disable_preemption();
        tpidrro_el0 = get_tpidrro();
        cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
        set_tpidrro(tsd_base | cpunum);
        mp_enable_preemption();
    }

    return KERN_SUCCESS;
}