/* osfmk/arm64/pcb.c (xnu-4570.71.2) */
/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28
29 #include <debug.h>
30
31 #include <types.h>
32
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kalloc.h>
45 #include <kern/kpc.h>
46
47 #if MONOTONIC
48 #include <kern/monotonic.h>
49 #endif /* MONOTONIC */
50
51 #include <machine/atomic.h>
52 #include <arm64/proc_reg.h>
53 #include <arm64/machine_machdep.h>
54 #include <arm/cpu_data_internal.h>
55 #include <arm/machdep_call.h>
56 #include <arm/misc_protos.h>
57 #include <arm/cpuid.h>
58
59 #include <vm/vm_map.h>
60 #include <vm/vm_protos.h>
61
62 #include <sys/kdebug.h>
63
#define USER_SS_ZONE_ALLOC_SIZE (0x4000)

extern int debug_task;

zone_t ads_zone;     /* zone for debug_state area */
zone_t user_ss_zone; /* zone for user arm_context_t allocations */

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}
/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
	thread_t retval;
	pmap_t new_pmap;
	cpu_data_t *cpu_data_ptr;

#define machine_switch_context_kprintf(x...) /* kprintf("machine_switch_context: " x) */

	cpu_data_ptr = getCpuDatap();
	if (old == new)
		panic("machine_switch_context");

	kpc_off_cpu(old);

	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap)
		pmap_switch(new_pmap);

	new->machine.CpuDatap = cpu_data_ptr;

	machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);

	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}

/*
 * Routine: machine_thread_create
 *
 */
kern_return_t
machine_thread_create(
	thread_t thread,
	task_t task)
{
	arm_context_t *thread_user_ss = NULL;
	kern_return_t result = KERN_SUCCESS;

#define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.cthread_data = 0;

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);

		if (!thread->machine.contextData) {
			return KERN_FAILURE;
		}

		thread->machine.upcb = &thread->machine.contextData->ss;
		thread->machine.uNeon = &thread->machine.contextData->ns;

		if (task_has_64BitAddr(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));

	result = machine_thread_state_initialize(thread);

	if (result != KERN_SUCCESS) {
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
		zfree(user_ss_zone, thread_user_ss);
	}

	return result;
}

/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(
	thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		zfree(ads_zone, thread->machine.DebugData);
	}
}

/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
	ads_zone = zinit(sizeof(arm_debug_state_t),
	                 THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	                 THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	                 "arm debug state");

	/*
	 * Create a zone for the user save state. At the time this zone was created,
	 * the user save state was 848 bytes, and the matching kalloc zone was 1024
	 * bytes, which would result in significant amounts of wasted space if we
	 * simply used kalloc to allocate the user saved state.
	 *
	 * 0x4000 has been chosen as the allocation size, as it results in 272 bytes
	 * of wasted space per chunk, which should correspond to 19 allocations.
	 */
	user_ss_zone = zinit(sizeof(arm_context_t),
	                     CONFIG_THREAD_MAX * (sizeof(arm_context_t)),
	                     USER_SS_ZONE_ALLOC_SIZE,
	                     "user save state");
}
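
/*
 * Illustrative sketch (not part of the build) of the sizing arithmetic in the
 * zone comment above: with an 848-byte element and a 0x4000-byte chunk, each
 * chunk holds 19 elements and wastes 272 bytes. The 848-byte element size is
 * an assumption frozen from that comment; sizeof(arm_context_t) may differ on
 * other releases.
 */
#if 0 /* example only */
static void
user_ss_zone_sizing_sketch(void)
{
	const size_t elem      = 848;                      /* save state size quoted above */
	const size_t chunk     = USER_SS_ZONE_ALLOC_SIZE;  /* 0x4000 = 16384 bytes */
	const size_t per_chunk = chunk / elem;             /* 16384 / 848 = 19 allocations */
	const size_t waste     = chunk - per_chunk * elem; /* 16384 - 19 * 848 = 272 bytes */
	assert(per_chunk == 19 && waste == 272);
}
#endif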

/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr()
{
	return (get_saved_state_pc(current_thread()->machine.upcb));
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(
	thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	             (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return (stack);
}

/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(
	thread_t thread,
	vm_offset_t stack)
{
	struct arm_context *context;
	struct arm_saved_state64 *savestate;

#define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	             (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = saved_state64(&context->ss);
	savestate->fp = 0;
	savestate->lr = (uintptr_t)thread_continue;
	savestate->sp = thread->machine.kstackptr;
	savestate->cpsr = PSR64_KERNEL_DEFAULT;
	machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}
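
/*
 * Stack layout implied by the kstackptr computation above (an illustrative
 * diagram inferred from the arithmetic, not authoritative): the
 * thread_kernel_state is carved off the top of the kernel stack, and the
 * initial kernel sp is kstackptr, growing downward toward the stack base.
 *
 *   stack                             kstackptr       stack + kernel_stack_size
 *     | <--- usable kernel stack ---> | thread_kernel_state |
 */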

/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(
	thread_t old,
	thread_t new)
{
	vm_offset_t stack;
	pmap_t new_pmap;
	cpu_data_t *cpu_data_ptr;

	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
	cpu_data_ptr = getCpuDatap();
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap)
		pmap_switch(new_pmap);

	new->machine.CpuDatap = cpu_data_ptr;
	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}

/*
 * Routine: call_continuation
 *
 */
void
call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult)
{
#define call_continuation_kprintf(x...) /* kprintf("call_continuation_kprintf: " x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, current_thread()->machine.kstackptr);
}

/* Setting breakpoints in EL1 is effectively a KTRR bypass. The ability to do so is
 * controlled by MDSCR.KDE. The MSR to set MDSCR must be present to allow
 * self-hosted user mode debug. Any checks before the MSR can be skipped with ROP,
 * so we need to put the checks after the MSR, where they can't be skipped. That
 * still leaves a small window if a breakpoint is set on the instruction
 * immediately after the MSR. To handle that, we also do a check and then set of
 * the breakpoint control registers. This allows us to guarantee that a given
 * core will never have both KDE set and a breakpoint targeting EL1.
 *
 * If KDE gets set, unset it and then panic. */
static void
update_mdscr(uint64_t clear, uint64_t set)
{
	uint64_t result = 0;
	uint64_t tmp1, tmp2;
	__asm__ volatile(
		"mrs %[reg], MDSCR_EL1\n"
		"bic %[reg], %[reg], %[clear]\n"
		"orr %[reg], %[reg], %[set]\n"
		"1:\n"
		"bic %[reg], %[reg], #0x2000\n"
		"msr MDSCR_EL1, %[reg]\n"
#if defined(CONFIG_KERNEL_INTEGRITY)
		/* verify KDE didn't get set (including via ROP);
		 * if set, clear it and then panic */
		"ands %[tmp], %[reg], #0x2000\n"
		"orr %[res], %[res], %[tmp]\n"
		"bne 1b\n"
#endif
		: [res] "+r" (result), [tmp] "=r" (tmp1), [reg] "=r" (tmp2)
		: [clear] "r" (clear), [set] "r" (set) : "x0");
#if defined(CONFIG_KERNEL_INTEGRITY)
	if (result)
		panic("MDSCR.KDE was set: %llx %llx %llx", tmp1, tmp2, result);
#endif
}
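
/*
 * Usage sketch for update_mdscr() (illustrative only; both patterns appear
 * later in this file): 'clear' names the MDSCR_EL1 bits to drop and 'set'
 * the bits to raise, with bit 13 (KDE) forcibly kept clear.
 */
#if 0 /* example only */
update_mdscr(0, 0x8000);   /* set MDSCR_EL1.MDE: enable breakpoints/watchpoints */
update_mdscr(0x8000, 0x1); /* clear MDE, set SS for single stepping */
#endif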

#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGBCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGWCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))

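/*
 * For reference (descriptive note, not from the original source): the two
 * control-register macros both write the register and OR the written value
 * into an accumulator, so SET_DBGBCRn(0, v, acc) is roughly
 *   msr DBGBCR0_EL1, v; acc |= v;
 * That lets the callers below inspect the union of every control value
 * written and panic if any of them targets EL1.
 */
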
void arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr, set_mde = 0;
	arm_debug_state_t off_state;
	uint32_t i;
	uint64_t all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
	default:
		break;
	}

	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.bcr[i]) {
			set_mde = 1;
			break;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.wcr[i]) {
			set_mde = 1;
			break;
		}
	}

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (set_mde) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		set_saved_state_cpsr((current_thread()->machine.upcb),
		                     get_saved_state_cpsr((current_thread()->machine.upcb)) | PSR64_SS);
	} else {
		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}

void arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr, set_mde = 0;
	arm_debug_state_t off_state;
	uint32_t i;
	uint64_t all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
	default:
		break;
	}

	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->uds.ds64.bcr[i]) {
			set_mde = 1;
			break;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->uds.ds64.wcr[i]) {
			set_mde = 1;
			break;
		}
	}

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (set_mde) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		set_saved_state_cpsr((current_thread()->machine.upcb),
		                     get_saved_state_cpsr((current_thread()->machine.upcb)) | PSR64_SS);
	} else {
		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}

void arm_debug_set(arm_debug_state_t *debug_state)
{
	if (debug_state) {
		switch (debug_state->dsh.flavor) {
		case ARM_DEBUG_STATE32:
			arm_debug_set32(debug_state);
			break;
		case ARM_DEBUG_STATE64:
			arm_debug_set64(debug_state);
			break;
		default:
			panic("arm_debug_set");
			break;
		}
	} else {
		if (thread_is_64bit(current_thread()))
			arm_debug_set64(debug_state);
		else
			arm_debug_set32(debug_state);
	}
}
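
/*
 * Usage note (descriptive, grounded in this file): machine_thread_destroy()
 * calls arm_debug_set(NULL) when the dying thread's debug state is live on
 * the current CPU. The NULL path above picks the flavor from the current
 * thread's bitness, and the set32/set64 routines substitute an all-zero
 * "off" state, so the hardware registers are programmed clean.
 */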

#define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)

boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
			return FALSE;
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
			return FALSE;
	}
	return TRUE;
}

boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
			return FALSE;
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
			return FALSE;
	}
	return TRUE;
}

boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i])
			return FALSE;
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i])
			return FALSE;
	}
	return TRUE;
}
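
/*
 * Validation rationale (descriptive note): an enabled breakpoint or
 * watchpoint whose value register points at or above the user address limit
 * could target kernel addresses, so the three validators above reject any
 * state where a bcr/wcr is nonzero but the matching bvr/wvr falls outside
 * user space (VM_MAX_ADDRESS32 for 32-bit state, MACH_VM_MAX_ADDRESS for
 * 64-bit state).
 */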

/*
 * Duplicate one arm_debug_state_t to another. The "all" parameter
 * is ignored in the case of ARM -- is this the right assumption?
 */
void
copy_legacy_debug_state(
	arm_legacy_debug_state_t *src,
	arm_legacy_debug_state_t *target,
	__unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}

void
copy_debug_state32(
	arm_debug_state32_t *src,
	arm_debug_state32_t *target,
	__unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state32_t));
}

void
copy_debug_state64(
	arm_debug_state64_t *src,
	arm_debug_state64_t *target,
	__unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state64_t));
}

kern_return_t
machine_thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base & MACHDEP_CPUNUM_MASK) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread_is_64bit(thread)) {
		if (tsd_base > vm_map_max(thread->map))
			tsd_base = 0ULL;
	} else {
		if (tsd_base > UINT32_MAX)
			tsd_base = 0ULL;
	}

	thread->machine.cthread_self = tsd_base;

	/* For the current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		uint64_t cpunum, tpidrro_el0;

		mp_disable_preemption();
		tpidrro_el0 = get_tpidrro();
		cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
		set_tpidrro(tsd_base | cpunum);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}
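
/*
 * TPIDRRO_EL0 packing sketch (an assumption inferred from the masking above,
 * not authoritative): the register carries the user TSD base in its upper
 * bits and the CPU number in the low MACHDEP_CPUNUM_MASK bits, which is why
 * a tsd_base overlapping the mask is rejected and the two values can be
 * merged with a plain OR.
 */
#if 0 /* example only */
static void
tsd_base_packing_sketch(uint64_t tsd_base)
{
	uint64_t cpunum = get_tpidrro() & MACHDEP_CPUNUM_MASK; /* preserve CPU number bits */
	set_tpidrro(tsd_base | cpunum);                        /* merge TSD base with cpunum */
}
#endif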