/* osfmk/arm64/pcb.c — from the apple/xnu source distribution (xnu-6153.121.1) */
1 /*
2 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <debug.h>
30
31 #include <types.h>
32
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kalloc.h>
45 #include <kern/kpc.h>
46
47 #if MONOTONIC
48 #include <kern/monotonic.h>
49 #endif /* MONOTONIC */
50
51 #include <machine/atomic.h>
52 #include <arm64/proc_reg.h>
53 #include <arm64/machine_machdep.h>
54 #include <arm/cpu_data_internal.h>
55 #include <arm/machdep_call.h>
56 #include <arm/misc_protos.h>
57 #include <arm/cpuid.h>
58
59 #include <vm/vm_map.h>
60 #include <vm/vm_protos.h>
61
62 #include <sys/kdebug.h>
63
64 #define USER_SS_ZONE_ALLOC_SIZE (0x4000)
65
66 extern int debug_task;
67
68 zone_t ads_zone; /* zone for debug_state area */
69 zone_t user_ss_zone; /* zone for user arm_context_t allocations */
70
/*
 * Routine: consider_machine_collect
 *
 * Machine-dependent hook invoked when the system wants memory back;
 * reclaims machine-level resources by garbage-collecting unused
 * pmap (physical map) structures.
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}
80
/*
 * Routine: consider_machine_adjust
 *
 * Machine-dependent memory-pressure adjustment hook; this platform
 * has nothing to adjust, so the body is intentionally empty.
 */
void
consider_machine_adjust(void)
{
}
89
90
91 /*
92 * Routine: machine_switch_context
93 *
94 */
95 thread_t
96 machine_switch_context(thread_t old,
97 thread_continue_t continuation,
98 thread_t new)
99 {
100 thread_t retval;
101 pmap_t new_pmap;
102 cpu_data_t * cpu_data_ptr;
103
104 #define machine_switch_context_kprintf(x...) \
105 /* kprintf("machine_switch_context: " x) */
106
107 cpu_data_ptr = getCpuDatap();
108 if (old == new)
109 panic("machine_switch_context");
110
111 kpc_off_cpu(old);
112
113
114
115 new_pmap = new->map->pmap;
116 if (old->map->pmap != new_pmap)
117 pmap_switch(new_pmap);
118
119
120 new->machine.CpuDatap = cpu_data_ptr;
121
122 /* TODO: Should this be ordered? */
123 old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
124 new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;
125
126 machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);
127
128 retval = Switch_context(old, continuation, new);
129 assert(retval != NULL);
130
131 return retval;
132 }
133
134 boolean_t
135 machine_thread_on_core(thread_t thread)
136 {
137 return thread->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU;
138 }
139
/*
 * Routine: machine_thread_create
 *
 * Initialize the machine-dependent portion of a newly created thread:
 * per-CPU bookkeeping, the TSD base, PAC configuration (when built with
 * HAS_APPLE_PAC), and — for user threads — the zone-allocated user
 * register save area.  Returns KERN_SUCCESS, or KERN_FAILURE if the
 * save area could not be allocated / state initialization failed.
 */
kern_return_t
machine_thread_create(thread_t thread,
	task_t task)
{
	arm_context_t *thread_user_ss = NULL;
	kern_return_t result = KERN_SUCCESS;

#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	/*
	 * A thread initializing itself is already running on a CPU whose
	 * CpuDatap is live; only clear the pointer for some other thread.
	 */
	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
#if defined(HAS_APPLE_PAC)
	/* Inherit the task's pointer-authentication settings. */
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.disable_user_jop = task->disable_user_jop;
#endif


	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);

		if (!thread->machine.contextData) {
			result = KERN_FAILURE;
			goto done;
		}

		/* Point the save-state handles into the freshly allocated context. */
		thread->machine.upcb = &thread->machine.contextData->ss;
		thread->machine.uNeon = &thread->machine.contextData->ns;

		/* Tag the save areas with the flavor matching the task's data ABI. */
		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}

	} else {
		/* Kernel threads never return to user mode; no user state needed. */
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}


	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	result = machine_thread_state_initialize(thread);

done:
	/* On failure, detach and free any user save state allocated above. */
	if (result != KERN_SUCCESS) {
		thread_user_ss = thread->machine.contextData;

		if (thread_user_ss) {
			thread->machine.upcb = NULL;
			thread->machine.uNeon = NULL;
			thread->machine.contextData = NULL;
			zfree(user_ss_zone, thread_user_ss);
		}
	}

	return result;
}
215
216 /*
217 * Routine: machine_thread_destroy
218 *
219 */
220 void
221 machine_thread_destroy(thread_t thread)
222 {
223 arm_context_t *thread_user_ss;
224
225 if (thread->machine.contextData) {
226 /* Disassociate the user save state from the thread before we free it. */
227 thread_user_ss = thread->machine.contextData;
228 thread->machine.upcb = NULL;
229 thread->machine.uNeon = NULL;
230 thread->machine.contextData = NULL;
231
232
233 zfree(user_ss_zone, thread_user_ss);
234 }
235
236 if (thread->machine.DebugData != NULL) {
237 if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
238 arm_debug_set(NULL);
239 }
240
241 zfree(ads_zone, thread->machine.DebugData);
242 }
243 }
244
245
/*
 * Routine: machine_thread_init
 *
 * Create the zones backing per-thread machine state: one for hardware
 * debug state (ads_zone) and one for the user register save area
 * (user_ss_zone).  Called once during kernel startup.
 */
void
machine_thread_init(void)
{
	ads_zone = zinit(sizeof(arm_debug_state_t),
	    THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	    THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	    "arm debug state");

	/*
	 * Create a zone for the user save state. At the time this zone was created,
	 * the user save state was 848 bytes, and the matching kalloc zone was 1024
	 * bytes, which would result in significant amounts of wasted space if we
	 * simply used kalloc to allocate the user saved state.
	 *
	 * 0x4000 has been chosen as the allocation size, as it results in 272 bytes
	 * of wasted space per chunk, which should correspond to 19 allocations.
	 */
	user_ss_zone = zinit(sizeof(arm_context_t),
	    CONFIG_THREAD_MAX * (sizeof(arm_context_t)),
	    USER_SS_ZONE_ALLOC_SIZE,
	    "user save state");

}
273
/*
 * Routine: machine_thread_template_init
 *
 * Hook for initializing machine state in a thread template; unused on
 * this platform.
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}
283
284 /*
285 * Routine: get_useraddr
286 *
287 */
288 user_addr_t
289 get_useraddr()
290 {
291 return (get_saved_state_pc(current_thread()->machine.upcb));
292 }
293
294 /*
295 * Routine: machine_stack_detach
296 *
297 */
298 vm_offset_t
299 machine_stack_detach(thread_t thread)
300 {
301 vm_offset_t stack;
302
303 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
304 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
305
306 stack = thread->kernel_stack;
307 thread->kernel_stack = 0;
308 thread->machine.kstackptr = 0;
309
310 return (stack);
311 }
312
313
/*
 * Routine: machine_stack_attach
 *
 * Attach a kernel stack to a thread and build the initial saved state
 * at the top of that stack so the thread resumes in thread_continue().
 * When pointer authentication is enabled, the saved state must be
 * signed in one uninterruptible sequence via ml_sign_thread_state.
 */
void
machine_stack_attach(thread_t thread,
	vm_offset_t stack)
{
	struct arm_context *context;
	struct arm_saved_state64 *savestate;
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	/* The kernel thread state lives at the top of the stack. */
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = saved_state64(&context->ss);
	savestate->fp = 0;
	savestate->sp = thread->machine.kstackptr;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	const uint32_t default_cpsr = PSR64_KERNEL_DEFAULT & ~PSR64_MODE_EL_MASK;
	boolean_t intr = ml_set_interrupts_enabled(FALSE);
	/*
	 * Populate pc, cpsr, lr and x16/x17 directly in assembly and call
	 * ml_sign_thread_state without spilling to memory, so the fields
	 * cannot be tampered with between being written and being signed.
	 */
	asm volatile (
		"mov x0, %[ss]" "\n"

		"mov x1, xzr" "\n"
		"str x1, [x0, %[SS64_PC]]" "\n"

		"mov x2, %[default_cpsr_lo]" "\n"
		"movk x2, %[default_cpsr_hi], lsl #16" "\n"
		"mrs x3, CurrentEL" "\n"
		"orr w2, w2, w3" "\n"
		"str w2, [x0, %[SS64_CPSR]]" "\n"

		"adrp x3, _thread_continue@page" "\n"
		"add x3, x3, _thread_continue@pageoff" "\n"
		"str x3, [x0, %[SS64_LR]]" "\n"

		"mov x4, xzr" "\n"
		"mov x5, xzr" "\n"
		"stp x4, x5, [x0, %[SS64_X16]]" "\n"

		"mov x6, lr" "\n"
		"bl _ml_sign_thread_state" "\n"
		"mov lr, x6" "\n"
		:
		: [ss] "r"(&context->ss),
		[default_cpsr_lo] "M"(default_cpsr & 0xFFFF),
		[default_cpsr_hi] "M"(default_cpsr >> 16),
		[SS64_X16] "i"(offsetof(struct arm_saved_state, ss_64.x[16])),
		[SS64_PC] "i"(offsetof(struct arm_saved_state, ss_64.pc)),
		[SS64_CPSR] "i"(offsetof(struct arm_saved_state, ss_64.cpsr)),
		[SS64_LR] "i"(offsetof(struct arm_saved_state, ss_64.lr))
		: "x0", "x1", "x2", "x3", "x4", "x5", "x6"
	);
	ml_set_interrupts_enabled(intr);
#else
	/* No PAC: set the resume point and CPSR (current EL preserved) directly. */
	savestate->lr = (uintptr_t)thread_continue;
	savestate->cpsr = (PSR64_KERNEL_DEFAULT & ~PSR64_MODE_EL_MASK) | current_el;
#endif /* defined(HAS_APPLE_PAC) */
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}
387
388
389 /*
390 * Routine: machine_stack_handoff
391 *
392 */
393 void
394 machine_stack_handoff(thread_t old,
395 thread_t new)
396 {
397 vm_offset_t stack;
398 pmap_t new_pmap;
399 cpu_data_t * cpu_data_ptr;
400
401 kpc_off_cpu(old);
402
403 stack = machine_stack_detach(old);
404 cpu_data_ptr = getCpuDatap();
405 new->kernel_stack = stack;
406 new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
407 if (stack == old->reserved_stack) {
408 assert(new->reserved_stack);
409 old->reserved_stack = new->reserved_stack;
410 new->reserved_stack = stack;
411 }
412
413
414
415 new_pmap = new->map->pmap;
416 if (old->map->pmap != new_pmap)
417 pmap_switch(new_pmap);
418
419
420 new->machine.CpuDatap = cpu_data_ptr;
421
422 /* TODO: Should this be ordered? */
423 old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
424 new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;
425
426 machine_set_current_thread(new);
427 thread_initialize_kernel_state(new);
428
429 return;
430 }
431
432
/*
 * Routine: call_continuation
 *
 * Trampoline into a thread continuation on the current thread's kernel
 * stack via the assembly helper Call_continuation; does not return to
 * the caller.
 */
void
call_continuation(thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
449
/*
 * Helpers for programming the EL1 hardware debug registers.
 *
 * SET_DBGBCRn / SET_DBGWCRn write breakpoint/watchpoint control register
 * 'n' and also OR the written value into 'accum', so the caller can later
 * inspect the union of all control bits that were programmed.
 * SET_DBGBVRn / SET_DBGWVRn write the corresponding value registers.
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGBCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGWCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
467
/*
 * Load a 32-bit-flavor user debug state into the EL1 debug registers and
 * record it as this CPU's current user debug state.  A NULL debug_state
 * means "debug off": an all-zero state is programmed instead.
 */
void arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data * cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t intr, set_mde = 0;
	arm_debug_state_t off_state;
	uint32_t i;
	uint64_t all_ctrls = 0;

	/* Keep register programming atomic with respect to this CPU. */
	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		/* NULL means disable: write zeros to every implemented register. */
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	/*
	 * Program only as many breakpoint pairs as the hardware implements.
	 * Every case intentionally falls through, so entering at case N loads
	 * registers N-1 down through 0.  all_ctrls accumulates every control
	 * value written, for the kernel-integrity check below.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
	default:
		break;
	}

	/* Same intentional-fallthrough scheme for the watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse any state that would enable kernel-mode (self-hosted) debug. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/* MDE should be set iff at least one breakpoint or watchpoint is enabled. */
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.bcr[i]) {
			set_mde = 1;
			break;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.wcr[i]) {
			set_mde = 1;
			break;
		}
	}

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (set_mde) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		/* Arm the SS bit in the user saved state so the step takes effect. */
		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {

		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}
644
/*
 * Load a 64-bit-flavor user debug state into the EL1 debug registers and
 * record it as this CPU's current user debug state.  A NULL debug_state
 * means "debug off": an all-zero state is programmed instead.
 */
void arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data * cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t intr, set_mde = 0;
	arm_debug_state_t off_state;
	uint32_t i;
	uint64_t all_ctrls = 0;

	/* Keep register programming atomic with respect to this CPU. */
	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		/* NULL means disable: write zeros to every implemented register. */
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	/*
	 * Program only as many breakpoint pairs as the hardware implements.
	 * Every case intentionally falls through, so entering at case N loads
	 * registers N-1 down through 0.  all_ctrls accumulates every control
	 * value written, for the kernel-integrity check below.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
	default:
		break;
	}

	/* Same intentional-fallthrough scheme for the watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse any state that would enable kernel-mode (self-hosted) debug. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/* MDE should be set iff at least one breakpoint or watchpoint is enabled. */
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->uds.ds64.bcr[i]) {
			set_mde = 1;
			break;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->uds.ds64.wcr[i]) {
			set_mde = 1;
			break;
		}
	}

	/*
	 * Breakpoint/Watchpoint Enable
	 *
	 * NOTE(review): unlike arm_debug_set32(), there is no else branch
	 * clearing MDE when no breakpoints/watchpoints remain enabled —
	 * confirm whether leaving MDE set here is intentional.
	 */
	if (set_mde) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {

		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		/* Arm the SS bit in the user saved state so the step takes effect. */
		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {

		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}
820
821 void arm_debug_set(arm_debug_state_t *debug_state)
822 {
823 if (debug_state) {
824 switch (debug_state->dsh.flavor) {
825 case ARM_DEBUG_STATE32:
826 arm_debug_set32(debug_state);
827 break;
828 case ARM_DEBUG_STATE64:
829 arm_debug_set64(debug_state);
830 break;
831 default:
832 panic("arm_debug_set");
833 break;
834 }
835 } else {
836 if (thread_is_64bit_data(current_thread()))
837 arm_debug_set64(debug_state);
838 else
839 arm_debug_set32(debug_state);
840 }
841 }
842
843 #define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
844 boolean_t
845 debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
846 {
847 arm_debug_info_t *debug_info = arm_debug_info();
848 uint32_t i;
849 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
850 if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
851 return FALSE;
852 }
853
854 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
855 if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
856 return FALSE;
857 }
858 return TRUE;
859 }
860
861 boolean_t
862 debug_state_is_valid32(arm_debug_state32_t *debug_state)
863 {
864 arm_debug_info_t *debug_info = arm_debug_info();
865 uint32_t i;
866 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
867 if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
868 return FALSE;
869 }
870
871 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
872 if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
873 return FALSE;
874 }
875 return TRUE;
876 }
877
878 boolean_t
879 debug_state_is_valid64(arm_debug_state64_t *debug_state)
880 {
881 arm_debug_info_t *debug_info = arm_debug_info();
882 uint32_t i;
883 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
884 if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i])
885 return FALSE;
886 }
887
888 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
889 if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i])
890 return FALSE;
891 }
892 return TRUE;
893 }
894
895 /*
896 * Duplicate one arm_debug_state_t to another. "all" parameter
897 * is ignored in the case of ARM -- Is this the right assumption?
898 */
899 void
900 copy_legacy_debug_state(arm_legacy_debug_state_t * src,
901 arm_legacy_debug_state_t * target,
902 __unused boolean_t all)
903 {
904 bcopy(src, target, sizeof(arm_legacy_debug_state_t));
905 }
906
907 void
908 copy_debug_state32(arm_debug_state32_t * src,
909 arm_debug_state32_t * target,
910 __unused boolean_t all)
911 {
912 bcopy(src, target, sizeof(arm_debug_state32_t));
913 }
914
915 void
916 copy_debug_state64(arm_debug_state64_t * src,
917 arm_debug_state64_t * target,
918 __unused boolean_t all)
919 {
920 bcopy(src, target, sizeof(arm_debug_state64_t));
921 }
922
923 kern_return_t
924 machine_thread_set_tsd_base(thread_t thread,
925 mach_vm_offset_t tsd_base)
926 {
927 if (thread->task == kernel_task) {
928 return KERN_INVALID_ARGUMENT;
929 }
930
931 if (tsd_base & MACHDEP_CPUNUM_MASK) {
932 return KERN_INVALID_ARGUMENT;
933 }
934
935 if (thread_is_64bit_addr(thread)) {
936 if (tsd_base > vm_map_max(thread->map))
937 tsd_base = 0ULL;
938 } else {
939 if (tsd_base > UINT32_MAX)
940 tsd_base = 0ULL;
941 }
942
943 thread->machine.cthread_self = tsd_base;
944
945 /* For current thread, make the TSD base active immediately */
946 if (thread == current_thread()) {
947 uint64_t cpunum, tpidrro_el0;
948
949 mp_disable_preemption();
950 tpidrro_el0 = get_tpidrro();
951 cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
952 set_tpidrro(tsd_base | cpunum);
953 mp_enable_preemption();
954
955 }
956
957 return KERN_SUCCESS;
958 }
959
/*
 * Machine-dependent per-thread hook; intentionally a no-op on this
 * platform.
 */
void
machine_tecs(__unused thread_t thr)
{
}
964
/*
 * Machine-dependent query taking a cpuvn_e selector; always reports 0 on
 * this platform.  (NOTE(review): cpuvn_e semantics are defined elsewhere
 * — confirm what a zero result conveys to callers.)
 */
int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}