/* apple/xnu: osfmk/arm64/pcb.c */
/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kalloc.h>
#include <kern/kpc.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

#define USER_SS_ZONE_ALLOC_SIZE (0x4000)

extern int debug_task;

zone_t ads_zone;     /* zone for debug_state area */
zone_t user_ss_zone; /* zone for user arm_context_t allocations */

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}


/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(thread_t old,
                       thread_continue_t continuation,
                       thread_t new)
{
	thread_t retval;
	pmap_t new_pmap;
	cpu_data_t * cpu_data_ptr;

#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	cpu_data_ptr = getCpuDatap();
	if (old == new)
		panic("machine_switch_context");

	kpc_off_cpu(old);

	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap)
		pmap_switch(new_pmap);

	new->machine.CpuDatap = cpu_data_ptr;

	/* TODO: Should this be ordered? */
	old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
	new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;

	machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);

	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}

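/*
 * Routine: machine_thread_on_core
 *   Reports whether the thread is currently marked as running on a CPU;
 *   the flag is set and cleared in machine_switch_context() and
 *   machine_stack_handoff().
 */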
boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU;
}

/*
 * Routine: machine_thread_create
 *
 */
kern_return_t
machine_thread_create(thread_t thread,
                      task_t task)
{
	arm_context_t *thread_user_ss = NULL;
	kern_return_t result = KERN_SUCCESS;

#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.cthread_data = 0;
#if defined(HAS_APPLE_PAC)
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.disable_user_jop = task->disable_user_jop;
#endif

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);

		if (!thread->machine.contextData) {
			result = KERN_FAILURE;
			goto done;
		}

		thread->machine.upcb = &thread->machine.contextData->ss;
		thread->machine.uNeon = &thread->machine.contextData->ns;

		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}
	} else {
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	result = machine_thread_state_initialize(thread);

done:
	if (result != KERN_SUCCESS) {
		thread_user_ss = thread->machine.contextData;

		if (thread_user_ss) {
			thread->machine.upcb = NULL;
			thread->machine.uNeon = NULL;
			thread->machine.contextData = NULL;
			zfree(user_ss_zone, thread_user_ss);
		}
	}

	return result;
}

/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;

		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		zfree(ads_zone, thread->machine.DebugData);
	}
}


/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
	ads_zone = zinit(sizeof(arm_debug_state_t),
	                 THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	                 THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	                 "arm debug state");

	/*
	 * Create a zone for the user save state.  At the time this zone was
	 * created, the user save state was 848 bytes, while the matching kalloc
	 * bucket was 1024 bytes, so allocating it with kalloc would have wasted
	 * 176 bytes per thread.
	 *
	 * 0x4000 was chosen as the allocation (chunk) size: each 0x4000-byte
	 * chunk holds 19 save states (19 * 848 = 16112 bytes), wasting only
	 * 272 bytes per chunk.
	 */
	user_ss_zone = zinit(sizeof(arm_context_t),
	                     CONFIG_THREAD_MAX * (sizeof(arm_context_t)),
	                     USER_SS_ZONE_ALLOC_SIZE,
	                     "user save state");
}


/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr()
{
	return (get_saved_state_pc(current_thread()->machine.upcb));
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return (stack);
}


/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(thread_t thread,
                     vm_offset_t stack)
{
	struct arm_context *context;
	struct arm_saved_state64 *savestate;
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = saved_state64(&context->ss);
	savestate->fp = 0;
	savestate->sp = thread->machine.kstackptr;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	const uint32_t default_cpsr = PSR64_KERNEL_DEFAULT & ~PSR64_MODE_EL_MASK;
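	/*
	 * The initial saved state is assembled directly in registers and then
	 * signed by ml_sign_thread_state():
	 *   pc   = 0
	 *   cpsr = PSR64_KERNEL_DEFAULT at the current EL
	 *   lr   = thread_continue
	 *   x16/x17 slots = 0
	 * lr is preserved in x6 across the call.
	 */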
	asm volatile (
	    "mov x0, %[ss]"                            "\n"

	    "mov x1, xzr"                              "\n"
	    "str x1, [x0, %[SS64_PC]]"                 "\n"

	    "mov x2, %[default_cpsr_lo]"               "\n"
	    "movk x2, %[default_cpsr_hi], lsl #16"     "\n"
	    "mrs x3, CurrentEL"                        "\n"
	    "orr w2, w2, w3"                           "\n"
	    "str w2, [x0, %[SS64_CPSR]]"               "\n"

	    "adrp x3, _thread_continue@page"           "\n"
	    "add x3, x3, _thread_continue@pageoff"     "\n"
	    "str x3, [x0, %[SS64_LR]]"                 "\n"

	    "mov x4, xzr"                              "\n"
	    "mov x5, xzr"                              "\n"
	    "stp x4, x5, [x0, %[SS64_X16]]"            "\n"

	    "mov x6, lr"                               "\n"
	    "bl _ml_sign_thread_state"                 "\n"
	    "mov lr, x6"                               "\n"
	    :
	    : [ss] "r"(&context->ss),
	      [default_cpsr_lo] "M"(default_cpsr & 0xFFFF),
	      [default_cpsr_hi] "M"(default_cpsr >> 16),
	      [SS64_X16] "i"(offsetof(struct arm_saved_state, ss_64.x[16])),
	      [SS64_PC] "i"(offsetof(struct arm_saved_state, ss_64.pc)),
	      [SS64_CPSR] "i"(offsetof(struct arm_saved_state, ss_64.cpsr)),
	      [SS64_LR] "i"(offsetof(struct arm_saved_state, ss_64.lr))
	    : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
	);
#else
	savestate->lr = (uintptr_t)thread_continue;
	savestate->cpsr = (PSR64_KERNEL_DEFAULT & ~PSR64_MODE_EL_MASK) | current_el;
#endif /* defined(HAS_APPLE_PAC) */
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}


/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(thread_t old,
                      thread_t new)
{
	vm_offset_t stack;
	pmap_t new_pmap;
	cpu_data_t * cpu_data_ptr;

	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
	cpu_data_ptr = getCpuDatap();
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap)
		pmap_switch(new_pmap);

	new->machine.CpuDatap = cpu_data_ptr;

	/* TODO: Should this be ordered? */
	old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
	new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}


/*
 * Routine: call_continuation
 *
 */
void
call_continuation(thread_continue_t continuation,
                  void *parameter,
                  wait_result_t wresult,
                  boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}

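/*
 * Helpers for programming the self-hosted debug registers.  SET_DBGBCRn /
 * SET_DBGWCRn write breakpoint/watchpoint control register n and OR the
 * written value into 'accum', so the caller can later inspect the combined
 * control bits (e.g. to reject privileged or higher-EL settings under
 * CONFIG_KERNEL_INTEGRITY).  SET_DBGBVRn / SET_DBGWVRn write the matching
 * value registers.
 */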
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGBCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGWCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))

void arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data * cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t intr, set_mde = 0;
	arm_debug_state_t off_state;
	uint32_t i;
	uint64_t all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

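	/*
	 * Program every implemented breakpoint and watchpoint pair.  The cases
	 * in the two switches below intentionally fall through from the highest
	 * implemented index down to 0.
	 */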
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
	default:
		break;
	}

	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.bcr[i]) {
			set_mde = 1;
			break;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.wcr[i]) {
			set_mde = 1;
			break;
		}
	}

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (set_mde) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}

void arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data * cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t intr, set_mde = 0;
	arm_debug_state_t off_state;
	uint32_t i;
	uint64_t all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

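	/* As in arm_debug_set32(): the switch cases intentionally fall through over all implemented pairs. */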
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
	default:
		break;
	}

	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->uds.ds64.bcr[i]) {
			set_mde = 1;
			break;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->uds.ds64.wcr[i]) {
			set_mde = 1;
			break;
		}
	}

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (set_mde) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {
		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}

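/*
 * Routine: arm_debug_set
 *   Install the given user debug state on the current CPU, dispatching on
 *   the state's flavor.  A NULL state disables the debug registers using
 *   the flavor matching the current thread's data size.
 */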
void arm_debug_set(arm_debug_state_t *debug_state)
{
	if (debug_state) {
		switch (debug_state->dsh.flavor) {
		case ARM_DEBUG_STATE32:
			arm_debug_set32(debug_state);
			break;
		case ARM_DEBUG_STATE64:
			arm_debug_set64(debug_state);
			break;
		default:
			panic("arm_debug_set");
			break;
		}
	} else {
		if (thread_is_64bit_data(current_thread()))
			arm_debug_set64(debug_state);
		else
			arm_debug_set32(debug_state);
	}
}

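/*
 * Validate user-supplied debug state: every enabled breakpoint or watchpoint
 * must target a user-space address (below VM_MAX_ADDRESS32 for the legacy and
 * 32-bit flavors, below MACH_VM_MAX_ADDRESS for the 64-bit flavor).
 */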
#define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
			return FALSE;
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
			return FALSE;
	}
	return TRUE;
}

boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
			return FALSE;
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
			return FALSE;
	}
	return TRUE;
}

boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i])
			return FALSE;
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i])
			return FALSE;
	}
	return TRUE;
}

/*
 * Duplicate one arm_debug_state_t to another.  The "all" parameter
 * is ignored in the case of ARM -- is this the right assumption?
 */
void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,
                        arm_legacy_debug_state_t * target,
                        __unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}

void
copy_debug_state32(arm_debug_state32_t * src,
                   arm_debug_state32_t * target,
                   __unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state32_t));
}

void
copy_debug_state64(arm_debug_state64_t * src,
                   arm_debug_state64_t * target,
                   __unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state64_t));
}

kern_return_t
machine_thread_set_tsd_base(thread_t thread,
                            mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base & MACHDEP_CPUNUM_MASK) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread_is_64bit_addr(thread)) {
		if (tsd_base > vm_map_max(thread->map))
			tsd_base = 0ULL;
	} else {
		if (tsd_base > UINT32_MAX)
			tsd_base = 0ULL;
	}

	thread->machine.cthread_self = tsd_base;

	/* For current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		uint64_t cpunum, tpidrro_el0;

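		/*
		 * TPIDRRO_EL0 carries the CPU number in its low bits
		 * (MACHDEP_CPUNUM_MASK); preserve it while installing the
		 * new TSD base.
		 */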
		mp_disable_preemption();
		tpidrro_el0 = get_tpidrro();
		cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
		set_tpidrro(tsd_base | cpunum);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}

void
machine_tecs(__unused thread_t thr)
{
}

int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}