]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm64/pcb.c
ff4efbfdddd123c1e15edb22f63ab43983539e57
[apple/xnu.git] / osfmk / arm64 / pcb.c
1 /*
2 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <debug.h>
30
31 #include <types.h>
32
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kalloc.h>
45 #include <kern/kpc.h>
46
47 #if MONOTONIC
48 #include <kern/monotonic.h>
49 #endif /* MONOTONIC */
50
51 #include <machine/atomic.h>
52 #include <arm64/proc_reg.h>
53 #include <arm64/machine_machdep.h>
54 #include <arm/cpu_data_internal.h>
55 #include <arm/machdep_call.h>
56 #include <arm/misc_protos.h>
57 #include <arm/cpuid.h>
58
59 #include <vm/vm_map.h>
60 #include <vm/vm_protos.h>
61
62 #include <sys/kdebug.h>
63
/* Chunk size for the user save-state zone; chosen so each chunk wastes
 * little space (see the rationale comment in machine_thread_init()). */
#define USER_SS_ZONE_ALLOC_SIZE (0x4000)

extern int debug_task;

zone_t ads_zone;     /* zone for debug_state area */
zone_t user_ss_zone; /* zone for user arm_context_t allocations */
70
/*
 * Routine: consider_machine_collect
 *
 * Machine-dependent memory reclamation hook, called when the system
 * wants to free up memory.  On arm64 this triggers a pmap garbage
 * collection pass.
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}
80
/*
 * Routine: consider_machine_adjust
 *
 * Machine-dependent adjustment hook; intentionally a no-op on arm64.
 */
void
consider_machine_adjust(void)
{
}
89
90
/*
 * Routine: machine_switch_context
 *
 * Machine-dependent part of a context switch from 'old' to 'new'.
 * Switches the address space if the two threads belong to different
 * pmaps, records the per-CPU data pointer on the incoming thread,
 * updates the on-CPU flags, and finally performs the register-level
 * switch via Switch_context().
 *
 * Returns the thread we switched back FROM when this thread next runs
 * (the return value of Switch_context).
 */
thread_t
machine_switch_context(thread_t old,
		       thread_continue_t continuation,
		       thread_t new)
{
	thread_t retval;
	pmap_t new_pmap;
	cpu_data_t * cpu_data_ptr;

#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	cpu_data_ptr = getCpuDatap();
	if (old == new)
		panic("machine_switch_context");

	/* Stop PMC collection attribution for the outgoing thread. */
	kpc_off_cpu(old);

	/* Only switch address spaces if the pmap actually changes. */
	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap)
		pmap_switch(new_pmap);

	new->machine.CpuDatap = cpu_data_ptr;

	/* TODO: Should this be ordered? */
	old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
	new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;

	machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);

	/* Low-level register save/restore; does not return until 'old' runs again. */
	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}
133
/*
 * Returns non-zero iff 'thread' is currently flagged as running on a CPU.
 * Note: returns the raw masked flag value, not a normalized TRUE/FALSE.
 */
boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU;
}
139
/*
 * Routine: machine_thread_create
 *
 * Machine-dependent initialization of a newly created thread.  For user
 * threads (task != kernel_task) this allocates the arm_context_t holding
 * the user register (upcb) and NEON (uNeon) save areas and stamps their
 * headers with the flavor/count matching the task's data width.  Kernel
 * threads carry no user save state.
 *
 * Returns KERN_SUCCESS, or a failure code if the save-state allocation
 * or state initialization fails; on failure any allocated save state is
 * detached from the thread and freed before returning.
 */
kern_return_t
machine_thread_create(thread_t thread,
		      task_t task)
{
	arm_context_t *thread_user_ss = NULL;
	kern_return_t result = KERN_SUCCESS;

#define machine_thread_create_kprintf(x...) \
	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	/* Don't clobber the CPU pointer if we're initializing ourselves. */
	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
#if defined(HAS_APPLE_PAC)
	/* Inherit the task's pointer-authentication configuration. */
	thread->machine.rop_pid = task->rop_pid;
	thread->machine.disable_user_jop = task->disable_user_jop;
#endif

	if (task != kernel_task) {
		/* If this isn't a kernel thread, we'll have userspace state. */
		thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);

		if (!thread->machine.contextData) {
			result = KERN_FAILURE;
			goto done;
		}

		thread->machine.upcb = &thread->machine.contextData->ss;
		thread->machine.uNeon = &thread->machine.contextData->ns;

		/* Stamp save-state headers to match the task's data width. */
		if (task_has_64Bit_data(task)) {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		} else {
			thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
			thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
			thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
			thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		}

	} else {
		/* Kernel threads never take user exceptions; no user state needed. */
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;
	}

	bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
	result = machine_thread_state_initialize(thread);

done:
	if (result != KERN_SUCCESS) {
		/* Unwind: detach the save state from the thread before freeing it. */
		thread_user_ss = thread->machine.contextData;

		if (thread_user_ss) {
			thread->machine.upcb = NULL;
			thread->machine.uNeon = NULL;
			thread->machine.contextData = NULL;
			zfree(user_ss_zone, thread_user_ss);
		}
	}

	return result;
}
215
/*
 * Routine: machine_thread_destroy
 *
 * Machine-dependent teardown of a thread: frees the user save-state
 * context (if any) and the debug-state area, clearing the CPU's cached
 * user debug registers first if this thread's state is currently live.
 */
void
machine_thread_destroy(thread_t thread)
{
	arm_context_t *thread_user_ss;

	if (thread->machine.contextData) {
		/* Disassociate the user save state from the thread before we free it. */
		thread_user_ss = thread->machine.contextData;
		thread->machine.upcb = NULL;
		thread->machine.uNeon = NULL;
		thread->machine.contextData = NULL;

		zfree(user_ss_zone, thread_user_ss);
	}

	if (thread->machine.DebugData != NULL) {
		/* If this thread's debug state is loaded on this CPU, disable it. */
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}

		zfree(ads_zone, thread->machine.DebugData);
	}
}
244
245
/*
 * Routine: machine_thread_init
 *
 * One-time initialization of the machine-dependent thread layer:
 * creates the zones used for per-thread debug state and user
 * save-state allocations.
 */
void
machine_thread_init(void)
{
	ads_zone = zinit(sizeof(arm_debug_state_t),
	    THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	    THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	    "arm debug state");

	/*
	 * Create a zone for the user save state. At the time this zone was created,
	 * the user save state was 848 bytes, and the matching kalloc zone was 1024
	 * bytes, which would result in significant amounts of wasted space if we
	 * simply used kalloc to allocate the user saved state.
	 *
	 * 0x4000 has been chosen as the allocation size, as it results in 272 bytes
	 * of wasted space per chunk, which should correspond to 19 allocations.
	 */
	user_ss_zone = zinit(sizeof(arm_context_t),
	    CONFIG_THREAD_MAX * (sizeof(arm_context_t)),
	    USER_SS_ZONE_ALLOC_SIZE,
	    "user save state");

}
273
/*
 * Routine: machine_thread_template_init
 *
 * Hook for initializing the per-task thread template; arm64 keeps no
 * machine-dependent template state, so this is a no-op.
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}
283
284 /*
285 * Routine: get_useraddr
286 *
287 */
288 user_addr_t
289 get_useraddr()
290 {
291 return (get_saved_state_pc(current_thread()->machine.upcb));
292 }
293
294 /*
295 * Routine: machine_stack_detach
296 *
297 */
298 vm_offset_t
299 machine_stack_detach(thread_t thread)
300 {
301 vm_offset_t stack;
302
303 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
304 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
305
306 stack = thread->kernel_stack;
307 thread->kernel_stack = 0;
308 thread->machine.kstackptr = 0;
309
310 return (stack);
311 }
312
313
/*
 * Routine: machine_stack_attach
 *
 * Attach a kernel stack to 'thread' and build an initial kernel saved
 * state at the top of the stack so that the first switch to this thread
 * "returns" into thread_continue().  On PAC-enabled hardware the saved
 * state must also be signed (via ml_sign_thread_state), which is done
 * with hand-written assembly so the exact fields being signed are under
 * our control.
 */
void
machine_stack_attach(thread_t thread,
		     vm_offset_t stack)
{
	struct arm_context *context;
	struct arm_saved_state64 *savestate;
	uint32_t current_el;

#define machine_stack_attach_kprintf(x...) \
	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	/* Kernel state lives at the top of the stack, below thread_kernel_state. */
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);

	machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

	current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
	context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
	savestate = saved_state64(&context->ss);
	savestate->fp = 0;
	savestate->sp = thread->machine.kstackptr;
#if defined(HAS_APPLE_PAC)
	/* Sign the initial kernel stack saved state */
	/* NOTE: in this build the asm reads CurrentEL itself; current_el above
	 * is only consumed by the non-PAC path below. */
	const uint32_t default_cpsr = PSR64_KERNEL_DEFAULT & ~PSR64_MODE_EL_MASK;
	asm volatile (
		"mov x0, %[ss]" "\n"

		"mov x1, xzr" "\n"
		"str x1, [x0, %[SS64_PC]]" "\n"

		"mov x2, %[default_cpsr_lo]" "\n"
		"movk x2, %[default_cpsr_hi], lsl #16" "\n"
		"mrs x3, CurrentEL" "\n"
		"orr w2, w2, w3" "\n"
		"str w2, [x0, %[SS64_CPSR]]" "\n"

		"adrp x3, _thread_continue@page" "\n"
		"add x3, x3, _thread_continue@pageoff" "\n"
		"str x3, [x0, %[SS64_LR]]" "\n"

		"mov x4, xzr" "\n"
		"mov x5, xzr" "\n"
		"stp x4, x5, [x0, %[SS64_X16]]" "\n"

		"mov x6, lr" "\n"
		"bl _ml_sign_thread_state" "\n"
		"mov lr, x6" "\n"
		:
		: [ss] "r"(&context->ss),
		  [default_cpsr_lo] "M"(default_cpsr & 0xFFFF),
		  [default_cpsr_hi] "M"(default_cpsr >> 16),
		  [SS64_X16] "i"(offsetof(struct arm_saved_state, ss_64.x[16])),
		  [SS64_PC] "i"(offsetof(struct arm_saved_state, ss_64.pc)),
		  [SS64_CPSR] "i"(offsetof(struct arm_saved_state, ss_64.cpsr)),
		  [SS64_LR] "i"(offsetof(struct arm_saved_state, ss_64.lr))
		: "x0", "x1", "x2", "x3", "x4", "x5", "x6"
	);
#else
	/* First dispatch "returns" into thread_continue at the current EL. */
	savestate->lr = (uintptr_t)thread_continue;
	savestate->cpsr = (PSR64_KERNEL_DEFAULT & ~PSR64_MODE_EL_MASK) | current_el;
#endif /* defined(HAS_APPLE_PAC) */
	machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
}
385
386
/*
 * Routine: machine_stack_handoff
 *
 * Hand the current kernel stack directly from 'old' to 'new' (stack
 * handoff avoids a full context switch).  Also swaps reserved-stack
 * ownership if 'old' was running on its reserved stack, switches the
 * address space if needed, and updates on-CPU bookkeeping.
 */
void
machine_stack_handoff(thread_t old,
		      thread_t new)
{
	vm_offset_t stack;
	pmap_t new_pmap;
	cpu_data_t * cpu_data_ptr;

	/* Stop PMC collection attribution for the outgoing thread. */
	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
	cpu_data_ptr = getCpuDatap();
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	/* If 'old' was on its reserved stack, transfer reservation ownership. */
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	/* Only switch address spaces if the pmap actually changes. */
	new_pmap = new->map->pmap;
	if (old->map->pmap != new_pmap)
		pmap_switch(new_pmap);

	new->machine.CpuDatap = cpu_data_ptr;

	/* TODO: Should this be ordered? */
	old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
	new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}
429
430
/*
 * Routine: call_continuation
 *
 * Invoke a thread continuation on the current thread's kernel stack,
 * delegating to the assembly trampoline Call_continuation (which resets
 * the stack pointer before calling 'continuation').
 */
void
call_continuation(thread_continue_t continuation,
		  void *parameter,
		  wait_result_t wresult,
		  boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
447
/*
 * Helpers for programming the self-hosted debug registers.  The *CR
 * (control) variants also OR the written value into 'accum' so the
 * caller can inspect the union of all control bits afterwards (used to
 * reject privileged-mode debug configurations).
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGBCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGWCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
465
/*
 * Load a 32-bit-flavor user debug state into the CPU's debug registers.
 * Passing NULL disables user debug on this CPU (all registers zeroed).
 * Runs with interrupts disabled so the register programming and the
 * cpu_user_debug cache stay consistent.
 */
void arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data * cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t intr, set_mde = 0;
	arm_debug_state_t off_state;
	uint32_t i;
	uint64_t all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		/* Disable: program an all-zero state into the hardware. */
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	/* Cases deliberately fall through: program breakpoint pairs from
	 * the highest implemented pair down to pair 0. */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
	default:
		break;
	}

	/* Cases deliberately fall through: program watchpoint pairs from
	 * the highest implemented pair down to pair 0. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Reject any control value that would arm debug in privileged modes. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/* MDE is needed only if at least one breakpoint or watchpoint is armed. */
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.bcr[i]) {
			set_mde = 1;
			break;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.wcr[i]) {
			set_mde = 1;
			break;
		}
	}

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (set_mde) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {

		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}
642
643 void arm_debug_set64(arm_debug_state_t *debug_state)
644 {
645 struct cpu_data * cpu_data_ptr;
646 arm_debug_info_t * debug_info = arm_debug_info();
647 boolean_t intr, set_mde = 0;
648 arm_debug_state_t off_state;
649 uint32_t i;
650 uint64_t all_ctrls = 0;
651
652 intr = ml_set_interrupts_enabled(FALSE);
653 cpu_data_ptr = getCpuDatap();
654
655 // Set current user debug
656 cpu_data_ptr->cpu_user_debug = debug_state;
657
658 if (NULL == debug_state) {
659 bzero(&off_state, sizeof(off_state));
660 debug_state = &off_state;
661 }
662
663 switch (debug_info->num_breakpoint_pairs) {
664 case 16:
665 SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
666 SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
667 case 15:
668 SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
669 SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
670 case 14:
671 SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
672 SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
673 case 13:
674 SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
675 SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
676 case 12:
677 SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
678 SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
679 case 11:
680 SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
681 SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
682 case 10:
683 SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
684 SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
685 case 9:
686 SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
687 SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
688 case 8:
689 SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
690 SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
691 case 7:
692 SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
693 SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
694 case 6:
695 SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
696 SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
697 case 5:
698 SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
699 SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
700 case 4:
701 SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
702 SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
703 case 3:
704 SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
705 SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
706 case 2:
707 SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
708 SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
709 case 1:
710 SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
711 SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
712 default:
713 break;
714 }
715
716 switch (debug_info->num_watchpoint_pairs) {
717 case 16:
718 SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
719 SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
720 case 15:
721 SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
722 SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
723 case 14:
724 SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
725 SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
726 case 13:
727 SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
728 SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
729 case 12:
730 SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
731 SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
732 case 11:
733 SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
734 SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
735 case 10:
736 SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
737 SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
738 case 9:
739 SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
740 SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
741 case 8:
742 SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
743 SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
744 case 7:
745 SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
746 SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
747 case 6:
748 SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
749 SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
750 case 5:
751 SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
752 SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
753 case 4:
754 SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
755 SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
756 case 3:
757 SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
758 SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
759 case 2:
760 SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
761 SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
762 case 1:
763 SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
764 SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
765 default:
766 break;
767 }
768
769 #if defined(CONFIG_KERNEL_INTEGRITY)
770 if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
771 panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
772 }
773 #endif
774
775 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
776 if (0 != debug_state->uds.ds64.bcr[i]) {
777 set_mde = 1;
778 break;
779 }
780 }
781
782 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
783 if (0 != debug_state->uds.ds64.wcr[i]) {
784 set_mde = 1;
785 break;
786 }
787 }
788
789 /*
790 * Breakpoint/Watchpoint Enable
791 */
792 if (set_mde) {
793 update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
794 }
795
796 /*
797 * Software debug single step enable
798 */
799 if (debug_state->uds.ds64.mdscr_el1 & 0x1) {
800
801 update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)
802
803 mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
804 } else {
805
806 update_mdscr(0x1, 0);
807
808 #if SINGLE_STEP_RETIRE_ERRATA
809 // Workaround for radar 20619637
810 __builtin_arm_isb(ISB_SY);
811 #endif
812 }
813
814 (void) ml_set_interrupts_enabled(intr);
815
816 return;
817 }
818
819 void arm_debug_set(arm_debug_state_t *debug_state)
820 {
821 if (debug_state) {
822 switch (debug_state->dsh.flavor) {
823 case ARM_DEBUG_STATE32:
824 arm_debug_set32(debug_state);
825 break;
826 case ARM_DEBUG_STATE64:
827 arm_debug_set64(debug_state);
828 break;
829 default:
830 panic("arm_debug_set");
831 break;
832 }
833 } else {
834 if (thread_is_64bit_data(current_thread()))
835 arm_debug_set64(debug_state);
836 else
837 arm_debug_set32(debug_state);
838 }
839 }
840
841 #define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
842 boolean_t
843 debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
844 {
845 arm_debug_info_t *debug_info = arm_debug_info();
846 uint32_t i;
847 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
848 if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
849 return FALSE;
850 }
851
852 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
853 if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
854 return FALSE;
855 }
856 return TRUE;
857 }
858
859 boolean_t
860 debug_state_is_valid32(arm_debug_state32_t *debug_state)
861 {
862 arm_debug_info_t *debug_info = arm_debug_info();
863 uint32_t i;
864 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
865 if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
866 return FALSE;
867 }
868
869 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
870 if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
871 return FALSE;
872 }
873 return TRUE;
874 }
875
876 boolean_t
877 debug_state_is_valid64(arm_debug_state64_t *debug_state)
878 {
879 arm_debug_info_t *debug_info = arm_debug_info();
880 uint32_t i;
881 for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
882 if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i])
883 return FALSE;
884 }
885
886 for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
887 if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i])
888 return FALSE;
889 }
890 return TRUE;
891 }
892
/*
 * Duplicate one arm_debug_state_t to another. "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 *
 * bcopy (rather than struct assignment) is used so every byte of the
 * destination, including padding, is defined.
 */
void
copy_legacy_debug_state(arm_legacy_debug_state_t * src,
			arm_legacy_debug_state_t * target,
			__unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}
904
/* Duplicate a 32-bit debug state; "all" is ignored on ARM (see above). */
void
copy_debug_state32(arm_debug_state32_t * src,
		   arm_debug_state32_t * target,
		   __unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state32_t));
}
912
/* Duplicate a 64-bit debug state; "all" is ignored on ARM (see above). */
void
copy_debug_state64(arm_debug_state64_t * src,
		   arm_debug_state64_t * target,
		   __unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state64_t));
}
920
/*
 * Set the thread's TSD (thread-local storage) base address, which is
 * exported to user space through TPIDRRO_EL0.  Rejects kernel threads
 * and bases that collide with the CPU-number bits; out-of-range bases
 * are silently replaced with 0 rather than rejected.  If the target is
 * the current thread, the hardware register is updated immediately
 * (with preemption disabled so the CPU-number bits stay consistent).
 */
kern_return_t
machine_thread_set_tsd_base(thread_t thread,
			    mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	/* The low bits of TPIDRRO_EL0 encode the CPU number; a TSD base
	 * overlapping them cannot be represented. */
	if (tsd_base & MACHDEP_CPUNUM_MASK) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Clamp out-of-range bases to 0 instead of failing. */
	if (thread_is_64bit_addr(thread)) {
		if (tsd_base > vm_map_max(thread->map))
			tsd_base = 0ULL;
	} else {
		if (tsd_base > UINT32_MAX)
			tsd_base = 0ULL;
	}

	thread->machine.cthread_self = tsd_base;

	/* For current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		uint64_t cpunum, tpidrro_el0;

		mp_disable_preemption();
		tpidrro_el0 = get_tpidrro();
		/* Preserve the CPU-number bits while swapping in the new base. */
		cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
		set_tpidrro(tsd_base | cpunum);
		mp_enable_preemption();

	}

	return KERN_SUCCESS;
}
957
/* Thread exception-control hook; no-op on this platform. */
void
machine_tecs(__unused thread_t thr)
{
}
962
/* CPU-security-variant mitigation query; this platform reports none (0). */
int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}