]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm64/pcb.c
xnu-6153.61.1.tar.gz
[apple/xnu.git] / osfmk / arm64 / pcb.c
1 /*
2 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <debug.h>
30
31 #include <types.h>
32
33 #include <mach/mach_types.h>
34 #include <mach/thread_status.h>
35 #include <mach/vm_types.h>
36
37 #include <kern/kern_types.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/misc_protos.h>
41 #include <kern/mach_param.h>
42 #include <kern/spl.h>
43 #include <kern/machine.h>
44 #include <kern/kalloc.h>
45 #include <kern/kpc.h>
46
47 #if MONOTONIC
48 #include <kern/monotonic.h>
49 #endif /* MONOTONIC */
50
51 #include <machine/atomic.h>
52 #include <arm64/proc_reg.h>
53 #include <arm64/machine_machdep.h>
54 #include <arm/cpu_data_internal.h>
55 #include <arm/machdep_call.h>
56 #include <arm/misc_protos.h>
57 #include <arm/cpuid.h>
58
59 #include <vm/vm_map.h>
60 #include <vm/vm_protos.h>
61
62 #include <sys/kdebug.h>
63
64 #define USER_SS_ZONE_ALLOC_SIZE (0x4000)
65
66 extern int debug_task;
67
68 zone_t ads_zone; /* zone for debug_state area */
69 zone_t user_ss_zone; /* zone for user arm_context_t allocations */
70
71 /*
72 * Routine: consider_machine_collect
73 *
74 */
75 void
76 consider_machine_collect(void)
77 {
78 pmap_gc();
79 }
80
81 /*
82 * Routine: consider_machine_adjust
83 *
84 */
85 void
86 consider_machine_adjust(void)
87 {
88 }
89
90
91 /*
92 * Routine: machine_switch_context
93 *
94 */
95 thread_t
96 machine_switch_context(thread_t old,
97 thread_continue_t continuation,
98 thread_t new)
99 {
100 thread_t retval;
101 pmap_t new_pmap;
102 cpu_data_t * cpu_data_ptr;
103
104 #define machine_switch_context_kprintf(x...) \
105 /* kprintf("machine_switch_context: " x) */
106
107 cpu_data_ptr = getCpuDatap();
108 if (old == new)
109 panic("machine_switch_context");
110
111 kpc_off_cpu(old);
112
113
114
115 new_pmap = new->map->pmap;
116 if (old->map->pmap != new_pmap)
117 pmap_switch(new_pmap);
118
119
120 new->machine.CpuDatap = cpu_data_ptr;
121
122 /* TODO: Should this be ordered? */
123 old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
124 new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;
125
126 machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);
127
128 retval = Switch_context(old, continuation, new);
129 assert(retval != NULL);
130
131 return retval;
132 }
133
/*
 *	Routine: machine_thread_on_core
 *
 *	Returns nonzero if the thread is currently marked as running on a
 *	CPU (flag maintained by machine_switch_context/machine_stack_handoff).
 */
boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU;
}
139
140 /*
141 * Routine: machine_thread_create
142 *
143 */
144 kern_return_t
145 machine_thread_create(thread_t thread,
146 task_t task)
147 {
148 arm_context_t *thread_user_ss = NULL;
149 kern_return_t result = KERN_SUCCESS;
150
151 #define machine_thread_create_kprintf(x...) \
152 /* kprintf("machine_thread_create: " x) */
153
154 machine_thread_create_kprintf("thread = %x\n", thread);
155
156 if (current_thread() != thread) {
157 thread->machine.CpuDatap = (cpu_data_t *)0;
158 }
159 thread->machine.preemption_count = 0;
160 thread->machine.cthread_self = 0;
161 #if defined(HAS_APPLE_PAC)
162 thread->machine.rop_pid = task->rop_pid;
163 thread->machine.disable_user_jop = task->disable_user_jop;
164 #endif
165
166
167 if (task != kernel_task) {
168 /* If this isn't a kernel thread, we'll have userspace state. */
169 thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);
170
171 if (!thread->machine.contextData) {
172 result = KERN_FAILURE;
173 goto done;
174 }
175
176 thread->machine.upcb = &thread->machine.contextData->ss;
177 thread->machine.uNeon = &thread->machine.contextData->ns;
178
179 if (task_has_64Bit_data(task)) {
180 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
181 thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
182 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
183 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
184 } else {
185 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
186 thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
187 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
188 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
189 }
190
191 } else {
192 thread->machine.upcb = NULL;
193 thread->machine.uNeon = NULL;
194 thread->machine.contextData = NULL;
195 }
196
197
198 bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));
199 result = machine_thread_state_initialize(thread);
200
201 done:
202 if (result != KERN_SUCCESS) {
203 thread_user_ss = thread->machine.contextData;
204
205 if (thread_user_ss) {
206 thread->machine.upcb = NULL;
207 thread->machine.uNeon = NULL;
208 thread->machine.contextData = NULL;
209 zfree(user_ss_zone, thread_user_ss);
210 }
211 }
212
213 return result;
214 }
215
216 /*
217 * Routine: machine_thread_destroy
218 *
219 */
220 void
221 machine_thread_destroy(thread_t thread)
222 {
223 arm_context_t *thread_user_ss;
224
225 if (thread->machine.contextData) {
226 /* Disassociate the user save state from the thread before we free it. */
227 thread_user_ss = thread->machine.contextData;
228 thread->machine.upcb = NULL;
229 thread->machine.uNeon = NULL;
230 thread->machine.contextData = NULL;
231
232
233 zfree(user_ss_zone, thread_user_ss);
234 }
235
236 if (thread->machine.DebugData != NULL) {
237 if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
238 arm_debug_set(NULL);
239 }
240
241 zfree(ads_zone, thread->machine.DebugData);
242 }
243 }
244
245
246 /*
247 * Routine: machine_thread_init
248 *
249 */
250 void
251 machine_thread_init(void)
252 {
253 ads_zone = zinit(sizeof(arm_debug_state_t),
254 THREAD_CHUNK * (sizeof(arm_debug_state_t)),
255 THREAD_CHUNK * (sizeof(arm_debug_state_t)),
256 "arm debug state");
257
258 /*
259 * Create a zone for the user save state. At the time this zone was created,
260 * the user save state was 848 bytes, and the matching kalloc zone was 1024
261 * bytes, which would result in significant amounts of wasted space if we
262 * simply used kalloc to allocate the user saved state.
263 *
264 * 0x4000 has been chosen as the allocation size, as it results in 272 bytes
265 * of wasted space per chunk, which should correspond to 19 allocations.
266 */
267 user_ss_zone = zinit(sizeof(arm_context_t),
268 CONFIG_THREAD_MAX * (sizeof(arm_context_t)),
269 USER_SS_ZONE_ALLOC_SIZE,
270 "user save state");
271
272 }
273
274
275 /*
276 * Routine: get_useraddr
277 *
278 */
279 user_addr_t
280 get_useraddr()
281 {
282 return (get_saved_state_pc(current_thread()->machine.upcb));
283 }
284
285 /*
286 * Routine: machine_stack_detach
287 *
288 */
289 vm_offset_t
290 machine_stack_detach(thread_t thread)
291 {
292 vm_offset_t stack;
293
294 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
295 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
296
297 stack = thread->kernel_stack;
298 thread->kernel_stack = 0;
299 thread->machine.kstackptr = 0;
300
301 return (stack);
302 }
303
304
305 /*
306 * Routine: machine_stack_attach
307 *
308 */
309 void
310 machine_stack_attach(thread_t thread,
311 vm_offset_t stack)
312 {
313 struct arm_context *context;
314 struct arm_saved_state64 *savestate;
315 uint32_t current_el;
316
317 #define machine_stack_attach_kprintf(x...) \
318 /* kprintf("machine_stack_attach: " x) */
319
320 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
321 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
322
323 thread->kernel_stack = stack;
324 thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
325 thread_initialize_kernel_state(thread);
326
327 machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);
328
329 current_el = (uint32_t) __builtin_arm_rsr64("CurrentEL");
330 context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
331 savestate = saved_state64(&context->ss);
332 savestate->fp = 0;
333 savestate->sp = thread->machine.kstackptr;
334 #if defined(HAS_APPLE_PAC)
335 /* Sign the initial kernel stack saved state */
336 const uint32_t default_cpsr = PSR64_KERNEL_DEFAULT & ~PSR64_MODE_EL_MASK;
337 asm volatile (
338 "mov x0, %[ss]" "\n"
339
340 "mov x1, xzr" "\n"
341 "str x1, [x0, %[SS64_PC]]" "\n"
342
343 "mov x2, %[default_cpsr_lo]" "\n"
344 "movk x2, %[default_cpsr_hi], lsl #16" "\n"
345 "mrs x3, CurrentEL" "\n"
346 "orr w2, w2, w3" "\n"
347 "str w2, [x0, %[SS64_CPSR]]" "\n"
348
349 "adrp x3, _thread_continue@page" "\n"
350 "add x3, x3, _thread_continue@pageoff" "\n"
351 "str x3, [x0, %[SS64_LR]]" "\n"
352
353 "mov x4, xzr" "\n"
354 "mov x5, xzr" "\n"
355 "stp x4, x5, [x0, %[SS64_X16]]" "\n"
356
357 "mov x6, lr" "\n"
358 "bl _ml_sign_thread_state" "\n"
359 "mov lr, x6" "\n"
360 :
361 : [ss] "r"(&context->ss),
362 [default_cpsr_lo] "M"(default_cpsr & 0xFFFF),
363 [default_cpsr_hi] "M"(default_cpsr >> 16),
364 [SS64_X16] "i"(offsetof(struct arm_saved_state, ss_64.x[16])),
365 [SS64_PC] "i"(offsetof(struct arm_saved_state, ss_64.pc)),
366 [SS64_CPSR] "i"(offsetof(struct arm_saved_state, ss_64.cpsr)),
367 [SS64_LR] "i"(offsetof(struct arm_saved_state, ss_64.lr))
368 : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
369 );
370 #else
371 savestate->lr = (uintptr_t)thread_continue;
372 savestate->cpsr = (PSR64_KERNEL_DEFAULT & ~PSR64_MODE_EL_MASK) | current_el;
373 #endif /* defined(HAS_APPLE_PAC) */
374 machine_stack_attach_kprintf("thread = %p pc = %llx, sp = %llx\n", thread, savestate->lr, savestate->sp);
375 }
376
377
378 /*
379 * Routine: machine_stack_handoff
380 *
381 */
382 void
383 machine_stack_handoff(thread_t old,
384 thread_t new)
385 {
386 vm_offset_t stack;
387 pmap_t new_pmap;
388 cpu_data_t * cpu_data_ptr;
389
390 kpc_off_cpu(old);
391
392 stack = machine_stack_detach(old);
393 cpu_data_ptr = getCpuDatap();
394 new->kernel_stack = stack;
395 new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
396 if (stack == old->reserved_stack) {
397 assert(new->reserved_stack);
398 old->reserved_stack = new->reserved_stack;
399 new->reserved_stack = stack;
400 }
401
402
403
404 new_pmap = new->map->pmap;
405 if (old->map->pmap != new_pmap)
406 pmap_switch(new_pmap);
407
408
409 new->machine.CpuDatap = cpu_data_ptr;
410
411 /* TODO: Should this be ordered? */
412 old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
413 new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;
414
415 machine_set_current_thread(new);
416 thread_initialize_kernel_state(new);
417
418 return;
419 }
420
421
422 /*
423 * Routine: call_continuation
424 *
425 */
426 void
427 call_continuation(thread_continue_t continuation,
428 void *parameter,
429 wait_result_t wresult,
430 boolean_t enable_interrupts)
431 {
432 #define call_continuation_kprintf(x...) \
433 /* kprintf("call_continuation_kprintf:" x) */
434
435 call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
436 Call_continuation(continuation, parameter, wresult, enable_interrupts);
437 }
438
/*
 * Helpers to program one hardware breakpoint/watchpoint register pair.
 * SET_DBGBCRn/SET_DBGWCRn write the control register DBGB/WCR<n>_EL1 and
 * also OR the written value into 'accum' so callers can inspect the union
 * of all control bits afterwards; SET_DBGBVRn/SET_DBGWVRn write only the
 * value register.  The register number must be a literal, hence macros.
 */
#define SET_DBGBCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGBCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGBVRn(n, value) \
	__asm__ volatile("msr DBGBVR" #n "_EL1, %0" : : "r"(value))

#define SET_DBGWCRn(n, value, accum) \
	__asm__ volatile( \
		"msr DBGWCR" #n "_EL1, %[val]\n" \
		"orr %[result], %[result], %[val]\n" \
		: [result] "+r"(accum) : [val] "r"((value)))

#define SET_DBGWVRn(n, value) \
	__asm__ volatile("msr DBGWVR" #n "_EL1, %0" : : "r"(value))
456
/*
 * Program the CPU's hardware debug registers from a 32-bit flavored
 * debug state (or disable them when debug_state is NULL, by loading a
 * zeroed 'off' state).  Also manages MDSCR_EL1's MDE (monitor debug
 * enable) and SS (single step) bits, and sets PSR64_SS in the current
 * thread's saved CPSR when single-stepping is requested.  Runs with
 * interrupts disabled for the duration.
 */
void arm_debug_set32(arm_debug_state_t *debug_state)
{
	struct cpu_data * cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t intr, set_mde = 0;
	arm_debug_state_t off_state;
	uint32_t i;
	uint64_t all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		/* NULL means "turn debug off": program all-zero registers. */
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	/*
	 * Deliberate fallthrough: case N programs pair N-1 and falls into
	 * case N-1, so exactly num_breakpoint_pairs pairs get written.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, (uint64_t)debug_state->uds.ds32.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds32.bcr[15], all_ctrls);
	case 15:
		SET_DBGBVRn(14, (uint64_t)debug_state->uds.ds32.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds32.bcr[14], all_ctrls);
	case 14:
		SET_DBGBVRn(13, (uint64_t)debug_state->uds.ds32.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds32.bcr[13], all_ctrls);
	case 13:
		SET_DBGBVRn(12, (uint64_t)debug_state->uds.ds32.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds32.bcr[12], all_ctrls);
	case 12:
		SET_DBGBVRn(11, (uint64_t)debug_state->uds.ds32.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds32.bcr[11], all_ctrls);
	case 11:
		SET_DBGBVRn(10, (uint64_t)debug_state->uds.ds32.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds32.bcr[10], all_ctrls);
	case 10:
		SET_DBGBVRn(9, (uint64_t)debug_state->uds.ds32.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds32.bcr[9], all_ctrls);
	case 9:
		SET_DBGBVRn(8, (uint64_t)debug_state->uds.ds32.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds32.bcr[8], all_ctrls);
	case 8:
		SET_DBGBVRn(7, (uint64_t)debug_state->uds.ds32.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds32.bcr[7], all_ctrls);
	case 7:
		SET_DBGBVRn(6, (uint64_t)debug_state->uds.ds32.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds32.bcr[6], all_ctrls);
	case 6:
		SET_DBGBVRn(5, (uint64_t)debug_state->uds.ds32.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds32.bcr[5], all_ctrls);
	case 5:
		SET_DBGBVRn(4, (uint64_t)debug_state->uds.ds32.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds32.bcr[4], all_ctrls);
	case 4:
		SET_DBGBVRn(3, (uint64_t)debug_state->uds.ds32.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds32.bcr[3], all_ctrls);
	case 3:
		SET_DBGBVRn(2, (uint64_t)debug_state->uds.ds32.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds32.bcr[2], all_ctrls);
	case 2:
		SET_DBGBVRn(1, (uint64_t)debug_state->uds.ds32.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds32.bcr[1], all_ctrls);
	case 1:
		SET_DBGBVRn(0, (uint64_t)debug_state->uds.ds32.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds32.bcr[0], all_ctrls);
	default:
		break;
	}

	/* Same deliberate-fallthrough pattern for the watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, (uint64_t)debug_state->uds.ds32.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds32.wcr[15], all_ctrls);
	case 15:
		SET_DBGWVRn(14, (uint64_t)debug_state->uds.ds32.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds32.wcr[14], all_ctrls);
	case 14:
		SET_DBGWVRn(13, (uint64_t)debug_state->uds.ds32.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds32.wcr[13], all_ctrls);
	case 13:
		SET_DBGWVRn(12, (uint64_t)debug_state->uds.ds32.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds32.wcr[12], all_ctrls);
	case 12:
		SET_DBGWVRn(11, (uint64_t)debug_state->uds.ds32.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds32.wcr[11], all_ctrls);
	case 11:
		SET_DBGWVRn(10, (uint64_t)debug_state->uds.ds32.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds32.wcr[10], all_ctrls);
	case 10:
		SET_DBGWVRn(9, (uint64_t)debug_state->uds.ds32.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds32.wcr[9], all_ctrls);
	case 9:
		SET_DBGWVRn(8, (uint64_t)debug_state->uds.ds32.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds32.wcr[8], all_ctrls);
	case 8:
		SET_DBGWVRn(7, (uint64_t)debug_state->uds.ds32.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds32.wcr[7], all_ctrls);
	case 7:
		SET_DBGWVRn(6, (uint64_t)debug_state->uds.ds32.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds32.wcr[6], all_ctrls);
	case 6:
		SET_DBGWVRn(5, (uint64_t)debug_state->uds.ds32.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds32.wcr[5], all_ctrls);
	case 5:
		SET_DBGWVRn(4, (uint64_t)debug_state->uds.ds32.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds32.wcr[4], all_ctrls);
	case 4:
		SET_DBGWVRn(3, (uint64_t)debug_state->uds.ds32.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds32.wcr[3], all_ctrls);
	case 3:
		SET_DBGWVRn(2, (uint64_t)debug_state->uds.ds32.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds32.wcr[2], all_ctrls);
	case 2:
		SET_DBGWVRn(1, (uint64_t)debug_state->uds.ds32.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds32.wcr[1], all_ctrls);
	case 1:
		SET_DBGWVRn(0, (uint64_t)debug_state->uds.ds32.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds32.wcr[0], all_ctrls);
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse control values that would enable debugging of privileged modes. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/* MDE is needed iff any breakpoint or watchpoint control is nonzero. */
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.bcr[i]) {
			set_mde = 1;
			break;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->uds.ds32.wcr[i]) {
			set_mde = 1;
			break;
		}
	}

	/*
	 * Breakpoint/Watchpoint Enable
	 */
	if (set_mde) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	} else {
		update_mdscr(0x8000, 0);
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds32.mdscr_el1 & 0x1) {
		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {

		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}
633
/*
 * Program the CPU's hardware debug registers from a 64-bit flavored
 * debug state (or disable them when debug_state is NULL, by loading a
 * zeroed 'off' state).  Mirrors arm_debug_set32 but reads the ds64
 * union member, whose bvr/wvr values are already 64-bit.  Runs with
 * interrupts disabled for the duration.
 */
void arm_debug_set64(arm_debug_state_t *debug_state)
{
	struct cpu_data * cpu_data_ptr;
	arm_debug_info_t * debug_info = arm_debug_info();
	boolean_t intr, set_mde = 0;
	arm_debug_state_t off_state;
	uint32_t i;
	uint64_t all_ctrls = 0;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (NULL == debug_state) {
		/* NULL means "turn debug off": program all-zero registers. */
		bzero(&off_state, sizeof(off_state));
		debug_state = &off_state;
	}

	/*
	 * Deliberate fallthrough: case N programs pair N-1 and falls into
	 * case N-1, so exactly num_breakpoint_pairs pairs get written.
	 */
	switch (debug_info->num_breakpoint_pairs) {
	case 16:
		SET_DBGBVRn(15, debug_state->uds.ds64.bvr[15]);
		SET_DBGBCRn(15, (uint64_t)debug_state->uds.ds64.bcr[15], all_ctrls);
	case 15:
		SET_DBGBVRn(14, debug_state->uds.ds64.bvr[14]);
		SET_DBGBCRn(14, (uint64_t)debug_state->uds.ds64.bcr[14], all_ctrls);
	case 14:
		SET_DBGBVRn(13, debug_state->uds.ds64.bvr[13]);
		SET_DBGBCRn(13, (uint64_t)debug_state->uds.ds64.bcr[13], all_ctrls);
	case 13:
		SET_DBGBVRn(12, debug_state->uds.ds64.bvr[12]);
		SET_DBGBCRn(12, (uint64_t)debug_state->uds.ds64.bcr[12], all_ctrls);
	case 12:
		SET_DBGBVRn(11, debug_state->uds.ds64.bvr[11]);
		SET_DBGBCRn(11, (uint64_t)debug_state->uds.ds64.bcr[11], all_ctrls);
	case 11:
		SET_DBGBVRn(10, debug_state->uds.ds64.bvr[10]);
		SET_DBGBCRn(10, (uint64_t)debug_state->uds.ds64.bcr[10], all_ctrls);
	case 10:
		SET_DBGBVRn(9, debug_state->uds.ds64.bvr[9]);
		SET_DBGBCRn(9, (uint64_t)debug_state->uds.ds64.bcr[9], all_ctrls);
	case 9:
		SET_DBGBVRn(8, debug_state->uds.ds64.bvr[8]);
		SET_DBGBCRn(8, (uint64_t)debug_state->uds.ds64.bcr[8], all_ctrls);
	case 8:
		SET_DBGBVRn(7, debug_state->uds.ds64.bvr[7]);
		SET_DBGBCRn(7, (uint64_t)debug_state->uds.ds64.bcr[7], all_ctrls);
	case 7:
		SET_DBGBVRn(6, debug_state->uds.ds64.bvr[6]);
		SET_DBGBCRn(6, (uint64_t)debug_state->uds.ds64.bcr[6], all_ctrls);
	case 6:
		SET_DBGBVRn(5, debug_state->uds.ds64.bvr[5]);
		SET_DBGBCRn(5, (uint64_t)debug_state->uds.ds64.bcr[5], all_ctrls);
	case 5:
		SET_DBGBVRn(4, debug_state->uds.ds64.bvr[4]);
		SET_DBGBCRn(4, (uint64_t)debug_state->uds.ds64.bcr[4], all_ctrls);
	case 4:
		SET_DBGBVRn(3, debug_state->uds.ds64.bvr[3]);
		SET_DBGBCRn(3, (uint64_t)debug_state->uds.ds64.bcr[3], all_ctrls);
	case 3:
		SET_DBGBVRn(2, debug_state->uds.ds64.bvr[2]);
		SET_DBGBCRn(2, (uint64_t)debug_state->uds.ds64.bcr[2], all_ctrls);
	case 2:
		SET_DBGBVRn(1, debug_state->uds.ds64.bvr[1]);
		SET_DBGBCRn(1, (uint64_t)debug_state->uds.ds64.bcr[1], all_ctrls);
	case 1:
		SET_DBGBVRn(0, debug_state->uds.ds64.bvr[0]);
		SET_DBGBCRn(0, (uint64_t)debug_state->uds.ds64.bcr[0], all_ctrls);
	default:
		break;
	}

	/* Same deliberate-fallthrough pattern for the watchpoint pairs. */
	switch (debug_info->num_watchpoint_pairs) {
	case 16:
		SET_DBGWVRn(15, debug_state->uds.ds64.wvr[15]);
		SET_DBGWCRn(15, (uint64_t)debug_state->uds.ds64.wcr[15], all_ctrls);
	case 15:
		SET_DBGWVRn(14, debug_state->uds.ds64.wvr[14]);
		SET_DBGWCRn(14, (uint64_t)debug_state->uds.ds64.wcr[14], all_ctrls);
	case 14:
		SET_DBGWVRn(13, debug_state->uds.ds64.wvr[13]);
		SET_DBGWCRn(13, (uint64_t)debug_state->uds.ds64.wcr[13], all_ctrls);
	case 13:
		SET_DBGWVRn(12, debug_state->uds.ds64.wvr[12]);
		SET_DBGWCRn(12, (uint64_t)debug_state->uds.ds64.wcr[12], all_ctrls);
	case 12:
		SET_DBGWVRn(11, debug_state->uds.ds64.wvr[11]);
		SET_DBGWCRn(11, (uint64_t)debug_state->uds.ds64.wcr[11], all_ctrls);
	case 11:
		SET_DBGWVRn(10, debug_state->uds.ds64.wvr[10]);
		SET_DBGWCRn(10, (uint64_t)debug_state->uds.ds64.wcr[10], all_ctrls);
	case 10:
		SET_DBGWVRn(9, debug_state->uds.ds64.wvr[9]);
		SET_DBGWCRn(9, (uint64_t)debug_state->uds.ds64.wcr[9], all_ctrls);
	case 9:
		SET_DBGWVRn(8, debug_state->uds.ds64.wvr[8]);
		SET_DBGWCRn(8, (uint64_t)debug_state->uds.ds64.wcr[8], all_ctrls);
	case 8:
		SET_DBGWVRn(7, debug_state->uds.ds64.wvr[7]);
		SET_DBGWCRn(7, (uint64_t)debug_state->uds.ds64.wcr[7], all_ctrls);
	case 7:
		SET_DBGWVRn(6, debug_state->uds.ds64.wvr[6]);
		SET_DBGWCRn(6, (uint64_t)debug_state->uds.ds64.wcr[6], all_ctrls);
	case 6:
		SET_DBGWVRn(5, debug_state->uds.ds64.wvr[5]);
		SET_DBGWCRn(5, (uint64_t)debug_state->uds.ds64.wcr[5], all_ctrls);
	case 5:
		SET_DBGWVRn(4, debug_state->uds.ds64.wvr[4]);
		SET_DBGWCRn(4, (uint64_t)debug_state->uds.ds64.wcr[4], all_ctrls);
	case 4:
		SET_DBGWVRn(3, debug_state->uds.ds64.wvr[3]);
		SET_DBGWCRn(3, (uint64_t)debug_state->uds.ds64.wcr[3], all_ctrls);
	case 3:
		SET_DBGWVRn(2, debug_state->uds.ds64.wvr[2]);
		SET_DBGWCRn(2, (uint64_t)debug_state->uds.ds64.wcr[2], all_ctrls);
	case 2:
		SET_DBGWVRn(1, debug_state->uds.ds64.wvr[1]);
		SET_DBGWCRn(1, (uint64_t)debug_state->uds.ds64.wcr[1], all_ctrls);
	case 1:
		SET_DBGWVRn(0, debug_state->uds.ds64.wvr[0]);
		SET_DBGWCRn(0, (uint64_t)debug_state->uds.ds64.wcr[0], all_ctrls);
	default:
		break;
	}

#if defined(CONFIG_KERNEL_INTEGRITY)
	/* Refuse control values that would enable debugging of privileged modes. */
	if ((all_ctrls & (ARM_DBG_CR_MODE_CONTROL_PRIVILEGED | ARM_DBG_CR_HIGHER_MODE_ENABLE)) != 0) {
		panic("sorry, self-hosted debug is not supported: 0x%llx", all_ctrls);
	}
#endif

	/* MDE is needed iff any breakpoint or watchpoint control is nonzero. */
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->uds.ds64.bcr[i]) {
			set_mde = 1;
			break;
		}
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->uds.ds64.wcr[i]) {
			set_mde = 1;
			break;
		}
	}

	/*
	 * Breakpoint/Watchpoint Enable
	 *
	 * NOTE(review): unlike arm_debug_set32, there is no 'else' branch
	 * clearing MDE (update_mdscr(0x8000, 0)) when set_mde is false —
	 * confirm whether this asymmetry is intentional.
	 */
	if (set_mde) {
		update_mdscr(0, 0x8000); // MDSCR_EL1[MDE]
	}

	/*
	 * Software debug single step enable
	 */
	if (debug_state->uds.ds64.mdscr_el1 & 0x1) {

		update_mdscr(0x8000, 1); // ~MDE | SS : no brk/watch while single stepping (which we've set)

		mask_saved_state_cpsr(current_thread()->machine.upcb, PSR64_SS, 0);
	} else {

		update_mdscr(0x1, 0);

#if SINGLE_STEP_RETIRE_ERRATA
		// Workaround for radar 20619637
		__builtin_arm_isb(ISB_SY);
#endif
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}
809
810 void arm_debug_set(arm_debug_state_t *debug_state)
811 {
812 if (debug_state) {
813 switch (debug_state->dsh.flavor) {
814 case ARM_DEBUG_STATE32:
815 arm_debug_set32(debug_state);
816 break;
817 case ARM_DEBUG_STATE64:
818 arm_debug_set64(debug_state);
819 break;
820 default:
821 panic("arm_debug_set");
822 break;
823 }
824 } else {
825 if (thread_is_64bit_data(current_thread()))
826 arm_debug_set64(debug_state);
827 else
828 arm_debug_set32(debug_state);
829 }
830 }
831
832 #define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
/*
 * Validate a legacy (32-bit) debug state: every active breakpoint or
 * watchpoint (nonzero control register) must target an address below
 * VM_MAX_ADDRESS32, i.e. inside the 32-bit user address range.
 * Returns TRUE if the state is acceptable.
 */
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
			return FALSE;
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
			return FALSE;
	}
	return TRUE;
}
849
/*
 * Validate a 32-bit debug state: every active breakpoint or watchpoint
 * (nonzero control register) must target an address below
 * VM_MAX_ADDRESS32.  Returns TRUE if the state is acceptable.
 */
boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
			return FALSE;
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
			return FALSE;
	}
	return TRUE;
}
866
/*
 * Validate a 64-bit debug state: every active breakpoint or watchpoint
 * (nonzero control register) must target an address below
 * MACH_VM_MAX_ADDRESS, i.e. inside the 64-bit user address range.
 * Returns TRUE if the state is acceptable.
 */
boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
	arm_debug_info_t *debug_info = arm_debug_info();
	uint32_t i;
	for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
		if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i])
			return FALSE;
	}

	for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
		if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i])
			return FALSE;
	}
	return TRUE;
}
883
884 /*
885 * Duplicate one arm_debug_state_t to another. "all" parameter
886 * is ignored in the case of ARM -- Is this the right assumption?
887 */
888 void
889 copy_legacy_debug_state(arm_legacy_debug_state_t * src,
890 arm_legacy_debug_state_t * target,
891 __unused boolean_t all)
892 {
893 bcopy(src, target, sizeof(arm_legacy_debug_state_t));
894 }
895
896 void
897 copy_debug_state32(arm_debug_state32_t * src,
898 arm_debug_state32_t * target,
899 __unused boolean_t all)
900 {
901 bcopy(src, target, sizeof(arm_debug_state32_t));
902 }
903
904 void
905 copy_debug_state64(arm_debug_state64_t * src,
906 arm_debug_state64_t * target,
907 __unused boolean_t all)
908 {
909 bcopy(src, target, sizeof(arm_debug_state64_t));
910 }
911
/*
 * Set a thread's TSD (thread-local storage) base address, stored in
 * machine.cthread_self and exposed to userspace via TPIDRRO_EL0.
 * Rejects kernel-task threads and bases that collide with the CPU
 * number bits; out-of-range bases are silently replaced with 0.  If
 * the thread is the caller, TPIDRRO_EL0 is reloaded immediately
 * (preserving the CPU-number bits).  Returns KERN_SUCCESS or
 * KERN_INVALID_ARGUMENT.
 */
kern_return_t
machine_thread_set_tsd_base(thread_t thread,
    mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	/* The low bits of TPIDRRO_EL0 carry the CPU number; the base must not use them. */
	if (tsd_base & MACHDEP_CPUNUM_MASK) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Clamp bases outside the thread's addressable range to 0. */
	if (thread_is_64bit_addr(thread)) {
		if (tsd_base > vm_map_max(thread->map))
			tsd_base = 0ULL;
	} else {
		if (tsd_base > UINT32_MAX)
			tsd_base = 0ULL;
	}

	thread->machine.cthread_self = tsd_base;

	/* For current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		uint64_t cpunum, tpidrro_el0;

		/* Disable preemption so the CPU number can't change mid-update. */
		mp_disable_preemption();
		tpidrro_el0 = get_tpidrro();
		cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
		set_tpidrro(tsd_base | cpunum);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}
948
/* Thread exception-state cleanup hook; intentionally a no-op on arm64. */
void
machine_tecs(__unused thread_t thr)
{
}
953
/*
 * Report whether the given CPU-vulnerability mitigation applies;
 * always 0 (not applicable / not enabled) on this platform.
 */
int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}