/* apple/xnu (xnu-4570.31.3) — osfmk/arm64/pcb.c */
/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kalloc.h>
#include <kern/kpc.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#include <machine/atomic.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <arm/cpu_data_internal.h>
#include <arm/machdep_call.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

#define USER_SS_ZONE_ALLOC_SIZE (0x4000)

extern int debug_task;

zone_t ads_zone;     /* zone for debug_state area */
zone_t user_ss_zone; /* zone for user arm_context_t allocations */

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
    pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}

/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(
    thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
    thread_t retval;
    pmap_t new_pmap;
    cpu_data_t *cpu_data_ptr;

#define machine_switch_context_kprintf(x...) /* kprintf("machine_switch_context: " x) */

    cpu_data_ptr = getCpuDatap();
    if (old == new)
        panic("machine_switch_context");

    kpc_off_cpu(old);

    new_pmap = new->map->pmap;
    if (old->map->pmap != new_pmap)
        pmap_switch(new_pmap);

    new->machine.CpuDatap = cpu_data_ptr;

    machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);

    retval = Switch_context(old, continuation, new);
    assert(retval != NULL);

    return retval;
}

/*
 * Routine: machine_thread_create
 *
 */
kern_return_t
machine_thread_create(
    thread_t thread,
    task_t task)
{
    arm_context_t *thread_user_ss = NULL;
    kern_return_t result = KERN_SUCCESS;

#define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */

    machine_thread_create_kprintf("thread = %x\n", thread);

    if (current_thread() != thread) {
        thread->machine.CpuDatap = (cpu_data_t *)0;
    }
    thread->machine.preemption_count = 0;
    thread->machine.cthread_self = 0;
    thread->machine.cthread_data = 0;

    if (task != kernel_task) {
        /* If this isn't a kernel thread, we'll have userspace state. */
        thread->machine.contextData = (arm_context_t *)zalloc(user_ss_zone);

        if (!thread->machine.contextData) {
            return KERN_FAILURE;
        }

        thread->machine.upcb = &thread->machine.contextData->ss;
        thread->machine.uNeon = &thread->machine.contextData->ns;

        if (task_has_64BitAddr(task)) {
            thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
            thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
            thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
            thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
        } else {
            thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
            thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
            thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
            thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
        }
    } else {
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
    }

    bzero(&thread->machine.perfctrl_state, sizeof(thread->machine.perfctrl_state));

    result = machine_thread_state_initialize(thread);

    if (result != KERN_SUCCESS) {
        thread_user_ss = thread->machine.contextData;
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
        zfree(user_ss_zone, thread_user_ss);
    }

    return result;
}

/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(
    thread_t thread)
{
    arm_context_t *thread_user_ss;

    if (thread->machine.contextData) {
        /* Disassociate the user save state from the thread before we free it. */
        thread_user_ss = thread->machine.contextData;
        thread->machine.upcb = NULL;
        thread->machine.uNeon = NULL;
        thread->machine.contextData = NULL;
        zfree(user_ss_zone, thread_user_ss);
    }

    if (thread->machine.DebugData != NULL) {
        if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
            arm_debug_set(NULL);
        }

        zfree(ads_zone, thread->machine.DebugData);
    }
}

/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
    ads_zone = zinit(sizeof(arm_debug_state_t),
                     THREAD_CHUNK * (sizeof(arm_debug_state_t)),
                     THREAD_CHUNK * (sizeof(arm_debug_state_t)),
                     "arm debug state");

    /*
     * Create a zone for the user save state. At the time this zone was created,
     * the user save state was 848 bytes, and the matching kalloc zone was 1024
     * bytes, so simply using kalloc for the user saved state would waste a
     * significant amount of space per allocation.
     *
     * 0x4000 was chosen as the allocation size: each 0x4000 chunk holds 19
     * user save states and wastes only 272 bytes.
     */
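    /*
     * Editor's note, worked out from the numbers above:
     * floor(0x4000 / 848) = floor(16384 / 848) = 19 allocations per chunk,
     * leaving 16384 - (19 * 848) = 272 bytes unused, versus
     * 1024 - 848 = 176 bytes wasted on every single kalloc allocation.
     */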
    user_ss_zone = zinit(sizeof(arm_context_t),
                         CONFIG_THREAD_MAX * (sizeof(arm_context_t)),
                         USER_SS_ZONE_ALLOC_SIZE,
                         "user save state");
}

/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr()
{
    return (get_saved_state_pc(current_thread()->machine.upcb));
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(
    thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
                 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    thread->machine.kstackptr = 0;

    return (stack);
}

/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(
    thread_t thread,
    vm_offset_t stack)
{
    struct arm_context *context;
    struct arm_saved_state64 *savestate;

#define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
                 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

    thread->kernel_stack = stack;
    thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    thread_initialize_kernel_state(thread);

    machine_stack_attach_kprintf("kstackptr: %lx\n", (vm_address_t)thread->machine.kstackptr);

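    /*
     * Editor's note, inferred from the code below: this seeds a synthetic
     * kernel saved state at the top of the new stack, so that when
     * Switch_context() restores it, the thread resumes in thread_continue()
     * with a fresh stack pointer and the default kernel CPSR.
     */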
    context = &((thread_kernel_state_t) thread->machine.kstackptr)->machine;
    savestate = saved_state64(&context->ss);
    savestate->fp = 0;
    savestate->lr = (uintptr_t)thread_continue;
    savestate->sp = thread->machine.kstackptr;
    savestate->cpsr = PSR64_KERNEL_DEFAULT;
    machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}

/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(
    thread_t old,
    thread_t new)
{
    vm_offset_t stack;
    pmap_t new_pmap;
    cpu_data_t *cpu_data_ptr;

    kpc_off_cpu(old);

    stack = machine_stack_detach(old);
    cpu_data_ptr = getCpuDatap();
    new->kernel_stack = stack;
    new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }

    new_pmap = new->map->pmap;
    if (old->map->pmap != new_pmap)
        pmap_switch(new_pmap);

    new->machine.CpuDatap = cpu_data_ptr;
    machine_set_current_thread(new);
    thread_initialize_kernel_state(new);

    return;
}

/*
 * Routine: call_continuation
 *
 */
void
call_continuation(
    thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult)
{
#define call_continuation_kprintf(x...) /* kprintf("call_continuation_kprintf:" x) */

    call_continuation_kprintf("thread = %p continuation = %p, stack = %p\n", current_thread(), continuation, current_thread()->machine.kstackptr);
    Call_continuation(continuation, parameter, wresult, current_thread()->machine.kstackptr);
}

void
arm_debug_set32(arm_debug_state_t *debug_state)
{
    struct cpu_data *cpu_data_ptr;
    arm_debug_info_t *debug_info = arm_debug_info();
    volatile uint64_t state;
    boolean_t intr, set_mde = 0;
    arm_debug_state_t off_state;
    uint32_t i;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    // Set current user debug
    cpu_data_ptr->cpu_user_debug = debug_state;

    if (NULL == debug_state) {
        bzero(&off_state, sizeof(off_state));
        debug_state = &off_state;
    }

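    /*
     * This switch and the watchpoint switch below fall through
     * intentionally: entering at case N writes register pair N-1 and every
     * lower-numbered pair down to 0. MSR takes the register name as an
     * immediate, so the pairs cannot be written in a loop.
     */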
    switch (debug_info->num_breakpoint_pairs) {
    case 16:
        __asm__ volatile("msr DBGBVR15_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[15]));
        __asm__ volatile("msr DBGBCR15_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[15]));
    case 15:
        __asm__ volatile("msr DBGBVR14_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[14]));
        __asm__ volatile("msr DBGBCR14_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[14]));
    case 14:
        __asm__ volatile("msr DBGBVR13_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[13]));
        __asm__ volatile("msr DBGBCR13_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[13]));
    case 13:
        __asm__ volatile("msr DBGBVR12_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[12]));
        __asm__ volatile("msr DBGBCR12_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[12]));
    case 12:
        __asm__ volatile("msr DBGBVR11_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[11]));
        __asm__ volatile("msr DBGBCR11_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[11]));
    case 11:
        __asm__ volatile("msr DBGBVR10_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[10]));
        __asm__ volatile("msr DBGBCR10_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[10]));
    case 10:
        __asm__ volatile("msr DBGBVR9_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[9]));
        __asm__ volatile("msr DBGBCR9_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[9]));
    case 9:
        __asm__ volatile("msr DBGBVR8_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[8]));
        __asm__ volatile("msr DBGBCR8_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[8]));
    case 8:
        __asm__ volatile("msr DBGBVR7_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[7]));
        __asm__ volatile("msr DBGBCR7_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[7]));
    case 7:
        __asm__ volatile("msr DBGBVR6_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[6]));
        __asm__ volatile("msr DBGBCR6_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[6]));
    case 6:
        __asm__ volatile("msr DBGBVR5_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[5]));
        __asm__ volatile("msr DBGBCR5_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[5]));
    case 5:
        __asm__ volatile("msr DBGBVR4_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[4]));
        __asm__ volatile("msr DBGBCR4_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[4]));
    case 4:
        __asm__ volatile("msr DBGBVR3_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[3]));
        __asm__ volatile("msr DBGBCR3_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[3]));
    case 3:
        __asm__ volatile("msr DBGBVR2_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[2]));
        __asm__ volatile("msr DBGBCR2_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[2]));
    case 2:
        __asm__ volatile("msr DBGBVR1_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[1]));
        __asm__ volatile("msr DBGBCR1_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[1]));
    case 1:
        __asm__ volatile("msr DBGBVR0_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bvr[0]));
        __asm__ volatile("msr DBGBCR0_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.bcr[0]));
    default:
        break;
    }

    switch (debug_info->num_watchpoint_pairs) {
    case 16:
        __asm__ volatile("msr DBGWVR15_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[15]));
        __asm__ volatile("msr DBGWCR15_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[15]));
    case 15:
        __asm__ volatile("msr DBGWVR14_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[14]));
        __asm__ volatile("msr DBGWCR14_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[14]));
    case 14:
        __asm__ volatile("msr DBGWVR13_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[13]));
        __asm__ volatile("msr DBGWCR13_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[13]));
    case 13:
        __asm__ volatile("msr DBGWVR12_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[12]));
        __asm__ volatile("msr DBGWCR12_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[12]));
    case 12:
        __asm__ volatile("msr DBGWVR11_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[11]));
        __asm__ volatile("msr DBGWCR11_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[11]));
    case 11:
        __asm__ volatile("msr DBGWVR10_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[10]));
        __asm__ volatile("msr DBGWCR10_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[10]));
    case 10:
        __asm__ volatile("msr DBGWVR9_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[9]));
        __asm__ volatile("msr DBGWCR9_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[9]));
    case 9:
        __asm__ volatile("msr DBGWVR8_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[8]));
        __asm__ volatile("msr DBGWCR8_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[8]));
    case 8:
        __asm__ volatile("msr DBGWVR7_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[7]));
        __asm__ volatile("msr DBGWCR7_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[7]));
    case 7:
        __asm__ volatile("msr DBGWVR6_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[6]));
        __asm__ volatile("msr DBGWCR6_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[6]));
    case 6:
        __asm__ volatile("msr DBGWVR5_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[5]));
        __asm__ volatile("msr DBGWCR5_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[5]));
    case 5:
        __asm__ volatile("msr DBGWVR4_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[4]));
        __asm__ volatile("msr DBGWCR4_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[4]));
    case 4:
        __asm__ volatile("msr DBGWVR3_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[3]));
        __asm__ volatile("msr DBGWCR3_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[3]));
    case 3:
        __asm__ volatile("msr DBGWVR2_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[2]));
        __asm__ volatile("msr DBGWCR2_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[2]));
    case 2:
        __asm__ volatile("msr DBGWVR1_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[1]));
        __asm__ volatile("msr DBGWCR1_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[1]));
    case 1:
        __asm__ volatile("msr DBGWVR0_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wvr[0]));
        __asm__ volatile("msr DBGWCR0_EL1, %0" : : "r"((uint64_t)debug_state->uds.ds32.wcr[0]));
    default:
        break;
    }

    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->uds.ds32.bcr[i]) {
            set_mde = 1;
            break;
        }
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->uds.ds32.wcr[i]) {
            set_mde = 1;
            break;
        }
    }

    /*
     * Breakpoint/Watchpoint Enable
     */
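    /*
     * Editor's note: per the ARMv8 architecture, MDE is bit 15 of MDSCR_EL1
     * and SS is bit 0 — hence the 0x8000 and 0x1 masks used below.
     */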
    if (set_mde) {

        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state |= 0x8000; // MDSCR_EL1[MDE]
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

    } else {

        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state &= ~0x8000;
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

    }

    /*
     * Software debug single step enable
     */
    if (debug_state->uds.ds32.mdscr_el1 & 0x1) {

        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state = (state & ~0x8000) | 0x1; // clear MDE, set SS: no breakpoints/watchpoints while single-stepping
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

        set_saved_state_cpsr((current_thread()->machine.upcb),
                             get_saved_state_cpsr((current_thread()->machine.upcb)) | PSR64_SS);

    } else {

        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state &= ~0x1;
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

#if SINGLE_STEP_RETIRE_ERRATA
        // Workaround for radar 20619637
        __builtin_arm_isb(ISB_SY);
#endif
    }

    (void) ml_set_interrupts_enabled(intr);

    return;
}

void
arm_debug_set64(arm_debug_state_t *debug_state)
{
    struct cpu_data *cpu_data_ptr;
    arm_debug_info_t *debug_info = arm_debug_info();
    volatile uint64_t state;
    boolean_t intr, set_mde = 0;
    arm_debug_state_t off_state;
    uint32_t i;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    // Set current user debug
    cpu_data_ptr->cpu_user_debug = debug_state;

    if (NULL == debug_state) {
        bzero(&off_state, sizeof(off_state));
        debug_state = &off_state;
    }

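    /*
     * As in arm_debug_set32(), this switch and the watchpoint switch below
     * fall through intentionally, writing every register pair from N-1
     * down to 0.
     */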
    switch (debug_info->num_breakpoint_pairs) {
    case 16:
        __asm__ volatile("msr DBGBVR15_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[15]));
        __asm__ volatile("msr DBGBCR15_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[15]));
    case 15:
        __asm__ volatile("msr DBGBVR14_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[14]));
        __asm__ volatile("msr DBGBCR14_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[14]));
    case 14:
        __asm__ volatile("msr DBGBVR13_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[13]));
        __asm__ volatile("msr DBGBCR13_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[13]));
    case 13:
        __asm__ volatile("msr DBGBVR12_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[12]));
        __asm__ volatile("msr DBGBCR12_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[12]));
    case 12:
        __asm__ volatile("msr DBGBVR11_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[11]));
        __asm__ volatile("msr DBGBCR11_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[11]));
    case 11:
        __asm__ volatile("msr DBGBVR10_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[10]));
        __asm__ volatile("msr DBGBCR10_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[10]));
    case 10:
        __asm__ volatile("msr DBGBVR9_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[9]));
        __asm__ volatile("msr DBGBCR9_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[9]));
    case 9:
        __asm__ volatile("msr DBGBVR8_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[8]));
        __asm__ volatile("msr DBGBCR8_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[8]));
    case 8:
        __asm__ volatile("msr DBGBVR7_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[7]));
        __asm__ volatile("msr DBGBCR7_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[7]));
    case 7:
        __asm__ volatile("msr DBGBVR6_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[6]));
        __asm__ volatile("msr DBGBCR6_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[6]));
    case 6:
        __asm__ volatile("msr DBGBVR5_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[5]));
        __asm__ volatile("msr DBGBCR5_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[5]));
    case 5:
        __asm__ volatile("msr DBGBVR4_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[4]));
        __asm__ volatile("msr DBGBCR4_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[4]));
    case 4:
        __asm__ volatile("msr DBGBVR3_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[3]));
        __asm__ volatile("msr DBGBCR3_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[3]));
    case 3:
        __asm__ volatile("msr DBGBVR2_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[2]));
        __asm__ volatile("msr DBGBCR2_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[2]));
    case 2:
        __asm__ volatile("msr DBGBVR1_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[1]));
        __asm__ volatile("msr DBGBCR1_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[1]));
    case 1:
        __asm__ volatile("msr DBGBVR0_EL1, %0" : : "r"(debug_state->uds.ds64.bvr[0]));
        __asm__ volatile("msr DBGBCR0_EL1, %0" : : "r"(debug_state->uds.ds64.bcr[0]));
    default:
        break;
    }

    switch (debug_info->num_watchpoint_pairs) {
    case 16:
        __asm__ volatile("msr DBGWVR15_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[15]));
        __asm__ volatile("msr DBGWCR15_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[15]));
    case 15:
        __asm__ volatile("msr DBGWVR14_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[14]));
        __asm__ volatile("msr DBGWCR14_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[14]));
    case 14:
        __asm__ volatile("msr DBGWVR13_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[13]));
        __asm__ volatile("msr DBGWCR13_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[13]));
    case 13:
        __asm__ volatile("msr DBGWVR12_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[12]));
        __asm__ volatile("msr DBGWCR12_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[12]));
    case 12:
        __asm__ volatile("msr DBGWVR11_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[11]));
        __asm__ volatile("msr DBGWCR11_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[11]));
    case 11:
        __asm__ volatile("msr DBGWVR10_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[10]));
        __asm__ volatile("msr DBGWCR10_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[10]));
    case 10:
        __asm__ volatile("msr DBGWVR9_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[9]));
        __asm__ volatile("msr DBGWCR9_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[9]));
    case 9:
        __asm__ volatile("msr DBGWVR8_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[8]));
        __asm__ volatile("msr DBGWCR8_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[8]));
    case 8:
        __asm__ volatile("msr DBGWVR7_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[7]));
        __asm__ volatile("msr DBGWCR7_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[7]));
    case 7:
        __asm__ volatile("msr DBGWVR6_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[6]));
        __asm__ volatile("msr DBGWCR6_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[6]));
    case 6:
        __asm__ volatile("msr DBGWVR5_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[5]));
        __asm__ volatile("msr DBGWCR5_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[5]));
    case 5:
        __asm__ volatile("msr DBGWVR4_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[4]));
        __asm__ volatile("msr DBGWCR4_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[4]));
    case 4:
        __asm__ volatile("msr DBGWVR3_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[3]));
        __asm__ volatile("msr DBGWCR3_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[3]));
    case 3:
        __asm__ volatile("msr DBGWVR2_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[2]));
        __asm__ volatile("msr DBGWCR2_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[2]));
    case 2:
        __asm__ volatile("msr DBGWVR1_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[1]));
        __asm__ volatile("msr DBGWCR1_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[1]));
    case 1:
        __asm__ volatile("msr DBGWVR0_EL1, %0" : : "r"(debug_state->uds.ds64.wvr[0]));
        __asm__ volatile("msr DBGWCR0_EL1, %0" : : "r"(debug_state->uds.ds64.wcr[0]));
    default:
        break;
    }

    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->uds.ds64.bcr[i]) {
            set_mde = 1;
            break;
        }
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->uds.ds64.wcr[i]) {
            set_mde = 1;
            break;
        }
    }

    /*
     * Breakpoint/Watchpoint Enable
     */
    if (set_mde) {

        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state |= 0x8000; // MDSCR_EL1[MDE]
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

    }

    /*
     * Software debug single step enable
     */
    if (debug_state->uds.ds64.mdscr_el1 & 0x1) {

        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state = (state & ~0x8000) | 0x1; // clear MDE, set SS: no breakpoints/watchpoints while single-stepping
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

        set_saved_state_cpsr((current_thread()->machine.upcb),
                             get_saved_state_cpsr((current_thread()->machine.upcb)) | PSR64_SS);

    } else {

        __asm__ volatile("mrs %0, MDSCR_EL1" : "=r"(state));
        state &= ~0x1;
        __asm__ volatile("msr MDSCR_EL1, %0" : : "r"(state));

#if SINGLE_STEP_RETIRE_ERRATA
        // Workaround for radar 20619637
        __builtin_arm_isb(ISB_SY);
#endif
    }

    (void) ml_set_interrupts_enabled(intr);

    return;
}

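/*
 * Editor's note, from the dispatch below: a NULL debug_state disables the
 * hardware debug state (as machine_thread_destroy does); the flavor is then
 * taken from the current thread's bitness, and either setter simply programs
 * a zeroed off_state.
 */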
void
arm_debug_set(arm_debug_state_t *debug_state)
{
    if (debug_state) {
        switch (debug_state->dsh.flavor) {
        case ARM_DEBUG_STATE32:
            arm_debug_set32(debug_state);
            break;
        case ARM_DEBUG_STATE64:
            arm_debug_set64(debug_state);
            break;
        default:
            panic("arm_debug_set");
            break;
        }
    } else {
        if (thread_is_64bit(current_thread()))
            arm_debug_set64(debug_state);
        else
            arm_debug_set32(debug_state);
    }
}

#define VM_MAX_ADDRESS32 ((vm_address_t) 0x80000000)
boolean_t
debug_legacy_state_is_valid(arm_legacy_debug_state_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
            return FALSE;
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
            return FALSE;
    }
    return TRUE;
}

boolean_t
debug_state_is_valid32(arm_debug_state32_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && VM_MAX_ADDRESS32 <= debug_state->bvr[i])
            return FALSE;
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && VM_MAX_ADDRESS32 <= debug_state->wvr[i])
            return FALSE;
    }
    return TRUE;
}

boolean_t
debug_state_is_valid64(arm_debug_state64_t *debug_state)
{
    arm_debug_info_t *debug_info = arm_debug_info();
    uint32_t i;
    for (i = 0; i < debug_info->num_breakpoint_pairs; i++) {
        if (0 != debug_state->bcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->bvr[i])
            return FALSE;
    }

    for (i = 0; i < debug_info->num_watchpoint_pairs; i++) {
        if (0 != debug_state->wcr[i] && MACH_VM_MAX_ADDRESS <= debug_state->wvr[i])
            return FALSE;
    }
    return TRUE;
}

/*
 * Duplicate one arm_debug_state_t to another. "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_legacy_debug_state(
    arm_legacy_debug_state_t *src,
    arm_legacy_debug_state_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_legacy_debug_state_t));
}

void
copy_debug_state32(
    arm_debug_state32_t *src,
    arm_debug_state32_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_debug_state32_t));
}

void
copy_debug_state64(
    arm_debug_state64_t *src,
    arm_debug_state64_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_debug_state64_t));
}

kern_return_t
machine_thread_set_tsd_base(
    thread_t thread,
    mach_vm_offset_t tsd_base)
{

    if (thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    if (tsd_base & MACHDEP_CPUNUM_MASK) {
        return KERN_INVALID_ARGUMENT;
    }

    if (thread_is_64bit(thread)) {
        if (tsd_base > vm_map_max(thread->map))
            tsd_base = 0ULL;
    } else {
        if (tsd_base > UINT32_MAX)
            tsd_base = 0ULL;
    }

    thread->machine.cthread_self = tsd_base;

    /* For current thread, make the TSD base active immediately */
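    /*
     * Editor's note, inferred from the code: TPIDRRO_EL0 packs the CPU
     * number into its low bits (MACHDEP_CPUNUM_MASK); the alignment check
     * above guarantees that OR-ing tsd_base with cpunum is lossless.
     */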
    if (thread == current_thread()) {
        uint64_t cpunum, tpidrro_el0;

        mp_disable_preemption();
        tpidrro_el0 = get_tpidrro();
        cpunum = tpidrro_el0 & (MACHDEP_CPUNUM_MASK);
        set_tpidrro(tsd_base | cpunum);
        mp_enable_preemption();

    }

    return KERN_SUCCESS;
}