/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>

#include <arm/proc_reg.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

extern int debug_task;

/* zone for debug_state area */
ZONE_DECLARE(ads_zone, "arm debug state", sizeof(arm_debug_state_t), ZC_NONE);

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}

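/*
 * Routine: machine_thread_switch_cpu_data
 *	Hand the per-CPU data pointers from the outgoing thread to the
 *	incoming one; called from both the full context switch and the
 *	stack handoff paths below.
 */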
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base = old->machine.pcpu_data_base;

	/* TODO: Should this be ordered? */

	/*
	 * arm relies on CpuDatap being set for a thread that has run,
	 * so we only reset pcpu_data_base.
	 */
	old->machine.pcpu_data_base = -1;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base = base;
}

/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
	thread_t retval;

#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	if (old == new) {
		panic("machine_switch_context");
	}

	kpc_off_cpu(old);

	/*
	 * If the thread is preempted while performing cache or TLB maintenance,
	 * it may be migrated to a different CPU between the completion of the relevant
	 * maintenance instruction and the synchronizing DSB. ARM requires that the
	 * synchronizing DSB be issued *on the PE that issued the maintenance instruction*
	 * in order to guarantee completion of the instruction and visibility of its effects.
	 * Issue the DSB here to enforce that guarantee. Note that due to __ARM_USER_PROTECT__,
	 * pmap_set_pmap() will not update TTBR0 (which would ordinarily include a DSB).
	 */
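	/*
	 * Illustrative sketch (hypothetical instruction sequence) of the
	 * hazard being closed here:
	 *
	 *   CPU 0:  mcr p15, 0, r0, c7, c14, 1   @ DCCIMVAC: clean/invalidate line
	 *           ... preempted and migrated ...
	 *   CPU 1:  dsb ish                      @ cannot complete CPU 0's maintenance
	 *
	 * The DSB below runs on the PE the thread is leaving, closing the window.
	 */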
	__builtin_arm_dsb(DSB_ISH);
	pmap_set_pmap(new->map->pmap, new);

	machine_thread_switch_cpu_data(old, new);

	machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);
	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}

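/*
 * Routine: machine_thread_on_core
 *	A thread is on core iff it currently owns a per-CPU data base;
 *	machine_thread_switch_cpu_data() resets pcpu_data_base to -1 when
 *	the thread comes off core.
 */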
boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.pcpu_data_base != -1;
}

/*
 * Routine: machine_thread_create
 *
 */
kern_return_t
machine_thread_create(
	thread_t thread,
#if !__ARM_USER_PROTECT__
	__unused
#endif
	task_t task)
{
#define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset to -1 will cause any attempt to use it to panic
		thread->machine.pcpu_data_base = -1;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
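	/*
	 * Under __ARM_USER_PROTECT__ the kernel and user address spaces use
	 * separate translation tables, so cache the kernel and user TTBR
	 * values and the ASID in the thread for the low-level switch code.
	 */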
#if __ARM_USER_PROTECT__
	{
		struct pmap *new_pmap = vm_map_pmap(task->map);

		thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP;
		thread->machine.asid = new_pmap->hw_asid;
		thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
	}
#endif
	machine_thread_state_initialize(thread);

	return KERN_SUCCESS;
}

/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(
	thread_t thread)
{
	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}
		zfree(ads_zone, thread->machine.DebugData);
	}
}


/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
}

/*
 * Routine: machine_thread_template_init
 *
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}

/*
 * Routine: get_useraddr
 *	Return the current thread's saved user-mode program counter.
 */
user_addr_t
get_useraddr(void)
{
	return current_thread()->machine.PcbData.pc;
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(
	thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return stack;
}


/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(
	thread_t thread,
	vm_offset_t stack)
{
	struct arm_saved_state *savestate;

#define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);
	savestate = (struct arm_saved_state *) thread->machine.kstackptr;

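	/*
	 * Seed a minimal saved state so that Switch_context() "returns" into
	 * thread_continue on this new stack, in SVC mode with interrupts masked.
	 */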
	savestate->lr = (uint32_t) thread_continue;
	savestate->sp = thread->machine.kstackptr;
	savestate->r[7] = 0x0UL;
	savestate->r[9] = (uint32_t) NULL;
	savestate->cpsr = PSR_SVC_MODE | PSR_INTMASK;
	vfp_state_initialize(&savestate->VFPdata);
	machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}


/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(
	thread_t old,
	thread_t new)
{
	vm_offset_t stack;

	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
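	/*
	 * If old was running on its reserved stack, trade reserved stacks so
	 * that each thread still owns exactly one.
	 */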
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	/*
	 * If the thread is preempted while performing cache or TLB maintenance,
	 * it may be migrated to a different CPU between the completion of the relevant
	 * maintenance instruction and the synchronizing DSB. ARM requires that the
	 * synchronizing DSB be issued *on the PE that issued the maintenance instruction*
	 * in order to guarantee completion of the instruction and visibility of its effects.
	 * Issue the DSB here to enforce that guarantee, as in machine_switch_context()
	 * above. Note that due to __ARM_USER_PROTECT__, pmap_set_pmap() will not
	 * update TTBR0 (which would ordinarily include a DSB).
	 */
	__builtin_arm_dsb(DSB_ISH);
	pmap_set_pmap(new->map->pmap, new);

	machine_thread_switch_cpu_data(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}


/*
 * Routine: call_continuation
 *
 */
void
call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) \
	/* kprintf("call_continuation_kprintf: " x) */

	call_continuation_kprintf("thread = %x continuation = %x, stack = %x\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}

void
arm_debug_set(arm_debug_state_t *debug_state)
{
	/*
	 * If this CPU supports the memory-mapped debug interface, use it; otherwise
	 * attempt the Extended CP14 interface. The two routines need to be kept in
	 * sync, functionality-wise.
	 */
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (debug_info->memory_mapped_core_debug) {
		int i;
		uintptr_t debug_map = cpu_data_ptr->cpu_debug_interface_map;

		// unlock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;

		// read DBGPRSR to clear the sticky power-down bit (necessary to access debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGPRSR);

		// enable monitor mode (needed to set and use debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) |= ARM_DBGDSCR_MDBGEN;

		// first turn off all breakpoints/watchpoints
		for (i = 0; i < 16; i++) {
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = 0;
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = 0;
		}

		// with no new state to install, disable monitor mode; otherwise program the registers
		if (debug_state == NULL) {
			*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) &= ~ARM_DBGDSCR_MDBGEN;
		} else {
			for (i = 0; i < 16; i++) {
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBVR))[i] = debug_state->bvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = debug_state->bcr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWVR))[i] = debug_state->wvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = debug_state->wcr[i];
			}
		}

		// lock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = 0;
	} else if (debug_info->coprocessor_core_debug) {
		arm_debug_set_cp14(debug_state);
	}

	(void) ml_set_interrupts_enabled(intr);
}

/*
 * Duplicate one arm_debug_state_t to another. The "all" parameter
 * is ignored in the case of ARM -- is this the right assumption?
 */
void
copy_debug_state(
	arm_debug_state_t *src,
	arm_debug_state_t *target,
	__unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state_t));
}

kern_return_t
machine_thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	/* The TSD base must be 4-byte aligned */
	if (tsd_base & 0x3) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base > UINT32_MAX) {
		tsd_base = 0ULL;
	}

	thread->machine.cthread_self = tsd_base;

	/* For the current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		mp_disable_preemption();
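		/*
		 * Install the TSD base in TPIDRURO (the user-read-only thread ID
		 * register, CP15 c13/c0/3), preserving whatever the kernel keeps
		 * in its low two bits. A user-mode reader would do, e.g.:
		 *
		 *   mrc p15, 0, r0, c13, c0, 3   @ r0 = TSD base | low bits
		 *   bic r0, r0, #3               @ mask off the kernel's low bits
		 */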
		__asm__ volatile (
			"mrc p15, 0, r6, c13, c0, 3\n"
			"and r6, r6, #3\n"
			"orr r6, r6, %0\n"
			"mcr p15, 0, r6, c13, c0, 3\n"
			: /* output */
			: "r"((uint32_t)tsd_base) /* input */
			: "r6" /* clobbered register */
		);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}
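/*
 * Routines: machine_tecs / machine_csv
 *	Hooks for CPU speculative-execution vulnerability mitigations; no
 *	mitigations are required on this platform, so these are no-ops.
 */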
void
machine_tecs(__unused thread_t thr)
{
}

int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}