/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kpc.h>

#include <arm/proc_reg.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

extern int debug_task;

/* zone for debug_state area */
ZONE_DECLARE(ads_zone, "arm debug state", sizeof(arm_debug_state_t), ZC_NONE);

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}

static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the loads through temporaries
	 * are required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base = old->machine.pcpu_data_base;

	/* TODO: Should this be ordered? */

	/*
	 * arm relies on CpuDatap being set for a thread that has run,
	 * so we only reset pcpu_data_base.
	 */
	old->machine.pcpu_data_base = -1;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base = base;
}
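
/*
 * Illustrative sketch (not from the original source, never compiled): the
 * direct-copy form the comment above is guarding against. Under
 * -fno-strict-aliasing the compiler must assume the stores into
 * new->machine may alias old->machine, forcing it to re-load between
 * stores instead of emitting one paired load and one paired store.
 */
#if 0
static inline void
example_switch_cpu_data_direct(thread_t old, thread_t new)
{
	new->machine.CpuDatap = old->machine.CpuDatap;              /* load, store */
	new->machine.pcpu_data_base = old->machine.pcpu_data_base;  /* re-load, store */
	old->machine.pcpu_data_base = -1;
}
#endif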

/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
	thread_t retval;

#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	if (old == new) {
		panic("machine_switch_context");
	}

	kpc_off_cpu(old);

	/*
	 * If the thread is preempted while performing cache or TLB maintenance,
	 * it may be migrated to a different CPU between the completion of the relevant
	 * maintenance instruction and the synchronizing DSB. ARM requires that the
	 * synchronizing DSB be issued *on the PE that issued the maintenance instruction*
	 * in order to guarantee completion of the instruction and visibility of its effects.
	 * Issue DSB here to enforce that guarantee. Note that due to __ARM_USER_PROTECT__,
	 * pmap_set_pmap() will not update TTBR0 (which ordinarily would include a DSB).
	 */
	__builtin_arm_dsb(DSB_ISH);
	pmap_set_pmap(new->map->pmap, new);

	machine_thread_switch_cpu_data(old, new);

	machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);
	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}
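
/*
 * Hazard sketch (illustrative only, never compiled): the migration window
 * the DSB above closes, assuming a clean-by-MVA operation on the old CPU.
 */
#if 0
static void
example_maintenance_migration_hazard(vm_offset_t va)
{
	/* CPU0: clean one data-cache line to PoC (DCCMVAC) */
	__asm__ volatile ("mcr p15, 0, %0, c7, c10, 1" : : "r"(va));
	/* ...preemption here could migrate the thread to CPU1... */
	/* CPU1: this DSB would NOT guarantee completion of CPU0's clean */
	__builtin_arm_dsb(DSB_ISH);
}
#endif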

boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.pcpu_data_base != -1;
}
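
/*
 * Usage sketch (hypothetical caller, not in this file): the -1 sentinel
 * stored by machine_thread_switch_cpu_data() is what makes this test
 * valid even though CpuDatap deliberately stays behind on the thread.
 */
#if 0
static void
example_require_off_core(thread_t thread)
{
	assert(!machine_thread_on_core(thread));
}
#endif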

/*
 * Routine: machine_thread_create
 *
 */
kern_return_t
machine_thread_create(
	thread_t thread,
#if !__ARM_USER_PROTECT__
	__unused
#endif
	task_t task)
{
#define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this sentinel offset makes any attempt to use it panic
		thread->machine.pcpu_data_base = -1;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
#if __ARM_USER_PROTECT__
	{
		struct pmap *new_pmap = vm_map_pmap(task->map);

		thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP;
		thread->machine.asid = new_pmap->hw_asid;
		thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
	}
#endif
	machine_thread_state_initialize(thread);

	return KERN_SUCCESS;
}
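
/*
 * Note (inferred from the fields set above, not stated in this file):
 * under __ARM_USER_PROTECT__ each thread caches the kernel and user
 * translation-table bases with the TTBR attribute bits (TTBR_SETUP)
 * pre-merged, along with the pmap's hardware ASID, so the low-level
 * switch code can program the translation registers directly without
 * walking back to the pmap.
 */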

/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(
	thread_t thread)
{
	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}
		zfree(ads_zone, thread->machine.DebugData);
	}
}
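
/*
 * Allocation counterpart (sketch; the real zalloc sits in the thread
 * set-state path, outside this file): DebugData comes from ads_zone the
 * first time debug state is attached to a thread, and the zfree above is
 * where it is returned, keeping the zone balanced.
 */
#if 0
static arm_debug_state_t *
example_get_debug_state(thread_t thread)
{
	if (thread->machine.DebugData == NULL) {
		thread->machine.DebugData = zalloc(ads_zone);
		bzero(thread->machine.DebugData, sizeof(arm_debug_state_t));
	}
	return thread->machine.DebugData;
}
#endif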


/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
}

/*
 * Routine: machine_thread_template_init
 *
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}

/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr(void)
{
	return current_thread()->machine.PcbData.pc;
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(
	thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return stack;
}


/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(
	thread_t thread,
	vm_offset_t stack)
{
	struct arm_saved_state *savestate;

#define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);
	savestate = (struct arm_saved_state *) thread->machine.kstackptr;

	savestate->lr = (uint32_t) thread_continue;
	savestate->sp = thread->machine.kstackptr;
	savestate->r[7] = 0x0UL;
	savestate->r[9] = (uint32_t) NULL;
	savestate->cpsr = PSR_SVC_MODE | PSR_INTMASK;
	vfp_state_initialize(&savestate->VFPdata);
	machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}
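
/*
 * Layout sketch (derived from the arithmetic above): kstackptr addresses
 * the thread_kernel_state carved out of the top of the kernel stack, and
 * the saved sp/lr let Switch_context() resume in thread_continue with a
 * valid frame.
 *
 *   stack + kernel_stack_size -> +------------------------------+
 *                                | struct thread_kernel_state   |
 *   thread->machine.kstackptr -> +------------------------------+ <- saved sp
 *                                | usable kernel stack ...      |
 *   stack                     -> +------------------------------+
 */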


/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(
	thread_t old,
	thread_t new)
{
	vm_offset_t stack;

	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	/*
	 * If the thread is preempted while performing cache or TLB maintenance,
	 * it may be migrated to a different CPU between the completion of the relevant
	 * maintenance instruction and the synchronizing DSB. ARM requires that the
	 * synchronizing DSB be issued *on the PE that issued the maintenance instruction*
	 * in order to guarantee completion of the instruction and visibility of its effects.
	 * Issue DSB here to enforce that guarantee. Note that due to __ARM_USER_PROTECT__,
	 * pmap_set_pmap() will not update TTBR0 (which ordinarily would include a DSB).
	 */
	__builtin_arm_dsb(DSB_ISH);
	pmap_set_pmap(new->map->pmap, new);

	machine_thread_switch_cpu_data(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}
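
/*
 * Unlike machine_switch_context(), which saves the outgoing register
 * context and switches stacks, the handoff path above re-parents the
 * running kernel stack from `old` to `new` and installs `new` as the
 * current thread, so no Switch_context() register save/restore is needed.
 */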


/*
 * Routine: call_continuation
 *
 */
void
call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) /* kprintf("call_continuation: " x) */

	call_continuation_kprintf("thread = %x continuation = %x, stack = %x\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}

void
arm_debug_set(arm_debug_state_t *debug_state)
{
	/*
	 * If this CPU supports the memory-mapped debug interface, use it;
	 * otherwise attempt the Extended CP14 interface. The two routines need
	 * to be kept in sync, functionality-wise.
	 */
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (debug_info->memory_mapped_core_debug) {
		int i;
		uintptr_t debug_map = cpu_data_ptr->cpu_debug_interface_map;

		// unlock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;

		// read DBGPRSR to clear the sticky power-down bit (necessary to access debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGPRSR);

		// enable monitor mode (needed to set and use debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) |= ARM_DBGDSCR_MDBGEN;

		// first turn off all breakpoints/watchpoints
		for (i = 0; i < 16; i++) {
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = 0;
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = 0;
		}

		// if there is no new debug state, also disable monitor mode
		if (debug_state == NULL) {
			*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) &= ~ARM_DBGDSCR_MDBGEN;
		} else {
			for (i = 0; i < 16; i++) {
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBVR))[i] = debug_state->bvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = debug_state->bcr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWVR))[i] = debug_state->wvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = debug_state->wcr[i];
			}
		}

		// re-lock debug registers (any value other than the access key locks them)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = 0;
	} else if (debug_info->coprocessor_core_debug) {
		arm_debug_set_cp14(debug_state);
	}

	(void) ml_set_interrupts_enabled(intr);
}
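
/*
 * Usage sketch (hypothetical values, never compiled): programming a single
 * hardware breakpoint through this interface. The BAS/PMC/E encodings
 * follow the ARMv7 debug architecture; the constants are illustrative.
 */
#if 0
static void
example_set_one_breakpoint(vm_offset_t addr)
{
	arm_debug_state_t ds;

	bzero(&ds, sizeof(ds));
	ds.bvr[0] = addr & ~0x3;   /* word-aligned breakpoint address */
	ds.bcr[0] = (0xF << 5)     /* BAS: match all four byte lanes */
	    | (0x2 << 1)           /* PMC: unprivileged (user) matches only */
	    | 0x1;                 /* E: breakpoint enabled */
	arm_debug_set(&ds);
}
#endif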

/*
 * Duplicate one arm_debug_state_t to another. "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_debug_state(
	arm_debug_state_t *src,
	arm_debug_state_t *target,
	__unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state_t));
}

kern_return_t
machine_thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base & 0x3) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base > UINT32_MAX) {
		tsd_base = 0ULL;
	}

	thread->machine.cthread_self = tsd_base;

	/* For the current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		mp_disable_preemption();
		__asm__ volatile (
			"mrc p15, 0, r6, c13, c0, 3\n"  /* read TPIDRURO */
			"and r6, r6, #3\n"              /* preserve the low two (kernel-owned) bits */
			"orr r6, r6, %0\n"              /* merge in the 4-byte-aligned TSD base */
			"mcr p15, 0, r6, c13, c0, 3\n"  /* write TPIDRURO back */
			: /* output */
			: "r"((uint32_t)tsd_base) /* input */
			: "r6" /* clobbered register */
			);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}
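
/*
 * User-side counterpart (sketch, assumption): userspace can read the TSD
 * base back from TPIDRURO, which is readable (but not writable) from user
 * mode, masking off the low bits the kernel reserves via the AND above.
 */
#if 0
static inline uint32_t
example_user_get_tsd_base(void)
{
	uint32_t val;

	__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3" : "=r"(val));
	return val & ~0x3;
}
#endif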

void
machine_tecs(__unused thread_t thr)
{
}

int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}