/*
 * osfmk/arm/pcb.c — Apple XNU (xnu-6153.101.6), ARM process control block
 * and context-switch support routines.
 */
1 /*
2 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <debug.h>
29
30 #include <types.h>
31
32 #include <mach/mach_types.h>
33 #include <mach/thread_status.h>
34 #include <mach/vm_types.h>
35
36 #include <kern/kern_types.h>
37 #include <kern/task.h>
38 #include <kern/thread.h>
39 #include <kern/misc_protos.h>
40 #include <kern/mach_param.h>
41 #include <kern/spl.h>
42 #include <kern/machine.h>
43 #include <kern/kalloc.h>
44 #include <kern/kpc.h>
45
46 #include <arm/proc_reg.h>
47 #include <arm/cpu_data_internal.h>
48 #include <arm/misc_protos.h>
49 #include <arm/cpuid.h>
50
51 #include <vm/vm_map.h>
52 #include <vm/vm_protos.h>
53
54 #include <sys/kdebug.h>
55
56 extern int debug_task;
57
58 zone_t ads_zone; /* zone for debug_state area */
59
/*
 * Routine: consider_machine_collect
 *
 * Machine-dependent memory reclamation hook: gives the pmap layer a
 * chance to garbage-collect unused page-table memory.
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}
69
/*
 * Routine: consider_machine_adjust
 *
 * Machine-dependent adjustment hook; nothing to do on this platform.
 */
void
consider_machine_adjust(void)
{
	/* Intentionally empty. */
}
78
79 /*
80 * Routine: machine_switch_context
81 *
82 */
83 thread_t
84 machine_switch_context(
85 thread_t old,
86 thread_continue_t continuation,
87 thread_t new)
88 {
89 thread_t retval;
90 cpu_data_t *cpu_data_ptr;
91
92 #define machine_switch_context_kprintf(x...) /* kprintf("machine_switch_con
93 * text: " x) */
94
95 cpu_data_ptr = getCpuDatap();
96 if (old == new)
97 panic("machine_switch_context");
98
99 kpc_off_cpu(old);
100
101 pmap_set_pmap(new->map->pmap, new);
102
103 new->machine.CpuDatap = cpu_data_ptr;
104
105 #if __SMP__
106 /* TODO: Should this be ordered? */
107 old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
108 new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;
109 #endif /* __SMP__ */
110
111 machine_switch_context_kprintf("old= %x contination = %x new = %x\n", old, continuation, new);
112 retval = Switch_context(old, continuation, new);
113 assert(retval != NULL);
114
115 return retval;
116 }
117
118 boolean_t
119 machine_thread_on_core(thread_t thread)
120 {
121 return thread->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU;
122 }
123
/*
 * Routine: machine_thread_create
 *
 * Initialize the machine-dependent portion of a newly created thread:
 * clears per-CPU linkage, preemption count and the user TLS base, and
 * (when __ARM_USER_PROTECT__ is set) caches the task pmap's TTBR/ASID
 * values on the thread.  Returns KERN_SUCCESS unconditionally.
 */
kern_return_t
machine_thread_create(
	thread_t thread,
#if !__ARM_USER_PROTECT__
	__unused
#endif
	task_t task)
{

#define machine_thread_create_kprintf(x...)	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	/* Only reset the CPU pointer if this is not the running thread. */
	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
#if __ARM_USER_PROTECT__
	{
		struct pmap *new_pmap = vm_map_pmap(task->map);

		/* Cache kernel and user translation-table bases on the thread. */
		thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP;
		thread->machine.asid = new_pmap->hw_asid;
		/* uptw_ttc encodes the user TTBR size (depends on tte_index_max). */
		if (new_pmap->tte_index_max == NTTES) {
			thread->machine.uptw_ttc = 2;
			thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
		} else {
			thread->machine.uptw_ttc = 1;
			thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep ) | TTBR_SETUP;
		}
	}
#endif
	machine_thread_state_initialize(thread);

	return (KERN_SUCCESS);
}
165
166 /*
167 * Routine: machine_thread_destroy
168 *
169 */
170 void
171 machine_thread_destroy(
172 thread_t thread)
173 {
174
175 if (thread->machine.DebugData != NULL) {
176 if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug)
177 arm_debug_set(NULL);
178 zfree(ads_zone, thread->machine.DebugData);
179 }
180 }
181
182
183 /*
184 * Routine: machine_thread_init
185 *
186 */
187 void
188 machine_thread_init(void)
189 {
190 ads_zone = zinit(sizeof(arm_debug_state_t),
191 THREAD_CHUNK * (sizeof(arm_debug_state_t)),
192 THREAD_CHUNK * (sizeof(arm_debug_state_t)),
193 "arm debug state");
194 }
195
196 /*
197 * Routine: machine_thread_template_init
198 *
199 */
200 void
201 machine_thread_template_init(thread_t __unused thr_template)
202 {
203 /* Nothing to do on this platform. */
204 }
205
206 /*
207 * Routine: get_useraddr
208 *
209 */
210 user_addr_t
211 get_useraddr()
212 {
213 return (current_thread()->machine.PcbData.pc);
214 }
215
216 /*
217 * Routine: machine_stack_detach
218 *
219 */
220 vm_offset_t
221 machine_stack_detach(
222 thread_t thread)
223 {
224 vm_offset_t stack;
225
226 KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
227 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);
228
229 stack = thread->kernel_stack;
230 thread->kernel_stack = 0;
231 thread->machine.kstackptr = 0;
232
233 return (stack);
234 }
235
236
/*
 * Routine: machine_stack_attach
 *
 * Attach a kernel stack to a thread and build an initial saved state
 * at its top, so that the first context switch into this thread
 * begins executing thread_continue in SVC mode with interrupts masked.
 */
void
machine_stack_attach(
	thread_t thread,
	vm_offset_t stack)
{
	struct arm_saved_state *savestate;

#define machine_stack_attach_kprintf(x...)	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	/* kstackptr points at the thread_kernel_state carved from the stack top. */
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);
	savestate = (struct arm_saved_state *) thread->machine.kstackptr;

	savestate->lr = (uint32_t) thread_continue;	/* resume point */
	savestate->sp = thread->machine.kstackptr;
	savestate->r[7] = 0x0UL;			/* terminate the frame-pointer chain */
	savestate->r[9] = (uint32_t) NULL;
	savestate->cpsr = PSR_SVC_MODE | PSR_INTMASK;	/* SVC mode, interrupts masked */
	vfp_state_initialize(&savestate->VFPdata);
	machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}
266
267
/*
 * Routine: machine_stack_handoff
 *
 * Hand the old thread's kernel stack directly to the new thread and
 * make the new thread current, without a full register context switch.
 */
void
machine_stack_handoff(
	thread_t old,
	thread_t new)
{
	vm_offset_t stack;
	cpu_data_t *cpu_data_ptr;

	kpc_off_cpu(old);

	/* Take the stack from the old thread and give it to the new one. */
	stack = machine_stack_detach(old);
	cpu_data_ptr = getCpuDatap();
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	/* If the handed-off stack was the old thread's reserved stack,
	 * swap reserved stacks so each thread still owns exactly one. */
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	/* Activate the new thread's address space. */
	pmap_set_pmap(new->map->pmap, new);
	new->machine.CpuDatap = cpu_data_ptr;

#if __SMP__
	/* TODO: Should this be ordered? */
	old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
	new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;
#endif /* __SMP__ */

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);

	return;
}
306
307
/*
 * Routine: call_continuation
 *
 * Invoke a thread continuation: hands off to the assembly helper,
 * which resets to the top of the current thread's kernel stack and
 * calls `continuation(parameter, wresult)` there.
 */
void
call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...)	/* kprintf("call_continuation_kprintf: " x) */

	call_continuation_kprintf("thread = %x continuation = %x, stack = %x\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
325
/*
 * Install (or, when debug_state is NULL, clear) the hardware debug
 * register state on the current CPU.  Runs with interrupts disabled
 * and records the installed state in cpu_user_debug.
 */
void arm_debug_set(arm_debug_state_t *debug_state)
{
	/* If this CPU supports the memory-mapped debug interface, use it, otherwise
	 * attempt the Extended CP14 interface. The two routines need to be kept in sync,
	 * functionality-wise.
	 */
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (debug_info->memory_mapped_core_debug) {
		int i;
		uintptr_t debug_map = cpu_data_ptr->cpu_debug_interface_map;

		// unlock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;

		// read DBGPRSR to clear the sticky power-down bit (necessary to access debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGPRSR);

		// enable monitor mode (needed to set and use debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) |= ARM_DBGDSCR_MDBGEN;

		// first turn off all breakpoints/watchpoints
		for (i = 0; i < 16; i++) {
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = 0;
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = 0;
		}

		// if (debug_state == NULL) disable monitor mode
		if (debug_state == NULL) {
			*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) &= ~ARM_DBGDSCR_MDBGEN;
		} else {
			// otherwise program the new breakpoint/watchpoint value and control registers
			for (i = 0; i < 16; i++) {
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBVR))[i] = debug_state->bvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = debug_state->bcr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWVR))[i] = debug_state->wvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = debug_state->wcr[i];
			}
		}

		// lock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = 0;

	} else if (debug_info->coprocessor_core_debug) {
		// fall back to the Extended CP14 coprocessor interface
		arm_debug_set_cp14(debug_state);
	}

	(void) ml_set_interrupts_enabled(intr);

	return;
}
384
385 /*
386 * Duplicate one arm_debug_state_t to another. "all" parameter
387 * is ignored in the case of ARM -- Is this the right assumption?
388 */
389 void
390 copy_debug_state(
391 arm_debug_state_t *src,
392 arm_debug_state_t *target,
393 __unused boolean_t all)
394 {
395 bcopy(src, target, sizeof(arm_debug_state_t));
396 }
397
398 kern_return_t
399 machine_thread_set_tsd_base(
400 thread_t thread,
401 mach_vm_offset_t tsd_base)
402 {
403
404 if (thread->task == kernel_task) {
405 return KERN_INVALID_ARGUMENT;
406 }
407
408 if (tsd_base & 0x3) {
409 return KERN_INVALID_ARGUMENT;
410 }
411
412 if (tsd_base > UINT32_MAX)
413 tsd_base = 0ULL;
414
415 thread->machine.cthread_self = tsd_base;
416
417 /* For current thread, make the TSD base active immediately */
418 if (thread == current_thread()) {
419
420 mp_disable_preemption();
421 __asm__ volatile(
422 "mrc p15, 0, r6, c13, c0, 3\n"
423 "and r6, r6, #3\n"
424 "orr r6, r6, %0\n"
425 "mcr p15, 0, r6, c13, c0, 3\n"
426 : /* output */
427 : "r"((uint32_t)tsd_base) /* input */
428 : "r6" /* clobbered register */
429 );
430 mp_enable_preemption();
431
432 }
433
434 return KERN_SUCCESS;
435 }
436
437 void
438 machine_tecs(__unused thread_t thr)
439 {
440 }
441
442 int
443 machine_csv(__unused cpuvn_e cve)
444 {
445 return 0;
446 }