[apple/xnu.git] / osfmk / arm / pcb.c @ 2ec9f9dcbca8172178306e8a33e2f94f7bb169d4
/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <debug.h>

#include <types.h>

#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/spl.h>
#include <kern/machine.h>
#include <kern/kalloc.h>
#include <kern/kpc.h>

#include <arm/proc_reg.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

extern int debug_task;

zone_t ads_zone; /* zone for debug_state area */

/*
 * Routine: consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}

/*
 * Routine: machine_switch_context
 *
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
	thread_t retval;
	cpu_data_t *cpu_data_ptr;

#define machine_switch_context_kprintf(x...) /* kprintf("machine_switch_context: " x) */

	cpu_data_ptr = getCpuDatap();
	if (old == new) {
		panic("machine_switch_context");
	}

	kpc_off_cpu(old);

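	/* Activate the incoming thread's address space on this CPU. */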
	pmap_set_pmap(new->map->pmap, new);

	new->machine.CpuDatap = cpu_data_ptr;

#if __SMP__
	/* TODO: Should this be ordered? */
	old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
	new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;
#endif /* __SMP__ */

	machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);
	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}

/*
 * Routine: machine_thread_create
 *
 */
kern_return_t
machine_thread_create(
	thread_t thread,
#if !__ARM_USER_PROTECT__
	__unused
#endif
	task_t task)
{
#define machine_thread_create_kprintf(x...) /* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.cthread_data = 0;
#if __ARM_USER_PROTECT__
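	/*
	 * With __ARM_USER_PROTECT__, kernel and user mappings are kept in
	 * separate translation tables, so cache per-thread copies of the
	 * kernel and user translation table bases (and the user ASID) for
	 * use when switching between the two address spaces.
	 */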
	{
		struct pmap *new_pmap = vm_map_pmap(task->map);

		thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP;
		thread->machine.asid = new_pmap->asid;
		if (new_pmap->tte_index_max == NTTES) {
			thread->machine.uptw_ttc = 2;
		} else {
			thread->machine.uptw_ttc = 1;
		}
		thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
	}
#endif
	machine_thread_state_initialize(thread);

	return (KERN_SUCCESS);
}

/*
 * Routine: machine_thread_destroy
 *
 */
void
machine_thread_destroy(
	thread_t thread)
{
	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}
		zfree(ads_zone, thread->machine.DebugData);
	}
}

/*
 * Routine: machine_thread_init
 *
 */
void
machine_thread_init(void)
{
	ads_zone = zinit(sizeof(arm_debug_state_t),
	    THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	    THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	    "arm debug state");
}

/*
 * Routine: get_useraddr
 *
 */
user_addr_t
get_useraddr(void)
{
	return (current_thread()->machine.PcbData.pc);
}

/*
 * Routine: machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(
	thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return (stack);
}

/*
 * Routine: machine_stack_attach
 *
 */
void
machine_stack_attach(
	thread_t thread,
	vm_offset_t stack)
{
	struct arm_saved_state *savestate;

#define machine_stack_attach_kprintf(x...) /* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);
	savestate = (struct arm_saved_state *) thread->machine.kstackptr;

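	/*
	 * Seed the saved state so that the first switch to this thread
	 * "returns" into thread_continue in SVC mode with interrupts
	 * masked.
	 */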
	savestate->lr = (uint32_t) thread_continue;
	savestate->sp = thread->machine.kstackptr;
	savestate->r[7] = 0x0UL;
	savestate->r[9] = (uint32_t) NULL;
	savestate->cpsr = PSR_SVC_MODE | PSR_INTMASK;
	machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}

/*
 * Routine: machine_stack_handoff
 *
 */
void
machine_stack_handoff(
	thread_t old,
	thread_t new)
{
	vm_offset_t stack;
	cpu_data_t *cpu_data_ptr;

	kpc_off_cpu(old);

	stack = machine_stack_detach(old);
	cpu_data_ptr = getCpuDatap();
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
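	/*
	 * If the stack we handed off was the old thread's reserved stack,
	 * trade reservations so that both threads still own one.
	 */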
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	pmap_set_pmap(new->map->pmap, new);
	new->machine.CpuDatap = cpu_data_ptr;

#if __SMP__
	/* TODO: Should this be ordered? */
	old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
	new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;
#endif /* __SMP__ */

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}

/*
 * Routine: call_continuation
 *
 */
void
call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...) /* kprintf("call_continuation_kprintf: " x) */

	call_continuation_kprintf("thread = %x continuation = %x, stack = %x\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}

void
arm_debug_set(arm_debug_state_t *debug_state)
{
	/*
	 * If this CPU supports the memory-mapped debug interface, use it;
	 * otherwise attempt the Extended CP14 interface. The two routines
	 * need to be kept in sync, functionality-wise.
	 */
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (debug_info->memory_mapped_core_debug) {
		int i;
		uintptr_t debug_map = cpu_data_ptr->cpu_debug_interface_map;

		// unlock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;

		// read DBGPRSR to clear the sticky power-down bit (necessary to access debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGPRSR);

		// enable monitor mode (needed to set and use debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) |= ARM_DBGDSCR_MDBGEN;

		// first turn off all breakpoints/watchpoints
		for (i = 0; i < 16; i++) {
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = 0;
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = 0;
		}

		// no new debug state to install: disable monitor mode again
		if (debug_state == NULL) {
			*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) &= ~ARM_DBGDSCR_MDBGEN;
		} else {
			for (i = 0; i < 16; i++) {
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBVR))[i] = debug_state->bvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = debug_state->bcr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWVR))[i] = debug_state->wvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = debug_state->wcr[i];
			}
		}

		// lock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = 0;
	} else if (debug_info->coprocessor_core_debug) {
		arm_debug_set_cp14(debug_state);
	}

	(void) ml_set_interrupts_enabled(intr);
}

/*
 * Duplicate one arm_debug_state_t to another. The "all" parameter
 * is ignored in the case of ARM -- is this the right assumption?
 */
void
copy_debug_state(
	arm_debug_state_t *src,
	arm_debug_state_t *target,
	__unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state_t));
}

kern_return_t
machine_thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base & 0x3) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base > UINT32_MAX) {
		tsd_base = 0ULL;
	}

	thread->machine.cthread_self = tsd_base;

	/* For current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		mp_disable_preemption();
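		/*
		 * CP15 c13, c0, 3 is TPIDRURO, the user read-only thread ID
		 * register. Preserve the low two bits of the existing value
		 * and merge in the (4-byte-aligned) TSD base.
		 */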
		__asm__ volatile (
			"mrc p15, 0, r6, c13, c0, 3\n"
			"and r6, r6, #3\n"
			"orr r6, r6, %0\n"
			"mcr p15, 0, r6, c13, c0, 3\n"
			: /* output */
			: "r"((uint32_t)tsd_base) /* input */
			: "r6" /* clobbered register */
			);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}

void
machine_tecs(__unused thread_t thr)
{
}

int
machine_csv(__unused cpuvn_e cve)
{
	return 0;
}