/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/machine.h>

#include <arm/proc_reg.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

extern int debug_task;

/* zone for debug_state area */
ZONE_DECLARE(ads_zone, "arm debug state", sizeof(arm_debug_state_t), ZC_NONE);
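
/*
 * Per-thread DebugData is allocated from ads_zone and returned to it in
 * machine_thread_destroy() below.
 */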

/*
 * Routine: consider_machine_collect
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 */
void
consider_machine_adjust(void)
{
}
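
/*
 * Transfer the per-CPU data pointers from the outgoing thread to the
 * incoming thread; called on the context-switch and stack-handoff paths.
 */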
static inline void
machine_thread_switch_cpu_data(thread_t old, thread_t new)
{
	/*
	 * We build with -fno-strict-aliasing, so the load through temporaries
	 * is required so that this generates a single load / store pair.
	 */
	cpu_data_t *datap = old->machine.CpuDatap;
	vm_offset_t base = old->machine.pcpu_data_base;

	/* TODO: Should this be ordered? */

	/*
	 * arm relies on CpuDatap being set for a thread that has run,
	 * so we only reset pcpu_data_base.
	 */
	old->machine.pcpu_data_base = -1;

	new->machine.CpuDatap = datap;
	new->machine.pcpu_data_base = base;
}

/*
 * Routine: machine_switch_context
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
	thread_t retval;

#define machine_switch_context_kprintf(x...) \
	/* kprintf("machine_switch_context: " x) */

	if (old == new) {
		panic("machine_switch_context");
	}

	/*
	 * If the thread is preempted while performing cache or TLB maintenance,
	 * it may be migrated to a different CPU between the completion of the relevant
	 * maintenance instruction and the synchronizing DSB. ARM requires that the
	 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
	 * in order to guarantee completion of the instruction and visibility of its effects.
	 * Issue DSB here to enforce that guarantee. Note that due to __ARM_USER_PROTECT__,
	 * pmap_set_pmap() will not update TTBR0 (which ordinarily would include DSB).
	 */
	__builtin_arm_dsb(DSB_ISH);
	pmap_set_pmap(new->map->pmap, new);

	machine_thread_switch_cpu_data(old, new);

	machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);
	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}
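
/*
 * A thread is considered on-core when it has a valid pcpu_data_base;
 * machine_thread_switch_cpu_data() resets the field to -1 when the
 * thread is switched out.
 */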
boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.pcpu_data_base != -1;
}

/*
 * Routine: machine_thread_create
 */
kern_return_t
machine_thread_create(
	thread_t thread,
#if !__ARM_USER_PROTECT__
	__unused
#endif
	task_t task)
{
#define machine_thread_create_kprintf(x...)	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
		// setting this offset will cause trying to use it to panic
		thread->machine.pcpu_data_base = -1;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
#if __ARM_USER_PROTECT__
	{
		struct pmap *new_pmap = vm_map_pmap(task->map);

		thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP;
		thread->machine.asid = new_pmap->hw_asid;
		thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
	}
#endif
	machine_thread_state_initialize(thread);

	return KERN_SUCCESS;
}

/*
 * Routine: machine_thread_destroy
 */
void
machine_thread_destroy(
	thread_t thread)
{
	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}
		zfree(ads_zone, thread->machine.DebugData);
	}
}

/*
 * Routine: machine_thread_init
 */
void
machine_thread_init(void)
{
}

/*
 * Routine: machine_thread_template_init
 */
void
machine_thread_template_init(thread_t __unused thr_template)
{
	/* Nothing to do on this platform. */
}

/*
 * Routine: get_useraddr
 */
user_addr_t
get_useraddr()
{
	return current_thread()->machine.PcbData.pc;
}

/*
 * Routine: machine_stack_detach
 */
vm_offset_t
machine_stack_detach(
	thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return stack;
}

/*
 * Routine: machine_stack_attach
 */
void
machine_stack_attach(
	thread_t thread,
	vm_offset_t stack)
{
	struct arm_saved_state *savestate;

#define machine_stack_attach_kprintf(x...)	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);
	savestate = (struct arm_saved_state *) thread->machine.kstackptr;

	savestate->lr = (uint32_t) thread_continue;
	savestate->sp = thread->machine.kstackptr;
	savestate->r[7] = 0x0UL;
	savestate->r[9] = (uint32_t) NULL;
	savestate->cpsr = PSR_SVC_MODE | PSR_INTMASK;
	vfp_state_initialize(&savestate->VFPdata);
	machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}

/*
 * Routine: machine_stack_handoff
 */
void
machine_stack_handoff(
	thread_t old,
	thread_t new)
{
	vm_offset_t stack;

	stack = machine_stack_detach(old);
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	/*
	 * If the thread is preempted while performing cache or TLB maintenance,
	 * it may be migrated to a different CPU between the completion of the relevant
	 * maintenance instruction and the synchronizing DSB. ARM requires that the
	 * synchronizing DSB must be issued *on the PE that issued the maintenance instruction*
	 * in order to guarantee completion of the instruction and visibility of its effects.
	 * Issue DSB here to enforce that guarantee. Note that due to __ARM_USER_PROTECT__,
	 * pmap_set_pmap() will not update TTBR0 (which ordinarily would include DSB).
	 */
	__builtin_arm_dsb(DSB_ISH);
	pmap_set_pmap(new->map->pmap, new);

	machine_thread_switch_cpu_data(old, new);

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}

/*
 * Routine: call_continuation
 */
void
call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...)	/* kprintf("call_continuation_kprintf: " x) */

	call_continuation_kprintf("thread = %x continuation = %x, stack = %x\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
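
/*
 * Routine: arm_debug_set
 *	Install the given debug state (breakpoint/watchpoint registers) on
 *	the current CPU, or disable hardware debug if debug_state is NULL.
 */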
void
arm_debug_set(arm_debug_state_t *debug_state)
{
	/* If this CPU supports the memory-mapped debug interface, use it, otherwise
	 * attempt the Extended CP14 interface.  The two routines need to be kept in sync,
	 * functionality-wise.
	 */
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr;
	int i;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (debug_info->memory_mapped_core_debug) {
		uintptr_t debug_map = cpu_data_ptr->cpu_debug_interface_map;

		// unlock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;

		// read DBGPRSR to clear the sticky power-down bit (necessary to access debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGPRSR);

		// enable monitor mode (needed to set and use debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) |= ARM_DBGDSCR_MDBGEN;

		// first turn off all breakpoints/watchpoints
		for (i = 0; i < 16; i++) {
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = 0;
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = 0;
		}

		// if (debug_state == NULL) disable monitor mode
		if (debug_state == NULL) {
			*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) &= ~ARM_DBGDSCR_MDBGEN;
		} else {
			for (i = 0; i < 16; i++) {
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBVR))[i] = debug_state->bvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = debug_state->bcr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWVR))[i] = debug_state->wvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = debug_state->wcr[i];
			}
		}

		// lock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = 0;
	} else if (debug_info->coprocessor_core_debug) {
		arm_debug_set_cp14(debug_state);
	}

	(void) ml_set_interrupts_enabled(intr);
}

/*
 * Duplicate one arm_debug_state_t to another.  "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_debug_state(
	arm_debug_state_t *src,
	arm_debug_state_t *target,
	__unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state_t));
}
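
/*
 * Routine: machine_thread_set_tsd_base
 *	Record the user thread-local storage base for a thread; for the
 *	current thread the value is also written to TPIDRURO immediately.
 */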
kern_return_t
machine_thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base & 0x3) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base > UINT32_MAX) {
		tsd_base = 0ULL;
	}

	thread->machine.cthread_self = tsd_base;

	/* For current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		mp_disable_preemption();
		__asm__ volatile (
			// TPIDRURO (CP15 c13/c0/3) holds the user-visible TSD base;
			// preserve its low two bits and install the new base above them.
			"mrc	p15, 0, r6, c13, c0, 3\n"
			"and	r6, r6, #3\n"
			"orr	r6, r6, %0\n"
			"mcr	p15, 0, r6, c13, c0, 3\n"
			:                          /* output */
			: "r"((uint32_t)tsd_base)  /* input */
			: "r6"                     /* clobbered register */
			);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}

void
machine_tecs(__unused thread_t thr)
{
	/* Nothing to do on this platform. */
}

int
machine_csv(__unused cpuvn_e cve)
{
	/* Nothing to do on this platform. */
	return 0;
}