/*
 * Copyright (c) 2007-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/kalloc.h>

#include <arm/proc_reg.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>

extern int debug_task;

zone_t ads_zone;	/* zone for debug_state area */
/*
 * Routine: consider_machine_collect
 */
void
consider_machine_collect(void)
{
	pmap_gc();
}

/*
 * Routine: consider_machine_adjust
 */
void
consider_machine_adjust(void)
{
}
/*
 * Routine: machine_switch_context
 */
thread_t
machine_switch_context(
	thread_t old,
	thread_continue_t continuation,
	thread_t new)
{
	thread_t retval;
	cpu_data_t *cpu_data_ptr;

#define machine_switch_context_kprintf(x...)	/* kprintf("machine_switch_context: " x) */

	cpu_data_ptr = getCpuDatap();
	if (old == new) {
		panic("machine_switch_context");
	}

	pmap_set_pmap(new->map->pmap, new);

	new->machine.CpuDatap = cpu_data_ptr;

	/* TODO: Should this be ordered? */
	old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
	new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;

	machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);

	retval = Switch_context(old, continuation, new);
	assert(retval != NULL);

	return retval;
}
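/*
 * Illustrative sketch only, not from the original source: if the ON_CPU flag
 * updates in machine_switch_context() and machine_stack_handoff() ever need
 * the ordering the TODO above asks about, one option is xnu's os_atomic
 * helpers (assuming machine_thread_flags is declared so the atomics apply,
 * and that release ordering is the appropriate choice):
 *
 *	os_atomic_andnot(&old->machine.machine_thread_flags,
 *	    MACHINE_THREAD_FLAGS_ON_CPU, relaxed);
 *	os_atomic_or(&new->machine.machine_thread_flags,
 *	    MACHINE_THREAD_FLAGS_ON_CPU, release);
 */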
boolean_t
machine_thread_on_core(thread_t thread)
{
	return thread->machine.machine_thread_flags & MACHINE_THREAD_FLAGS_ON_CPU;
}
/*
 * Routine: machine_thread_create
 */
kern_return_t
machine_thread_create(
	thread_t thread,
#if !__ARM_USER_PROTECT__
	__unused
#endif
	task_t task)
{
#define machine_thread_create_kprintf(x...)	/* kprintf("machine_thread_create: " x) */

	machine_thread_create_kprintf("thread = %x\n", thread);

	if (current_thread() != thread) {
		thread->machine.CpuDatap = (cpu_data_t *)0;
	}
	thread->machine.preemption_count = 0;
	thread->machine.cthread_self = 0;
	thread->machine.cthread_data = 0;

#if __ARM_USER_PROTECT__
	{
		struct pmap *new_pmap = vm_map_pmap(task->map);

		thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP;
		thread->machine.asid = new_pmap->hw_asid;
		if (new_pmap->tte_index_max == NTTES) {
			thread->machine.uptw_ttc = 2;
			thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
		} else {
			thread->machine.uptw_ttc = 1;
			thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
		}
	}
#endif

	machine_thread_state_initialize(thread);

	return (KERN_SUCCESS);
}
/*
 * Routine: machine_thread_destroy
 */
void
machine_thread_destroy(
	thread_t thread)
{
	if (thread->machine.DebugData != NULL) {
		if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug) {
			arm_debug_set(NULL);
		}
		zfree(ads_zone, thread->machine.DebugData);
	}
}
/*
 * Routine: machine_thread_init
 */
void
machine_thread_init(void)
{
	ads_zone = zinit(sizeof(arm_debug_state_t),
	    THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	    THREAD_CHUNK * (sizeof(arm_debug_state_t)),
	    "arm debug state");
}
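/*
 * Usage sketch for ads_zone, assuming the thread_set_state() debug-state path
 * (outside this file) as the allocation site:
 *
 *	if (thread->machine.DebugData == NULL) {
 *		thread->machine.DebugData = zalloc(ads_zone);
 *	}
 *	...
 *	zfree(ads_zone, thread->machine.DebugData);	// as in machine_thread_destroy()
 */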
/*
 * Routine: get_useraddr
 */
user_addr_t
get_useraddr()
{
	return (current_thread()->machine.PcbData.pc);
}
/*
 * Routine: machine_stack_detach
 */
vm_offset_t
machine_stack_detach(
	thread_t thread)
{
	vm_offset_t stack;

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	stack = thread->kernel_stack;
	thread->kernel_stack = 0;
	thread->machine.kstackptr = 0;

	return stack;
}
/*
 * Routine: machine_stack_attach
 */
void
machine_stack_attach(
	thread_t thread,
	vm_offset_t stack)
{
	struct arm_saved_state *savestate;

#define machine_stack_attach_kprintf(x...)	/* kprintf("machine_stack_attach: " x) */

	KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
	    (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

	thread->kernel_stack = stack;
	thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	thread_initialize_kernel_state(thread);
	savestate = (struct arm_saved_state *) thread->machine.kstackptr;

	savestate->lr = (uint32_t) thread_continue;
	savestate->sp = thread->machine.kstackptr;
	savestate->r[7] = 0x0UL;
	savestate->r[9] = (uint32_t) NULL;
	savestate->cpsr = PSR_SVC_MODE | PSR_INTMASK;
	machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}
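/*
 * The fabricated saved state above is what a newly attached thread resumes
 * from: when Switch_context() restores it, the thread comes up in SVC mode
 * with interrupts masked (PSR_SVC_MODE | PSR_INTMASK), sp at the top of its
 * kernel stack, and lr aimed at thread_continue(), which then invokes the
 * scheduled continuation.
 */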
/*
 * Routine: machine_stack_handoff
 */
void
machine_stack_handoff(
	thread_t old,
	thread_t new)
{
	vm_offset_t stack;
	cpu_data_t *cpu_data_ptr;

	stack = machine_stack_detach(old);
	cpu_data_ptr = getCpuDatap();
	new->kernel_stack = stack;
	new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
	if (stack == old->reserved_stack) {
		assert(new->reserved_stack);
		old->reserved_stack = new->reserved_stack;
		new->reserved_stack = stack;
	}

	pmap_set_pmap(new->map->pmap, new);
	new->machine.CpuDatap = cpu_data_ptr;

	/* TODO: Should this be ordered? */
	old->machine.machine_thread_flags &= ~MACHINE_THREAD_FLAGS_ON_CPU;
	new->machine.machine_thread_flags |= MACHINE_THREAD_FLAGS_ON_CPU;

	machine_set_current_thread(new);
	thread_initialize_kernel_state(new);
}
/*
 * Routine: call_continuation
 */
void
call_continuation(
	thread_continue_t continuation,
	void *parameter,
	wait_result_t wresult,
	boolean_t enable_interrupts)
{
#define call_continuation_kprintf(x...)	/* kprintf("call_continuation_kprintf:" x) */

	call_continuation_kprintf("thread = %x continuation = %x, stack = %x\n", current_thread(), continuation, current_thread()->machine.kstackptr);
	Call_continuation(continuation, parameter, wresult, enable_interrupts);
}
void arm_debug_set(arm_debug_state_t *debug_state)
{
	/* If this CPU supports the memory-mapped debug interface, use it, otherwise
	 * attempt the Extended CP14 interface. The two routines need to be kept in sync,
	 * functionality-wise.
	 */
	struct cpu_data *cpu_data_ptr;
	arm_debug_info_t *debug_info = arm_debug_info();
	boolean_t intr;

	intr = ml_set_interrupts_enabled(FALSE);
	cpu_data_ptr = getCpuDatap();

	// Set current user debug
	cpu_data_ptr->cpu_user_debug = debug_state;

	if (debug_info->memory_mapped_core_debug) {
		int i;
		uintptr_t debug_map = cpu_data_ptr->cpu_debug_interface_map;

		// unlock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;

		// read DBGPRSR to clear the sticky power-down bit (necessary to access debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGPRSR);

		// enable monitor mode (needed to set and use debug registers)
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) |= ARM_DBGDSCR_MDBGEN;

		// first turn off all breakpoints/watchpoints
		for (i = 0; i < 16; i++) {
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = 0;
			((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = 0;
		}

		// if (debug_state == NULL) disable monitor mode
		if (debug_state == NULL) {
			*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) &= ~ARM_DBGDSCR_MDBGEN;
		} else {
			for (i = 0; i < 16; i++) {
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBVR))[i] = debug_state->bvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = debug_state->bcr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWVR))[i] = debug_state->wvr[i];
				((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = debug_state->wcr[i];
			}
		}

		// lock debug registers
		*(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = 0;
	} else if (debug_info->coprocessor_core_debug) {
		arm_debug_set_cp14(debug_state);
	}

	(void) ml_set_interrupts_enabled(intr);
}
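/*
 * Usage sketch, assuming a caller in the thread_set_state() debug-state path
 * (the bcr encoding below is a placeholder; the real bit definitions live in
 * arm/proc_reg.h):
 *
 *	arm_debug_state_t *ds = zalloc(ads_zone);
 *	bzero(ds, sizeof(*ds));
 *	ds->bvr[0] = wanted_pc & ~0x3;	// word-aligned breakpoint address
 *	ds->bcr[0] = <enable | byte-address-select | privilege bits>;
 *	thread->machine.DebugData = ds;
 *	arm_debug_set(ds);		// program this CPU's debug registers
 *
 * Passing NULL instead tears the state down: cpu_user_debug is cleared and,
 * on the memory-mapped path, monitor mode is switched back off.
 */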
/*
 * Duplicate one arm_debug_state_t to another. "all" parameter
 * is ignored in the case of ARM -- Is this the right assumption?
 */
void
copy_debug_state(
	arm_debug_state_t *src,
	arm_debug_state_t *target,
	__unused boolean_t all)
{
	bcopy(src, target, sizeof(arm_debug_state_t));
}
kern_return_t
machine_thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base)
{
	if (thread->task == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base & 0x3) {
		return KERN_INVALID_ARGUMENT;
	}

	if (tsd_base > UINT32_MAX) {
		tsd_base = 0ULL;
	}

	thread->machine.cthread_self = tsd_base;

	/* For current thread, make the TSD base active immediately */
	if (thread == current_thread()) {
		mp_disable_preemption();
		__asm__ volatile (
			"mrc	p15, 0, r6, c13, c0, 3\n"
			"and	r6, r6, #3\n"
			"orr	r6, r6, %0\n"
			"mcr	p15, 0, r6, c13, c0, 3\n"
			:			/* output */
			: "r"((uint32_t)tsd_base)	/* input */
			: "r6"			/* clobbered register */
		);
		mp_enable_preemption();
	}

	return KERN_SUCCESS;
}
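/*
 * Consumer-side sketch (user space, not this file): the base installed above
 * is read back through the user read-only thread ID register TPIDRURO
 * (CP15 c13/c0/3). The kernel keeps its own data in the low two bits, which
 * is why the inline asm above preserves them; a reader masks them off:
 *
 *	uintptr_t tsd;
 *	__asm__ volatile ("mrc p15, 0, %0, c13, c0, 3" : "=r"(tsd));
 *	tsd &= ~(uintptr_t)0x3;
 */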
void
machine_tecs(__unused thread_t thr)
{
}

boolean_t
machine_csv(__unused cpuvn_e cve)
{
	return FALSE;
}