/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/thread_status.h>
#include <mach/vm_types.h>

#include <kern/kern_types.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/kalloc.h>

#include <arm/proc_reg.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/cpuid.h>

#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/kdebug.h>
extern int debug_task;

zone_t ads_zone;    /* zone for debug_state area */
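/*
 * thread->machine.DebugData is carved from ads_zone: machine_thread_init()
 * below creates the zone and machine_thread_destroy() zfree()s a thread's
 * debug state back to it.  The matching zalloc() presumably lives in the
 * thread debug-state setters outside this file.
 */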
/*
 * Routine:    consider_machine_collect
 *
 */
void
consider_machine_collect(void)
{
    pmap_gc();
}
/*
 * Routine:    consider_machine_adjust
 *
 */
void
consider_machine_adjust(void)
{
}
/*
 * Routine:    machine_switch_context
 *
 */
thread_t
machine_switch_context(
    thread_t old,
    thread_continue_t continuation,
    thread_t new)
{
    thread_t retval;
    cpu_data_t *cpu_data_ptr;

#define machine_switch_context_kprintf(x...)    /* kprintf("machine_switch_context: " x) */

    cpu_data_ptr = getCpuDatap();
    if (old == new)
        panic("machine_switch_context");

    pmap_set_pmap(new->map->pmap, new);

    new->machine.CpuDatap = cpu_data_ptr;

    machine_switch_context_kprintf("old = %x continuation = %x new = %x\n", old, continuation, new);
    /* Switch_context() saves the outgoing thread's kernel state and resumes the incoming one. */
    retval = Switch_context(old, continuation, new);
    assert(retval != NULL);

    return retval;
}
/*
 * Routine:    machine_thread_create
 *
 */
kern_return_t
machine_thread_create(
    thread_t thread,
#if !__ARM_USER_PROTECT__
    __unused
#endif
    task_t task)
{
#define machine_thread_create_kprintf(x...)    /* kprintf("machine_thread_create: " x) */

    machine_thread_create_kprintf("thread = %x\n", thread);

    if (current_thread() != thread) {
        thread->machine.CpuDatap = (cpu_data_t *)0;
    }
    thread->machine.preemption_count = 0;
    thread->machine.cthread_self = 0;
    thread->machine.cthread_data = 0;
#if __ARM_USER_PROTECT__
    {
        struct pmap *new_pmap = vm_map_pmap(task->map);

        thread->machine.kptw_ttb = ((unsigned int) kernel_pmap->ttep) | TTBR_SETUP;
        thread->machine.asid = new_pmap->asid;
        if (new_pmap->tte_index_max == NTTES) {
            thread->machine.uptw_ttc = 2;
            thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
        } else {
            thread->machine.uptw_ttc = 1;
            thread->machine.uptw_ttb = ((unsigned int) new_pmap->ttep) | TTBR_SETUP;
        }
    }
#endif
    machine_thread_state_initialize(thread);

    return (KERN_SUCCESS);
}
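/*
 * Under __ARM_USER_PROTECT__ each thread caches the kernel TTBR (kptw_ttb),
 * the user pmap's TTBR (uptw_ttb), its ASID, and a translation-table control
 * value (uptw_ttc) -- presumably consumed by the context-switch path when it
 * reprograms the MMU for the new address space.  Which uptw_ttc value is used
 * depends on whether the user pmap carries a full-size (NTTES) first-level
 * table.
 */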
/*
 * Routine:    machine_thread_destroy
 *
 */
void
machine_thread_destroy(
    thread_t thread)
{
    if (thread->machine.DebugData != NULL) {
        /* If this thread's debug state is live on the current CPU, tear it down first. */
        if (thread->machine.DebugData == getCpuDatap()->cpu_user_debug)
            arm_debug_set(NULL);
        zfree(ads_zone, thread->machine.DebugData);
    }
}
/*
 * Routine:    machine_thread_init
 *
 */
void
machine_thread_init(void)
{
    ads_zone = zinit(sizeof(arm_debug_state_t),
                     THREAD_CHUNK * (sizeof(arm_debug_state_t)),
                     THREAD_CHUNK * (sizeof(arm_debug_state_t)),
                     "arm debug state");
}
/*
 * Routine:    get_useraddr
 *
 */
user_addr_t
get_useraddr()
{
    return (current_thread()->machine.PcbData.pc);
}
/*
 * Routine:    machine_stack_detach
 *
 */
vm_offset_t
machine_stack_detach(
    thread_t thread)
{
    vm_offset_t stack;

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DETACH),
                 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

    stack = thread->kernel_stack;
    thread->kernel_stack = 0;
    thread->machine.kstackptr = 0;

    return (stack);
}
/*
 * Routine:    machine_stack_attach
 *
 */
void
machine_stack_attach(
    thread_t thread,
    vm_offset_t stack)
{
    struct arm_saved_state *savestate;

#define machine_stack_attach_kprintf(x...)    /* kprintf("machine_stack_attach: " x) */

    KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_ATTACH),
                 (uintptr_t)thread_tid(thread), thread->priority, thread->sched_pri, 0, 0);

    thread->kernel_stack = stack;
    thread->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    thread_initialize_kernel_state(thread);
    savestate = (struct arm_saved_state *) thread->machine.kstackptr;

    /*
     * Seed the saved state so the first switch into this thread "returns"
     * to thread_continue on the fresh kernel stack, in SVC mode with
     * interrupts masked.
     */
    savestate->lr = (uint32_t) thread_continue;
    savestate->sp = thread->machine.kstackptr;
    savestate->r[7] = 0x0UL;
    savestate->r[9] = (uint32_t) NULL;
    savestate->cpsr = PSR_SVC_MODE | PSR_INTMASK;
    machine_stack_attach_kprintf("thread = %x pc = %x, sp = %x\n", thread, savestate->lr, savestate->sp);
}
/*
 * Routine:    machine_stack_handoff
 *
 */
void
machine_stack_handoff(
    thread_t old,
    thread_t new)
{
    vm_offset_t stack;
    cpu_data_t *cpu_data_ptr;

    stack = machine_stack_detach(old);
    cpu_data_ptr = getCpuDatap();
    new->kernel_stack = stack;
    new->machine.kstackptr = stack + kernel_stack_size - sizeof(struct thread_kernel_state);
    if (stack == old->reserved_stack) {
        assert(new->reserved_stack);
        old->reserved_stack = new->reserved_stack;
        new->reserved_stack = stack;
    }

    pmap_set_pmap(new->map->pmap, new);
    new->machine.CpuDatap = cpu_data_ptr;
    machine_set_current_thread(new);
    thread_initialize_kernel_state(new);
}
/*
 * Routine:    call_continuation
 *
 */
void
call_continuation(
    thread_continue_t continuation,
    void *parameter,
    wait_result_t wresult)
{
#define call_continuation_kprintf(x...)    /* kprintf("call_continuation_kprintf:" x) */

    call_continuation_kprintf("thread = %x continuation = %x, stack = %x\n", current_thread(), continuation, current_thread()->machine.kstackptr);
    Call_continuation(continuation, parameter, wresult, current_thread()->machine.kstackptr);
}
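/*
 * Call_continuation() is implemented elsewhere in the port (in the
 * context-switch assembly): it resets SP to the thread's kstackptr and jumps
 * to the continuation with (parameter, wresult), so the continuation runs on
 * a pristine kernel stack and is not expected to return here.
 */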
void arm_debug_set(arm_debug_state_t *debug_state)
{
    /* If this CPU supports the memory-mapped debug interface, use it; otherwise
     * attempt the Extended CP14 interface.  The two routines need to be kept in sync,
     * functionality-wise.
     */
    struct cpu_data *cpu_data_ptr;
    arm_debug_info_t *debug_info = arm_debug_info();
    boolean_t intr;

    intr = ml_set_interrupts_enabled(FALSE);
    cpu_data_ptr = getCpuDatap();

    // Set current user debug
    cpu_data_ptr->cpu_user_debug = debug_state;

    if (debug_info->memory_mapped_core_debug) {
        int i;
        uintptr_t debug_map = cpu_data_ptr->cpu_debug_interface_map;

        // unlock debug registers
        *(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = ARM_DBG_LOCK_ACCESS_KEY;

        // read DBGPRSR to clear the sticky power-down bit (necessary to access debug registers)
        *(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGPRSR);

        // enable monitor mode (needed to set and use debug registers)
        *(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) |= ARM_DBGDSCR_MDBGEN;

        // first turn off all breakpoints/watchpoints
        for (i = 0; i < 16; i++) {
            ((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = 0;
            ((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = 0;
        }

        // if (debug_state == NULL) disable monitor mode
        if (debug_state == NULL) {
            *(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGDSCR) &= ~ARM_DBGDSCR_MDBGEN;
        } else {
            // otherwise program the new breakpoint/watchpoint value and control registers
            for (i = 0; i < 16; i++) {
                ((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBVR))[i] = debug_state->bvr[i];
                ((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGBCR))[i] = debug_state->bcr[i];
                ((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWVR))[i] = debug_state->wvr[i];
                ((volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGWCR))[i] = debug_state->wcr[i];
            }
        }

        // lock debug registers
        *(volatile uint32_t *)(debug_map + ARM_DEBUG_OFFSET_DBGLAR) = 0;

    } else if (debug_info->coprocessor_core_debug) {
        arm_debug_set_cp14(debug_state);
    }

    (void) ml_set_interrupts_enabled(intr);
}
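/*
 * For context, a caller installing per-thread hardware breakpoints would look
 * roughly like the following (hypothetical sketch -- the real call sites are
 * the thread debug-state setters outside this file):
 *
 *    arm_debug_state_t *ds = zalloc(ads_zone);
 *    bzero(ds, sizeof(arm_debug_state_t));
 *    ds->bvr[0] = breakpoint_address;
 *    ds->bcr[0] = breakpoint_control_bits;
 *    thread->machine.DebugData = ds;
 *    if (thread == current_thread())
 *        arm_debug_set(ds);
 */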
/*
 * Duplicate one arm_debug_state_t to another.  The "all" parameter
 * is ignored in the case of ARM -- is this the right assumption?
 */
void
copy_debug_state(
    arm_debug_state_t *src,
    arm_debug_state_t *target,
    __unused boolean_t all)
{
    bcopy(src, target, sizeof(arm_debug_state_t));
}
kern_return_t
machine_thread_set_tsd_base(
    thread_t thread,
    mach_vm_offset_t tsd_base)
{
    if (thread->task == kernel_task) {
        return KERN_INVALID_ARGUMENT;
    }

    /* The base must be 4-byte aligned; the low two bits of the register are preserved below. */
    if (tsd_base & 0x3) {
        return KERN_INVALID_ARGUMENT;
    }

    if (tsd_base > UINT32_MAX)
        tsd_base = 0ULL;

    thread->machine.cthread_self = tsd_base;

    /* For current thread, make the TSD base active immediately */
    if (thread == current_thread()) {

        mp_disable_preemption();
        __asm__ volatile(
            "mrc p15, 0, r6, c13, c0, 3\n"   /* read TPIDRURO (user read-only thread ID register) */
            "and r6, r6, #3\n"               /* keep the existing low two bits */
            "orr r6, r6, %0\n"               /* merge in the aligned TSD base */
            "mcr p15, 0, r6, c13, c0, 3\n"   /* write TPIDRURO back */
            :                           /* output */
            : "r"((uint32_t)tsd_base)   /* input */
            : "r6"                      /* clobbered register */
        );
        mp_enable_preemption();

    }

    return KERN_SUCCESS;
}
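/*
 * User-mode side, for context (hypothetical sketch, not part of this file):
 * TPIDRURO is readable from user mode, so a thread can fetch its own TSD base
 * with a single coprocessor read and strip the preserved low bits:
 *
 *    static inline uintptr_t
 *    arm_get_tsd_base(void)
 *    {
 *        uintptr_t base;
 *        __asm__ ("mrc p15, 0, %0, c13, c0, 3" : "=r"(base));
 *        return base & ~(uintptr_t)0x3;
 *    }
 */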