/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>

#include <kern/thread.h>
#include <kern/kalloc.h>

#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm/proc_reg.h>
38 struct arm_vfpv2_state
45 typedef struct arm_vfpv2_state arm_vfpv2_state_t
;
47 #define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
48 (sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
55 thread_set_child(thread_t child
, int pid
);
58 thread_set_parent(thread_t parent
, int pid
);
61 * Maps state flavor to number of words in the state:
63 /* __private_extern__ */
64 unsigned int _MachineStateCount
[] = {
66 ARM_THREAD_STATE_COUNT
,
68 ARM_EXCEPTION_STATE_COUNT
,
72 extern zone_t ads_zone
;
75 * Routine: machine_thread_get_state
79 machine_thread_get_state(
81 thread_flavor_t flavor
,
82 thread_state_t tstate
,
83 mach_msg_type_number_t
* count
)
86 #define machine_thread_get_state_kprintf(x...) /* kprintf("machine_thread_get
90 case THREAD_STATE_FLAVOR_LIST
:
92 return (KERN_INVALID_ARGUMENT
);
94 tstate
[0] = ARM_THREAD_STATE
;
95 tstate
[1] = ARM_VFP_STATE
;
96 tstate
[2] = ARM_EXCEPTION_STATE
;
97 tstate
[3] = ARM_DEBUG_STATE
;
101 case ARM_THREAD_STATE
:{
102 struct arm_thread_state
*state
;
103 struct arm_saved_state
*saved_state
;
104 arm_unified_thread_state_t
*unified_state
;
107 if (*count
< ARM_THREAD_STATE_COUNT
)
108 return (KERN_INVALID_ARGUMENT
);
110 if (*count
== ARM_UNIFIED_THREAD_STATE_COUNT
) {
111 unified_state
= (arm_unified_thread_state_t
*) tstate
;
112 state
= &unified_state
->ts_32
;
113 unified_state
->ash
.flavor
= ARM_THREAD_STATE32
;
114 unified_state
->ash
.count
= ARM_THREAD_STATE32_COUNT
;
116 state
= (struct arm_thread_state
*) tstate
;
118 saved_state
= &thread
->machine
.PcbData
;
120 state
->sp
= saved_state
->sp
;
121 state
->lr
= saved_state
->lr
;
122 state
->pc
= saved_state
->pc
;
123 state
->cpsr
= saved_state
->cpsr
;
124 for (i
= 0; i
< 13; i
++)
125 state
->r
[i
] = saved_state
->r
[i
];
126 machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
127 state
->pc
, state
->r
[0], state
->sp
);
129 if (*count
!= ARM_UNIFIED_THREAD_STATE_COUNT
) {
130 *count
= ARM_THREAD_STATE_COUNT
;
134 case ARM_EXCEPTION_STATE
:{
135 struct arm_exception_state
*state
;
136 struct arm_saved_state
*saved_state
;
138 if (*count
< ARM_EXCEPTION_STATE_COUNT
)
139 return (KERN_INVALID_ARGUMENT
);
141 state
= (struct arm_exception_state
*) tstate
;
142 saved_state
= &thread
->machine
.PcbData
;
144 state
->exception
= saved_state
->exception
;
145 state
->fsr
= saved_state
->fsr
;
146 state
->far
= saved_state
->far
;
148 *count
= ARM_EXCEPTION_STATE_COUNT
;
153 struct arm_vfp_state
*state
;
154 struct arm_vfpsaved_state
*saved_state
;
158 if (*count
< ARM_VFP_STATE_COUNT
) {
159 if (*count
< ARM_VFPV2_STATE_COUNT
)
160 return (KERN_INVALID_ARGUMENT
);
162 *count
= ARM_VFPV2_STATE_COUNT
;
165 if (*count
== ARM_VFPV2_STATE_COUNT
)
170 state
= (struct arm_vfp_state
*) tstate
;
171 saved_state
= find_user_vfp(thread
);
173 state
->fpscr
= saved_state
->fpscr
;
174 for (i
= 0; i
< max
; i
++)
175 state
->r
[i
] = saved_state
->r
[i
];
180 case ARM_DEBUG_STATE
:{
181 arm_debug_state_t
*state
;
182 arm_debug_state_t
*thread_state
;
184 if (*count
< ARM_DEBUG_STATE_COUNT
)
185 return (KERN_INVALID_ARGUMENT
);
187 state
= (arm_debug_state_t
*) tstate
;
188 thread_state
= find_debug_state(thread
);
190 if (thread_state
== NULL
)
191 bzero(state
, sizeof(arm_debug_state_t
));
193 bcopy(thread_state
, state
, sizeof(arm_debug_state_t
));
195 *count
= ARM_DEBUG_STATE_COUNT
;
200 return (KERN_INVALID_ARGUMENT
);
202 return (KERN_SUCCESS
);
207 * Routine: machine_thread_get_kern_state
211 machine_thread_get_kern_state(
213 thread_flavor_t flavor
,
214 thread_state_t tstate
,
215 mach_msg_type_number_t
* count
)
218 #define machine_thread_get_kern_state_kprintf(x...) /* kprintf("machine_threa
219 * d_get_kern_state: "
223 * This works only for an interrupted kernel thread
225 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
)
229 case ARM_THREAD_STATE
:{
230 struct arm_thread_state
*state
;
231 struct arm_saved_state
*saved_state
;
233 if (*count
< ARM_THREAD_STATE_COUNT
)
234 return (KERN_INVALID_ARGUMENT
);
236 state
= (struct arm_thread_state
*) tstate
;
237 saved_state
= getCpuDatap()->cpu_int_state
;
239 state
->sp
= saved_state
->sp
;
240 state
->lr
= saved_state
->lr
;
241 state
->pc
= saved_state
->pc
;
242 state
->cpsr
= saved_state
->cpsr
;
243 for (i
= 0; i
< 13; i
++)
244 state
->r
[i
] = saved_state
->r
[i
];
245 machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
246 state
->pc
, state
->r
[0], state
->sp
);
247 *count
= ARM_THREAD_STATE_COUNT
;
251 return (KERN_INVALID_ARGUMENT
);
253 return (KERN_SUCCESS
);
256 extern long long arm_debug_get(void);
259 * Routine: machine_thread_set_state
263 machine_thread_set_state(
265 thread_flavor_t flavor
,
266 thread_state_t tstate
,
267 mach_msg_type_number_t count
)
270 #define machine_thread_set_state_kprintf(x...) /* kprintf("machine_thread_set
274 case ARM_THREAD_STATE
:{
275 struct arm_thread_state
*state
;
276 struct arm_saved_state
*saved_state
;
277 arm_unified_thread_state_t
*unified_state
;
280 if (count
< ARM_THREAD_STATE_COUNT
)
281 return (KERN_INVALID_ARGUMENT
);
283 if (count
== ARM_UNIFIED_THREAD_STATE_COUNT
) {
284 unified_state
= (arm_unified_thread_state_t
*) tstate
;
285 state
= &unified_state
->ts_32
;
287 state
= (struct arm_thread_state
*) tstate
;
289 saved_state
= &thread
->machine
.PcbData
;
290 old_psr
= saved_state
->cpsr
;
291 memcpy((char *) saved_state
, (char *) state
, sizeof(*state
));
293 * do not allow privileged bits of the PSR to be
296 saved_state
->cpsr
= (saved_state
->cpsr
& ~PSR_USER_MASK
) | (old_psr
& PSR_USER_MASK
);
298 machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
299 state
->pc
, state
->r
[0], state
->sp
);
304 struct arm_vfp_state
*state
;
305 struct arm_vfpsaved_state
*saved_state
;
309 if (count
< ARM_VFP_STATE_COUNT
) {
310 if (count
< ARM_VFPV2_STATE_COUNT
)
311 return (KERN_INVALID_ARGUMENT
);
313 count
= ARM_VFPV2_STATE_COUNT
;
316 if (count
== ARM_VFPV2_STATE_COUNT
)
321 state
= (struct arm_vfp_state
*) tstate
;
322 saved_state
= find_user_vfp(thread
);
324 saved_state
->fpscr
= state
->fpscr
;
325 for (i
= 0; i
< max
; i
++)
326 saved_state
->r
[i
] = state
->r
[i
];
331 case ARM_EXCEPTION_STATE
:{
333 if (count
< ARM_EXCEPTION_STATE_COUNT
)
334 return (KERN_INVALID_ARGUMENT
);
338 case ARM_DEBUG_STATE
:{
339 arm_debug_state_t
*state
;
340 arm_debug_state_t
*thread_state
;
341 boolean_t enabled
= FALSE
;
344 if (count
< ARM_DEBUG_STATE_COUNT
)
345 return (KERN_INVALID_ARGUMENT
);
347 state
= (arm_debug_state_t
*) tstate
;
348 thread_state
= find_debug_state(thread
);
350 if (count
< ARM_DEBUG_STATE_COUNT
)
351 return (KERN_INVALID_ARGUMENT
);
353 for (i
= 0; i
< 16; i
++) {
354 /* do not allow context IDs to be set */
355 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
356 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
357 || ((state
->wcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
358 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
359 return KERN_PROTECTION_FAILURE
;
361 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
362 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
368 if (thread_state
!= NULL
)
370 void *pTmp
= thread
->machine
.DebugData
;
371 thread
->machine
.DebugData
= NULL
;
372 zfree(ads_zone
, pTmp
);
377 if (thread_state
== NULL
)
378 thread_state
= zalloc(ads_zone
);
380 for (i
= 0; i
< 16; i
++) {
381 /* set appropriate priviledge; mask out unknown bits */
382 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
383 | ARM_DBGBCR_MATCH_MASK
384 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
385 | ARM_DBG_CR_ENABLE_MASK
))
386 | ARM_DBGBCR_TYPE_IVA
387 | ARM_DBG_CR_LINKED_UNLINKED
388 | ARM_DBG_CR_SECURITY_STATE_BOTH
389 | ARM_DBG_CR_MODE_CONTROL_USER
;
390 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
391 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
392 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
393 | ARM_DBGWCR_ACCESS_CONTROL_MASK
394 | ARM_DBG_CR_ENABLE_MASK
))
395 | ARM_DBG_CR_LINKED_UNLINKED
396 | ARM_DBG_CR_SECURITY_STATE_BOTH
397 | ARM_DBG_CR_MODE_CONTROL_USER
;
398 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
401 if (thread
->machine
.DebugData
== NULL
)
402 thread
->machine
.DebugData
= thread_state
;
405 if (thread
== current_thread()) {
406 arm_debug_set(thread_state
);
413 return (KERN_INVALID_ARGUMENT
);
415 return (KERN_SUCCESS
);
419 * Routine: machine_thread_state_initialize
423 machine_thread_state_initialize(
426 struct arm_saved_state
*savestate
;
428 savestate
= (struct arm_saved_state
*) & thread
->machine
.PcbData
;
429 bzero((char *) savestate
, sizeof(struct arm_saved_state
));
430 savestate
->cpsr
= PSR_USERDFLT
;
433 vfp_state_initialize(&thread
->machine
.uVFPdata
);
434 vfp_state_initialize(&thread
->machine
.kVFPdata
);
437 thread
->machine
.DebugData
= NULL
;
#if __ARM_VFP__
/* Initialize a VFP save area to the kernel's default configuration. */
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
    /* Set default VFP state to RunFast mode:
     *
     * - flush-to-zero mode
     * - default NaN mode
     * - no enabled exceptions
     *
     * On the VFP11, this allows the use of floating point without
     * trapping to support code, which we do not provide. With
     * the Cortex-A8, this allows the use of the (much faster) NFP
     * pipeline for single-precision operations.
     */

    bzero(vfp_state, sizeof(*vfp_state));
    vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
465 * Routine: machine_thread_dup
473 struct arm_saved_state
*self_saved_state
;
474 struct arm_saved_state
*target_saved_state
;
477 struct arm_vfpsaved_state
*self_vfp_state
;
478 struct arm_vfpsaved_state
*target_vfp_state
;
481 target
->machine
.cthread_self
= self
->machine
.cthread_self
;
482 target
->machine
.cthread_data
= self
->machine
.cthread_data
;
484 self_saved_state
= &self
->machine
.PcbData
;
485 target_saved_state
= &target
->machine
.PcbData
;
486 bcopy(self_saved_state
, target_saved_state
, sizeof(struct arm_saved_state
));
489 self_vfp_state
= &self
->machine
.uVFPdata
;
490 target_vfp_state
= &target
->machine
.uVFPdata
;
491 bcopy(self_vfp_state
, target_vfp_state
, sizeof(struct arm_vfpsaved_state
));
494 return (KERN_SUCCESS
);
498 * Routine: get_user_regs
501 struct arm_saved_state
*
505 return (&thread
->machine
.PcbData
);
509 * Routine: find_user_regs
512 struct arm_saved_state
*
516 return get_user_regs(thread
);
520 * Routine: find_kern_regs
523 struct arm_saved_state
*
528 * This works only for an interrupted kernel thread
530 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
)
531 return ((struct arm_saved_state
*) NULL
);
533 return (getCpuDatap()->cpu_int_state
);
#if __ARM_VFP__
/*
 * Find the user state floating point context. If there is no user state context,
 * we just return a 0.
 */

struct arm_vfpsaved_state *
find_user_vfp(
    thread_t thread)
{
    return &thread->machine.uVFPdata;
}
#endif /* __ARM_VFP__ */
555 return thread
->machine
.DebugData
;
559 * Routine: thread_userstack
564 __unused thread_t thread
,
566 thread_state_t tstate
,
568 mach_vm_offset_t
* user_stack
,
570 __unused boolean_t is64bit
575 case ARM_THREAD_STATE
:
577 struct arm_thread_state
*state
;
580 if (count
< ARM_THREAD_STATE_COUNT
)
581 return (KERN_INVALID_ARGUMENT
);
585 state
= (struct arm_thread_state
*) tstate
;
588 *user_stack
= CAST_USER_ADDR_T(state
->sp
);
592 *user_stack
= CAST_USER_ADDR_T(USRSTACK
);
598 return (KERN_INVALID_ARGUMENT
);
601 return (KERN_SUCCESS
);
605 * thread_userstackdefault:
607 * Return the default stack location for the
608 * thread, if otherwise unknown.
611 thread_userstackdefault(
612 mach_vm_offset_t
*default_user_stack
,
613 boolean_t is64bit __unused
)
615 *default_user_stack
= USRSTACK
;
617 return (KERN_SUCCESS
);
621 * Routine: thread_setuserstack
625 thread_setuserstack(thread_t thread
, mach_vm_address_t user_stack
)
627 struct arm_saved_state
*sv
;
629 #define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstac
632 sv
= get_user_regs(thread
);
636 thread_setuserstack_kprintf("stack %x\n", sv
->sp
);
642 * Routine: thread_adjuserstack
646 thread_adjuserstack(thread_t thread
, int adjust
)
648 struct arm_saved_state
*sv
;
650 sv
= get_user_regs(thread
);
658 * Routine: thread_setentrypoint
662 thread_setentrypoint(thread_t thread
, mach_vm_offset_t entry
)
664 struct arm_saved_state
*sv
;
666 #define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoi
669 sv
= get_user_regs(thread
);
673 thread_setentrypoint_kprintf("entry %x\n", sv
->pc
);
679 * Routine: thread_entrypoint
684 __unused thread_t thread
,
686 thread_state_t tstate
,
687 __unused
unsigned int count
,
688 mach_vm_offset_t
* entry_point
692 case ARM_THREAD_STATE
:
694 struct arm_thread_state
*state
;
696 state
= (struct arm_thread_state
*) tstate
;
699 * If a valid entry point is specified, use it.
702 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
704 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
710 return (KERN_INVALID_ARGUMENT
);
713 return (KERN_SUCCESS
);
718 * Routine: thread_set_child
726 struct arm_saved_state
*child_state
;
728 child_state
= get_user_regs(child
);
730 child_state
->r
[0] = (uint_t
) pid
;
731 child_state
->r
[1] = 1ULL;
736 * Routine: thread_set_parent
744 struct arm_saved_state
*parent_state
;
746 parent_state
= get_user_regs(parent
);
748 parent_state
->r
[0] = pid
;
749 parent_state
->r
[1] = 0;
753 struct arm_act_context
{
754 struct arm_saved_state ss
;
756 struct arm_vfpsaved_state vfps
;
761 * Routine: act_thread_csave
765 act_thread_csave(void)
767 struct arm_act_context
*ic
;
771 ic
= (struct arm_act_context
*) kalloc(sizeof(struct arm_act_context
));
773 if (ic
== (struct arm_act_context
*) NULL
)
776 val
= ARM_THREAD_STATE_COUNT
;
777 kret
= machine_thread_get_state(current_thread(),
779 (thread_state_t
) & ic
->ss
,
781 if (kret
!= KERN_SUCCESS
) {
782 kfree(ic
, sizeof(struct arm_act_context
));
786 val
= ARM_VFP_STATE_COUNT
;
787 kret
= machine_thread_get_state(current_thread(),
789 (thread_state_t
) & ic
->vfps
,
791 if (kret
!= KERN_SUCCESS
) {
792 kfree(ic
, sizeof(struct arm_act_context
));
800 * Routine: act_thread_catt
804 act_thread_catt(void *ctx
)
806 struct arm_act_context
*ic
;
809 ic
= (struct arm_act_context
*) ctx
;
811 if (ic
== (struct arm_act_context
*) NULL
)
814 kret
= machine_thread_set_state(current_thread(),
816 (thread_state_t
) & ic
->ss
,
817 ARM_THREAD_STATE_COUNT
);
818 if (kret
!= KERN_SUCCESS
)
822 kret
= machine_thread_set_state(current_thread(),
824 (thread_state_t
) & ic
->vfps
,
825 ARM_VFP_STATE_COUNT
);
826 if (kret
!= KERN_SUCCESS
)
830 kfree(ic
, sizeof(struct arm_act_context
));
834 * Routine: act_thread_catt
838 act_thread_cfree(void *ctx
)
840 kfree(ctx
, sizeof(struct arm_act_context
));
844 thread_set_wq_state32(thread_t thread
, thread_state_t tstate
)
846 arm_thread_state_t
*state
;
847 struct arm_saved_state
*saved_state
;
848 thread_t curth
= current_thread();
851 saved_state
= &thread
->machine
.PcbData
;
852 state
= (arm_thread_state_t
*)tstate
;
854 if (curth
!= thread
) {
860 * do not zero saved_state, it can be concurrently accessed
861 * and zero is not a valid state for some of the registers,
864 thread_state32_to_saved_state(state
, saved_state
);
865 saved_state
->cpsr
= PSR_USERDFLT
;
867 if (curth
!= thread
) {
868 thread_unlock(thread
);