/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm/proc_reg.h>

struct arm_vfpv2_state {
	__uint32_t      __r[32];
	__uint32_t      __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))

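/*
 * Sketch of the count arithmetic, assuming the layout above (32 single-
 * precision words plus fpscr): ARM_VFPV2_STATE_COUNT evaluates to 33
 * 32-bit words, i.e. the VFPv2-sized subset of ARM_VFP_STATE that a
 * caller with a smaller buffer is allowed to pass.
 */
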
extern void thread_set_child(thread_t child, int pid);

extern void thread_set_parent(thread_t parent, int pid);

/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	ARM_THREAD_STATE_COUNT,
	ARM_VFP_STATE_COUNT,
	ARM_EXCEPTION_STATE_COUNT,
	ARM_DEBUG_STATE_COUNT
};

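/*
 * Illustrative (hedged) lookup, assuming the flavor constants from
 * <mach/arm/thread_status.h> are the small consecutive integers used to
 * index this table (ARM_THREAD_STATE == 1, ARM_VFP_STATE == 2, ...):
 *
 *	mach_msg_type_number_t words = _MachineStateCount[ARM_THREAD_STATE];
 *	// words == ARM_THREAD_STATE_COUNT
 */
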
extern zone_t ads_zone;

kern_return_t
machine_thread_state_convert_to_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t *count)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}

kern_return_t
machine_thread_state_convert_from_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}

void
machine_thread_siguctx_pointer_convert_to_user(
	__unused thread_t thread,
	__unused user_addr_t *uctxp)
{
	// No conversion to userspace representation on this platform
}

void
machine_thread_function_pointers_convert_from_user(
	__unused thread_t thread,
	__unused user_addr_t *fptrs,
	__unused uint32_t count)
{
	// No conversion from userspace representation on this platform
}

/*
 * Routine:	machine_thread_get_state
 *
 */
kern_return_t
machine_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{

#define machine_thread_get_state_kprintf(x...)	/* kprintf("machine_thread_get_state: " x) */

	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case ARM_THREAD_STATE:{
			struct arm_thread_state *state;
			struct arm_saved_state *saved_state;
			arm_unified_thread_state_t *unified_state;
			unsigned int i;

			if (*count < ARM_THREAD_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) {
				unified_state = (arm_unified_thread_state_t *) tstate;
				state = &unified_state->ts_32;
				unified_state->ash.flavor = ARM_THREAD_STATE32;
				unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
			} else {
				state = (struct arm_thread_state *) tstate;
			}
			saved_state = &thread->machine.PcbData;

			state->sp = saved_state->sp;
			state->lr = saved_state->lr;
			state->pc = saved_state->pc;
			state->cpsr = saved_state->cpsr;
			for (i = 0; i < 13; i++)
				state->r[i] = saved_state->r[i];
			machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
			    state->pc, state->r[0], state->sp);

			if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) {
				*count = ARM_THREAD_STATE_COUNT;
			}
			break;
		}

	case ARM_EXCEPTION_STATE:{
			struct arm_exception_state *state;
			struct arm_saved_state *saved_state;

			if (*count < ARM_EXCEPTION_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (struct arm_exception_state *) tstate;
			saved_state = &thread->machine.PcbData;

			state->exception = saved_state->exception;
			state->fsr = saved_state->fsr;
			state->far = saved_state->far;

			*count = ARM_EXCEPTION_STATE_COUNT;
			break;
		}

	case ARM_VFP_STATE:{
			struct arm_vfp_state *state;
			struct arm_vfpsaved_state *saved_state;
			unsigned int i;
			unsigned int max;

			if (*count < ARM_VFP_STATE_COUNT) {
				if (*count < ARM_VFPV2_STATE_COUNT)
					return (KERN_INVALID_ARGUMENT);
				else
					*count = ARM_VFPV2_STATE_COUNT;
			}

			if (*count == ARM_VFPV2_STATE_COUNT)
				max = 32;
			else
				max = 64;

			state = (struct arm_vfp_state *) tstate;
			saved_state = find_user_vfp(thread);

			state->fpscr = saved_state->fpscr;
			for (i = 0; i < max; i++)
				state->r[i] = saved_state->r[i];

			break;
		}

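	/*
	 * Note (hedged): ARM_VFPV2_STATE_COUNT corresponds to the 32
	 * single-precision words of a VFPv2 register file, while the full
	 * ARM_VFP_STATE covers 64 words (32 double-precision registers on
	 * VFPv3-D32); `max` above selects how much of the register file is
	 * copied to fit the caller's buffer size.
	 */
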
	case ARM_DEBUG_STATE:{
			arm_debug_state_t *state;
			arm_debug_state_t *thread_state;

			if (*count < ARM_DEBUG_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (arm_debug_state_t *) tstate;
			thread_state = find_debug_state(thread);

			if (thread_state == NULL)
				bzero(state, sizeof(arm_debug_state_t));
			else
				bcopy(thread_state, state, sizeof(arm_debug_state_t));

			*count = ARM_DEBUG_STATE_COUNT;
			break;
		}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

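/*
 * Hedged usage sketch (userspace side, names illustrative): the Mach call
 * thread_get_state() is what ultimately reaches this routine for ARM
 * threads.
 *
 *	arm_thread_state_t ts;
 *	mach_msg_type_number_t cnt = ARM_THREAD_STATE_COUNT;
 *	kern_return_t kr = thread_get_state(thread_port, ARM_THREAD_STATE,
 *	                                    (thread_state_t)&ts, &cnt);
 *	// on success, ts.pc/ts.sp/ts.r[] mirror the thread's saved user state
 */
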
/*
 * Routine:	machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{

#define machine_thread_get_kern_state_kprintf(x...)	/* kprintf("machine_thread_get_kern_state: " x) */

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case ARM_THREAD_STATE:{
			struct arm_thread_state *state;
			struct arm_saved_state *saved_state;
			unsigned int i;

			if (*count < ARM_THREAD_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (struct arm_thread_state *) tstate;
			saved_state = getCpuDatap()->cpu_int_state;

			state->sp = saved_state->sp;
			state->lr = saved_state->lr;
			state->pc = saved_state->pc;
			state->cpsr = saved_state->cpsr;
			for (i = 0; i < 13; i++)
				state->r[i] = saved_state->r[i];
			machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
			    state->pc, state->r[0], state->sp);
			*count = ARM_THREAD_STATE_COUNT;
			break;
		}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

extern long long arm_debug_get(void);

/*
 * Routine:	machine_thread_set_state
 *
 */
kern_return_t
machine_thread_set_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{

#define machine_thread_set_state_kprintf(x...)	/* kprintf("machine_thread_set_state: " x) */

	switch (flavor) {
	case ARM_THREAD_STATE:{
			struct arm_thread_state *state;
			struct arm_saved_state *saved_state;
			arm_unified_thread_state_t *unified_state;
			int old_psr;

			if (count < ARM_THREAD_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
				unified_state = (arm_unified_thread_state_t *) tstate;
				state = &unified_state->ts_32;
			} else {
				state = (struct arm_thread_state *) tstate;
			}
			saved_state = &thread->machine.PcbData;
			old_psr = saved_state->cpsr;
			memcpy((char *) saved_state, (char *) state, sizeof(*state));
			/*
			 * do not allow privileged bits of the PSR to be
			 * changed
			 */
			saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK);

			machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
			    state->pc, state->r[0], state->sp);
			break;
		}

	case ARM_VFP_STATE:{
			struct arm_vfp_state *state;
			struct arm_vfpsaved_state *saved_state;
			unsigned int i;
			unsigned int max;

			if (count < ARM_VFP_STATE_COUNT) {
				if (count < ARM_VFPV2_STATE_COUNT)
					return (KERN_INVALID_ARGUMENT);
				else
					count = ARM_VFPV2_STATE_COUNT;
			}

			if (count == ARM_VFPV2_STATE_COUNT)
				max = 32;
			else
				max = 64;

			state = (struct arm_vfp_state *) tstate;
			saved_state = find_user_vfp(thread);

			saved_state->fpscr = state->fpscr;
			for (i = 0; i < max; i++)
				saved_state->r[i] = state->r[i];

			break;
		}

	case ARM_EXCEPTION_STATE:{

			if (count < ARM_EXCEPTION_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			break;
		}

	case ARM_DEBUG_STATE:{
			arm_debug_state_t *state;
			arm_debug_state_t *thread_state;
			boolean_t enabled = FALSE;
			unsigned int i;

			if (count < ARM_DEBUG_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (arm_debug_state_t *) tstate;
			thread_state = find_debug_state(thread);

			if (count < ARM_DEBUG_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			for (i = 0; i < 16; i++) {
				/* do not allow context IDs to be set */
				if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
				    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
				    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
				    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
					return KERN_PROTECTION_FAILURE;
				}
				if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
				    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
					enabled = TRUE;
				}
			}

			if (!enabled) {
				if (thread_state != NULL) {
					void *pTmp = thread->machine.DebugData;
					thread->machine.DebugData = NULL;
					zfree(ads_zone, pTmp);
				}
			} else {
				if (thread_state == NULL)
					thread_state = zalloc(ads_zone);

				for (i = 0; i < 16; i++) {
					/* set appropriate privilege; mask out unknown bits */
					thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
					    | ARM_DBGBCR_MATCH_MASK
					    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
					    | ARM_DBG_CR_ENABLE_MASK))
					    | ARM_DBGBCR_TYPE_IVA
					    | ARM_DBG_CR_LINKED_UNLINKED
					    | ARM_DBG_CR_SECURITY_STATE_BOTH
					    | ARM_DBG_CR_MODE_CONTROL_USER;
					thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
					thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
					    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
					    | ARM_DBGWCR_ACCESS_CONTROL_MASK
					    | ARM_DBG_CR_ENABLE_MASK))
					    | ARM_DBG_CR_LINKED_UNLINKED
					    | ARM_DBG_CR_SECURITY_STATE_BOTH
					    | ARM_DBG_CR_MODE_CONTROL_USER;
					thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				}

				if (thread->machine.DebugData == NULL)
					thread->machine.DebugData = thread_state;
			}

			if (thread == current_thread()) {
				arm_debug_set(thread_state);
			}

			break;
		}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

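/*
 * Hedged usage sketch (debugger side, names illustrative): a hardware
 * breakpoint request reaches the ARM_DEBUG_STATE case above via
 * thread_set_state(); only unlinked, IVA-type, user-mode breakpoints and
 * watchpoints survive the sanitization there.
 *
 *	arm_debug_state_t ds = { 0 };
 *	ds.bvr[0] = target_pc & ARM_DBG_VR_ADDRESS_MASK;
 *	ds.bcr[0] = ARM_DBG_CR_ENABLE_ENABLE | ...;   // remaining bits elided
 *	thread_set_state(thread_port, ARM_DEBUG_STATE,
 *	                 (thread_state_t)&ds, ARM_DEBUG_STATE_COUNT);
 */
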
/*
 * Routine:	machine_thread_state_initialize
 *
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	struct arm_saved_state *savestate;

	savestate = (struct arm_saved_state *) &thread->machine.PcbData;
	bzero((char *) savestate, sizeof(struct arm_saved_state));
	savestate->cpsr = PSR_USERDFLT;

#if __ARM_VFP__
	vfp_state_initialize(&thread->machine.uVFPdata);
	vfp_state_initialize(&thread->machine.kVFPdata);
#endif

	thread->machine.DebugData = NULL;

	return KERN_SUCCESS;
}

#if __ARM_VFP__
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
	/* Set default VFP state to RunFast mode:
	 *
	 * - flush-to-zero mode
	 * - default NaN mode
	 * - no enabled exceptions
	 *
	 * On the VFP11, this allows the use of floating point without
	 * trapping to support code, which we do not provide. With
	 * the Cortex-A8, this allows the use of the (much faster) NFP
	 * pipeline for single-precision operations.
	 */

	bzero(vfp_state, sizeof(*vfp_state));
	vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */

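/*
 * Hedged note on FPSCR_DEFAULT: the RunFast configuration described above
 * corresponds, on standard ARM VFP implementations, to setting FZ (bit 24)
 * and DN (bit 25) while leaving the exception-enable bits (IOE/DZE/OFE/
 * UFE/IXE, bits 8-12) clear; the exact value comes from the FPSCR_DEFAULT
 * definition in the arm headers, not from this file.
 */
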
/*
 * Routine:	machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(
	thread_t self,
	thread_t target,
	__unused boolean_t is_corpse)
{
	struct arm_saved_state *self_saved_state;
	struct arm_saved_state *target_saved_state;

#if __ARM_VFP__
	struct arm_vfpsaved_state *self_vfp_state;
	struct arm_vfpsaved_state *target_vfp_state;
#endif

	target->machine.cthread_self = self->machine.cthread_self;
	target->machine.cthread_data = self->machine.cthread_data;

	self_saved_state = &self->machine.PcbData;
	target_saved_state = &target->machine.PcbData;
	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));

#if __ARM_VFP__
	self_vfp_state = &self->machine.uVFPdata;
	target_vfp_state = &target->machine.uVFPdata;
	bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state));
#endif

	return (KERN_SUCCESS);
}

/*
 * Routine:	get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(
	thread_t thread)
{
	return (&thread->machine.PcbData);
}

/*
 * Routine:	find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(
	thread_t thread)
{
	return get_user_regs(thread);
}

/*
 * Routine:	find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(
	thread_t thread)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
		return ((struct arm_saved_state *) NULL);
	else
		return (getCpuDatap()->cpu_int_state);
}

#if __ARM_VFP__
/*
 * Find the user state floating point context. If there is no user state context,
 * we just return a 0.
 */
struct arm_vfpsaved_state *
find_user_vfp(
	thread_t thread)
{
	return &thread->machine.uVFPdata;
}
#endif /* __ARM_VFP__ */

arm_debug_state_t *
find_debug_state(
	thread_t thread)
{
	return thread->machine.DebugData;
}

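/*
 * Note (hedged): machine.DebugData starts out NULL (see
 * machine_thread_state_initialize) and is only populated from ads_zone by
 * machine_thread_set_state(ARM_DEBUG_STATE) once at least one breakpoint or
 * watchpoint is enabled, so a NULL return here simply means "no debug state".
 */
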
/*
 * Routine:	thread_userstack
 *
 */
kern_return_t
thread_userstack(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	unsigned int count,
	mach_vm_offset_t * user_stack,
	int *customstack,
	__unused boolean_t is64bit)
{
	switch (flavor) {
	case ARM_THREAD_STATE:
		{
			struct arm_thread_state *state;

			if (count < ARM_THREAD_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			if (customstack)
				*customstack = 0;
			state = (struct arm_thread_state *) tstate;

			if (state->sp) {
				*user_stack = CAST_USER_ADDR_T(state->sp);
				if (customstack)
					*customstack = 1;
			} else {
				*user_stack = CAST_USER_ADDR_T(USRSTACK);
			}
		}
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
	mach_vm_offset_t *default_user_stack,
	boolean_t is64bit __unused)
{
	*default_user_stack = USRSTACK;

	return (KERN_SUCCESS);
}

/*
 * Routine:	thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
{
	struct arm_saved_state *sv;

#define thread_setuserstack_kprintf(x...)	/* kprintf("thread_setuserstack: " x) */

	sv = get_user_regs(thread);

	sv->sp = user_stack;

	thread_setuserstack_kprintf("stack %x\n", sv->sp);
}

/*
 * Routine:	thread_adjuserstack
 *
 */
uint64_t
thread_adjuserstack(thread_t thread, int adjust)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	sv->sp += adjust;

	return sv->sp;
}

/*
 * Routine:	thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
{
	struct arm_saved_state *sv;

#define thread_setentrypoint_kprintf(x...)	/* kprintf("thread_setentrypoint: " x) */

	sv = get_user_regs(thread);

	sv->pc = entry;

	thread_setentrypoint_kprintf("entry %x\n", sv->pc);
}

/*
 * Routine:	thread_entrypoint
 *
 */
kern_return_t
thread_entrypoint(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	__unused unsigned int count,
	mach_vm_offset_t * entry_point)
{
	switch (flavor) {
	case ARM_THREAD_STATE:
		{
			struct arm_thread_state *state;

			state = (struct arm_thread_state *) tstate;

			/*
			 * If a valid entry point is specified, use it.
			 */
			if (state->pc) {
				*entry_point = CAST_USER_ADDR_T(state->pc);
			} else {
				*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
			}
		}
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}

/*
 * Routine:	thread_set_child
 *
 */
void
thread_set_child(
	thread_t child,
	int pid)
{
	struct arm_saved_state *child_state;

	child_state = get_user_regs(child);

	child_state->r[0] = (uint_t) pid;
	child_state->r[1] = 1ULL;
}

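/*
 * Note (hedged): together with thread_set_parent() below this implements the
 * usual fork() return convention, where r0 carries the pid in both threads
 * and r1 distinguishes child (1) from parent (0); the libc fork stub is
 * expected to inspect r1 and return 0 in the child.
 */
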
/*
 * Routine:	thread_set_parent
 *
 */
void
thread_set_parent(
	thread_t parent,
	int pid)
{
	struct arm_saved_state *parent_state;

	parent_state = get_user_regs(parent);

	parent_state->r[0] = pid;
	parent_state->r[1] = 0;
}

struct arm_act_context {
	struct arm_saved_state ss;
#if __ARM_VFP__
	struct arm_vfpsaved_state vfps;
#endif
};

/*
 * Routine:	act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	unsigned int val;

	ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));

	if (ic == (struct arm_act_context *) NULL)
		return ((void *) 0);

	val = ARM_THREAD_STATE_COUNT;
	kret = machine_thread_get_state(current_thread(),
	    ARM_THREAD_STATE,
	    (thread_state_t) &ic->ss,
	    &val);
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return ((void *) 0);
	}
#if __ARM_VFP__
	val = ARM_VFP_STATE_COUNT;
	kret = machine_thread_get_state(current_thread(),
	    ARM_VFP_STATE,
	    (thread_state_t) &ic->vfps,
	    &val);
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return ((void *) 0);
	}
#endif
	return (ic);
}

/*
 * Routine:	act_thread_catt
 *
 */
void
act_thread_catt(void *ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;

	ic = (struct arm_act_context *) ctx;

	if (ic == (struct arm_act_context *) NULL)
		return;

	kret = machine_thread_set_state(current_thread(),
	    ARM_THREAD_STATE,
	    (thread_state_t) &ic->ss,
	    ARM_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS)
		goto out;

#if __ARM_VFP__
	kret = machine_thread_set_state(current_thread(),
	    ARM_VFP_STATE,
	    (thread_state_t) &ic->vfps,
	    ARM_VFP_STATE_COUNT);
	if (kret != KERN_SUCCESS)
		goto out;
#endif
out:
	kfree(ic, sizeof(struct arm_act_context));
}

/*
 * Routine:	act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}

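/*
 * Hedged sketch of the intended pairing (caller code is illustrative):
 *
 *	void *ctx = act_thread_csave();   // snapshot current thread + VFP state
 *	...                               // do work that may clobber that state
 *	act_thread_catt(ctx);             // restore the snapshot and free it
 *	// or, to discard a snapshot without restoring it:
 *	act_thread_cfree(ctx);
 */
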
int
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	thread_t curth = current_thread();
	spl_t s = 0;

	saved_state = &thread->machine.PcbData;
	state = (arm_thread_state_t *) tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp or pc
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state->cpsr = PSR_USERDFLT;

	if (curth != thread) {
		thread_unlock(thread);