2 * Copyright (c) 2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm64/proc_reg.h>
38 struct arm_vfpv2_state
{
43 typedef struct arm_vfpv2_state arm_vfpv2_state_t
;
45 #define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
46 (sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
51 void thread_set_child(thread_t child
, int pid
);
52 void thread_set_parent(thread_t parent
, int pid
);
55 * Maps state flavor to number of words in the state:
57 /* __private_extern__ */
58 unsigned int _MachineStateCount
[] = {
60 ARM_UNIFIED_THREAD_STATE_COUNT
,
62 ARM_EXCEPTION_STATE_COUNT
,
63 ARM_DEBUG_STATE_COUNT
,
64 /* THREAD_STATE_NONE (legacy) */ 0,
65 ARM_THREAD_STATE64_COUNT
,
66 ARM_EXCEPTION_STATE64_COUNT
,
67 /* THREAD_STATE_LAST (legacy) */ 0,
68 ARM_THREAD_STATE32_COUNT
,
73 ARM_DEBUG_STATE32_COUNT
,
74 ARM_DEBUG_STATE64_COUNT
,
76 ARM_NEON_STATE64_COUNT
,
79 /* ARM_SAVED_STATE32_COUNT */ 0,
80 /* ARM_SAVED_STATE64_COUNT */ 0,
81 /* ARM_NEON_SAVED_STATE32_COUNT */ 0,
82 /* ARM_NEON_SAVED_STATE64_COUNT */ 0,
85 extern zone_t ads_zone
;
89 * Copy values from saved_state to ts64.
92 saved_state_to_thread_state64(const arm_saved_state_t
*saved_state
, arm_thread_state64_t
*ts64
)
96 assert(is_saved_state64(saved_state
));
98 ts64
->fp
= get_saved_state_fp(saved_state
);
99 ts64
->lr
= get_saved_state_lr(saved_state
);
100 ts64
->sp
= get_saved_state_sp(saved_state
);
101 ts64
->pc
= get_saved_state_pc(saved_state
);
102 ts64
->cpsr
= get_saved_state_cpsr(saved_state
);
103 for (i
= 0; i
< 29; i
++) {
104 ts64
->x
[i
] = get_saved_state_reg(saved_state
, i
);
109 * Copy values from ts64 to saved_state
112 thread_state64_to_saved_state(const arm_thread_state64_t
*ts64
, arm_saved_state_t
*saved_state
)
116 assert(is_saved_state64(saved_state
));
118 set_saved_state_fp(saved_state
, ts64
->fp
);
119 set_saved_state_lr(saved_state
, ts64
->lr
);
120 set_saved_state_sp(saved_state
, ts64
->sp
);
121 set_saved_state_pc(saved_state
, ts64
->pc
);
122 set_saved_state_cpsr(saved_state
, (ts64
->cpsr
& ~PSR64_MODE_MASK
) | PSR64_MODE_RW_64
);
123 for (i
= 0; i
< 29; i
++) {
124 set_saved_state_reg(saved_state
, i
, ts64
->x
[i
]);
130 handle_get_arm32_thread_state(
131 thread_state_t tstate
,
132 mach_msg_type_number_t
* count
,
133 const arm_saved_state_t
*saved_state
)
135 if (*count
< ARM_THREAD_STATE32_COUNT
) {
136 return KERN_INVALID_ARGUMENT
;
138 if (!is_saved_state32(saved_state
)) {
139 return KERN_INVALID_ARGUMENT
;
142 (void)saved_state_to_thread_state32(saved_state
, (arm_thread_state32_t
*)tstate
);
143 *count
= ARM_THREAD_STATE32_COUNT
;
148 handle_get_arm64_thread_state(
149 thread_state_t tstate
,
150 mach_msg_type_number_t
* count
,
151 const arm_saved_state_t
*saved_state
)
153 if (*count
< ARM_THREAD_STATE64_COUNT
) {
154 return KERN_INVALID_ARGUMENT
;
156 if (!is_saved_state64(saved_state
)) {
157 return KERN_INVALID_ARGUMENT
;
160 (void)saved_state_to_thread_state64(saved_state
, (arm_thread_state64_t
*)tstate
);
161 *count
= ARM_THREAD_STATE64_COUNT
;
167 handle_get_arm_thread_state(
168 thread_state_t tstate
,
169 mach_msg_type_number_t
* count
,
170 const arm_saved_state_t
*saved_state
)
172 /* In an arm64 world, this flavor can be used to retrieve the thread
173 * state of a 32-bit or 64-bit thread into a unified structure, but we
174 * need to support legacy clients who are only aware of 32-bit, so
175 * check the count to see what the client is expecting.
177 if (*count
< ARM_UNIFIED_THREAD_STATE_COUNT
) {
178 return handle_get_arm32_thread_state(tstate
, count
, saved_state
);
181 arm_unified_thread_state_t
*unified_state
= (arm_unified_thread_state_t
*) tstate
;
182 bzero(unified_state
, sizeof(*unified_state
));
184 if (is_saved_state64(saved_state
)) {
185 unified_state
->ash
.flavor
= ARM_THREAD_STATE64
;
186 unified_state
->ash
.count
= ARM_THREAD_STATE64_COUNT
;
187 (void)saved_state_to_thread_state64(saved_state
, thread_state64(unified_state
));
191 unified_state
->ash
.flavor
= ARM_THREAD_STATE32
;
192 unified_state
->ash
.count
= ARM_THREAD_STATE32_COUNT
;
193 (void)saved_state_to_thread_state32(saved_state
, thread_state32(unified_state
));
195 *count
= ARM_UNIFIED_THREAD_STATE_COUNT
;
200 handle_set_arm32_thread_state(
201 const thread_state_t tstate
,
202 mach_msg_type_number_t count
,
203 arm_saved_state_t
*saved_state
)
205 if (count
!= ARM_THREAD_STATE32_COUNT
) {
206 return KERN_INVALID_ARGUMENT
;
209 (void)thread_state32_to_saved_state((const arm_thread_state32_t
*)tstate
, saved_state
);
214 handle_set_arm64_thread_state(
215 const thread_state_t tstate
,
216 mach_msg_type_number_t count
,
217 arm_saved_state_t
*saved_state
)
219 if (count
!= ARM_THREAD_STATE64_COUNT
) {
220 return KERN_INVALID_ARGUMENT
;
223 (void)thread_state64_to_saved_state((const arm_thread_state64_t
*)tstate
, saved_state
);
229 handle_set_arm_thread_state(
230 const thread_state_t tstate
,
231 mach_msg_type_number_t count
,
232 arm_saved_state_t
*saved_state
)
234 /* In an arm64 world, this flavor can be used to set the thread state of a
235 * 32-bit or 64-bit thread from a unified structure, but we need to support
236 * legacy clients who are only aware of 32-bit, so check the count to see
237 * what the client is expecting.
239 if (count
< ARM_UNIFIED_THREAD_STATE_COUNT
) {
240 if (!is_saved_state32(saved_state
)) {
241 return KERN_INVALID_ARGUMENT
;
243 return handle_set_arm32_thread_state(tstate
, count
, saved_state
);
246 const arm_unified_thread_state_t
*unified_state
= (const arm_unified_thread_state_t
*) tstate
;
248 if (is_thread_state64(unified_state
)) {
249 if (!is_saved_state64(saved_state
)) {
250 return KERN_INVALID_ARGUMENT
;
252 (void)thread_state64_to_saved_state(const_thread_state64(unified_state
), saved_state
);
256 if (!is_saved_state32(saved_state
)) {
257 return KERN_INVALID_ARGUMENT
;
259 (void)thread_state32_to_saved_state(const_thread_state32(unified_state
), saved_state
);
266 * Translate thread state arguments to userspace representation
270 machine_thread_state_convert_to_user(
272 thread_flavor_t flavor
,
273 thread_state_t tstate
,
274 mach_msg_type_number_t
*count
)
276 // No conversion to userspace representation on this platform
277 (void)thread
; (void)flavor
; (void)tstate
; (void)count
;
282 * Translate thread state arguments from userspace representation
286 machine_thread_state_convert_from_user(
288 thread_flavor_t flavor
,
289 thread_state_t tstate
,
290 mach_msg_type_number_t count
)
292 // No conversion from userspace representation on this platform
293 (void)thread
; (void)flavor
; (void)tstate
; (void)count
;
298 * Translate signal context data pointer to userspace representation
302 machine_thread_siguctx_pointer_convert_to_user(
303 __assert_only thread_t thread
,
306 // No conversion to userspace representation on this platform
307 (void)thread
; (void)uctxp
;
312 * Translate array of function pointer syscall arguments from userspace representation
316 machine_thread_function_pointers_convert_from_user(
317 __assert_only thread_t thread
,
321 // No conversion from userspace representation on this platform
322 (void)thread
; (void)fptrs
; (void)count
;
327 * Routine: machine_thread_get_state
331 machine_thread_get_state(
333 thread_flavor_t flavor
,
334 thread_state_t tstate
,
335 mach_msg_type_number_t
* count
)
338 case THREAD_STATE_FLAVOR_LIST
:
340 return KERN_INVALID_ARGUMENT
;
343 tstate
[0] = ARM_THREAD_STATE
;
344 tstate
[1] = ARM_VFP_STATE
;
345 tstate
[2] = ARM_EXCEPTION_STATE
;
346 tstate
[3] = ARM_DEBUG_STATE
;
350 case THREAD_STATE_FLAVOR_LIST_NEW
:
352 return KERN_INVALID_ARGUMENT
;
355 tstate
[0] = ARM_THREAD_STATE
;
356 tstate
[1] = ARM_VFP_STATE
;
357 tstate
[2] = thread_is_64bit_data(thread
) ? ARM_EXCEPTION_STATE64
: ARM_EXCEPTION_STATE
;
358 tstate
[3] = thread_is_64bit_data(thread
) ? ARM_DEBUG_STATE64
: ARM_DEBUG_STATE32
;
362 case ARM_THREAD_STATE
:
364 kern_return_t rn
= handle_get_arm_thread_state(tstate
, count
, thread
->machine
.upcb
);
370 case ARM_THREAD_STATE32
:
372 if (thread_is_64bit_data(thread
)) {
373 return KERN_INVALID_ARGUMENT
;
376 kern_return_t rn
= handle_get_arm32_thread_state(tstate
, count
, thread
->machine
.upcb
);
383 case ARM_THREAD_STATE64
:
385 if (!thread_is_64bit_data(thread
)) {
386 return KERN_INVALID_ARGUMENT
;
389 kern_return_t rn
= handle_get_arm64_thread_state(tstate
, count
, thread
->machine
.upcb
);
396 case ARM_EXCEPTION_STATE
:{
397 struct arm_exception_state
*state
;
398 struct arm_saved_state32
*saved_state
;
400 if (*count
< ARM_EXCEPTION_STATE_COUNT
) {
401 return KERN_INVALID_ARGUMENT
;
403 if (thread_is_64bit_data(thread
)) {
404 return KERN_INVALID_ARGUMENT
;
407 state
= (struct arm_exception_state
*) tstate
;
408 saved_state
= saved_state32(thread
->machine
.upcb
);
410 state
->exception
= saved_state
->exception
;
411 state
->fsr
= saved_state
->esr
;
412 state
->far
= saved_state
->far
;
414 *count
= ARM_EXCEPTION_STATE_COUNT
;
417 case ARM_EXCEPTION_STATE64
:{
418 struct arm_exception_state64
*state
;
419 struct arm_saved_state64
*saved_state
;
421 if (*count
< ARM_EXCEPTION_STATE64_COUNT
) {
422 return KERN_INVALID_ARGUMENT
;
424 if (!thread_is_64bit_data(thread
)) {
425 return KERN_INVALID_ARGUMENT
;
428 state
= (struct arm_exception_state64
*) tstate
;
429 saved_state
= saved_state64(thread
->machine
.upcb
);
431 state
->exception
= saved_state
->exception
;
432 state
->far
= saved_state
->far
;
433 state
->esr
= saved_state
->esr
;
435 *count
= ARM_EXCEPTION_STATE64_COUNT
;
438 case ARM_DEBUG_STATE
:{
439 arm_legacy_debug_state_t
*state
;
440 arm_debug_state32_t
*thread_state
;
442 if (*count
< ARM_LEGACY_DEBUG_STATE_COUNT
) {
443 return KERN_INVALID_ARGUMENT
;
446 if (thread_is_64bit_data(thread
)) {
447 return KERN_INVALID_ARGUMENT
;
450 state
= (arm_legacy_debug_state_t
*) tstate
;
451 thread_state
= find_debug_state32(thread
);
453 if (thread_state
== NULL
) {
454 bzero(state
, sizeof(arm_legacy_debug_state_t
));
456 bcopy(thread_state
, state
, sizeof(arm_legacy_debug_state_t
));
459 *count
= ARM_LEGACY_DEBUG_STATE_COUNT
;
462 case ARM_DEBUG_STATE32
:{
463 arm_debug_state32_t
*state
;
464 arm_debug_state32_t
*thread_state
;
466 if (*count
< ARM_DEBUG_STATE32_COUNT
) {
467 return KERN_INVALID_ARGUMENT
;
470 if (thread_is_64bit_data(thread
)) {
471 return KERN_INVALID_ARGUMENT
;
474 state
= (arm_debug_state32_t
*) tstate
;
475 thread_state
= find_debug_state32(thread
);
477 if (thread_state
== NULL
) {
478 bzero(state
, sizeof(arm_debug_state32_t
));
480 bcopy(thread_state
, state
, sizeof(arm_debug_state32_t
));
483 *count
= ARM_DEBUG_STATE32_COUNT
;
487 case ARM_DEBUG_STATE64
:{
488 arm_debug_state64_t
*state
;
489 arm_debug_state64_t
*thread_state
;
491 if (*count
< ARM_DEBUG_STATE64_COUNT
) {
492 return KERN_INVALID_ARGUMENT
;
495 if (!thread_is_64bit_data(thread
)) {
496 return KERN_INVALID_ARGUMENT
;
499 state
= (arm_debug_state64_t
*) tstate
;
500 thread_state
= find_debug_state64(thread
);
502 if (thread_state
== NULL
) {
503 bzero(state
, sizeof(arm_debug_state64_t
));
505 bcopy(thread_state
, state
, sizeof(arm_debug_state64_t
));
508 *count
= ARM_DEBUG_STATE64_COUNT
;
513 struct arm_vfp_state
*state
;
514 arm_neon_saved_state32_t
*thread_state
;
517 if (*count
< ARM_VFP_STATE_COUNT
) {
518 if (*count
< ARM_VFPV2_STATE_COUNT
) {
519 return KERN_INVALID_ARGUMENT
;
521 *count
= ARM_VFPV2_STATE_COUNT
;
525 if (*count
== ARM_VFPV2_STATE_COUNT
) {
531 state
= (struct arm_vfp_state
*) tstate
;
532 thread_state
= neon_state32(thread
->machine
.uNeon
);
533 /* ARM64 TODO: set fpsr and fpcr from state->fpscr */
535 bcopy(thread_state
, state
, (max
+ 1) * sizeof(uint32_t));
539 case ARM_NEON_STATE
:{
540 arm_neon_state_t
*state
;
541 arm_neon_saved_state32_t
*thread_state
;
543 if (*count
< ARM_NEON_STATE_COUNT
) {
544 return KERN_INVALID_ARGUMENT
;
547 if (thread_is_64bit_data(thread
)) {
548 return KERN_INVALID_ARGUMENT
;
551 state
= (arm_neon_state_t
*)tstate
;
552 thread_state
= neon_state32(thread
->machine
.uNeon
);
554 assert(sizeof(*thread_state
) == sizeof(*state
));
555 bcopy(thread_state
, state
, sizeof(arm_neon_state_t
));
557 *count
= ARM_NEON_STATE_COUNT
;
561 case ARM_NEON_STATE64
:{
562 arm_neon_state64_t
*state
;
563 arm_neon_saved_state64_t
*thread_state
;
565 if (*count
< ARM_NEON_STATE64_COUNT
) {
566 return KERN_INVALID_ARGUMENT
;
569 if (!thread_is_64bit_data(thread
)) {
570 return KERN_INVALID_ARGUMENT
;
573 state
= (arm_neon_state64_t
*)tstate
;
574 thread_state
= neon_state64(thread
->machine
.uNeon
);
576 /* For now, these are identical */
577 assert(sizeof(*state
) == sizeof(*thread_state
));
578 bcopy(thread_state
, state
, sizeof(arm_neon_state64_t
));
580 *count
= ARM_NEON_STATE64_COUNT
;
585 return KERN_INVALID_ARGUMENT
;
592 * Routine: machine_thread_get_kern_state
596 machine_thread_get_kern_state(
598 thread_flavor_t flavor
,
599 thread_state_t tstate
,
600 mach_msg_type_number_t
* count
)
603 * This works only for an interrupted kernel thread
605 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
) {
610 case ARM_THREAD_STATE
:
612 kern_return_t rn
= handle_get_arm_thread_state(tstate
, count
, getCpuDatap()->cpu_int_state
);
618 case ARM_THREAD_STATE32
:
620 kern_return_t rn
= handle_get_arm32_thread_state(tstate
, count
, getCpuDatap()->cpu_int_state
);
627 case ARM_THREAD_STATE64
:
629 kern_return_t rn
= handle_get_arm64_thread_state(tstate
, count
, getCpuDatap()->cpu_int_state
);
637 return KERN_INVALID_ARGUMENT
;
643 machine_thread_switch_addrmode(thread_t thread
)
645 if (task_has_64Bit_data(thread
->task
)) {
646 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE64
;
647 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE64_COUNT
;
648 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE64
;
649 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE64_COUNT
;
652 * Reinitialize the NEON state.
654 bzero(&thread
->machine
.uNeon
->uns
, sizeof(thread
->machine
.uNeon
->uns
));
655 thread
->machine
.uNeon
->ns_64
.fpcr
= FPCR_DEFAULT
;
657 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE32
;
658 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE32_COUNT
;
659 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
660 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
663 * Reinitialize the NEON state.
665 bzero(&thread
->machine
.uNeon
->uns
, sizeof(thread
->machine
.uNeon
->uns
));
666 thread
->machine
.uNeon
->ns_32
.fpcr
= FPCR_DEFAULT_32
;
extern long long arm_debug_get(void);
673 * Routine: machine_thread_set_state
677 machine_thread_set_state(
679 thread_flavor_t flavor
,
680 thread_state_t tstate
,
681 mach_msg_type_number_t count
)
686 case ARM_THREAD_STATE
:
687 rn
= handle_set_arm_thread_state(tstate
, count
, thread
->machine
.upcb
);
693 case ARM_THREAD_STATE32
:
694 if (thread_is_64bit_data(thread
)) {
695 return KERN_INVALID_ARGUMENT
;
698 rn
= handle_set_arm32_thread_state(tstate
, count
, thread
->machine
.upcb
);
705 case ARM_THREAD_STATE64
:
706 if (!thread_is_64bit_data(thread
)) {
707 return KERN_INVALID_ARGUMENT
;
710 rn
= handle_set_arm64_thread_state(tstate
, count
, thread
->machine
.upcb
);
716 case ARM_EXCEPTION_STATE
:{
717 if (count
!= ARM_EXCEPTION_STATE_COUNT
) {
718 return KERN_INVALID_ARGUMENT
;
720 if (thread_is_64bit_data(thread
)) {
721 return KERN_INVALID_ARGUMENT
;
726 case ARM_EXCEPTION_STATE64
:{
727 if (count
!= ARM_EXCEPTION_STATE64_COUNT
) {
728 return KERN_INVALID_ARGUMENT
;
730 if (!thread_is_64bit_data(thread
)) {
731 return KERN_INVALID_ARGUMENT
;
736 case ARM_DEBUG_STATE
:
738 arm_legacy_debug_state_t
*state
;
739 boolean_t enabled
= FALSE
;
742 if (count
!= ARM_LEGACY_DEBUG_STATE_COUNT
) {
743 return KERN_INVALID_ARGUMENT
;
745 if (thread_is_64bit_data(thread
)) {
746 return KERN_INVALID_ARGUMENT
;
749 state
= (arm_legacy_debug_state_t
*) tstate
;
751 for (i
= 0; i
< 16; i
++) {
752 /* do not allow context IDs to be set */
753 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
754 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
755 || ((state
->wcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
756 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
757 return KERN_PROTECTION_FAILURE
;
759 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
760 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
767 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
768 if (thread_state
!= NULL
) {
769 void *pTmp
= thread
->machine
.DebugData
;
770 thread
->machine
.DebugData
= NULL
;
771 zfree(ads_zone
, pTmp
);
774 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
775 if (thread_state
== NULL
) {
776 thread
->machine
.DebugData
= zalloc(ads_zone
);
777 bzero(thread
->machine
.DebugData
, sizeof *(thread
->machine
.DebugData
));
778 thread
->machine
.DebugData
->dsh
.flavor
= ARM_DEBUG_STATE32
;
779 thread
->machine
.DebugData
->dsh
.count
= ARM_DEBUG_STATE32_COUNT
;
780 thread_state
= find_debug_state32(thread
);
782 assert(NULL
!= thread_state
);
784 for (i
= 0; i
< 16; i
++) {
785 /* set appropriate privilege; mask out unknown bits */
786 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
787 | ARM_DBGBCR_MATCH_MASK
788 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
789 | ARM_DBG_CR_ENABLE_MASK
))
790 | ARM_DBGBCR_TYPE_IVA
791 | ARM_DBG_CR_LINKED_UNLINKED
792 | ARM_DBG_CR_SECURITY_STATE_BOTH
793 | ARM_DBG_CR_MODE_CONTROL_USER
;
794 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
795 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
796 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
797 | ARM_DBGWCR_ACCESS_CONTROL_MASK
798 | ARM_DBG_CR_ENABLE_MASK
))
799 | ARM_DBG_CR_LINKED_UNLINKED
800 | ARM_DBG_CR_SECURITY_STATE_BOTH
801 | ARM_DBG_CR_MODE_CONTROL_USER
;
802 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
805 thread_state
->mdscr_el1
= 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
808 if (thread
== current_thread()) {
809 arm_debug_set32(thread
->machine
.DebugData
);
814 case ARM_DEBUG_STATE32
:
815 /* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
817 arm_debug_state32_t
*state
;
818 boolean_t enabled
= FALSE
;
821 if (count
!= ARM_DEBUG_STATE32_COUNT
) {
822 return KERN_INVALID_ARGUMENT
;
824 if (thread_is_64bit_data(thread
)) {
825 return KERN_INVALID_ARGUMENT
;
828 state
= (arm_debug_state32_t
*) tstate
;
830 if (state
->mdscr_el1
& 0x1) {
834 for (i
= 0; i
< 16; i
++) {
835 /* do not allow context IDs to be set */
836 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
837 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
838 || ((state
->wcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
839 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
840 return KERN_PROTECTION_FAILURE
;
842 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
843 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
849 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
850 if (thread_state
!= NULL
) {
851 void *pTmp
= thread
->machine
.DebugData
;
852 thread
->machine
.DebugData
= NULL
;
853 zfree(ads_zone
, pTmp
);
856 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
857 if (thread_state
== NULL
) {
858 thread
->machine
.DebugData
= zalloc(ads_zone
);
859 bzero(thread
->machine
.DebugData
, sizeof *(thread
->machine
.DebugData
));
860 thread
->machine
.DebugData
->dsh
.flavor
= ARM_DEBUG_STATE32
;
861 thread
->machine
.DebugData
->dsh
.count
= ARM_DEBUG_STATE32_COUNT
;
862 thread_state
= find_debug_state32(thread
);
864 assert(NULL
!= thread_state
);
866 if (state
->mdscr_el1
& 0x1) {
867 thread_state
->mdscr_el1
|= 0x1;
869 thread_state
->mdscr_el1
&= ~0x1;
872 for (i
= 0; i
< 16; i
++) {
873 /* set appropriate privilege; mask out unknown bits */
874 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
875 | ARM_DBGBCR_MATCH_MASK
876 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
877 | ARM_DBG_CR_ENABLE_MASK
))
878 | ARM_DBGBCR_TYPE_IVA
879 | ARM_DBG_CR_LINKED_UNLINKED
880 | ARM_DBG_CR_SECURITY_STATE_BOTH
881 | ARM_DBG_CR_MODE_CONTROL_USER
;
882 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
883 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
884 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
885 | ARM_DBGWCR_ACCESS_CONTROL_MASK
886 | ARM_DBG_CR_ENABLE_MASK
))
887 | ARM_DBG_CR_LINKED_UNLINKED
888 | ARM_DBG_CR_SECURITY_STATE_BOTH
889 | ARM_DBG_CR_MODE_CONTROL_USER
;
890 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
894 if (thread
== current_thread()) {
895 arm_debug_set32(thread
->machine
.DebugData
);
901 case ARM_DEBUG_STATE64
:
903 arm_debug_state64_t
*state
;
904 boolean_t enabled
= FALSE
;
907 if (count
!= ARM_DEBUG_STATE64_COUNT
) {
908 return KERN_INVALID_ARGUMENT
;
910 if (!thread_is_64bit_data(thread
)) {
911 return KERN_INVALID_ARGUMENT
;
914 state
= (arm_debug_state64_t
*) tstate
;
916 if (state
->mdscr_el1
& 0x1) {
920 for (i
= 0; i
< 16; i
++) {
921 /* do not allow context IDs to be set */
922 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
923 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
924 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
925 return KERN_PROTECTION_FAILURE
;
927 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
928 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
934 arm_debug_state64_t
*thread_state
= find_debug_state64(thread
);
935 if (thread_state
!= NULL
) {
936 void *pTmp
= thread
->machine
.DebugData
;
937 thread
->machine
.DebugData
= NULL
;
938 zfree(ads_zone
, pTmp
);
941 arm_debug_state64_t
*thread_state
= find_debug_state64(thread
);
942 if (thread_state
== NULL
) {
943 thread
->machine
.DebugData
= zalloc(ads_zone
);
944 bzero(thread
->machine
.DebugData
, sizeof *(thread
->machine
.DebugData
));
945 thread
->machine
.DebugData
->dsh
.flavor
= ARM_DEBUG_STATE64
;
946 thread
->machine
.DebugData
->dsh
.count
= ARM_DEBUG_STATE64_COUNT
;
947 thread_state
= find_debug_state64(thread
);
949 assert(NULL
!= thread_state
);
951 if (state
->mdscr_el1
& 0x1) {
952 thread_state
->mdscr_el1
|= 0x1;
954 thread_state
->mdscr_el1
&= ~0x1;
957 for (i
= 0; i
< 16; i
++) {
958 /* set appropriate privilege; mask out unknown bits */
959 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
960 | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
961 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
962 | ARM_DBG_CR_ENABLE_MASK
))
963 | ARM_DBGBCR_TYPE_IVA
964 | ARM_DBG_CR_LINKED_UNLINKED
965 | ARM_DBG_CR_SECURITY_STATE_BOTH
966 | ARM_DBG_CR_MODE_CONTROL_USER
;
967 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK64
;
968 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
969 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
970 | ARM_DBGWCR_ACCESS_CONTROL_MASK
971 | ARM_DBG_CR_ENABLE_MASK
))
972 | ARM_DBG_CR_LINKED_UNLINKED
973 | ARM_DBG_CR_SECURITY_STATE_BOTH
974 | ARM_DBG_CR_MODE_CONTROL_USER
;
975 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK64
;
979 if (thread
== current_thread()) {
980 arm_debug_set64(thread
->machine
.DebugData
);
987 struct arm_vfp_state
*state
;
988 arm_neon_saved_state32_t
*thread_state
;
991 if (count
!= ARM_VFP_STATE_COUNT
&& count
!= ARM_VFPV2_STATE_COUNT
) {
992 return KERN_INVALID_ARGUMENT
;
995 if (count
== ARM_VFPV2_STATE_COUNT
) {
1001 state
= (struct arm_vfp_state
*) tstate
;
1002 thread_state
= neon_state32(thread
->machine
.uNeon
);
1003 /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */
1005 bcopy(state
, thread_state
, (max
+ 1) * sizeof(uint32_t));
1007 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
1008 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
1012 case ARM_NEON_STATE
:{
1013 arm_neon_state_t
*state
;
1014 arm_neon_saved_state32_t
*thread_state
;
1016 if (count
!= ARM_NEON_STATE_COUNT
) {
1017 return KERN_INVALID_ARGUMENT
;
1020 if (thread_is_64bit_data(thread
)) {
1021 return KERN_INVALID_ARGUMENT
;
1024 state
= (arm_neon_state_t
*)tstate
;
1025 thread_state
= neon_state32(thread
->machine
.uNeon
);
1027 assert(sizeof(*state
) == sizeof(*thread_state
));
1028 bcopy(state
, thread_state
, sizeof(arm_neon_state_t
));
1030 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
1031 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
1035 case ARM_NEON_STATE64
:{
1036 arm_neon_state64_t
*state
;
1037 arm_neon_saved_state64_t
*thread_state
;
1039 if (count
!= ARM_NEON_STATE64_COUNT
) {
1040 return KERN_INVALID_ARGUMENT
;
1043 if (!thread_is_64bit_data(thread
)) {
1044 return KERN_INVALID_ARGUMENT
;
1047 state
= (arm_neon_state64_t
*)tstate
;
1048 thread_state
= neon_state64(thread
->machine
.uNeon
);
1050 assert(sizeof(*state
) == sizeof(*thread_state
));
1051 bcopy(state
, thread_state
, sizeof(arm_neon_state64_t
));
1053 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE64
;
1054 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE64_COUNT
;
1059 return KERN_INVALID_ARGUMENT
;
1061 return KERN_SUCCESS
;
1065 * Routine: machine_thread_state_initialize
1069 machine_thread_state_initialize(
1072 arm_context_t
*context
= thread
->machine
.contextData
;
1075 * Should always be set up later. For a kernel thread, we don't care
1076 * about this state. For a user thread, we'll set the state up in
1077 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
1080 if (context
!= NULL
) {
1081 bzero(&context
->ss
.uss
, sizeof(context
->ss
.uss
));
1082 bzero(&context
->ns
.uns
, sizeof(context
->ns
.uns
));
1084 if (context
->ns
.nsh
.flavor
== ARM_NEON_SAVED_STATE64
) {
1085 context
->ns
.ns_64
.fpcr
= FPCR_DEFAULT
;
1087 context
->ns
.ns_32
.fpcr
= FPCR_DEFAULT_32
;
1091 thread
->machine
.DebugData
= NULL
;
1094 return KERN_SUCCESS
;
1098 * Routine: machine_thread_dup
1105 __unused boolean_t is_corpse
)
1107 struct arm_saved_state
*self_saved_state
;
1108 struct arm_saved_state
*target_saved_state
;
1110 target
->machine
.cthread_self
= self
->machine
.cthread_self
;
1111 target
->machine
.cthread_data
= self
->machine
.cthread_data
;
1113 self_saved_state
= self
->machine
.upcb
;
1114 target_saved_state
= target
->machine
.upcb
;
1115 bcopy(self_saved_state
, target_saved_state
, sizeof(struct arm_saved_state
));
1117 return KERN_SUCCESS
;
1121 * Routine: get_user_regs
1124 struct arm_saved_state
*
1128 return thread
->machine
.upcb
;
1131 arm_neon_saved_state_t
*
1135 return thread
->machine
.uNeon
;
1139 * Routine: find_user_regs
1142 struct arm_saved_state
*
1146 return thread
->machine
.upcb
;
1150 * Routine: find_kern_regs
1153 struct arm_saved_state
*
1158 * This works only for an interrupted kernel thread
1160 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
) {
1161 return (struct arm_saved_state
*) NULL
;
1163 return getCpuDatap()->cpu_int_state
;
1167 arm_debug_state32_t
*
1171 if (thread
&& thread
->machine
.DebugData
) {
1172 return &(thread
->machine
.DebugData
->uds
.ds32
);
1178 arm_debug_state64_t
*
1182 if (thread
&& thread
->machine
.DebugData
) {
1183 return &(thread
->machine
.DebugData
->uds
.ds64
);
1190 * Routine: thread_userstack
1195 __unused thread_t thread
,
1197 thread_state_t tstate
,
1199 mach_vm_offset_t
* user_stack
,
1201 boolean_t is_64bit_data
1207 case ARM_THREAD_STATE
:
1208 if (count
== ARM_UNIFIED_THREAD_STATE_COUNT
) {
1210 if (is_64bit_data
) {
1211 sp
= ((arm_unified_thread_state_t
*)tstate
)->ts_64
.sp
;
1215 sp
= ((arm_unified_thread_state_t
*)tstate
)->ts_32
.sp
;
1221 /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1222 case ARM_THREAD_STATE32
:
1223 if (count
!= ARM_THREAD_STATE32_COUNT
) {
1224 return KERN_INVALID_ARGUMENT
;
1226 if (is_64bit_data
) {
1227 return KERN_INVALID_ARGUMENT
;
1230 sp
= ((arm_thread_state32_t
*)tstate
)->sp
;
1233 case ARM_THREAD_STATE64
:
1234 if (count
!= ARM_THREAD_STATE64_COUNT
) {
1235 return KERN_INVALID_ARGUMENT
;
1237 if (!is_64bit_data
) {
1238 return KERN_INVALID_ARGUMENT
;
1241 sp
= ((arm_thread_state32_t
*)tstate
)->sp
;
1245 return KERN_INVALID_ARGUMENT
;
1249 *user_stack
= CAST_USER_ADDR_T(sp
);
1254 *user_stack
= CAST_USER_ADDR_T(USRSTACK64
);
1260 return KERN_SUCCESS
;
1264 * thread_userstackdefault:
1266 * Return the default stack location for the
1267 * thread, if otherwise unknown.
1270 thread_userstackdefault(
1271 mach_vm_offset_t
*default_user_stack
,
1275 *default_user_stack
= USRSTACK64
;
1277 *default_user_stack
= USRSTACK
;
1280 return KERN_SUCCESS
;
1284 * Routine: thread_setuserstack
1288 thread_setuserstack(thread_t thread
, mach_vm_address_t user_stack
)
1290 struct arm_saved_state
*sv
;
1292 sv
= get_user_regs(thread
);
1294 set_saved_state_sp(sv
, user_stack
);
1300 * Routine: thread_adjuserstack
1304 thread_adjuserstack(thread_t thread
, int adjust
)
1306 struct arm_saved_state
*sv
;
1309 sv
= get_user_regs(thread
);
1311 sp
= get_saved_state_sp(sv
);
1313 set_saved_state_sp(sv
, sp
);;
1319 * Routine: thread_setentrypoint
1323 thread_setentrypoint(thread_t thread
, mach_vm_offset_t entry
)
1325 struct arm_saved_state
*sv
;
1327 sv
= get_user_regs(thread
);
1329 set_saved_state_pc(sv
, entry
);
1335 * Routine: thread_entrypoint
1340 __unused thread_t thread
,
1342 thread_state_t tstate
,
1343 unsigned int count __unused
,
1344 mach_vm_offset_t
* entry_point
1348 case ARM_THREAD_STATE
:
1350 struct arm_thread_state
*state
;
1352 state
= (struct arm_thread_state
*) tstate
;
1355 * If a valid entry point is specified, use it.
1358 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
1360 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
1365 case ARM_THREAD_STATE64
:
1367 struct arm_thread_state64
*state
;
1369 state
= (struct arm_thread_state64
*) tstate
;
1372 * If a valid entry point is specified, use it.
1375 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
1377 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
1383 return KERN_INVALID_ARGUMENT
;
1386 return KERN_SUCCESS
;
1391 * Routine: thread_set_child
1399 struct arm_saved_state
*child_state
;
1401 child_state
= get_user_regs(child
);
1403 set_saved_state_reg(child_state
, 0, pid
);
1404 set_saved_state_reg(child_state
, 1, 1ULL);
1409 * Routine: thread_set_parent
1417 struct arm_saved_state
*parent_state
;
1419 parent_state
= get_user_regs(parent
);
1421 set_saved_state_reg(parent_state
, 0, pid
);
1422 set_saved_state_reg(parent_state
, 1, 0);
1426 struct arm_act_context
{
1427 struct arm_unified_thread_state ss
;
1429 struct arm_neon_saved_state ns
;
1434 * Routine: act_thread_csave
1438 act_thread_csave(void)
1440 struct arm_act_context
*ic
;
1443 thread_t thread
= current_thread();
1445 ic
= (struct arm_act_context
*) kalloc(sizeof(struct arm_act_context
));
1446 if (ic
== (struct arm_act_context
*) NULL
) {
1450 val
= ARM_UNIFIED_THREAD_STATE_COUNT
;
1451 kret
= machine_thread_get_state(thread
, ARM_THREAD_STATE
, (thread_state_t
)&ic
->ss
, &val
);
1452 if (kret
!= KERN_SUCCESS
) {
1453 kfree(ic
, sizeof(struct arm_act_context
));
1458 if (thread_is_64bit_data(thread
)) {
1459 val
= ARM_NEON_STATE64_COUNT
;
1460 kret
= machine_thread_get_state(thread
,
1462 (thread_state_t
) &ic
->ns
,
1465 val
= ARM_NEON_STATE_COUNT
;
1466 kret
= machine_thread_get_state(thread
,
1468 (thread_state_t
) &ic
->ns
,
1471 if (kret
!= KERN_SUCCESS
) {
1472 kfree(ic
, sizeof(struct arm_act_context
));
1480 * Routine: act_thread_catt
1484 act_thread_catt(void *ctx
)
1486 struct arm_act_context
*ic
;
1488 thread_t thread
= current_thread();
1490 ic
= (struct arm_act_context
*) ctx
;
1491 if (ic
== (struct arm_act_context
*) NULL
) {
1495 kret
= machine_thread_set_state(thread
, ARM_THREAD_STATE
, (thread_state_t
)&ic
->ss
, ARM_UNIFIED_THREAD_STATE_COUNT
);
1496 if (kret
!= KERN_SUCCESS
) {
1501 if (thread_is_64bit_data(thread
)) {
1502 kret
= machine_thread_set_state(thread
,
1504 (thread_state_t
) &ic
->ns
,
1505 ARM_NEON_STATE64_COUNT
);
1507 kret
= machine_thread_set_state(thread
,
1509 (thread_state_t
) &ic
->ns
,
1510 ARM_NEON_STATE_COUNT
);
1512 if (kret
!= KERN_SUCCESS
) {
1517 kfree(ic
, sizeof(struct arm_act_context
));
1521 * Routine: act_thread_catt
1525 act_thread_cfree(void *ctx
)
1527 kfree(ctx
, sizeof(struct arm_act_context
));
1531 thread_set_wq_state32(thread_t thread
, thread_state_t tstate
)
1533 arm_thread_state_t
*state
;
1534 struct arm_saved_state
*saved_state
;
1535 struct arm_saved_state32
*saved_state_32
;
1536 thread_t curth
= current_thread();
1539 assert(!thread_is_64bit_data(thread
));
1541 saved_state
= thread
->machine
.upcb
;
1542 saved_state_32
= saved_state32(saved_state
);
1544 state
= (arm_thread_state_t
*)tstate
;
1546 if (curth
!= thread
) {
1548 thread_lock(thread
);
1552 * do not zero saved_state, it can be concurrently accessed
1553 * and zero is not a valid state for some of the registers,
1556 thread_state32_to_saved_state(state
, saved_state
);
1557 saved_state_32
->cpsr
= PSR64_USER32_DEFAULT
;
1559 if (curth
!= thread
) {
1560 thread_unlock(thread
);
1564 return KERN_SUCCESS
;
1568 thread_set_wq_state64(thread_t thread
, thread_state_t tstate
)
1570 arm_thread_state64_t
*state
;
1571 struct arm_saved_state
*saved_state
;
1572 struct arm_saved_state64
*saved_state_64
;
1573 thread_t curth
= current_thread();
1576 assert(thread_is_64bit_data(thread
));
1578 saved_state
= thread
->machine
.upcb
;
1579 saved_state_64
= saved_state64(saved_state
);
1580 state
= (arm_thread_state64_t
*)tstate
;
1582 if (curth
!= thread
) {
1584 thread_lock(thread
);
1588 * do not zero saved_state, it can be concurrently accessed
1589 * and zero is not a valid state for some of the registers,
1592 thread_state64_to_saved_state(state
, saved_state
);
1593 set_saved_state_cpsr(saved_state
, PSR64_USER64_DEFAULT
);
1595 if (curth
!= thread
) {
1596 thread_unlock(thread
);
1600 return KERN_SUCCESS
;