2 * Copyright (c) 2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm64/proc_reg.h>
37 #if __has_feature(ptrauth_calls)
/*
 * Legacy VFPv2 floating-point state layout, kept so old 32-bit clients can
 * request a smaller VFP state than the full ARM_VFP_STATE structure (see the
 * ARM_VFPV2_STATE_COUNT handling in machine_thread_get_state below).
 * NOTE(review): this chunk is a lossy extraction -- the struct's members were
 * dropped by the extractor; confirm the layout against the original source.
 */
41 struct arm_vfpv2_state
{
46 typedef struct arm_vfpv2_state arm_vfpv2_state_t
;
/* Size of arm_vfpv2_state_t expressed in 32-bit words, the unit Mach uses
 * for thread-state counts. */
48 #define ARM_VFPV2_STATE_COUNT \
49 ((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
/*
 * Forward declarations for the fork return-value helpers.  Their definitions
 * are not visible in this chunk of the file; presumably they set the syscall
 * return registers of the child/parent thread after a fork -- TODO confirm
 * against the definitions elsewhere in this file.
 */
54 void thread_set_child(thread_t child
, int pid
);
55 void thread_set_parent(thread_t parent
, int pid
);
/*
 * _MachineStateCount: table indexed by thread-state flavor giving the number
 * of 32-bit words in each state structure.  Uses C99 designated initializers
 * so unlisted flavors default to 0.
 */
58 * Maps state flavor to number of words in the state:
60 /* __private_extern__ */
61 unsigned int _MachineStateCount
[] = {
62 [ARM_UNIFIED_THREAD_STATE
] = ARM_UNIFIED_THREAD_STATE_COUNT
,
63 [ARM_VFP_STATE
] = ARM_VFP_STATE_COUNT
,
64 [ARM_EXCEPTION_STATE
] = ARM_EXCEPTION_STATE_COUNT
,
65 [ARM_DEBUG_STATE
] = ARM_DEBUG_STATE_COUNT
,
66 [ARM_THREAD_STATE64
] = ARM_THREAD_STATE64_COUNT
,
67 [ARM_EXCEPTION_STATE64
] = ARM_EXCEPTION_STATE64_COUNT
,
68 [ARM_THREAD_STATE32
] = ARM_THREAD_STATE32_COUNT
,
69 [ARM_DEBUG_STATE32
] = ARM_DEBUG_STATE32_COUNT
,
70 [ARM_DEBUG_STATE64
] = ARM_DEBUG_STATE64_COUNT
,
71 [ARM_NEON_STATE
] = ARM_NEON_STATE_COUNT
,
72 [ARM_NEON_STATE64
] = ARM_NEON_STATE64_COUNT
,
73 [ARM_PAGEIN_STATE
] = ARM_PAGEIN_STATE_COUNT
,
/* Zone used to allocate/free the per-thread debug-state record
 * (thread->machine.DebugData); see the zalloc/zfree calls in
 * machine_thread_set_state below. */
76 extern zone_t ads_zone
;
/*
 * saved_state_to_thread_state64: copy fp, lr, sp, pc, cpsr and x0..x28 from a
 * 64-bit kernel saved-state into a user-visible arm_thread_state64_t.  The
 * caller must supply a 64-bit saved state (asserted below).
 * NOTE(review): lossy extraction -- the return type, braces, and the
 * declaration of the loop variable `i` were dropped by the extractor.
 */
80 * Copy values from saved_state to ts64.
83 saved_state_to_thread_state64(const arm_saved_state_t
* saved_state
,
84 arm_thread_state64_t
* ts64
)
88 assert(is_saved_state64(saved_state
));
/* Special-purpose registers first. */
90 ts64
->fp
= get_saved_state_fp(saved_state
);
91 ts64
->lr
= get_saved_state_lr(saved_state
);
92 ts64
->sp
= get_saved_state_sp(saved_state
);
93 ts64
->pc
= get_saved_state_pc(saved_state
);
94 ts64
->cpsr
= get_saved_state_cpsr(saved_state
);
/* General-purpose registers x0..x28 (fp/lr cover x29/x30 above). */
95 for (i
= 0; i
< 29; i
++) {
96 ts64
->x
[i
] = get_saved_state_reg(saved_state
, i
);
/*
 * thread_state64_to_saved_state: copy a user-supplied arm_thread_state64_t
 * into the thread's 64-bit kernel saved-state.  cpsr is written first with
 * the mode bits forced to 64-bit user mode (PSR64_MODE_RW_64).  Under
 * ptrauth, interrupts are disabled for the duration and a store barrier is
 * issued after the cpsr write so that it can act as a corruption canary.
 * NOTE(review): lossy extraction -- braces and the declaration of `i` were
 * dropped by the extractor.
 */
101 * Copy values from ts64 to saved_state
104 thread_state64_to_saved_state(const arm_thread_state64_t
* ts64
,
105 arm_saved_state_t
* saved_state
)
108 #if __has_feature(ptrauth_calls)
109 boolean_t intr
= ml_set_interrupts_enabled(FALSE
);
110 #endif /* __has_feature(ptrauth_calls) */
112 assert(is_saved_state64(saved_state
));
/* Force the saved mode bits to 64-bit user state regardless of input. */
114 set_saved_state_cpsr(saved_state
, (ts64
->cpsr
& ~PSR64_MODE_MASK
) | PSR64_MODE_RW_64
);
115 #if __has_feature(ptrauth_calls)
117 * Make writes to ts64->cpsr visible first, since it's useful as a
118 * canary to detect thread-state corruption.
120 __builtin_arm_dmb(DMB_ST
);
122 set_saved_state_fp(saved_state
, ts64
->fp
);
123 set_saved_state_lr(saved_state
, ts64
->lr
);
124 set_saved_state_sp(saved_state
, ts64
->sp
);
125 set_saved_state_pc(saved_state
, ts64
->pc
);
126 for (i
= 0; i
< 29; i
++) {
127 set_saved_state_reg(saved_state
, i
, ts64
->x
[i
]);
/* Restore the caller's interrupt-enable state. */
130 #if __has_feature(ptrauth_calls)
131 ml_set_interrupts_enabled(intr
);
132 #endif /* __has_feature(ptrauth_calls) */
135 #endif /* __arm64__ */
/*
 * handle_get_arm32_thread_state: fill tstate with the 32-bit thread state
 * taken from saved_state.  Fails with KERN_INVALID_ARGUMENT if the caller's
 * buffer is too small or the saved state is not 32-bit.  On success, *count
 * is set to ARM_THREAD_STATE32_COUNT.
 */
138 handle_get_arm32_thread_state(thread_state_t tstate
,
139 mach_msg_type_number_t
* count
,
140 const arm_saved_state_t
* saved_state
)
142 if (*count
< ARM_THREAD_STATE32_COUNT
) {
143 return KERN_INVALID_ARGUMENT
;
145 if (!is_saved_state32(saved_state
)) {
146 return KERN_INVALID_ARGUMENT
;
149 (void)saved_state_to_thread_state32(saved_state
, (arm_thread_state32_t
*)tstate
);
150 *count
= ARM_THREAD_STATE32_COUNT
;
/*
 * handle_get_arm64_thread_state: fill tstate with the 64-bit thread state
 * taken from saved_state.  Fails with KERN_INVALID_ARGUMENT if the caller's
 * buffer is too small or the saved state is not 64-bit.  On success, *count
 * is set to ARM_THREAD_STATE64_COUNT.
 */
155 handle_get_arm64_thread_state(thread_state_t tstate
,
156 mach_msg_type_number_t
* count
,
157 const arm_saved_state_t
* saved_state
)
159 if (*count
< ARM_THREAD_STATE64_COUNT
) {
160 return KERN_INVALID_ARGUMENT
;
162 if (!is_saved_state64(saved_state
)) {
163 return KERN_INVALID_ARGUMENT
;
166 (void)saved_state_to_thread_state64(saved_state
, (arm_thread_state64_t
*)tstate
);
167 *count
= ARM_THREAD_STATE64_COUNT
;
/*
 * handle_get_arm_thread_state: ARM_THREAD_STATE dispatcher.  Small buffers
 * from legacy clients are served as plain 32-bit state; otherwise a unified
 * state is built whose inner flavor/count reflect whether the saved state is
 * 32- or 64-bit.  On success, *count is ARM_UNIFIED_THREAD_STATE_COUNT.
 */
173 handle_get_arm_thread_state(thread_state_t tstate
,
174 mach_msg_type_number_t
* count
,
175 const arm_saved_state_t
* saved_state
)
177 /* In an arm64 world, this flavor can be used to retrieve the thread
178 * state of a 32-bit or 64-bit thread into a unified structure, but we
179 * need to support legacy clients who are only aware of 32-bit, so
180 * check the count to see what the client is expecting.
182 if (*count
< ARM_UNIFIED_THREAD_STATE_COUNT
) {
183 return handle_get_arm32_thread_state(tstate
, count
, saved_state
);
186 arm_unified_thread_state_t
*unified_state
= (arm_unified_thread_state_t
*) tstate
;
/* Clear the whole structure so the unused half is deterministic. */
187 bzero(unified_state
, sizeof(*unified_state
));
189 if (is_saved_state64(saved_state
)) {
190 unified_state
->ash
.flavor
= ARM_THREAD_STATE64
;
191 unified_state
->ash
.count
= ARM_THREAD_STATE64_COUNT
;
192 (void)saved_state_to_thread_state64(saved_state
, thread_state64(unified_state
));
/* else: 32-bit saved state. */
196 unified_state
->ash
.flavor
= ARM_THREAD_STATE32
;
197 unified_state
->ash
.count
= ARM_THREAD_STATE32_COUNT
;
198 (void)saved_state_to_thread_state32(saved_state
, thread_state32(unified_state
));
200 *count
= ARM_UNIFIED_THREAD_STATE_COUNT
;
/*
 * handle_set_arm32_thread_state: copy a user-supplied 32-bit thread state
 * into saved_state.  The count must be exactly ARM_THREAD_STATE32_COUNT.
 */
206 handle_set_arm32_thread_state(const thread_state_t tstate
,
207 mach_msg_type_number_t count
,
208 arm_saved_state_t
* saved_state
)
210 if (count
!= ARM_THREAD_STATE32_COUNT
) {
211 return KERN_INVALID_ARGUMENT
;
214 (void)thread_state32_to_saved_state((const arm_thread_state32_t
*)tstate
, saved_state
);
/*
 * handle_set_arm64_thread_state: copy a user-supplied 64-bit thread state
 * into saved_state.  The count must be exactly ARM_THREAD_STATE64_COUNT.
 */
219 handle_set_arm64_thread_state(const thread_state_t tstate
,
220 mach_msg_type_number_t count
,
221 arm_saved_state_t
* saved_state
)
223 if (count
!= ARM_THREAD_STATE64_COUNT
) {
224 return KERN_INVALID_ARGUMENT
;
227 (void)thread_state64_to_saved_state((const arm_thread_state64_t
*)tstate
, saved_state
);
/*
 * handle_set_arm_thread_state: ARM_THREAD_STATE dispatcher for setting
 * state.  Legacy (small-count) requests are treated as plain 32-bit state;
 * otherwise the unified structure's inner flavor selects the 32- or 64-bit
 * path, which must match the thread's saved-state width.
 */
233 handle_set_arm_thread_state(const thread_state_t tstate
,
234 mach_msg_type_number_t count
,
235 arm_saved_state_t
* saved_state
)
237 /* In an arm64 world, this flavor can be used to set the thread state of a
238 * 32-bit or 64-bit thread from a unified structure, but we need to support
239 * legacy clients who are only aware of 32-bit, so check the count to see
240 * what the client is expecting.
242 if (count
< ARM_UNIFIED_THREAD_STATE_COUNT
) {
243 if (!is_saved_state32(saved_state
)) {
244 return KERN_INVALID_ARGUMENT
;
246 return handle_set_arm32_thread_state(tstate
, count
, saved_state
);
249 const arm_unified_thread_state_t
*unified_state
= (const arm_unified_thread_state_t
*) tstate
;
/* Inner flavor must agree with the width of the thread's saved state. */
251 if (is_thread_state64(unified_state
)) {
252 if (!is_saved_state64(saved_state
)) {
253 return KERN_INVALID_ARGUMENT
;
255 (void)thread_state64_to_saved_state(const_thread_state64(unified_state
), saved_state
);
259 if (!is_saved_state32(saved_state
)) {
260 return KERN_INVALID_ARGUMENT
;
262 (void)thread_state32_to_saved_state(const_thread_state32(unified_state
), saved_state
);
/*
 * machine_thread_state_convert_to_user: before returning 64-bit thread state
 * to userspace on a ptrauth system, sign pc/lr with the IA key and sp/fp with
 * the DA key (using pmap_sign_user_ptr with per-field string discriminators),
 * so userspace receives pointers it can auth.  Threads with user JOP disabled
 * get the NO_PTRAUTH flag and their state is passed through unsigned.  An lr
 * that is already IB-signed (detected by comparing against its stripped
 * value) is flagged IB_SIGNED_LR and left as-is rather than re-signed.
 * On non-ptrauth platforms this is a no-op.
 * NOTE(review): lossy extraction -- the switch header, error returns for the
 * too-small-count cases, braces, and the final return were dropped by the
 * extractor.
 */
270 * Translate thread state arguments to userspace representation
274 machine_thread_state_convert_to_user(
276 thread_flavor_t flavor
,
277 thread_state_t tstate
,
278 mach_msg_type_number_t
*count
)
280 #if __has_feature(ptrauth_calls)
281 arm_thread_state64_t
*ts64
;
/* Locate the 64-bit state inside the caller's buffer; only 64-bit
 * flavors need conversion. */
284 case ARM_THREAD_STATE
:
286 arm_unified_thread_state_t
*unified_state
= (arm_unified_thread_state_t
*)tstate
;
288 if (*count
< ARM_UNIFIED_THREAD_STATE_COUNT
|| !is_thread_state64(unified_state
)) {
291 ts64
= thread_state64(unified_state
);
294 case ARM_THREAD_STATE64
:
296 if (*count
< ARM_THREAD_STATE64_COUNT
) {
299 ts64
= (arm_thread_state64_t
*)tstate
;
306 // Note that kernel threads never have disable_user_jop set
307 if (current_thread()->machine
.disable_user_jop
|| !thread_is_64bit_addr(current_thread()) ||
308 thread
->machine
.disable_user_jop
|| !thread_is_64bit_addr(thread
) ||
309 (BootArgs
->bootFlags
& kBootFlagsDisableUserThreadStateJOP
)) {
310 ts64
->flags
= __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH
;
316 // lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
317 uintptr_t stripped_lr
= (uintptr_t)ptrauth_strip((void *)ts64
->lr
,
318 ptrauth_key_return_address
);
319 if (ts64
->lr
!= stripped_lr
) {
320 // Need to allow already-signed lr value to round-trip as is
321 ts64
->flags
|= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR
;
323 // Note that an IB-signed return address that happens to have a 0 signature value
324 // will round-trip correctly even if IA-signed again below (and IA-authd later)
327 if (BootArgs
->bootFlags
& kBootFlagsDisableUserJOP
) {
/* Sign each register the user will see; keys/discriminators must match
 * the convert_from_user direction below. */
332 ts64
->pc
= (uintptr_t)pmap_sign_user_ptr((void*)ts64
->pc
,
333 ptrauth_key_process_independent_code
, ptrauth_string_discriminator("pc"));
335 if (ts64
->lr
&& !(ts64
->flags
& __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR
)) {
336 ts64
->lr
= (uintptr_t)pmap_sign_user_ptr((void*)ts64
->lr
,
337 ptrauth_key_process_independent_code
, ptrauth_string_discriminator("lr"));
340 ts64
->sp
= (uintptr_t)pmap_sign_user_ptr((void*)ts64
->sp
,
341 ptrauth_key_process_independent_data
, ptrauth_string_discriminator("sp"));
344 ts64
->fp
= (uintptr_t)pmap_sign_user_ptr((void*)ts64
->fp
,
345 ptrauth_key_process_independent_data
, ptrauth_string_discriminator("fp"));
350 // No conversion to userspace representation on this platform
351 (void)thread
; (void)flavor
; (void)tstate
; (void)count
;
353 #endif /* __has_feature(ptrauth_calls) */
/*
 * machine_thread_state_convert_from_user: inverse of convert_to_user.  On a
 * ptrauth system, authenticate the user-supplied pc/lr (IA key) and sp/fp
 * (DA key) via pmap_auth_user_ptr with the same discriminators used when
 * signing.  Security-relevant policy:
 *  - A JOP-disabled caller may not set signed state on a JOP-enabled target
 *    (KERN_PROTECTION_FAILURE).
 *  - The NO_PTRAUTH flag is stripped when the target is JOP-enabled, so the
 *    values are still treated as signed (auth failure poisons them).
 *  - IB_SIGNED_LR is only honored if lr actually carries a signature.
 * On non-ptrauth platforms this is a no-op.
 * NOTE(review): lossy extraction -- the switch header, error returns, braces,
 * and the final return were dropped by the extractor.
 */
357 * Translate thread state arguments from userspace representation
361 machine_thread_state_convert_from_user(
363 thread_flavor_t flavor
,
364 thread_state_t tstate
,
365 mach_msg_type_number_t count
)
367 #if __has_feature(ptrauth_calls)
368 arm_thread_state64_t
*ts64
;
371 case ARM_THREAD_STATE
:
373 arm_unified_thread_state_t
*unified_state
= (arm_unified_thread_state_t
*)tstate
;
375 if (count
< ARM_UNIFIED_THREAD_STATE_COUNT
|| !is_thread_state64(unified_state
)) {
378 ts64
= thread_state64(unified_state
);
381 case ARM_THREAD_STATE64
:
383 if (count
!= ARM_THREAD_STATE64_COUNT
) {
386 ts64
= (arm_thread_state64_t
*)tstate
;
393 // Note that kernel threads never have disable_user_jop set
394 if (current_thread()->machine
.disable_user_jop
|| !thread_is_64bit_addr(current_thread())) {
395 if (thread
->machine
.disable_user_jop
|| !thread_is_64bit_addr(thread
)) {
396 ts64
->flags
= __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH
;
399 // A JOP-disabled process must not set thread state on a JOP-enabled process
400 return KERN_PROTECTION_FAILURE
;
403 if (ts64
->flags
& __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH
) {
404 if (thread
->machine
.disable_user_jop
|| !thread_is_64bit_addr(thread
) ||
405 (BootArgs
->bootFlags
& kBootFlagsDisableUserThreadStateJOP
)) {
408 // Disallow setting unsigned thread state on JOP-enabled processes.
409 // Ignore flag and treat thread state arguments as signed, ptrauth
410 // poisoning will cause resulting thread state to be invalid
411 ts64
->flags
&= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH
;
414 if (ts64
->flags
& __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR
) {
415 // lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
416 uintptr_t stripped_lr
= (uintptr_t)ptrauth_strip((void *)ts64
->lr
,
417 ptrauth_key_return_address
);
418 if (ts64
->lr
== stripped_lr
) {
419 // Don't allow unsigned pointer to be passed through as is. Ignore flag and
420 // treat as IA-signed below (where auth failure may poison the value).
421 ts64
->flags
&= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR
;
423 // Note that an IB-signed return address that happens to have a 0 signature value
424 // will also have been IA-signed (without this flag being set) and so will IA-auth
428 if (BootArgs
->bootFlags
& kBootFlagsDisableUserJOP
) {
/* Authenticate each register; an invalid signature yields a poisoned
 * pointer rather than an error. */
433 ts64
->pc
= (uintptr_t)pmap_auth_user_ptr((void*)ts64
->pc
,
434 ptrauth_key_process_independent_code
, ptrauth_string_discriminator("pc"));
436 if (ts64
->lr
&& !(ts64
->flags
& __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR
)) {
437 ts64
->lr
= (uintptr_t)pmap_auth_user_ptr((void*)ts64
->lr
,
438 ptrauth_key_process_independent_code
, ptrauth_string_discriminator("lr"));
441 ts64
->sp
= (uintptr_t)pmap_auth_user_ptr((void*)ts64
->sp
,
442 ptrauth_key_process_independent_data
, ptrauth_string_discriminator("sp"));
445 ts64
->fp
= (uintptr_t)pmap_auth_user_ptr((void*)ts64
->fp
,
446 ptrauth_key_process_independent_data
, ptrauth_string_discriminator("fp"));
451 // No conversion from userspace representation on this platform
452 (void)thread
; (void)flavor
; (void)tstate
; (void)count
;
454 #endif /* __has_feature(ptrauth_calls) */
/*
 * machine_thread_siguctx_pointer_convert_to_user: sign the signal-context
 * pointer (*uctxp) with the DA key and the "uctx" discriminator before
 * handing it to userspace, unless the target has user JOP disabled or the
 * boot-arg disables user JOP globally.  No-op on non-ptrauth platforms.
 * NOTE(review): lossy extraction -- the uctxp parameter declaration, braces,
 * and returns were dropped by the extractor.
 */
458 * Translate signal context data pointer to userspace representation
462 machine_thread_siguctx_pointer_convert_to_user(
463 __assert_only thread_t thread
,
466 #if __has_feature(ptrauth_calls)
467 if (current_thread()->machine
.disable_user_jop
|| !thread_is_64bit_addr(current_thread())) {
468 assert(thread
->machine
.disable_user_jop
|| !thread_is_64bit_addr(thread
));
472 if (BootArgs
->bootFlags
& kBootFlagsDisableUserJOP
) {
477 *uctxp
= (uintptr_t)pmap_sign_user_ptr((void*)*uctxp
,
478 ptrauth_key_process_independent_data
, ptrauth_string_discriminator("uctx"));
483 // No conversion to userspace representation on this platform
484 (void)thread
; (void)uctxp
;
486 #endif /* __has_feature(ptrauth_calls) */
/*
 * machine_thread_function_pointers_convert_from_user: authenticate an array
 * of user-supplied function pointers (IB/function-pointer key, zero
 * discriminator) in place, unless the thread has user JOP disabled or the
 * boot-arg disables user JOP.  No-op on non-ptrauth platforms.
 * NOTE(review): lossy extraction -- the fptrs/count parameters, the loop over
 * the array, and braces were dropped by the extractor.
 */
490 * Translate array of function pointer syscall arguments from userspace representation
494 machine_thread_function_pointers_convert_from_user(
495 __assert_only thread_t thread
,
499 #if __has_feature(ptrauth_calls)
500 if (current_thread()->machine
.disable_user_jop
|| !thread_is_64bit_addr(current_thread())) {
501 assert(thread
->machine
.disable_user_jop
|| !thread_is_64bit_addr(thread
));
505 if (BootArgs
->bootFlags
& kBootFlagsDisableUserJOP
) {
511 *fptrs
= (uintptr_t)pmap_auth_user_ptr((void*)*fptrs
,
512 ptrauth_key_function_pointer
, 0);
519 // No conversion from userspace representation on this platform
520 (void)thread
; (void)fptrs
; (void)count
;
522 #endif /* __has_feature(ptrauth_calls) */
/*
 * machine_thread_get_state: Mach thread_get_state backend.  Dispatches on
 * flavor; each case validates *count and the thread's data width (32- vs
 * 64-bit), copies the requested state out of thread->machine, and writes the
 * actual count back through *count.  Debug-state flavors return zeros when
 * the thread has no DebugData allocated.  Unknown flavors fail with
 * KERN_INVALID_ARGUMENT.
 * NOTE(review): lossy extraction -- the switch header, braces, break
 * statements, and several count checks were dropped by the extractor.
 */
526 * Routine: machine_thread_get_state
530 machine_thread_get_state(thread_t thread
,
531 thread_flavor_t flavor
,
532 thread_state_t tstate
,
533 mach_msg_type_number_t
* count
)
/* Flavor-list queries: report which flavors this platform supports. */
536 case THREAD_STATE_FLAVOR_LIST
:
538 return KERN_INVALID_ARGUMENT
;
541 tstate
[0] = ARM_THREAD_STATE
;
542 tstate
[1] = ARM_VFP_STATE
;
543 tstate
[2] = ARM_EXCEPTION_STATE
;
544 tstate
[3] = ARM_DEBUG_STATE
;
548 case THREAD_STATE_FLAVOR_LIST_NEW
:
550 return KERN_INVALID_ARGUMENT
;
553 tstate
[0] = ARM_THREAD_STATE
;
554 tstate
[1] = ARM_VFP_STATE
;
555 tstate
[2] = thread_is_64bit_data(thread
) ? ARM_EXCEPTION_STATE64
: ARM_EXCEPTION_STATE
;
556 tstate
[3] = thread_is_64bit_data(thread
) ? ARM_DEBUG_STATE64
: ARM_DEBUG_STATE32
;
560 case THREAD_STATE_FLAVOR_LIST_10_15
:
562 return KERN_INVALID_ARGUMENT
;
565 tstate
[0] = ARM_THREAD_STATE
;
566 tstate
[1] = ARM_VFP_STATE
;
567 tstate
[2] = thread_is_64bit_data(thread
) ? ARM_EXCEPTION_STATE64
: ARM_EXCEPTION_STATE
;
568 tstate
[3] = thread_is_64bit_data(thread
) ? ARM_DEBUG_STATE64
: ARM_DEBUG_STATE32
;
569 tstate
[4] = ARM_PAGEIN_STATE
;
/* General-purpose register state, delegated to the handle_get_* helpers
 * which read the thread's user saved state (upcb). */
573 case ARM_THREAD_STATE
:
575 kern_return_t rn
= handle_get_arm_thread_state(tstate
, count
, thread
->machine
.upcb
);
581 case ARM_THREAD_STATE32
:
583 if (thread_is_64bit_data(thread
)) {
584 return KERN_INVALID_ARGUMENT
;
587 kern_return_t rn
= handle_get_arm32_thread_state(tstate
, count
, thread
->machine
.upcb
);
594 case ARM_THREAD_STATE64
:
596 if (!thread_is_64bit_data(thread
)) {
597 return KERN_INVALID_ARGUMENT
;
600 kern_return_t rn
= handle_get_arm64_thread_state(tstate
, count
, thread
->machine
.upcb
);
/* Exception state: exception class plus fault status/address registers. */
607 case ARM_EXCEPTION_STATE
:{
608 struct arm_exception_state
*state
;
609 struct arm_saved_state32
*saved_state
;
611 if (*count
< ARM_EXCEPTION_STATE_COUNT
) {
612 return KERN_INVALID_ARGUMENT
;
614 if (thread_is_64bit_data(thread
)) {
615 return KERN_INVALID_ARGUMENT
;
618 state
= (struct arm_exception_state
*) tstate
;
619 saved_state
= saved_state32(thread
->machine
.upcb
);
621 state
->exception
= saved_state
->exception
;
/* 32-bit clients see the ESR under its legacy "fsr" name. */
622 state
->fsr
= saved_state
->esr
;
623 state
->far
= saved_state
->far
;
625 *count
= ARM_EXCEPTION_STATE_COUNT
;
628 case ARM_EXCEPTION_STATE64
:{
629 struct arm_exception_state64
*state
;
630 struct arm_saved_state64
*saved_state
;
632 if (*count
< ARM_EXCEPTION_STATE64_COUNT
) {
633 return KERN_INVALID_ARGUMENT
;
635 if (!thread_is_64bit_data(thread
)) {
636 return KERN_INVALID_ARGUMENT
;
639 state
= (struct arm_exception_state64
*) tstate
;
640 saved_state
= saved_state64(thread
->machine
.upcb
);
642 state
->exception
= saved_state
->exception
;
643 state
->far
= saved_state
->far
;
644 state
->esr
= saved_state
->esr
;
646 *count
= ARM_EXCEPTION_STATE64_COUNT
;
/* Debug state (legacy, 32-bit, 64-bit): copy the thread's breakpoint/
 * watchpoint registers, or zeros if none were ever set. */
649 case ARM_DEBUG_STATE
:{
650 arm_legacy_debug_state_t
*state
;
651 arm_debug_state32_t
*thread_state
;
653 if (*count
< ARM_LEGACY_DEBUG_STATE_COUNT
) {
654 return KERN_INVALID_ARGUMENT
;
657 if (thread_is_64bit_data(thread
)) {
658 return KERN_INVALID_ARGUMENT
;
661 state
= (arm_legacy_debug_state_t
*) tstate
;
662 thread_state
= find_debug_state32(thread
);
664 if (thread_state
== NULL
) {
665 bzero(state
, sizeof(arm_legacy_debug_state_t
));
667 bcopy(thread_state
, state
, sizeof(arm_legacy_debug_state_t
));
670 *count
= ARM_LEGACY_DEBUG_STATE_COUNT
;
673 case ARM_DEBUG_STATE32
:{
674 arm_debug_state32_t
*state
;
675 arm_debug_state32_t
*thread_state
;
677 if (*count
< ARM_DEBUG_STATE32_COUNT
) {
678 return KERN_INVALID_ARGUMENT
;
681 if (thread_is_64bit_data(thread
)) {
682 return KERN_INVALID_ARGUMENT
;
685 state
= (arm_debug_state32_t
*) tstate
;
686 thread_state
= find_debug_state32(thread
);
688 if (thread_state
== NULL
) {
689 bzero(state
, sizeof(arm_debug_state32_t
));
691 bcopy(thread_state
, state
, sizeof(arm_debug_state32_t
));
694 *count
= ARM_DEBUG_STATE32_COUNT
;
698 case ARM_DEBUG_STATE64
:{
699 arm_debug_state64_t
*state
;
700 arm_debug_state64_t
*thread_state
;
702 if (*count
< ARM_DEBUG_STATE64_COUNT
) {
703 return KERN_INVALID_ARGUMENT
;
706 if (!thread_is_64bit_data(thread
)) {
707 return KERN_INVALID_ARGUMENT
;
710 state
= (arm_debug_state64_t
*) tstate
;
711 thread_state
= find_debug_state64(thread
);
713 if (thread_state
== NULL
) {
714 bzero(state
, sizeof(arm_debug_state64_t
));
716 bcopy(thread_state
, state
, sizeof(arm_debug_state64_t
));
719 *count
= ARM_DEBUG_STATE64_COUNT
;
/* VFP state (ARM_VFP_STATE case header dropped by the extractor): a
 * short VFPV2 count is accepted for legacy clients; `max` is presumably
 * set from the accepted count -- TODO confirm against original source. */
724 struct arm_vfp_state
*state
;
725 arm_neon_saved_state32_t
*thread_state
;
728 if (*count
< ARM_VFP_STATE_COUNT
) {
729 if (*count
< ARM_VFPV2_STATE_COUNT
) {
730 return KERN_INVALID_ARGUMENT
;
732 *count
= ARM_VFPV2_STATE_COUNT
;
736 if (*count
== ARM_VFPV2_STATE_COUNT
) {
742 state
= (struct arm_vfp_state
*) tstate
;
743 thread_state
= neon_state32(thread
->machine
.uNeon
);
744 /* ARM64 TODO: set fpsr and fpcr from state->fpscr */
746 bcopy(thread_state
, state
, (max
+ 1) * sizeof(uint32_t));
750 case ARM_NEON_STATE
:{
751 arm_neon_state_t
*state
;
752 arm_neon_saved_state32_t
*thread_state
;
754 if (*count
< ARM_NEON_STATE_COUNT
) {
755 return KERN_INVALID_ARGUMENT
;
758 if (thread_is_64bit_data(thread
)) {
759 return KERN_INVALID_ARGUMENT
;
762 state
= (arm_neon_state_t
*)tstate
;
763 thread_state
= neon_state32(thread
->machine
.uNeon
);
765 assert(sizeof(*thread_state
) == sizeof(*state
));
766 bcopy(thread_state
, state
, sizeof(arm_neon_state_t
));
768 *count
= ARM_NEON_STATE_COUNT
;
772 case ARM_NEON_STATE64
:{
773 arm_neon_state64_t
*state
;
774 arm_neon_saved_state64_t
*thread_state
;
776 if (*count
< ARM_NEON_STATE64_COUNT
) {
777 return KERN_INVALID_ARGUMENT
;
780 if (!thread_is_64bit_data(thread
)) {
781 return KERN_INVALID_ARGUMENT
;
784 state
= (arm_neon_state64_t
*)tstate
;
785 thread_state
= neon_state64(thread
->machine
.uNeon
);
787 /* For now, these are identical */
788 assert(sizeof(*state
) == sizeof(*thread_state
));
789 bcopy(thread_state
, state
, sizeof(arm_neon_state64_t
));
791 *count
= ARM_NEON_STATE64_COUNT
;
/* Pagein state: report the last page-in error recorded on the thread. */
796 case ARM_PAGEIN_STATE
: {
797 arm_pagein_state_t
*state
;
799 if (*count
< ARM_PAGEIN_STATE_COUNT
) {
800 return KERN_INVALID_ARGUMENT
;
803 state
= (arm_pagein_state_t
*)tstate
;
804 state
->__pagein_error
= thread
->t_pagein_error
;
806 *count
= ARM_PAGEIN_STATE_COUNT
;
812 return KERN_INVALID_ARGUMENT
;
/*
 * machine_thread_get_kern_state: like machine_thread_get_state, but reads
 * the interrupted KERNEL register state (cpu_int_state) instead of the user
 * saved state.  Only valid for the current thread while an interrupt frame
 * exists; only thread-state flavors are supported.
 * NOTE(review): lossy extraction -- the switch header, braces, breaks, and
 * the error return for the invalid-thread check were dropped by the
 * extractor.
 */
819 * Routine: machine_thread_get_kern_state
823 machine_thread_get_kern_state(thread_t thread
,
824 thread_flavor_t flavor
,
825 thread_state_t tstate
,
826 mach_msg_type_number_t
* count
)
829 * This works only for an interrupted kernel thread
831 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
) {
836 case ARM_THREAD_STATE
:
838 kern_return_t rn
= handle_get_arm_thread_state(tstate
, count
, getCpuDatap()->cpu_int_state
);
844 case ARM_THREAD_STATE32
:
846 kern_return_t rn
= handle_get_arm32_thread_state(tstate
, count
, getCpuDatap()->cpu_int_state
);
853 case ARM_THREAD_STATE64
:
855 kern_return_t rn
= handle_get_arm64_thread_state(tstate
, count
, getCpuDatap()->cpu_int_state
);
863 return KERN_INVALID_ARGUMENT
;
/*
 * machine_thread_switch_addrmode: retag the thread's saved state and NEON
 * state headers to match the task's data width (64- vs 32-bit), and reset
 * the NEON state to zeros with the appropriate default FPCR.  Called when a
 * thread's address mode changes.
 * NOTE(review): lossy extraction -- braces and the else keyword between the
 * 64-bit and 32-bit arms were dropped by the extractor.
 */
869 machine_thread_switch_addrmode(thread_t thread
)
871 if (task_has_64Bit_data(thread
->task
)) {
/* Tag saved state and NEON state as 64-bit. */
872 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE64
;
873 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE64_COUNT
;
874 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE64
;
875 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE64_COUNT
;
878 * Reinitialize the NEON state.
880 bzero(&thread
->machine
.uNeon
->uns
, sizeof(thread
->machine
.uNeon
->uns
));
881 thread
->machine
.uNeon
->ns_64
.fpcr
= FPCR_DEFAULT
;
/* 32-bit arm: tag saved state and NEON state as 32-bit. */
883 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE32
;
884 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE32_COUNT
;
885 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
886 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
889 * Reinitialize the NEON state.
891 bzero(&thread
->machine
.uNeon
->uns
, sizeof(thread
->machine
.uNeon
->uns
));
892 thread
->machine
.uNeon
->ns_32
.fpcr
= FPCR_DEFAULT_32
;
/* Declared here for use by the debug-state set paths; defined elsewhere
 * (not visible in this chunk). */
896 extern long long arm_debug_get(void);
899 * Routine: machine_thread_set_state
903 machine_thread_set_state(thread_t thread
,
904 thread_flavor_t flavor
,
905 thread_state_t tstate
,
906 mach_msg_type_number_t count
)
911 case ARM_THREAD_STATE
:
912 rn
= handle_set_arm_thread_state(tstate
, count
, thread
->machine
.upcb
);
918 case ARM_THREAD_STATE32
:
919 if (thread_is_64bit_data(thread
)) {
920 return KERN_INVALID_ARGUMENT
;
923 rn
= handle_set_arm32_thread_state(tstate
, count
, thread
->machine
.upcb
);
930 case ARM_THREAD_STATE64
:
931 if (!thread_is_64bit_data(thread
)) {
932 return KERN_INVALID_ARGUMENT
;
935 rn
= handle_set_arm64_thread_state(tstate
, count
, thread
->machine
.upcb
);
941 case ARM_EXCEPTION_STATE
:{
942 if (count
!= ARM_EXCEPTION_STATE_COUNT
) {
943 return KERN_INVALID_ARGUMENT
;
945 if (thread_is_64bit_data(thread
)) {
946 return KERN_INVALID_ARGUMENT
;
951 case ARM_EXCEPTION_STATE64
:{
952 if (count
!= ARM_EXCEPTION_STATE64_COUNT
) {
953 return KERN_INVALID_ARGUMENT
;
955 if (!thread_is_64bit_data(thread
)) {
956 return KERN_INVALID_ARGUMENT
;
961 case ARM_DEBUG_STATE
:
963 arm_legacy_debug_state_t
*state
;
964 boolean_t enabled
= FALSE
;
967 if (count
!= ARM_LEGACY_DEBUG_STATE_COUNT
) {
968 return KERN_INVALID_ARGUMENT
;
970 if (thread_is_64bit_data(thread
)) {
971 return KERN_INVALID_ARGUMENT
;
974 state
= (arm_legacy_debug_state_t
*) tstate
;
976 for (i
= 0; i
< 16; i
++) {
977 /* do not allow context IDs to be set */
978 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
979 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
980 || ((state
->wcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
981 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
982 return KERN_PROTECTION_FAILURE
;
984 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
985 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
991 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
992 if (thread_state
!= NULL
) {
993 void *pTmp
= thread
->machine
.DebugData
;
994 thread
->machine
.DebugData
= NULL
;
995 zfree(ads_zone
, pTmp
);
998 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
999 if (thread_state
== NULL
) {
1000 thread
->machine
.DebugData
= zalloc(ads_zone
);
1001 bzero(thread
->machine
.DebugData
, sizeof *(thread
->machine
.DebugData
));
1002 thread
->machine
.DebugData
->dsh
.flavor
= ARM_DEBUG_STATE32
;
1003 thread
->machine
.DebugData
->dsh
.count
= ARM_DEBUG_STATE32_COUNT
;
1004 thread_state
= find_debug_state32(thread
);
1006 assert(NULL
!= thread_state
);
1008 for (i
= 0; i
< 16; i
++) {
1009 /* set appropriate privilege; mask out unknown bits */
1010 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1011 | ARM_DBGBCR_MATCH_MASK
1012 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1013 | ARM_DBG_CR_ENABLE_MASK
))
1014 | ARM_DBGBCR_TYPE_IVA
1015 | ARM_DBG_CR_LINKED_UNLINKED
1016 | ARM_DBG_CR_SECURITY_STATE_BOTH
1017 | ARM_DBG_CR_MODE_CONTROL_USER
;
1018 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
1019 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1020 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1021 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1022 | ARM_DBG_CR_ENABLE_MASK
))
1023 | ARM_DBG_CR_LINKED_UNLINKED
1024 | ARM_DBG_CR_SECURITY_STATE_BOTH
1025 | ARM_DBG_CR_MODE_CONTROL_USER
;
1026 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
1029 thread_state
->mdscr_el1
= 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
1032 if (thread
== current_thread()) {
1033 arm_debug_set32(thread
->machine
.DebugData
);
1038 case ARM_DEBUG_STATE32
:
1039 /* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
1041 arm_debug_state32_t
*state
;
1042 boolean_t enabled
= FALSE
;
1045 if (count
!= ARM_DEBUG_STATE32_COUNT
) {
1046 return KERN_INVALID_ARGUMENT
;
1048 if (thread_is_64bit_data(thread
)) {
1049 return KERN_INVALID_ARGUMENT
;
1052 state
= (arm_debug_state32_t
*) tstate
;
1054 if (state
->mdscr_el1
& 0x1) {
1058 for (i
= 0; i
< 16; i
++) {
1059 /* do not allow context IDs to be set */
1060 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
1061 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
1062 || ((state
->wcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
1063 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
1064 return KERN_PROTECTION_FAILURE
;
1066 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
1067 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
1073 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
1074 if (thread_state
!= NULL
) {
1075 void *pTmp
= thread
->machine
.DebugData
;
1076 thread
->machine
.DebugData
= NULL
;
1077 zfree(ads_zone
, pTmp
);
1080 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
1081 if (thread_state
== NULL
) {
1082 thread
->machine
.DebugData
= zalloc(ads_zone
);
1083 bzero(thread
->machine
.DebugData
, sizeof *(thread
->machine
.DebugData
));
1084 thread
->machine
.DebugData
->dsh
.flavor
= ARM_DEBUG_STATE32
;
1085 thread
->machine
.DebugData
->dsh
.count
= ARM_DEBUG_STATE32_COUNT
;
1086 thread_state
= find_debug_state32(thread
);
1088 assert(NULL
!= thread_state
);
1090 if (state
->mdscr_el1
& 0x1) {
1091 thread_state
->mdscr_el1
|= 0x1;
1093 thread_state
->mdscr_el1
&= ~0x1;
1096 for (i
= 0; i
< 16; i
++) {
1097 /* set appropriate privilege; mask out unknown bits */
1098 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1099 | ARM_DBGBCR_MATCH_MASK
1100 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1101 | ARM_DBG_CR_ENABLE_MASK
))
1102 | ARM_DBGBCR_TYPE_IVA
1103 | ARM_DBG_CR_LINKED_UNLINKED
1104 | ARM_DBG_CR_SECURITY_STATE_BOTH
1105 | ARM_DBG_CR_MODE_CONTROL_USER
;
1106 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
1107 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1108 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1109 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1110 | ARM_DBG_CR_ENABLE_MASK
))
1111 | ARM_DBG_CR_LINKED_UNLINKED
1112 | ARM_DBG_CR_SECURITY_STATE_BOTH
1113 | ARM_DBG_CR_MODE_CONTROL_USER
;
1114 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
1118 if (thread
== current_thread()) {
1119 arm_debug_set32(thread
->machine
.DebugData
);
1125 case ARM_DEBUG_STATE64
:
1127 arm_debug_state64_t
*state
;
1128 boolean_t enabled
= FALSE
;
1131 if (count
!= ARM_DEBUG_STATE64_COUNT
) {
1132 return KERN_INVALID_ARGUMENT
;
1134 if (!thread_is_64bit_data(thread
)) {
1135 return KERN_INVALID_ARGUMENT
;
1138 state
= (arm_debug_state64_t
*) tstate
;
1140 if (state
->mdscr_el1
& 0x1) {
1144 for (i
= 0; i
< 16; i
++) {
1145 /* do not allow context IDs to be set */
1146 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
1147 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
1148 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
1149 return KERN_PROTECTION_FAILURE
;
1151 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
1152 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
1158 arm_debug_state64_t
*thread_state
= find_debug_state64(thread
);
1159 if (thread_state
!= NULL
) {
1160 void *pTmp
= thread
->machine
.DebugData
;
1161 thread
->machine
.DebugData
= NULL
;
1162 zfree(ads_zone
, pTmp
);
1165 arm_debug_state64_t
*thread_state
= find_debug_state64(thread
);
1166 if (thread_state
== NULL
) {
1167 thread
->machine
.DebugData
= zalloc(ads_zone
);
1168 bzero(thread
->machine
.DebugData
, sizeof *(thread
->machine
.DebugData
));
1169 thread
->machine
.DebugData
->dsh
.flavor
= ARM_DEBUG_STATE64
;
1170 thread
->machine
.DebugData
->dsh
.count
= ARM_DEBUG_STATE64_COUNT
;
1171 thread_state
= find_debug_state64(thread
);
1173 assert(NULL
!= thread_state
);
1175 if (state
->mdscr_el1
& 0x1) {
1176 thread_state
->mdscr_el1
|= 0x1;
1178 thread_state
->mdscr_el1
&= ~0x1;
1181 for (i
= 0; i
< 16; i
++) {
1182 /* set appropriate privilege; mask out unknown bits */
1183 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
1184 | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
1185 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1186 | ARM_DBG_CR_ENABLE_MASK
))
1187 | ARM_DBGBCR_TYPE_IVA
1188 | ARM_DBG_CR_LINKED_UNLINKED
1189 | ARM_DBG_CR_SECURITY_STATE_BOTH
1190 | ARM_DBG_CR_MODE_CONTROL_USER
;
1191 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK64
;
1192 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1193 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1194 | ARM_DBGWCR_ACCESS_CONTROL_MASK
1195 | ARM_DBG_CR_ENABLE_MASK
))
1196 | ARM_DBG_CR_LINKED_UNLINKED
1197 | ARM_DBG_CR_SECURITY_STATE_BOTH
1198 | ARM_DBG_CR_MODE_CONTROL_USER
;
1199 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK64
;
1203 if (thread
== current_thread()) {
1204 arm_debug_set64(thread
->machine
.DebugData
);
1210 case ARM_VFP_STATE
:{
1211 struct arm_vfp_state
*state
;
1212 arm_neon_saved_state32_t
*thread_state
;
1215 if (count
!= ARM_VFP_STATE_COUNT
&& count
!= ARM_VFPV2_STATE_COUNT
) {
1216 return KERN_INVALID_ARGUMENT
;
1219 if (count
== ARM_VFPV2_STATE_COUNT
) {
1225 state
= (struct arm_vfp_state
*) tstate
;
1226 thread_state
= neon_state32(thread
->machine
.uNeon
);
1227 /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */
1229 bcopy(state
, thread_state
, (max
+ 1) * sizeof(uint32_t));
1231 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
1232 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
1236 case ARM_NEON_STATE
:{
1237 arm_neon_state_t
*state
;
1238 arm_neon_saved_state32_t
*thread_state
;
1240 if (count
!= ARM_NEON_STATE_COUNT
) {
1241 return KERN_INVALID_ARGUMENT
;
1244 if (thread_is_64bit_data(thread
)) {
1245 return KERN_INVALID_ARGUMENT
;
1248 state
= (arm_neon_state_t
*)tstate
;
1249 thread_state
= neon_state32(thread
->machine
.uNeon
);
1251 assert(sizeof(*state
) == sizeof(*thread_state
));
1252 bcopy(state
, thread_state
, sizeof(arm_neon_state_t
));
1254 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
1255 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
1259 case ARM_NEON_STATE64
:{
1260 arm_neon_state64_t
*state
;
1261 arm_neon_saved_state64_t
*thread_state
;
1263 if (count
!= ARM_NEON_STATE64_COUNT
) {
1264 return KERN_INVALID_ARGUMENT
;
1267 if (!thread_is_64bit_data(thread
)) {
1268 return KERN_INVALID_ARGUMENT
;
1271 state
= (arm_neon_state64_t
*)tstate
;
1272 thread_state
= neon_state64(thread
->machine
.uNeon
);
1274 assert(sizeof(*state
) == sizeof(*thread_state
));
1275 bcopy(state
, thread_state
, sizeof(arm_neon_state64_t
));
1277 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE64
;
1278 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE64_COUNT
;
1284 return KERN_INVALID_ARGUMENT
;
1286 return KERN_SUCCESS
;
1290 machine_thread_pc(thread_t thread
)
1292 struct arm_saved_state
*ss
= get_user_regs(thread
);
1293 return (mach_vm_address_t
)get_saved_state_pc(ss
);
1297 machine_thread_reset_pc(thread_t thread
, mach_vm_address_t pc
)
1299 set_saved_state_pc(get_user_regs(thread
), (register_t
)pc
);
1303 * Routine: machine_thread_state_initialize
1307 machine_thread_state_initialize(thread_t thread
)
1309 arm_context_t
*context
= thread
->machine
.contextData
;
1312 * Should always be set up later. For a kernel thread, we don't care
1313 * about this state. For a user thread, we'll set the state up in
1314 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
1317 if (context
!= NULL
) {
1318 bzero(&context
->ss
.uss
, sizeof(context
->ss
.uss
));
1319 bzero(&context
->ns
.uns
, sizeof(context
->ns
.uns
));
1321 if (context
->ns
.nsh
.flavor
== ARM_NEON_SAVED_STATE64
) {
1322 context
->ns
.ns_64
.fpcr
= FPCR_DEFAULT
;
1324 context
->ns
.ns_32
.fpcr
= FPCR_DEFAULT_32
;
1328 thread
->machine
.DebugData
= NULL
;
1330 #if defined(HAS_APPLE_PAC)
1331 /* Sign the initial user-space thread state */
1332 if (thread
->machine
.upcb
!= NULL
) {
1333 boolean_t intr
= ml_set_interrupts_enabled(FALSE
);
1334 ml_sign_thread_state(thread
->machine
.upcb
, 0, 0, 0, 0, 0);
1335 ml_set_interrupts_enabled(intr
);
1337 #endif /* defined(HAS_APPLE_PAC) */
1339 return KERN_SUCCESS
;
1343 * Routine: machine_thread_dup
1347 machine_thread_dup(thread_t self
,
1349 __unused boolean_t is_corpse
)
1351 struct arm_saved_state
*self_saved_state
;
1352 struct arm_saved_state
*target_saved_state
;
1354 target
->machine
.cthread_self
= self
->machine
.cthread_self
;
1356 self_saved_state
= self
->machine
.upcb
;
1357 target_saved_state
= target
->machine
.upcb
;
1358 bcopy(self_saved_state
, target_saved_state
, sizeof(struct arm_saved_state
));
1359 #if defined(HAS_APPLE_PAC)
1360 if (!is_corpse
&& is_saved_state64(self_saved_state
)) {
1361 check_and_sign_copied_thread_state(target_saved_state
, self_saved_state
);
1363 #endif /* defined(HAS_APPLE_PAC) */
1365 return KERN_SUCCESS
;
1369 * Routine: get_user_regs
1372 struct arm_saved_state
*
1373 get_user_regs(thread_t thread
)
1375 return thread
->machine
.upcb
;
1378 arm_neon_saved_state_t
*
1379 get_user_neon_regs(thread_t thread
)
1381 return thread
->machine
.uNeon
;
1385 * Routine: find_user_regs
1388 struct arm_saved_state
*
1389 find_user_regs(thread_t thread
)
1391 return thread
->machine
.upcb
;
1395 * Routine: find_kern_regs
1398 struct arm_saved_state
*
1399 find_kern_regs(thread_t thread
)
1402 * This works only for an interrupted kernel thread
1404 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
) {
1405 return (struct arm_saved_state
*) NULL
;
1407 return getCpuDatap()->cpu_int_state
;
1411 arm_debug_state32_t
*
1412 find_debug_state32(thread_t thread
)
1414 if (thread
&& thread
->machine
.DebugData
) {
1415 return &(thread
->machine
.DebugData
->uds
.ds32
);
1421 arm_debug_state64_t
*
1422 find_debug_state64(thread_t thread
)
1424 if (thread
&& thread
->machine
.DebugData
) {
1425 return &(thread
->machine
.DebugData
->uds
.ds64
);
1432 * Routine: thread_userstack
1436 thread_userstack(__unused thread_t thread
,
1438 thread_state_t tstate
,
1440 mach_vm_offset_t
* user_stack
,
1442 boolean_t is_64bit_data
1448 case ARM_THREAD_STATE
:
1449 if (count
== ARM_UNIFIED_THREAD_STATE_COUNT
) {
1451 if (is_64bit_data
) {
1452 sp
= ((arm_unified_thread_state_t
*)tstate
)->ts_64
.sp
;
1456 sp
= ((arm_unified_thread_state_t
*)tstate
)->ts_32
.sp
;
1462 /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1463 case ARM_THREAD_STATE32
:
1464 if (count
!= ARM_THREAD_STATE32_COUNT
) {
1465 return KERN_INVALID_ARGUMENT
;
1467 if (is_64bit_data
) {
1468 return KERN_INVALID_ARGUMENT
;
1471 sp
= ((arm_thread_state32_t
*)tstate
)->sp
;
1474 case ARM_THREAD_STATE64
:
1475 if (count
!= ARM_THREAD_STATE64_COUNT
) {
1476 return KERN_INVALID_ARGUMENT
;
1478 if (!is_64bit_data
) {
1479 return KERN_INVALID_ARGUMENT
;
1482 sp
= ((arm_thread_state32_t
*)tstate
)->sp
;
1486 return KERN_INVALID_ARGUMENT
;
1490 *user_stack
= CAST_USER_ADDR_T(sp
);
1495 *user_stack
= CAST_USER_ADDR_T(USRSTACK64
);
1501 return KERN_SUCCESS
;
1505 * thread_userstackdefault:
1507 * Return the default stack location for the
1508 * thread, if otherwise unknown.
1511 thread_userstackdefault(mach_vm_offset_t
* default_user_stack
,
1515 *default_user_stack
= USRSTACK64
;
1517 *default_user_stack
= USRSTACK
;
1520 return KERN_SUCCESS
;
1524 * Routine: thread_setuserstack
1528 thread_setuserstack(thread_t thread
,
1529 mach_vm_address_t user_stack
)
1531 struct arm_saved_state
*sv
;
1533 sv
= get_user_regs(thread
);
1535 set_saved_state_sp(sv
, user_stack
);
1541 * Routine: thread_adjuserstack
1545 thread_adjuserstack(thread_t thread
,
1548 struct arm_saved_state
*sv
;
1551 sv
= get_user_regs(thread
);
1553 sp
= get_saved_state_sp(sv
);
1555 set_saved_state_sp(sv
, sp
);;
1561 * Routine: thread_setentrypoint
1565 thread_setentrypoint(thread_t thread
,
1566 mach_vm_offset_t entry
)
1568 struct arm_saved_state
*sv
;
1570 sv
= get_user_regs(thread
);
1572 set_saved_state_pc(sv
, entry
);
1578 * Routine: thread_entrypoint
1582 thread_entrypoint(__unused thread_t thread
,
1584 thread_state_t tstate
,
1586 mach_vm_offset_t
* entry_point
1590 case ARM_THREAD_STATE
:
1592 struct arm_thread_state
*state
;
1594 if (count
!= ARM_THREAD_STATE_COUNT
) {
1595 return KERN_INVALID_ARGUMENT
;
1598 state
= (struct arm_thread_state
*) tstate
;
1601 * If a valid entry point is specified, use it.
1604 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
1606 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
1611 case ARM_THREAD_STATE64
:
1613 struct arm_thread_state64
*state
;
1615 if (count
!= ARM_THREAD_STATE64_COUNT
) {
1616 return KERN_INVALID_ARGUMENT
;
1619 state
= (struct arm_thread_state64
*) tstate
;
1622 * If a valid entry point is specified, use it.
1625 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
1627 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
1633 return KERN_INVALID_ARGUMENT
;
1636 return KERN_SUCCESS
;
1641 * Routine: thread_set_child
1645 thread_set_child(thread_t child
,
1648 struct arm_saved_state
*child_state
;
1650 child_state
= get_user_regs(child
);
1652 set_saved_state_reg(child_state
, 0, pid
);
1653 set_saved_state_reg(child_state
, 1, 1ULL);
1658 * Routine: thread_set_parent
1662 thread_set_parent(thread_t parent
,
1665 struct arm_saved_state
*parent_state
;
1667 parent_state
= get_user_regs(parent
);
1669 set_saved_state_reg(parent_state
, 0, pid
);
1670 set_saved_state_reg(parent_state
, 1, 0);
1674 struct arm_act_context
{
1675 struct arm_unified_thread_state ss
;
1677 struct arm_neon_saved_state ns
;
1682 * Routine: act_thread_csave
1686 act_thread_csave(void)
1688 struct arm_act_context
*ic
;
1691 thread_t thread
= current_thread();
1693 ic
= (struct arm_act_context
*) kalloc(sizeof(struct arm_act_context
));
1694 if (ic
== (struct arm_act_context
*) NULL
) {
1698 val
= ARM_UNIFIED_THREAD_STATE_COUNT
;
1699 kret
= machine_thread_get_state(thread
, ARM_THREAD_STATE
, (thread_state_t
)&ic
->ss
, &val
);
1700 if (kret
!= KERN_SUCCESS
) {
1701 kfree(ic
, sizeof(struct arm_act_context
));
1706 if (thread_is_64bit_data(thread
)) {
1707 val
= ARM_NEON_STATE64_COUNT
;
1708 kret
= machine_thread_get_state(thread
,
1710 (thread_state_t
)&ic
->ns
,
1713 val
= ARM_NEON_STATE_COUNT
;
1714 kret
= machine_thread_get_state(thread
,
1716 (thread_state_t
)&ic
->ns
,
1719 if (kret
!= KERN_SUCCESS
) {
1720 kfree(ic
, sizeof(struct arm_act_context
));
1728 * Routine: act_thread_catt
1732 act_thread_catt(void * ctx
)
1734 struct arm_act_context
*ic
;
1736 thread_t thread
= current_thread();
1738 ic
= (struct arm_act_context
*) ctx
;
1739 if (ic
== (struct arm_act_context
*) NULL
) {
1743 kret
= machine_thread_set_state(thread
, ARM_THREAD_STATE
, (thread_state_t
)&ic
->ss
, ARM_UNIFIED_THREAD_STATE_COUNT
);
1744 if (kret
!= KERN_SUCCESS
) {
1749 if (thread_is_64bit_data(thread
)) {
1750 kret
= machine_thread_set_state(thread
,
1752 (thread_state_t
)&ic
->ns
,
1753 ARM_NEON_STATE64_COUNT
);
1755 kret
= machine_thread_set_state(thread
,
1757 (thread_state_t
)&ic
->ns
,
1758 ARM_NEON_STATE_COUNT
);
1760 if (kret
!= KERN_SUCCESS
) {
1765 kfree(ic
, sizeof(struct arm_act_context
));
1769 * Routine: act_thread_catt
1773 act_thread_cfree(void *ctx
)
1775 kfree(ctx
, sizeof(struct arm_act_context
));
1779 thread_set_wq_state32(thread_t thread
,
1780 thread_state_t tstate
)
1782 arm_thread_state_t
*state
;
1783 struct arm_saved_state
*saved_state
;
1784 struct arm_saved_state32
*saved_state_32
;
1785 thread_t curth
= current_thread();
1788 assert(!thread_is_64bit_data(thread
));
1790 saved_state
= thread
->machine
.upcb
;
1791 saved_state_32
= saved_state32(saved_state
);
1793 state
= (arm_thread_state_t
*)tstate
;
1795 if (curth
!= thread
) {
1797 thread_lock(thread
);
1801 * do not zero saved_state, it can be concurrently accessed
1802 * and zero is not a valid state for some of the registers,
1805 thread_state32_to_saved_state(state
, saved_state
);
1806 saved_state_32
->cpsr
= PSR64_USER32_DEFAULT
;
1808 if (curth
!= thread
) {
1809 thread_unlock(thread
);
1813 return KERN_SUCCESS
;
1817 thread_set_wq_state64(thread_t thread
,
1818 thread_state_t tstate
)
1820 arm_thread_state64_t
*state
;
1821 struct arm_saved_state
*saved_state
;
1822 struct arm_saved_state64
*saved_state_64
;
1823 thread_t curth
= current_thread();
1826 assert(thread_is_64bit_data(thread
));
1828 saved_state
= thread
->machine
.upcb
;
1829 saved_state_64
= saved_state64(saved_state
);
1830 state
= (arm_thread_state64_t
*)tstate
;
1832 if (curth
!= thread
) {
1834 thread_lock(thread
);
1838 * do not zero saved_state, it can be concurrently accessed
1839 * and zero is not a valid state for some of the registers,
1842 thread_state64_to_saved_state(state
, saved_state
);
1843 set_saved_state_cpsr(saved_state
, PSR64_USER64_DEFAULT
);
1845 if (curth
!= thread
) {
1846 thread_unlock(thread
);
1850 return KERN_SUCCESS
;