/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm64/proc_reg.h>
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
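/*
 * ARM_VFPV2_STATE_COUNT therefore evaluates to 33 32-bit words (32 data
 * registers plus fpscr); the ARM_VFP_STATE handlers below use it to keep
 * serving legacy clients that pass this shorter VFPv2 layout.
 */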
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);
/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
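/*
 * Machine-independent Mach code is expected to index this table by flavor
 * when sizing and validating thread-state buffers, so every flavor handled
 * in this file should have an entry here.
 */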
extern zone_t ads_zone;
#if __arm64__
/*
 * Copy values from saved_state to ts64.
 */
void
saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
    arm_thread_state64_t * ts64)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	ts64->fp = get_saved_state_fp(saved_state);
	ts64->lr = get_saved_state_lr(saved_state);
	ts64->sp = get_saved_state_sp(saved_state);
	ts64->pc = get_saved_state_pc(saved_state);
	ts64->cpsr = get_saved_state_cpsr(saved_state);
	for (i = 0; i < 29; i++) {
		ts64->x[i] = get_saved_state_reg(saved_state, i);
	}
}
/*
 * Copy values from ts64 to saved_state.
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t * saved_state)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}
}
#endif /* __arm64__ */
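/*
 * Note the cpsr handling above: user-supplied bits are masked with
 * ~PSR64_MODE_MASK and PSR64_MODE_RW_64 is forced on, so callers cannot
 * smuggle privileged mode bits into a thread's saved state. Only x0-x28
 * go through the copy loops; fp (x29), lr (x30), sp and pc are handled
 * explicitly by the accessors.
 */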
static kern_return_t
handle_get_arm32_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state32(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
	*count = ARM_THREAD_STATE32_COUNT;

	return KERN_SUCCESS;
}
static kern_return_t
handle_get_arm64_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state64(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
	*count = ARM_THREAD_STATE64_COUNT;

	return KERN_SUCCESS;
}
static kern_return_t
handle_get_arm_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;

	return KERN_SUCCESS;
}
static kern_return_t
handle_set_arm32_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);

	return KERN_SUCCESS;
}
static kern_return_t
handle_set_arm64_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);

	return KERN_SUCCESS;
}
static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}
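/*
 * The set path mirrors the get path: a short count means a legacy 32-bit
 * payload, while a unified payload is dispatched on the flavor recorded in
 * its own ash header (is_thread_state64()) and then cross-checked against
 * the format of the thread's saved state.
 */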
/*
 * Translate thread state arguments to userspace representation
 */

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
	    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
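/*
 * Summary of the signing scheme above: pc and lr are signed with the
 * process-independent code key and the string discriminators "pc"/"lr",
 * sp and fp with the process-independent data key and "sp"/"fp". An lr
 * that already carries an IB (return-address) signature is only flagged,
 * not re-signed, so it can round-trip unchanged.
 */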
/*
 * Translate thread state arguments from userspace representation
 */

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread) ||
		    (BootArgs->bootFlags & kBootFlagsDisableUserThreadStateJOP)) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"));
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"));
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"));
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"));
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
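/*
 * pmap_auth_user_ptr() does not fail loudly on a bad signature: ptrauth
 * poisoning corrupts the resulting value instead, so forged pc/lr/sp/fp
 * values produce an invalid thread state rather than a usable one.
 */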
/*
 * Translate signal context data pointer to userspace representation
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__assert_only thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"));
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
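/*
 * The signal context pointer is signed with the data key and the "uctx"
 * string discriminator, matching what the userspace signal trampoline is
 * expected to authenticate before dereferencing it.
 */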
/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
	__assert_only thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (BootArgs->bootFlags & kBootFlagsDisableUserJOP) {
		return KERN_SUCCESS;
	}

	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
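/*
 * Function pointers are authenticated with ptrauth_key_function_pointer
 * and a zero discriminator, the arm64e ABI convention for C function
 * pointers, so they can be verified here without knowing the eventual
 * call site.
 */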
/*
 * Routine: machine_thread_get_state
 *
 */
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}
	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));

		*count = ARM_NEON_STATE64_COUNT;
		break;
	}
	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
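/*
 * Usage sketch (userspace side, not part of this file): reading a thread's
 * 64-bit register state lands in the ARM_THREAD_STATE64 case above.
 *
 *	arm_thread_state64_t ts;
 *	mach_msg_type_number_t cnt = ARM_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(th, ARM_THREAD_STATE64,
 *	    (thread_state_t)&ts, &cnt);
 *
 * The call fails with KERN_INVALID_ARGUMENT if `th` is running 32-bit data.
 */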
/*
 * Routine: machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
	if (task_has_64Bit_data(thread->task)) {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
	} else {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
	}
}
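/*
 * Switching address mode deliberately clobbers the FP/NEON state: the
 * 32-bit and 64-bit save layouts are not interchangeable, so the union is
 * zeroed and only fpcr is reinitialized to the default for the new mode.
 */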
extern long long arm_debug_get(void);
/*
 * Routine: machine_thread_set_state
 *
 */
kern_return_t
machine_thread_set_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
#endif
	case ARM_EXCEPTION_STATE:{
		if (count != ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		if (count != ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_DEBUG_STATE:
	{
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO  subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;

		if (state->mdscr_el1 & 0x1) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1) {
				thread_state->mdscr_el1 |= 0x1;
			} else {
				thread_state->mdscr_el1 &= ~0x1;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;

		if (state->mdscr_el1 & 0x1) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
				thread_state = find_debug_state64(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1) {
				thread_state->mdscr_el1 |= 0x1;
			} else {
				thread_state->mdscr_el1 &= ~0x1;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}
	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}
	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
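/*
 * A note on the debug-state cases above: bcr/wcr values are never installed
 * verbatim. Only a whitelisted set of user-controllable bits is copied in,
 * and IVA-type, unlinked, both-security-states and user-mode-only control
 * bits are forced, so userspace cannot arm context-ID-matching or
 * kernel-mode breakpoints on itself.
 */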
mach_vm_address_t
machine_thread_pc(thread_t thread)
{
	struct arm_saved_state *ss = get_user_regs(thread);
	return (mach_vm_address_t)get_saved_state_pc(ss);
}
void
machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
{
	set_saved_state_pc(get_user_regs(thread), (register_t)pc);
}
/*
 * Routine: machine_thread_state_initialize
 *
 */
kern_return_t
machine_thread_state_initialize(thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
	}

	thread->machine.DebugData = NULL;

#if defined(HAS_APPLE_PAC)
	/* Sign the initial user-space thread state */
	if (thread->machine.upcb != NULL) {
		ml_sign_thread_state(thread->machine.upcb, 0, 0, 0, 0, 0);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}
/*
 * Routine: machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(thread_t self,
    thread_t target,
    __unused boolean_t is_corpse)
{
	struct arm_saved_state *self_saved_state;
	struct arm_saved_state *target_saved_state;

	target->machine.cthread_self = self->machine.cthread_self;

	self_saved_state = self->machine.upcb;
	target_saved_state = target->machine.upcb;
	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
#if defined(HAS_APPLE_PAC)
	if (!is_corpse && is_saved_state64(self_saved_state)) {
		check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}
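/*
 * With ptrauth enabled, the saved state carries an integrity signature over
 * key registers; the bcopy() above would otherwise leave the target holding
 * a signature bound to the source thread, so
 * check_and_sign_copied_thread_state() re-signs it for the target. Corpses
 * are skipped since they are never resumed.
 */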
/*
 * Routine: get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}

arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)
{
	return thread->machine.uNeon;
}
/*
 * Routine: find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}

/*
 * Routine: find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(thread_t thread)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return (struct arm_saved_state *) NULL;
	} else {
		return getCpuDatap()->cpu_int_state;
	}
}
arm_debug_state32_t *
find_debug_state32(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds32);
	} else {
		return NULL;
	}
}

arm_debug_state64_t *
find_debug_state64(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds64);
	} else {
		return NULL;
	}
}
/*
 * Routine: thread_userstack
 *
 */
kern_return_t
thread_userstack(__unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t * user_stack,
    int * customstack,
    boolean_t is_64bit_data
    )
{
	register_t sp;

	switch (flavor) {
	case ARM_THREAD_STATE:
		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
#if __arm64__
			if (is_64bit_data) {
				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
			} else
#endif
			{
				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
			}

			break;
		}

	/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
	case ARM_THREAD_STATE32:
		if (count != ARM_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		sp = ((arm_thread_state32_t *)tstate)->sp;
		break;
#if __arm64__
	case ARM_THREAD_STATE64:
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		/* must read sp through the 64-bit layout, not the 32-bit one */
		sp = ((arm_thread_state64_t *)tstate)->sp;
		break;
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}

	if (sp) {
		*user_stack = CAST_USER_ADDR_T(sp);
		if (customstack) {
			*customstack = 1;
		}
	} else {
		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
		if (customstack) {
			*customstack = 0;
		}
	}

	return KERN_SUCCESS;
}
/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(mach_vm_offset_t * default_user_stack,
    boolean_t is64bit)
{
	if (is64bit) {
		*default_user_stack = USRSTACK64;
	} else {
		*default_user_stack = USRSTACK;
	}

	return KERN_SUCCESS;
}
/*
 * Routine: thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread,
    mach_vm_address_t user_stack)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_sp(sv, user_stack);
}
/*
 * Routine: thread_adjuserstack
 *
 */
uint64_t
thread_adjuserstack(thread_t thread,
    int adjust)
{
	struct arm_saved_state *sv;
	uint64_t sp;

	sv = get_user_regs(thread);

	sp = get_saved_state_sp(sv);
	sp += adjust;
	set_saved_state_sp(sv, sp);

	return sp;
}
/*
 * Routine: thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread,
    mach_vm_offset_t entry)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_pc(sv, entry);
}
/*
 * Routine: thread_entrypoint
 *
 */
kern_return_t
thread_entrypoint(__unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count __unused,
    mach_vm_offset_t * entry_point
    )
{
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		struct arm_thread_state *state;

		state = (struct arm_thread_state *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}
	}
	break;

	case ARM_THREAD_STATE64:
	{
		struct arm_thread_state64 *state;

		state = (struct arm_thread_state64 *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}
	}
	break;

	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}
/*
 * Routine: thread_set_child
 *
 */
void
thread_set_child(thread_t child,
    int pid)
{
	struct arm_saved_state *child_state;

	child_state = get_user_regs(child);

	set_saved_state_reg(child_state, 0, pid);
	set_saved_state_reg(child_state, 1, 1ULL);
}
/*
 * Routine: thread_set_parent
 *
 */
void
thread_set_parent(thread_t parent,
    int pid)
{
	struct arm_saved_state *parent_state;

	parent_state = get_user_regs(parent);

	set_saved_state_reg(parent_state, 0, pid);
	set_saved_state_reg(parent_state, 1, 0);
}
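/*
 * Together these two routines implement the fork() return convention:
 * both sides receive the pid argument in x0, and x1 distinguishes the
 * child (1) from the parent (0) so that libsyscall can produce fork()'s
 * conventional return values.
 */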
struct arm_act_context {
	struct arm_unified_thread_state ss;
#if __ARM_VFP__
	struct arm_neon_saved_state ns;
#endif
};
/*
 * Routine: act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	unsigned int val;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
	if (ic == (struct arm_act_context *) NULL) {
		return (void *) 0;
	}

	val = ARM_UNIFIED_THREAD_STATE_COUNT;
	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return (void *) 0;
	}

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		val = ARM_NEON_STATE64_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    &val);
	} else {
		val = ARM_NEON_STATE_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    &val);
	}
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return (void *) 0;
	}
#endif
	return ic;
}
/*
 * Routine: act_thread_catt
 *
 */
void
act_thread_catt(void * ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) ctx;
	if (ic == (struct arm_act_context *) NULL) {
		return;
	}

	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE64_COUNT);
	} else {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE_COUNT);
	}
	if (kret != KERN_SUCCESS) {
		goto out;
	}
#endif
out:
	kfree(ic, sizeof(struct arm_act_context));
}
/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}
kern_return_t
thread_set_wq_state32(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
kern_return_t
thread_set_wq_state64(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state64_to_saved_state(state, saved_state);
	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}