/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm64/proc_reg.h>
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif /* __has_feature(ptrauth_calls) */
struct arm_vfpv2_state {
	uint32_t __r[32];
	uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
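
/*
 * Worked example of the _COUNT convention, assuming the layout above:
 * Mach state counts are measured in 32-bit words, so with 32 VFP registers
 * plus FPSCR, sizeof(arm_vfpv2_state_t) is 33 * sizeof(uint32_t) = 132
 * bytes and ARM_VFPV2_STATE_COUNT evaluates to 33.
 */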
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);
static void free_debug_state(thread_t thread);
/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};

extern zone_t ads_zone;
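
/*
 * Usage sketch: _MachineStateCount is indexed by state flavor, so a
 * hypothetical caller can bound-check a user-supplied count before
 * dispatching on the flavor:
 *
 *	if (flavor < sizeof(_MachineStateCount) / sizeof(_MachineStateCount[0]) &&
 *	    count < _MachineStateCount[flavor]) {
 *		return KERN_INVALID_ARGUMENT;	// buffer cannot hold this flavor
 *	}
 */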
#if __arm64__
/*
 * Copy values from saved_state to ts64.
 */
void
saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
    arm_thread_state64_t * ts64)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	ts64->fp = get_saved_state_fp(saved_state);
	ts64->lr = get_saved_state_lr(saved_state);
	ts64->sp = get_saved_state_sp(saved_state);
	ts64->pc = get_saved_state_pc(saved_state);
	ts64->cpsr = get_saved_state_cpsr(saved_state);
	for (i = 0; i < 29; i++) {
		ts64->x[i] = get_saved_state_reg(saved_state, i);
	}
}
/*
 * Copy values from ts64 to saved_state.
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t * saved_state)
{
	uint32_t i;
#if __has_feature(ptrauth_calls)
	boolean_t intr = ml_set_interrupts_enabled(FALSE);
#endif /* __has_feature(ptrauth_calls) */

	assert(is_saved_state64(saved_state));

#if __has_feature(ptrauth_calls)
	MANIPULATE_SIGNED_THREAD_STATE(saved_state,
	    "and	w2, w2, %w[not_psr64_user_mask]	\n"
	    "mov	w6, %w[cpsr]			\n"
	    "and	w6, w6, %w[psr64_user_mask]	\n"
	    "orr	w2, w2, w6			\n"
	    "str	w2, [x0, %[SS64_CPSR]]		\n",
	    [cpsr] "r"(ts64->cpsr),
	    [psr64_user_mask] "i"(PSR64_USER_MASK),
	    [not_psr64_user_mask] "i"(~PSR64_USER_MASK)
	    );
	/*
	 * Make writes to ts64->cpsr visible first, since it's useful as a
	 * canary to detect thread-state corruption.
	 */
	__builtin_arm_dmb(DMB_ST);
#else
	set_saved_state_cpsr(saved_state,
	    (get_saved_state_cpsr(saved_state) & ~PSR64_USER_MASK) | (ts64->cpsr & PSR64_USER_MASK));
#endif /* __has_feature(ptrauth_calls) */
	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}

#if __has_feature(ptrauth_calls)
	ml_set_interrupts_enabled(intr);
#endif /* __has_feature(ptrauth_calls) */
}
#endif /* __arm64__ */
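
/*
 * The two helpers above are inverses over the user-visible fields: a
 * round trip such as the hypothetical sketch below leaves fp, lr, sp,
 * pc, x0-x28, and the PSR64_USER_MASK bits of cpsr unchanged.
 *
 *	arm_thread_state64_t ts;
 *	saved_state_to_thread_state64(saved_state, &ts);
 *	thread_state64_to_saved_state(&ts, saved_state);
 */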
static kern_return_t
handle_get_arm32_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state32(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
	*count = ARM_THREAD_STATE32_COUNT;
	return KERN_SUCCESS;
}
static kern_return_t
handle_get_arm64_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state64(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
	*count = ARM_THREAD_STATE64_COUNT;
	return KERN_SUCCESS;
}
static kern_return_t
handle_get_arm_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	bzero(unified_state, sizeof(*unified_state));

	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else {
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}

	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return KERN_SUCCESS;
}
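
/*
 * Usage sketch (hypothetical userspace client): the unified flavor is
 * reached through the standard Mach call, and the returned ash.flavor
 * tells the caller which half of the union is valid:
 *
 *	arm_unified_thread_state_t us;
 *	mach_msg_type_number_t cnt = ARM_UNIFIED_THREAD_STATE_COUNT;
 *	if (thread_get_state(port, ARM_THREAD_STATE,
 *	    (thread_state_t)&us, &cnt) == KERN_SUCCESS &&
 *	    us.ash.flavor == ARM_THREAD_STATE64) {
 *		// us.ts_64 holds the 64-bit register state
 *	}
 */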
static kern_return_t
handle_set_arm32_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
	return KERN_SUCCESS;
}
static kern_return_t
handle_set_arm64_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
	return KERN_SUCCESS;
}
static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;

	if (is_thread_state64(unified_state)) {
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else {
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}
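
/*
 * The set path is symmetric: a hypothetical client typically reads the
 * unified state, edits it, and writes it back, keeping ash.flavor and
 * ash.count consistent with the half of the union it touched:
 *
 *	arm_unified_thread_state_t us;
 *	mach_msg_type_number_t cnt = ARM_UNIFIED_THREAD_STATE_COUNT;
 *	thread_get_state(port, ARM_THREAD_STATE, (thread_state_t)&us, &cnt);
 *	// ... modify us.ts_64 (or us.ts_32) as needed ...
 *	thread_set_state(port, ARM_THREAD_STATE,
 *	    (thread_state_t)&us, ARM_UNIFIED_THREAD_STATE_COUNT);
 */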
/*
 * Translate thread state arguments to userspace representation
 */

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"),
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"),
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
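
/*
 * Sketch of the resulting contract, assuming ptrauth is enabled: the pc,
 * lr, sp, and fp a client sees from thread_get_state() carry PAC
 * signatures, and userspace is expected to go through the accessor
 * macros rather than poking the raw fields:
 *
 *	uint64_t pc = arm_thread_state64_get_pc(us.ts_64);
 *	arm_thread_state64_set_pc_fptr(us.ts_64, fptr);	// fptr: caller's function pointer
 */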
/*
 * Translate thread state arguments from userspace representation
 */

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"),
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"),
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
/*
 * Translate signal context data pointer to userspace representation
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
	thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0, thread->machine.jop_pid);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
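
/*
 * Usage sketch: callers hand this routine a small in-place array; each
 * non-zero entry is authenticated (or poisoned on failure) before the
 * kernel trusts it as a code address. For a hypothetical pair of
 * user-supplied callbacks:
 *
 *	user_addr_t fns[2] = { user_entry, user_exit };	// hypothetical values
 *	machine_thread_function_pointers_convert_from_user(thread, fns, 2);
 */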
/*
 * Routine: machine_thread_get_state
 *
 */
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		const arm_saved_state_t *current_state = thread->machine.upcb;

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count,
		    current_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}
	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}
	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}
	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));

		*count = ARM_NEON_STATE64_COUNT;
		break;
	}
	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
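
/*
 * Usage sketch: the FLAVOR_LIST selectors let a hypothetical client
 * discover which flavors to query before asking for each one:
 *
 *	thread_state_flavor_t flavors[5];
 *	mach_msg_type_number_t cnt = 5;
 *	thread_get_state(port, THREAD_STATE_FLAVOR_LIST_10_15,
 *	    (thread_state_t)flavors, &cnt);
 *	// flavors[0..cnt-1] can now each be passed back as a flavor
 */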
/*
 * Routine: machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
void
machine_thread_switch_addrmode(thread_t thread)
{
	if (task_has_64Bit_data(thread->task)) {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
	} else {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
	}
}
extern long long arm_debug_get(void);
/*
 * Routine: machine_thread_set_state
 *
 */
kern_return_t
machine_thread_set_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_EXCEPTION_STATE:{
		if (count != ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		if (count != ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_DEBUG_STATE:
	{
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state32_t *thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE don't drive single stepping.
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO  subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;

		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state32_t * thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;

		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK, deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
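
/*
 * Usage sketch: a hypothetical debugger arms one hardware breakpoint on
 * a 64-bit thread by populating only the enable and byte-address-select
 * bits the validation loop above accepts; the kernel forces the type,
 * linking, security-state, and mode bits itself. (Userspace spellings of
 * the fields may carry leading underscores.)
 *
 *	arm_debug_state64_t ds = {0};
 *	ds.__bvr[0] = target_addr;		// caller-chosen address to trap
 *	ds.__bcr[0] = ARM_DBG_CR_ENABLE_ENABLE
 *	    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK;
 *	thread_set_state(port, ARM_DEBUG_STATE64,
 *	    (thread_state_t)&ds, ARM_DEBUG_STATE64_COUNT);
 */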
mach_vm_address_t
machine_thread_pc(thread_t thread)
{
	struct arm_saved_state *ss = get_user_regs(thread);
	return (mach_vm_address_t)get_saved_state_pc(ss);
}
void
machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
{
	set_saved_state_pc(get_user_regs(thread), (register_t)pc);
}
/*
 * Routine: machine_thread_state_initialize
 *
 */
kern_return_t
machine_thread_state_initialize(thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
	}

	thread->machine.DebugData = NULL;

#if defined(HAS_APPLE_PAC)
	/* Sign the initial user-space thread state */
	if (thread->machine.upcb != NULL) {
		boolean_t intr = ml_set_interrupts_enabled(FALSE);
		ml_sign_thread_state(thread->machine.upcb, 0, 0, 0, 0, 0);
		ml_set_interrupts_enabled(intr);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}
/*
 * Routine: machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(thread_t self,
    thread_t target,
    __unused boolean_t is_corpse)
{
	struct arm_saved_state *self_saved_state;
	struct arm_saved_state *target_saved_state;

	target->machine.cthread_self = self->machine.cthread_self;

	self_saved_state = self->machine.upcb;
	target_saved_state = target->machine.upcb;
	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
#if defined(HAS_APPLE_PAC)
	if (!is_corpse && is_saved_state64(self_saved_state)) {
		check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
	}
#endif /* defined(HAS_APPLE_PAC) */

	return KERN_SUCCESS;
}
/*
 * Routine: get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}
arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)
{
	return thread->machine.uNeon;
}
/*
 * Routine: find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}
/*
 * Routine: find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(thread_t thread)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return (struct arm_saved_state *) NULL;
	}

	return getCpuDatap()->cpu_int_state;
}
arm_debug_state32_t *
find_debug_state32(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds32);
	} else {
		return NULL;
	}
}
arm_debug_state64_t *
find_debug_state64(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds64);
	} else {
		return NULL;
	}
}
/**
 * Finds the debug state for the given 64 bit thread, allocating one if it
 * does not exist.
 *
 * @param thread 64 bit thread to find or allocate debug state for
 *
 * @returns A pointer to the given thread's 64 bit debug state or a null
 *          pointer if the given thread is null or the allocation of a new
 *          debug state fails.
 */
arm_debug_state64_t *
find_or_allocate_debug_state64(thread_t thread)
{
	arm_debug_state64_t *thread_state = find_debug_state64(thread);
	if (thread != NULL && thread_state == NULL) {
		thread->machine.DebugData = zalloc(ads_zone);
		if (thread->machine.DebugData != NULL) {
			bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
			thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
			thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
			thread_state = find_debug_state64(thread);
		}
	}
	return thread_state;
}
/**
 * Finds the debug state for the given 32 bit thread, allocating one if it
 * does not exist.
 *
 * @param thread 32 bit thread to find or allocate debug state for
 *
 * @returns A pointer to the given thread's 32 bit debug state or a null
 *          pointer if the given thread is null or the allocation of a new
 *          debug state fails.
 */
arm_debug_state32_t *
find_or_allocate_debug_state32(thread_t thread)
{
	arm_debug_state32_t *thread_state = find_debug_state32(thread);
	if (thread != NULL && thread_state == NULL) {
		thread->machine.DebugData = zalloc(ads_zone);
		if (thread->machine.DebugData != NULL) {
			bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
			thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
			thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
			thread_state = find_debug_state32(thread);
		}
	}
	return thread_state;
}
/**
 * Frees a thread's debug state if allocated. Otherwise does nothing.
 *
 * @param thread thread to free the debug state of
 */
static void
free_debug_state(thread_t thread)
{
	if (thread != NULL && thread->machine.DebugData != NULL) {
		void *pTmp = thread->machine.DebugData;
		thread->machine.DebugData = NULL;
		zfree(ads_zone, pTmp);
	}
}
/*
 * Routine: thread_userstack
 *
 */
kern_return_t
thread_userstack(__unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t * user_stack,
    int * customstack,
    boolean_t is_64bit_data
    )
{
	register_t sp;

	switch (flavor) {
	case ARM_THREAD_STATE:
		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
			if (is_64bit_data) {
				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
			} else {
				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
			}

			break;
		}

	/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
	case ARM_THREAD_STATE32:
		if (count != ARM_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		sp = ((arm_thread_state32_t *)tstate)->sp;
		break;

	case ARM_THREAD_STATE64:
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		sp = ((arm_thread_state64_t *)tstate)->sp;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}

	if (sp) {
		*user_stack = CAST_USER_ADDR_T(sp);
		if (customstack) {
			*customstack = 1;
		}
	} else {
		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
		if (customstack) {
			*customstack = 0;
		}
	}

	return KERN_SUCCESS;
}
/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(mach_vm_offset_t * default_user_stack,
    boolean_t is64bit)
{
	if (is64bit) {
		*default_user_stack = USRSTACK64;
	} else {
		*default_user_stack = USRSTACK;
	}

	return KERN_SUCCESS;
}
/*
 * Routine: thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread,
    mach_vm_address_t user_stack)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_sp(sv, user_stack);
}
/*
 * Routine: thread_adjuserstack
 *
 */
user_addr_t
thread_adjuserstack(thread_t thread,
    int adjust)
{
	struct arm_saved_state *sv;
	uint64_t sp;

	sv = get_user_regs(thread);

	sp = get_saved_state_sp(sv);
	sp += adjust;
	set_saved_state_sp(sv, sp);

	return (user_addr_t)sp;
}
/*
 * Routine: thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread,
    mach_vm_offset_t entry)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_pc(sv, entry);
}
1655 thread_entrypoint(__unused thread_t thread
,
1657 thread_state_t tstate
,
1659 mach_vm_offset_t
* entry_point
1663 case ARM_THREAD_STATE
:
1665 struct arm_thread_state
*state
;
1667 if (count
!= ARM_THREAD_STATE_COUNT
) {
1668 return KERN_INVALID_ARGUMENT
;
1671 state
= (struct arm_thread_state
*) tstate
;
1674 * If a valid entry point is specified, use it.
1677 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
1679 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
1684 case ARM_THREAD_STATE64
:
1686 struct arm_thread_state64
*state
;
1688 if (count
!= ARM_THREAD_STATE64_COUNT
) {
1689 return KERN_INVALID_ARGUMENT
;
1692 state
= (struct arm_thread_state64
*) tstate
;
1695 * If a valid entry point is specified, use it.
1698 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
1700 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
1706 return KERN_INVALID_ARGUMENT
;
1709 return KERN_SUCCESS
;
/*
 * Routine: thread_set_child
 *
 */
void
thread_set_child(thread_t child,
    int pid)
{
	struct arm_saved_state *child_state;

	child_state = get_user_regs(child);

	set_saved_state_reg(child_state, 0, pid);
	set_saved_state_reg(child_state, 1, 1ULL);
}
/*
 * Routine: thread_set_parent
 *
 */
void
thread_set_parent(thread_t parent,
    int pid)
{
	struct arm_saved_state *parent_state;

	parent_state = get_user_regs(parent);

	set_saved_state_reg(parent_state, 0, pid);
	set_saved_state_reg(parent_state, 1, 0);
}
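
/*
 * Together these implement the classic two-return fork() convention:
 * both threads resume with a pid in x0 while x1 distinguishes the child
 * (x1 == 1) from the parent (x1 == 0), so a user-level fork() stub can
 * use the x1 flag to return 0 in the child.
 */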
struct arm_act_context {
	struct arm_unified_thread_state ss;
	struct arm_neon_saved_state ns;
};
/*
 * Routine: act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	unsigned int val;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
	if (ic == (struct arm_act_context *) NULL) {
		return (void *) 0;
	}

	val = ARM_UNIFIED_THREAD_STATE_COUNT;
	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return (void *) 0;
	}

	if (thread_is_64bit_data(thread)) {
		val = ARM_NEON_STATE64_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    &val);
	} else {
		val = ARM_NEON_STATE_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    &val);
	}
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return (void *) 0;
	}

	return ic;
}
1805 act_thread_catt(void * ctx
)
1807 struct arm_act_context
*ic
;
1809 thread_t thread
= current_thread();
1811 ic
= (struct arm_act_context
*) ctx
;
1812 if (ic
== (struct arm_act_context
*) NULL
) {
1816 kret
= machine_thread_set_state(thread
, ARM_THREAD_STATE
, (thread_state_t
)&ic
->ss
, ARM_UNIFIED_THREAD_STATE_COUNT
);
1817 if (kret
!= KERN_SUCCESS
) {
1822 if (thread_is_64bit_data(thread
)) {
1823 kret
= machine_thread_set_state(thread
,
1825 (thread_state_t
)&ic
->ns
,
1826 ARM_NEON_STATE64_COUNT
);
1828 kret
= machine_thread_set_state(thread
,
1830 (thread_state_t
)&ic
->ns
,
1831 ARM_NEON_STATE_COUNT
);
1833 if (kret
!= KERN_SUCCESS
) {
1838 kfree(ic
, sizeof(struct arm_act_context
));
/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}
kern_return_t
thread_set_wq_state32(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp or cpsr
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
kern_return_t
thread_set_wq_state64(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp or cpsr
	 */
	thread_state64_to_saved_state(state, saved_state);
	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}