/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm64/proc_reg.h>
38 struct arm_vfpv2_state
45 typedef struct arm_vfpv2_state arm_vfpv2_state_t
;
47 #define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
48 (sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
53 void thread_set_child(thread_t child
, int pid
);
54 void thread_set_parent(thread_t parent
, int pid
);
57 * Maps state flavor to number of words in the state:
59 /* __private_extern__ */
60 unsigned int _MachineStateCount
[] = {
62 ARM_UNIFIED_THREAD_STATE_COUNT
,
64 ARM_EXCEPTION_STATE_COUNT
,
65 ARM_DEBUG_STATE_COUNT
,
66 /* THREAD_STATE_NONE (legacy) */ 0,
67 ARM_THREAD_STATE64_COUNT
,
68 ARM_EXCEPTION_STATE64_COUNT
,
69 /* THREAD_STATE_LAST (legacy) */ 0,
70 ARM_THREAD_STATE32_COUNT
,
75 ARM_DEBUG_STATE32_COUNT
,
76 ARM_DEBUG_STATE64_COUNT
,
78 ARM_NEON_STATE64_COUNT
,
81 /* ARM_SAVED_STATE32_COUNT */ 0,
82 /* ARM_SAVED_STATE64_COUNT */ 0,
83 /* ARM_NEON_SAVED_STATE32_COUNT */ 0,
84 /* ARM_NEON_SAVED_STATE64_COUNT */ 0,
87 extern zone_t ads_zone
;
91 * Copy values from saved_state to ts64.
94 saved_state_to_thread_state64(const arm_saved_state_t
*saved_state
, arm_thread_state64_t
*ts64
)
98 assert(is_saved_state64(saved_state
));
100 ts64
->fp
= get_saved_state_fp(saved_state
);
101 ts64
->lr
= get_saved_state_lr(saved_state
);
102 ts64
->sp
= get_saved_state_sp(saved_state
);
103 ts64
->pc
= get_saved_state_pc(saved_state
);
104 ts64
->cpsr
= get_saved_state_cpsr(saved_state
);
105 for (i
= 0; i
< 29; i
++)
106 ts64
->x
[i
] = get_saved_state_reg(saved_state
, i
);
110 * Copy values from ts64 to saved_state
113 thread_state64_to_saved_state(const arm_thread_state64_t
*ts64
, arm_saved_state_t
*saved_state
)
117 assert(is_saved_state64(saved_state
));
119 set_saved_state_fp(saved_state
, ts64
->fp
);
120 set_saved_state_lr(saved_state
, ts64
->lr
);
121 set_saved_state_sp(saved_state
, ts64
->sp
);
122 set_saved_state_pc(saved_state
, ts64
->pc
);
123 set_saved_state_cpsr(saved_state
, (ts64
->cpsr
& ~PSR64_MODE_MASK
) | PSR64_MODE_RW_64
);
124 for (i
= 0; i
< 29; i
++)
125 set_saved_state_reg(saved_state
, i
, ts64
->x
[i
]);
130 handle_get_arm32_thread_state(
131 thread_state_t tstate
,
132 mach_msg_type_number_t
* count
,
133 const arm_saved_state_t
*saved_state
)
135 if (*count
< ARM_THREAD_STATE32_COUNT
)
136 return (KERN_INVALID_ARGUMENT
);
137 if (!is_saved_state32(saved_state
))
138 return (KERN_INVALID_ARGUMENT
);
140 (void)saved_state_to_thread_state32(saved_state
, (arm_thread_state32_t
*)tstate
);
141 *count
= ARM_THREAD_STATE32_COUNT
;
146 handle_get_arm64_thread_state(
147 thread_state_t tstate
,
148 mach_msg_type_number_t
* count
,
149 const arm_saved_state_t
*saved_state
)
151 if (*count
< ARM_THREAD_STATE64_COUNT
)
152 return (KERN_INVALID_ARGUMENT
);
153 if (!is_saved_state64(saved_state
))
154 return (KERN_INVALID_ARGUMENT
);
156 (void)saved_state_to_thread_state64(saved_state
, (arm_thread_state64_t
*)tstate
);
157 *count
= ARM_THREAD_STATE64_COUNT
;
163 handle_get_arm_thread_state(
164 thread_state_t tstate
,
165 mach_msg_type_number_t
* count
,
166 const arm_saved_state_t
*saved_state
)
168 /* In an arm64 world, this flavor can be used to retrieve the thread
169 * state of a 32-bit or 64-bit thread into a unified structure, but we
170 * need to support legacy clients who are only aware of 32-bit, so
171 * check the count to see what the client is expecting.
173 if (*count
< ARM_UNIFIED_THREAD_STATE_COUNT
) {
174 return handle_get_arm32_thread_state(tstate
, count
, saved_state
);
177 arm_unified_thread_state_t
*unified_state
= (arm_unified_thread_state_t
*) tstate
;
178 bzero(unified_state
, sizeof(*unified_state
));
180 if (is_saved_state64(saved_state
)) {
181 unified_state
->ash
.flavor
= ARM_THREAD_STATE64
;
182 unified_state
->ash
.count
= ARM_THREAD_STATE64_COUNT
;
183 (void)saved_state_to_thread_state64(saved_state
, thread_state64(unified_state
));
187 unified_state
->ash
.flavor
= ARM_THREAD_STATE32
;
188 unified_state
->ash
.count
= ARM_THREAD_STATE32_COUNT
;
189 (void)saved_state_to_thread_state32(saved_state
, thread_state32(unified_state
));
191 *count
= ARM_UNIFIED_THREAD_STATE_COUNT
;
192 return (KERN_SUCCESS
);
196 handle_set_arm32_thread_state(
197 const thread_state_t tstate
,
198 mach_msg_type_number_t count
,
199 arm_saved_state_t
*saved_state
)
201 if (count
!= ARM_THREAD_STATE32_COUNT
)
202 return (KERN_INVALID_ARGUMENT
);
204 (void)thread_state32_to_saved_state((const arm_thread_state32_t
*)tstate
, saved_state
);
209 handle_set_arm64_thread_state(
210 const thread_state_t tstate
,
211 mach_msg_type_number_t count
,
212 arm_saved_state_t
*saved_state
)
214 if (count
!= ARM_THREAD_STATE64_COUNT
)
215 return (KERN_INVALID_ARGUMENT
);
217 (void)thread_state64_to_saved_state((const arm_thread_state64_t
*)tstate
, saved_state
);
223 handle_set_arm_thread_state(
224 const thread_state_t tstate
,
225 mach_msg_type_number_t count
,
226 arm_saved_state_t
*saved_state
)
228 /* In an arm64 world, this flavor can be used to set the thread state of a
229 * 32-bit or 64-bit thread from a unified structure, but we need to support
230 * legacy clients who are only aware of 32-bit, so check the count to see
231 * what the client is expecting.
233 if (count
< ARM_UNIFIED_THREAD_STATE_COUNT
) {
234 return handle_set_arm32_thread_state(tstate
, count
, saved_state
);
237 const arm_unified_thread_state_t
*unified_state
= (const arm_unified_thread_state_t
*) tstate
;
239 if (is_thread_state64(unified_state
)) {
240 (void)thread_state64_to_saved_state(const_thread_state64(unified_state
), saved_state
);
244 (void)thread_state32_to_saved_state(const_thread_state32(unified_state
), saved_state
);
247 return (KERN_SUCCESS
);
251 * Routine: machine_thread_get_state
255 machine_thread_get_state(
257 thread_flavor_t flavor
,
258 thread_state_t tstate
,
259 mach_msg_type_number_t
* count
)
262 case THREAD_STATE_FLAVOR_LIST
:
264 return (KERN_INVALID_ARGUMENT
);
266 tstate
[0] = ARM_THREAD_STATE
;
267 tstate
[1] = ARM_VFP_STATE
;
268 tstate
[2] = ARM_EXCEPTION_STATE
;
269 tstate
[3] = ARM_DEBUG_STATE
;
273 case THREAD_STATE_FLAVOR_LIST_NEW
:
275 return (KERN_INVALID_ARGUMENT
);
277 tstate
[0] = ARM_THREAD_STATE
;
278 tstate
[1] = ARM_VFP_STATE
;
279 tstate
[2] = thread_is_64bit(thread
) ? ARM_EXCEPTION_STATE64
: ARM_EXCEPTION_STATE
;
280 tstate
[3] = thread_is_64bit(thread
) ? ARM_DEBUG_STATE64
: ARM_DEBUG_STATE32
;
284 case ARM_THREAD_STATE
:
286 kern_return_t rn
= handle_get_arm_thread_state(tstate
, count
, thread
->machine
.upcb
);
290 case ARM_THREAD_STATE32
:
292 if (thread_is_64bit(thread
))
293 return KERN_INVALID_ARGUMENT
;
295 kern_return_t rn
= handle_get_arm32_thread_state(tstate
, count
, thread
->machine
.upcb
);
300 case ARM_THREAD_STATE64
:
302 if (!thread_is_64bit(thread
))
303 return KERN_INVALID_ARGUMENT
;
305 kern_return_t rn
= handle_get_arm64_thread_state(tstate
, count
, thread
->machine
.upcb
);
310 case ARM_EXCEPTION_STATE
:{
311 struct arm_exception_state
*state
;
312 struct arm_saved_state32
*saved_state
;
314 if (*count
< ARM_EXCEPTION_STATE_COUNT
)
315 return (KERN_INVALID_ARGUMENT
);
316 if (thread_is_64bit(thread
))
317 return (KERN_INVALID_ARGUMENT
);
319 state
= (struct arm_exception_state
*) tstate
;
320 saved_state
= saved_state32(thread
->machine
.upcb
);
322 state
->exception
= saved_state
->exception
;
323 state
->fsr
= saved_state
->esr
;
324 state
->far
= saved_state
->far
;
326 *count
= ARM_EXCEPTION_STATE_COUNT
;
329 case ARM_EXCEPTION_STATE64
:{
330 struct arm_exception_state64
*state
;
331 struct arm_saved_state64
*saved_state
;
333 if (*count
< ARM_EXCEPTION_STATE64_COUNT
)
334 return (KERN_INVALID_ARGUMENT
);
335 if (!thread_is_64bit(thread
))
336 return (KERN_INVALID_ARGUMENT
);
338 state
= (struct arm_exception_state64
*) tstate
;
339 saved_state
= saved_state64(thread
->machine
.upcb
);
341 state
->exception
= saved_state
->exception
;
342 state
->far
= saved_state
->far
;
343 state
->esr
= saved_state
->esr
;
345 *count
= ARM_EXCEPTION_STATE64_COUNT
;
348 case ARM_DEBUG_STATE
:{
349 arm_legacy_debug_state_t
*state
;
350 arm_debug_state32_t
*thread_state
;
352 if (*count
< ARM_LEGACY_DEBUG_STATE_COUNT
)
353 return (KERN_INVALID_ARGUMENT
);
355 if (thread_is_64bit(thread
))
356 return (KERN_INVALID_ARGUMENT
);
358 state
= (arm_legacy_debug_state_t
*) tstate
;
359 thread_state
= find_debug_state32(thread
);
361 if (thread_state
== NULL
)
362 bzero(state
, sizeof(arm_legacy_debug_state_t
));
364 bcopy(thread_state
, state
, sizeof(arm_legacy_debug_state_t
));
366 *count
= ARM_LEGACY_DEBUG_STATE_COUNT
;
369 case ARM_DEBUG_STATE32
:{
370 arm_debug_state32_t
*state
;
371 arm_debug_state32_t
*thread_state
;
373 if (*count
< ARM_DEBUG_STATE32_COUNT
)
374 return (KERN_INVALID_ARGUMENT
);
376 if (thread_is_64bit(thread
))
377 return (KERN_INVALID_ARGUMENT
);
379 state
= (arm_debug_state32_t
*) tstate
;
380 thread_state
= find_debug_state32(thread
);
382 if (thread_state
== NULL
)
383 bzero(state
, sizeof(arm_debug_state32_t
));
385 bcopy(thread_state
, state
, sizeof(arm_debug_state32_t
));
387 *count
= ARM_DEBUG_STATE32_COUNT
;
391 case ARM_DEBUG_STATE64
:{
392 arm_debug_state64_t
*state
;
393 arm_debug_state64_t
*thread_state
;
395 if (*count
< ARM_DEBUG_STATE64_COUNT
)
396 return (KERN_INVALID_ARGUMENT
);
398 if (!thread_is_64bit(thread
))
399 return (KERN_INVALID_ARGUMENT
);
401 state
= (arm_debug_state64_t
*) tstate
;
402 thread_state
= find_debug_state64(thread
);
404 if (thread_state
== NULL
)
405 bzero(state
, sizeof(arm_debug_state64_t
));
407 bcopy(thread_state
, state
, sizeof(arm_debug_state64_t
));
409 *count
= ARM_DEBUG_STATE64_COUNT
;
414 struct arm_vfp_state
*state
;
415 arm_neon_saved_state32_t
*thread_state
;
418 if (*count
< ARM_VFP_STATE_COUNT
) {
419 if (*count
< ARM_VFPV2_STATE_COUNT
)
420 return (KERN_INVALID_ARGUMENT
);
422 *count
= ARM_VFPV2_STATE_COUNT
;
425 if (*count
== ARM_VFPV2_STATE_COUNT
)
430 state
= (struct arm_vfp_state
*) tstate
;
431 thread_state
= neon_state32(thread
->machine
.uNeon
);
432 /* ARM64 TODO: set fpsr and fpcr from state->fpscr */
434 bcopy(thread_state
, state
, (max
+ 1)*sizeof(uint32_t));
438 case ARM_NEON_STATE
:{
439 arm_neon_state_t
*state
;
440 arm_neon_saved_state32_t
*thread_state
;
442 if (*count
< ARM_NEON_STATE_COUNT
)
443 return (KERN_INVALID_ARGUMENT
);
445 if (thread_is_64bit(thread
))
446 return (KERN_INVALID_ARGUMENT
);
448 state
= (arm_neon_state_t
*)tstate
;
449 thread_state
= neon_state32(thread
->machine
.uNeon
);
451 assert(sizeof(*thread_state
) == sizeof(*state
));
452 bcopy(thread_state
, state
, sizeof(arm_neon_state_t
));
454 *count
= ARM_NEON_STATE_COUNT
;
459 case ARM_NEON_STATE64
:{
460 arm_neon_state64_t
*state
;
461 arm_neon_saved_state64_t
*thread_state
;
463 if (*count
< ARM_NEON_STATE64_COUNT
)
464 return (KERN_INVALID_ARGUMENT
);
466 if (!thread_is_64bit(thread
))
467 return (KERN_INVALID_ARGUMENT
);
469 state
= (arm_neon_state64_t
*)tstate
;
470 thread_state
= neon_state64(thread
->machine
.uNeon
);
472 /* For now, these are identical */
473 assert(sizeof(*state
) == sizeof(*thread_state
));
474 bcopy(thread_state
, state
, sizeof(arm_neon_state64_t
));
476 *count
= ARM_NEON_STATE64_COUNT
;
482 return (KERN_INVALID_ARGUMENT
);
484 return (KERN_SUCCESS
);
489 * Routine: machine_thread_get_kern_state
493 machine_thread_get_kern_state(
495 thread_flavor_t flavor
,
496 thread_state_t tstate
,
497 mach_msg_type_number_t
* count
)
500 * This works only for an interrupted kernel thread
502 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
)
506 case ARM_THREAD_STATE
:
508 kern_return_t rn
= handle_get_arm_thread_state(tstate
, count
, getCpuDatap()->cpu_int_state
);
512 case ARM_THREAD_STATE32
:
514 kern_return_t rn
= handle_get_arm32_thread_state(tstate
, count
, getCpuDatap()->cpu_int_state
);
519 case ARM_THREAD_STATE64
:
521 kern_return_t rn
= handle_get_arm64_thread_state(tstate
, count
, getCpuDatap()->cpu_int_state
);
527 return (KERN_INVALID_ARGUMENT
);
529 return (KERN_SUCCESS
);
533 machine_thread_switch_addrmode(thread_t thread
)
535 if (task_has_64BitAddr(thread
->task
)) {
536 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE64
;
537 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE64_COUNT
;
538 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE64
;
539 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE64_COUNT
;
542 * Reinitialize the NEON state.
544 bzero(&thread
->machine
.uNeon
->uns
, sizeof(thread
->machine
.uNeon
->uns
));
545 thread
->machine
.uNeon
->ns_64
.fpcr
= FPCR_DEFAULT
;
547 thread
->machine
.upcb
->ash
.flavor
= ARM_SAVED_STATE32
;
548 thread
->machine
.upcb
->ash
.count
= ARM_SAVED_STATE32_COUNT
;
549 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
550 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
553 * Reinitialize the NEON state.
555 bzero(&thread
->machine
.uNeon
->uns
, sizeof(thread
->machine
.uNeon
->uns
));
556 thread
->machine
.uNeon
->ns_32
.fpcr
= FPCR_DEFAULT_32
;
560 extern long long arm_debug_get(void);
563 * Routine: machine_thread_set_state
567 machine_thread_set_state(
569 thread_flavor_t flavor
,
570 thread_state_t tstate
,
571 mach_msg_type_number_t count
)
576 case ARM_THREAD_STATE
:
577 rn
= handle_set_arm_thread_state(tstate
, count
, thread
->machine
.upcb
);
581 case ARM_THREAD_STATE32
:
582 if (thread_is_64bit(thread
))
583 return (KERN_INVALID_ARGUMENT
);
585 rn
= handle_set_arm32_thread_state(tstate
, count
, thread
->machine
.upcb
);
590 case ARM_THREAD_STATE64
:
591 if (!thread_is_64bit(thread
))
592 return (KERN_INVALID_ARGUMENT
);
594 rn
= handle_set_arm64_thread_state(tstate
, count
, thread
->machine
.upcb
);
598 case ARM_EXCEPTION_STATE
:{
600 if (count
!= ARM_EXCEPTION_STATE_COUNT
)
601 return (KERN_INVALID_ARGUMENT
);
602 if (thread_is_64bit(thread
))
603 return (KERN_INVALID_ARGUMENT
);
607 case ARM_EXCEPTION_STATE64
:{
609 if (count
!= ARM_EXCEPTION_STATE64_COUNT
)
610 return (KERN_INVALID_ARGUMENT
);
611 if (!thread_is_64bit(thread
))
612 return (KERN_INVALID_ARGUMENT
);
616 case ARM_DEBUG_STATE
:
618 arm_legacy_debug_state_t
*state
;
619 boolean_t enabled
= FALSE
;
622 if (count
!= ARM_LEGACY_DEBUG_STATE_COUNT
)
623 return (KERN_INVALID_ARGUMENT
);
624 if (thread_is_64bit(thread
))
625 return (KERN_INVALID_ARGUMENT
);
627 state
= (arm_legacy_debug_state_t
*) tstate
;
629 for (i
= 0; i
< 16; i
++) {
630 /* do not allow context IDs to be set */
631 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
632 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
633 || ((state
->wcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
634 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
635 return KERN_PROTECTION_FAILURE
;
637 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
638 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
645 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
646 if (thread_state
!= NULL
) {
647 void *pTmp
= thread
->machine
.DebugData
;
648 thread
->machine
.DebugData
= NULL
;
649 zfree(ads_zone
, pTmp
);
652 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
653 if (thread_state
== NULL
) {
654 thread
->machine
.DebugData
= zalloc(ads_zone
);
655 bzero(thread
->machine
.DebugData
, sizeof *(thread
->machine
.DebugData
));
656 thread
->machine
.DebugData
->dsh
.flavor
= ARM_DEBUG_STATE32
;
657 thread
->machine
.DebugData
->dsh
.count
= ARM_DEBUG_STATE32_COUNT
;
658 thread_state
= find_debug_state32(thread
);
660 assert(NULL
!= thread_state
);
662 for (i
= 0; i
< 16; i
++) {
663 /* set appropriate privilege; mask out unknown bits */
664 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
665 | ARM_DBGBCR_MATCH_MASK
666 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
667 | ARM_DBG_CR_ENABLE_MASK
))
668 | ARM_DBGBCR_TYPE_IVA
669 | ARM_DBG_CR_LINKED_UNLINKED
670 | ARM_DBG_CR_SECURITY_STATE_BOTH
671 | ARM_DBG_CR_MODE_CONTROL_USER
;
672 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
673 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
674 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
675 | ARM_DBGWCR_ACCESS_CONTROL_MASK
676 | ARM_DBG_CR_ENABLE_MASK
))
677 | ARM_DBG_CR_LINKED_UNLINKED
678 | ARM_DBG_CR_SECURITY_STATE_BOTH
679 | ARM_DBG_CR_MODE_CONTROL_USER
;
680 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
683 thread_state
->mdscr_el1
= 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
686 if (thread
== current_thread()) {
687 arm_debug_set32(thread
->machine
.DebugData
);
692 case ARM_DEBUG_STATE32
:
693 /* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
695 arm_debug_state32_t
*state
;
696 boolean_t enabled
= FALSE
;
699 if (count
!= ARM_DEBUG_STATE32_COUNT
)
700 return (KERN_INVALID_ARGUMENT
);
701 if (thread_is_64bit(thread
))
702 return (KERN_INVALID_ARGUMENT
);
704 state
= (arm_debug_state32_t
*) tstate
;
706 if (state
->mdscr_el1
& 0x1)
709 for (i
= 0; i
< 16; i
++) {
710 /* do not allow context IDs to be set */
711 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
712 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
713 || ((state
->wcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
714 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
715 return KERN_PROTECTION_FAILURE
;
717 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
718 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
724 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
725 if (thread_state
!= NULL
) {
726 void *pTmp
= thread
->machine
.DebugData
;
727 thread
->machine
.DebugData
= NULL
;
728 zfree(ads_zone
, pTmp
);
731 arm_debug_state32_t
*thread_state
= find_debug_state32(thread
);
732 if (thread_state
== NULL
) {
733 thread
->machine
.DebugData
= zalloc(ads_zone
);
734 bzero(thread
->machine
.DebugData
, sizeof *(thread
->machine
.DebugData
));
735 thread
->machine
.DebugData
->dsh
.flavor
= ARM_DEBUG_STATE32
;
736 thread
->machine
.DebugData
->dsh
.count
= ARM_DEBUG_STATE32_COUNT
;
737 thread_state
= find_debug_state32(thread
);
739 assert(NULL
!= thread_state
);
741 if (state
->mdscr_el1
& 0x1)
742 thread_state
->mdscr_el1
|= 0x1;
744 thread_state
->mdscr_el1
&= ~0x1;
746 for (i
= 0; i
< 16; i
++) {
747 /* set appropriate privilege; mask out unknown bits */
748 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
749 | ARM_DBGBCR_MATCH_MASK
750 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
751 | ARM_DBG_CR_ENABLE_MASK
))
752 | ARM_DBGBCR_TYPE_IVA
753 | ARM_DBG_CR_LINKED_UNLINKED
754 | ARM_DBG_CR_SECURITY_STATE_BOTH
755 | ARM_DBG_CR_MODE_CONTROL_USER
;
756 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
757 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
758 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
759 | ARM_DBGWCR_ACCESS_CONTROL_MASK
760 | ARM_DBG_CR_ENABLE_MASK
))
761 | ARM_DBG_CR_LINKED_UNLINKED
762 | ARM_DBG_CR_SECURITY_STATE_BOTH
763 | ARM_DBG_CR_MODE_CONTROL_USER
;
764 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
769 if (thread
== current_thread()) {
770 arm_debug_set32(thread
->machine
.DebugData
);
776 case ARM_DEBUG_STATE64
:
778 arm_debug_state64_t
*state
;
779 boolean_t enabled
= FALSE
;
782 if (count
!= ARM_DEBUG_STATE64_COUNT
)
783 return (KERN_INVALID_ARGUMENT
);
784 if (!thread_is_64bit(thread
))
785 return (KERN_INVALID_ARGUMENT
);
787 state
= (arm_debug_state64_t
*) tstate
;
789 if (state
->mdscr_el1
& 0x1)
792 for (i
= 0; i
< 16; i
++) {
793 /* do not allow context IDs to be set */
794 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
795 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
796 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
797 return KERN_PROTECTION_FAILURE
;
799 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
800 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
806 arm_debug_state64_t
*thread_state
= find_debug_state64(thread
);
807 if (thread_state
!= NULL
) {
808 void *pTmp
= thread
->machine
.DebugData
;
809 thread
->machine
.DebugData
= NULL
;
810 zfree(ads_zone
, pTmp
);
813 arm_debug_state64_t
*thread_state
= find_debug_state64(thread
);
814 if (thread_state
== NULL
) {
815 thread
->machine
.DebugData
= zalloc(ads_zone
);
816 bzero(thread
->machine
.DebugData
, sizeof *(thread
->machine
.DebugData
));
817 thread
->machine
.DebugData
->dsh
.flavor
= ARM_DEBUG_STATE64
;
818 thread
->machine
.DebugData
->dsh
.count
= ARM_DEBUG_STATE64_COUNT
;
819 thread_state
= find_debug_state64(thread
);
821 assert(NULL
!= thread_state
);
823 if (state
->mdscr_el1
& 0x1)
824 thread_state
->mdscr_el1
|= 0x1;
826 thread_state
->mdscr_el1
&= ~0x1;
828 for (i
= 0; i
< 16; i
++) {
829 /* set appropriate privilege; mask out unknown bits */
830 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
831 | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
832 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
833 | ARM_DBG_CR_ENABLE_MASK
))
834 | ARM_DBGBCR_TYPE_IVA
835 | ARM_DBG_CR_LINKED_UNLINKED
836 | ARM_DBG_CR_SECURITY_STATE_BOTH
837 | ARM_DBG_CR_MODE_CONTROL_USER
;
838 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK64
;
839 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
840 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
841 | ARM_DBGWCR_ACCESS_CONTROL_MASK
842 | ARM_DBG_CR_ENABLE_MASK
))
843 | ARM_DBG_CR_LINKED_UNLINKED
844 | ARM_DBG_CR_SECURITY_STATE_BOTH
845 | ARM_DBG_CR_MODE_CONTROL_USER
;
846 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK64
;
851 if (thread
== current_thread()) {
852 arm_debug_set64(thread
->machine
.DebugData
);
859 struct arm_vfp_state
*state
;
860 arm_neon_saved_state32_t
*thread_state
;
863 if (count
!= ARM_VFP_STATE_COUNT
&& count
!= ARM_VFPV2_STATE_COUNT
)
864 return (KERN_INVALID_ARGUMENT
);
866 if (count
== ARM_VFPV2_STATE_COUNT
)
871 state
= (struct arm_vfp_state
*) tstate
;
872 thread_state
= neon_state32(thread
->machine
.uNeon
);
873 /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */
875 bcopy(state
, thread_state
, (max
+ 1)*sizeof(uint32_t));
877 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
878 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
882 case ARM_NEON_STATE
:{
883 arm_neon_state_t
*state
;
884 arm_neon_saved_state32_t
*thread_state
;
886 if (count
!= ARM_NEON_STATE_COUNT
)
887 return (KERN_INVALID_ARGUMENT
);
889 if (thread_is_64bit(thread
))
890 return (KERN_INVALID_ARGUMENT
);
892 state
= (arm_neon_state_t
*)tstate
;
893 thread_state
= neon_state32(thread
->machine
.uNeon
);
895 assert(sizeof(*state
) == sizeof(*thread_state
));
896 bcopy(state
, thread_state
, sizeof(arm_neon_state_t
));
898 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE32
;
899 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE32_COUNT
;
904 case ARM_NEON_STATE64
:{
905 arm_neon_state64_t
*state
;
906 arm_neon_saved_state64_t
*thread_state
;
908 if (count
!= ARM_NEON_STATE64_COUNT
)
909 return (KERN_INVALID_ARGUMENT
);
911 if (!thread_is_64bit(thread
))
912 return (KERN_INVALID_ARGUMENT
);
914 state
= (arm_neon_state64_t
*)tstate
;
915 thread_state
= neon_state64(thread
->machine
.uNeon
);
917 assert(sizeof(*state
) == sizeof(*thread_state
));
918 bcopy(state
, thread_state
, sizeof(arm_neon_state64_t
));
920 thread
->machine
.uNeon
->nsh
.flavor
= ARM_NEON_SAVED_STATE64
;
921 thread
->machine
.uNeon
->nsh
.count
= ARM_NEON_SAVED_STATE64_COUNT
;
927 return (KERN_INVALID_ARGUMENT
);
929 return (KERN_SUCCESS
);
933 * Routine: machine_thread_state_initialize
937 machine_thread_state_initialize(
940 arm_context_t
*context
= thread
->machine
.contextData
;
943 * Should always be set up later. For a kernel thread, we don't care
944 * about this state. For a user thread, we'll set the state up in
945 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
948 if (context
!= NULL
) {
949 bzero(&context
->ss
.uss
, sizeof(context
->ss
.uss
));
950 bzero(&context
->ns
.uns
, sizeof(context
->ns
.uns
));
952 if (context
->ns
.nsh
.flavor
== ARM_NEON_SAVED_STATE64
) {
953 context
->ns
.ns_64
.fpcr
= FPCR_DEFAULT
;
955 context
->ns
.ns_32
.fpcr
= FPCR_DEFAULT_32
;
959 thread
->machine
.DebugData
= NULL
;
965 * Routine: machine_thread_dup
973 struct arm_saved_state
*self_saved_state
;
974 struct arm_saved_state
*target_saved_state
;
976 target
->machine
.cthread_self
= self
->machine
.cthread_self
;
977 target
->machine
.cthread_data
= self
->machine
.cthread_data
;
979 self_saved_state
= self
->machine
.upcb
;
980 target_saved_state
= target
->machine
.upcb
;
981 bcopy(self_saved_state
, target_saved_state
, sizeof(struct arm_saved_state
));
983 return (KERN_SUCCESS
);
987 * Routine: get_user_regs
990 struct arm_saved_state
*
994 return (thread
->machine
.upcb
);
997 arm_neon_saved_state_t
*
1001 return (thread
->machine
.uNeon
);
1005 * Routine: find_user_regs
1008 struct arm_saved_state
*
1012 return (thread
->machine
.upcb
);
1016 * Routine: find_kern_regs
1019 struct arm_saved_state
*
1024 * This works only for an interrupted kernel thread
1026 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
)
1027 return ((struct arm_saved_state
*) NULL
);
1029 return (getCpuDatap()->cpu_int_state
);
1033 arm_debug_state32_t
*
1037 if (thread
&& thread
->machine
.DebugData
)
1038 return &(thread
->machine
.DebugData
->uds
.ds32
);
1043 arm_debug_state64_t
*
1047 if (thread
&& thread
->machine
.DebugData
)
1048 return &(thread
->machine
.DebugData
->uds
.ds64
);
1054 * Routine: thread_userstack
1061 thread_state_t tstate
,
1063 mach_vm_offset_t
* user_stack
,
1071 case ARM_THREAD_STATE
:
1072 if (count
== ARM_UNIFIED_THREAD_STATE_COUNT
) {
1074 if (thread_is_64bit(thread
)) {
1075 sp
= ((arm_unified_thread_state_t
*)tstate
)->ts_64
.sp
;
1079 sp
= ((arm_unified_thread_state_t
*)tstate
)->ts_32
.sp
;
1085 /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1086 case ARM_THREAD_STATE32
:
1087 if (count
!= ARM_THREAD_STATE32_COUNT
)
1088 return (KERN_INVALID_ARGUMENT
);
1090 return (KERN_INVALID_ARGUMENT
);
1092 sp
= ((arm_thread_state32_t
*)tstate
)->sp
;
1095 case ARM_THREAD_STATE64
:
1096 if (count
!= ARM_THREAD_STATE64_COUNT
)
1097 return (KERN_INVALID_ARGUMENT
);
1099 return (KERN_INVALID_ARGUMENT
);
1101 sp
= ((arm_thread_state32_t
*)tstate
)->sp
;
1105 return (KERN_INVALID_ARGUMENT
);
1109 *user_stack
= CAST_USER_ADDR_T(sp
);
1113 *user_stack
= CAST_USER_ADDR_T(USRSTACK64
);
1118 return (KERN_SUCCESS
);
1122 * thread_userstackdefault:
1124 * Return the default stack location for the
1125 * thread, if otherwise unknown.
1128 thread_userstackdefault(
1129 mach_vm_offset_t
*default_user_stack
,
1133 *default_user_stack
= USRSTACK64
;
1135 *default_user_stack
= USRSTACK
;
1138 return (KERN_SUCCESS
);
1142 * Routine: thread_setuserstack
1146 thread_setuserstack(thread_t thread
, mach_vm_address_t user_stack
)
1148 struct arm_saved_state
*sv
;
1150 sv
= get_user_regs(thread
);
1152 set_saved_state_sp(sv
, user_stack
);
1158 * Routine: thread_adjuserstack
1162 thread_adjuserstack(thread_t thread
, int adjust
)
1164 struct arm_saved_state
*sv
;
1167 sv
= get_user_regs(thread
);
1169 sp
= get_saved_state_sp(sv
);
1171 set_saved_state_sp(sv
, sp
);;
1177 * Routine: thread_setentrypoint
1181 thread_setentrypoint(thread_t thread
, mach_vm_offset_t entry
)
1183 struct arm_saved_state
*sv
;
1185 sv
= get_user_regs(thread
);
1187 set_saved_state_pc(sv
, entry
);
1193 * Routine: thread_entrypoint
1198 __unused thread_t thread
,
1200 thread_state_t tstate
,
1201 unsigned int count __unused
,
1202 mach_vm_offset_t
* entry_point
1206 case ARM_THREAD_STATE
:
1208 struct arm_thread_state
*state
;
1210 state
= (struct arm_thread_state
*) tstate
;
1213 * If a valid entry point is specified, use it.
1216 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
1218 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
1223 case ARM_THREAD_STATE64
:
1225 struct arm_thread_state64
*state
;
1227 state
= (struct arm_thread_state64
*) tstate
;
1230 * If a valid entry point is specified, use it.
1233 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
1235 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
1241 return (KERN_INVALID_ARGUMENT
);
1244 return (KERN_SUCCESS
);
1249 * Routine: thread_set_child
1257 struct arm_saved_state
*child_state
;
1259 child_state
= get_user_regs(child
);
1261 set_saved_state_reg(child_state
, 0, pid
);
1262 set_saved_state_reg(child_state
, 1, 1ULL);
1267 * Routine: thread_set_parent
1275 struct arm_saved_state
*parent_state
;
1277 parent_state
= get_user_regs(parent
);
1279 set_saved_state_reg(parent_state
, 0, pid
);
1280 set_saved_state_reg(parent_state
, 1, 0);
1284 struct arm_act_context
{
1285 struct arm_unified_thread_state ss
;
1287 struct arm_neon_saved_state ns
;
1292 * Routine: act_thread_csave
1296 act_thread_csave(void)
1298 struct arm_act_context
*ic
;
1301 thread_t thread
= current_thread();
1303 ic
= (struct arm_act_context
*) kalloc(sizeof(struct arm_act_context
));
1304 if (ic
== (struct arm_act_context
*) NULL
)
1305 return ((void *) 0);
1307 val
= ARM_UNIFIED_THREAD_STATE_COUNT
;
1308 kret
= machine_thread_get_state(thread
, ARM_THREAD_STATE
, (thread_state_t
)&ic
->ss
, &val
);
1309 if (kret
!= KERN_SUCCESS
) {
1310 kfree(ic
, sizeof(struct arm_act_context
));
1311 return ((void *) 0);
1315 if (thread_is_64bit(thread
)) {
1316 val
= ARM_NEON_STATE64_COUNT
;
1317 kret
= machine_thread_get_state(thread
,
1319 (thread_state_t
) & ic
->ns
,
1322 val
= ARM_NEON_STATE_COUNT
;
1323 kret
= machine_thread_get_state(thread
,
1325 (thread_state_t
) & ic
->ns
,
1328 if (kret
!= KERN_SUCCESS
) {
1329 kfree(ic
, sizeof(struct arm_act_context
));
1330 return ((void *) 0);
1337 * Routine: act_thread_catt
1341 act_thread_catt(void *ctx
)
1343 struct arm_act_context
*ic
;
1345 thread_t thread
= current_thread();
1347 ic
= (struct arm_act_context
*) ctx
;
1348 if (ic
== (struct arm_act_context
*) NULL
)
1351 kret
= machine_thread_set_state(thread
, ARM_THREAD_STATE
, (thread_state_t
)&ic
->ss
, ARM_UNIFIED_THREAD_STATE_COUNT
);
1352 if (kret
!= KERN_SUCCESS
)
1356 if (thread_is_64bit(thread
)) {
1357 kret
= machine_thread_set_state(thread
,
1359 (thread_state_t
) & ic
->ns
,
1360 ARM_NEON_STATE64_COUNT
);
1362 kret
= machine_thread_set_state(thread
,
1364 (thread_state_t
) & ic
->ns
,
1365 ARM_NEON_STATE_COUNT
);
1367 if (kret
!= KERN_SUCCESS
)
1371 kfree(ic
, sizeof(struct arm_act_context
));
1375 * Routine: act_thread_catt
1379 act_thread_cfree(void *ctx
)
1381 kfree(ctx
, sizeof(struct arm_act_context
));
1385 thread_set_wq_state32(thread_t thread
, thread_state_t tstate
)
1387 arm_thread_state_t
*state
;
1388 struct arm_saved_state
*saved_state
;
1389 struct arm_saved_state32
*saved_state_32
;
1390 thread_t curth
= current_thread();
1393 assert(!thread_is_64bit(thread
));
1395 saved_state
= thread
->machine
.upcb
;
1396 saved_state_32
= saved_state32(saved_state
);
1398 state
= (arm_thread_state_t
*)tstate
;
1400 if (curth
!= thread
) {
1402 thread_lock(thread
);
1406 * do not zero saved_state, it can be concurrently accessed
1407 * and zero is not a valid state for some of the registers,
1410 thread_state32_to_saved_state(state
, saved_state
);
1411 saved_state_32
->cpsr
= PSR64_USER32_DEFAULT
;
1413 if (curth
!= thread
) {
1414 thread_unlock(thread
);
1418 return KERN_SUCCESS
;
1422 thread_set_wq_state64(thread_t thread
, thread_state_t tstate
)
1424 arm_thread_state64_t
*state
;
1425 struct arm_saved_state
*saved_state
;
1426 struct arm_saved_state64
*saved_state_64
;
1427 thread_t curth
= current_thread();
1430 assert(thread_is_64bit(thread
));
1432 saved_state
= thread
->machine
.upcb
;
1433 saved_state_64
= saved_state64(saved_state
);
1434 state
= (arm_thread_state64_t
*)tstate
;
1436 if (curth
!= thread
) {
1438 thread_lock(thread
);
1442 * do not zero saved_state, it can be concurrently accessed
1443 * and zero is not a valid state for some of the registers,
1446 thread_state64_to_saved_state(state
, saved_state
);
1447 saved_state_64
->cpsr
= PSR64_USER64_DEFAULT
;
1449 if (curth
!= thread
) {
1450 thread_unlock(thread
);
1454 return KERN_SUCCESS
;