[apple/xnu.git] / osfmk / arm64 / status.c (xnu-4903.270.47)

/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <debug.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm64/proc_reg.h>

struct arm_vfpv2_state {
    __uint32_t __r[32];
    __uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
    (sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))

/*
 * Forward definitions
 */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);

/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
    /* FLAVOR_LIST */ 0,
    ARM_UNIFIED_THREAD_STATE_COUNT,
    ARM_VFP_STATE_COUNT,
    ARM_EXCEPTION_STATE_COUNT,
    ARM_DEBUG_STATE_COUNT,
    /* THREAD_STATE_NONE (legacy) */ 0,
    ARM_THREAD_STATE64_COUNT,
    ARM_EXCEPTION_STATE64_COUNT,
    /* THREAD_STATE_LAST (legacy) */ 0,
    ARM_THREAD_STATE32_COUNT,
    /* UNALLOCATED */ 0,
    /* UNALLOCATED */ 0,
    /* UNALLOCATED */ 0,
    /* UNALLOCATED */ 0,
    ARM_DEBUG_STATE32_COUNT,
    ARM_DEBUG_STATE64_COUNT,
    ARM_NEON_STATE_COUNT,
    ARM_NEON_STATE64_COUNT,
    /* UNALLOCATED */ 0,
    /* UNALLOCATED */ 0,
    /* ARM_SAVED_STATE32_COUNT */ 0,
    /* ARM_SAVED_STATE64_COUNT */ 0,
    /* ARM_NEON_SAVED_STATE32_COUNT */ 0,
    /* ARM_NEON_SAVED_STATE64_COUNT */ 0,
};

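/*
 * Lookup sketch (illustrative only; this helper is hypothetical and not used
 * elsewhere in this file): a caller-supplied flavor can be bounds-checked
 * against the table above before its count is trusted.
 */
#if 0
static boolean_t
state_count_matches_flavor(thread_flavor_t flavor, mach_msg_type_number_t count)
{
    if (flavor < 0 ||
        (size_t)flavor >= sizeof(_MachineStateCount) / sizeof(_MachineStateCount[0])) {
        return FALSE;
    }
    return (_MachineStateCount[flavor] == count) ? TRUE : FALSE;
}
#endif
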
extern zone_t ads_zone;

#if __arm64__
/*
 * Copy values from saved_state to ts64.
 */
void
saved_state_to_thread_state64(const arm_saved_state_t *saved_state, arm_thread_state64_t *ts64)
{
    uint32_t i;

    assert(is_saved_state64(saved_state));

    ts64->fp = get_saved_state_fp(saved_state);
    ts64->lr = get_saved_state_lr(saved_state);
    ts64->sp = get_saved_state_sp(saved_state);
    ts64->pc = get_saved_state_pc(saved_state);
    ts64->cpsr = get_saved_state_cpsr(saved_state);
    for (i = 0; i < 29; i++) {
        ts64->x[i] = get_saved_state_reg(saved_state, i);
    }
}

/*
 * Copy values from ts64 to saved_state
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t *ts64, arm_saved_state_t *saved_state)
{
    uint32_t i;

    assert(is_saved_state64(saved_state));

    set_saved_state_fp(saved_state, ts64->fp);
    set_saved_state_lr(saved_state, ts64->lr);
    set_saved_state_sp(saved_state, ts64->sp);
    set_saved_state_pc(saved_state, ts64->pc);
    set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
    for (i = 0; i < 29; i++) {
        set_saved_state_reg(saved_state, i, ts64->x[i]);
    }
}
#endif
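
/*
 * Round-trip sketch (illustrative only; "round_trip_example" is hypothetical):
 * copying a 64-bit saved state out to an arm_thread_state64_t and back
 * preserves x0-x28, fp, lr, sp and pc, while thread_state64_to_saved_state()
 * forces the CPSR mode bits back to the 64-bit value regardless of what the
 * caller supplied.
 */
#if 0
static void
round_trip_example(arm_saved_state_t *ss)
{
    arm_thread_state64_t ts64;

    saved_state_to_thread_state64(ss, &ts64);
    /* ... edits a userspace client might make to ts64 would go here ... */
    thread_state64_to_saved_state(&ts64, ss);
}
#endif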

kern_return_t
handle_get_arm32_thread_state(
    thread_state_t tstate,
    mach_msg_type_number_t *count,
    const arm_saved_state_t *saved_state)
{
    if (*count < ARM_THREAD_STATE32_COUNT) {
        return KERN_INVALID_ARGUMENT;
    }
    if (!is_saved_state32(saved_state)) {
        return KERN_INVALID_ARGUMENT;
    }

    (void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
    *count = ARM_THREAD_STATE32_COUNT;
    return KERN_SUCCESS;
}

kern_return_t
handle_get_arm64_thread_state(
    thread_state_t tstate,
    mach_msg_type_number_t *count,
    const arm_saved_state_t *saved_state)
{
    if (*count < ARM_THREAD_STATE64_COUNT) {
        return KERN_INVALID_ARGUMENT;
    }
    if (!is_saved_state64(saved_state)) {
        return KERN_INVALID_ARGUMENT;
    }

    (void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
    *count = ARM_THREAD_STATE64_COUNT;
    return KERN_SUCCESS;
}


kern_return_t
handle_get_arm_thread_state(
    thread_state_t tstate,
    mach_msg_type_number_t *count,
    const arm_saved_state_t *saved_state)
{
    /* In an arm64 world, this flavor can be used to retrieve the thread
     * state of a 32-bit or 64-bit thread into a unified structure, but we
     * need to support legacy clients who are only aware of 32-bit, so
     * check the count to see what the client is expecting.
     */
    if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
        return handle_get_arm32_thread_state(tstate, count, saved_state);
    }

    arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
    bzero(unified_state, sizeof(*unified_state));
#if __arm64__
    if (is_saved_state64(saved_state)) {
        unified_state->ash.flavor = ARM_THREAD_STATE64;
        unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
        (void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
    } else
#endif
    {
        unified_state->ash.flavor = ARM_THREAD_STATE32;
        unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
        (void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
    }
    *count = ARM_UNIFIED_THREAD_STATE_COUNT;
    return KERN_SUCCESS;
}

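/*
 * Caller sketch (illustrative only; "unified_versus_legacy_example" and "ss"
 * are hypothetical): the same flavor serves legacy 32-bit-only callers and
 * unified callers, selected purely by the count the caller passes in.
 */
#if 0
static void
unified_versus_legacy_example(const arm_saved_state_t *ss)
{
    arm_thread_state32_t legacy;
    arm_unified_thread_state_t unified;
    mach_msg_type_number_t cnt;

    /* Legacy client: a count below ARM_UNIFIED_THREAD_STATE_COUNT falls back
     * to the plain 32-bit copy-out. */
    cnt = ARM_THREAD_STATE32_COUNT;
    (void)handle_get_arm_thread_state((thread_state_t)&legacy, &cnt, ss);

    /* Unified client: receives the flavor/count header plus the 32- or
     * 64-bit payload. */
    cnt = ARM_UNIFIED_THREAD_STATE_COUNT;
    (void)handle_get_arm_thread_state((thread_state_t)&unified, &cnt, ss);
}
#endif
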
kern_return_t
handle_set_arm32_thread_state(
    const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t *saved_state)
{
    if (count != ARM_THREAD_STATE32_COUNT) {
        return KERN_INVALID_ARGUMENT;
    }

    (void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
    return KERN_SUCCESS;
}

kern_return_t
handle_set_arm64_thread_state(
    const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t *saved_state)
{
    if (count != ARM_THREAD_STATE64_COUNT) {
        return KERN_INVALID_ARGUMENT;
    }

    (void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
    return KERN_SUCCESS;
}


kern_return_t
handle_set_arm_thread_state(
    const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t *saved_state)
{
    /* In an arm64 world, this flavor can be used to set the thread state of a
     * 32-bit or 64-bit thread from a unified structure, but we need to support
     * legacy clients who are only aware of 32-bit, so check the count to see
     * what the client is expecting.
     */
    if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
        if (!is_saved_state32(saved_state)) {
            return KERN_INVALID_ARGUMENT;
        }
        return handle_set_arm32_thread_state(tstate, count, saved_state);
    }

    const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
    if (is_thread_state64(unified_state)) {
        if (!is_saved_state64(saved_state)) {
            return KERN_INVALID_ARGUMENT;
        }
        (void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
    } else
#endif
    {
        if (!is_saved_state32(saved_state)) {
            return KERN_INVALID_ARGUMENT;
        }
        (void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
    }

    return KERN_SUCCESS;
}

/*
 * Translate thread state arguments to userspace representation
 */

kern_return_t
machine_thread_state_convert_to_user(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    // No conversion to userspace representation on this platform
    (void)thread; (void)flavor; (void)tstate; (void)count;
    return KERN_SUCCESS;
}

/*
 * Translate thread state arguments from userspace representation
 */

kern_return_t
machine_thread_state_convert_from_user(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    // No conversion from userspace representation on this platform
    (void)thread; (void)flavor; (void)tstate; (void)count;
    return KERN_SUCCESS;
}

/*
 * Translate signal context data pointer to userspace representation
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
    __assert_only thread_t thread,
    user_addr_t *uctxp)
{
    // No conversion to userspace representation on this platform
    (void)thread; (void)uctxp;
    return KERN_SUCCESS;
}

/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
    __assert_only thread_t thread,
    user_addr_t *fptrs,
    uint32_t count)
{
    // No conversion from userspace representation on this platform
    (void)thread; (void)fptrs; (void)count;
    return KERN_SUCCESS;
}

/*
 * Routine: machine_thread_get_state
 *
 */
kern_return_t
machine_thread_get_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    switch (flavor) {
    case THREAD_STATE_FLAVOR_LIST:
        if (*count < 4) {
            return KERN_INVALID_ARGUMENT;
        }

        tstate[0] = ARM_THREAD_STATE;
        tstate[1] = ARM_VFP_STATE;
        tstate[2] = ARM_EXCEPTION_STATE;
        tstate[3] = ARM_DEBUG_STATE;
        *count = 4;
        break;

    case THREAD_STATE_FLAVOR_LIST_NEW:
        if (*count < 4) {
            return KERN_INVALID_ARGUMENT;
        }

        tstate[0] = ARM_THREAD_STATE;
        tstate[1] = ARM_VFP_STATE;
        tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
        tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
        *count = 4;
        break;

    case ARM_THREAD_STATE:
    {
        kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
        if (rn) {
            return rn;
        }
        break;
    }
    case ARM_THREAD_STATE32:
    {
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
        if (rn) {
            return rn;
        }
        break;
    }
#if __arm64__
    case ARM_THREAD_STATE64:
    {
        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
        if (rn) {
            return rn;
        }
        break;
    }
#endif
    case ARM_EXCEPTION_STATE:{
        struct arm_exception_state *state;
        struct arm_saved_state32 *saved_state;

        if (*count < ARM_EXCEPTION_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (struct arm_exception_state *) tstate;
        saved_state = saved_state32(thread->machine.upcb);

        state->exception = saved_state->exception;
        state->fsr = saved_state->esr;
        state->far = saved_state->far;

        *count = ARM_EXCEPTION_STATE_COUNT;
        break;
    }
    case ARM_EXCEPTION_STATE64:{
        struct arm_exception_state64 *state;
        struct arm_saved_state64 *saved_state;

        if (*count < ARM_EXCEPTION_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (struct arm_exception_state64 *) tstate;
        saved_state = saved_state64(thread->machine.upcb);

        state->exception = saved_state->exception;
        state->far = saved_state->far;
        state->esr = saved_state->esr;

        *count = ARM_EXCEPTION_STATE64_COUNT;
        break;
    }
    case ARM_DEBUG_STATE:{
        arm_legacy_debug_state_t *state;
        arm_debug_state32_t *thread_state;

        if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_legacy_debug_state_t *) tstate;
        thread_state = find_debug_state32(thread);

        if (thread_state == NULL) {
            bzero(state, sizeof(arm_legacy_debug_state_t));
        } else {
            bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
        }

        *count = ARM_LEGACY_DEBUG_STATE_COUNT;
        break;
    }
    case ARM_DEBUG_STATE32:{
        arm_debug_state32_t *state;
        arm_debug_state32_t *thread_state;

        if (*count < ARM_DEBUG_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_debug_state32_t *) tstate;
        thread_state = find_debug_state32(thread);

        if (thread_state == NULL) {
            bzero(state, sizeof(arm_debug_state32_t));
        } else {
            bcopy(thread_state, state, sizeof(arm_debug_state32_t));
        }

        *count = ARM_DEBUG_STATE32_COUNT;
        break;
    }

    case ARM_DEBUG_STATE64:{
        arm_debug_state64_t *state;
        arm_debug_state64_t *thread_state;

        if (*count < ARM_DEBUG_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_debug_state64_t *) tstate;
        thread_state = find_debug_state64(thread);

        if (thread_state == NULL) {
            bzero(state, sizeof(arm_debug_state64_t));
        } else {
            bcopy(thread_state, state, sizeof(arm_debug_state64_t));
        }

        *count = ARM_DEBUG_STATE64_COUNT;
        break;
    }

    case ARM_VFP_STATE:{
        struct arm_vfp_state *state;
        arm_neon_saved_state32_t *thread_state;
        unsigned int max;

        if (*count < ARM_VFP_STATE_COUNT) {
            if (*count < ARM_VFPV2_STATE_COUNT) {
                return KERN_INVALID_ARGUMENT;
            } else {
                *count = ARM_VFPV2_STATE_COUNT;
            }
        }

        if (*count == ARM_VFPV2_STATE_COUNT) {
            max = 32;
        } else {
            max = 64;
        }

        state = (struct arm_vfp_state *) tstate;
        thread_state = neon_state32(thread->machine.uNeon);
        /* ARM64 TODO: set fpsr and fpcr from state->fpscr */

        bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
        *count = (max + 1);
        break;
    }
    case ARM_NEON_STATE:{
        arm_neon_state_t *state;
        arm_neon_saved_state32_t *thread_state;

        if (*count < ARM_NEON_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_neon_state_t *)tstate;
        thread_state = neon_state32(thread->machine.uNeon);

        assert(sizeof(*thread_state) == sizeof(*state));
        bcopy(thread_state, state, sizeof(arm_neon_state_t));

        *count = ARM_NEON_STATE_COUNT;
        break;
    }

    case ARM_NEON_STATE64:{
        arm_neon_state64_t *state;
        arm_neon_saved_state64_t *thread_state;

        if (*count < ARM_NEON_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_neon_state64_t *)tstate;
        thread_state = neon_state64(thread->machine.uNeon);

        /* For now, these are identical */
        assert(sizeof(*state) == sizeof(*thread_state));
        bcopy(thread_state, state, sizeof(arm_neon_state64_t));

        *count = ARM_NEON_STATE64_COUNT;
        break;
    }

    default:
        return KERN_INVALID_ARGUMENT;
    }
    return KERN_SUCCESS;
}


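/*
 * User-space view (illustrative sketch only, not kernel code): requests for
 * the flavors handled above arrive through the thread_get_state() MIG call.
 * The "target" thread port and the helper name below are hypothetical.
 */
#if 0
static void
get_state_example(mach_port_t target)
{
    arm_thread_state64_t ts64;
    mach_msg_type_number_t cnt = ARM_THREAD_STATE64_COUNT;

    if (thread_get_state(target, ARM_THREAD_STATE64,
        (thread_state_t)&ts64, &cnt) == KERN_SUCCESS) {
        /* ts64.pc, ts64.sp and ts64.x[0..28] now hold the thread's registers. */
    }
}
#endif
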
/*
 * Routine: machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t *count)
{
    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
        return KERN_FAILURE;
    }

    switch (flavor) {
    case ARM_THREAD_STATE:
    {
        kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
        if (rn) {
            return rn;
        }
        break;
    }
    case ARM_THREAD_STATE32:
    {
        kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
        if (rn) {
            return rn;
        }
        break;
    }
#if __arm64__
    case ARM_THREAD_STATE64:
    {
        kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
        if (rn) {
            return rn;
        }
        break;
    }
#endif
    default:
        return KERN_INVALID_ARGUMENT;
    }
    return KERN_SUCCESS;
}

void
machine_thread_switch_addrmode(thread_t thread)
{
    if (task_has_64Bit_data(thread->task)) {
        thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
        thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

        /*
         * Reinitialize the NEON state.
         */
        bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
        thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
    } else {
        thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
        thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

        /*
         * Reinitialize the NEON state.
         */
        bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
        thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
    }
}

extern long long arm_debug_get(void);

/*
 * Routine: machine_thread_set_state
 *
 */
kern_return_t
machine_thread_set_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    kern_return_t rn;

    switch (flavor) {
    case ARM_THREAD_STATE:
        rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
        if (rn) {
            return rn;
        }
        break;

    case ARM_THREAD_STATE32:
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
        if (rn) {
            return rn;
        }
        break;

#if __arm64__
    case ARM_THREAD_STATE64:
        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
        if (rn) {
            return rn;
        }
        break;
#endif
    case ARM_EXCEPTION_STATE:{
        if (count != ARM_EXCEPTION_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        break;
    }
    case ARM_EXCEPTION_STATE64:{
        if (count != ARM_EXCEPTION_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        break;
    }
    case ARM_DEBUG_STATE:
    {
        arm_legacy_debug_state_t *state;
        boolean_t enabled = FALSE;
        unsigned int i;

        if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_legacy_debug_state_t *) tstate;

        for (i = 0; i < 16; i++) {
            /* do not allow context IDs to be set */
            if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
                || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
                return KERN_PROTECTION_FAILURE;
            }
            if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
                || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
                enabled = TRUE;
            }
        }


        if (!enabled) {
            arm_debug_state32_t *thread_state = find_debug_state32(thread);
            if (thread_state != NULL) {
                void *pTmp = thread->machine.DebugData;
                thread->machine.DebugData = NULL;
                zfree(ads_zone, pTmp);
            }
        } else {
            arm_debug_state32_t *thread_state = find_debug_state32(thread);
            if (thread_state == NULL) {
                thread->machine.DebugData = zalloc(ads_zone);
                bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
                thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
                thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
                thread_state = find_debug_state32(thread);
            }
            assert(NULL != thread_state);

            for (i = 0; i < 16; i++) {
                /* set appropriate privilege; mask out unknown bits */
                thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGBCR_MATCH_MASK
                    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBGBCR_TYPE_IVA
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
                thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBGWCR_ACCESS_CONTROL_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
            }

            thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE don't drive single stepping.
        }

        if (thread == current_thread()) {
            arm_debug_set32(thread->machine.DebugData);
        }

        break;
    }
    case ARM_DEBUG_STATE32:
        /* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
    {
        arm_debug_state32_t *state;
        boolean_t enabled = FALSE;
        unsigned int i;

        if (count != ARM_DEBUG_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_debug_state32_t *) tstate;

        if (state->mdscr_el1 & 0x1) {
            enabled = TRUE;
        }

        for (i = 0; i < 16; i++) {
            /* do not allow context IDs to be set */
            if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
                || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
                return KERN_PROTECTION_FAILURE;
            }
            if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
                || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
                enabled = TRUE;
            }
        }

        if (!enabled) {
            arm_debug_state32_t *thread_state = find_debug_state32(thread);
            if (thread_state != NULL) {
                void *pTmp = thread->machine.DebugData;
                thread->machine.DebugData = NULL;
                zfree(ads_zone, pTmp);
            }
        } else {
            arm_debug_state32_t *thread_state = find_debug_state32(thread);
            if (thread_state == NULL) {
                thread->machine.DebugData = zalloc(ads_zone);
                bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
                thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
                thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
                thread_state = find_debug_state32(thread);
            }
            assert(NULL != thread_state);

            if (state->mdscr_el1 & 0x1) {
                thread_state->mdscr_el1 |= 0x1;
            } else {
                thread_state->mdscr_el1 &= ~0x1;
            }

            for (i = 0; i < 16; i++) {
                /* set appropriate privilege; mask out unknown bits */
                thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGBCR_MATCH_MASK
                    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBGBCR_TYPE_IVA
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
                thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBGWCR_ACCESS_CONTROL_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
            }
        }

        if (thread == current_thread()) {
            arm_debug_set32(thread->machine.DebugData);
        }

        break;
    }

    case ARM_DEBUG_STATE64:
    {
        arm_debug_state64_t *state;
        boolean_t enabled = FALSE;
        unsigned int i;

        if (count != ARM_DEBUG_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_debug_state64_t *) tstate;

        if (state->mdscr_el1 & 0x1) {
            enabled = TRUE;
        }

        for (i = 0; i < 16; i++) {
            /* do not allow context IDs to be set */
            if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
                || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
                return KERN_PROTECTION_FAILURE;
            }
            if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
                || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
                enabled = TRUE;
            }
        }

        if (!enabled) {
            arm_debug_state64_t *thread_state = find_debug_state64(thread);
            if (thread_state != NULL) {
                void *pTmp = thread->machine.DebugData;
                thread->machine.DebugData = NULL;
                zfree(ads_zone, pTmp);
            }
        } else {
            arm_debug_state64_t *thread_state = find_debug_state64(thread);
            if (thread_state == NULL) {
                thread->machine.DebugData = zalloc(ads_zone);
                bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
                thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
                thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
                thread_state = find_debug_state64(thread);
            }
            assert(NULL != thread_state);

            if (state->mdscr_el1 & 0x1) {
                thread_state->mdscr_el1 |= 0x1;
            } else {
                thread_state->mdscr_el1 &= ~0x1;
            }

            for (i = 0; i < 16; i++) {
                /* set appropriate privilege; mask out unknown bits */
                thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
                    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
                    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBGBCR_TYPE_IVA
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
                thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBGWCR_ACCESS_CONTROL_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
            }
        }

        if (thread == current_thread()) {
            arm_debug_set64(thread->machine.DebugData);
        }

        break;
    }

    case ARM_VFP_STATE:{
        struct arm_vfp_state *state;
        arm_neon_saved_state32_t *thread_state;
        unsigned int max;

        if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (count == ARM_VFPV2_STATE_COUNT) {
            max = 32;
        } else {
            max = 64;
        }

        state = (struct arm_vfp_state *) tstate;
        thread_state = neon_state32(thread->machine.uNeon);
        /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

        bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
        break;
    }

    case ARM_NEON_STATE:{
        arm_neon_state_t *state;
        arm_neon_saved_state32_t *thread_state;

        if (count != ARM_NEON_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_neon_state_t *)tstate;
        thread_state = neon_state32(thread->machine.uNeon);

        assert(sizeof(*state) == sizeof(*thread_state));
        bcopy(state, thread_state, sizeof(arm_neon_state_t));

        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
        break;
    }

    case ARM_NEON_STATE64:{
        arm_neon_state64_t *state;
        arm_neon_saved_state64_t *thread_state;

        if (count != ARM_NEON_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_neon_state64_t *)tstate;
        thread_state = neon_state64(thread->machine.uNeon);

        assert(sizeof(*state) == sizeof(*thread_state));
        bcopy(state, thread_state, sizeof(arm_neon_state64_t));

        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
        break;
    }

    default:
        return KERN_INVALID_ARGUMENT;
    }
    return KERN_SUCCESS;
}

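/*
 * User-space view (illustrative sketch only, not kernel code): an all-zero
 * ARM_DEBUG_STATE64 takes the "!enabled" path above and releases any
 * per-thread DebugData, while setting an enable bit or MDSCR_EL1.SS causes
 * it to be allocated from ads_zone. The "target" thread port and the helper
 * name below are hypothetical.
 */
#if 0
static void
clear_hw_breakpoints_example(mach_port_t target)
{
    arm_debug_state64_t dbg;

    bzero(&dbg, sizeof(dbg));
    (void)thread_set_state(target, ARM_DEBUG_STATE64,
        (thread_state_t)&dbg, ARM_DEBUG_STATE64_COUNT);
}
#endif
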
/*
 * Routine: machine_thread_state_initialize
 *
 */
kern_return_t
machine_thread_state_initialize(
    thread_t thread)
{
    arm_context_t *context = thread->machine.contextData;

    /*
     * Should always be set up later. For a kernel thread, we don't care
     * about this state. For a user thread, we'll set the state up in
     * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
     */

    if (context != NULL) {
        bzero(&context->ss.uss, sizeof(context->ss.uss));
        bzero(&context->ns.uns, sizeof(context->ns.uns));

        if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
            context->ns.ns_64.fpcr = FPCR_DEFAULT;
        } else {
            context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
        }
    }

    thread->machine.DebugData = NULL;


    return KERN_SUCCESS;
}

/*
 * Routine: machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(
    thread_t self,
    thread_t target,
    __unused boolean_t is_corpse)
{
    struct arm_saved_state *self_saved_state;
    struct arm_saved_state *target_saved_state;

    target->machine.cthread_self = self->machine.cthread_self;
    target->machine.cthread_data = self->machine.cthread_data;

    self_saved_state = self->machine.upcb;
    target_saved_state = target->machine.upcb;
    bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));

    return KERN_SUCCESS;
}

/*
 * Routine: get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(
    thread_t thread)
{
    return thread->machine.upcb;
}

arm_neon_saved_state_t *
get_user_neon_regs(
    thread_t thread)
{
    return thread->machine.uNeon;
}

/*
 * Routine: find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(
    thread_t thread)
{
    return thread->machine.upcb;
}

/*
 * Routine: find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(
    thread_t thread)
{
    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
        return (struct arm_saved_state *) NULL;
    } else {
        return getCpuDatap()->cpu_int_state;
    }
}

arm_debug_state32_t *
find_debug_state32(
    thread_t thread)
{
    if (thread && thread->machine.DebugData) {
        return &(thread->machine.DebugData->uds.ds32);
    } else {
        return NULL;
    }
}

arm_debug_state64_t *
find_debug_state64(
    thread_t thread)
{
    if (thread && thread->machine.DebugData) {
        return &(thread->machine.DebugData->uds.ds64);
    } else {
        return NULL;
    }
}

/*
 * Routine: thread_userstack
 *
 */
kern_return_t
thread_userstack(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t *user_stack,
    int *customstack,
    boolean_t is_64bit_data
    )
{
    register_t sp;

    switch (flavor) {
    case ARM_THREAD_STATE:
        if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
#if __arm64__
            if (is_64bit_data) {
                sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
            } else
#endif
            {
                sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
            }

            break;
        }

        /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
    case ARM_THREAD_STATE32:
        if (count != ARM_THREAD_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (is_64bit_data) {
            return KERN_INVALID_ARGUMENT;
        }

        sp = ((arm_thread_state32_t *)tstate)->sp;
        break;
#if __arm64__
    case ARM_THREAD_STATE64:
        if (count != ARM_THREAD_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (!is_64bit_data) {
            return KERN_INVALID_ARGUMENT;
        }

        sp = ((arm_thread_state64_t *)tstate)->sp;
        break;
#endif
    default:
        return KERN_INVALID_ARGUMENT;
    }

    if (sp) {
        *user_stack = CAST_USER_ADDR_T(sp);
        if (customstack) {
            *customstack = 1;
        }
    } else {
        *user_stack = CAST_USER_ADDR_T(USRSTACK64);
        if (customstack) {
            *customstack = 0;
        }
    }

    return KERN_SUCCESS;
}

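/*
 * Behaviour sketch (illustrative only; the helper name and locals are
 * hypothetical): a zero sp in the supplied state selects the default stack
 * and reports customstack = 0, while any non-zero sp is taken verbatim as a
 * custom stack.
 */
#if 0
static void
userstack_default_example(void)
{
    arm_thread_state64_t ts = { .sp = 0 };
    mach_vm_offset_t stack = 0;
    int custom = -1;

    (void)thread_userstack(NULL, ARM_THREAD_STATE64, (thread_state_t)&ts,
        ARM_THREAD_STATE64_COUNT, &stack, &custom, TRUE);
    /* stack == USRSTACK64 and custom == 0 on return. */
}
#endif
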
/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
    mach_vm_offset_t *default_user_stack,
    boolean_t is64bit)
{
    if (is64bit) {
        *default_user_stack = USRSTACK64;
    } else {
        *default_user_stack = USRSTACK;
    }

    return KERN_SUCCESS;
}

/*
 * Routine: thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
{
    struct arm_saved_state *sv;

    sv = get_user_regs(thread);

    set_saved_state_sp(sv, user_stack);

    return;
}

/*
 * Routine: thread_adjuserstack
 *
 */
uint64_t
thread_adjuserstack(thread_t thread, int adjust)
{
    struct arm_saved_state *sv;
    uint64_t sp;

    sv = get_user_regs(thread);

    sp = get_saved_state_sp(sv);
    sp += adjust;
    set_saved_state_sp(sv, sp);

    return sp;
}

/*
 * Routine: thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
{
    struct arm_saved_state *sv;

    sv = get_user_regs(thread);

    set_saved_state_pc(sv, entry);

    return;
}

/*
 * Routine: thread_entrypoint
 *
 */
kern_return_t
thread_entrypoint(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count __unused,
    mach_vm_offset_t *entry_point
    )
{
    switch (flavor) {
    case ARM_THREAD_STATE:
    {
        struct arm_thread_state *state;

        state = (struct arm_thread_state *) tstate;

        /*
         * If a valid entry point is specified, use it.
         */
        if (state->pc) {
            *entry_point = CAST_USER_ADDR_T(state->pc);
        } else {
            *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
        }
    }
    break;

    case ARM_THREAD_STATE64:
    {
        struct arm_thread_state64 *state;

        state = (struct arm_thread_state64 *) tstate;

        /*
         * If a valid entry point is specified, use it.
         */
        if (state->pc) {
            *entry_point = CAST_USER_ADDR_T(state->pc);
        } else {
            *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
        }

        break;
    }
    default:
        return KERN_INVALID_ARGUMENT;
    }

    return KERN_SUCCESS;
}


/*
 * Routine: thread_set_child
 *
 */
void
thread_set_child(
    thread_t child,
    int pid)
{
    struct arm_saved_state *child_state;

    child_state = get_user_regs(child);

    set_saved_state_reg(child_state, 0, pid);
    set_saved_state_reg(child_state, 1, 1ULL);
}


/*
 * Routine: thread_set_parent
 *
 */
void
thread_set_parent(
    thread_t parent,
    int pid)
{
    struct arm_saved_state *parent_state;

    parent_state = get_user_regs(parent);

    set_saved_state_reg(parent_state, 0, pid);
    set_saved_state_reg(parent_state, 1, 0);
}

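/*
 * Note (sketch of intent): thread_set_child() and thread_set_parent() encode
 * a fork()-style return convention, with x0 carrying the peer's pid and x1
 * flagging child (1) versus parent (0); the user-space fork stub is assumed
 * to consume these two registers.
 */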

struct arm_act_context {
    struct arm_unified_thread_state ss;
#if __ARM_VFP__
    struct arm_neon_saved_state ns;
#endif
};

/*
 * Routine: act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
    struct arm_act_context *ic;
    kern_return_t kret;
    unsigned int val;
    thread_t thread = current_thread();

    ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
    if (ic == (struct arm_act_context *) NULL) {
        return (void *) 0;
    }

    val = ARM_UNIFIED_THREAD_STATE_COUNT;
    kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
    if (kret != KERN_SUCCESS) {
        kfree(ic, sizeof(struct arm_act_context));
        return (void *) 0;
    }

#if __ARM_VFP__
    if (thread_is_64bit_data(thread)) {
        val = ARM_NEON_STATE64_COUNT;
        kret = machine_thread_get_state(thread,
            ARM_NEON_STATE64,
            (thread_state_t)&ic->ns,
            &val);
    } else {
        val = ARM_NEON_STATE_COUNT;
        kret = machine_thread_get_state(thread,
            ARM_NEON_STATE,
            (thread_state_t)&ic->ns,
            &val);
    }
    if (kret != KERN_SUCCESS) {
        kfree(ic, sizeof(struct arm_act_context));
        return (void *) 0;
    }
#endif
    return ic;
}

/*
 * Routine: act_thread_catt
 *
 */
void
act_thread_catt(void *ctx)
{
    struct arm_act_context *ic;
    kern_return_t kret;
    thread_t thread = current_thread();

    ic = (struct arm_act_context *) ctx;
    if (ic == (struct arm_act_context *) NULL) {
        return;
    }

    kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
    if (kret != KERN_SUCCESS) {
        goto out;
    }

#if __ARM_VFP__
    if (thread_is_64bit_data(thread)) {
        kret = machine_thread_set_state(thread,
            ARM_NEON_STATE64,
            (thread_state_t)&ic->ns,
            ARM_NEON_STATE64_COUNT);
    } else {
        kret = machine_thread_set_state(thread,
            ARM_NEON_STATE,
            (thread_state_t)&ic->ns,
            ARM_NEON_STATE_COUNT);
    }
    if (kret != KERN_SUCCESS) {
        goto out;
    }
#endif
out:
    kfree(ic, sizeof(struct arm_act_context));
}

/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
    kfree(ctx, sizeof(struct arm_act_context));
}

kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
    arm_thread_state_t *state;
    struct arm_saved_state *saved_state;
    struct arm_saved_state32 *saved_state_32;
    thread_t curth = current_thread();
    spl_t s = 0;

    assert(!thread_is_64bit_data(thread));

    saved_state = thread->machine.upcb;
    saved_state_32 = saved_state32(saved_state);

    state = (arm_thread_state_t *)tstate;

    if (curth != thread) {
        s = splsched();
        thread_lock(thread);
    }

    /*
     * Do not zero saved_state; it can be concurrently accessed, and zero
     * is not a valid state for some of the registers, like sp.
     */
    thread_state32_to_saved_state(state, saved_state);
    saved_state_32->cpsr = PSR64_USER32_DEFAULT;

    if (curth != thread) {
        thread_unlock(thread);
        splx(s);
    }

    return KERN_SUCCESS;
}

kern_return_t
thread_set_wq_state64(thread_t thread, thread_state_t tstate)
{
    arm_thread_state64_t *state;
    struct arm_saved_state *saved_state;
    struct arm_saved_state64 *saved_state_64;
    thread_t curth = current_thread();
    spl_t s = 0;

    assert(thread_is_64bit_data(thread));

    saved_state = thread->machine.upcb;
    saved_state_64 = saved_state64(saved_state);
    state = (arm_thread_state64_t *)tstate;

    if (curth != thread) {
        s = splsched();
        thread_lock(thread);
    }

    /*
     * Do not zero saved_state; it can be concurrently accessed, and zero
     * is not a valid state for some of the registers, like sp.
     */
    thread_state64_to_saved_state(state, saved_state);
    set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

    if (curth != thread) {
        thread_unlock(thread);
        splx(s);
    }

    return KERN_SUCCESS;
}