]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm64/status.c
5a69eabc437db5ad77fffb2a5896d49f8d4d3489
[apple/xnu.git] / osfmk / arm64 / status.c
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm64/proc_reg.h>
37
/*
 * Legacy VFPv2 floating-point state: 32 x 32-bit registers plus FPSCR.
 * Kept so old clients that pass ARM_VFPV2_STATE_COUNT (33 words) to the
 * ARM_VFP_STATE flavor keep working alongside the larger modern layout.
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t in 32-bit words (33). */
#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
/*
 * Forward definitions
 */
/* Implemented elsewhere (per-arch pcb code — TODO confirm location); set the
 * fork()/vfork() return state for the child and parent threads respectively. */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);
53
/*
 * Maps state flavor to number of words in the state:
 *
 * NOTE: the array is indexed by the numeric value of the flavor constant,
 * so entry order is load-bearing — each slot must line up with the
 * corresponding ARM_* flavor number, and unallocated flavors stay 0.
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	ARM_UNIFIED_THREAD_STATE_COUNT,
	ARM_VFP_STATE_COUNT,
	ARM_EXCEPTION_STATE_COUNT,
	ARM_DEBUG_STATE_COUNT,
	/* THREAD_STATE_NONE (legacy) */ 0,
	ARM_THREAD_STATE64_COUNT,
	ARM_EXCEPTION_STATE64_COUNT,
	/* THREAD_STATE_LAST (legacy) */ 0,
	ARM_THREAD_STATE32_COUNT,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	ARM_DEBUG_STATE32_COUNT,
	ARM_DEBUG_STATE64_COUNT,
	ARM_NEON_STATE_COUNT,
	ARM_NEON_STATE64_COUNT,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	/* ARM_SAVED_STATE32_COUNT */ 0,
	/* ARM_SAVED_STATE64_COUNT */ 0,
	/* ARM_NEON_SAVED_STATE32_COUNT */ 0,
	/* ARM_NEON_SAVED_STATE64_COUNT */ 0,
};
84
85 extern zone_t ads_zone;
86
87 #if __arm64__
88 /*
89 * Copy values from saved_state to ts64.
90 */
91 void
92 saved_state_to_thread_state64(const arm_saved_state_t *saved_state, arm_thread_state64_t *ts64)
93 {
94 uint32_t i;
95
96 assert(is_saved_state64(saved_state));
97
98 ts64->fp = get_saved_state_fp(saved_state);
99 ts64->lr = get_saved_state_lr(saved_state);
100 ts64->sp = get_saved_state_sp(saved_state);
101 ts64->pc = get_saved_state_pc(saved_state);
102 ts64->cpsr = get_saved_state_cpsr(saved_state);
103 for (i = 0; i < 29; i++) {
104 ts64->x[i] = get_saved_state_reg(saved_state, i);
105 }
106 }
107
108 /*
109 * Copy values from ts64 to saved_state
110 */
111 void
112 thread_state64_to_saved_state(const arm_thread_state64_t *ts64, arm_saved_state_t *saved_state)
113 {
114 uint32_t i;
115
116 assert(is_saved_state64(saved_state));
117
118 set_saved_state_fp(saved_state, ts64->fp);
119 set_saved_state_lr(saved_state, ts64->lr);
120 set_saved_state_sp(saved_state, ts64->sp);
121 set_saved_state_pc(saved_state, ts64->pc);
122 set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
123 for (i = 0; i < 29; i++) {
124 set_saved_state_reg(saved_state, i, ts64->x[i]);
125 }
126 }
127 #endif
128
129 kern_return_t
130 handle_get_arm32_thread_state(
131 thread_state_t tstate,
132 mach_msg_type_number_t * count,
133 const arm_saved_state_t *saved_state)
134 {
135 if (*count < ARM_THREAD_STATE32_COUNT) {
136 return KERN_INVALID_ARGUMENT;
137 }
138 if (!is_saved_state32(saved_state)) {
139 return KERN_INVALID_ARGUMENT;
140 }
141
142 (void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
143 *count = ARM_THREAD_STATE32_COUNT;
144 return KERN_SUCCESS;
145 }
146
147 kern_return_t
148 handle_get_arm64_thread_state(
149 thread_state_t tstate,
150 mach_msg_type_number_t * count,
151 const arm_saved_state_t *saved_state)
152 {
153 if (*count < ARM_THREAD_STATE64_COUNT) {
154 return KERN_INVALID_ARGUMENT;
155 }
156 if (!is_saved_state64(saved_state)) {
157 return KERN_INVALID_ARGUMENT;
158 }
159
160 (void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
161 *count = ARM_THREAD_STATE64_COUNT;
162 return KERN_SUCCESS;
163 }
164
165
/*
 * Retrieve thread state for the unified ARM_THREAD_STATE flavor.
 * Returns either a full arm_unified_thread_state_t, or — for legacy
 * callers that pass a smaller buffer — a plain 32-bit thread state.
 */
kern_return_t
handle_get_arm_thread_state(
	thread_state_t tstate,
	mach_msg_type_number_t * count,
	const arm_saved_state_t *saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	/* Zero the whole union first so the unused half carries no stale data. */
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return KERN_SUCCESS;
}
198
199 kern_return_t
200 handle_set_arm32_thread_state(
201 const thread_state_t tstate,
202 mach_msg_type_number_t count,
203 arm_saved_state_t *saved_state)
204 {
205 if (count != ARM_THREAD_STATE32_COUNT) {
206 return KERN_INVALID_ARGUMENT;
207 }
208
209 (void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
210 return KERN_SUCCESS;
211 }
212
213 kern_return_t
214 handle_set_arm64_thread_state(
215 const thread_state_t tstate,
216 mach_msg_type_number_t count,
217 arm_saved_state_t *saved_state)
218 {
219 if (count != ARM_THREAD_STATE64_COUNT) {
220 return KERN_INVALID_ARGUMENT;
221 }
222
223 (void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
224 return KERN_SUCCESS;
225 }
226
227
/*
 * Set thread state for the unified ARM_THREAD_STATE flavor. Accepts
 * either a full arm_unified_thread_state_t, or — for legacy callers
 * that pass a smaller count — a plain 32-bit thread state. The
 * embedded flavor must match the width of the target saved state.
 */
kern_return_t
handle_set_arm_thread_state(
	const thread_state_t tstate,
	mach_msg_type_number_t count,
	arm_saved_state_t *saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		/* Legacy path may only target a 32-bit saved state. */
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		/* 64-bit payload requires a 64-bit saved-state layout. */
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		/* 32-bit payload requires a 32-bit saved-state layout. */
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}
264
265 /*
266 * Translate thread state arguments to userspace representation
267 */
268
269 kern_return_t
270 machine_thread_state_convert_to_user(
271 thread_t thread,
272 thread_flavor_t flavor,
273 thread_state_t tstate,
274 mach_msg_type_number_t *count)
275 {
276 // No conversion to userspace representation on this platform
277 (void)thread; (void)flavor; (void)tstate; (void)count;
278 return KERN_SUCCESS;
279 }
280
281 /*
282 * Translate thread state arguments from userspace representation
283 */
284
285 kern_return_t
286 machine_thread_state_convert_from_user(
287 thread_t thread,
288 thread_flavor_t flavor,
289 thread_state_t tstate,
290 mach_msg_type_number_t count)
291 {
292 // No conversion from userspace representation on this platform
293 (void)thread; (void)flavor; (void)tstate; (void)count;
294 return KERN_SUCCESS;
295 }
296
297 /*
298 * Translate signal context data pointer to userspace representation
299 */
300
301 kern_return_t
302 machine_thread_siguctx_pointer_convert_to_user(
303 __assert_only thread_t thread,
304 user_addr_t *uctxp)
305 {
306 // No conversion to userspace representation on this platform
307 (void)thread; (void)uctxp;
308 return KERN_SUCCESS;
309 }
310
311 /*
312 * Translate array of function pointer syscall arguments from userspace representation
313 */
314
315 kern_return_t
316 machine_thread_function_pointers_convert_from_user(
317 __assert_only thread_t thread,
318 user_addr_t *fptrs,
319 uint32_t count)
320 {
321 // No conversion from userspace representation on this platform
322 (void)thread; (void)fptrs; (void)count;
323 return KERN_SUCCESS;
324 }
325
326 /*
327 * Routine: machine_thread_get_state
328 *
329 */
/*
 * Routine: machine_thread_get_state
 *
 * Export machine-dependent thread state for the requested flavor into
 * the caller-supplied buffer. On entry *count is the buffer capacity in
 * 32-bit words; on success it is updated to the number of words written.
 * Flavors that are width-specific are rejected when the thread's data
 * width does not match.
 */
kern_return_t
machine_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
	switch (flavor) {
	/* Enumerate the legacy set of supported flavors. */
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	/* Newer enumeration: exception/debug flavors match the thread's width. */
	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	/* Unified flavor: serves both widths (and legacy 32-bit-only callers). */
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		/* 32-bit flavor reports the syndrome in the legacy "fsr" field. */
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	/* Legacy 32-bit debug flavor (smaller than ARM_DEBUG_STATE32). */
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		/* No per-thread debug state allocated means "all zeros". */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		/* No per-thread debug state allocated means "all zeros". */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		/* No per-thread debug state allocated means "all zeros". */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		/*
		 * Accept either the full VFP buffer or the legacy VFPv2 size;
		 * anything smaller is an error.
		 */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		/* max = number of data words: 32 for VFPv2 clients, 64 otherwise. */
		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		/* Copy the registers plus one trailing status word (fpscr). */
		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));

		*count = ARM_NEON_STATE64_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
589
590
591 /*
592 * Routine: machine_thread_get_kern_state
593 *
594 */
/*
 * Routine: machine_thread_get_kern_state
 *
 * Like machine_thread_get_state, but reads the register state that was
 * saved when the kernel itself was interrupted. Only valid for the
 * current thread while a CPU interrupt save area exists.
 */
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
641
/*
 * Switch the thread's saved-state and NEON-state layouts to match the
 * task's data width (used when a task changes addressing mode). The
 * NEON state is reinitialized; the new FPCR default depends on width.
 */
void
machine_thread_switch_addrmode(thread_t thread)
{
	if (task_has_64Bit_data(thread->task)) {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
	} else {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
	}
}
669
670 extern long long arm_debug_get(void);
671
672 /*
673 * Routine: machine_thread_set_state
674 *
675 */
/*
 * Routine: machine_thread_set_state
 *
 * Install machine-dependent thread state of the given flavor from the
 * caller-supplied buffer. Width-specific flavors are rejected when the
 * thread's data width does not match. Debug-state flavors validate and
 * sanitize the breakpoint/watchpoint control words so userspace cannot
 * program privileged or context-ID-linked debug events.
 */
kern_return_t
machine_thread_set_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
#endif
	/* Exception state is read-only: validate the arguments, change nothing. */
	case ARM_EXCEPTION_STATE:{
		if (count != ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		if (count != ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	/* Legacy 32-bit debug flavor. */
	case ARM_DEBUG_STATE:
	{
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			/* Any enabled breakpoint or watchpoint keeps debug state alive. */
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}


		if (!enabled) {
			/* Nothing enabled: release the per-thread debug state, if any. */
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			/* Lazily allocate the per-thread debug state on first enable. */
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
		}

		/* Apply immediately if the target is the running thread. */
		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;

		/* Bit 0 of MDSCR_EL1 requests single stepping (see note below). */
		if (state->mdscr_el1 & 0x1) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			/* Any enabled breakpoint or watchpoint keeps debug state alive. */
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			/* Nothing enabled: release the per-thread debug state, if any. */
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			/* Lazily allocate the per-thread debug state on first enable. */
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			/* Only the single-step bit of MDSCR_EL1 is honored. */
			if (state->mdscr_el1 & 0x1) {
				thread_state->mdscr_el1 |= 0x1;
			} else {
				thread_state->mdscr_el1 &= ~0x1;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		/* Apply immediately if the target is the running thread. */
		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;

		/* Bit 0 of MDSCR_EL1 requests single stepping. */
		if (state->mdscr_el1 & 0x1) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			/* NOTE: unlike the 32-bit cases, wcr has no type field checked
			 * here — presumably DBGWCR carries no type in AArch64. */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			/* Any enabled breakpoint or watchpoint keeps debug state alive. */
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			/* Nothing enabled: release the per-thread debug state, if any. */
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			/* Lazily allocate the per-thread debug state on first enable. */
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
				thread_state = find_debug_state64(thread);
			}
			assert(NULL != thread_state);

			/* Only the single-step bit of MDSCR_EL1 is honored. */
			if (state->mdscr_el1 & 0x1) {
				thread_state->mdscr_el1 |= 0x1;
			} else {
				thread_state->mdscr_el1 &= ~0x1;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		/* Apply immediately if the target is the running thread. */
		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		/* Accept the full VFP size or the legacy VFPv2 size only. */
		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/* max = number of data words: 32 for VFPv2 clients, 64 otherwise. */
		if (count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		/* Copy the registers plus one trailing status word (fpscr). */
		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
1063
1064 /*
1065 * Routine: machine_thread_state_initialize
1066 *
1067 */
1068 kern_return_t
1069 machine_thread_state_initialize(
1070 thread_t thread)
1071 {
1072 arm_context_t *context = thread->machine.contextData;
1073
1074 /*
1075 * Should always be set up later. For a kernel thread, we don't care
1076 * about this state. For a user thread, we'll set the state up in
1077 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
1078 */
1079
1080 if (context != NULL) {
1081 bzero(&context->ss.uss, sizeof(context->ss.uss));
1082 bzero(&context->ns.uns, sizeof(context->ns.uns));
1083
1084 if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
1085 context->ns.ns_64.fpcr = FPCR_DEFAULT;
1086 } else {
1087 context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
1088 }
1089 }
1090
1091 thread->machine.DebugData = NULL;
1092
1093
1094 return KERN_SUCCESS;
1095 }
1096
1097 /*
1098 * Routine: machine_thread_dup
1099 *
1100 */
1101 kern_return_t
1102 machine_thread_dup(
1103 thread_t self,
1104 thread_t target,
1105 __unused boolean_t is_corpse)
1106 {
1107 struct arm_saved_state *self_saved_state;
1108 struct arm_saved_state *target_saved_state;
1109
1110 target->machine.cthread_self = self->machine.cthread_self;
1111 target->machine.cthread_data = self->machine.cthread_data;
1112
1113 self_saved_state = self->machine.upcb;
1114 target_saved_state = target->machine.upcb;
1115 bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
1116
1117 return KERN_SUCCESS;
1118 }
1119
1120 /*
1121 * Routine: get_user_regs
1122 *
1123 */
1124 struct arm_saved_state *
1125 get_user_regs(
1126 thread_t thread)
1127 {
1128 return thread->machine.upcb;
1129 }
1130
1131 arm_neon_saved_state_t *
1132 get_user_neon_regs(
1133 thread_t thread)
1134 {
1135 return thread->machine.uNeon;
1136 }
1137
1138 /*
1139 * Routine: find_user_regs
1140 *
1141 */
1142 struct arm_saved_state *
1143 find_user_regs(
1144 thread_t thread)
1145 {
1146 return thread->machine.upcb;
1147 }
1148
1149 /*
1150 * Routine: find_kern_regs
1151 *
1152 */
1153 struct arm_saved_state *
1154 find_kern_regs(
1155 thread_t thread)
1156 {
1157 /*
1158 * This works only for an interrupted kernel thread
1159 */
1160 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1161 return (struct arm_saved_state *) NULL;
1162 } else {
1163 return getCpuDatap()->cpu_int_state;
1164 }
1165 }
1166
1167 arm_debug_state32_t *
1168 find_debug_state32(
1169 thread_t thread)
1170 {
1171 if (thread && thread->machine.DebugData) {
1172 return &(thread->machine.DebugData->uds.ds32);
1173 } else {
1174 return NULL;
1175 }
1176 }
1177
1178 arm_debug_state64_t *
1179 find_debug_state64(
1180 thread_t thread)
1181 {
1182 if (thread && thread->machine.DebugData) {
1183 return &(thread->machine.DebugData->uds.ds64);
1184 } else {
1185 return NULL;
1186 }
1187 }
1188
1189 /*
1190 * Routine: thread_userstack
1191 *
1192 */
1193 kern_return_t
1194 thread_userstack(
1195 __unused thread_t thread,
1196 int flavor,
1197 thread_state_t tstate,
1198 unsigned int count,
1199 mach_vm_offset_t * user_stack,
1200 int *customstack,
1201 boolean_t is_64bit_data
1202 )
1203 {
1204 register_t sp;
1205
1206 switch (flavor) {
1207 case ARM_THREAD_STATE:
1208 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
1209 #if __arm64__
1210 if (is_64bit_data) {
1211 sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
1212 } else
1213 #endif
1214 {
1215 sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
1216 }
1217
1218 break;
1219 }
1220
1221 /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1222 case ARM_THREAD_STATE32:
1223 if (count != ARM_THREAD_STATE32_COUNT) {
1224 return KERN_INVALID_ARGUMENT;
1225 }
1226 if (is_64bit_data) {
1227 return KERN_INVALID_ARGUMENT;
1228 }
1229
1230 sp = ((arm_thread_state32_t *)tstate)->sp;
1231 break;
1232 #if __arm64__
1233 case ARM_THREAD_STATE64:
1234 if (count != ARM_THREAD_STATE64_COUNT) {
1235 return KERN_INVALID_ARGUMENT;
1236 }
1237 if (!is_64bit_data) {
1238 return KERN_INVALID_ARGUMENT;
1239 }
1240
1241 sp = ((arm_thread_state32_t *)tstate)->sp;
1242 break;
1243 #endif
1244 default:
1245 return KERN_INVALID_ARGUMENT;
1246 }
1247
1248 if (sp) {
1249 *user_stack = CAST_USER_ADDR_T(sp);
1250 if (customstack) {
1251 *customstack = 1;
1252 }
1253 } else {
1254 *user_stack = CAST_USER_ADDR_T(USRSTACK64);
1255 if (customstack) {
1256 *customstack = 0;
1257 }
1258 }
1259
1260 return KERN_SUCCESS;
1261 }
1262
1263 /*
1264 * thread_userstackdefault:
1265 *
1266 * Return the default stack location for the
1267 * thread, if otherwise unknown.
1268 */
1269 kern_return_t
1270 thread_userstackdefault(
1271 mach_vm_offset_t *default_user_stack,
1272 boolean_t is64bit)
1273 {
1274 if (is64bit) {
1275 *default_user_stack = USRSTACK64;
1276 } else {
1277 *default_user_stack = USRSTACK;
1278 }
1279
1280 return KERN_SUCCESS;
1281 }
1282
1283 /*
1284 * Routine: thread_setuserstack
1285 *
1286 */
1287 void
1288 thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
1289 {
1290 struct arm_saved_state *sv;
1291
1292 sv = get_user_regs(thread);
1293
1294 set_saved_state_sp(sv, user_stack);
1295
1296 return;
1297 }
1298
1299 /*
1300 * Routine: thread_adjuserstack
1301 *
1302 */
1303 uint64_t
1304 thread_adjuserstack(thread_t thread, int adjust)
1305 {
1306 struct arm_saved_state *sv;
1307 uint64_t sp;
1308
1309 sv = get_user_regs(thread);
1310
1311 sp = get_saved_state_sp(sv);
1312 sp += adjust;
1313 set_saved_state_sp(sv, sp);;
1314
1315 return sp;
1316 }
1317
1318 /*
1319 * Routine: thread_setentrypoint
1320 *
1321 */
1322 void
1323 thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
1324 {
1325 struct arm_saved_state *sv;
1326
1327 sv = get_user_regs(thread);
1328
1329 set_saved_state_pc(sv, entry);
1330
1331 return;
1332 }
1333
1334 /*
1335 * Routine: thread_entrypoint
1336 *
1337 */
1338 kern_return_t
1339 thread_entrypoint(
1340 __unused thread_t thread,
1341 int flavor,
1342 thread_state_t tstate,
1343 unsigned int count __unused,
1344 mach_vm_offset_t * entry_point
1345 )
1346 {
1347 switch (flavor) {
1348 case ARM_THREAD_STATE:
1349 {
1350 struct arm_thread_state *state;
1351
1352 state = (struct arm_thread_state *) tstate;
1353
1354 /*
1355 * If a valid entry point is specified, use it.
1356 */
1357 if (state->pc) {
1358 *entry_point = CAST_USER_ADDR_T(state->pc);
1359 } else {
1360 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
1361 }
1362 }
1363 break;
1364
1365 case ARM_THREAD_STATE64:
1366 {
1367 struct arm_thread_state64 *state;
1368
1369 state = (struct arm_thread_state64*) tstate;
1370
1371 /*
1372 * If a valid entry point is specified, use it.
1373 */
1374 if (state->pc) {
1375 *entry_point = CAST_USER_ADDR_T(state->pc);
1376 } else {
1377 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
1378 }
1379
1380 break;
1381 }
1382 default:
1383 return KERN_INVALID_ARGUMENT;
1384 }
1385
1386 return KERN_SUCCESS;
1387 }
1388
1389
1390 /*
1391 * Routine: thread_set_child
1392 *
1393 */
1394 void
1395 thread_set_child(
1396 thread_t child,
1397 int pid)
1398 {
1399 struct arm_saved_state *child_state;
1400
1401 child_state = get_user_regs(child);
1402
1403 set_saved_state_reg(child_state, 0, pid);
1404 set_saved_state_reg(child_state, 1, 1ULL);
1405 }
1406
1407
1408 /*
1409 * Routine: thread_set_parent
1410 *
1411 */
1412 void
1413 thread_set_parent(
1414 thread_t parent,
1415 int pid)
1416 {
1417 struct arm_saved_state *parent_state;
1418
1419 parent_state = get_user_regs(parent);
1420
1421 set_saved_state_reg(parent_state, 0, pid);
1422 set_saved_state_reg(parent_state, 1, 0);
1423 }
1424
1425
/*
 * Snapshot of a thread's user register state, captured by
 * act_thread_csave() and reapplied/freed by act_thread_catt()
 * or act_thread_cfree().
 */
struct arm_act_context {
	struct arm_unified_thread_state ss;     /* integer/control registers */
#if __ARM_VFP__
	struct arm_neon_saved_state ns;         /* NEON/FP registers */
#endif
};
1432
1433 /*
1434 * Routine: act_thread_csave
1435 *
1436 */
1437 void *
1438 act_thread_csave(void)
1439 {
1440 struct arm_act_context *ic;
1441 kern_return_t kret;
1442 unsigned int val;
1443 thread_t thread = current_thread();
1444
1445 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
1446 if (ic == (struct arm_act_context *) NULL) {
1447 return (void *) 0;
1448 }
1449
1450 val = ARM_UNIFIED_THREAD_STATE_COUNT;
1451 kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
1452 if (kret != KERN_SUCCESS) {
1453 kfree(ic, sizeof(struct arm_act_context));
1454 return (void *) 0;
1455 }
1456
1457 #if __ARM_VFP__
1458 if (thread_is_64bit_data(thread)) {
1459 val = ARM_NEON_STATE64_COUNT;
1460 kret = machine_thread_get_state(thread,
1461 ARM_NEON_STATE64,
1462 (thread_state_t) &ic->ns,
1463 &val);
1464 } else {
1465 val = ARM_NEON_STATE_COUNT;
1466 kret = machine_thread_get_state(thread,
1467 ARM_NEON_STATE,
1468 (thread_state_t) &ic->ns,
1469 &val);
1470 }
1471 if (kret != KERN_SUCCESS) {
1472 kfree(ic, sizeof(struct arm_act_context));
1473 return (void *) 0;
1474 }
1475 #endif
1476 return ic;
1477 }
1478
/*
 * Routine: act_thread_catt
 *
 * Reapply a register-state snapshot captured by act_thread_csave()
 * to the current thread, then free the snapshot.  A failing
 * set-state is silently abandoned; the snapshot is freed either way.
 */
void
act_thread_catt(void *ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) ctx;
	if (ic == (struct arm_act_context *) NULL) {
		return;
	}

	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}

#if __ARM_VFP__
	/* Restore the NEON flavor matching the thread's data width. */
	if (thread_is_64bit_data(thread)) {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t) &ic->ns,
		    ARM_NEON_STATE64_COUNT);
	} else {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t) &ic->ns,
		    ARM_NEON_STATE_COUNT);
	}
	if (kret != KERN_SUCCESS) {
		goto out;
	}
#endif
out:
	kfree(ic, sizeof(struct arm_act_context));
}
1519
/*
 * Routine: act_thread_cfree
 *
 * Free a snapshot produced by act_thread_csave() without applying
 * it.  (Header previously misnamed this routine act_thread_catt.)
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}
1529
/*
 * Install a caller-supplied 32-bit register state on a workqueue
 * thread.  When targeting a thread other than the caller, the thread
 * lock is taken at splsched so the state is not observed half-written.
 * The saved CPSR is always forced to the safe user-mode default.
 */
kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	/* Only lock when mutating some other thread's state. */
	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	/* Never trust the caller-provided PSR; force the user default. */
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
1566
/*
 * Install a caller-supplied 64-bit register state on a workqueue
 * thread.  When targeting a thread other than the caller, the thread
 * lock is taken at splsched so the state is not observed half-written.
 * The saved CPSR is always forced to the safe user-mode default.
 */
kern_return_t
thread_set_wq_state64(thread_t thread, thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	/* Only lock when mutating some other thread's state. */
	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state64_to_saved_state(state, saved_state);
	/* Never trust the caller-provided PSR; force the user default. */
	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}