/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <debug.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm64/proc_reg.h>

struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))

/*
 * Forward definitions
 */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);

/*
 * Maps state flavor to number of words in the state:
 */
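/*
 * A zero entry marks a flavor with no fixed-size user-visible state:
 * the list/none placeholders, unallocated slots, and the kernel-internal
 * saved-state flavors.
 */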
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	ARM_UNIFIED_THREAD_STATE_COUNT,
	ARM_VFP_STATE_COUNT,
	ARM_EXCEPTION_STATE_COUNT,
	ARM_DEBUG_STATE_COUNT,
	/* THREAD_STATE_NONE (legacy) */ 0,
	ARM_THREAD_STATE64_COUNT,
	ARM_EXCEPTION_STATE64_COUNT,
	/* THREAD_STATE_LAST (legacy) */ 0,
	ARM_THREAD_STATE32_COUNT,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	ARM_DEBUG_STATE32_COUNT,
	ARM_DEBUG_STATE64_COUNT,
	ARM_NEON_STATE_COUNT,
	ARM_NEON_STATE64_COUNT,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	/* ARM_SAVED_STATE32_COUNT */ 0,
	/* ARM_SAVED_STATE64_COUNT */ 0,
	/* ARM_NEON_SAVED_STATE32_COUNT */ 0,
	/* ARM_NEON_SAVED_STATE64_COUNT */ 0,
};

extern zone_t ads_zone;

#if __arm64__
/*
 * Copy values from saved_state to ts64.
 */
void
saved_state_to_thread_state64(const arm_saved_state_t *saved_state, arm_thread_state64_t *ts64)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	ts64->fp = get_saved_state_fp(saved_state);
	ts64->lr = get_saved_state_lr(saved_state);
	ts64->sp = get_saved_state_sp(saved_state);
	ts64->pc = get_saved_state_pc(saved_state);
	ts64->cpsr = get_saved_state_cpsr(saved_state);
	for (i = 0; i < 29; i++)
		ts64->x[i] = get_saved_state_reg(saved_state, i);
}

/*
 * Copy values from ts64 to saved_state.
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t *ts64, arm_saved_state_t *saved_state)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
	for (i = 0; i < 29; i++)
		set_saved_state_reg(saved_state, i, ts64->x[i]);
}
#endif

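/*
 * Copy out a 32-bit thread state.  Fails if the caller's buffer is too
 * small or if the thread's saved state is not 32-bit.
 */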
kern_return_t
handle_get_arm32_thread_state(
	thread_state_t tstate,
	mach_msg_type_number_t * count,
	const arm_saved_state_t *saved_state)
{
	if (*count < ARM_THREAD_STATE32_COUNT)
		return (KERN_INVALID_ARGUMENT);
	if (!is_saved_state32(saved_state))
		return (KERN_INVALID_ARGUMENT);

	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
	*count = ARM_THREAD_STATE32_COUNT;
	return KERN_SUCCESS;
}

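/*
 * Copy out a 64-bit thread state.  Fails if the caller's buffer is too
 * small or if the thread's saved state is not 64-bit.
 */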
kern_return_t
handle_get_arm64_thread_state(
	thread_state_t tstate,
	mach_msg_type_number_t * count,
	const arm_saved_state_t *saved_state)
{
	if (*count < ARM_THREAD_STATE64_COUNT)
		return (KERN_INVALID_ARGUMENT);
	if (!is_saved_state64(saved_state))
		return (KERN_INVALID_ARGUMENT);

	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
	*count = ARM_THREAD_STATE64_COUNT;
	return KERN_SUCCESS;
}


kern_return_t
handle_get_arm_thread_state(
	thread_state_t tstate,
	mach_msg_type_number_t * count,
	const arm_saved_state_t *saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return (KERN_SUCCESS);
}

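/*
 * Install a user-provided 32-bit thread state into saved_state; count
 * must be exactly ARM_THREAD_STATE32_COUNT.
 */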
kern_return_t
handle_set_arm32_thread_state(
	const thread_state_t tstate,
	mach_msg_type_number_t count,
	arm_saved_state_t *saved_state)
{
	if (count != ARM_THREAD_STATE32_COUNT)
		return (KERN_INVALID_ARGUMENT);

	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
	return KERN_SUCCESS;
}

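/*
 * Install a user-provided 64-bit thread state into saved_state; count
 * must be exactly ARM_THREAD_STATE64_COUNT.
 */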
kern_return_t
handle_set_arm64_thread_state(
	const thread_state_t tstate,
	mach_msg_type_number_t count,
	arm_saved_state_t *saved_state)
{
	if (count != ARM_THREAD_STATE64_COUNT)
		return (KERN_INVALID_ARGUMENT);

	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
	return KERN_SUCCESS;
}


kern_return_t
handle_set_arm_thread_state(
	const thread_state_t tstate,
	mach_msg_type_number_t count,
	arm_saved_state_t *saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		if (!is_saved_state32(saved_state)) {
			return (KERN_INVALID_ARGUMENT);
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		if (!is_saved_state64(saved_state)) {
			return (KERN_INVALID_ARGUMENT);
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		if (!is_saved_state32(saved_state)) {
			return (KERN_INVALID_ARGUMENT);
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return (KERN_SUCCESS);
}

/*
 * Translate thread state arguments to userspace representation
 */

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
}

/*
 * Translate thread state arguments from userspace representation
 */

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
}

/*
 * Translate signal context data pointer to userspace representation
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__assert_only thread_t thread,
	user_addr_t *uctxp)
{
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
}

/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
	__assert_only thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
}

/*
 * Routine: machine_thread_get_state
 *
 */
kern_return_t
machine_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread))
			return KERN_INVALID_ARGUMENT;

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread))
			return KERN_INVALID_ARGUMENT;

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (!thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL)
			bzero(state, sizeof(arm_legacy_debug_state_t));
		else
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL)
			bzero(state, sizeof(arm_debug_state32_t));
		else
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL)
			bzero(state, sizeof(arm_debug_state64_t));
		else
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);
			else
				*count = ARM_VFPV2_STATE_COUNT;
		}

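		/*
		 * Legacy VFPv2 clients pass a 33-word buffer (32 registers
		 * plus fpscr); newer clients pass the full 65-word
		 * ARM_VFP_STATE layout.
		 */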
		if (*count == ARM_VFPV2_STATE_COUNT)
			max = 32;
		else
			max = 64;

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1)*sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));

		*count = ARM_NEON_STATE64_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}


/*
 * Routine: machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) return rn;
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) return rn;
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) return rn;
		break;
	}
#endif
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

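/*
 * Retag the thread's saved-state and NEON headers to match the task's
 * address width, and reinitialize the NEON state to its default.
 */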
void
machine_thread_switch_addrmode(thread_t thread)
{
	if (task_has_64Bit_data(thread->task)) {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
	} else {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
	}
}

extern long long arm_debug_get(void);

/*
 * Routine: machine_thread_set_state
 *
 */
kern_return_t
machine_thread_set_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;
#endif
	case ARM_EXCEPTION_STATE:{

		if (count != ARM_EXCEPTION_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		break;
	}
	case ARM_EXCEPTION_STATE64:{

		if (count != ARM_EXCEPTION_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (!thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		break;
	}
	case ARM_DEBUG_STATE:
	{
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy clients issuing ARM_DEBUG_STATE don't drive single stepping.
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state32_t *) tstate;

		if (state->mdscr_el1 & 0x1)
			enabled = TRUE;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1)
				thread_state->mdscr_el1 |= 0x1;
			else
				thread_state->mdscr_el1 &= ~0x1;

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (!thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state64_t *) tstate;

		if (state->mdscr_el1 & 0x1)
			enabled = TRUE;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
				thread_state = find_debug_state64(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1)
				thread_state->mdscr_el1 |= 0x1;
			else
				thread_state->mdscr_el1 &= ~0x1;

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (count == ARM_VFPV2_STATE_COUNT)
			max = 32;
		else
			max = 64;

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		bcopy(state, thread_state, (max + 1)*sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit_data(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

/*
 * Routine: machine_thread_state_initialize
 *
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
	}

	thread->machine.DebugData = NULL;

	return KERN_SUCCESS;
}

/*
 * Routine: machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(
	thread_t self,
	thread_t target,
	__unused boolean_t is_corpse)
{
	struct arm_saved_state *self_saved_state;
	struct arm_saved_state *target_saved_state;

	target->machine.cthread_self = self->machine.cthread_self;
	target->machine.cthread_data = self->machine.cthread_data;

	self_saved_state = self->machine.upcb;
	target_saved_state = target->machine.upcb;
	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));

	return (KERN_SUCCESS);
}

/*
 * Routine: get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(
	thread_t thread)
{
	return (thread->machine.upcb);
}

arm_neon_saved_state_t *
get_user_neon_regs(
	thread_t thread)
{
	return (thread->machine.uNeon);
}

/*
 * Routine: find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(
	thread_t thread)
{
	return (thread->machine.upcb);
}

/*
 * Routine: find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(
	thread_t thread)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
		return ((struct arm_saved_state *) NULL);
	else
		return (getCpuDatap()->cpu_int_state);
}

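/*
 * Return a pointer to the thread's lazily allocated debug state, or NULL
 * if none has been allocated yet.
 */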
arm_debug_state32_t *
find_debug_state32(
	thread_t thread)
{
	if (thread && thread->machine.DebugData)
		return &(thread->machine.DebugData->uds.ds32);
	else
		return NULL;
}

arm_debug_state64_t *
find_debug_state64(
	thread_t thread)
{
	if (thread && thread->machine.DebugData)
		return &(thread->machine.DebugData->uds.ds64);
	else
		return NULL;
}

/*
 * Routine: thread_userstack
 *
 */
kern_return_t
thread_userstack(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	unsigned int count,
	mach_vm_offset_t * user_stack,
	int *customstack,
	boolean_t is_64bit_data
	)
{
	register_t sp;

	switch (flavor) {
	case ARM_THREAD_STATE:
		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
#if __arm64__
			if (is_64bit_data) {
				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
			} else
#endif
			{
				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
			}

			break;
		}

		/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
	case ARM_THREAD_STATE32:
		if (count != ARM_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (is_64bit_data)
			return (KERN_INVALID_ARGUMENT);

		sp = ((arm_thread_state32_t *)tstate)->sp;
		break;
#if __arm64__
	case ARM_THREAD_STATE64:
		if (count != ARM_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (!is_64bit_data)
			return (KERN_INVALID_ARGUMENT);

		sp = ((arm_thread_state64_t *)tstate)->sp;
		break;
#endif
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	if (sp) {
		*user_stack = CAST_USER_ADDR_T(sp);
		if (customstack)
			*customstack = 1;
	} else {
		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
		if (customstack)
			*customstack = 0;
	}

	return (KERN_SUCCESS);
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
	mach_vm_offset_t *default_user_stack,
	boolean_t is64bit)
{
	if (is64bit) {
		*default_user_stack = USRSTACK64;
	} else {
		*default_user_stack = USRSTACK;
	}

	return (KERN_SUCCESS);
}

/*
 * Routine: thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_sp(sv, user_stack);

	return;
}

/*
 * Routine: thread_adjuserstack
 *
 */
uint64_t
thread_adjuserstack(thread_t thread, int adjust)
{
	struct arm_saved_state *sv;
	uint64_t sp;

	sv = get_user_regs(thread);

	sp = get_saved_state_sp(sv);
	sp += adjust;
	set_saved_state_sp(sv, sp);

	return sp;
}

/*
 * Routine: thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_pc(sv, entry);

	return;
}

/*
 * Routine: thread_entrypoint
 *
 */
kern_return_t
thread_entrypoint(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	unsigned int count __unused,
	mach_vm_offset_t * entry_point
	)
{
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		struct arm_thread_state *state;

		state = (struct arm_thread_state *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}
	}
	break;

	case ARM_THREAD_STATE64:
	{
		struct arm_thread_state64 *state;

		state = (struct arm_thread_state64 *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}

		break;
	}
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}


/*
 * Routine: thread_set_child
 *
 */
void
thread_set_child(
	thread_t child,
	int pid)
{
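	/*
	 * Fork return convention: the child's registers are set to
	 * (x0/r0, x1/r1) = (pid, 1); thread_set_parent() below sets the
	 * parent's to (pid, 0).
	 */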
	struct arm_saved_state *child_state;

	child_state = get_user_regs(child);

	set_saved_state_reg(child_state, 0, pid);
	set_saved_state_reg(child_state, 1, 1ULL);
}


/*
 * Routine: thread_set_parent
 *
 */
void
thread_set_parent(
	thread_t parent,
	int pid)
{
	struct arm_saved_state *parent_state;

	parent_state = get_user_regs(parent);

	set_saved_state_reg(parent_state, 0, pid);
	set_saved_state_reg(parent_state, 1, 0);
}

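/*
 * Per-thread context captured by act_thread_csave() and reapplied by
 * act_thread_catt().
 */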
struct arm_act_context {
	struct arm_unified_thread_state ss;
#if __ARM_VFP__
	struct arm_neon_saved_state ns;
#endif
};

/*
 * Routine: act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	unsigned int val;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
	if (ic == (struct arm_act_context *) NULL)
		return ((void *) 0);

	val = ARM_UNIFIED_THREAD_STATE_COUNT;
	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return ((void *) 0);
	}

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		val = ARM_NEON_STATE64_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    &val);
	} else {
		val = ARM_NEON_STATE_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    &val);
	}
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return ((void *) 0);
	}
#endif
	return (ic);
}

/*
 * Routine: act_thread_catt
 *
 */
void
act_thread_catt(void *ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) ctx;
	if (ic == (struct arm_act_context *) NULL)
		return;

	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS)
		goto out;

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE64_COUNT);
	} else {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE_COUNT);
	}
	if (kret != KERN_SUCCESS)
		goto out;
#endif
out:
	kfree(ic, sizeof(struct arm_act_context));
}

/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}

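/*
 * Install the register state of a 32-bit workqueue thread without
 * zeroing its saved state; thread_set_wq_state64() below is the 64-bit
 * counterpart.
 */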
kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * Do not zero saved_state; it can be accessed concurrently, and
	 * zero is not a valid value for some of the registers (e.g. sp).
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}

kern_return_t
thread_set_wq_state64(thread_t thread, thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * Do not zero saved_state; it can be accessed concurrently, and
	 * zero is not a valid value for some of the registers (e.g. sp).
	 */
	thread_state64_to_saved_state(state, saved_state);
	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}