/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <debug.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm64/proc_reg.h>

struct arm_vfpv2_state
{
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
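
/*
 * ARM_VFPV2_STATE_COUNT is 33 32-bit words: the 32 VFPv2 data registers
 * plus FPSCR.  The ARM_VFP_STATE handlers below accept either this legacy
 * count or the full ARM_VFP_STATE_COUNT (64 registers plus FPSCR).
 */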

/*
 * Forward definitions
 */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);

/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	ARM_UNIFIED_THREAD_STATE_COUNT,
	ARM_VFP_STATE_COUNT,
	ARM_EXCEPTION_STATE_COUNT,
	ARM_DEBUG_STATE_COUNT,
	/* THREAD_STATE_NONE (legacy) */ 0,
	ARM_THREAD_STATE64_COUNT,
	ARM_EXCEPTION_STATE64_COUNT,
	/* THREAD_STATE_LAST (legacy) */ 0,
	ARM_THREAD_STATE32_COUNT,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	ARM_DEBUG_STATE32_COUNT,
	ARM_DEBUG_STATE64_COUNT,
	ARM_NEON_STATE_COUNT,
	ARM_NEON_STATE64_COUNT,
	/* UNALLOCATED */ 0,
	/* UNALLOCATED */ 0,
	/* ARM_SAVED_STATE32_COUNT */ 0,
	/* ARM_SAVED_STATE64_COUNT */ 0,
	/* ARM_NEON_SAVED_STATE32_COUNT */ 0,
	/* ARM_NEON_SAVED_STATE64_COUNT */ 0,
};
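
/*
 * The array is indexed by thread-state flavor, so, for example,
 * _MachineStateCount[ARM_THREAD_STATE64] == ARM_THREAD_STATE64_COUNT.
 * Entries of 0 mark flavors that are unallocated or carry no state.
 */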

extern zone_t ads_zone;

#if __arm64__
/*
 * Copy values from saved_state to ts64.
 */
void
saved_state_to_thread_state64(const arm_saved_state_t *saved_state, arm_thread_state64_t *ts64)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	ts64->fp = get_saved_state_fp(saved_state);
	ts64->lr = get_saved_state_lr(saved_state);
	ts64->sp = get_saved_state_sp(saved_state);
	ts64->pc = get_saved_state_pc(saved_state);
	ts64->cpsr = get_saved_state_cpsr(saved_state);
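	/* x0-x28; x29 (fp) and x30 (lr) are copied above */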
	for (i = 0; i < 29; i++)
		ts64->x[i] = get_saved_state_reg(saved_state, i);
}

/*
 * Copy values from ts64 to saved_state
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t *ts64, arm_saved_state_t *saved_state)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
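	/* Mask out the caller-supplied mode bits and force the saved CPSR
	 * to 64-bit user mode, so a client cannot smuggle in a privileged
	 * or AArch32 CPSR value. */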
	set_saved_state_cpsr(saved_state, (ts64->cpsr & ~PSR64_MODE_MASK) | PSR64_MODE_RW_64);
	for (i = 0; i < 29; i++)
		set_saved_state_reg(saved_state, i, ts64->x[i]);
}
#endif

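/*
 * Routine: handle_get_arm32_thread_state
 * Copy a 32-bit thread's saved state out to tstate, after checking
 * that the buffer is large enough and the saved state is AArch32.
 */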
kern_return_t
handle_get_arm32_thread_state(
	thread_state_t tstate,
	mach_msg_type_number_t *count,
	const arm_saved_state_t *saved_state)
{
	if (*count < ARM_THREAD_STATE32_COUNT)
		return (KERN_INVALID_ARGUMENT);
	if (!is_saved_state32(saved_state))
		return (KERN_INVALID_ARGUMENT);

	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
	*count = ARM_THREAD_STATE32_COUNT;
	return KERN_SUCCESS;
}

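/*
 * Routine: handle_get_arm64_thread_state
 * Copy a 64-bit thread's saved state out to tstate, after checking
 * that the buffer is large enough and the saved state is AArch64.
 */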
kern_return_t
handle_get_arm64_thread_state(
	thread_state_t tstate,
	mach_msg_type_number_t *count,
	const arm_saved_state_t *saved_state)
{
	if (*count < ARM_THREAD_STATE64_COUNT)
		return (KERN_INVALID_ARGUMENT);
	if (!is_saved_state64(saved_state))
		return (KERN_INVALID_ARGUMENT);

	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
	*count = ARM_THREAD_STATE64_COUNT;
	return KERN_SUCCESS;
}

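/*
 * Routine: handle_get_arm_thread_state
 * Copy out thread state using the unified (32/64-bit) structure,
 * falling back to the plain 32-bit layout for legacy callers.
 */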
kern_return_t
handle_get_arm_thread_state(
	thread_state_t tstate,
	mach_msg_type_number_t *count,
	const arm_saved_state_t *saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return (KERN_SUCCESS);
}

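/*
 * Routine: handle_set_arm32_thread_state
 * Install a 32-bit thread state into saved_state; the count must match
 * ARM_THREAD_STATE32_COUNT exactly.
 */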
kern_return_t
handle_set_arm32_thread_state(
	const thread_state_t tstate,
	mach_msg_type_number_t count,
	arm_saved_state_t *saved_state)
{
	if (count != ARM_THREAD_STATE32_COUNT)
		return (KERN_INVALID_ARGUMENT);

	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
	return KERN_SUCCESS;
}

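/*
 * Routine: handle_set_arm64_thread_state
 * Install a 64-bit thread state into saved_state; the count must match
 * ARM_THREAD_STATE64_COUNT exactly.
 */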
kern_return_t
handle_set_arm64_thread_state(
	const thread_state_t tstate,
	mach_msg_type_number_t count,
	arm_saved_state_t *saved_state)
{
	if (count != ARM_THREAD_STATE64_COUNT)
		return (KERN_INVALID_ARGUMENT);

	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
	return KERN_SUCCESS;
}

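/*
 * Routine: handle_set_arm_thread_state
 * Install thread state from the unified (32/64-bit) structure, falling
 * back to the plain 32-bit layout for legacy callers.
 */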
kern_return_t
handle_set_arm_thread_state(
	const thread_state_t tstate,
	mach_msg_type_number_t count,
	arm_saved_state_t *saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return (KERN_SUCCESS);
}

/*
 * Routine: machine_thread_get_state
 *
 */
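/*
 * Illustrative only (userspace code, not part of this file): a client
 * typically reaches this routine through the Mach thread_get_state()
 * call, assuming it holds a valid thread port:
 *
 *	arm_thread_state64_t ts;
 *	mach_msg_type_number_t cnt = ARM_THREAD_STATE64_COUNT;
 *	kern_return_t kr = thread_get_state(thread_port, ARM_THREAD_STATE64,
 *	    (thread_state_t)&ts, &cnt);
 *
 * On success, cnt holds the number of 32-bit words actually returned.
 */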
kern_return_t
machine_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit(thread))
			return KERN_INVALID_ARGUMENT;

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit(thread))
			return KERN_INVALID_ARGUMENT;

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (!thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL)
			bzero(state, sizeof(arm_legacy_debug_state_t));
		else
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL)
			bzero(state, sizeof(arm_debug_state32_t));
		else
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL)
			bzero(state, sizeof(arm_debug_state64_t));
		else
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);
			else
				*count = ARM_VFPV2_STATE_COUNT;
		}

		if (*count == ARM_VFPV2_STATE_COUNT)
			max = 32;
		else
			max = 64;

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1)*sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));

		*count = ARM_NEON_STATE64_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

/*
 * Routine: machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) return rn;
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) return rn;
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) return rn;
		break;
	}
#endif
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

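/*
 * Routine: machine_thread_switch_addrmode
 * Reset the saved-state and NEON flavors to match the address width of
 * the thread's task, reinitializing the NEON state in the process.
 */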
void
machine_thread_switch_addrmode(thread_t thread)
{
	if (task_has_64BitAddr(thread->task)) {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
	} else {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
	}
}

extern long long arm_debug_get(void);

/*
 * Routine: machine_thread_set_state
 *
 */
kern_return_t
machine_thread_set_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) return rn;
		break;
#endif
	case ARM_EXCEPTION_STATE:{
		if (count != ARM_EXCEPTION_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		if (count != ARM_EXCEPTION_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (!thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		break;
	}
	case ARM_DEBUG_STATE:
	{
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy clients issuing ARM_DEBUG_STATE don't drive single stepping.
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state32_t *) tstate;

		if (state->mdscr_el1 & 0x1)
			enabled = TRUE;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state32_t *thread_state = find_debug_state32(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
				thread_state = find_debug_state32(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1)
				thread_state->mdscr_el1 |= 0x1;
			else
				thread_state->mdscr_el1 &= ~0x1;

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (!thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state64_t *) tstate;

		if (state->mdscr_el1 & 0x1)
			enabled = TRUE;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state != NULL) {
				void *pTmp = thread->machine.DebugData;
				thread->machine.DebugData = NULL;
				zfree(ads_zone, pTmp);
			}
		} else {
			arm_debug_state64_t *thread_state = find_debug_state64(thread);
			if (thread_state == NULL) {
				thread->machine.DebugData = zalloc(ads_zone);
				bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
				thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
				thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
				thread_state = find_debug_state64(thread);
			}
			assert(NULL != thread_state);

			if (state->mdscr_el1 & 0x1)
				thread_state->mdscr_el1 |= 0x1;
			else
				thread_state->mdscr_el1 &= ~0x1;

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (count == ARM_VFPV2_STATE_COUNT)
			max = 32;
		else
			max = 64;

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		bcopy(state, thread_state, (max + 1)*sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);

		if (!thread_is_64bit(thread))
			return (KERN_INVALID_ARGUMENT);

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}

/*
 * Routine: machine_thread_state_initialize
 *
 */
kern_return_t
machine_thread_state_initialize(
	thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
	}

	thread->machine.DebugData = NULL;

	return KERN_SUCCESS;
}

/*
 * Routine: machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(
	thread_t self,
	thread_t target)
{
	struct arm_saved_state *self_saved_state;
	struct arm_saved_state *target_saved_state;

	target->machine.cthread_self = self->machine.cthread_self;
	target->machine.cthread_data = self->machine.cthread_data;

	self_saved_state = self->machine.upcb;
	target_saved_state = target->machine.upcb;
	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));

	return (KERN_SUCCESS);
}

/*
 * Routine: get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(
	thread_t thread)
{
	return (thread->machine.upcb);
}

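/*
 * Routine: get_user_neon_regs
 * Return the thread's saved NEON/FP register state.
 */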
arm_neon_saved_state_t *
get_user_neon_regs(
	thread_t thread)
{
	return (thread->machine.uNeon);
}

/*
 * Routine: find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(
	thread_t thread)
{
	return (thread->machine.upcb);
}

/*
 * Routine: find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(
	thread_t thread)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
		return ((struct arm_saved_state *) NULL);
	else
		return (getCpuDatap()->cpu_int_state);
}

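/*
 * Routines: find_debug_state32 / find_debug_state64
 * Return the thread's lazily allocated debug state, or NULL if no
 * hardware breakpoints or watchpoints have been set.
 */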
arm_debug_state32_t *
find_debug_state32(
	thread_t thread)
{
	if (thread && thread->machine.DebugData)
		return &(thread->machine.DebugData->uds.ds32);
	else
		return NULL;
}

arm_debug_state64_t *
find_debug_state64(
	thread_t thread)
{
	if (thread && thread->machine.DebugData)
		return &(thread->machine.DebugData->uds.ds64);
	else
		return NULL;
}

/*
 * Routine: thread_userstack
 *
 */
kern_return_t
thread_userstack(
	thread_t thread,
	int flavor,
	thread_state_t tstate,
	unsigned int count,
	mach_vm_offset_t *user_stack,
	int *customstack,
	boolean_t is64bit
)
{
	register_t sp;

	switch (flavor) {
	case ARM_THREAD_STATE:
		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
#if __arm64__
			if (thread_is_64bit(thread)) {
				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
			} else
#endif
			{
				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
			}

			break;
		}

		/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
	case ARM_THREAD_STATE32:
		if (count != ARM_THREAD_STATE32_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (is64bit)
			return (KERN_INVALID_ARGUMENT);

		sp = ((arm_thread_state32_t *)tstate)->sp;
		break;
#if __arm64__
	case ARM_THREAD_STATE64:
		if (count != ARM_THREAD_STATE64_COUNT)
			return (KERN_INVALID_ARGUMENT);
		if (!is64bit)
			return (KERN_INVALID_ARGUMENT);

		sp = ((arm_thread_state64_t *)tstate)->sp;
		break;
#endif
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	if (sp) {
		*user_stack = CAST_USER_ADDR_T(sp);
		if (customstack)
			*customstack = 1;
	} else {
		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
		if (customstack)
			*customstack = 0;
	}

	return (KERN_SUCCESS);
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
	mach_vm_offset_t *default_user_stack,
	boolean_t is64bit)
{
	if (is64bit) {
		*default_user_stack = USRSTACK64;
	} else {
		*default_user_stack = USRSTACK;
	}

	return (KERN_SUCCESS);
}

/*
 * Routine: thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_sp(sv, user_stack);

	return;
}

/*
 * Routine: thread_adjuserstack
 *
 */
uint64_t
thread_adjuserstack(thread_t thread, int adjust)
{
	struct arm_saved_state *sv;
	uint64_t sp;

	sv = get_user_regs(thread);

	sp = get_saved_state_sp(sv);
	sp += adjust;
	set_saved_state_sp(sv, sp);

	return sp;
}

/*
 * Routine: thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_pc(sv, entry);

	return;
}

/*
 * Routine: thread_entrypoint
 *
 */
kern_return_t
thread_entrypoint(
	__unused thread_t thread,
	int flavor,
	thread_state_t tstate,
	unsigned int count __unused,
	mach_vm_offset_t *entry_point
)
{
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		struct arm_thread_state *state;

		state = (struct arm_thread_state *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}
	}
	break;

	case ARM_THREAD_STATE64:
	{
		struct arm_thread_state64 *state;

		state = (struct arm_thread_state64 *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}

		break;
	}
	default:
		return (KERN_INVALID_ARGUMENT);
	}

	return (KERN_SUCCESS);
}

/*
 * Routine: thread_set_child
 *
 */
void
thread_set_child(
	thread_t child,
	int pid)
{
	struct arm_saved_state *child_state;

	child_state = get_user_regs(child);

	set_saved_state_reg(child_state, 0, pid);
	set_saved_state_reg(child_state, 1, 1ULL);
}

/*
 * Routine: thread_set_parent
 *
 */
void
thread_set_parent(
	thread_t parent,
	int pid)
{
	struct arm_saved_state *parent_state;

	parent_state = get_user_regs(parent);

	set_saved_state_reg(parent_state, 0, pid);
	set_saved_state_reg(parent_state, 1, 0);
}

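/*
 * arm_act_context captures a snapshot of a thread's general-purpose
 * (and, with VFP, NEON) state for act_thread_csave()/act_thread_catt().
 */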
struct arm_act_context {
	struct arm_unified_thread_state ss;
#if __ARM_VFP__
	struct arm_neon_saved_state ns;
#endif
};

/*
 * Routine: act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	unsigned int val;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
	if (ic == (struct arm_act_context *) NULL)
		return ((void *) 0);

	val = ARM_UNIFIED_THREAD_STATE_COUNT;
	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return ((void *) 0);
	}

#if __ARM_VFP__
	if (thread_is_64bit(thread)) {
		val = ARM_NEON_STATE64_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    &val);
	} else {
		val = ARM_NEON_STATE_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    &val);
	}
	if (kret != KERN_SUCCESS) {
		kfree(ic, sizeof(struct arm_act_context));
		return ((void *) 0);
	}
#endif
	return (ic);
}

/*
 * Routine: act_thread_catt
 *
 */
void
act_thread_catt(void *ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) ctx;
	if (ic == (struct arm_act_context *) NULL)
		return;

	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS)
		goto out;

#if __ARM_VFP__
	if (thread_is_64bit(thread)) {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE64_COUNT);
	} else {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE_COUNT);
	}
	if (kret != KERN_SUCCESS)
		goto out;
#endif
out:
	kfree(ic, sizeof(struct arm_act_context));
}

/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}

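/*
 * Routines: thread_set_wq_state32 / thread_set_wq_state64
 * Install workqueue thread state directly into the saved state, taking
 * the thread lock when targeting a thread other than the caller.
 */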
kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}

kern_return_t
thread_set_wq_state64(thread_t thread, thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state64_to_saved_state(state, saved_state);
	saved_state_64->cpsr = PSR64_USER64_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}