/* osfmk/arm/status.c — Apple xnu-4570.71.2 (ARM 32-bit thread state). */
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/proc_reg.h>
37
/*
 * Legacy VFPv2 register-file layout: 32 single-precision registers plus
 * the FPSCR control/status word.  Accepted as a shorter alternative to
 * the full ARM_VFP_STATE layout in the get/set state paths below.
 */
struct arm_vfpv2_state
{
	__uint32_t        __r[32];
	__uint32_t        __fpscr;

};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of an arm_vfpv2_state_t expressed in 32-bit words. */
#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
		(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
49
50
51 /*
52 * Forward definitions
53 */
54 void
55 thread_set_child(thread_t child, int pid);
56
57 void
58 thread_set_parent(thread_t parent, int pid);
59
/*
 * Maps state flavor to number of words in the state:
 * indexed by the flavor constant (0 is the FLAVOR_LIST pseudo-flavor).
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	ARM_THREAD_STATE_COUNT,
	ARM_VFP_STATE_COUNT,
	ARM_EXCEPTION_STATE_COUNT,
	ARM_DEBUG_STATE_COUNT
};

/* Zone used to lazily allocate per-thread arm_debug_state_t records. */
extern zone_t ads_zone;
73
/*
 * Routine: machine_thread_get_state
 *
 * Copy the requested 'flavor' of 'thread's saved machine state into
 * 'tstate'.  On entry *count is the caller's buffer size in 32-bit
 * words; on success it is updated to the number of words written.
 */
kern_return_t
machine_thread_get_state(
			 thread_t thread,
			 thread_flavor_t flavor,
			 thread_state_t tstate,
			 mach_msg_type_number_t * count)
{

#define machine_thread_get_state_kprintf(x...)	/* kprintf("machine_thread_get
						 * _state: " x) */

	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Enumerate the state flavors this architecture supports. */
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case ARM_THREAD_STATE:{
			struct arm_thread_state *state;
			struct arm_saved_state *saved_state;
			arm_unified_thread_state_t *unified_state;

			unsigned int i;
			if (*count < ARM_THREAD_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			/*
			 * A caller passing the unified count gets the 32-bit
			 * state embedded in an arm_unified_thread_state_t,
			 * with the flavor header filled in.
			 */
			if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) {
				unified_state = (arm_unified_thread_state_t *) tstate;
				state = &unified_state->ts_32;
				unified_state->ash.flavor = ARM_THREAD_STATE32;
				unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
			} else {
				state = (struct arm_thread_state *) tstate;
			}
			saved_state = &thread->machine.PcbData;

			/* Copy sp/lr/pc/cpsr and r0-r12 from the PCB. */
			state->sp = saved_state->sp;
			state->lr = saved_state->lr;
			state->pc = saved_state->pc;
			state->cpsr = saved_state->cpsr;
			for (i = 0; i < 13; i++)
				state->r[i] = saved_state->r[i];
			machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
				state->pc, state->r[0], state->sp);

			/* For the unified form, *count already describes the
			 * full wrapper and is left unchanged. */
			if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) {
				*count = ARM_THREAD_STATE_COUNT;
			}
			break;
		}
	case ARM_EXCEPTION_STATE:{
			struct arm_exception_state *state;
			struct arm_saved_state *saved_state;

			if (*count < ARM_EXCEPTION_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (struct arm_exception_state *) tstate;
			saved_state = &thread->machine.PcbData;

			/* Last exception number plus fault status/address. */
			state->exception = saved_state->exception;
			state->fsr = saved_state->fsr;
			state->far = saved_state->far;

			*count = ARM_EXCEPTION_STATE_COUNT;
			break;
		}
	case ARM_VFP_STATE:{
#if __ARM_VFP__
			struct arm_vfp_state *state;
			struct arm_vfpsaved_state *saved_state;
			unsigned int i;
			unsigned int max;

			/* Accept the shorter legacy VFPv2 layout as well. */
			if (*count < ARM_VFP_STATE_COUNT) {
				if (*count < ARM_VFPV2_STATE_COUNT)
					return (KERN_INVALID_ARGUMENT);
				else
					*count = ARM_VFPV2_STATE_COUNT;
			}

			/* VFPv2 callers get 32 registers, full callers 64. */
			if (*count == ARM_VFPV2_STATE_COUNT)
				max = 32;
			else
				max = 64;

			state = (struct arm_vfp_state *) tstate;
			saved_state = find_user_vfp(thread);

			state->fpscr = saved_state->fpscr;
			for (i = 0; i < max; i++)
				state->r[i] = saved_state->r[i];

#endif
			break;
		}
	case ARM_DEBUG_STATE:{
			arm_debug_state_t *state;
			arm_debug_state_t *thread_state;

			if (*count < ARM_DEBUG_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (arm_debug_state_t *) tstate;
			thread_state = find_debug_state(thread);

			/* No per-thread debug state yet means "all zero". */
			if (thread_state == NULL)
				bzero(state, sizeof(arm_debug_state_t));
			else
				bcopy(thread_state, state, sizeof(arm_debug_state_t));

			*count = ARM_DEBUG_STATE_COUNT;
			break;
		}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
204
205
206 /*
207 * Routine: machine_thread_get_kern_state
208 *
209 */
210 kern_return_t
211 machine_thread_get_kern_state(
212 thread_t thread,
213 thread_flavor_t flavor,
214 thread_state_t tstate,
215 mach_msg_type_number_t * count)
216 {
217
218 #define machine_thread_get_kern_state_kprintf(x...) /* kprintf("machine_threa
219 * d_get_kern_state: "
220 * x) */
221
222 /*
223 * This works only for an interrupted kernel thread
224 */
225 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
226 return KERN_FAILURE;
227
228 switch (flavor) {
229 case ARM_THREAD_STATE:{
230 struct arm_thread_state *state;
231 struct arm_saved_state *saved_state;
232 unsigned int i;
233 if (*count < ARM_THREAD_STATE_COUNT)
234 return (KERN_INVALID_ARGUMENT);
235
236 state = (struct arm_thread_state *) tstate;
237 saved_state = getCpuDatap()->cpu_int_state;
238
239 state->sp = saved_state->sp;
240 state->lr = saved_state->lr;
241 state->pc = saved_state->pc;
242 state->cpsr = saved_state->cpsr;
243 for (i = 0; i < 13; i++)
244 state->r[i] = saved_state->r[i];
245 machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
246 state->pc, state->r[0], state->sp);
247 *count = ARM_THREAD_STATE_COUNT;
248 break;
249 }
250 default:
251 return (KERN_INVALID_ARGUMENT);
252 }
253 return (KERN_SUCCESS);
254 }
255
256 extern long long arm_debug_get(void);
257
258 /*
259 * Routine: machine_thread_set_state
260 *
261 */
262 kern_return_t
263 machine_thread_set_state(
264 thread_t thread,
265 thread_flavor_t flavor,
266 thread_state_t tstate,
267 mach_msg_type_number_t count)
268 {
269
270 #define machine_thread_set_state_kprintf(x...) /* kprintf("machine_thread_set
271 * _state: " x) */
272
273 switch (flavor) {
274 case ARM_THREAD_STATE:{
275 struct arm_thread_state *state;
276 struct arm_saved_state *saved_state;
277 arm_unified_thread_state_t *unified_state;
278 int old_psr;
279
280 if (count < ARM_THREAD_STATE_COUNT)
281 return (KERN_INVALID_ARGUMENT);
282
283 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
284 unified_state = (arm_unified_thread_state_t *) tstate;
285 state = &unified_state->ts_32;
286 } else {
287 state = (struct arm_thread_state *) tstate;
288 }
289 saved_state = &thread->machine.PcbData;
290 old_psr = saved_state->cpsr;
291 memcpy((char *) saved_state, (char *) state, sizeof(*state));
292 /*
293 * do not allow privileged bits of the PSR to be
294 * changed
295 */
296 saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK);
297
298 machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
299 state->pc, state->r[0], state->sp);
300 break;
301 }
302 case ARM_VFP_STATE:{
303 #if __ARM_VFP__
304 struct arm_vfp_state *state;
305 struct arm_vfpsaved_state *saved_state;
306 unsigned int i;
307 unsigned int max;
308
309 if (count < ARM_VFP_STATE_COUNT) {
310 if (count < ARM_VFPV2_STATE_COUNT)
311 return (KERN_INVALID_ARGUMENT);
312 else
313 count = ARM_VFPV2_STATE_COUNT;
314 }
315
316 if (count == ARM_VFPV2_STATE_COUNT)
317 max = 32;
318 else
319 max = 64;
320
321 state = (struct arm_vfp_state *) tstate;
322 saved_state = find_user_vfp(thread);
323
324 saved_state->fpscr = state->fpscr;
325 for (i = 0; i < max; i++)
326 saved_state->r[i] = state->r[i];
327
328 #endif
329 break;
330 }
331 case ARM_EXCEPTION_STATE:{
332
333 if (count < ARM_EXCEPTION_STATE_COUNT)
334 return (KERN_INVALID_ARGUMENT);
335
336 break;
337 }
338 case ARM_DEBUG_STATE:{
339 arm_debug_state_t *state;
340 arm_debug_state_t *thread_state;
341 boolean_t enabled = FALSE;
342 unsigned int i;
343
344 if (count < ARM_DEBUG_STATE_COUNT)
345 return (KERN_INVALID_ARGUMENT);
346
347 state = (arm_debug_state_t *) tstate;
348 thread_state = find_debug_state(thread);
349
350 if (count < ARM_DEBUG_STATE_COUNT)
351 return (KERN_INVALID_ARGUMENT);
352
353 for (i = 0; i < 16; i++) {
354 /* do not allow context IDs to be set */
355 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
356 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
357 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
358 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
359 return KERN_PROTECTION_FAILURE;
360 }
361 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
362 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
363 enabled = TRUE;
364 }
365 }
366
367 if (!enabled) {
368 if (thread_state != NULL)
369 {
370 void *pTmp = thread->machine.DebugData;
371 thread->machine.DebugData = NULL;
372 zfree(ads_zone, pTmp);
373 }
374 }
375 else
376 {
377 if (thread_state == NULL)
378 thread_state = zalloc(ads_zone);
379
380 for (i = 0; i < 16; i++) {
381 /* set appropriate priviledge; mask out unknown bits */
382 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
383 | ARM_DBGBCR_MATCH_MASK
384 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
385 | ARM_DBG_CR_ENABLE_MASK))
386 | ARM_DBGBCR_TYPE_IVA
387 | ARM_DBG_CR_LINKED_UNLINKED
388 | ARM_DBG_CR_SECURITY_STATE_BOTH
389 | ARM_DBG_CR_MODE_CONTROL_USER;
390 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
391 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
392 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
393 | ARM_DBGWCR_ACCESS_CONTROL_MASK
394 | ARM_DBG_CR_ENABLE_MASK))
395 | ARM_DBG_CR_LINKED_UNLINKED
396 | ARM_DBG_CR_SECURITY_STATE_BOTH
397 | ARM_DBG_CR_MODE_CONTROL_USER;
398 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
399 }
400
401 if (thread->machine.DebugData == NULL)
402 thread->machine.DebugData = thread_state;
403 }
404
405 if (thread == current_thread()) {
406 arm_debug_set(thread_state);
407 }
408
409 break;
410 }
411
412 default:
413 return (KERN_INVALID_ARGUMENT);
414 }
415 return (KERN_SUCCESS);
416 }
417
418 /*
419 * Routine: machine_thread_state_initialize
420 *
421 */
422 kern_return_t
423 machine_thread_state_initialize(
424 thread_t thread)
425 {
426 struct arm_saved_state *savestate;
427
428 savestate = (struct arm_saved_state *) & thread->machine.PcbData;
429 bzero((char *) savestate, sizeof(struct arm_saved_state));
430 savestate->cpsr = PSR_USERDFLT;
431
432 #if __ARM_VFP__
433 vfp_state_initialize(&thread->machine.uVFPdata);
434 vfp_state_initialize(&thread->machine.kVFPdata);
435 #endif
436
437 thread->machine.DebugData = NULL;
438
439 return KERN_SUCCESS;
440 }
441
#if __ARM_VFP__
/*
 * Initialize a VFP save area to the default "RunFast" configuration:
 * flush-to-zero, default-NaN, and no enabled exception traps.
 *
 * On the VFP11 this allows floating point to run without trapping to
 * support code (which we do not provide); on the Cortex-A8 it enables
 * the much faster NFP pipeline for single-precision operations.
 */
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
	bzero(vfp_state, sizeof(*vfp_state));
	vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
462
463
464 /*
465 * Routine: machine_thread_dup
466 *
467 */
468 kern_return_t
469 machine_thread_dup(
470 thread_t self,
471 thread_t target)
472 {
473 struct arm_saved_state *self_saved_state;
474 struct arm_saved_state *target_saved_state;
475
476 #if __ARM_VFP__
477 struct arm_vfpsaved_state *self_vfp_state;
478 struct arm_vfpsaved_state *target_vfp_state;
479 #endif
480
481 target->machine.cthread_self = self->machine.cthread_self;
482 target->machine.cthread_data = self->machine.cthread_data;
483
484 self_saved_state = &self->machine.PcbData;
485 target_saved_state = &target->machine.PcbData;
486 bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
487
488 #if __ARM_VFP__
489 self_vfp_state = &self->machine.uVFPdata;
490 target_vfp_state = &target->machine.uVFPdata;
491 bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state));
492 #endif
493
494 return (KERN_SUCCESS);
495 }
496
497 /*
498 * Routine: get_user_regs
499 *
500 */
501 struct arm_saved_state *
502 get_user_regs(
503 thread_t thread)
504 {
505 return (&thread->machine.PcbData);
506 }
507
508 /*
509 * Routine: find_user_regs
510 *
511 */
512 struct arm_saved_state *
513 find_user_regs(
514 thread_t thread)
515 {
516 return get_user_regs(thread);
517 }
518
519 /*
520 * Routine: find_kern_regs
521 *
522 */
523 struct arm_saved_state *
524 find_kern_regs(
525 thread_t thread)
526 {
527 /*
528 * This works only for an interrupted kernel thread
529 */
530 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
531 return ((struct arm_saved_state *) NULL);
532 else
533 return (getCpuDatap()->cpu_int_state);
534
535 }
536
#if __ARM_VFP__
/*
 * Return the thread's user-state floating point context.  The save
 * area is embedded in the machine thread structure, so this variant
 * always has state to return.
 */
struct arm_vfpsaved_state *
find_user_vfp(
	      thread_t thread)
{
	return (&thread->machine.uVFPdata);
}
#endif /* __ARM_VFP__ */
550
551 arm_debug_state_t *
552 find_debug_state(
553 thread_t thread)
554 {
555 return thread->machine.DebugData;
556 }
557
558 /*
559 * Routine: thread_userstack
560 *
561 */
562 kern_return_t
563 thread_userstack(
564 __unused thread_t thread,
565 int flavor,
566 thread_state_t tstate,
567 unsigned int count,
568 mach_vm_offset_t * user_stack,
569 int *customstack,
570 __unused boolean_t is64bit
571 )
572 {
573
574 switch (flavor) {
575 case ARM_THREAD_STATE:
576 {
577 struct arm_thread_state *state;
578
579
580 if (count < ARM_THREAD_STATE_COUNT)
581 return (KERN_INVALID_ARGUMENT);
582
583 if (customstack)
584 *customstack = 0;
585 state = (struct arm_thread_state *) tstate;
586
587 if (state->sp) {
588 *user_stack = CAST_USER_ADDR_T(state->sp);
589 if (customstack)
590 *customstack = 1;
591 } else {
592 *user_stack = CAST_USER_ADDR_T(USRSTACK);
593 }
594 }
595 break;
596
597 default:
598 return (KERN_INVALID_ARGUMENT);
599 }
600
601 return (KERN_SUCCESS);
602 }
603
604 /*
605 * thread_userstackdefault:
606 *
607 * Return the default stack location for the
608 * thread, if otherwise unknown.
609 */
610 kern_return_t
611 thread_userstackdefault(
612 mach_vm_offset_t *default_user_stack,
613 boolean_t is64bit __unused)
614 {
615 *default_user_stack = USRSTACK;
616
617 return (KERN_SUCCESS);
618 }
619
620 /*
621 * Routine: thread_setuserstack
622 *
623 */
624 void
625 thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
626 {
627 struct arm_saved_state *sv;
628
629 #define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstac
630 * k: " x) */
631
632 sv = get_user_regs(thread);
633
634 sv->sp = user_stack;
635
636 thread_setuserstack_kprintf("stack %x\n", sv->sp);
637
638 return;
639 }
640
641 /*
642 * Routine: thread_adjuserstack
643 *
644 */
645 uint64_t
646 thread_adjuserstack(thread_t thread, int adjust)
647 {
648 struct arm_saved_state *sv;
649
650 sv = get_user_regs(thread);
651
652 sv->sp += adjust;
653
654 return sv->sp;
655 }
656
657 /*
658 * Routine: thread_setentrypoint
659 *
660 */
661 void
662 thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
663 {
664 struct arm_saved_state *sv;
665
666 #define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoi
667 * nt: " x) */
668
669 sv = get_user_regs(thread);
670
671 sv->pc = entry;
672
673 thread_setentrypoint_kprintf("entry %x\n", sv->pc);
674
675 return;
676 }
677
678 /*
679 * Routine: thread_entrypoint
680 *
681 */
682 kern_return_t
683 thread_entrypoint(
684 __unused thread_t thread,
685 int flavor,
686 thread_state_t tstate,
687 __unused unsigned int count,
688 mach_vm_offset_t * entry_point
689 )
690 {
691 switch (flavor) {
692 case ARM_THREAD_STATE:
693 {
694 struct arm_thread_state *state;
695
696 state = (struct arm_thread_state *) tstate;
697
698 /*
699 * If a valid entry point is specified, use it.
700 */
701 if (state->pc) {
702 *entry_point = CAST_USER_ADDR_T(state->pc);
703 } else {
704 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
705 }
706 }
707 break;
708
709 default:
710 return (KERN_INVALID_ARGUMENT);
711 }
712
713 return (KERN_SUCCESS);
714 }
715
716
717 /*
718 * Routine: thread_set_child
719 *
720 */
721 void
722 thread_set_child(
723 thread_t child,
724 int pid)
725 {
726 struct arm_saved_state *child_state;
727
728 child_state = get_user_regs(child);
729
730 child_state->r[0] = (uint_t) pid;
731 child_state->r[1] = 1ULL;
732 }
733
734
735 /*
736 * Routine: thread_set_parent
737 *
738 */
739 void
740 thread_set_parent(
741 thread_t parent,
742 int pid)
743 {
744 struct arm_saved_state *parent_state;
745
746 parent_state = get_user_regs(parent);
747
748 parent_state->r[0] = pid;
749 parent_state->r[1] = 0;
750 }
751
752
/* Snapshot of a thread's machine context captured by act_thread_csave()
 * and restored by act_thread_catt(). */
struct arm_act_context {
	struct arm_saved_state ss;
#if __ARM_VFP__
	struct arm_vfpsaved_state vfps;
#endif
};
759
760 /*
761 * Routine: act_thread_csave
762 *
763 */
764 void *
765 act_thread_csave(void)
766 {
767 struct arm_act_context *ic;
768 kern_return_t kret;
769 unsigned int val;
770
771 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
772
773 if (ic == (struct arm_act_context *) NULL)
774 return ((void *) 0);
775
776 val = ARM_THREAD_STATE_COUNT;
777 kret = machine_thread_get_state(current_thread(),
778 ARM_THREAD_STATE,
779 (thread_state_t) & ic->ss,
780 &val);
781 if (kret != KERN_SUCCESS) {
782 kfree(ic, sizeof(struct arm_act_context));
783 return ((void *) 0);
784 }
785 #if __ARM_VFP__
786 val = ARM_VFP_STATE_COUNT;
787 kret = machine_thread_get_state(current_thread(),
788 ARM_VFP_STATE,
789 (thread_state_t) & ic->vfps,
790 &val);
791 if (kret != KERN_SUCCESS) {
792 kfree(ic, sizeof(struct arm_act_context));
793 return ((void *) 0);
794 }
795 #endif
796 return (ic);
797 }
798
799 /*
800 * Routine: act_thread_catt
801 *
802 */
803 void
804 act_thread_catt(void *ctx)
805 {
806 struct arm_act_context *ic;
807 kern_return_t kret;
808
809 ic = (struct arm_act_context *) ctx;
810
811 if (ic == (struct arm_act_context *) NULL)
812 return;
813
814 kret = machine_thread_set_state(current_thread(),
815 ARM_THREAD_STATE,
816 (thread_state_t) & ic->ss,
817 ARM_THREAD_STATE_COUNT);
818 if (kret != KERN_SUCCESS)
819 goto out;
820
821 #if __ARM_VFP__
822 kret = machine_thread_set_state(current_thread(),
823 ARM_VFP_STATE,
824 (thread_state_t) & ic->vfps,
825 ARM_VFP_STATE_COUNT);
826 if (kret != KERN_SUCCESS)
827 goto out;
828 #endif
829 out:
830 kfree(ic, sizeof(struct arm_act_context));
831 }
832
/*
 * Routine: act_thread_cfree
 *
 */
/* Release a context record from act_thread_csave() without restoring it. */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}
842
843 kern_return_t
844 thread_set_wq_state32(thread_t thread, thread_state_t tstate)
845 {
846 arm_thread_state_t *state;
847 struct arm_saved_state *saved_state;
848 thread_t curth = current_thread();
849 spl_t s=0;
850
851 saved_state = &thread->machine.PcbData;
852 state = (arm_thread_state_t *)tstate;
853
854 if (curth != thread) {
855 s = splsched();
856 thread_lock(thread);
857 }
858
859 /*
860 * do not zero saved_state, it can be concurrently accessed
861 * and zero is not a valid state for some of the registers,
862 * like sp.
863 */
864 thread_state32_to_saved_state(state, saved_state);
865 saved_state->cpsr = PSR_USERDFLT;
866
867 if (curth != thread) {
868 thread_unlock(thread);
869 splx(s);
870 }
871
872 return KERN_SUCCESS;
873 }