]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm/status.c
xnu-4903.241.1.tar.gz
[apple/xnu.git] / osfmk / arm / status.c
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/proc_reg.h>
37
/*
 * Legacy VFPv2 register-file layout: 32 32-bit registers plus the
 * FPSCR control/status word.  Used to accept/report the shorter VFP
 * state flavor from clients that predate the 64-register layout.
 */
struct arm_vfpv2_state
{
	__uint32_t        __r[32];
	__uint32_t        __fpscr;

};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of an arm_vfpv2_state_t in 32-bit words, for count validation. */
#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
49
50
/*
 * Forward definitions
 */
void
thread_set_child(thread_t child, int pid);

void
thread_set_parent(thread_t parent, int pid);

/*
 * Maps state flavor to number of words in the state:
 * index 0 is the FLAVOR_LIST pseudo-flavor, followed by the
 * per-flavor word counts in flavor-number order.
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	ARM_THREAD_STATE_COUNT,
	ARM_VFP_STATE_COUNT,
	ARM_EXCEPTION_STATE_COUNT,
	ARM_DEBUG_STATE_COUNT
};
71
72 extern zone_t ads_zone;
73
74 kern_return_t
75 machine_thread_state_convert_to_user(
76 __unused thread_t thread,
77 __unused thread_flavor_t flavor,
78 __unused thread_state_t tstate,
79 __unused mach_msg_type_number_t *count)
80 {
81 // No conversion to userspace representation on this platform
82 return KERN_SUCCESS;
83 }
84
85 kern_return_t
86 machine_thread_state_convert_from_user(
87 __unused thread_t thread,
88 __unused thread_flavor_t flavor,
89 __unused thread_state_t tstate,
90 __unused mach_msg_type_number_t count)
91 {
92 // No conversion from userspace representation on this platform
93 return KERN_SUCCESS;
94 }
95
96 kern_return_t
97 machine_thread_siguctx_pointer_convert_to_user(
98 __unused thread_t thread,
99 __unused user_addr_t *uctxp)
100 {
101 // No conversion to userspace representation on this platform
102 return KERN_SUCCESS;
103 }
104
105 kern_return_t
106 machine_thread_function_pointers_convert_from_user(
107 __unused thread_t thread,
108 __unused user_addr_t *fptrs,
109 __unused uint32_t count)
110 {
111 // No conversion from userspace representation on this platform
112 return KERN_SUCCESS;
113 }
114
/*
 * Routine: machine_thread_get_state
 *
 * Export a thread's machine state in the requested flavor.
 * On entry *count is the caller's buffer size in 32-bit words; on
 * success it is updated to the number of words actually written.
 * Returns KERN_INVALID_ARGUMENT for unknown flavors or short buffers.
 */
kern_return_t
machine_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{

#define machine_thread_get_state_kprintf(x...)	/* kprintf("machine_thread_get_state: " x) */

	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Enumerate the four state flavors this platform supports. */
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case ARM_THREAD_STATE:{
		struct arm_thread_state *state;
		struct arm_saved_state *saved_state;
		arm_unified_thread_state_t *unified_state;

		unsigned int i;
		if (*count < ARM_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/*
		 * A unified-state buffer carries its own flavor header;
		 * fill it in and point at the embedded 32-bit state.
		 */
		if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) {
			unified_state = (arm_unified_thread_state_t *) tstate;
			state = &unified_state->ts_32;
			unified_state->ash.flavor = ARM_THREAD_STATE32;
			unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		} else {
			state = (struct arm_thread_state *) tstate;
		}
		saved_state = &thread->machine.PcbData;

		/* Copy special registers and r0..r12 out of the PCB. */
		state->sp = saved_state->sp;
		state->lr = saved_state->lr;
		state->pc = saved_state->pc;
		state->cpsr = saved_state->cpsr;
		for (i = 0; i < 13; i++)
			state->r[i] = saved_state->r[i];
		machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
		    state->pc, state->r[0], state->sp);

		/* Unified callers keep their original count (header included). */
		if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) {
			*count = ARM_THREAD_STATE_COUNT;
		}
		break;
	}
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (struct arm_exception_state *) tstate;
		saved_state = &thread->machine.PcbData;

		/* Report the last exception type plus fault status/address. */
		state->exception = saved_state->exception;
		state->fsr = saved_state->fsr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_VFP_STATE:{
#if __ARM_VFP__
		struct arm_vfp_state *state;
		struct arm_vfpsaved_state *saved_state;
		unsigned int i;
		unsigned int max;

		/*
		 * Accept either the full VFP buffer or the shorter legacy
		 * VFPv2 buffer (32 registers instead of 64).
		 */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);
			else
				*count = ARM_VFPV2_STATE_COUNT;
		}

		if (*count == ARM_VFPV2_STATE_COUNT)
			max = 32;
		else
			max = 64;

		state = (struct arm_vfp_state *) tstate;
		saved_state = find_user_vfp(thread);

		state->fpscr = saved_state->fpscr;
		for (i = 0; i < max; i++)
			state->r[i] = saved_state->r[i];

#endif
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_debug_state_t *state;
		arm_debug_state_t *thread_state;

		if (*count < ARM_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state_t *) tstate;
		thread_state = find_debug_state(thread);

		/* No per-thread debug state allocated yet: report all zeros. */
		if (thread_state == NULL)
			bzero(state, sizeof(arm_debug_state_t));
		else
			bcopy(thread_state, state, sizeof(arm_debug_state_t));

		*count = ARM_DEBUG_STATE_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
245
246
/*
 * Routine: machine_thread_get_kern_state
 *
 * Return the kernel-mode register state captured at interrupt time.
 * Only valid for the current thread, and only while an interrupted
 * kernel context is saved in the per-CPU data; otherwise KERN_FAILURE.
 */
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{

#define machine_thread_get_kern_state_kprintf(x...)	/* kprintf("machine_thread_get_kern_state: " x) */

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case ARM_THREAD_STATE:{
		struct arm_thread_state *state;
		struct arm_saved_state *saved_state;
		unsigned int i;
		if (*count < ARM_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (struct arm_thread_state *) tstate;
		/* Read from the interrupt-time save area, not the PCB. */
		saved_state = getCpuDatap()->cpu_int_state;

		state->sp = saved_state->sp;
		state->lr = saved_state->lr;
		state->pc = saved_state->pc;
		state->cpsr = saved_state->cpsr;
		for (i = 0; i < 13; i++)
			state->r[i] = saved_state->r[i];
		machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
		    state->pc, state->r[0], state->sp);
		*count = ARM_THREAD_STATE_COUNT;
		break;
	}
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
296
297 extern long long arm_debug_get(void);
298
299 /*
300 * Routine: machine_thread_set_state
301 *
302 */
303 kern_return_t
304 machine_thread_set_state(
305 thread_t thread,
306 thread_flavor_t flavor,
307 thread_state_t tstate,
308 mach_msg_type_number_t count)
309 {
310
311 #define machine_thread_set_state_kprintf(x...) /* kprintf("machine_thread_set
312 * _state: " x) */
313
314 switch (flavor) {
315 case ARM_THREAD_STATE:{
316 struct arm_thread_state *state;
317 struct arm_saved_state *saved_state;
318 arm_unified_thread_state_t *unified_state;
319 int old_psr;
320
321 if (count < ARM_THREAD_STATE_COUNT)
322 return (KERN_INVALID_ARGUMENT);
323
324 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
325 unified_state = (arm_unified_thread_state_t *) tstate;
326 state = &unified_state->ts_32;
327 } else {
328 state = (struct arm_thread_state *) tstate;
329 }
330 saved_state = &thread->machine.PcbData;
331 old_psr = saved_state->cpsr;
332 memcpy((char *) saved_state, (char *) state, sizeof(*state));
333 /*
334 * do not allow privileged bits of the PSR to be
335 * changed
336 */
337 saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK);
338
339 machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
340 state->pc, state->r[0], state->sp);
341 break;
342 }
343 case ARM_VFP_STATE:{
344 #if __ARM_VFP__
345 struct arm_vfp_state *state;
346 struct arm_vfpsaved_state *saved_state;
347 unsigned int i;
348 unsigned int max;
349
350 if (count < ARM_VFP_STATE_COUNT) {
351 if (count < ARM_VFPV2_STATE_COUNT)
352 return (KERN_INVALID_ARGUMENT);
353 else
354 count = ARM_VFPV2_STATE_COUNT;
355 }
356
357 if (count == ARM_VFPV2_STATE_COUNT)
358 max = 32;
359 else
360 max = 64;
361
362 state = (struct arm_vfp_state *) tstate;
363 saved_state = find_user_vfp(thread);
364
365 saved_state->fpscr = state->fpscr;
366 for (i = 0; i < max; i++)
367 saved_state->r[i] = state->r[i];
368
369 #endif
370 break;
371 }
372 case ARM_EXCEPTION_STATE:{
373
374 if (count < ARM_EXCEPTION_STATE_COUNT)
375 return (KERN_INVALID_ARGUMENT);
376
377 break;
378 }
379 case ARM_DEBUG_STATE:{
380 arm_debug_state_t *state;
381 arm_debug_state_t *thread_state;
382 boolean_t enabled = FALSE;
383 unsigned int i;
384
385 if (count < ARM_DEBUG_STATE_COUNT)
386 return (KERN_INVALID_ARGUMENT);
387
388 state = (arm_debug_state_t *) tstate;
389 thread_state = find_debug_state(thread);
390
391 if (count < ARM_DEBUG_STATE_COUNT)
392 return (KERN_INVALID_ARGUMENT);
393
394 for (i = 0; i < 16; i++) {
395 /* do not allow context IDs to be set */
396 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
397 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
398 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
399 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
400 return KERN_PROTECTION_FAILURE;
401 }
402 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
403 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
404 enabled = TRUE;
405 }
406 }
407
408 if (!enabled) {
409 if (thread_state != NULL)
410 {
411 void *pTmp = thread->machine.DebugData;
412 thread->machine.DebugData = NULL;
413 zfree(ads_zone, pTmp);
414 }
415 }
416 else
417 {
418 if (thread_state == NULL)
419 thread_state = zalloc(ads_zone);
420
421 for (i = 0; i < 16; i++) {
422 /* set appropriate priviledge; mask out unknown bits */
423 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
424 | ARM_DBGBCR_MATCH_MASK
425 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
426 | ARM_DBG_CR_ENABLE_MASK))
427 | ARM_DBGBCR_TYPE_IVA
428 | ARM_DBG_CR_LINKED_UNLINKED
429 | ARM_DBG_CR_SECURITY_STATE_BOTH
430 | ARM_DBG_CR_MODE_CONTROL_USER;
431 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
432 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
433 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
434 | ARM_DBGWCR_ACCESS_CONTROL_MASK
435 | ARM_DBG_CR_ENABLE_MASK))
436 | ARM_DBG_CR_LINKED_UNLINKED
437 | ARM_DBG_CR_SECURITY_STATE_BOTH
438 | ARM_DBG_CR_MODE_CONTROL_USER;
439 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
440 }
441
442 if (thread->machine.DebugData == NULL)
443 thread->machine.DebugData = thread_state;
444 }
445
446 if (thread == current_thread()) {
447 arm_debug_set(thread_state);
448 }
449
450 break;
451 }
452
453 default:
454 return (KERN_INVALID_ARGUMENT);
455 }
456 return (KERN_SUCCESS);
457 }
458
459 /*
460 * Routine: machine_thread_state_initialize
461 *
462 */
463 kern_return_t
464 machine_thread_state_initialize(
465 thread_t thread)
466 {
467 struct arm_saved_state *savestate;
468
469 savestate = (struct arm_saved_state *) & thread->machine.PcbData;
470 bzero((char *) savestate, sizeof(struct arm_saved_state));
471 savestate->cpsr = PSR_USERDFLT;
472
473 #if __ARM_VFP__
474 vfp_state_initialize(&thread->machine.uVFPdata);
475 vfp_state_initialize(&thread->machine.kVFPdata);
476 #endif
477
478 thread->machine.DebugData = NULL;
479
480 return KERN_SUCCESS;
481 }
482
#if __ARM_VFP__
/*
 * Reset a VFP save area to the default ("RunFast") configuration:
 * flush-to-zero, default-NaN, and all exception traps disabled.
 *
 * On the VFP11 this lets floating point run without trapping to
 * support code (which we do not provide); on the Cortex-A8 it
 * enables the faster NFP pipeline for single-precision work.
 */
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
	bzero(vfp_state, sizeof(*vfp_state));
	vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
503
504
505 /*
506 * Routine: machine_thread_dup
507 *
508 */
509 kern_return_t
510 machine_thread_dup(
511 thread_t self,
512 thread_t target,
513 __unused boolean_t is_corpse)
514 {
515 struct arm_saved_state *self_saved_state;
516 struct arm_saved_state *target_saved_state;
517
518 #if __ARM_VFP__
519 struct arm_vfpsaved_state *self_vfp_state;
520 struct arm_vfpsaved_state *target_vfp_state;
521 #endif
522
523 target->machine.cthread_self = self->machine.cthread_self;
524 target->machine.cthread_data = self->machine.cthread_data;
525
526 self_saved_state = &self->machine.PcbData;
527 target_saved_state = &target->machine.PcbData;
528 bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
529
530 #if __ARM_VFP__
531 self_vfp_state = &self->machine.uVFPdata;
532 target_vfp_state = &target->machine.uVFPdata;
533 bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state));
534 #endif
535
536 return (KERN_SUCCESS);
537 }
538
539 /*
540 * Routine: get_user_regs
541 *
542 */
543 struct arm_saved_state *
544 get_user_regs(
545 thread_t thread)
546 {
547 return (&thread->machine.PcbData);
548 }
549
550 /*
551 * Routine: find_user_regs
552 *
553 */
554 struct arm_saved_state *
555 find_user_regs(
556 thread_t thread)
557 {
558 return get_user_regs(thread);
559 }
560
561 /*
562 * Routine: find_kern_regs
563 *
564 */
565 struct arm_saved_state *
566 find_kern_regs(
567 thread_t thread)
568 {
569 /*
570 * This works only for an interrupted kernel thread
571 */
572 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
573 return ((struct arm_saved_state *) NULL);
574 else
575 return (getCpuDatap()->cpu_int_state);
576
577 }
578
#if __ARM_VFP__
/*
 * Return the thread's user-mode VFP save area.  On this platform one
 * is always embedded in the machine-dependent thread structure.
 */
struct arm_vfpsaved_state *
find_user_vfp(
	thread_t thread)
{
	return &thread->machine.uVFPdata;
}
#endif /* __ARM_VFP__ */
592
593 arm_debug_state_t *
594 find_debug_state(
595 thread_t thread)
596 {
597 return thread->machine.DebugData;
598 }
599
600 /*
601 * Routine: thread_userstack
602 *
603 */
604 kern_return_t
605 thread_userstack(
606 __unused thread_t thread,
607 int flavor,
608 thread_state_t tstate,
609 unsigned int count,
610 mach_vm_offset_t * user_stack,
611 int *customstack,
612 __unused boolean_t is64bit
613 )
614 {
615
616 switch (flavor) {
617 case ARM_THREAD_STATE:
618 {
619 struct arm_thread_state *state;
620
621
622 if (count < ARM_THREAD_STATE_COUNT)
623 return (KERN_INVALID_ARGUMENT);
624
625 if (customstack)
626 *customstack = 0;
627 state = (struct arm_thread_state *) tstate;
628
629 if (state->sp) {
630 *user_stack = CAST_USER_ADDR_T(state->sp);
631 if (customstack)
632 *customstack = 1;
633 } else {
634 *user_stack = CAST_USER_ADDR_T(USRSTACK);
635 }
636 }
637 break;
638
639 default:
640 return (KERN_INVALID_ARGUMENT);
641 }
642
643 return (KERN_SUCCESS);
644 }
645
646 /*
647 * thread_userstackdefault:
648 *
649 * Return the default stack location for the
650 * thread, if otherwise unknown.
651 */
652 kern_return_t
653 thread_userstackdefault(
654 mach_vm_offset_t *default_user_stack,
655 boolean_t is64bit __unused)
656 {
657 *default_user_stack = USRSTACK;
658
659 return (KERN_SUCCESS);
660 }
661
662 /*
663 * Routine: thread_setuserstack
664 *
665 */
666 void
667 thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
668 {
669 struct arm_saved_state *sv;
670
671 #define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstac
672 * k: " x) */
673
674 sv = get_user_regs(thread);
675
676 sv->sp = user_stack;
677
678 thread_setuserstack_kprintf("stack %x\n", sv->sp);
679
680 return;
681 }
682
683 /*
684 * Routine: thread_adjuserstack
685 *
686 */
687 uint64_t
688 thread_adjuserstack(thread_t thread, int adjust)
689 {
690 struct arm_saved_state *sv;
691
692 sv = get_user_regs(thread);
693
694 sv->sp += adjust;
695
696 return sv->sp;
697 }
698
699 /*
700 * Routine: thread_setentrypoint
701 *
702 */
703 void
704 thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
705 {
706 struct arm_saved_state *sv;
707
708 #define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoi
709 * nt: " x) */
710
711 sv = get_user_regs(thread);
712
713 sv->pc = entry;
714
715 thread_setentrypoint_kprintf("entry %x\n", sv->pc);
716
717 return;
718 }
719
720 /*
721 * Routine: thread_entrypoint
722 *
723 */
724 kern_return_t
725 thread_entrypoint(
726 __unused thread_t thread,
727 int flavor,
728 thread_state_t tstate,
729 __unused unsigned int count,
730 mach_vm_offset_t * entry_point
731 )
732 {
733 switch (flavor) {
734 case ARM_THREAD_STATE:
735 {
736 struct arm_thread_state *state;
737
738 state = (struct arm_thread_state *) tstate;
739
740 /*
741 * If a valid entry point is specified, use it.
742 */
743 if (state->pc) {
744 *entry_point = CAST_USER_ADDR_T(state->pc);
745 } else {
746 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
747 }
748 }
749 break;
750
751 default:
752 return (KERN_INVALID_ARGUMENT);
753 }
754
755 return (KERN_SUCCESS);
756 }
757
758
759 /*
760 * Routine: thread_set_child
761 *
762 */
763 void
764 thread_set_child(
765 thread_t child,
766 int pid)
767 {
768 struct arm_saved_state *child_state;
769
770 child_state = get_user_regs(child);
771
772 child_state->r[0] = (uint_t) pid;
773 child_state->r[1] = 1ULL;
774 }
775
776
777 /*
778 * Routine: thread_set_parent
779 *
780 */
781 void
782 thread_set_parent(
783 thread_t parent,
784 int pid)
785 {
786 struct arm_saved_state *parent_state;
787
788 parent_state = get_user_regs(parent);
789
790 parent_state->r[0] = pid;
791 parent_state->r[1] = 0;
792 }
793
794
/*
 * Snapshot of a thread's machine context saved by act_thread_csave()
 * and restored by act_thread_catt(): integer state plus (when built
 * with VFP) the floating-point state.
 */
struct arm_act_context {
	struct arm_saved_state ss;
#if __ARM_VFP__
	struct arm_vfpsaved_state vfps;
#endif
};
801
802 /*
803 * Routine: act_thread_csave
804 *
805 */
806 void *
807 act_thread_csave(void)
808 {
809 struct arm_act_context *ic;
810 kern_return_t kret;
811 unsigned int val;
812
813 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
814
815 if (ic == (struct arm_act_context *) NULL)
816 return ((void *) 0);
817
818 val = ARM_THREAD_STATE_COUNT;
819 kret = machine_thread_get_state(current_thread(),
820 ARM_THREAD_STATE,
821 (thread_state_t) & ic->ss,
822 &val);
823 if (kret != KERN_SUCCESS) {
824 kfree(ic, sizeof(struct arm_act_context));
825 return ((void *) 0);
826 }
827 #if __ARM_VFP__
828 val = ARM_VFP_STATE_COUNT;
829 kret = machine_thread_get_state(current_thread(),
830 ARM_VFP_STATE,
831 (thread_state_t) & ic->vfps,
832 &val);
833 if (kret != KERN_SUCCESS) {
834 kfree(ic, sizeof(struct arm_act_context));
835 return ((void *) 0);
836 }
837 #endif
838 return (ic);
839 }
840
841 /*
842 * Routine: act_thread_catt
843 *
844 */
845 void
846 act_thread_catt(void *ctx)
847 {
848 struct arm_act_context *ic;
849 kern_return_t kret;
850
851 ic = (struct arm_act_context *) ctx;
852
853 if (ic == (struct arm_act_context *) NULL)
854 return;
855
856 kret = machine_thread_set_state(current_thread(),
857 ARM_THREAD_STATE,
858 (thread_state_t) & ic->ss,
859 ARM_THREAD_STATE_COUNT);
860 if (kret != KERN_SUCCESS)
861 goto out;
862
863 #if __ARM_VFP__
864 kret = machine_thread_set_state(current_thread(),
865 ARM_VFP_STATE,
866 (thread_state_t) & ic->vfps,
867 ARM_VFP_STATE_COUNT);
868 if (kret != KERN_SUCCESS)
869 goto out;
870 #endif
871 out:
872 kfree(ic, sizeof(struct arm_act_context));
873 }
874
/*
 * Routine:	act_thread_cfree
 *
 */
879 void
880 act_thread_cfree(void *ctx)
881 {
882 kfree(ctx, sizeof(struct arm_act_context));
883 }
884
/*
 * Install a 32-bit workqueue thread state into `thread`.  When the
 * target is not the current thread, its thread lock is taken (at
 * splsched) around the update so the state changes atomically with
 * respect to the scheduler.
 */
kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	thread_t curth = current_thread();
	spl_t s=0;

	saved_state = &thread->machine.PcbData;
	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	/* Force the default user PSR regardless of what the caller passed. */
	saved_state->cpsr = PSR_USERDFLT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}