]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm/status.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / arm / status.c
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/proc_reg.h>
37
/*
 * Legacy VFPv2 register-file layout: 32 single-precision registers plus
 * the FPSCR control/status word.  Used below to accept the shorter
 * VFPv2-sized buffers in the ARM_VFP_STATE get/set paths.
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];	/* s0..s31 */
	__uint32_t __fpscr;	/* floating-point status/control register */
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t in 32-bit words (Mach state-count convention). */
#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
47
48
/*
 * Forward definitions
 */

/* Set the fork() return registers in the child thread (r1 = 1). */
void
thread_set_child(thread_t child, int pid);

/* Set the fork() return registers in the parent thread (r1 = 0). */
void
thread_set_parent(thread_t parent, int pid);
57
/*
 * Maps state flavor to number of words in the state:
 * indexed by the Mach thread-state flavor constants; slot 0
 * (FLAVOR_LIST) carries no fixed size.
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	[ARM_THREAD_STATE] = ARM_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
70
71 extern zone_t ads_zone;
72
73 kern_return_t
74 machine_thread_state_convert_to_user(
75 __unused thread_t thread,
76 __unused thread_flavor_t flavor,
77 __unused thread_state_t tstate,
78 __unused mach_msg_type_number_t *count)
79 {
80 // No conversion to userspace representation on this platform
81 return KERN_SUCCESS;
82 }
83
84 kern_return_t
85 machine_thread_state_convert_from_user(
86 __unused thread_t thread,
87 __unused thread_flavor_t flavor,
88 __unused thread_state_t tstate,
89 __unused mach_msg_type_number_t count)
90 {
91 // No conversion from userspace representation on this platform
92 return KERN_SUCCESS;
93 }
94
95 kern_return_t
96 machine_thread_siguctx_pointer_convert_to_user(
97 __unused thread_t thread,
98 __unused user_addr_t *uctxp)
99 {
100 // No conversion to userspace representation on this platform
101 return KERN_SUCCESS;
102 }
103
104 kern_return_t
105 machine_thread_function_pointers_convert_from_user(
106 __unused thread_t thread,
107 __unused user_addr_t *fptrs,
108 __unused uint32_t count)
109 {
110 // No conversion from userspace representation on this platform
111 return KERN_SUCCESS;
112 }
113
/*
 * Routine: machine_thread_get_state
 *
 * Copy the requested flavor of 'thread's user-mode machine state into the
 * caller-supplied buffer 'tstate'.  On entry *count holds the buffer
 * capacity in 32-bit words; on success it is updated to the number of
 * words written.  Returns KERN_INVALID_ARGUMENT for an unknown flavor or
 * an undersized buffer.
 */
kern_return_t
machine_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
#define machine_thread_get_state_kprintf(x...)	/* kprintf("machine_thread_get
	                                         * _state: " x) */

	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Enumerate the flavors supported on this platform. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		/* Extended flavor list: additionally reports ARM_PAGEIN_STATE. */
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:{
		struct arm_thread_state *state;
		struct arm_saved_state *saved_state;
		arm_unified_thread_state_t *unified_state;

		unsigned int i;
		if (*count < ARM_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/*
		 * A unified-state-sized buffer gets the 32-bit state wrapped
		 * in a flavor/count header; otherwise the raw state is written.
		 */
		if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) {
			unified_state = (arm_unified_thread_state_t *) tstate;
			state = &unified_state->ts_32;
			unified_state->ash.flavor = ARM_THREAD_STATE32;
			unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		} else {
			state = (struct arm_thread_state *) tstate;
		}
		saved_state = &thread->machine.PcbData;

		state->sp = saved_state->sp;
		state->lr = saved_state->lr;
		state->pc = saved_state->pc;
		state->cpsr = saved_state->cpsr;
		for (i = 0; i < 13; i++) {	/* r0..r12 */
			state->r[i] = saved_state->r[i];
		}
		machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
		    state->pc, state->r[0], state->sp);

		/* For a unified buffer, leave *count at the unified size. */
		if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) {
			*count = ARM_THREAD_STATE_COUNT;
		}
		break;
	}
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = &thread->machine.PcbData;

		state->exception = saved_state->exception;
		state->fsr = saved_state->fsr;	/* fault status */
		state->far = saved_state->far;	/* fault address */

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_VFP_STATE:{
#if __ARM_VFP__
		struct arm_vfp_state *state;
		struct arm_vfpsaved_state *saved_state;
		unsigned int i;
		unsigned int max;

		/* Accept either the full VFP buffer or the shorter VFPv2 one. */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		/* VFPv2 exposes 32 single registers, the full state 64. */
		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		saved_state = find_user_vfp(thread);

		state->fpscr = saved_state->fpscr;
		for (i = 0; i < max; i++) {
			state->r[i] = saved_state->r[i];
		}

#endif
		/*
		 * NOTE(review): when __ARM_VFP__ is not defined this returns
		 * KERN_SUCCESS with *count unmodified -- confirm that is the
		 * intended contract for no-VFP configurations.
		 */
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_debug_state_t *state;
		arm_debug_state_t *thread_state;

		if (*count < ARM_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state_t *) tstate;
		thread_state = find_debug_state(thread);

		/* No per-thread debug state configured => report all zeros. */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state_t));
		}

		*count = ARM_DEBUG_STATE_COUNT;
		break;
	}

	case ARM_PAGEIN_STATE:{
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
279
280
/*
 * Routine: machine_thread_get_kern_state
 *
 * Like machine_thread_get_state(), but returns the *kernel* register
 * state captured at interrupt time.  Only valid for the current thread
 * and only while this CPU holds an interrupted-state frame.
 */
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
#define machine_thread_get_kern_state_kprintf(x...)	/* kprintf("machine_threa
	                                                 * d_get_kern_state: "
	                                                 * x) */

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:{
		struct arm_thread_state *state;
		struct arm_saved_state *saved_state;
		unsigned int i;
		if (*count < ARM_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_thread_state *) tstate;
		/* Registers as saved when the interrupt was taken. */
		saved_state = getCpuDatap()->cpu_int_state;

		state->sp = saved_state->sp;
		state->lr = saved_state->lr;
		state->pc = saved_state->pc;
		state->cpsr = saved_state->cpsr;
		for (i = 0; i < 13; i++) {	/* r0..r12 */
			state->r[i] = saved_state->r[i];
		}
		machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
		    state->pc, state->r[0], state->sp);
		*count = ARM_THREAD_STATE_COUNT;
		break;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
332
333 extern long long arm_debug_get(void);
334
335 /*
336 * Routine: machine_thread_set_state
337 *
338 */
339 kern_return_t
340 machine_thread_set_state(
341 thread_t thread,
342 thread_flavor_t flavor,
343 thread_state_t tstate,
344 mach_msg_type_number_t count)
345 {
346 #define machine_thread_set_state_kprintf(x...) /* kprintf("machine_thread_set
347 * _state: " x) */
348
349 switch (flavor) {
350 case ARM_THREAD_STATE:{
351 struct arm_thread_state *state;
352 struct arm_saved_state *saved_state;
353 arm_unified_thread_state_t *unified_state;
354 int old_psr;
355
356 if (count < ARM_THREAD_STATE_COUNT) {
357 return KERN_INVALID_ARGUMENT;
358 }
359
360 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
361 unified_state = (arm_unified_thread_state_t *) tstate;
362 state = &unified_state->ts_32;
363 } else {
364 state = (struct arm_thread_state *) tstate;
365 }
366 saved_state = &thread->machine.PcbData;
367 old_psr = saved_state->cpsr;
368 memcpy((char *) saved_state, (char *) state, sizeof(*state));
369 /*
370 * do not allow privileged bits of the PSR to be
371 * changed
372 */
373 saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK);
374
375 machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
376 state->pc, state->r[0], state->sp);
377 break;
378 }
379 case ARM_VFP_STATE:{
380 #if __ARM_VFP__
381 struct arm_vfp_state *state;
382 struct arm_vfpsaved_state *saved_state;
383 unsigned int i;
384 unsigned int max;
385
386 if (count < ARM_VFP_STATE_COUNT) {
387 if (count < ARM_VFPV2_STATE_COUNT) {
388 return KERN_INVALID_ARGUMENT;
389 } else {
390 count = ARM_VFPV2_STATE_COUNT;
391 }
392 }
393
394 if (count == ARM_VFPV2_STATE_COUNT) {
395 max = 32;
396 } else {
397 max = 64;
398 }
399
400 state = (struct arm_vfp_state *) tstate;
401 saved_state = find_user_vfp(thread);
402
403 saved_state->fpscr = state->fpscr;
404 for (i = 0; i < max; i++) {
405 saved_state->r[i] = state->r[i];
406 }
407
408 #endif
409 break;
410 }
411 case ARM_EXCEPTION_STATE:{
412 if (count < ARM_EXCEPTION_STATE_COUNT) {
413 return KERN_INVALID_ARGUMENT;
414 }
415
416 break;
417 }
418 case ARM_DEBUG_STATE:{
419 arm_debug_state_t *state;
420 arm_debug_state_t *thread_state;
421 boolean_t enabled = FALSE;
422 unsigned int i;
423
424 if (count < ARM_DEBUG_STATE_COUNT) {
425 return KERN_INVALID_ARGUMENT;
426 }
427
428 state = (arm_debug_state_t *) tstate;
429 thread_state = find_debug_state(thread);
430
431 if (count < ARM_DEBUG_STATE_COUNT) {
432 return KERN_INVALID_ARGUMENT;
433 }
434
435 for (i = 0; i < 16; i++) {
436 /* do not allow context IDs to be set */
437 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
438 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
439 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
440 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
441 return KERN_PROTECTION_FAILURE;
442 }
443 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
444 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
445 enabled = TRUE;
446 }
447 }
448
449 if (!enabled) {
450 if (thread_state != NULL) {
451 void *pTmp = thread->machine.DebugData;
452 thread->machine.DebugData = NULL;
453 zfree(ads_zone, pTmp);
454 }
455 } else {
456 if (thread_state == NULL) {
457 thread_state = zalloc(ads_zone);
458 }
459
460 for (i = 0; i < 16; i++) {
461 /* set appropriate priviledge; mask out unknown bits */
462 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
463 | ARM_DBGBCR_MATCH_MASK
464 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
465 | ARM_DBG_CR_ENABLE_MASK))
466 | ARM_DBGBCR_TYPE_IVA
467 | ARM_DBG_CR_LINKED_UNLINKED
468 | ARM_DBG_CR_SECURITY_STATE_BOTH
469 | ARM_DBG_CR_MODE_CONTROL_USER;
470 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
471 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
472 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
473 | ARM_DBGWCR_ACCESS_CONTROL_MASK
474 | ARM_DBG_CR_ENABLE_MASK))
475 | ARM_DBG_CR_LINKED_UNLINKED
476 | ARM_DBG_CR_SECURITY_STATE_BOTH
477 | ARM_DBG_CR_MODE_CONTROL_USER;
478 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
479 }
480
481 if (thread->machine.DebugData == NULL) {
482 thread->machine.DebugData = thread_state;
483 }
484 }
485
486 if (thread == current_thread()) {
487 arm_debug_set(thread_state);
488 }
489
490 break;
491 }
492
493 default:
494 return KERN_INVALID_ARGUMENT;
495 }
496 return KERN_SUCCESS;
497 }
498
499 mach_vm_address_t
500 machine_thread_pc(thread_t thread)
501 {
502 struct arm_saved_state *ss = get_user_regs(thread);
503 return (mach_vm_address_t)get_saved_state_pc(ss);
504 }
505
506 void
507 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
508 {
509 set_saved_state_pc(get_user_regs(thread), (register_t)pc);
510 }
511
512 /*
513 * Routine: machine_thread_state_initialize
514 *
515 */
516 kern_return_t
517 machine_thread_state_initialize(
518 thread_t thread)
519 {
520 struct arm_saved_state *savestate;
521
522 savestate = (struct arm_saved_state *) &thread->machine.PcbData;
523 bzero((char *) savestate, sizeof(struct arm_saved_state));
524 savestate->cpsr = PSR_USERDFLT;
525
526 #if __ARM_VFP__
527 vfp_state_initialize(&thread->machine.PcbData.VFPdata);
528 #endif
529
530 thread->machine.DebugData = NULL;
531
532 return KERN_SUCCESS;
533 }
534
#if __ARM_VFP__
/*
 * Reset a VFP save area to the default "RunFast" configuration:
 * flush-to-zero, default-NaN, and no enabled exception traps.
 *
 * On the VFP11 this permits floating point without trapping to support
 * code (which we do not provide); on the Cortex-A8 it allows use of the
 * much faster NFP pipeline for single-precision operations.
 */
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
	bzero(vfp_state, sizeof(*vfp_state));
	vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
555
556
557 /*
558 * Routine: machine_thread_dup
559 *
560 */
561 kern_return_t
562 machine_thread_dup(
563 thread_t self,
564 thread_t target,
565 __unused boolean_t is_corpse)
566 {
567 struct arm_saved_state *self_saved_state;
568 struct arm_saved_state *target_saved_state;
569
570 #if __ARM_VFP__
571 struct arm_vfpsaved_state *self_vfp_state;
572 struct arm_vfpsaved_state *target_vfp_state;
573 #endif
574
575 target->machine.cthread_self = self->machine.cthread_self;
576
577 self_saved_state = &self->machine.PcbData;
578 target_saved_state = &target->machine.PcbData;
579 bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
580
581 #if __ARM_VFP__
582 self_vfp_state = &self->machine.PcbData.VFPdata;
583 target_vfp_state = &target->machine.PcbData.VFPdata;
584 bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state));
585 #endif
586
587 return KERN_SUCCESS;
588 }
589
590 /*
591 * Routine: get_user_regs
592 *
593 */
594 struct arm_saved_state *
595 get_user_regs(
596 thread_t thread)
597 {
598 return &thread->machine.PcbData;
599 }
600
601 /*
602 * Routine: find_user_regs
603 *
604 */
605 struct arm_saved_state *
606 find_user_regs(
607 thread_t thread)
608 {
609 return get_user_regs(thread);
610 }
611
612 /*
613 * Routine: find_kern_regs
614 *
615 */
616 struct arm_saved_state *
617 find_kern_regs(
618 thread_t thread)
619 {
620 /*
621 * This works only for an interrupted kernel thread
622 */
623 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
624 return (struct arm_saved_state *) NULL;
625 } else {
626 return getCpuDatap()->cpu_int_state;
627 }
628 }
629
#if __ARM_VFP__
/*
 * Return the thread's user-mode floating point context.  The VFP save
 * area is embedded in the PCB, so this always yields a valid pointer.
 */
struct arm_vfpsaved_state *
find_user_vfp(
	thread_t thread)
{
	return &thread->machine.PcbData.VFPdata;
}
#endif /* __ARM_VFP__ */
643
644 arm_debug_state_t *
645 find_debug_state(
646 thread_t thread)
647 {
648 return thread->machine.DebugData;
649 }
650
651 /*
652 * Routine: thread_userstack
653 *
654 */
655 kern_return_t
656 thread_userstack(
657 __unused thread_t thread,
658 int flavor,
659 thread_state_t tstate,
660 unsigned int count,
661 mach_vm_offset_t * user_stack,
662 int *customstack,
663 __unused boolean_t is64bit
664 )
665 {
666 switch (flavor) {
667 case ARM_THREAD_STATE:
668 {
669 struct arm_thread_state *state;
670
671
672 if (count < ARM_THREAD_STATE_COUNT) {
673 return KERN_INVALID_ARGUMENT;
674 }
675
676 if (customstack) {
677 *customstack = 0;
678 }
679 state = (struct arm_thread_state *) tstate;
680
681 if (state->sp) {
682 *user_stack = CAST_USER_ADDR_T(state->sp);
683 if (customstack) {
684 *customstack = 1;
685 }
686 } else {
687 *user_stack = CAST_USER_ADDR_T(USRSTACK);
688 }
689 }
690 break;
691
692 default:
693 return KERN_INVALID_ARGUMENT;
694 }
695
696 return KERN_SUCCESS;
697 }
698
699 /*
700 * thread_userstackdefault:
701 *
702 * Return the default stack location for the
703 * thread, if otherwise unknown.
704 */
705 kern_return_t
706 thread_userstackdefault(
707 mach_vm_offset_t *default_user_stack,
708 boolean_t is64bit __unused)
709 {
710 *default_user_stack = USRSTACK;
711
712 return KERN_SUCCESS;
713 }
714
715 /*
716 * Routine: thread_setuserstack
717 *
718 */
719 void
720 thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
721 {
722 struct arm_saved_state *sv;
723
724 #define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstac
725 * k: " x) */
726
727 sv = get_user_regs(thread);
728
729 sv->sp = user_stack;
730
731 thread_setuserstack_kprintf("stack %x\n", sv->sp);
732
733 return;
734 }
735
736 /*
737 * Routine: thread_adjuserstack
738 *
739 */
740 user_addr_t
741 thread_adjuserstack(thread_t thread, int adjust)
742 {
743 struct arm_saved_state *sv;
744
745 sv = get_user_regs(thread);
746
747 sv->sp += adjust;
748
749 return sv->sp;
750 }
751
752 /*
753 * Routine: thread_setentrypoint
754 *
755 */
756 void
757 thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
758 {
759 struct arm_saved_state *sv;
760
761 #define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoi
762 * nt: " x) */
763
764 sv = get_user_regs(thread);
765
766 sv->pc = entry;
767
768 thread_setentrypoint_kprintf("entry %x\n", sv->pc);
769
770 return;
771 }
772
773 /*
774 * Routine: thread_entrypoint
775 *
776 */
777 kern_return_t
778 thread_entrypoint(
779 __unused thread_t thread,
780 int flavor,
781 thread_state_t tstate,
782 __unused unsigned int count,
783 mach_vm_offset_t * entry_point
784 )
785 {
786 switch (flavor) {
787 case ARM_THREAD_STATE:
788 {
789 struct arm_thread_state *state;
790
791 if (count != ARM_THREAD_STATE_COUNT) {
792 return KERN_INVALID_ARGUMENT;
793 }
794
795 state = (struct arm_thread_state *) tstate;
796
797 /*
798 * If a valid entry point is specified, use it.
799 */
800 if (state->pc) {
801 *entry_point = CAST_USER_ADDR_T(state->pc);
802 } else {
803 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
804 }
805 }
806 break;
807
808 default:
809 return KERN_INVALID_ARGUMENT;
810 }
811
812 return KERN_SUCCESS;
813 }
814
815
816 /*
817 * Routine: thread_set_child
818 *
819 */
820 void
821 thread_set_child(
822 thread_t child,
823 int pid)
824 {
825 struct arm_saved_state *child_state;
826
827 child_state = get_user_regs(child);
828
829 child_state->r[0] = (uint_t) pid;
830 child_state->r[1] = 1ULL;
831 }
832
833
834 /*
835 * Routine: thread_set_parent
836 *
837 */
838 void
839 thread_set_parent(
840 thread_t parent,
841 int pid)
842 {
843 struct arm_saved_state *parent_state;
844
845 parent_state = get_user_regs(parent);
846
847 parent_state->r[0] = pid;
848 parent_state->r[1] = 0;
849 }
850
851
/*
 * Machine context snapshot captured by act_thread_csave() and restored
 * by act_thread_catt().
 */
struct arm_act_context {
	struct arm_saved_state ss;	/* integer register state */
#if __ARM_VFP__
	struct arm_vfpsaved_state vfps;	/* floating-point state */
#endif
};
858
859 /*
860 * Routine: act_thread_csave
861 *
862 */
863 void *
864 act_thread_csave(void)
865 {
866 struct arm_act_context *ic;
867 kern_return_t kret;
868 unsigned int val;
869
870 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
871
872 if (ic == (struct arm_act_context *) NULL) {
873 return (void *) 0;
874 }
875
876 val = ARM_THREAD_STATE_COUNT;
877 kret = machine_thread_get_state(current_thread(),
878 ARM_THREAD_STATE,
879 (thread_state_t) &ic->ss,
880 &val);
881 if (kret != KERN_SUCCESS) {
882 kfree(ic, sizeof(struct arm_act_context));
883 return (void *) 0;
884 }
885 #if __ARM_VFP__
886 val = ARM_VFP_STATE_COUNT;
887 kret = machine_thread_get_state(current_thread(),
888 ARM_VFP_STATE,
889 (thread_state_t) &ic->vfps,
890 &val);
891 if (kret != KERN_SUCCESS) {
892 kfree(ic, sizeof(struct arm_act_context));
893 return (void *) 0;
894 }
895 #endif
896 return ic;
897 }
898
/*
 * Routine: act_thread_catt
 *
 * Restore a machine context previously captured by act_thread_csave()
 * into the current thread, then free the context buffer.  A NULL
 * context is ignored; the buffer is freed even if restoring fails.
 */
void
act_thread_catt(void *ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;

	ic = (struct arm_act_context *) ctx;

	if (ic == (struct arm_act_context *) NULL) {
		return;
	}

	kret = machine_thread_set_state(current_thread(),
	    ARM_THREAD_STATE,
	    (thread_state_t) &ic->ss,
	    ARM_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}

#if __ARM_VFP__
	kret = machine_thread_set_state(current_thread(),
	    ARM_VFP_STATE,
	    (thread_state_t) &ic->vfps,
	    ARM_VFP_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}
#endif
out:
	/* Always release the snapshot, whether or not the restore succeeded. */
	kfree(ic, sizeof(struct arm_act_context));
}
935
/*
 * Routine: act_thread_cfree
 *
 */
940 void
941 act_thread_cfree(void *ctx)
942 {
943 kfree(ctx, sizeof(struct arm_act_context));
944 }
945
/*
 * Install a workqueue thread's initial 32-bit register state.  When the
 * target is not the calling thread, the update is done under the thread
 * lock at splsched to keep the scheduler from observing a partial state.
 */
kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	thread_t curth = current_thread();
	spl_t s = 0;

	saved_state = &thread->machine.PcbData;
	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state->cpsr = PSR_USERDFLT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}