/*
 * osfmk/arm/status.c — from xnu-6153.121.1
 * (listing header from a git.saurik.com mirror of apple/xnu.git)
 */
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/proc_reg.h>
37
/*
 * Legacy VFPv2 state layout: 32 single-precision registers plus FPSCR.
 * Retained so callers passing the older, shorter VFP flavor count are
 * still accepted (see the ARM_VFPV2_STATE_COUNT checks in
 * machine_thread_get_state / machine_thread_set_state below).
 */
struct arm_vfpv2_state
{
	__uint32_t __r[32];
	__uint32_t __fpscr;

};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of an arm_vfpv2_state_t in 32-bit words. */
#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
49
50
51 /*
52 * Forward definitions
53 */
54 void
55 thread_set_child(thread_t child, int pid);
56
57 void
58 thread_set_parent(thread_t parent, int pid);
59
/*
 * Maps state flavor to number of 32-bit words in the state:
 * indexed by the ARM_*_STATE flavor constants; slot 0 (FLAVOR_LIST)
 * is not a real flavor and holds 0.
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	[ARM_THREAD_STATE] = ARM_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
72
73 extern zone_t ads_zone;
74
75 kern_return_t
76 machine_thread_state_convert_to_user(
77 __unused thread_t thread,
78 __unused thread_flavor_t flavor,
79 __unused thread_state_t tstate,
80 __unused mach_msg_type_number_t *count)
81 {
82 // No conversion to userspace representation on this platform
83 return KERN_SUCCESS;
84 }
85
86 kern_return_t
87 machine_thread_state_convert_from_user(
88 __unused thread_t thread,
89 __unused thread_flavor_t flavor,
90 __unused thread_state_t tstate,
91 __unused mach_msg_type_number_t count)
92 {
93 // No conversion from userspace representation on this platform
94 return KERN_SUCCESS;
95 }
96
97 kern_return_t
98 machine_thread_siguctx_pointer_convert_to_user(
99 __unused thread_t thread,
100 __unused user_addr_t *uctxp)
101 {
102 // No conversion to userspace representation on this platform
103 return KERN_SUCCESS;
104 }
105
106 kern_return_t
107 machine_thread_function_pointers_convert_from_user(
108 __unused thread_t thread,
109 __unused user_addr_t *fptrs,
110 __unused uint32_t count)
111 {
112 // No conversion from userspace representation on this platform
113 return KERN_SUCCESS;
114 }
115
/*
 * Routine: machine_thread_get_state
 *
 * Copy the requested flavor of machine state for 'thread' into the
 * caller-supplied buffer 'tstate'.  On entry *count is the capacity of
 * the buffer in 32-bit words; on success it is updated to the number of
 * words actually written.  Returns KERN_INVALID_ARGUMENT for an unknown
 * flavor or a buffer that is too small.
 */
kern_return_t
machine_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{

#define machine_thread_get_state_kprintf(x...)	/* kprintf("machine_thread_get_state: " x) */

	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Legacy flavor list: the four pre-10.15 flavors. */
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		/* 10.15+ flavor list: adds ARM_PAGEIN_STATE. */
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:{
		struct arm_thread_state *state;
		struct arm_saved_state *saved_state;
		arm_unified_thread_state_t *unified_state;

		unsigned int i;
		if (*count < ARM_THREAD_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/*
		 * A caller passing the unified count gets the wrapped
		 * arm_unified_thread_state_t form (flavor header +
		 * ts_32 payload); otherwise the raw 32-bit state is
		 * written directly into tstate.
		 */
		if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) {
			unified_state = (arm_unified_thread_state_t *) tstate;
			state = &unified_state->ts_32;
			unified_state->ash.flavor = ARM_THREAD_STATE32;
			unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		} else {
			state = (struct arm_thread_state *) tstate;
		}
		saved_state = &thread->machine.PcbData;

		state->sp = saved_state->sp;
		state->lr = saved_state->lr;
		state->pc = saved_state->pc;
		state->cpsr = saved_state->cpsr;
		for (i = 0; i < 13; i++)	/* r0-r12 */
			state->r[i] = saved_state->r[i];
		machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
		    state->pc, state->r[0], state->sp);

		/* Unified callers keep their count; raw callers get the raw count. */
		if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) {
			*count = ARM_THREAD_STATE_COUNT;
		}
		break;
	}
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (struct arm_exception_state *) tstate;
		saved_state = &thread->machine.PcbData;

		state->exception = saved_state->exception;
		state->fsr = saved_state->fsr;	/* fault status */
		state->far = saved_state->far;	/* fault address */

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_VFP_STATE:{
#if __ARM_VFP__
		struct arm_vfp_state *state;
		struct arm_vfpsaved_state *saved_state;
		unsigned int i;
		unsigned int max;

		/* Accept the shorter legacy VFPv2 buffer (32 registers). */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);
			else
				*count = ARM_VFPV2_STATE_COUNT;
		}

		if (*count == ARM_VFPV2_STATE_COUNT)
			max = 32;
		else
			max = 64;

		state = (struct arm_vfp_state *) tstate;
		saved_state = find_user_vfp(thread);

		state->fpscr = saved_state->fpscr;
		for (i = 0; i < max; i++)
			state->r[i] = saved_state->r[i];

#endif
		/* NOTE(review): without __ARM_VFP__, *count is left unchanged
		 * and success is returned with tstate untouched — presumably
		 * intentional on non-VFP builds; confirm. */
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_debug_state_t *state;
		arm_debug_state_t *thread_state;

		if (*count < ARM_DEBUG_STATE_COUNT)
			return (KERN_INVALID_ARGUMENT);

		state = (arm_debug_state_t *) tstate;
		thread_state = find_debug_state(thread);

		/* A thread with no per-thread debug state reads back as all zeros. */
		if (thread_state == NULL)
			bzero(state, sizeof(arm_debug_state_t));
		else
			bcopy(thread_state, state, sizeof(arm_debug_state_t));

		*count = ARM_DEBUG_STATE_COUNT;
		break;
	}

	case ARM_PAGEIN_STATE:{
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return (KERN_INVALID_ARGUMENT);
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
272
273
274 /*
275 * Routine: machine_thread_get_kern_state
276 *
277 */
278 kern_return_t
279 machine_thread_get_kern_state(
280 thread_t thread,
281 thread_flavor_t flavor,
282 thread_state_t tstate,
283 mach_msg_type_number_t * count)
284 {
285
286 #define machine_thread_get_kern_state_kprintf(x...) /* kprintf("machine_threa
287 * d_get_kern_state: "
288 * x) */
289
290 /*
291 * This works only for an interrupted kernel thread
292 */
293 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
294 return KERN_FAILURE;
295
296 switch (flavor) {
297 case ARM_THREAD_STATE:{
298 struct arm_thread_state *state;
299 struct arm_saved_state *saved_state;
300 unsigned int i;
301 if (*count < ARM_THREAD_STATE_COUNT)
302 return (KERN_INVALID_ARGUMENT);
303
304 state = (struct arm_thread_state *) tstate;
305 saved_state = getCpuDatap()->cpu_int_state;
306
307 state->sp = saved_state->sp;
308 state->lr = saved_state->lr;
309 state->pc = saved_state->pc;
310 state->cpsr = saved_state->cpsr;
311 for (i = 0; i < 13; i++)
312 state->r[i] = saved_state->r[i];
313 machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
314 state->pc, state->r[0], state->sp);
315 *count = ARM_THREAD_STATE_COUNT;
316 break;
317 }
318 default:
319 return (KERN_INVALID_ARGUMENT);
320 }
321 return (KERN_SUCCESS);
322 }
323
324 extern long long arm_debug_get(void);
325
326 /*
327 * Routine: machine_thread_set_state
328 *
329 */
330 kern_return_t
331 machine_thread_set_state(
332 thread_t thread,
333 thread_flavor_t flavor,
334 thread_state_t tstate,
335 mach_msg_type_number_t count)
336 {
337
338 #define machine_thread_set_state_kprintf(x...) /* kprintf("machine_thread_set
339 * _state: " x) */
340
341 switch (flavor) {
342 case ARM_THREAD_STATE:{
343 struct arm_thread_state *state;
344 struct arm_saved_state *saved_state;
345 arm_unified_thread_state_t *unified_state;
346 int old_psr;
347
348 if (count < ARM_THREAD_STATE_COUNT)
349 return (KERN_INVALID_ARGUMENT);
350
351 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
352 unified_state = (arm_unified_thread_state_t *) tstate;
353 state = &unified_state->ts_32;
354 } else {
355 state = (struct arm_thread_state *) tstate;
356 }
357 saved_state = &thread->machine.PcbData;
358 old_psr = saved_state->cpsr;
359 memcpy((char *) saved_state, (char *) state, sizeof(*state));
360 /*
361 * do not allow privileged bits of the PSR to be
362 * changed
363 */
364 saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK);
365
366 machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
367 state->pc, state->r[0], state->sp);
368 break;
369 }
370 case ARM_VFP_STATE:{
371 #if __ARM_VFP__
372 struct arm_vfp_state *state;
373 struct arm_vfpsaved_state *saved_state;
374 unsigned int i;
375 unsigned int max;
376
377 if (count < ARM_VFP_STATE_COUNT) {
378 if (count < ARM_VFPV2_STATE_COUNT)
379 return (KERN_INVALID_ARGUMENT);
380 else
381 count = ARM_VFPV2_STATE_COUNT;
382 }
383
384 if (count == ARM_VFPV2_STATE_COUNT)
385 max = 32;
386 else
387 max = 64;
388
389 state = (struct arm_vfp_state *) tstate;
390 saved_state = find_user_vfp(thread);
391
392 saved_state->fpscr = state->fpscr;
393 for (i = 0; i < max; i++)
394 saved_state->r[i] = state->r[i];
395
396 #endif
397 break;
398 }
399 case ARM_EXCEPTION_STATE:{
400
401 if (count < ARM_EXCEPTION_STATE_COUNT)
402 return (KERN_INVALID_ARGUMENT);
403
404 break;
405 }
406 case ARM_DEBUG_STATE:{
407 arm_debug_state_t *state;
408 arm_debug_state_t *thread_state;
409 boolean_t enabled = FALSE;
410 unsigned int i;
411
412 if (count < ARM_DEBUG_STATE_COUNT)
413 return (KERN_INVALID_ARGUMENT);
414
415 state = (arm_debug_state_t *) tstate;
416 thread_state = find_debug_state(thread);
417
418 if (count < ARM_DEBUG_STATE_COUNT)
419 return (KERN_INVALID_ARGUMENT);
420
421 for (i = 0; i < 16; i++) {
422 /* do not allow context IDs to be set */
423 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
424 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
425 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
426 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
427 return KERN_PROTECTION_FAILURE;
428 }
429 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
430 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
431 enabled = TRUE;
432 }
433 }
434
435 if (!enabled) {
436 if (thread_state != NULL)
437 {
438 void *pTmp = thread->machine.DebugData;
439 thread->machine.DebugData = NULL;
440 zfree(ads_zone, pTmp);
441 }
442 }
443 else
444 {
445 if (thread_state == NULL)
446 thread_state = zalloc(ads_zone);
447
448 for (i = 0; i < 16; i++) {
449 /* set appropriate priviledge; mask out unknown bits */
450 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
451 | ARM_DBGBCR_MATCH_MASK
452 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
453 | ARM_DBG_CR_ENABLE_MASK))
454 | ARM_DBGBCR_TYPE_IVA
455 | ARM_DBG_CR_LINKED_UNLINKED
456 | ARM_DBG_CR_SECURITY_STATE_BOTH
457 | ARM_DBG_CR_MODE_CONTROL_USER;
458 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
459 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
460 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
461 | ARM_DBGWCR_ACCESS_CONTROL_MASK
462 | ARM_DBG_CR_ENABLE_MASK))
463 | ARM_DBG_CR_LINKED_UNLINKED
464 | ARM_DBG_CR_SECURITY_STATE_BOTH
465 | ARM_DBG_CR_MODE_CONTROL_USER;
466 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
467 }
468
469 if (thread->machine.DebugData == NULL)
470 thread->machine.DebugData = thread_state;
471 }
472
473 if (thread == current_thread()) {
474 arm_debug_set(thread_state);
475 }
476
477 break;
478 }
479
480 default:
481 return (KERN_INVALID_ARGUMENT);
482 }
483 return (KERN_SUCCESS);
484 }
485
486 mach_vm_address_t
487 machine_thread_pc(thread_t thread)
488 {
489 struct arm_saved_state *ss = get_user_regs(thread);
490 return (mach_vm_address_t)get_saved_state_pc(ss);
491 }
492
493 void
494 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
495 {
496 set_saved_state_pc(get_user_regs(thread), (register_t)pc);
497 }
498
499 /*
500 * Routine: machine_thread_state_initialize
501 *
502 */
503 kern_return_t
504 machine_thread_state_initialize(
505 thread_t thread)
506 {
507 struct arm_saved_state *savestate;
508
509 savestate = (struct arm_saved_state *) &thread->machine.PcbData;
510 bzero((char *) savestate, sizeof(struct arm_saved_state));
511 savestate->cpsr = PSR_USERDFLT;
512
513 #if __ARM_VFP__
514 vfp_state_initialize(&thread->machine.PcbData.VFPdata);
515 #endif
516
517 thread->machine.DebugData = NULL;
518
519 return KERN_SUCCESS;
520 }
521
#if __ARM_VFP__
/*
 * Routine: vfp_state_initialize
 *
 * Reset a VFP save area: all data registers zeroed, FPSCR set to the
 * platform default described below.
 */
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
	/* Set default VFP state to RunFast mode:
	 *
	 * - flush-to-zero mode
	 * - default NaN mode
	 * - no enabled exceptions
	 *
	 * On the VFP11, this allows the use of floating point without
	 * trapping to support code, which we do not provide. With
	 * the Cortex-A8, this allows the use of the (much faster) NFP
	 * pipeline for single-precision operations.
	 */

	bzero(vfp_state, sizeof(*vfp_state));
	vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
542
543
544 /*
545 * Routine: machine_thread_dup
546 *
547 */
548 kern_return_t
549 machine_thread_dup(
550 thread_t self,
551 thread_t target,
552 __unused boolean_t is_corpse)
553 {
554 struct arm_saved_state *self_saved_state;
555 struct arm_saved_state *target_saved_state;
556
557 #if __ARM_VFP__
558 struct arm_vfpsaved_state *self_vfp_state;
559 struct arm_vfpsaved_state *target_vfp_state;
560 #endif
561
562 target->machine.cthread_self = self->machine.cthread_self;
563
564 self_saved_state = &self->machine.PcbData;
565 target_saved_state = &target->machine.PcbData;
566 bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
567
568 #if __ARM_VFP__
569 self_vfp_state = &self->machine.PcbData.VFPdata;
570 target_vfp_state = &target->machine.PcbData.VFPdata;
571 bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state));
572 #endif
573
574 return (KERN_SUCCESS);
575 }
576
577 /*
578 * Routine: get_user_regs
579 *
580 */
581 struct arm_saved_state *
582 get_user_regs(
583 thread_t thread)
584 {
585 return (&thread->machine.PcbData);
586 }
587
588 /*
589 * Routine: find_user_regs
590 *
591 */
592 struct arm_saved_state *
593 find_user_regs(
594 thread_t thread)
595 {
596 return get_user_regs(thread);
597 }
598
599 /*
600 * Routine: find_kern_regs
601 *
602 */
603 struct arm_saved_state *
604 find_kern_regs(
605 thread_t thread)
606 {
607 /*
608 * This works only for an interrupted kernel thread
609 */
610 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
611 return ((struct arm_saved_state *) NULL);
612 else
613 return (getCpuDatap()->cpu_int_state);
614
615 }
616
#if __ARM_VFP__
/*
 * Find the user state floating point context. If there is no user state context,
 * we just return a 0.
 */

struct arm_vfpsaved_state *
find_user_vfp(
	thread_t thread)
{
	/* VFP state lives inline in the PCB, so this never returns 0 here. */
	return &thread->machine.PcbData.VFPdata;
}
#endif /* __ARM_VFP__ */
630
631 arm_debug_state_t *
632 find_debug_state(
633 thread_t thread)
634 {
635 return thread->machine.DebugData;
636 }
637
638 /*
639 * Routine: thread_userstack
640 *
641 */
642 kern_return_t
643 thread_userstack(
644 __unused thread_t thread,
645 int flavor,
646 thread_state_t tstate,
647 unsigned int count,
648 mach_vm_offset_t * user_stack,
649 int *customstack,
650 __unused boolean_t is64bit
651 )
652 {
653
654 switch (flavor) {
655 case ARM_THREAD_STATE:
656 {
657 struct arm_thread_state *state;
658
659
660 if (count < ARM_THREAD_STATE_COUNT)
661 return (KERN_INVALID_ARGUMENT);
662
663 if (customstack)
664 *customstack = 0;
665 state = (struct arm_thread_state *) tstate;
666
667 if (state->sp) {
668 *user_stack = CAST_USER_ADDR_T(state->sp);
669 if (customstack)
670 *customstack = 1;
671 } else {
672 *user_stack = CAST_USER_ADDR_T(USRSTACK);
673 }
674 }
675 break;
676
677 default:
678 return (KERN_INVALID_ARGUMENT);
679 }
680
681 return (KERN_SUCCESS);
682 }
683
684 /*
685 * thread_userstackdefault:
686 *
687 * Return the default stack location for the
688 * thread, if otherwise unknown.
689 */
690 kern_return_t
691 thread_userstackdefault(
692 mach_vm_offset_t *default_user_stack,
693 boolean_t is64bit __unused)
694 {
695 *default_user_stack = USRSTACK;
696
697 return (KERN_SUCCESS);
698 }
699
700 /*
701 * Routine: thread_setuserstack
702 *
703 */
704 void
705 thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
706 {
707 struct arm_saved_state *sv;
708
709 #define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstac
710 * k: " x) */
711
712 sv = get_user_regs(thread);
713
714 sv->sp = user_stack;
715
716 thread_setuserstack_kprintf("stack %x\n", sv->sp);
717
718 return;
719 }
720
721 /*
722 * Routine: thread_adjuserstack
723 *
724 */
725 uint64_t
726 thread_adjuserstack(thread_t thread, int adjust)
727 {
728 struct arm_saved_state *sv;
729
730 sv = get_user_regs(thread);
731
732 sv->sp += adjust;
733
734 return sv->sp;
735 }
736
737 /*
738 * Routine: thread_setentrypoint
739 *
740 */
741 void
742 thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
743 {
744 struct arm_saved_state *sv;
745
746 #define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoi
747 * nt: " x) */
748
749 sv = get_user_regs(thread);
750
751 sv->pc = entry;
752
753 thread_setentrypoint_kprintf("entry %x\n", sv->pc);
754
755 return;
756 }
757
758 /*
759 * Routine: thread_entrypoint
760 *
761 */
762 kern_return_t
763 thread_entrypoint(
764 __unused thread_t thread,
765 int flavor,
766 thread_state_t tstate,
767 __unused unsigned int count,
768 mach_vm_offset_t * entry_point
769 )
770 {
771 switch (flavor) {
772 case ARM_THREAD_STATE:
773 {
774 struct arm_thread_state *state;
775
776 state = (struct arm_thread_state *) tstate;
777
778 /*
779 * If a valid entry point is specified, use it.
780 */
781 if (state->pc) {
782 *entry_point = CAST_USER_ADDR_T(state->pc);
783 } else {
784 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
785 }
786 }
787 break;
788
789 default:
790 return (KERN_INVALID_ARGUMENT);
791 }
792
793 return (KERN_SUCCESS);
794 }
795
796
797 /*
798 * Routine: thread_set_child
799 *
800 */
801 void
802 thread_set_child(
803 thread_t child,
804 int pid)
805 {
806 struct arm_saved_state *child_state;
807
808 child_state = get_user_regs(child);
809
810 child_state->r[0] = (uint_t) pid;
811 child_state->r[1] = 1ULL;
812 }
813
814
815 /*
816 * Routine: thread_set_parent
817 *
818 */
819 void
820 thread_set_parent(
821 thread_t parent,
822 int pid)
823 {
824 struct arm_saved_state *parent_state;
825
826 parent_state = get_user_regs(parent);
827
828 parent_state->r[0] = pid;
829 parent_state->r[1] = 0;
830 }
831
832
/* Snapshot captured by act_thread_csave() and re-applied by
 * act_thread_catt(): integer state plus (when built with VFP)
 * the floating-point state. */
struct arm_act_context {
	struct arm_saved_state ss;
#if __ARM_VFP__
	struct arm_vfpsaved_state vfps;
#endif
};
839
840 /*
841 * Routine: act_thread_csave
842 *
843 */
844 void *
845 act_thread_csave(void)
846 {
847 struct arm_act_context *ic;
848 kern_return_t kret;
849 unsigned int val;
850
851 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
852
853 if (ic == (struct arm_act_context *) NULL)
854 return ((void *) 0);
855
856 val = ARM_THREAD_STATE_COUNT;
857 kret = machine_thread_get_state(current_thread(),
858 ARM_THREAD_STATE,
859 (thread_state_t) & ic->ss,
860 &val);
861 if (kret != KERN_SUCCESS) {
862 kfree(ic, sizeof(struct arm_act_context));
863 return ((void *) 0);
864 }
865 #if __ARM_VFP__
866 val = ARM_VFP_STATE_COUNT;
867 kret = machine_thread_get_state(current_thread(),
868 ARM_VFP_STATE,
869 (thread_state_t) & ic->vfps,
870 &val);
871 if (kret != KERN_SUCCESS) {
872 kfree(ic, sizeof(struct arm_act_context));
873 return ((void *) 0);
874 }
875 #endif
876 return (ic);
877 }
878
879 /*
880 * Routine: act_thread_catt
881 *
882 */
883 void
884 act_thread_catt(void *ctx)
885 {
886 struct arm_act_context *ic;
887 kern_return_t kret;
888
889 ic = (struct arm_act_context *) ctx;
890
891 if (ic == (struct arm_act_context *) NULL)
892 return;
893
894 kret = machine_thread_set_state(current_thread(),
895 ARM_THREAD_STATE,
896 (thread_state_t) & ic->ss,
897 ARM_THREAD_STATE_COUNT);
898 if (kret != KERN_SUCCESS)
899 goto out;
900
901 #if __ARM_VFP__
902 kret = machine_thread_set_state(current_thread(),
903 ARM_VFP_STATE,
904 (thread_state_t) & ic->vfps,
905 ARM_VFP_STATE_COUNT);
906 if (kret != KERN_SUCCESS)
907 goto out;
908 #endif
909 out:
910 kfree(ic, sizeof(struct arm_act_context));
911 }
912
/*
 * Routine: act_thread_cfree
 *
 * Free a context snapshot from act_thread_csave() without applying it.
 * (The original header comment said "act_thread_catt" — copy/paste error.)
 */
void
act_thread_cfree(void *ctx)
{
	kfree(ctx, sizeof(struct arm_act_context));
}
922
923 kern_return_t
924 thread_set_wq_state32(thread_t thread, thread_state_t tstate)
925 {
926 arm_thread_state_t *state;
927 struct arm_saved_state *saved_state;
928 thread_t curth = current_thread();
929 spl_t s=0;
930
931 saved_state = &thread->machine.PcbData;
932 state = (arm_thread_state_t *)tstate;
933
934 if (curth != thread) {
935 s = splsched();
936 thread_lock(thread);
937 }
938
939 /*
940 * do not zero saved_state, it can be concurrently accessed
941 * and zero is not a valid state for some of the registers,
942 * like sp.
943 */
944 thread_state32_to_saved_state(state, saved_state);
945 saved_state->cpsr = PSR_USERDFLT;
946
947 if (curth != thread) {
948 thread_unlock(thread);
949 splx(s);
950 }
951
952 return KERN_SUCCESS;
953 }