]> git.saurik.com Git - apple/xnu.git/blob - osfmk/arm/status.c
bdfcf5a6ba531e72109febe813d29e58ffb6d4ce
[apple/xnu.git] / osfmk / arm / status.c
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/proc_reg.h>
37
/*
 * Legacy VFPv2 register-file layout: 32 32-bit registers plus the
 * FPSCR control/status word.  Accepted as a shorter alternative to
 * the full arm_vfp_state in the get/set state paths below.
 */
struct arm_vfpv2_state
{
	__uint32_t        __r[32];
	__uint32_t        __fpscr;

};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t expressed in 32-bit words. */
#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
49
50
51 /*
52 * Forward definitions
53 */
54 void
55 thread_set_child(thread_t child, int pid);
56
57 void
58 thread_set_parent(thread_t parent, int pid);
59
/*
 * Maps state flavor to number of words in the state:
 * indexed by the ARM_*_STATE flavor constants; entry 0 (FLAVOR_LIST)
 * has no fixed size.
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	[ARM_THREAD_STATE] = ARM_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
72
73 extern zone_t ads_zone;
74
/*
 * Routine:	machine_thread_state_convert_to_user
 *
 * Convert a thread state to its user-visible representation.
 * On this platform the kernel and user representations are the
 * same, so this is a no-op that always succeeds.
 */
kern_return_t
machine_thread_state_convert_to_user(
			 __unused thread_t thread,
			 __unused thread_flavor_t flavor,
			 __unused thread_state_t tstate,
			 __unused mach_msg_type_number_t *count)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}
85
/*
 * Routine:	machine_thread_state_convert_from_user
 *
 * Convert a user-supplied thread state to the kernel-internal
 * representation.  No conversion is needed on this platform;
 * always succeeds.
 */
kern_return_t
machine_thread_state_convert_from_user(
			 __unused thread_t thread,
			 __unused thread_flavor_t flavor,
			 __unused thread_state_t tstate,
			 __unused mach_msg_type_number_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}
96
/*
 * Routine:	machine_thread_siguctx_pointer_convert_to_user
 *
 * Convert a signal ucontext pointer for handoff to user space.
 * No translation is required on this platform; always succeeds.
 */
kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
			 __unused thread_t thread,
			 __unused user_addr_t *uctxp)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}
105
/*
 * Routine:	machine_thread_function_pointers_convert_from_user
 *
 * Convert user-supplied function pointers to their kernel-usable
 * form.  No translation is required on this platform; always
 * succeeds.
 */
kern_return_t
machine_thread_function_pointers_convert_from_user(
			 __unused thread_t thread,
			 __unused user_addr_t *fptrs,
			 __unused uint32_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}
115
/*
 * Routine:	machine_thread_get_state
 *
 * Copy the requested state flavor out of "thread" into the caller
 * buffer "tstate".  On entry *count is the buffer capacity in
 * 32-bit words; on success it is updated to the number of words
 * actually written (except for the unified thread-state form,
 * whose count is left as passed — see below).
 */
kern_return_t
machine_thread_get_state(
			 thread_t thread,
			 thread_flavor_t flavor,
			 thread_state_t tstate,
			 mach_msg_type_number_t * count)
{

#define machine_thread_get_state_kprintf(x...)	/* kprintf("machine_thread_get_state: " x) */

	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Classic flavor enumeration (pre-10.15 clients). */
		if (*count < 4)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		/* Newer enumeration that also advertises ARM_PAGEIN_STATE. */
		if (*count < 5)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:{
			struct arm_thread_state *state;
			struct arm_saved_state *saved_state;
			arm_unified_thread_state_t *unified_state;

			unsigned int i;
			if (*count < ARM_THREAD_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			/*
			 * A caller passing the unified count gets the 32-bit
			 * state wrapped in an arm_unified_thread_state_t with
			 * the header (flavor/count) filled in; otherwise the
			 * bare structure is written directly.
			 */
			if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) {
				unified_state = (arm_unified_thread_state_t *) tstate;
				state = &unified_state->ts_32;
				unified_state->ash.flavor = ARM_THREAD_STATE32;
				unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
			} else {
				state = (struct arm_thread_state *) tstate;
			}
			saved_state = &thread->machine.PcbData;

			state->sp = saved_state->sp;
			state->lr = saved_state->lr;
			state->pc = saved_state->pc;
			state->cpsr = saved_state->cpsr;
			for (i = 0; i < 13; i++)	/* r0..r12 */
				state->r[i] = saved_state->r[i];
			machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
							 state->pc, state->r[0], state->sp);

			/* Unified-form callers keep their passed-in count. */
			if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) {
				*count = ARM_THREAD_STATE_COUNT;
			}
			break;
		}
	case ARM_EXCEPTION_STATE:{
			struct arm_exception_state *state;
			struct arm_saved_state *saved_state;

			if (*count < ARM_EXCEPTION_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (struct arm_exception_state *) tstate;
			saved_state = &thread->machine.PcbData;

			/* Exception cause plus fault status/address registers. */
			state->exception = saved_state->exception;
			state->fsr = saved_state->fsr;
			state->far = saved_state->far;

			*count = ARM_EXCEPTION_STATE_COUNT;
			break;
		}
	case ARM_VFP_STATE:{
#if __ARM_VFP__
			struct arm_vfp_state *state;
			struct arm_vfpsaved_state *saved_state;
			unsigned int i;
			unsigned int max;

			/*
			 * Accept either the full VFP layout (64 registers) or
			 * the shorter legacy VFPv2 layout (32 registers).
			 */
			if (*count < ARM_VFP_STATE_COUNT) {
				if (*count < ARM_VFPV2_STATE_COUNT)
					return (KERN_INVALID_ARGUMENT);
				else
					*count = ARM_VFPV2_STATE_COUNT;
			}

			if (*count == ARM_VFPV2_STATE_COUNT)
				max = 32;
			else
				max = 64;

			state = (struct arm_vfp_state *) tstate;
			saved_state = find_user_vfp(thread);

			state->fpscr = saved_state->fpscr;
			for (i = 0; i < max; i++)
				state->r[i] = saved_state->r[i];

#endif
			break;
		}
	case ARM_DEBUG_STATE:{
			arm_debug_state_t *state;
			arm_debug_state_t *thread_state;

			if (*count < ARM_DEBUG_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (arm_debug_state_t *) tstate;
			thread_state = find_debug_state(thread);

			/* Threads with no allocated debug state report all zeroes. */
			if (thread_state == NULL)
				bzero(state, sizeof(arm_debug_state_t));
			else
				bcopy(thread_state, state, sizeof(arm_debug_state_t));

			*count = ARM_DEBUG_STATE_COUNT;
			break;
		}

	case ARM_PAGEIN_STATE:{
			arm_pagein_state_t *state;

			if (*count < ARM_PAGEIN_STATE_COUNT) {
				return (KERN_INVALID_ARGUMENT);
			}

			state = (arm_pagein_state_t *)tstate;
			state->__pagein_error = thread->t_pagein_error;

			*count = ARM_PAGEIN_STATE_COUNT;
			break;
		}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
272
273
/*
 * Routine:	machine_thread_get_kern_state
 *
 * Return the interrupted kernel-mode register state.  Only valid
 * when called for current_thread() while an interrupt frame is
 * live (cpu_int_state non-NULL); fails with KERN_FAILURE otherwise.
 */
kern_return_t
machine_thread_get_kern_state(
			      thread_t thread,
			      thread_flavor_t flavor,
			      thread_state_t tstate,
			      mach_msg_type_number_t * count)
{

#define machine_thread_get_kern_state_kprintf(x...)	/* kprintf("machine_thread_get_kern_state: " x) */

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
		return KERN_FAILURE;

	switch (flavor) {
	case ARM_THREAD_STATE:{
			struct arm_thread_state *state;
			struct arm_saved_state *saved_state;
			unsigned int i;
			if (*count < ARM_THREAD_STATE_COUNT)
				return (KERN_INVALID_ARGUMENT);

			state = (struct arm_thread_state *) tstate;
			/* Copy out of the per-CPU interrupt frame, not the PCB. */
			saved_state = getCpuDatap()->cpu_int_state;

			state->sp = saved_state->sp;
			state->lr = saved_state->lr;
			state->pc = saved_state->pc;
			state->cpsr = saved_state->cpsr;
			for (i = 0; i < 13; i++)	/* r0..r12 */
				state->r[i] = saved_state->r[i];
			machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
							      state->pc, state->r[0], state->sp);
			*count = ARM_THREAD_STATE_COUNT;
			break;
		}
	default:
		return (KERN_INVALID_ARGUMENT);
	}
	return (KERN_SUCCESS);
}
323
324 extern long long arm_debug_get(void);
325
326 /*
327 * Routine: machine_thread_set_state
328 *
329 */
330 kern_return_t
331 machine_thread_set_state(
332 thread_t thread,
333 thread_flavor_t flavor,
334 thread_state_t tstate,
335 mach_msg_type_number_t count)
336 {
337
338 #define machine_thread_set_state_kprintf(x...) /* kprintf("machine_thread_set
339 * _state: " x) */
340
341 switch (flavor) {
342 case ARM_THREAD_STATE:{
343 struct arm_thread_state *state;
344 struct arm_saved_state *saved_state;
345 arm_unified_thread_state_t *unified_state;
346 int old_psr;
347
348 if (count < ARM_THREAD_STATE_COUNT)
349 return (KERN_INVALID_ARGUMENT);
350
351 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
352 unified_state = (arm_unified_thread_state_t *) tstate;
353 state = &unified_state->ts_32;
354 } else {
355 state = (struct arm_thread_state *) tstate;
356 }
357 saved_state = &thread->machine.PcbData;
358 old_psr = saved_state->cpsr;
359 memcpy((char *) saved_state, (char *) state, sizeof(*state));
360 /*
361 * do not allow privileged bits of the PSR to be
362 * changed
363 */
364 saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK);
365
366 machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
367 state->pc, state->r[0], state->sp);
368 break;
369 }
370 case ARM_VFP_STATE:{
371 #if __ARM_VFP__
372 struct arm_vfp_state *state;
373 struct arm_vfpsaved_state *saved_state;
374 unsigned int i;
375 unsigned int max;
376
377 if (count < ARM_VFP_STATE_COUNT) {
378 if (count < ARM_VFPV2_STATE_COUNT)
379 return (KERN_INVALID_ARGUMENT);
380 else
381 count = ARM_VFPV2_STATE_COUNT;
382 }
383
384 if (count == ARM_VFPV2_STATE_COUNT)
385 max = 32;
386 else
387 max = 64;
388
389 state = (struct arm_vfp_state *) tstate;
390 saved_state = find_user_vfp(thread);
391
392 saved_state->fpscr = state->fpscr;
393 for (i = 0; i < max; i++)
394 saved_state->r[i] = state->r[i];
395
396 #endif
397 break;
398 }
399 case ARM_EXCEPTION_STATE:{
400
401 if (count < ARM_EXCEPTION_STATE_COUNT)
402 return (KERN_INVALID_ARGUMENT);
403
404 break;
405 }
406 case ARM_DEBUG_STATE:{
407 arm_debug_state_t *state;
408 arm_debug_state_t *thread_state;
409 boolean_t enabled = FALSE;
410 unsigned int i;
411
412 if (count < ARM_DEBUG_STATE_COUNT)
413 return (KERN_INVALID_ARGUMENT);
414
415 state = (arm_debug_state_t *) tstate;
416 thread_state = find_debug_state(thread);
417
418 if (count < ARM_DEBUG_STATE_COUNT)
419 return (KERN_INVALID_ARGUMENT);
420
421 for (i = 0; i < 16; i++) {
422 /* do not allow context IDs to be set */
423 if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
424 || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
425 || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
426 || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
427 return KERN_PROTECTION_FAILURE;
428 }
429 if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
430 || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
431 enabled = TRUE;
432 }
433 }
434
435 if (!enabled) {
436 if (thread_state != NULL)
437 {
438 void *pTmp = thread->machine.DebugData;
439 thread->machine.DebugData = NULL;
440 zfree(ads_zone, pTmp);
441 }
442 }
443 else
444 {
445 if (thread_state == NULL)
446 thread_state = zalloc(ads_zone);
447
448 for (i = 0; i < 16; i++) {
449 /* set appropriate priviledge; mask out unknown bits */
450 thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
451 | ARM_DBGBCR_MATCH_MASK
452 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
453 | ARM_DBG_CR_ENABLE_MASK))
454 | ARM_DBGBCR_TYPE_IVA
455 | ARM_DBG_CR_LINKED_UNLINKED
456 | ARM_DBG_CR_SECURITY_STATE_BOTH
457 | ARM_DBG_CR_MODE_CONTROL_USER;
458 thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
459 thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
460 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
461 | ARM_DBGWCR_ACCESS_CONTROL_MASK
462 | ARM_DBG_CR_ENABLE_MASK))
463 | ARM_DBG_CR_LINKED_UNLINKED
464 | ARM_DBG_CR_SECURITY_STATE_BOTH
465 | ARM_DBG_CR_MODE_CONTROL_USER;
466 thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
467 }
468
469 if (thread->machine.DebugData == NULL)
470 thread->machine.DebugData = thread_state;
471 }
472
473 if (thread == current_thread()) {
474 arm_debug_set(thread_state);
475 }
476
477 break;
478 }
479
480 default:
481 return (KERN_INVALID_ARGUMENT);
482 }
483 return (KERN_SUCCESS);
484 }
485
486 mach_vm_address_t
487 machine_thread_pc(thread_t thread)
488 {
489 struct arm_saved_state *ss = get_user_regs(thread);
490 return (mach_vm_address_t)get_saved_state_pc(ss);
491 }
492
493 void
494 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
495 {
496 set_saved_state_pc(get_user_regs(thread), (register_t)pc);
497 }
498
499 /*
500 * Routine: machine_thread_state_initialize
501 *
502 */
503 kern_return_t
504 machine_thread_state_initialize(
505 thread_t thread)
506 {
507 struct arm_saved_state *savestate;
508
509 savestate = (struct arm_saved_state *) &thread->machine.PcbData;
510 bzero((char *) savestate, sizeof(struct arm_saved_state));
511 savestate->cpsr = PSR_USERDFLT;
512
513 #if __ARM_VFP__
514 vfp_state_initialize(&thread->machine.uVFPdata);
515 vfp_state_initialize(&thread->machine.kVFPdata);
516 #endif
517
518 thread->machine.DebugData = NULL;
519
520 return KERN_SUCCESS;
521 }
522
#if __ARM_VFP__
/*
 * Routine:	vfp_state_initialize
 *
 * Reset a VFP save area to the default state: all registers zero,
 * FPSCR set to FPSCR_DEFAULT.
 */
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
	/* Set default VFP state to RunFast mode:
	 *
	 * - flush-to-zero mode
	 * - default NaN mode
	 * - no enabled exceptions
	 *
	 * On the VFP11, this allows the use of floating point without
	 * trapping to support code, which we do not provide. With
	 * the Cortex-A8, this allows the use of the (much faster) NFP
	 * pipeline for single-precision operations.
	 */

	bzero(vfp_state, sizeof(*vfp_state));
	vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
543
544
545 /*
546 * Routine: machine_thread_dup
547 *
548 */
549 kern_return_t
550 machine_thread_dup(
551 thread_t self,
552 thread_t target,
553 __unused boolean_t is_corpse)
554 {
555 struct arm_saved_state *self_saved_state;
556 struct arm_saved_state *target_saved_state;
557
558 #if __ARM_VFP__
559 struct arm_vfpsaved_state *self_vfp_state;
560 struct arm_vfpsaved_state *target_vfp_state;
561 #endif
562
563 target->machine.cthread_self = self->machine.cthread_self;
564 target->machine.cthread_data = self->machine.cthread_data;
565
566 self_saved_state = &self->machine.PcbData;
567 target_saved_state = &target->machine.PcbData;
568 bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
569
570 #if __ARM_VFP__
571 self_vfp_state = &self->machine.uVFPdata;
572 target_vfp_state = &target->machine.uVFPdata;
573 bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state));
574 #endif
575
576 return (KERN_SUCCESS);
577 }
578
579 /*
580 * Routine: get_user_regs
581 *
582 */
583 struct arm_saved_state *
584 get_user_regs(
585 thread_t thread)
586 {
587 return (&thread->machine.PcbData);
588 }
589
590 /*
591 * Routine: find_user_regs
592 *
593 */
594 struct arm_saved_state *
595 find_user_regs(
596 thread_t thread)
597 {
598 return get_user_regs(thread);
599 }
600
601 /*
602 * Routine: find_kern_regs
603 *
604 */
605 struct arm_saved_state *
606 find_kern_regs(
607 thread_t thread)
608 {
609 /*
610 * This works only for an interrupted kernel thread
611 */
612 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
613 return ((struct arm_saved_state *) NULL);
614 else
615 return (getCpuDatap()->cpu_int_state);
616
617 }
618
#if __ARM_VFP__
/*
 * Routine:	find_user_vfp
 *
 * Return the thread's user-mode VFP save area.  The area is
 * embedded in the machine-dependent thread state, so it always
 * exists.
 */
struct arm_vfpsaved_state *
find_user_vfp(
	      thread_t thread)
{
	return &thread->machine.uVFPdata;
}
#endif /* __ARM_VFP__ */
632
633 arm_debug_state_t *
634 find_debug_state(
635 thread_t thread)
636 {
637 return thread->machine.DebugData;
638 }
639
640 /*
641 * Routine: thread_userstack
642 *
643 */
644 kern_return_t
645 thread_userstack(
646 __unused thread_t thread,
647 int flavor,
648 thread_state_t tstate,
649 unsigned int count,
650 mach_vm_offset_t * user_stack,
651 int *customstack,
652 __unused boolean_t is64bit
653 )
654 {
655
656 switch (flavor) {
657 case ARM_THREAD_STATE:
658 {
659 struct arm_thread_state *state;
660
661
662 if (count < ARM_THREAD_STATE_COUNT)
663 return (KERN_INVALID_ARGUMENT);
664
665 if (customstack)
666 *customstack = 0;
667 state = (struct arm_thread_state *) tstate;
668
669 if (state->sp) {
670 *user_stack = CAST_USER_ADDR_T(state->sp);
671 if (customstack)
672 *customstack = 1;
673 } else {
674 *user_stack = CAST_USER_ADDR_T(USRSTACK);
675 }
676 }
677 break;
678
679 default:
680 return (KERN_INVALID_ARGUMENT);
681 }
682
683 return (KERN_SUCCESS);
684 }
685
686 /*
687 * thread_userstackdefault:
688 *
689 * Return the default stack location for the
690 * thread, if otherwise unknown.
691 */
692 kern_return_t
693 thread_userstackdefault(
694 mach_vm_offset_t *default_user_stack,
695 boolean_t is64bit __unused)
696 {
697 *default_user_stack = USRSTACK;
698
699 return (KERN_SUCCESS);
700 }
701
702 /*
703 * Routine: thread_setuserstack
704 *
705 */
706 void
707 thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
708 {
709 struct arm_saved_state *sv;
710
711 #define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstac
712 * k: " x) */
713
714 sv = get_user_regs(thread);
715
716 sv->sp = user_stack;
717
718 thread_setuserstack_kprintf("stack %x\n", sv->sp);
719
720 return;
721 }
722
723 /*
724 * Routine: thread_adjuserstack
725 *
726 */
727 uint64_t
728 thread_adjuserstack(thread_t thread, int adjust)
729 {
730 struct arm_saved_state *sv;
731
732 sv = get_user_regs(thread);
733
734 sv->sp += adjust;
735
736 return sv->sp;
737 }
738
739 /*
740 * Routine: thread_setentrypoint
741 *
742 */
743 void
744 thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
745 {
746 struct arm_saved_state *sv;
747
748 #define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoi
749 * nt: " x) */
750
751 sv = get_user_regs(thread);
752
753 sv->pc = entry;
754
755 thread_setentrypoint_kprintf("entry %x\n", sv->pc);
756
757 return;
758 }
759
760 /*
761 * Routine: thread_entrypoint
762 *
763 */
764 kern_return_t
765 thread_entrypoint(
766 __unused thread_t thread,
767 int flavor,
768 thread_state_t tstate,
769 __unused unsigned int count,
770 mach_vm_offset_t * entry_point
771 )
772 {
773 switch (flavor) {
774 case ARM_THREAD_STATE:
775 {
776 struct arm_thread_state *state;
777
778 state = (struct arm_thread_state *) tstate;
779
780 /*
781 * If a valid entry point is specified, use it.
782 */
783 if (state->pc) {
784 *entry_point = CAST_USER_ADDR_T(state->pc);
785 } else {
786 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
787 }
788 }
789 break;
790
791 default:
792 return (KERN_INVALID_ARGUMENT);
793 }
794
795 return (KERN_SUCCESS);
796 }
797
798
799 /*
800 * Routine: thread_set_child
801 *
802 */
803 void
804 thread_set_child(
805 thread_t child,
806 int pid)
807 {
808 struct arm_saved_state *child_state;
809
810 child_state = get_user_regs(child);
811
812 child_state->r[0] = (uint_t) pid;
813 child_state->r[1] = 1ULL;
814 }
815
816
817 /*
818 * Routine: thread_set_parent
819 *
820 */
821 void
822 thread_set_parent(
823 thread_t parent,
824 int pid)
825 {
826 struct arm_saved_state *parent_state;
827
828 parent_state = get_user_regs(parent);
829
830 parent_state->r[0] = pid;
831 parent_state->r[1] = 0;
832 }
833
834
/*
 * Snapshot of a thread's user context saved by act_thread_csave()
 * and restored by act_thread_catt(): the integer register state
 * plus (when built with VFP) the floating point state.
 */
struct arm_act_context {
	struct arm_saved_state ss;
#if __ARM_VFP__
	struct arm_vfpsaved_state vfps;
#endif
};
841
842 /*
843 * Routine: act_thread_csave
844 *
845 */
846 void *
847 act_thread_csave(void)
848 {
849 struct arm_act_context *ic;
850 kern_return_t kret;
851 unsigned int val;
852
853 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
854
855 if (ic == (struct arm_act_context *) NULL)
856 return ((void *) 0);
857
858 val = ARM_THREAD_STATE_COUNT;
859 kret = machine_thread_get_state(current_thread(),
860 ARM_THREAD_STATE,
861 (thread_state_t) & ic->ss,
862 &val);
863 if (kret != KERN_SUCCESS) {
864 kfree(ic, sizeof(struct arm_act_context));
865 return ((void *) 0);
866 }
867 #if __ARM_VFP__
868 val = ARM_VFP_STATE_COUNT;
869 kret = machine_thread_get_state(current_thread(),
870 ARM_VFP_STATE,
871 (thread_state_t) & ic->vfps,
872 &val);
873 if (kret != KERN_SUCCESS) {
874 kfree(ic, sizeof(struct arm_act_context));
875 return ((void *) 0);
876 }
877 #endif
878 return (ic);
879 }
880
881 /*
882 * Routine: act_thread_catt
883 *
884 */
885 void
886 act_thread_catt(void *ctx)
887 {
888 struct arm_act_context *ic;
889 kern_return_t kret;
890
891 ic = (struct arm_act_context *) ctx;
892
893 if (ic == (struct arm_act_context *) NULL)
894 return;
895
896 kret = machine_thread_set_state(current_thread(),
897 ARM_THREAD_STATE,
898 (thread_state_t) & ic->ss,
899 ARM_THREAD_STATE_COUNT);
900 if (kret != KERN_SUCCESS)
901 goto out;
902
903 #if __ARM_VFP__
904 kret = machine_thread_set_state(current_thread(),
905 ARM_VFP_STATE,
906 (thread_state_t) & ic->vfps,
907 ARM_VFP_STATE_COUNT);
908 if (kret != KERN_SUCCESS)
909 goto out;
910 #endif
911 out:
912 kfree(ic, sizeof(struct arm_act_context));
913 }
914
/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	/* Discard a saved-context block without restoring it. */
	kfree(ctx, sizeof(struct arm_act_context));
}
924
/*
 * Routine:	thread_set_wq_state32
 *
 * Initialize a workqueue thread's user register state from the
 * supplied arm_thread_state_t, resetting the PSR to the user
 * default.  Takes the thread lock (at splsched) only when the
 * target is not the calling thread.
 */
kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	thread_t curth = current_thread();
	spl_t s=0;

	saved_state = &thread->machine.PcbData;
	state = (arm_thread_state_t *)tstate;

	/* Lock only when mutating another thread's state. */
	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state->cpsr = PSR_USERDFLT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}