/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm/proc_reg.h>
struct arm_vfpv2_state {
    __uint32_t __r[32];
    __uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
    (sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
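
/*
 * Mach thread-state counts are expressed in 32-bit words, which is why the
 * count macro above divides the structure size by sizeof(uint32_t).
 */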
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);
/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
    [ARM_THREAD_STATE]    = ARM_THREAD_STATE_COUNT,
    [ARM_VFP_STATE]       = ARM_VFP_STATE_COUNT,
    [ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
    [ARM_DEBUG_STATE]     = ARM_DEBUG_STATE_COUNT,
    [ARM_PAGEIN_STATE]    = ARM_PAGEIN_STATE_COUNT,
};
extern zone_t ads_zone;
kern_return_t
machine_thread_state_convert_to_user(
    __unused thread_t thread,
    __unused thread_flavor_t flavor,
    __unused thread_state_t tstate,
    __unused mach_msg_type_number_t *count)
{
    // No conversion to userspace representation on this platform
    return KERN_SUCCESS;
}
kern_return_t
machine_thread_state_convert_from_user(
    __unused thread_t thread,
    __unused thread_flavor_t flavor,
    __unused thread_state_t tstate,
    __unused mach_msg_type_number_t count)
{
    // No conversion from userspace representation on this platform
    return KERN_SUCCESS;
}
kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
    __unused thread_t thread,
    __unused user_addr_t *uctxp)
{
    // No conversion to userspace representation on this platform
    return KERN_SUCCESS;
}
kern_return_t
machine_thread_function_pointers_convert_from_user(
    __unused thread_t thread,
    __unused user_addr_t *fptrs,
    __unused uint32_t count)
{
    // No conversion from userspace representation on this platform
    return KERN_SUCCESS;
}
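
/*
 * All four conversion hooks above are no-ops here: thread state, signal
 * context pointers and function pointers are passed between user space and
 * the kernel unchanged on this platform.
 */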
/*
 * Routine: machine_thread_get_state
 *
 */
kern_return_t
machine_thread_get_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
#define machine_thread_get_state_kprintf(x...) /* kprintf("machine_thread_get_state: " x) */

    switch (flavor) {
    case THREAD_STATE_FLAVOR_LIST:
        if (*count < 4)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = ARM_THREAD_STATE;
        tstate[1] = ARM_VFP_STATE;
        tstate[2] = ARM_EXCEPTION_STATE;
        tstate[3] = ARM_DEBUG_STATE;
        *count = 4;
        break;
    case THREAD_STATE_FLAVOR_LIST_10_15:
        if (*count < 5)
            return (KERN_INVALID_ARGUMENT);

        tstate[0] = ARM_THREAD_STATE;
        tstate[1] = ARM_VFP_STATE;
        tstate[2] = ARM_EXCEPTION_STATE;
        tstate[3] = ARM_DEBUG_STATE;
        tstate[4] = ARM_PAGEIN_STATE;
        *count = 5;
        break;
    case ARM_THREAD_STATE:{
        struct arm_thread_state *state;
        struct arm_saved_state *saved_state;
        arm_unified_thread_state_t *unified_state;
        unsigned int i;

        if (*count < ARM_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) {
            unified_state = (arm_unified_thread_state_t *) tstate;
            state = &unified_state->ts_32;
            unified_state->ash.flavor = ARM_THREAD_STATE32;
            unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
        } else {
            state = (struct arm_thread_state *) tstate;
        }
        saved_state = &thread->machine.PcbData;

        state->sp = saved_state->sp;
        state->lr = saved_state->lr;
        state->pc = saved_state->pc;
        state->cpsr = saved_state->cpsr;
        for (i = 0; i < 13; i++)
            state->r[i] = saved_state->r[i];
        machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
            state->pc, state->r[0], state->sp);

        if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) {
            *count = ARM_THREAD_STATE_COUNT;
        }
        break;
    }
    case ARM_EXCEPTION_STATE:{
        struct arm_exception_state *state;
        struct arm_saved_state *saved_state;

        if (*count < ARM_EXCEPTION_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (struct arm_exception_state *) tstate;
        saved_state = &thread->machine.PcbData;

        state->exception = saved_state->exception;
        state->fsr = saved_state->fsr;
        state->far = saved_state->far;

        *count = ARM_EXCEPTION_STATE_COUNT;
        break;
    }
    case ARM_VFP_STATE:{
#if __ARM_VFP__
        struct arm_vfp_state *state;
        struct arm_vfpsaved_state *saved_state;
        unsigned int i;
        unsigned int max;

        if (*count < ARM_VFP_STATE_COUNT) {
            if (*count < ARM_VFPV2_STATE_COUNT)
                return (KERN_INVALID_ARGUMENT);
            else
                *count = ARM_VFPV2_STATE_COUNT;
        }

        if (*count == ARM_VFPV2_STATE_COUNT)
            max = 32;
        else
            max = 64;

        state = (struct arm_vfp_state *) tstate;
        saved_state = find_user_vfp(thread);

        state->fpscr = saved_state->fpscr;
        for (i = 0; i < max; i++)
            state->r[i] = saved_state->r[i];
#endif
        break;
    }
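
    /*
     * ARM_DEBUG_STATE: return the thread's hardware debug state, or zeros if
     * none has been installed for it.
     */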
    case ARM_DEBUG_STATE:{
        arm_debug_state_t *state;
        arm_debug_state_t *thread_state;

        if (*count < ARM_DEBUG_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (arm_debug_state_t *) tstate;
        thread_state = find_debug_state(thread);

        if (thread_state == NULL)
            bzero(state, sizeof(arm_debug_state_t));
        else
            bcopy(thread_state, state, sizeof(arm_debug_state_t));

        *count = ARM_DEBUG_STATE_COUNT;
        break;
    }
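
    /*
     * ARM_PAGEIN_STATE: report the most recent page-in error recorded for the
     * thread.
     */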
    case ARM_PAGEIN_STATE:{
        arm_pagein_state_t *state;

        if (*count < ARM_PAGEIN_STATE_COUNT) {
            return (KERN_INVALID_ARGUMENT);
        }

        state = (arm_pagein_state_t *)tstate;
        state->__pagein_error = thread->t_pagein_error;

        *count = ARM_PAGEIN_STATE_COUNT;
        break;
    }
    default:
        return (KERN_INVALID_ARGUMENT);
    }
    return (KERN_SUCCESS);
}
/*
 * Routine: machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
#define machine_thread_get_kern_state_kprintf(x...) /* kprintf("machine_thread_get_kern_state: " x) */
    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
        return KERN_FAILURE;

    switch (flavor) {
    case ARM_THREAD_STATE:{
        struct arm_thread_state *state;
        struct arm_saved_state *saved_state;
        unsigned int i;

        if (*count < ARM_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (struct arm_thread_state *) tstate;
        saved_state = getCpuDatap()->cpu_int_state;

        state->sp = saved_state->sp;
        state->lr = saved_state->lr;
        state->pc = saved_state->pc;
        state->cpsr = saved_state->cpsr;
        for (i = 0; i < 13; i++)
            state->r[i] = saved_state->r[i];
        machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
            state->pc, state->r[0], state->sp);
        *count = ARM_THREAD_STATE_COUNT;
        break;
    }
    default:
        return (KERN_INVALID_ARGUMENT);
    }
    return (KERN_SUCCESS);
}
extern long long arm_debug_get(void);
/*
 * Routine: machine_thread_set_state
 *
 */
kern_return_t
machine_thread_set_state(
    thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
#define machine_thread_set_state_kprintf(x...) /* kprintf("machine_thread_set_state: " x) */

    switch (flavor) {
    case ARM_THREAD_STATE:{
        struct arm_thread_state *state;
        struct arm_saved_state *saved_state;
        arm_unified_thread_state_t *unified_state;
        int old_psr;

        if (count < ARM_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
            unified_state = (arm_unified_thread_state_t *) tstate;
            state = &unified_state->ts_32;
        } else {
            state = (struct arm_thread_state *) tstate;
        }
        saved_state = &thread->machine.PcbData;
        old_psr = saved_state->cpsr;
        memcpy((char *) saved_state, (char *) state, sizeof(*state));
        /*
         * do not allow privileged bits of the PSR to be
         * changed
         */
        saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK);

        machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
            state->pc, state->r[0], state->sp);
        break;
    }
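
    /*
     * ARM_VFP_STATE: accept either the full VFP layout or the smaller VFPv2
     * layout, mirroring the get path above.
     */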
    case ARM_VFP_STATE:{
#if __ARM_VFP__
        struct arm_vfp_state *state;
        struct arm_vfpsaved_state *saved_state;
        unsigned int i;
        unsigned int max;

        if (count < ARM_VFP_STATE_COUNT) {
            if (count < ARM_VFPV2_STATE_COUNT)
                return (KERN_INVALID_ARGUMENT);
            else
                count = ARM_VFPV2_STATE_COUNT;
        }

        if (count == ARM_VFPV2_STATE_COUNT)
            max = 32;
        else
            max = 64;

        state = (struct arm_vfp_state *) tstate;
        saved_state = find_user_vfp(thread);

        saved_state->fpscr = state->fpscr;
        for (i = 0; i < max; i++)
            saved_state->r[i] = state->r[i];
#endif
        break;
    }
    case ARM_EXCEPTION_STATE:{

        if (count < ARM_EXCEPTION_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        break;
    }
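
    /*
     * ARM_DEBUG_STATE: validate and install hardware breakpoint/watchpoint
     * state.  Context-ID matching and linked breakpoints are rejected; the
     * control registers are forced to unlinked, user-mode, IVA-match settings
     * before being stored.
     */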
    case ARM_DEBUG_STATE:{
        arm_debug_state_t *state;
        arm_debug_state_t *thread_state;
        boolean_t enabled = FALSE;
        unsigned int i;

        if (count < ARM_DEBUG_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        state = (arm_debug_state_t *) tstate;
        thread_state = find_debug_state(thread);

        if (count < ARM_DEBUG_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        for (i = 0; i < 16; i++) {
            /* do not allow context IDs to be set */
            if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
                || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
                return KERN_PROTECTION_FAILURE;
            }
            if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
                || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
                enabled = TRUE;
            }
        }

        if (!enabled) {
            if (thread_state != NULL) {
                void *pTmp = thread->machine.DebugData;
                thread->machine.DebugData = NULL;
                zfree(ads_zone, pTmp);
            }
        } else {
            if (thread_state == NULL)
                thread_state = zalloc(ads_zone);

            for (i = 0; i < 16; i++) {
                /* set appropriate privilege; mask out unknown bits */
                thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGBCR_MATCH_MASK
                    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBGBCR_TYPE_IVA
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
                thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBGWCR_ACCESS_CONTROL_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
            }

            if (thread->machine.DebugData == NULL)
                thread->machine.DebugData = thread_state;
        }

        if (thread == current_thread()) {
            arm_debug_set(thread_state);
        }

        break;
    }
    default:
        return (KERN_INVALID_ARGUMENT);
    }
    return (KERN_SUCCESS);
}
mach_vm_address_t
machine_thread_pc(thread_t thread)
{
    struct arm_saved_state *ss = get_user_regs(thread);
    return (mach_vm_address_t)get_saved_state_pc(ss);
}
void
machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
{
    set_saved_state_pc(get_user_regs(thread), (register_t)pc);
}
/*
 * Routine: machine_thread_state_initialize
 *
 */
kern_return_t
machine_thread_state_initialize(
    thread_t thread)
{
    struct arm_saved_state *savestate;

    savestate = (struct arm_saved_state *) &thread->machine.PcbData;
    bzero((char *) savestate, sizeof(struct arm_saved_state));
    savestate->cpsr = PSR_USERDFLT;

#if __ARM_VFP__
    vfp_state_initialize(&thread->machine.uVFPdata);
    vfp_state_initialize(&thread->machine.kVFPdata);
#endif

    thread->machine.DebugData = NULL;

    return KERN_SUCCESS;
}
#if __ARM_VFP__
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
    /* Set default VFP state to RunFast mode:
     *
     * - flush-to-zero mode
     * - no enabled exceptions
     *
     * On the VFP11, this allows the use of floating point without
     * trapping to support code, which we do not provide.  With
     * the Cortex-A8, this allows the use of the (much faster) NFP
     * pipeline for single-precision operations.
     */

    bzero(vfp_state, sizeof(*vfp_state));
    vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
/*
 * Routine: machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(
    thread_t self,
    thread_t target,
    __unused boolean_t is_corpse)
{
    struct arm_saved_state *self_saved_state;
    struct arm_saved_state *target_saved_state;

#if __ARM_VFP__
    struct arm_vfpsaved_state *self_vfp_state;
    struct arm_vfpsaved_state *target_vfp_state;
#endif

    target->machine.cthread_self = self->machine.cthread_self;
    target->machine.cthread_data = self->machine.cthread_data;

    self_saved_state = &self->machine.PcbData;
    target_saved_state = &target->machine.PcbData;
    bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));

#if __ARM_VFP__
    self_vfp_state = &self->machine.uVFPdata;
    target_vfp_state = &target->machine.uVFPdata;
    bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state));
#endif

    return (KERN_SUCCESS);
}
/*
 * Routine: get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(thread_t thread)
{
    return (&thread->machine.PcbData);
}
/*
 * Routine: find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(thread_t thread)
{
    return get_user_regs(thread);
}
/*
 * Routine: find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(thread_t thread)
{
    /*
     * This works only for an interrupted kernel thread
     */
    if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL)
        return ((struct arm_saved_state *) NULL);
    else
        return (getCpuDatap()->cpu_int_state);
}
#if __ARM_VFP__
/*
 * Find the user state floating point context. If there is no user state context,
 * we just return a 0.
 */
struct arm_vfpsaved_state *
find_user_vfp(thread_t thread)
{
    return &thread->machine.uVFPdata;
}
#endif /* __ARM_VFP__ */
arm_debug_state_t *
find_debug_state(thread_t thread)
{
    return thread->machine.DebugData;
}
/*
 * Routine: thread_userstack
 *
 */
kern_return_t
thread_userstack(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t * user_stack,
    int *customstack,
    __unused boolean_t is64bit
    )
{
    switch (flavor) {
    case ARM_THREAD_STATE:
    {
        struct arm_thread_state *state;

        if (count < ARM_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);

        if (customstack)
            *customstack = 0;
        state = (struct arm_thread_state *) tstate;

        if (state->sp) {
            *user_stack = CAST_USER_ADDR_T(state->sp);
            if (customstack)
                *customstack = 1;
        } else {
            *user_stack = CAST_USER_ADDR_T(USRSTACK);
        }
        break;
    }
    default:
        return (KERN_INVALID_ARGUMENT);
    }
    return (KERN_SUCCESS);
}
/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
    mach_vm_offset_t *default_user_stack,
    boolean_t is64bit __unused)
{
    *default_user_stack = USRSTACK;

    return (KERN_SUCCESS);
}
/*
 * Routine: thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
{
    struct arm_saved_state *sv;

#define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstack: " x) */

    sv = get_user_regs(thread);

    sv->sp = user_stack;

    thread_setuserstack_kprintf("stack %x\n", sv->sp);

    return;
}
/*
 * Routine: thread_adjuserstack
 *
 */
uint64_t
thread_adjuserstack(thread_t thread, int adjust)
{
    struct arm_saved_state *sv;

    sv = get_user_regs(thread);

    sv->sp += adjust;

    return sv->sp;
}
/*
 * Routine: thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
{
    struct arm_saved_state *sv;

#define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoint: " x) */

    sv = get_user_regs(thread);

    sv->pc = entry;

    thread_setentrypoint_kprintf("entry %x\n", sv->pc);

    return;
}
/*
 * Routine: thread_entrypoint
 *
 */
kern_return_t
thread_entrypoint(
    __unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    __unused unsigned int count,
    mach_vm_offset_t * entry_point
    )
{
    switch (flavor) {
    case ARM_THREAD_STATE:
    {
        struct arm_thread_state *state;

        state = (struct arm_thread_state *) tstate;

        /*
         * If a valid entry point is specified, use it.
         */
        if (state->pc) {
            *entry_point = CAST_USER_ADDR_T(state->pc);
        } else {
            *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
        }
        break;
    }
    default:
        return (KERN_INVALID_ARGUMENT);
    }
    return (KERN_SUCCESS);
}
/*
 * Routine: thread_set_child
 *
 */
void
thread_set_child(
    thread_t child,
    int pid)
{
    struct arm_saved_state *child_state;

    child_state = get_user_regs(child);

    child_state->r[0] = (uint_t) pid;
    child_state->r[1] = 1ULL;
}
/*
 * Routine: thread_set_parent
 *
 */
void
thread_set_parent(
    thread_t parent,
    int pid)
{
    struct arm_saved_state *parent_state;

    parent_state = get_user_regs(parent);

    parent_state->r[0] = pid;
    parent_state->r[1] = 0;
}
struct arm_act_context {
    struct arm_saved_state ss;
#if __ARM_VFP__
    struct arm_vfpsaved_state vfps;
#endif
};
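
/*
 * act_thread_csave() snapshots the current thread's register and VFP state
 * into a kalloc'd arm_act_context; act_thread_catt() reinstalls and frees a
 * snapshot, and act_thread_cfree() frees one without reinstalling it.
 */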
/*
 * Routine: act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
    struct arm_act_context *ic;
    kern_return_t kret;
    unsigned int val;

    ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));

    if (ic == (struct arm_act_context *) NULL)
        return ((void *) 0);

    val = ARM_THREAD_STATE_COUNT;
    kret = machine_thread_get_state(current_thread(),
        ARM_THREAD_STATE,
        (thread_state_t) & ic->ss,
        &val);
    if (kret != KERN_SUCCESS) {
        kfree(ic, sizeof(struct arm_act_context));
        return ((void *) 0);
    }
#if __ARM_VFP__
    val = ARM_VFP_STATE_COUNT;
    kret = machine_thread_get_state(current_thread(),
        ARM_VFP_STATE,
        (thread_state_t) & ic->vfps,
        &val);
    if (kret != KERN_SUCCESS) {
        kfree(ic, sizeof(struct arm_act_context));
        return ((void *) 0);
    }
#endif
    return (ic);
}
/*
 * Routine: act_thread_catt
 *
 */
void
act_thread_catt(void *ctx)
{
    struct arm_act_context *ic;
    kern_return_t kret;

    ic = (struct arm_act_context *) ctx;

    if (ic == (struct arm_act_context *) NULL)
        return;

    kret = machine_thread_set_state(current_thread(),
        ARM_THREAD_STATE,
        (thread_state_t) & ic->ss,
        ARM_THREAD_STATE_COUNT);
    if (kret != KERN_SUCCESS)
        goto out;

#if __ARM_VFP__
    kret = machine_thread_set_state(current_thread(),
        ARM_VFP_STATE,
        (thread_state_t) & ic->vfps,
        ARM_VFP_STATE_COUNT);
    if (kret != KERN_SUCCESS)
        goto out;
#endif
out:
    kfree(ic, sizeof(struct arm_act_context));
}
/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
    kfree(ctx, sizeof(struct arm_act_context));
}
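
/*
 * thread_set_wq_state32: install a 32-bit register state on a thread (used
 * for workqueue threads).  The thread lock is taken when the target is not
 * the calling thread, and cpsr is forced back to the user default.
 */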
kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
    arm_thread_state_t *state;
    struct arm_saved_state *saved_state;
    thread_t curth = current_thread();
    spl_t s = 0;

    saved_state = &thread->machine.PcbData;
    state = (arm_thread_state_t *)tstate;

    if (curth != thread) {
        s = splsched();
        thread_lock(thread);
    }

    /*
     * do not zero saved_state, it can be concurrently accessed
     * and zero is not a valid state for some of the registers,
     * like sp.
     */
    thread_state32_to_saved_state(state, saved_state);
    saved_state->cpsr = PSR_USERDFLT;

    if (curth != thread) {
        thread_unlock(thread);