/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/proc_reg.h>
38 struct arm_vfpv2_state
{
43 typedef struct arm_vfpv2_state arm_vfpv2_state_t
;
45 #define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
46 (sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
53 thread_set_child(thread_t child
, int pid
);
56 thread_set_parent(thread_t parent
, int pid
);
59 * Maps state flavor to number of words in the state:
61 /* __private_extern__ */
62 unsigned int _MachineStateCount
[] = {
64 [ARM_THREAD_STATE
] = ARM_THREAD_STATE_COUNT
,
65 [ARM_VFP_STATE
] = ARM_VFP_STATE_COUNT
,
66 [ARM_EXCEPTION_STATE
] = ARM_EXCEPTION_STATE_COUNT
,
67 [ARM_DEBUG_STATE
] = ARM_DEBUG_STATE_COUNT
,
68 [ARM_PAGEIN_STATE
] = ARM_PAGEIN_STATE_COUNT
,
71 extern zone_t ads_zone
;
74 machine_thread_state_convert_to_user(
75 __unused thread_t thread
,
76 __unused thread_flavor_t flavor
,
77 __unused thread_state_t tstate
,
78 __unused mach_msg_type_number_t
*count
)
80 // No conversion to userspace representation on this platform
85 machine_thread_state_convert_from_user(
86 __unused thread_t thread
,
87 __unused thread_flavor_t flavor
,
88 __unused thread_state_t tstate
,
89 __unused mach_msg_type_number_t count
)
91 // No conversion from userspace representation on this platform
96 machine_thread_siguctx_pointer_convert_to_user(
97 __unused thread_t thread
,
98 __unused user_addr_t
*uctxp
)
100 // No conversion to userspace representation on this platform
105 machine_thread_function_pointers_convert_from_user(
106 __unused thread_t thread
,
107 __unused user_addr_t
*fptrs
,
108 __unused
uint32_t count
)
110 // No conversion from userspace representation on this platform
115 * Routine: machine_thread_get_state
119 machine_thread_get_state(
121 thread_flavor_t flavor
,
122 thread_state_t tstate
,
123 mach_msg_type_number_t
* count
)
125 #define machine_thread_get_state_kprintf(x...) /* kprintf("machine_thread_get
129 case THREAD_STATE_FLAVOR_LIST
:
131 return KERN_INVALID_ARGUMENT
;
134 tstate
[0] = ARM_THREAD_STATE
;
135 tstate
[1] = ARM_VFP_STATE
;
136 tstate
[2] = ARM_EXCEPTION_STATE
;
137 tstate
[3] = ARM_DEBUG_STATE
;
141 case THREAD_STATE_FLAVOR_LIST_10_15
:
143 return KERN_INVALID_ARGUMENT
;
146 tstate
[0] = ARM_THREAD_STATE
;
147 tstate
[1] = ARM_VFP_STATE
;
148 tstate
[2] = ARM_EXCEPTION_STATE
;
149 tstate
[3] = ARM_DEBUG_STATE
;
150 tstate
[4] = ARM_PAGEIN_STATE
;
154 case ARM_THREAD_STATE
:{
155 struct arm_thread_state
*state
;
156 struct arm_saved_state
*saved_state
;
157 arm_unified_thread_state_t
*unified_state
;
160 if (*count
< ARM_THREAD_STATE_COUNT
) {
161 return KERN_INVALID_ARGUMENT
;
164 if (*count
== ARM_UNIFIED_THREAD_STATE_COUNT
) {
165 unified_state
= (arm_unified_thread_state_t
*) tstate
;
166 state
= &unified_state
->ts_32
;
167 unified_state
->ash
.flavor
= ARM_THREAD_STATE32
;
168 unified_state
->ash
.count
= ARM_THREAD_STATE32_COUNT
;
170 state
= (struct arm_thread_state
*) tstate
;
172 saved_state
= &thread
->machine
.PcbData
;
174 state
->sp
= saved_state
->sp
;
175 state
->lr
= saved_state
->lr
;
176 state
->pc
= saved_state
->pc
;
177 state
->cpsr
= saved_state
->cpsr
;
178 for (i
= 0; i
< 13; i
++) {
179 state
->r
[i
] = saved_state
->r
[i
];
181 machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
182 state
->pc
, state
->r
[0], state
->sp
);
184 if (*count
!= ARM_UNIFIED_THREAD_STATE_COUNT
) {
185 *count
= ARM_THREAD_STATE_COUNT
;
189 case ARM_EXCEPTION_STATE
:{
190 struct arm_exception_state
*state
;
191 struct arm_saved_state
*saved_state
;
193 if (*count
< ARM_EXCEPTION_STATE_COUNT
) {
194 return KERN_INVALID_ARGUMENT
;
197 state
= (struct arm_exception_state
*) tstate
;
198 saved_state
= &thread
->machine
.PcbData
;
200 state
->exception
= saved_state
->exception
;
201 state
->fsr
= saved_state
->fsr
;
202 state
->far
= saved_state
->far
;
204 *count
= ARM_EXCEPTION_STATE_COUNT
;
209 struct arm_vfp_state
*state
;
210 struct arm_vfpsaved_state
*saved_state
;
214 if (*count
< ARM_VFP_STATE_COUNT
) {
215 if (*count
< ARM_VFPV2_STATE_COUNT
) {
216 return KERN_INVALID_ARGUMENT
;
218 *count
= ARM_VFPV2_STATE_COUNT
;
222 if (*count
== ARM_VFPV2_STATE_COUNT
) {
228 state
= (struct arm_vfp_state
*) tstate
;
229 saved_state
= find_user_vfp(thread
);
231 state
->fpscr
= saved_state
->fpscr
;
232 for (i
= 0; i
< max
; i
++) {
233 state
->r
[i
] = saved_state
->r
[i
];
239 case ARM_DEBUG_STATE
:{
240 arm_debug_state_t
*state
;
241 arm_debug_state_t
*thread_state
;
243 if (*count
< ARM_DEBUG_STATE_COUNT
) {
244 return KERN_INVALID_ARGUMENT
;
247 state
= (arm_debug_state_t
*) tstate
;
248 thread_state
= find_debug_state(thread
);
250 if (thread_state
== NULL
) {
251 bzero(state
, sizeof(arm_debug_state_t
));
253 bcopy(thread_state
, state
, sizeof(arm_debug_state_t
));
256 *count
= ARM_DEBUG_STATE_COUNT
;
260 case ARM_PAGEIN_STATE
:{
261 arm_pagein_state_t
*state
;
263 if (*count
< ARM_PAGEIN_STATE_COUNT
) {
264 return KERN_INVALID_ARGUMENT
;
267 state
= (arm_pagein_state_t
*)tstate
;
268 state
->__pagein_error
= thread
->t_pagein_error
;
270 *count
= ARM_PAGEIN_STATE_COUNT
;
275 return KERN_INVALID_ARGUMENT
;
282 * Routine: machine_thread_get_kern_state
286 machine_thread_get_kern_state(
288 thread_flavor_t flavor
,
289 thread_state_t tstate
,
290 mach_msg_type_number_t
* count
)
292 #define machine_thread_get_kern_state_kprintf(x...) /* kprintf("machine_threa
293 * d_get_kern_state: "
297 * This works only for an interrupted kernel thread
299 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
) {
304 case ARM_THREAD_STATE
:{
305 struct arm_thread_state
*state
;
306 struct arm_saved_state
*saved_state
;
308 if (*count
< ARM_THREAD_STATE_COUNT
) {
309 return KERN_INVALID_ARGUMENT
;
312 state
= (struct arm_thread_state
*) tstate
;
313 saved_state
= getCpuDatap()->cpu_int_state
;
315 state
->sp
= saved_state
->sp
;
316 state
->lr
= saved_state
->lr
;
317 state
->pc
= saved_state
->pc
;
318 state
->cpsr
= saved_state
->cpsr
;
319 for (i
= 0; i
< 13; i
++) {
320 state
->r
[i
] = saved_state
->r
[i
];
322 machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp 0x%x\n",
323 state
->pc
, state
->r
[0], state
->sp
);
324 *count
= ARM_THREAD_STATE_COUNT
;
328 return KERN_INVALID_ARGUMENT
;
333 extern long long arm_debug_get(void);
336 * Routine: machine_thread_set_state
340 machine_thread_set_state(
342 thread_flavor_t flavor
,
343 thread_state_t tstate
,
344 mach_msg_type_number_t count
)
346 #define machine_thread_set_state_kprintf(x...) /* kprintf("machine_thread_set
350 case ARM_THREAD_STATE
:{
351 struct arm_thread_state
*state
;
352 struct arm_saved_state
*saved_state
;
353 arm_unified_thread_state_t
*unified_state
;
356 if (count
< ARM_THREAD_STATE_COUNT
) {
357 return KERN_INVALID_ARGUMENT
;
360 if (count
== ARM_UNIFIED_THREAD_STATE_COUNT
) {
361 unified_state
= (arm_unified_thread_state_t
*) tstate
;
362 state
= &unified_state
->ts_32
;
364 state
= (struct arm_thread_state
*) tstate
;
366 saved_state
= &thread
->machine
.PcbData
;
367 old_psr
= saved_state
->cpsr
;
368 memcpy((char *) saved_state
, (char *) state
, sizeof(*state
));
370 * do not allow privileged bits of the PSR to be
373 saved_state
->cpsr
= (saved_state
->cpsr
& ~PSR_USER_MASK
) | (old_psr
& PSR_USER_MASK
);
375 machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
376 state
->pc
, state
->r
[0], state
->sp
);
381 struct arm_vfp_state
*state
;
382 struct arm_vfpsaved_state
*saved_state
;
386 if (count
< ARM_VFP_STATE_COUNT
) {
387 if (count
< ARM_VFPV2_STATE_COUNT
) {
388 return KERN_INVALID_ARGUMENT
;
390 count
= ARM_VFPV2_STATE_COUNT
;
394 if (count
== ARM_VFPV2_STATE_COUNT
) {
400 state
= (struct arm_vfp_state
*) tstate
;
401 saved_state
= find_user_vfp(thread
);
403 saved_state
->fpscr
= state
->fpscr
;
404 for (i
= 0; i
< max
; i
++) {
405 saved_state
->r
[i
] = state
->r
[i
];
411 case ARM_EXCEPTION_STATE
:{
412 if (count
< ARM_EXCEPTION_STATE_COUNT
) {
413 return KERN_INVALID_ARGUMENT
;
418 case ARM_DEBUG_STATE
:{
419 arm_debug_state_t
*state
;
420 arm_debug_state_t
*thread_state
;
421 boolean_t enabled
= FALSE
;
424 if (count
< ARM_DEBUG_STATE_COUNT
) {
425 return KERN_INVALID_ARGUMENT
;
428 state
= (arm_debug_state_t
*) tstate
;
429 thread_state
= find_debug_state(thread
);
431 if (count
< ARM_DEBUG_STATE_COUNT
) {
432 return KERN_INVALID_ARGUMENT
;
435 for (i
= 0; i
< 16; i
++) {
436 /* do not allow context IDs to be set */
437 if (((state
->bcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
438 || ((state
->bcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)
439 || ((state
->wcr
[i
] & ARM_DBGBCR_TYPE_MASK
) != ARM_DBGBCR_TYPE_IVA
)
440 || ((state
->wcr
[i
] & ARM_DBG_CR_LINKED_MASK
) != ARM_DBG_CR_LINKED_UNLINKED
)) {
441 return KERN_PROTECTION_FAILURE
;
443 if ((((state
->bcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
))
444 || ((state
->wcr
[i
] & ARM_DBG_CR_ENABLE_MASK
) == ARM_DBG_CR_ENABLE_ENABLE
)) {
450 if (thread_state
!= NULL
) {
451 void *pTmp
= thread
->machine
.DebugData
;
452 thread
->machine
.DebugData
= NULL
;
453 zfree(ads_zone
, pTmp
);
456 if (thread_state
== NULL
) {
457 thread_state
= zalloc(ads_zone
);
460 for (i
= 0; i
< 16; i
++) {
461 /* set appropriate priviledge; mask out unknown bits */
462 thread_state
->bcr
[i
] = (state
->bcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
463 | ARM_DBGBCR_MATCH_MASK
464 | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
465 | ARM_DBG_CR_ENABLE_MASK
))
466 | ARM_DBGBCR_TYPE_IVA
467 | ARM_DBG_CR_LINKED_UNLINKED
468 | ARM_DBG_CR_SECURITY_STATE_BOTH
469 | ARM_DBG_CR_MODE_CONTROL_USER
;
470 thread_state
->bvr
[i
] = state
->bvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
471 thread_state
->wcr
[i
] = (state
->wcr
[i
] & (ARM_DBG_CR_ADDRESS_MASK_MASK
472 | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
473 | ARM_DBGWCR_ACCESS_CONTROL_MASK
474 | ARM_DBG_CR_ENABLE_MASK
))
475 | ARM_DBG_CR_LINKED_UNLINKED
476 | ARM_DBG_CR_SECURITY_STATE_BOTH
477 | ARM_DBG_CR_MODE_CONTROL_USER
;
478 thread_state
->wvr
[i
] = state
->wvr
[i
] & ARM_DBG_VR_ADDRESS_MASK
;
481 if (thread
->machine
.DebugData
== NULL
) {
482 thread
->machine
.DebugData
= thread_state
;
486 if (thread
== current_thread()) {
487 arm_debug_set(thread_state
);
494 return KERN_INVALID_ARGUMENT
;
500 machine_thread_pc(thread_t thread
)
502 struct arm_saved_state
*ss
= get_user_regs(thread
);
503 return (mach_vm_address_t
)get_saved_state_pc(ss
);
507 machine_thread_reset_pc(thread_t thread
, mach_vm_address_t pc
)
509 set_saved_state_pc(get_user_regs(thread
), (register_t
)pc
);
513 * Routine: machine_thread_state_initialize
517 machine_thread_state_initialize(
520 struct arm_saved_state
*savestate
;
522 savestate
= (struct arm_saved_state
*) &thread
->machine
.PcbData
;
523 bzero((char *) savestate
, sizeof(struct arm_saved_state
));
524 savestate
->cpsr
= PSR_USERDFLT
;
527 vfp_state_initialize(&thread
->machine
.PcbData
.VFPdata
);
530 thread
->machine
.DebugData
= NULL
;
#if __ARM_VFP__
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
	/* Set default VFP state to RunFast mode:
	 *
	 * - flush-to-zero mode
	 * - no enabled exceptions
	 *
	 * On the VFP11, this allows the use of floating point without
	 * trapping to support code, which we do not provide.  With
	 * the Cortex-A8, this allows the use of the (much faster) NFP
	 * pipeline for single-precision operations.
	 */
	bzero(vfp_state, sizeof(*vfp_state));
	vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
558 * Routine: machine_thread_dup
565 __unused boolean_t is_corpse
)
567 struct arm_saved_state
*self_saved_state
;
568 struct arm_saved_state
*target_saved_state
;
571 struct arm_vfpsaved_state
*self_vfp_state
;
572 struct arm_vfpsaved_state
*target_vfp_state
;
575 target
->machine
.cthread_self
= self
->machine
.cthread_self
;
577 self_saved_state
= &self
->machine
.PcbData
;
578 target_saved_state
= &target
->machine
.PcbData
;
579 bcopy(self_saved_state
, target_saved_state
, sizeof(struct arm_saved_state
));
582 self_vfp_state
= &self
->machine
.PcbData
.VFPdata
;
583 target_vfp_state
= &target
->machine
.PcbData
.VFPdata
;
584 bcopy(self_vfp_state
, target_vfp_state
, sizeof(struct arm_vfpsaved_state
));
591 * Routine: get_user_regs
594 struct arm_saved_state
*
598 return &thread
->machine
.PcbData
;
602 * Routine: find_user_regs
605 struct arm_saved_state
*
609 return get_user_regs(thread
);
613 * Routine: find_kern_regs
616 struct arm_saved_state
*
621 * This works only for an interrupted kernel thread
623 if (thread
!= current_thread() || getCpuDatap()->cpu_int_state
== NULL
) {
624 return (struct arm_saved_state
*) NULL
;
626 return getCpuDatap()->cpu_int_state
;
#if __ARM_VFP__
/*
 * Find the user state floating point context.  If there is no user state context,
 * we just return a 0.
 */
struct arm_vfpsaved_state *
find_user_vfp(thread_t thread)
{
	return &thread->machine.PcbData.VFPdata;
}
#endif /* __ARM_VFP__ */
648 return thread
->machine
.DebugData
;
652 * Routine: thread_userstack
657 __unused thread_t thread
,
659 thread_state_t tstate
,
661 mach_vm_offset_t
* user_stack
,
663 __unused boolean_t is64bit
667 case ARM_THREAD_STATE
:
669 struct arm_thread_state
*state
;
672 if (count
< ARM_THREAD_STATE_COUNT
) {
673 return KERN_INVALID_ARGUMENT
;
679 state
= (struct arm_thread_state
*) tstate
;
682 *user_stack
= CAST_USER_ADDR_T(state
->sp
);
687 *user_stack
= CAST_USER_ADDR_T(USRSTACK
);
693 return KERN_INVALID_ARGUMENT
;
700 * thread_userstackdefault:
702 * Return the default stack location for the
703 * thread, if otherwise unknown.
706 thread_userstackdefault(
707 mach_vm_offset_t
*default_user_stack
,
708 boolean_t is64bit __unused
)
710 *default_user_stack
= USRSTACK
;
716 * Routine: thread_setuserstack
720 thread_setuserstack(thread_t thread
, mach_vm_address_t user_stack
)
722 struct arm_saved_state
*sv
;
724 #define thread_setuserstack_kprintf(x...) /* kprintf("thread_setuserstac
727 sv
= get_user_regs(thread
);
731 thread_setuserstack_kprintf("stack %x\n", sv
->sp
);
737 * Routine: thread_adjuserstack
741 thread_adjuserstack(thread_t thread
, int adjust
)
743 struct arm_saved_state
*sv
;
745 sv
= get_user_regs(thread
);
753 * Routine: thread_setentrypoint
757 thread_setentrypoint(thread_t thread
, mach_vm_offset_t entry
)
759 struct arm_saved_state
*sv
;
761 #define thread_setentrypoint_kprintf(x...) /* kprintf("thread_setentrypoi
764 sv
= get_user_regs(thread
);
768 thread_setentrypoint_kprintf("entry %x\n", sv
->pc
);
774 * Routine: thread_entrypoint
779 __unused thread_t thread
,
781 thread_state_t tstate
,
782 __unused
unsigned int count
,
783 mach_vm_offset_t
* entry_point
787 case ARM_THREAD_STATE
:
789 struct arm_thread_state
*state
;
791 if (count
!= ARM_THREAD_STATE_COUNT
) {
792 return KERN_INVALID_ARGUMENT
;
795 state
= (struct arm_thread_state
*) tstate
;
798 * If a valid entry point is specified, use it.
801 *entry_point
= CAST_USER_ADDR_T(state
->pc
);
803 *entry_point
= CAST_USER_ADDR_T(VM_MIN_ADDRESS
);
809 return KERN_INVALID_ARGUMENT
;
817 * Routine: thread_set_child
825 struct arm_saved_state
*child_state
;
827 child_state
= get_user_regs(child
);
829 child_state
->r
[0] = (uint_t
) pid
;
830 child_state
->r
[1] = 1ULL;
835 * Routine: thread_set_parent
843 struct arm_saved_state
*parent_state
;
845 parent_state
= get_user_regs(parent
);
847 parent_state
->r
[0] = pid
;
848 parent_state
->r
[1] = 0;
852 struct arm_act_context
{
853 struct arm_saved_state ss
;
855 struct arm_vfpsaved_state vfps
;
860 * Routine: act_thread_csave
864 act_thread_csave(void)
866 struct arm_act_context
*ic
;
870 ic
= (struct arm_act_context
*) kalloc(sizeof(struct arm_act_context
));
872 if (ic
== (struct arm_act_context
*) NULL
) {
876 val
= ARM_THREAD_STATE_COUNT
;
877 kret
= machine_thread_get_state(current_thread(),
879 (thread_state_t
) &ic
->ss
,
881 if (kret
!= KERN_SUCCESS
) {
882 kfree(ic
, sizeof(struct arm_act_context
));
886 val
= ARM_VFP_STATE_COUNT
;
887 kret
= machine_thread_get_state(current_thread(),
889 (thread_state_t
) &ic
->vfps
,
891 if (kret
!= KERN_SUCCESS
) {
892 kfree(ic
, sizeof(struct arm_act_context
));
900 * Routine: act_thread_catt
904 act_thread_catt(void *ctx
)
906 struct arm_act_context
*ic
;
909 ic
= (struct arm_act_context
*) ctx
;
911 if (ic
== (struct arm_act_context
*) NULL
) {
915 kret
= machine_thread_set_state(current_thread(),
917 (thread_state_t
) &ic
->ss
,
918 ARM_THREAD_STATE_COUNT
);
919 if (kret
!= KERN_SUCCESS
) {
924 kret
= machine_thread_set_state(current_thread(),
926 (thread_state_t
) &ic
->vfps
,
927 ARM_VFP_STATE_COUNT
);
928 if (kret
!= KERN_SUCCESS
) {
933 kfree(ic
, sizeof(struct arm_act_context
));
937 * Routine: act_thread_catt
941 act_thread_cfree(void *ctx
)
943 kfree(ctx
, sizeof(struct arm_act_context
));
947 thread_set_wq_state32(thread_t thread
, thread_state_t tstate
)
949 arm_thread_state_t
*state
;
950 struct arm_saved_state
*saved_state
;
951 thread_t curth
= current_thread();
954 saved_state
= &thread
->machine
.PcbData
;
955 state
= (arm_thread_state_t
*)tstate
;
957 if (curth
!= thread
) {
963 * do not zero saved_state, it can be concurrently accessed
964 * and zero is not a valid state for some of the registers,
967 thread_state32_to_saved_state(state
, saved_state
);
968 saved_state
->cpsr
= PSR_USERDFLT
;
970 if (curth
!= thread
) {
971 thread_unlock(thread
);