/*
 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>

#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/user_ldt.h>
#include <i386/machdep_call.h>
#include <i386/vmparam.h>
#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
#include <i386/thread.h>
#include <i386/trap.h>
#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
#include <sys/kdebug.h>
#include <sys/errno.h>
#include <../bsd/sys/sysent.h>
#ifdef MACH_BSD
extern void mach_kauth_cred_uthread_update(void);

void * find_user_regs(thread_t);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

extern void throttle_lowpri_io(int);
/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    __unused thread_t   thread,
    int                 flavor,
    thread_state_t      tstate,
    __unused unsigned int count,
    mach_vm_offset_t    *user_stack,
    int                 *customstack
)
{
    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case x86_THREAD_STATE32:
        {
            x86_thread_state32_t *state25;

            state25 = (x86_thread_state32_t *) tstate;

            if (state25->esp) {
                *user_stack = state25->esp;
                if (customstack)
                    *customstack = 1;
            } else {
                *user_stack = VM_USRSTACK32;
                if (customstack)
                    *customstack = 0;
            }
            break;
        }

    case x86_THREAD_STATE64:
        {
            x86_thread_state64_t *state25;

            state25 = (x86_thread_state64_t *) tstate;

            if (state25->rsp) {
                *user_stack = state25->rsp;
                if (customstack)
                    *customstack = 1;
            } else {
                *user_stack = VM_USRSTACK64;
                if (customstack)
                    *customstack = 0;
            }
            break;
        }

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}
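/*
 * Example of the state layout thread_userstack() parses (illustrative
 * sketch, not from this file): a user-mode caller supplies a custom
 * stack in the esp/rsp slot of the thread state it hands to the kernel,
 * e.g. via thread_create_running():
 *
 *	x86_thread_state64_t ts = { 0 };
 *	ts.rip = (uint64_t)entry_func;	// hypothetical entry point
 *	ts.rsp = (uint64_t)stack_top;	// non-zero => custom stack
 *	thread_create_running(task, x86_THREAD_STATE64,
 *	    (thread_state_t)&ts, x86_THREAD_STATE64_COUNT, &new_thread);
 *
 * A zero esp/rsp selects the VM_USRSTACK32/VM_USRSTACK64 default.
 */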
/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(
    thread_t thread,
    mach_vm_offset_t *default_user_stack)
{
    if (thread_is_64bit(thread)) {
        *default_user_stack = VM_USRSTACK64;
    } else {
        *default_user_stack = VM_USRSTACK32;
    }
    return (KERN_SUCCESS);
}
/*
 * thread_entrypoint:
 *
 * Return the user entry point from the machine
 * dependent thread state info.
 */
kern_return_t
thread_entrypoint(
    __unused thread_t   thread,
    int                 flavor,
    thread_state_t      tstate,
    __unused unsigned int count,
    mach_vm_offset_t    *entry_point
)
{
    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case x86_THREAD_STATE32:
        {
            x86_thread_state32_t *state25;

            state25 = (i386_thread_state_t *) tstate;
            *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
            break;
        }

    case x86_THREAD_STATE64:
        {
            x86_thread_state64_t *state25;

            state25 = (x86_thread_state64_t *) tstate;
            *entry_point = state25->rip ? state25->rip : VM_MIN_ADDRESS64;
            break;
        }
    }
    return (KERN_SUCCESS);
}
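/*
 * Note that a zero eip/rip in the handed-in state is indistinguishable
 * from "no entry point supplied": both fall back to VM_MIN_ADDRESS /
 * VM_MIN_ADDRESS64, so a thread cannot be pointed at address 0 through
 * this interface.
 */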
/*
 * FIXME - thread_set_child
 */
void thread_set_child(thread_t child, int pid);
void
thread_set_child(thread_t child, int pid)
{
    pal_register_cache_state(child, DIRTY);

    if (thread_is_64bit(child)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(child);

        iss64->rax = pid;
        iss64->rdx = 1;
        iss64->isf.rflags &= ~EFL_CF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(child);

        iss32->eax = pid;
        iss32->edx = 1;
        iss32->efl &= ~EFL_CF;
    }
}
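/*
 * Why eax/edx and the carry bit (hedged reading; the matching user
 * code is not in this file): fork-style traps return two values, and
 * the libc stub is expected to test edx -- 1 here marks the child
 * side, so the stub can return 0 to its caller -- while a set EFL_CF
 * is the BSD "syscall failed" flag, hence clearing it marks success.
 */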
/*
 * System Call handling code
 */

extern long fuword(vm_offset_t);
void
machdep_syscall(x86_saved_state_t *state)
{
    int                   args[machdep_call_count];
    int                   trapno;
    int                   nargs;
    const machdep_call_t *entry;
    x86_saved_state32_t  *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    trapno = regs->eax;
#if DEBUG_TRACE
    kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs != 0) {
        /* arguments start one word above uesp, past the user return address */
        if (copyin((user_addr_t) regs->uesp + sizeof (int),
                (char *) args, (nargs * sizeof (int)))) {
            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }
    }
    switch (nargs) {
    case 0:
        regs->eax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->eax = (*entry->routine.args_1)(args[0]);
        break;
    case 2:
        regs->eax = (*entry->routine.args_2)(args[0],args[1]);
        break;
    case 3:
        if (!entry->bsd_style)
            regs->eax = (*entry->routine.args_3)(args[0],args[1],args[2]);
        else {
            int      error;
            uint32_t rval;

            error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
            if (error) {
                regs->eax = error;
                regs->efl |= EFL_CF;	/* carry bit */
            } else {
                regs->eax = rval;
                regs->efl &= ~EFL_CF;
            }
        }
        break;
    case 4:
        regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
        break;

    default:
        panic("machdep_syscall: too many args");
    }
    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);

    throttle_lowpri_io(TRUE);

    thread_exception_return();
    /* NOTREACHED */
}
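/*
 * The bsd_style case above uses the BSD error convention rather than
 * the Mach one: the handler stores its result through rval, and on
 * failure the errno-style code goes in eax with EFL_CF set.  A user
 * stub can then branch on the carry flag right after the trap
 * (presumably a "jb cerror" sequence; that stub lives in libc, not
 * here).
 */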
void
machdep_syscall64(x86_saved_state_t *state)
{
    int                   trapno;
    const machdep_call_t *entry;
    x86_saved_state64_t  *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MDEP(
        "machdep_syscall64: trapno=%d\n", trapno);

    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->rax = (unsigned int)kern_invalid(NULL);

        thread_exception_return();
        /* NOTREACHED */
    }
    entry = &machdep_call_table64[trapno];

    switch (entry->nargs) {
    case 0:
        regs->rax = (*entry->routine.args_0)();
        break;
    case 1:
        regs->rax = (*entry->routine.args64_1)(regs->rdi);
        break;
    default:
        panic("machdep_syscall64: too many args");
    }
    if (current_thread()->funnel_lock)
        (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall64: retval=%llu\n", regs->rax);

    throttle_lowpri_io(TRUE);

    thread_exception_return();
    /* NOTREACHED */
}
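/*
 * Note the asymmetry with the 32-bit path above: the 64-bit machdep
 * table only dispatches 0- and 1-argument calls, and the single
 * argument arrives in %rdi rather than being copied in from the user
 * stack.
 */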
#endif	/* MACH_BSD */
typedef kern_return_t (*mach_call_t)(void *);

struct mach_call_args {
    /* fields are 64-bit wide so 32-bit argument pairs can be reassembled */
    syscall_arg_t arg1;
    syscall_arg_t arg2;
    syscall_arg_t arg3;
    syscall_arg_t arg4;
    syscall_arg_t arg5;
    syscall_arg_t arg6;
    syscall_arg_t arg7;
    syscall_arg_t arg8;
    syscall_arg_t arg9;
};

static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args);

static kern_return_t
mach_call_arg_munger32(uint32_t sp, int nargs, int call_number, struct mach_call_args *args)
{
    unsigned int args32[9];

    if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args32, nargs * sizeof (int)))
        return KERN_INVALID_ARGUMENT;

    /* deliberate fall-through: copy the topmost arguments first */
    switch (nargs) {
    case 9: args->arg9 = args32[8];
    case 8: args->arg8 = args32[7];
    case 7: args->arg7 = args32[6];
    case 6: args->arg6 = args32[5];
    case 5: args->arg5 = args32[4];
    case 4: args->arg4 = args32[3];
    case 3: args->arg3 = args32[2];
    case 2: args->arg2 = args32[1];
    case 1: args->arg1 = args32[0];
    }

    if (call_number == 10) {
        /* munge the mach_vm_size_t for mach_vm_allocate() */
        args->arg3 = (((uint64_t)(args32[2])) | ((((uint64_t)(args32[3]))<<32)));
        args->arg4 = args32[4];
    } else if (call_number == 12) {
        /* munge the mach_vm_address_t and mach_vm_size_t for mach_vm_deallocate() */
        args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2]))<<32)));
        args->arg3 = (((uint64_t)(args32[3])) | ((((uint64_t)(args32[4]))<<32)));
    } else if (call_number == 14) {
        /* munge the mach_vm_address_t and mach_vm_size_t for mach_vm_protect() */
        args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2]))<<32)));
        args->arg3 = (((uint64_t)(args32[3])) | ((((uint64_t)(args32[4]))<<32)));
        args->arg4 = args32[5];
        args->arg5 = args32[6];
    } else if (call_number == 90) {
        /* munge_l for mach_wait_until_trap() */
        args->arg1 = (((uint64_t)(args32[0])) | ((((uint64_t)(args32[1]))<<32)));
    } else if (call_number == 93) {
        /* munge_wl for mk_timer_arm_trap() */
        args->arg2 = (((uint64_t)(args32[1])) | ((((uint64_t)(args32[2]))<<32)));
    }

    return KERN_SUCCESS;
}
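/*
 * The (lo | hi << 32) expressions rebuild 64-bit trap arguments from
 * adjacent 32-bit stack words pushed by a 32-bit (little-endian)
 * caller, low half first.  Worked example for mach_vm_allocate()
 * (call 10): a mach_vm_size_t of 0x0000000123400000 arrives as
 * args32[2] = 0x23400000 and args32[3] = 0x00000001, and
 *
 *	((uint64_t)0x23400000) | (((uint64_t)0x00000001) << 32)
 *
 * reassembles 0x0000000123400000 in args->arg3.
 */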
__private_extern__ void mach_call_munger(x86_saved_state_t *state);

extern const char *mach_syscall_name_table[];

void
mach_call_munger(x86_saved_state_t *state)
{
    int argc;
    int call_number;
    mach_call_t mach_call;
    kern_return_t retval;
    struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
    x86_saved_state32_t *regs;

    assert(is_saved_state32(state));
    regs = saved_state32(state);

    call_number = -(regs->eax);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);
#if DEBUG_TRACE
    kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
#endif

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        DEBUG_KPRINT_SYSCALL_MACH(
            "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
        i386_exception(EXC_SYSCALL, call_number, 1);
        /* NOTREACHED */
    }

    argc = mach_trap_table[call_number].mach_trap_arg_count;
    if (argc) {
        retval = mach_call_arg_munger32(regs->uesp, argc, call_number, &args);
        if (retval != KERN_SUCCESS) {
            regs->eax = retval;

            DEBUG_KPRINT_SYSCALL_MACH(
                "mach_call_munger: retval=0x%x\n", retval);

            thread_exception_return();
            /* NOTREACHED */
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        args.arg1, args.arg2, args.arg3, args.arg4, 0);

    retval = mach_call(&args);

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        retval, 0, 0, 0, 0);
    regs->eax = retval;

    throttle_lowpri_io(TRUE);

    thread_exception_return();
    /* NOTREACHED */
}
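/*
 * call_number = -(regs->eax): 32-bit processes encode Mach trap
 * numbers as negative values in eax (positive values are BSD syscall
 * numbers, dispatched elsewhere), so the value is negated before
 * indexing mach_trap_table.
 */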
__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);

void
mach_call_munger64(x86_saved_state_t *state)
{
    int call_number;
    int argc;
    mach_call_t mach_call;
    x86_saved_state64_t *regs;

    assert(is_saved_state64(state));
    regs = saved_state64(state);

    call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);

    DEBUG_KPRINT_SYSCALL_MACH(
        "mach_call_munger64: code=%d(%s)\n",
        call_number, mach_syscall_name_table[call_number]);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
        regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);

    if (call_number < 0 || call_number >= mach_trap_count) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;

    if (mach_call == (mach_call_t)kern_invalid) {
        i386_exception(EXC_SYSCALL, regs->rax, 1);
        /* NOTREACHED */
    }
    argc = mach_trap_table[call_number].mach_trap_arg_count;

    if (argc > 6) {
        int copyin_count;

        copyin_count = (argc - 6) * (int)sizeof(uint64_t);

        if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)),
                (char *)&regs->v_arg6, copyin_count)) {
            regs->rax = KERN_INVALID_ARGUMENT;

            thread_exception_return();
            /* NOTREACHED */
        }
    }

#ifdef MACH_BSD
    mach_kauth_cred_uthread_update();
#endif

    regs->rax = (uint64_t)mach_call((void *)(&regs->rdi));

    DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger64: retval=0x%llx\n", regs->rax);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_END,
        regs->rax, 0, 0, 0, 0);

    throttle_lowpri_io(TRUE);

    thread_exception_return();
    /* NOTREACHED */
}
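/*
 * mach_call() is handed &regs->rdi directly: for argc > 6 the spill
 * arguments are copied from the user stack (skipping the return
 * address at rsp) into regs->v_arg6.  This relies on rdi, rsi, rdx,
 * r10, r8, r9, v_arg6... being laid out contiguously in
 * x86_saved_state64_t, so the saved state doubles as a
 * struct mach_call_args.
 */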
/*
 * thread_setuserstack:
 *
 * Sets the user stack pointer into the machine
 * dependent thread state info.
 */
void
thread_setuserstack(
    thread_t thread,
    mach_vm_address_t user_stack)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp = (uint64_t)user_stack;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
    }
}
/*
 * thread_adjuserstack:
 *
 * Returns the adjusted user stack pointer from the machine
 * dependent thread state info.  Used for small (<2G) deltas.
 */
uint64_t
thread_adjuserstack(
    thread_t thread,
    int adjust)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rsp += adjust;

        return iss64->isf.rsp;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->uesp += adjust;

        return CAST_USER_ADDR_T(iss32->uesp);
    }
}
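/*
 * Typical use is carving space on the target thread's user stack,
 * e.g. (hypothetical caller):
 *
 *	new_sp = thread_adjuserstack(thread, -16);
 *
 * The int-sized delta is why the comment above restricts this
 * interface to small (<2G) adjustments.
 */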
/*
 * thread_setentrypoint:
 *
 * Sets the user PC into the machine
 * dependent thread state info.
 */
void
thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        iss64->isf.rip = (uint64_t)entry;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
    }
}
kern_return_t
thread_setsinglestep(thread_t thread, int on)
{
    pal_register_cache_state(thread, DIRTY);
    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *iss64;

        iss64 = USER_REGS64(thread);

        if (on)
            iss64->isf.rflags |= EFL_TF;
        else
            iss64->isf.rflags &= ~EFL_TF;
    } else {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        if (on) {
            iss32->efl |= EFL_TF;

            /* Ensure IRET */
            if (iss32->cs == SYSENTER_CS)
                iss32->cs = SYSENTER_TF_CS;
        } else
            iss32->efl &= ~EFL_TF;
    }

    return (KERN_SUCCESS);
}
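/*
 * The SYSENTER_CS -> SYSENTER_TF_CS switch matches the "Ensure IRET"
 * comment: as best understood, it steers the return-to-user path away
 * from the sysexit fast path and through iret, so that the freshly
 * set trap flag takes effect on the first user instruction.
 */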
/* XXX this should be a struct savearea so that CHUD will work better on x86 */
void *
find_user_regs(thread_t thread)
{
    pal_register_cache_state(thread, DIRTY);
    return USER_STATE(thread);
}

void *
get_user_regs(thread_t th)
{
    pal_register_cache_state(th, DIRTY);
    return (USER_STATE(th));
}
#if CONFIG_DTRACE
/*
 * DTrace would like to have a peek at the kernel interrupt state, if available.
 * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
 */
x86_saved_state_t *find_kern_regs(thread_t);

x86_saved_state_t *
find_kern_regs(thread_t thread)
{
    if (thread == current_thread() &&
        NULL != current_cpu_datap()->cpu_int_state &&
        !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
          current_cpu_datap()->cpu_interrupt_level == 1)) {

        return current_cpu_datap()->cpu_int_state;
    } else {
        return NULL;
    }
}
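/*
 * The guard above returns the per-CPU interrupt state only for the
 * current thread, and excludes the case where that state is the
 * thread's own user state at interrupt level 1 -- an interrupt taken
 * directly from user mode, where there is no kernel context worth
 * reporting.
 */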
vm_offset_t dtrace_get_cpu_int_stack_top(void);

vm_offset_t
dtrace_get_cpu_int_stack_top(void)
{
    return current_cpu_datap()->cpu_int_stack_top;
}
#endif	/* CONFIG_DTRACE */