/*
- * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#ifdef MACH_BSD
-#include <cpus.h>
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>
#include <mach/kern_return.h>
+#include <mach/mach_traps.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
-#include <kern/thread_swap.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
+#include <kern/debug.h>
#include <kern/spl.h>
#include <kern/syscall_sw.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
-#include <i386/thread.h>
+#include <i386/cpu_number.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
-#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
-#include <i386/iopb_entries.h>
#include <i386/machdep_call.h>
-
+#include <i386/vmparam.h>
+#include <i386/mp_desc.h>
+#include <i386/misc_protos.h>
+#include <i386/thread.h>
+#include <i386/trap.h>
+#include <i386/seg.h>
+#include <mach/i386/syscall_sw.h>
#include <sys/syscall.h>
-#include <sys/ktrace.h>
-struct proc;
+#include <sys/kdebug.h>
+#include <sys/errno.h>
+#include <../bsd/sys/sysent.h>
-kern_return_t
-thread_userstack(
- thread_t,
- int,
- thread_state_t,
- unsigned int,
- vm_offset_t *,
- int *
-);
-
-kern_return_t
-thread_entrypoint(
- thread_t,
- int,
- thread_state_t,
- unsigned int,
- vm_offset_t *
-);
+#ifdef MACH_BSD
+extern void mach_kauth_cred_uthread_update(void);
+extern void throttle_lowpri_io(int);
+#endif
-struct i386_saved_state *
-get_user_regs(
- thread_act_t);
+void * find_user_regs(thread_t);
unsigned int get_msr_exportmask(void);
unsigned int get_msr_rbits(void);
-kern_return_t
-thread_compose_cthread_desc(unsigned int addr, pcb_t pcb);
-
/*
* thread_userstack:
*
*/
kern_return_t
thread_userstack(
- thread_t thread,
+ __unused thread_t thread,
int flavor,
thread_state_t tstate,
- unsigned int count,
- vm_offset_t *user_stack,
+ __unused unsigned int count,
+ mach_vm_offset_t *user_stack,
int *customstack
)
{
- struct i386_saved_state *state;
- i386_thread_state_t *state25;
- vm_offset_t uesp;
-
- if (customstack)
- *customstack = 0;
-
- switch (flavor) {
- case i386_THREAD_STATE: /* FIXME */
- state25 = (i386_thread_state_t *) tstate;
- if (state25->esp)
- *user_stack = state25->esp;
- if (customstack && state25->esp)
- *customstack = 1;
- else
- *customstack = 0;
- break;
+ if (customstack)
+ *customstack = 0;
+
+ switch (flavor) {
+ case x86_THREAD_STATE32:
+ {
+ x86_thread_state32_t *state25;
+
+ state25 = (x86_thread_state32_t *) tstate;
+
+ if (state25->esp) {
+ *user_stack = state25->esp;
+ if (customstack)
+ *customstack = 1;
+ } else {
+ *user_stack = VM_USRSTACK32;
+ if (customstack)
+ *customstack = 0;
+ }
+ break;
+ }
+
+ case x86_THREAD_STATE64:
+ {
+ x86_thread_state64_t *state25;
+
+ state25 = (x86_thread_state64_t *) tstate;
+
+ if (state25->rsp) {
+ *user_stack = state25->rsp;
+ if (customstack)
+ *customstack = 1;
+ } else {
+ *user_stack = VM_USRSTACK64;
+ if (customstack)
+ *customstack = 0;
+ }
+ break;
+ }
+
+ default:
+ return (KERN_INVALID_ARGUMENT);
+ }
- case i386_NEW_THREAD_STATE:
- if (count < i386_NEW_THREAD_STATE_COUNT)
- return (KERN_INVALID_ARGUMENT);
- else {
- state = (struct i386_saved_state *) tstate;
- uesp = state->uesp;
- }
-
- /* If a valid user stack is specified, use it. */
- if (uesp)
- *user_stack = uesp;
- if (customstack && uesp)
- *customstack = 1;
- else
- *customstack = 0;
- break;
- default :
- return (KERN_INVALID_ARGUMENT);
- }
-
- return (KERN_SUCCESS);
-}
+ return (KERN_SUCCESS);
+}
+
+/*
+ * thread_userstackdefault:
+ *
+ * Return the default stack location for the
+ * thread, if otherwise unknown.
+ */
+kern_return_t
+thread_userstackdefault(
+ thread_t thread,
+ mach_vm_offset_t *default_user_stack)
+{
+ if (thread_is_64bit(thread)) {
+ *default_user_stack = VM_USRSTACK64;
+ } else {
+ *default_user_stack = VM_USRSTACK32;
+ }
+ return (KERN_SUCCESS);
+}
kern_return_t
thread_entrypoint(
- thread_t thread,
+ __unused thread_t thread,
int flavor,
thread_state_t tstate,
- unsigned int count,
- vm_offset_t *entry_point
+ __unused unsigned int count,
+ mach_vm_offset_t *entry_point
)
{
- struct i386_saved_state *state;
- i386_thread_state_t *state25;
-
- /*
- * Set a default.
- */
- if (*entry_point == 0)
- *entry_point = VM_MIN_ADDRESS;
-
- switch (flavor) {
- case i386_THREAD_STATE:
- state25 = (i386_thread_state_t *) tstate;
- *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS;
- break;
-
- case i386_NEW_THREAD_STATE:
- if (count < i386_THREAD_STATE_COUNT)
- return (KERN_INVALID_ARGUMENT);
- else {
- state = (struct i386_saved_state *) tstate;
-
- /*
- * If a valid entry point is specified, use it.
- */
- *entry_point = state->eip ? state->eip: VM_MIN_ADDRESS;
+ /*
+ * Set a default.
+ */
+ if (*entry_point == 0)
+ *entry_point = VM_MIN_ADDRESS;
+
+ switch (flavor) {
+ case x86_THREAD_STATE32:
+ {
+ x86_thread_state32_t *state25;
+
+ state25 = (x86_thread_state32_t *) tstate;
+ *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS;
+ break;
+ }
+
+ case x86_THREAD_STATE64:
+ {
+ x86_thread_state64_t *state25;
+
+ state25 = (x86_thread_state64_t *) tstate;
+ *entry_point = state25->rip ? state25->rip: VM_MIN_ADDRESS64;
+ break;
+ }
}
- break;
- }
+ return (KERN_SUCCESS);
+}
- return (KERN_SUCCESS);
-}
+/*
+ * FIXME - thread_set_child
+ */
-struct i386_saved_state *
-get_user_regs(thread_act_t th)
+void thread_set_child(thread_t child, int pid);
+void
+thread_set_child(thread_t child, int pid)
{
- if (th->mact.pcb)
- return(USER_REGS(th));
- else {
- printf("[get_user_regs: thread does not have pcb]");
- return NULL;
+ pal_register_cache_state(child, DIRTY);
+
+ if (thread_is_64bit(child)) {
+ x86_saved_state64_t *iss64;
+
+ iss64 = USER_REGS64(child);
+
+ iss64->rax = pid;
+ iss64->rdx = 1;
+ iss64->isf.rflags &= ~EFL_CF;
+ } else {
+ x86_saved_state32_t *iss32;
+
+ iss32 = USER_REGS32(child);
+
+ iss32->eax = pid;
+ iss32->edx = 1;
+ iss32->efl &= ~EFL_CF;
}
}
+
+
/*
- * Duplicate parent state in child
- * for U**X fork.
+ * System Call handling code
*/
-kern_return_t
-machine_thread_dup(
- thread_act_t parent,
- thread_act_t child
-)
+
+extern long fuword(vm_offset_t);
+
+
+
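+/*
+ * machdep_syscall: handle a 32-bit machine-dependent system call.
+ * The trap number arrives in eax; up to four int-sized arguments are
+ * copied in from the user stack and dispatched through machdep_call_table.
+ */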
+void
+machdep_syscall(x86_saved_state_t *state)
{
- struct i386_saved_state *parent_state, *child_state;
- struct i386_machine_state *ims;
- struct i386_float_state floatregs;
-
-#ifdef XXX
- /* Save the FPU state */
- if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->mact.pcb) {
- fp_state_save(parent);
- }
+ int args[machdep_call_count];
+ int trapno;
+ int nargs;
+ const machdep_call_t *entry;
+ x86_saved_state32_t *regs;
+
+ assert(is_saved_state32(state));
+ regs = saved_state32(state);
+
+ trapno = regs->eax;
+#if DEBUG_TRACE
+ kprintf("machdep_syscall(0x%08x) code=%d\n", regs, trapno);
#endif
- if (child->mact.pcb == NULL || parent->mact.pcb == NULL)
- return (KERN_FAILURE);
+ DEBUG_KPRINT_SYSCALL_MDEP(
+ "machdep_syscall: trapno=%d\n", trapno);
- /* Copy over the i386_saved_state registers */
- child->mact.pcb->iss = parent->mact.pcb->iss;
+ if (trapno < 0 || trapno >= machdep_call_count) {
+ regs->eax = (unsigned int)kern_invalid(NULL);
- /* Check to see if parent is using floating point
- * and if so, copy the registers to the child
- * FIXME - make sure this works.
- */
+ thread_exception_return();
+ /* NOTREACHED */
+ }
+ entry = &machdep_call_table[trapno];
+ nargs = entry->nargs;
- if (parent->mact.pcb->ims.ifps) {
- if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS)
- fpu_set_state(child, &floatregs);
+ if (nargs != 0) {
+ if (copyin((user_addr_t) regs->uesp + sizeof (int),
+ (char *) args, (nargs * sizeof (int)))) {
+ regs->eax = KERN_INVALID_ADDRESS;
+
+ thread_exception_return();
+ /* NOTREACHED */
+ }
}
-
- /* FIXME - should a user specified LDT, TSS and V86 info
- * be duplicated as well?? - probably not.
- */
+ switch (nargs) {
+ case 0:
+ regs->eax = (*entry->routine.args_0)();
+ break;
+ case 1:
+ regs->eax = (*entry->routine.args_1)(args[0]);
+ break;
+ case 2:
+ regs->eax = (*entry->routine.args_2)(args[0],args[1]);
+ break;
+ case 3:
+ if (!entry->bsd_style)
+ regs->eax = (*entry->routine.args_3)(args[0],args[1],args[2]);
+ else {
+ int error;
+ uint32_t rval;
+
+ error = (*entry->routine.args_bsd_3)(&rval, args[0], args[1], args[2]);
+ if (error) {
+ regs->eax = error;
+ regs->efl |= EFL_CF; /* carry bit */
+ } else {
+ regs->eax = rval;
+ regs->efl &= ~EFL_CF;
+ }
+ }
+ break;
+ case 4:
+ regs->eax = (*entry->routine.args_4)(args[0], args[1], args[2], args[3]);
+ break;
- return (KERN_SUCCESS);
-}
+ default:
+ panic("machdep_syscall: too many args");
+ }
+ if (current_thread()->funnel_lock)
+ (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
-/*
- * FIXME - thread_set_child
- */
+ DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%u\n", regs->eax);
-void thread_set_child(thread_act_t child, int pid);
-void
-thread_set_child(thread_act_t child, int pid)
-{
- child->mact.pcb->iss.eax = pid;
- child->mact.pcb->iss.edx = 1;
- child->mact.pcb->iss.efl &= ~EFL_CF;
+ throttle_lowpri_io(1);
+
+ thread_exception_return();
+ /* NOTREACHED */
}
-void thread_set_parent(thread_act_t parent, int pid);
+
+
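+/*
+ * machdep_syscall64: 64-bit variant of the above. The trap number is
+ * taken from rax (masked with SYSCALL_NUMBER_MASK) and dispatched through
+ * machdep_call_table64; only zero- and one-argument calls are supported,
+ * with the single argument passed in rdi.
+ */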
void
-thread_set_parent(thread_act_t parent, int pid)
+machdep_syscall64(x86_saved_state_t *state)
{
- parent->mact.pcb->iss.eax = pid;
- parent->mact.pcb->iss.edx = 0;
- parent->mact.pcb->iss.efl &= ~EFL_CF;
-}
+ int trapno;
+ const machdep_call_t *entry;
+ x86_saved_state64_t *regs;
+ assert(is_saved_state64(state));
+ regs = saved_state64(state);
+
+ trapno = (int)(regs->rax & SYSCALL_NUMBER_MASK);
+ DEBUG_KPRINT_SYSCALL_MDEP(
+ "machdep_syscall64: trapno=%d\n", trapno);
-/*
- * Move pages from one kernel virtual address to another.
- * Both addresses are assumed to reside in the Sysmap,
- * and size must be a multiple of the page size.
- */
-void
-pagemove(
- register caddr_t from,
- register caddr_t to,
- int size)
-{
- pmap_movepage((unsigned long)from, (unsigned long)to, (vm_size_t)size);
+ if (trapno < 0 || trapno >= machdep_call_count) {
+ regs->rax = (unsigned int)kern_invalid(NULL);
+
+ thread_exception_return();
+ /* NOTREACHED */
+ }
+ entry = &machdep_call_table64[trapno];
+
+ switch (entry->nargs) {
+ case 0:
+ regs->rax = (*entry->routine.args_0)();
+ break;
+ case 1:
+ regs->rax = (*entry->routine.args64_1)(regs->rdi);
+ break;
+ default:
+ panic("machdep_syscall64: too many args");
+ }
+ if (current_thread()->funnel_lock)
+ (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
+
+ DEBUG_KPRINT_SYSCALL_MDEP("machdep_syscall: retval=%llu\n", regs->rax);
+
+ throttle_lowpri_io(1);
+
+ thread_exception_return();
+ /* NOTREACHED */
}
-/*
- * System Call handling code
- */
+#endif /* MACH_BSD */
+
-#define ERESTART -1 /* restart syscall */
-#define EJUSTRETURN -2 /* don't modify regs, just return */
+typedef kern_return_t (*mach_call_t)(void *);
-struct sysent { /* system call table */
- unsigned short sy_narg; /* number of args */
- char sy_parallel; /* can execute in parallel */
- char sy_funnel; /* funnel type */
- unsigned long (*sy_call)(void *, void *, int *); /* implementing function */
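+/*
+ * Canonical argument block handed to Mach trap functions: every trap
+ * receives its (at most nine) arguments through this block, each stored
+ * as a syscall_arg_t.
+ */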
+struct mach_call_args {
+ syscall_arg_t arg1;
+ syscall_arg_t arg2;
+ syscall_arg_t arg3;
+ syscall_arg_t arg4;
+ syscall_arg_t arg5;
+ syscall_arg_t arg6;
+ syscall_arg_t arg7;
+ syscall_arg_t arg8;
+ syscall_arg_t arg9;
};
-#define NO_FUNNEL 0
-#define KERNEL_FUNNEL 1
-#define NETWORK_FUNNEL 2
+static kern_return_t
+mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp);
-extern funnel_t * kernel_flock;
-extern funnel_t * network_flock;
-extern struct sysent sysent[];
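+/*
+ * Copy a 32-bit caller's trap arguments in from its user stack and let the
+ * trap's munger routine widen them into the 64-bit mach_call_args layout.
+ */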
+static kern_return_t
+mach_call_arg_munger32(uint32_t sp, struct mach_call_args *args, const mach_trap_t *trapp)
+{
+ if (copyin((user_addr_t)(sp + sizeof(int)), (char *)args, trapp->mach_trap_u32_words * sizeof (int)))
+ return KERN_INVALID_ARGUMENT;
+ trapp->mach_trap_arg_munge32(NULL, args);
+ return KERN_SUCCESS;
+}
-int set_bsduthreadargs (thread_act_t, struct i386_saved_state *, void *);
-void * get_bsduthreadarg(thread_act_t);
+__private_extern__ void mach_call_munger(x86_saved_state_t *state);
-void unix_syscall(struct i386_saved_state *);
+extern const char *mach_syscall_name_table[];
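+/*
+ * mach_call_munger: common entry point for Mach traps from 32-bit user
+ * space. The trap number arrives negated in eax; arguments are copied in
+ * from the user stack and widened before the trap function is called.
+ */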
void
-unix_syscall_return(int error)
+mach_call_munger(x86_saved_state_t *state)
{
- thread_act_t thread;
- volatile int *rval;
- struct i386_saved_state *regs;
- struct proc *p;
- struct proc *current_proc();
- unsigned short code;
- vm_offset_t params;
- struct sysent *callp;
- extern int nsysent;
-
- thread = current_act();
- rval = (int *)get_bsduthreadrval(thread);
- p = current_proc();
-
- regs = USER_REGS(thread);
-
- /* reconstruct code for tracing before blasting eax */
- code = regs->eax;
- params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
- callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
- if (callp == sysent) {
- code = fuword(params);
+ int argc;
+ int call_number;
+ mach_call_t mach_call;
+ kern_return_t retval;
+ struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ x86_saved_state32_t *regs;
+
+ assert(is_saved_state32(state));
+ regs = saved_state32(state);
+
+ call_number = -(regs->eax);
+
+ DEBUG_KPRINT_SYSCALL_MACH(
+ "mach_call_munger: code=%d(%s)\n",
+ call_number, mach_syscall_name_table[call_number]);
+#if DEBUG_TRACE
+ kprintf("mach_call_munger(0x%08x) code=%d\n", regs, call_number);
+#endif
+
+ if (call_number < 0 || call_number >= mach_trap_count) {
+ i386_exception(EXC_SYSCALL, call_number, 1);
+ /* NOTREACHED */
}
+ mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
- if (error == ERESTART) {
- regs->eip -= 7;
+ if (mach_call == (mach_call_t)kern_invalid) {
+ DEBUG_KPRINT_SYSCALL_MACH(
+ "mach_call_munger: kern_invalid 0x%x\n", regs->eax);
+ i386_exception(EXC_SYSCALL, call_number, 1);
+ /* NOTREACHED */
}
- else if (error != EJUSTRETURN) {
- if (error) {
- regs->eax = error;
- regs->efl |= EFL_CF; /* carry bit */
- } else { /* (not error) */
- regs->eax = rval[0];
- regs->edx = rval[1];
- regs->efl &= ~EFL_CF;
- }
+
+ argc = mach_trap_table[call_number].mach_trap_arg_count;
+ if (argc) {
+ retval = mach_call_arg_munger32(regs->uesp, &args, &mach_trap_table[call_number]);
+ if (retval != KERN_SUCCESS) {
+ regs->eax = retval;
+
+ DEBUG_KPRINT_SYSCALL_MACH(
+ "mach_call_munger: retval=0x%x\n", retval);
+
+ thread_exception_return();
+ /* NOTREACHED */
+ }
}
- ktrsysret(p, code, error, rval[0], callp->sy_funnel);
+#ifdef MACH_BSD
+ mach_kauth_cred_uthread_update();
+#endif
+
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
+ args.arg1, args.arg2, args.arg3, args.arg4, 0);
+
+ retval = mach_call(&args);
- KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
- error, rval[0], rval[1], 0, 0);
+ DEBUG_KPRINT_SYSCALL_MACH("mach_call_munger: retval=0x%x\n", retval);
- if (callp->sy_funnel != NO_FUNNEL)
- (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
+ retval, 0, 0, 0, 0);
- thread_exception_return();
- /* NOTREACHED */
+ regs->eax = retval;
+
+ throttle_lowpri_io(1);
+
+ thread_exception_return();
+ /* NOTREACHED */
}
+__private_extern__ void mach_call_munger64(x86_saved_state_t *regs);
+
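+/*
+ * mach_call_munger64: common entry point for Mach traps from 64-bit user
+ * space. The first six arguments arrive in registers and are read directly
+ * out of the saved state starting at rdi; any additional arguments are
+ * copied in from the user stack.
+ */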
void
-unix_syscall(struct i386_saved_state *regs)
+mach_call_munger64(x86_saved_state_t *state)
{
- thread_act_t thread;
- void *vt;
- unsigned short code;
- struct sysent *callp;
- int nargs, error;
- volatile int *rval;
- int funnel_type;
- vm_offset_t params;
- extern int nsysent;
- struct proc *p;
- struct proc *current_proc();
-
- thread = current_act();
- p = current_proc();
- rval = (int *)get_bsduthreadrval(thread);
-
- //printf("[scall : eax %x]", regs->eax);
- code = regs->eax;
- params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
- callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
- if (callp == sysent) {
- code = fuword(params);
- params += sizeof (int);
- callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
- }
-
- vt = get_bsduthreadarg(thread);
+ int call_number;
+ int argc;
+ mach_call_t mach_call;
+ x86_saved_state64_t *regs;
- if ((nargs = (callp->sy_narg * sizeof (int))) &&
- (error = copyin((char *) params, (char *)vt , nargs)) != 0) {
- regs->eax = error;
- regs->efl |= EFL_CF;
- thread_exception_return();
- /* NOTREACHED */
- }
-
- rval[0] = 0;
- rval[1] = regs->edx;
-
- funnel_type = callp->sy_funnel;
- if(funnel_type == KERNEL_FUNNEL)
- (void) thread_funnel_set(kernel_flock, TRUE);
- else if (funnel_type == NETWORK_FUNNEL)
- (void) thread_funnel_set(network_flock, TRUE);
-
- set_bsduthreadargs(thread, regs, NULL);
+ assert(is_saved_state64(state));
+ regs = saved_state64(state);
- if (callp->sy_narg > 8)
- panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);
+ call_number = (int)(regs->rax & SYSCALL_NUMBER_MASK);
- ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);
+ DEBUG_KPRINT_SYSCALL_MACH(
+ "mach_call_munger64: code=%d(%s)\n",
+ call_number, mach_syscall_name_table[call_number]);
- {
- int *ip = (int *)vt;
- KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
- *ip, *(ip+1), *(ip+2), *(ip+3), 0);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_START,
+ regs->rdi, regs->rsi, regs->rdx, regs->r10, 0);
+
+ if (call_number < 0 || call_number >= mach_trap_count) {
+ i386_exception(EXC_SYSCALL, regs->rax, 1);
+ /* NOTREACHED */
}
+ mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
- error = (*(callp->sy_call))(p, (void *) vt, (int *) &rval[0]);
-
-#if 0
- /* May be needed with vfork changes */
- regs = USER_REGS(thread);
-#endif
- if (error == ERESTART) {
- regs->eip -= 7;
+ if (mach_call == (mach_call_t)kern_invalid) {
+ i386_exception(EXC_SYSCALL, regs->rax, 1);
+ /* NOTREACHED */
}
- else if (error != EJUSTRETURN) {
- if (error) {
- regs->eax = error;
- regs->efl |= EFL_CF; /* carry bit */
- } else { /* (not error) */
- regs->eax = rval[0];
- regs->edx = rval[1];
- regs->efl &= ~EFL_CF;
- }
+ argc = mach_trap_table[call_number].mach_trap_arg_count;
+
+ if (argc > 6) {
+ int copyin_count;
+
+ copyin_count = (argc - 6) * (int)sizeof(uint64_t);
+
+ if (copyin((user_addr_t)(regs->isf.rsp + sizeof(user_addr_t)), (char *)&regs->v_arg6, copyin_count)) {
+ regs->rax = KERN_INVALID_ARGUMENT;
+
+ thread_exception_return();
+ /* NOTREACHED */
+ }
}
- ktrsysret(p, code, error, rval[0], funnel_type);
+#ifdef MACH_BSD
+ mach_kauth_cred_uthread_update();
+#endif
+
+ regs->rax = (uint64_t)mach_call((void *)(®s->rdi));
+
+ DEBUG_KPRINT_SYSCALL_MACH( "mach_call_munger64: retval=0x%llx\n", regs->rax);
- KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
- error, rval[0], rval[1], 0, 0);
+ KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
+ MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
+ regs->rax, 0, 0, 0, 0);
- if(funnel_type != NO_FUNNEL)
- (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
+ throttle_lowpri_io(1);
- thread_exception_return();
- /* NOTREACHED */
+ thread_exception_return();
+ /* NOTREACHED */
}
+/*
+ * thread_setuserstack:
+ *
+ * Sets the user stack pointer into the machine
+ * dependent thread state info.
+ */
void
-machdep_syscall( struct i386_saved_state *regs)
+thread_setuserstack(
+ thread_t thread,
+ mach_vm_address_t user_stack)
{
- int trapno, nargs;
- machdep_call_t *entry;
- thread_t thread;
- struct proc *p;
- struct proc *current_proc();
-
- trapno = regs->eax;
- if (trapno < 0 || trapno >= machdep_call_count) {
- regs->eax = (unsigned int)kern_invalid();
-
- thread_exception_return();
- /* NOTREACHED */
- }
-
- entry = &machdep_call_table[trapno];
- nargs = entry->nargs;
+ pal_register_cache_state(thread, DIRTY);
+ if (thread_is_64bit(thread)) {
+ x86_saved_state64_t *iss64;
- if (nargs > 0) {
- int args[nargs];
+ iss64 = USER_REGS64(thread);
- if (copyin((char *) regs->uesp + sizeof (int),
- (char *) args,
- nargs * sizeof (int))) {
+ iss64->isf.rsp = (uint64_t)user_stack;
+ } else {
+ x86_saved_state32_t *iss32;
- regs->eax = KERN_INVALID_ADDRESS;
+ iss32 = USER_REGS32(thread);
- thread_exception_return();
- /* NOTREACHED */
+ iss32->uesp = CAST_DOWN_EXPLICIT(unsigned int, user_stack);
}
+}
- switch (nargs) {
- case 1:
- regs->eax = (*entry->routine)(args[0]);
- break;
- case 2:
- regs->eax = (*entry->routine)(args[0],args[1]);
- break;
- case 3:
- regs->eax = (*entry->routine)(args[0],args[1],args[2]);
- break;
- case 4:
- regs->eax = (*entry->routine)(args[0],args[1],args[2],args[3]);
- break;
- default:
- panic("machdep_syscall(): too many args");
- }
- }
- else
- regs->eax = (*entry->routine)();
+/*
+ * thread_adjuserstack:
+ *
+ * Returns the adjusted user stack pointer from the machine
+ * dependent thread state info. Used for small (<2G) deltas.
+ */
+uint64_t
+thread_adjuserstack(
+ thread_t thread,
+ int adjust)
+{
+ pal_register_cache_state(thread, DIRTY);
+ if (thread_is_64bit(thread)) {
+ x86_saved_state64_t *iss64;
- if (current_thread()->funnel_lock)
- (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
+ iss64 = USER_REGS64(thread);
- thread_exception_return();
- /* NOTREACHED */
-}
+ iss64->isf.rsp += adjust;
+ return iss64->isf.rsp;
+ } else {
+ x86_saved_state32_t *iss32;
-kern_return_t
-thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
-{
- struct real_descriptor desc;
- extern struct fake_descriptor *mp_ldt[];
- struct real_descriptor *ldtp;
- int mycpu = cpu_number();
-
- ldtp = (struct real_descriptor *)mp_ldt[mycpu];
- desc.limit_low = 1;
- desc.limit_high = 0;
- desc.base_low = addr & 0xffff;
- desc.base_med = (addr >> 16) & 0xff;
- desc.base_high = (addr >> 24) & 0xff;
- desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;
- desc.granularity = SZ_32|SZ_G;
- pcb->cthread_desc = desc;
- ldtp[sel_idx(USER_CTHREAD)] = desc;
- return(KERN_SUCCESS);
+ iss32 = USER_REGS32(thread);
+
+ iss32->uesp += adjust;
+
+ return CAST_USER_ADDR_T(iss32->uesp);
+ }
}
-kern_return_t
-thread_set_cthread_self(int self)
+/*
+ * thread_setentrypoint:
+ *
+ * Sets the user PC into the machine
+ * dependent thread state info.
+ */
+void
+thread_setentrypoint(thread_t thread, mach_vm_address_t entry)
{
- current_act()->mact.pcb->cthread_self = (unsigned int)self;
-
- return (KERN_SUCCESS);
+ pal_register_cache_state(thread, DIRTY);
+ if (thread_is_64bit(thread)) {
+ x86_saved_state64_t *iss64;
+
+ iss64 = USER_REGS64(thread);
+
+ iss64->isf.rip = (uint64_t)entry;
+ } else {
+ x86_saved_state32_t *iss32;
+
+ iss32 = USER_REGS32(thread);
+
+ iss32->eip = CAST_DOWN_EXPLICIT(unsigned int, entry);
+ }
}
+
kern_return_t
-thread_get_cthread_self(void)
+thread_setsinglestep(thread_t thread, int on)
{
- return ((kern_return_t)current_act()->mact.pcb->cthread_self);
+ pal_register_cache_state(thread, DIRTY);
+ if (thread_is_64bit(thread)) {
+ x86_saved_state64_t *iss64;
+
+ iss64 = USER_REGS64(thread);
+
+ if (on)
+ iss64->isf.rflags |= EFL_TF;
+ else
+ iss64->isf.rflags &= ~EFL_TF;
+ } else {
+ x86_saved_state32_t *iss32;
+
+ iss32 = USER_REGS32(thread);
+
+ if (on) {
+ iss32->efl |= EFL_TF;
+ /* Ensure IRET */
+ if (iss32->cs == SYSENTER_CS)
+ iss32->cs = SYSENTER_TF_CS;
+ }
+ else
+ iss32->efl &= ~EFL_TF;
+ }
+
+ return (KERN_SUCCESS);
}
-kern_return_t
-thread_fast_set_cthread_self(int self)
+
+
+/* XXX this should be a struct savearea so that CHUD will work better on x86 */
+void *
+find_user_regs(thread_t thread)
{
- pcb_t pcb;
- pcb = (pcb_t)current_act()->mact.pcb;
- thread_compose_cthread_desc((unsigned int)self, pcb);
- pcb->cthread_self = (unsigned int)self; /* preserve old func too */
- return (USER_CTHREAD);
+ pal_register_cache_state(thread, DIRTY);
+ return USER_STATE(thread);
}
-void
-mach25_syscall(struct i386_saved_state *regs)
+void *
+get_user_regs(thread_t th)
{
- printf("*** Atttempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n",
- regs->eip, regs->eax, -regs->eax);
- panic("FIXME!");
+ pal_register_cache_state(th, DIRTY);
+ return(USER_STATE(th));
}
-#endif /* MACH_BSD */
-
-/* This routine is called from assembly before each and every mach trap.
+#if CONFIG_DTRACE
+/*
+ * DTrace would like to have a peek at the kernel interrupt state, if available.
+ * Based on osfmk/chud/i386/chud_thread_i386.c:chudxnu_thread_get_state(), which see.
*/
+x86_saved_state_t *find_kern_regs(thread_t);
-extern unsigned int mach_call_start(unsigned int, unsigned int *);
-
-__private_extern__
-unsigned int
-mach_call_start(unsigned int call_number, unsigned int *args)
+x86_saved_state_t *
+find_kern_regs(thread_t thread)
{
- int i, argc;
- unsigned int kdarg[3];
-
-/* Always prepare to trace mach system calls */
+ if (thread == current_thread() &&
+ NULL != current_cpu_datap()->cpu_int_state &&
+ !(USER_STATE(thread) == current_cpu_datap()->cpu_int_state &&
+ current_cpu_datap()->cpu_interrupt_level == 1)) {
- kdarg[0]=0;
- kdarg[1]=0;
- kdarg[2]=0;
-
- argc = mach_trap_table[call_number>>4].mach_trap_arg_count;
-
- if (argc > 3)
- argc = 3;
-
- for (i=0; i < argc; i++)
- kdarg[i] = (int)*(args + i);
-
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number>>4)) | DBG_FUNC_START,
- kdarg[0], kdarg[1], kdarg[2], 0, 0);
-
- return call_number; /* pass this back thru */
+ return current_cpu_datap()->cpu_int_state;
+ } else {
+ return NULL;
+ }
}
-/* This routine is called from assembly after each mach system call
- */
-
-extern unsigned int mach_call_end(unsigned int, unsigned int);
+vm_offset_t dtrace_get_cpu_int_stack_top(void);
-__private_extern__
-unsigned int
-mach_call_end(unsigned int call_number, unsigned int retval)
+vm_offset_t
+dtrace_get_cpu_int_stack_top(void)
{
- KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number>>4)) | DBG_FUNC_END,
- retval, 0, 0, 0, 0);
- return retval; /* pass this back thru */
+ return current_cpu_datap()->cpu_int_stack_top;
}
-
+#endif