/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>
-#include <mach/rpc.h>
#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <i386/iopb_entries.h>
#include <i386/machdep_call.h>
-#define USRSTACK 0xc0000000
+#include <sys/syscall.h>
+#include <sys/ktrace.h>
+struct proc;
kern_return_t
thread_userstack(
int,
thread_state_t,
unsigned int,
- vm_offset_t *
+ vm_offset_t *,
+ int *
);
kern_return_t
int flavor,
thread_state_t tstate,
unsigned int count,
- vm_offset_t *user_stack
+ vm_offset_t *user_stack,
+ int *customstack
)
{
struct i386_saved_state *state;
i386_thread_state_t *state25;
vm_offset_t uesp;
- /*
- * Set a default.
- */
- if (*user_stack == 0)
- *user_stack = USRSTACK;
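+ /* by default, report that no custom user stack was supplied */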
+ if (customstack)
+ *customstack = 0;
switch (flavor) {
case i386_THREAD_STATE: /* FIXME */
state25 = (i386_thread_state_t *) tstate;
- *user_stack = state25->esp ? state25->esp : USRSTACK;
+ if (state25->esp)
+ *user_stack = state25->esp;
+ if (customstack)
+ *customstack = state25->esp ? 1 : 0;
break;
case i386_NEW_THREAD_STATE:
uesp = state->uesp;
}
- /*
- * If a valid user stack is specified, use it.
- */
- *user_stack = uesp ? uesp : USRSTACK;
+ /* If a valid user stack is specified, use it. */
+ if (uesp)
+ *user_stack = uesp;
+ if (customstack)
+ *customstack = uesp ? 1 : 0;
break;
default :
return (KERN_INVALID_ARGUMENT);
child->mact.pcb->iss.edx = 1;
child->mact.pcb->iss.efl &= ~EFL_CF;
}
+void thread_set_parent(thread_act_t parent, int pid);
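+/* Parent side of fork(): return the child's pid in eax, edx = 0 (parent), clear CF for success. */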
+void
+thread_set_parent(thread_act_t parent, int pid)
+{
+ parent->mact.pcb->iss.eax = pid;
+ parent->mact.pcb->iss.edx = 0;
+ parent->mact.pcb->iss.efl &= ~EFL_CF;
+}
unsigned long (*sy_call)(void *, void *, int *); /* implementing function */
};
+#define NO_FUNNEL 0
#define KERNEL_FUNNEL 1
#define NETWORK_FUNNEL 2
extern struct sysent sysent[];
-void *get_bsdtask_info(
- task_t);
-
int set_bsduthreadargs (thread_act_t, struct i386_saved_state *, void *);
void * get_bsduthreadarg(thread_act_t);
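+/*
+ * Finish a system call on the way back to user mode: set the return
+ * registers from error/rval, emit ktrace and kdebug records, drop any
+ * funnel held, and return to user space.
+ */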
void
unix_syscall_return(int error)
{
- panic("unix_syscall_return not implemented yet!!");
+ thread_act_t thread;
+ volatile int *rval;
+ struct i386_saved_state *regs;
+ struct proc *p;
+ struct proc *current_proc();
+ unsigned short code;
+ vm_offset_t params;
+ struct sysent *callp;
+ extern int nsysent;
+
+ thread = current_act();
+ rval = (int *)get_bsduthreadrval(thread);
+ p = current_proc();
+
+ regs = USER_REGS(thread);
+
+ /* reconstruct code for tracing before blasting eax */
+ code = regs->eax;
+ params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
+ callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
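+ /* code 0 is the indirect syscall(); read the real number from the user stack for tracing */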
+ if (callp == sysent) {
+ code = fuword(params);
+ }
+
+ if (error == ERESTART) {
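+ /* back eip up over the 7-byte system call instruction so it is re-executed */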
+ regs->eip -= 7;
+ }
+ else if (error != EJUSTRETURN) {
+ if (error) {
+ regs->eax = error;
+ regs->efl |= EFL_CF; /* carry bit */
+ } else { /* (not error) */
+ regs->eax = rval[0];
+ regs->edx = rval[1];
+ regs->efl &= ~EFL_CF;
+ }
+ }
+
+ ktrsysret(p, code, error, rval[0], callp->sy_funnel);
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
+ error, rval[0], rval[1], 0, 0);
+
+ if (callp->sy_funnel != NO_FUNNEL) {
+ assert(thread_funnel_get() == THR_FUNNEL_NULL);
+ (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
+ }
+
+ thread_exception_return();
+ /* NOTREACHED */
}
unix_syscall(struct i386_saved_state *regs)
{
thread_act_t thread;
- void *p, *vt;
+ void *vt;
unsigned short code;
struct sysent *callp;
int nargs, error;
- int *rval;
- int funnel_type;
+ volatile int *rval;
+ int funnel_type;
vm_offset_t params;
extern int nsysent;
+ struct proc *p;
+ struct proc *current_proc();
thread = current_act();
- p = get_bsdtask_info(current_task());
+ p = current_proc();
rval = (int *)get_bsduthreadrval(thread);
//printf("[scall : eax %x]", regs->eax);
rval[0] = 0;
rval[1] = regs->edx;
- if(callp->sy_funnel == NETWORK_FUNNEL) {
- (void) thread_funnel_set(network_flock, TRUE);
- }
- else {
- (void) thread_funnel_set(kernel_flock, TRUE);
- }
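+ /* take whichever funnel this syscall is registered under before entering BSD code */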
+ funnel_type = callp->sy_funnel;
+ if(funnel_type == KERNEL_FUNNEL)
+ (void) thread_funnel_set(kernel_flock, TRUE);
+ else if (funnel_type == NETWORK_FUNNEL)
+ (void) thread_funnel_set(network_flock, TRUE);
+
set_bsduthreadargs(thread, regs, NULL);
if (callp->sy_narg > 8)
panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);
+ ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);
+
+ {
+ int *ip = (int *)vt;
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
+ *ip, *(ip+1), *(ip+2), *(ip+3), 0);
+ }
error = (*(callp->sy_call))(p, (void *) vt, rval);
+#if 0
+ /* May be needed with vfork changes */
+ regs = USER_REGS(thread);
+#endif
if (error == ERESTART) {
regs->eip -= 7;
}
}
}
- (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
+ ktrsysret(p, code, error, rval[0], funnel_type);
+
+ KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
+ error, rval[0], rval[1], 0, 0);
+
+ if(funnel_type != NO_FUNNEL)
+ (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
thread_exception_return();
/* NOTREACHED */
int trapno, nargs;
machdep_call_t *entry;
thread_t thread;
+ struct proc *p;
+ struct proc *current_proc();
trapno = regs->eax;
if (trapno < 0 || trapno >= machdep_call_count) {
else
regs->eax = (unsigned int)(*entry->routine)();
- (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
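+ /* machdep calls may run funnel-free; only drop a funnel that was actually taken */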
+ if (current_thread()->funnel_lock)
+ (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
thread_exception_return();
/* NOTREACHED */
#endif /* MACH_BSD */
#undef current_thread
-thread_act_t
+thread_t
current_thread(void)
{
return(current_thread_fast());