X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..0c530ab8987f0ae6a1a3d9284f40182b88852816:/bsd/dev/ppc/systemcalls.c diff --git a/bsd/dev/ppc/systemcalls.c b/bsd/dev/ppc/systemcalls.c index 4bd050722..4d0a3d397 100644 --- a/bsd/dev/ppc/systemcalls.c +++ b/bsd/dev/ppc/systemcalls.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -19,150 +19,425 @@ * * @APPLE_LICENSE_HEADER_END@ */ -/* - * Copyright (c) 1997 Apple Computer, Inc. - * - * PowerPC Family: System Call handlers. - * - * HISTORY - * 27-July-97 A. Ramesh - * Adopted for Common Core. - */ - -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include -#define ERESTART -1 /* restart syscall */ -#define EJUSTRETURN -2 /* don't modify regs, just return */ +extern void +unix_syscall(struct savearea *regs); +void +unix_syscall_return(int error); +extern struct savearea * +find_user_regs( + thread_t act); -struct unix_syscallargs { - int flavor; - int r3; - int arg1, arg2,arg3,arg4,arg5,arg6,arg7,arg8,arg9; -}; -extern struct sysent { /* system call table */ - int16_t sy_narg; /* number of args */ - int16_t sy_parallel;/* can execute in parallel */ - int32_t (*sy_call)(); /* implementing function */ -} sysent[]; +extern void enter_funnel_section(funnel_t *funnel_lock); +extern void exit_funnel_section(void); /* -** Function: unix_syscall -** -** Inputs: pcb - pointer to Process Control Block -** arg1 - arguments to mach system calls -** arg2 -** arg3 -** arg4 -** arg5 -** arg6 -** arg7 -** -** Outputs: none -*/ + * Function: unix_syscall + * + * Inputs: regs - pointer to Process Control Block + * + * Outputs: none + */ void -unix_syscall( - struct pcb * pcb, - int arg1, - int arg2, - int arg3, - int arg4, - int arg5, - int arg6, - int arg7 - ) +unix_syscall(struct savearea *regs) { - struct ppc_saved_state *regs; - thread_t thread; - struct proc *p; - struct sysent *callp; - int nargs, error; - unsigned short code; - int rval[2]; - struct unix_syscallargs sarg; + thread_t thread_act; + struct uthread *uthread; + struct proc *proc; + struct sysent *callp; + int error; + unsigned short code; + boolean_t flavor; + int funnel_type; + unsigned int cancel_enable; + + flavor = (((unsigned int)regs->save_r0) == 0)? 
1: 0; - if (!USERMODE(pcb->ss.srr1)) - panic("unix_syscall"); + if (flavor) + code = regs->save_r3; + else + code = regs->save_r0; - regs = &pcb->ss; - thread = current_thread(); + if (kdebug_enable && (code != 180)) { + if (flavor) + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + regs->save_r4, regs->save_r5, regs->save_r6, regs->save_r7, 0); + else + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START, + regs->save_r3, regs->save_r4, regs->save_r5, regs->save_r6, 0); + } + thread_act = current_thread(); + uthread = get_bsdthread_info(thread_act); + if (!(uthread->uu_flag & UT_VFORK)) + proc = (struct proc *)get_bsdtask_info(current_task()); + else + proc = current_proc(); - /* - ** Get index into sysent table - */ - code = regs->r0; + /* Make sure there is a process associated with this task */ + if (proc == NULL) { + regs->save_r3 = (long long)EPERM; + /* set the "pc" to execute cerror routine */ + regs->save_srr0 -= 4; + task_terminate_internal(current_task()); + thread_exception_return(); + /* NOTREACHED */ + } - /* - ** Set up call pointer - */ + * Delayed binding of thread credential to process credential, if we + * are not running with an explicitly set thread credential. + */ + if (uthread->uu_ucred != proc->p_ucred && + (uthread->uu_flag & UT_SETUID) == 0) { + kauth_cred_t old = uthread->uu_ucred; + uthread->uu_ucred = kauth_cred_proc_ref(proc); + if (IS_VALID_CRED(old)) + kauth_cred_unref(&old); + } + callp = (code >= nsysent) ? &sysent[63] : &sysent[code]; - sarg. flavor = (callp == sysent): 1: 0; - sarg. r3 = regs->r3; - sarg. arg1 = arg1; - sarg. arg2 = arg2; - sarg. arg3 = arg3; - sarg. arg4 = arg4; - sarg. arg5 = arg5; - sarg. arg6 = arg6; - sarg. arg7 = arg7; + if (callp->sy_narg != 0) { + void *regsp; + sy_munge_t *mungerp; + + if (IS_64BIT_PROCESS(proc)) { + /* XXX Turn 64 bit unsafe calls into nosys() */ + if (callp->sy_funnel & UNSAFE_64BIT) { + callp = &sysent[63]; + goto unsafe; + } + mungerp = callp->sy_arg_munge64; + } + else { + mungerp = callp->sy_arg_munge32; + } + if ( !flavor) { + regsp = (void *) ®s->save_r3; + } else { + /* indirect system call consumes an argument so only 7 are supported */ + if (callp->sy_narg > 7) { + callp = &sysent[63]; + goto unsafe; + } + regsp = (void *) ®s->save_r4; + } + /* call syscall argument munger to copy in arguments (see xnu/bsd/dev/ppc/munge.s) */ + (*mungerp)(regsp, (void *) &uthread->uu_arg[0]); + } + +unsafe: + cancel_enable = callp->sy_cancel; + + if (cancel_enable == _SYSCALL_CANCEL_NONE) { + uthread->uu_flag |= UT_NOTCANCELPT; + } else { + if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) { + if (cancel_enable == _SYSCALL_CANCEL_PRE) { + /* system call cancelled; return to handle cancellation */ + regs->save_r3 = (long long)EINTR; + thread_exception_return(); + /* NOTREACHED */ + } else { + thread_abort_safely(thread_act); + } + } + } + + funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK); + if (funnel_type == KERNEL_FUNNEL) + enter_funnel_section(kernel_flock); + + uthread->uu_rval[0] = 0; + + /* + * r4 is volatile, if we set it to regs->save_r4 here the child + * will have parents r4 after execve + */ + uthread->uu_rval[1] = 0; + + error = 0; + + /* + * PPC runtime calls cerror after every unix system call, so + * assume no error and adjust the "pc" to skip this call. + * It will be set back to the cerror call if an error is detected. 
+ */ + regs->save_srr0 += 4; + + if (KTRPOINT(proc, KTR_SYSCALL)) + ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg); + +#ifdef JOE_DEBUG + uthread->uu_iocount = 0; + uthread->uu_vpindex = 0; +#endif + AUDIT_SYSCALL_ENTER(code, proc, uthread); + error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0])); + AUDIT_SYSCALL_EXIT(error, proc, uthread); - set_bsduthreadargs(thread,pcb,&sarg); +#ifdef JOE_DEBUG + if (uthread->uu_iocount) + joe_debug("system call returned with uu_iocount != 0"); +#endif + regs = find_user_regs(thread_act); + if (error == ERESTART) { + regs->save_srr0 -= 8; + } else if (error != EJUSTRETURN) { + if (error) { + regs->save_r3 = (long long)error; + /* set the "pc" to execute cerror routine */ + regs->save_srr0 -= 4; + } else { /* (not error) */ + switch (callp->sy_return_type) { + case _SYSCALL_RET_INT_T: + regs->save_r3 = uthread->uu_rval[0]; + regs->save_r4 = uthread->uu_rval[1]; + break; + case _SYSCALL_RET_UINT_T: + regs->save_r3 = ((u_int)uthread->uu_rval[0]); + regs->save_r4 = ((u_int)uthread->uu_rval[1]); + break; + case _SYSCALL_RET_OFF_T: + /* off_t returns 64 bits split across two registers for 32 bit */ + /* process and in one register for 64 bit process */ + if (IS_64BIT_PROCESS(proc)) { + u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0]; + regs->save_r3 = *retp; + regs->save_r4 = 0; + } + else { + regs->save_r3 = uthread->uu_rval[0]; + regs->save_r4 = uthread->uu_rval[1]; + } + break; + case _SYSCALL_RET_ADDR_T: + case _SYSCALL_RET_SIZE_T: + case _SYSCALL_RET_SSIZE_T: + /* the variable length return types (user_addr_t, user_ssize_t, + * and user_size_t) are always the largest possible size in the + * kernel (we use uu_rval[0] and [1] as one 64 bit value). + */ + { + user_addr_t *retp = (user_addr_t *)&uthread->uu_rval[0]; + regs->save_r3 = *retp; + regs->save_r4 = 0; + } + break; + case _SYSCALL_RET_NONE: + break; + default: + panic("unix_syscall: unknown return type"); + break; + } + } + } + /* else (error == EJUSTRETURN) { nothing } */ - if (callp->sy_narg > 8) - panic("unix_syscall: max arg count exceeded"); - rval[0] = 0; + if (KTRPOINT(proc, KTR_SYSRET)) { + switch(callp->sy_return_type) { + case _SYSCALL_RET_ADDR_T: + case _SYSCALL_RET_SIZE_T: + case _SYSCALL_RET_SSIZE_T: + /* + * Trace the value of the least significant bits, + * until we can revise the ktrace API safely. + */ + ktrsysret(proc, code, error, uthread->uu_rval[1]); + break; + default: + ktrsysret(proc, code, error, uthread->uu_rval[0]); + break; + } + } - /* r4 is volatile, if we set it to regs->r4 here the child - * will have parents r4 after execve */ - rval[1] = 0; + if (cancel_enable == _SYSCALL_CANCEL_NONE) + uthread->uu_flag &= ~UT_NOTCANCELPT; - error = 0; /* Start with a good value */ + exit_funnel_section(); + + if (uthread->uu_lowpri_delay) { + /* + * task is marked as a low priority I/O type + * and the I/O we issued while in this system call + * collided with normal I/O operations... 
we'll + * delay in order to mitigate the impact of this + * task on the normal operation of the system + */ + IOSleep(uthread->uu_lowpri_delay); + uthread->uu_lowpri_delay = 0; + } + if (kdebug_enable && (code != 180)) { + + if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T) + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, + error, uthread->uu_rval[1], 0, 0, 0); + else + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, + error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0); + } + + thread_exception_return(); + /* NOTREACHED */ +} + +void +unix_syscall_return(int error) +{ + thread_t thread_act; + struct uthread *uthread; + struct proc *proc; + struct savearea *regs; + unsigned short code; + struct sysent *callp; + unsigned int cancel_enable; + + thread_act = current_thread(); + proc = current_proc(); + uthread = get_bsdthread_info(thread_act); + + regs = find_user_regs(thread_act); + + if (regs->save_r0 != 0) + code = regs->save_r0; + else + code = regs->save_r3; + + callp = (code >= nsysent) ? &sysent[63] : &sysent[code]; /* - ** the PPC runtime calls cerror after every unix system call, so - ** assume no error and adjust the "pc" to skip this call. - ** It will be set back to the cerror call if an error is detected. - */ - regs->srr0 += 4; - vt = get_bsduthreadarg(thread); - p = ((struct proc *)get_bsdtask_info(current_task())); - error = (*(callp->sy_call))(p, (caddr_t)vt, rval); - - if (error == ERESTART) { - regs->srr0 -= 8; - } - else if (error != EJUSTRETURN) { - if (error) - { - regs->r3 = error; - /* set the "pc" to execute cerror routine */ - regs->srr0 -= 4; - } else { /* (not error) */ - regs->r3 = rval[0]; - regs->r4 = rval[1]; - } - } - /* else (error == EJUSTRETURN) { nothing } */ - - thread_exception_return(); - /* NOTREACHED */ - + * Get index into sysent table + */ + if (error == ERESTART) { + regs->save_srr0 -= 8; + } else if (error != EJUSTRETURN) { + if (error) { + regs->save_r3 = (long long)error; + /* set the "pc" to execute cerror routine */ + regs->save_srr0 -= 4; + } else { /* (not error) */ + switch (callp->sy_return_type) { + case _SYSCALL_RET_INT_T: + regs->save_r3 = uthread->uu_rval[0]; + regs->save_r4 = uthread->uu_rval[1]; + break; + case _SYSCALL_RET_UINT_T: + regs->save_r3 = ((u_int)uthread->uu_rval[0]); + regs->save_r4 = ((u_int)uthread->uu_rval[1]); + break; + case _SYSCALL_RET_OFF_T: + /* off_t returns 64 bits split across two registers for 32 bit */ + /* process and in one register for 64 bit process */ + if (IS_64BIT_PROCESS(proc)) { + u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0]; + regs->save_r3 = *retp; + } + else { + regs->save_r3 = uthread->uu_rval[0]; + regs->save_r4 = uthread->uu_rval[1]; + } + break; + case _SYSCALL_RET_ADDR_T: + case _SYSCALL_RET_SIZE_T: + case _SYSCALL_RET_SSIZE_T: + /* the variable length return types (user_addr_t, user_ssize_t, + * and user_size_t) are always the largest possible size in the + * kernel (we use uu_rval[0] and [1] as one 64 bit value). + */ + { + u_int64_t *retp = (u_int64_t *)&uthread->uu_rval[0]; + regs->save_r3 = *retp; + } + break; + case _SYSCALL_RET_NONE: + break; + default: + panic("unix_syscall: unknown return type"); + break; + } + } + } + /* else (error == EJUSTRETURN) { nothing } */ + + if (KTRPOINT(proc, KTR_SYSRET)) { + switch(callp->sy_return_type) { + case _SYSCALL_RET_ADDR_T: + case _SYSCALL_RET_SIZE_T: + case _SYSCALL_RET_SSIZE_T: + /* + * Trace the value of the least significant bits, + * until we can revise the ktrace API safely. 
+ */ + ktrsysret(proc, code, error, uthread->uu_rval[1]); + break; + default: + ktrsysret(proc, code, error, uthread->uu_rval[0]); + break; + } + } + + cancel_enable = callp->sy_cancel; + + if (cancel_enable == _SYSCALL_CANCEL_NONE) + uthread->uu_flag &= ~UT_NOTCANCELPT; + + exit_funnel_section(); + + if (uthread->uu_lowpri_delay) { + /* + * task is marked as a low priority I/O type + * and the I/O we issued while in this system call + * collided with normal I/O operations... we'll + * delay in order to mitigate the impact of this + * task on the normal operation of the system + */ + IOSleep(uthread->uu_lowpri_delay); + uthread->uu_lowpri_delay = 0; + } + if (kdebug_enable && (code != 180)) { + if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T) + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, + error, uthread->uu_rval[1], 0, 0, 0); + else + KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END, + error, uthread->uu_rval[0], uthread->uu_rval[1], 0, 0); + } + + thread_exception_return(); + /* NOTREACHED */ } +#ifdef JOE_DEBUG +joe_debug(char *p) { + + printf("%s\n", p); +} +#endif
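
The _SYSCALL_RET_OFF_T cases in both unix_syscall() and unix_syscall_return() above return a 64-bit off_t in save_r3/save_r4 for a 32-bit process, but in save_r3 alone for a 64-bit process. Below is a minimal user-space C sketch of that word-splitting convention only; the regs32 structure, the function names, and the assumption that r3 carries the high-order word (big-endian PPC word order) are illustrative stand-ins, not kernel or ABI definitions.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two 32-bit return registers (r3/r4). */
struct regs32 {
	uint32_t r3;
	uint32_t r4;
};

/*
 * Split a 64-bit result across the two registers for a 32-bit caller:
 * assumed big-endian word order, high word in r3, low word in r4.
 */
static void return_off_t_32(struct regs32 *regs, uint64_t rval)
{
	regs->r3 = (uint32_t)(rval >> 32);
	regs->r4 = (uint32_t)(rval & 0xffffffffu);
}

/* Reassemble on the user side, as a 32-bit libc stub would before returning off_t. */
static uint64_t read_off_t_32(const struct regs32 *regs)
{
	return ((uint64_t)regs->r3 << 32) | regs->r4;
}

int main(void)
{
	struct regs32 regs;
	uint64_t offset = 0x123456789abcdef0ULL;

	return_off_t_32(&regs, offset);
	printf("r3=0x%08x r4=0x%08x reassembled=0x%016llx\n",
	       (unsigned)regs.r3, (unsigned)regs.r4,
	       (unsigned long long)read_off_t_32(&regs));
	return 0;
}

A 64-bit process needs no such stub: the kernel reads uu_rval[0]/uu_rval[1] as a single 64-bit value and places it in save_r3, zeroing save_r4.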