/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
+/*
+ * NOTICE: This file was modified by McAfee Research in 2004 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <mach/machine/thread_status.h>
+#include <mach/thread_act.h>
#include <ppc/savearea.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/user.h>
#include <sys/errno.h>
-#include <sys/ktrace.h>
#include <sys/kdebug.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <bsm/audit_kernel.h>
+#if CONFIG_DTRACE
+extern int32_t dtrace_systrace_syscall(struct proc *, void *, int *);
+extern void dtrace_systrace_syscall_return(unsigned short, int, int *);
+#endif
+
extern void
unix_syscall(struct savearea *regs);
-void
-unix_syscall_return(int error);
extern struct savearea *
find_user_regs(
thread_t act);
-extern void enter_funnel_section(funnel_t *funnel_lock);
-extern void exit_funnel_section(void);
+extern lck_spin_t * tz_slock;
/*
* Function: unix_syscall
struct proc *proc;
struct sysent *callp;
int error;
- unsigned short code;
+ unsigned int code;
boolean_t flavor;
- int funnel_type;
- unsigned int cancel_enable;
flavor = (((unsigned int)regs->save_r0) == 0)? 1: 0;
* Delayed binding of thread credential to process credential, if we
* are not running with an explicitly set thread credential.
*/
- if (uthread->uu_ucred != proc->p_ucred &&
- (uthread->uu_flag & UT_SETUID) == 0) {
- kauth_cred_t old = uthread->uu_ucred;
- proc_lock(proc);
- uthread->uu_ucred = proc->p_ucred;
- kauth_cred_ref(uthread->uu_ucred);
- proc_unlock(proc);
- if (old != NOCRED)
- kauth_cred_rele(old);
- }
-
- uthread->uu_ar0 = (int *)regs;
+ kauth_cred_uthread_update(uthread, proc);
- callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
+ callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
if (callp->sy_narg != 0) {
void *regsp;
if (IS_64BIT_PROCESS(proc)) {
/* XXX Turn 64 bit unsafe calls into nosys() */
- if (callp->sy_funnel & UNSAFE_64BIT) {
+ if (callp->sy_flags & UNSAFE_64BIT) {
callp = &sysent[63];
goto unsafe;
}
}
unsafe:
- cancel_enable = callp->sy_cancel;
- if (cancel_enable == _SYSCALL_CANCEL_NONE) {
- uthread->uu_flag |= UT_NOTCANCELPT;
- } else {
- if((uthread->uu_flag & (UT_CANCELDISABLE | UT_CANCEL | UT_CANCELED)) == UT_CANCEL) {
- if (cancel_enable == _SYSCALL_CANCEL_PRE) {
- /* system call cancelled; return to handle cancellation */
- regs->save_r3 = (long long)EINTR;
- thread_exception_return();
- /* NOTREACHED */
- } else {
- thread_abort_safely(thread_act);
- }
- }
- }
+ uthread->uu_flag |= UT_NOTCANCELPT;
- funnel_type = (int)(callp->sy_funnel & FUNNEL_MASK);
- if (funnel_type == KERNEL_FUNNEL)
- enter_funnel_section(kernel_flock);
-
uthread->uu_rval[0] = 0;
/*
*/
regs->save_srr0 += 4;
- if (KTRPOINT(proc, KTR_SYSCALL))
- ktrsyscall(proc, code, callp->sy_narg, uthread->uu_arg);
-
#ifdef JOE_DEBUG
uthread->uu_iocount = 0;
uthread->uu_vpindex = 0;
#endif
AUDIT_SYSCALL_ENTER(code, proc, uthread);
error = (*(callp->sy_call))(proc, (void *)uthread->uu_arg, &(uthread->uu_rval[0]));
- AUDIT_SYSCALL_EXIT(error, proc, uthread);
+ AUDIT_SYSCALL_EXIT(code, proc, uthread, error);
+#if CONFIG_MACF
+ mac_thread_userret(code, error, thread_act);
+#endif
+
#ifdef JOE_DEBUG
if (uthread->uu_iocount)
joe_debug("system call returned with uu_iocount != 0");
#endif
+#if CONFIG_DTRACE
+ uthread->t_dtrace_errno = error;
+#endif /* CONFIG_DTRACE */
+
regs = find_user_regs(thread_act);
if (error == ERESTART) {
/* else (error == EJUSTRETURN) { nothing } */
- if (KTRPOINT(proc, KTR_SYSRET)) {
- switch(callp->sy_return_type) {
- case _SYSCALL_RET_ADDR_T:
- case _SYSCALL_RET_SIZE_T:
- case _SYSCALL_RET_SSIZE_T:
- /*
- * Trace the value of the least significant bits,
- * until we can revise the ktrace API safely.
- */
- ktrsysret(proc, code, error, uthread->uu_rval[1]);
- break;
- default:
- ktrsysret(proc, code, error, uthread->uu_rval[0]);
- break;
- }
- }
-
- if (cancel_enable == _SYSCALL_CANCEL_NONE)
- uthread->uu_flag &= ~UT_NOTCANCELPT;
+ uthread->uu_flag &= ~UT_NOTCANCELPT;
- exit_funnel_section();
+ /* panic if funnel is held */
+ syscall_exit_funnelcheck();
- if (uthread->uu_lowpri_delay) {
+ if (uthread->uu_lowpri_window) {
/*
* task is marked as a low priority I/O type
* and the I/O we issued while in this system call
* delay in order to mitigate the impact of this
* task on the normal operation of the system
*/
- IOSleep(uthread->uu_lowpri_delay);
- uthread->uu_lowpri_delay = 0;
+ throttle_lowpri_io(TRUE);
}
if (kdebug_enable && (code != 180)) {
struct uthread *uthread;
struct proc *proc;
struct savearea *regs;
- unsigned short code;
+ unsigned int code;
struct sysent *callp;
- int funnel_type;
- unsigned int cancel_enable;
thread_act = current_thread();
proc = current_proc();
else
code = regs->save_r3;
- callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
+ callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];
+
+#if CONFIG_DTRACE
+ if (callp->sy_call == dtrace_systrace_syscall)
+ dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
+#endif /* CONFIG_DTRACE */
/*
* Get index into sysent table
}
/* else (error == EJUSTRETURN) { nothing } */
- if (KTRPOINT(proc, KTR_SYSRET)) {
- switch(callp->sy_return_type) {
- case _SYSCALL_RET_ADDR_T:
- case _SYSCALL_RET_SIZE_T:
- case _SYSCALL_RET_SSIZE_T:
- /*
- * Trace the value of the least significant bits,
- * until we can revise the ktrace API safely.
- */
- ktrsysret(proc, code, error, uthread->uu_rval[1]);
- break;
- default:
- ktrsysret(proc, code, error, uthread->uu_rval[0]);
- break;
- }
- }
-
- cancel_enable = callp->sy_cancel;
- if (cancel_enable == _SYSCALL_CANCEL_NONE)
- uthread->uu_flag &= ~UT_NOTCANCELPT;
+ uthread->uu_flag &= ~UT_NOTCANCELPT;
- exit_funnel_section();
+ /* panic if funnel is held */
+ syscall_exit_funnelcheck();
- if (uthread->uu_lowpri_delay) {
+ if (uthread->uu_lowpri_window) {
/*
* task is marked as a low priority I/O type
* and the I/O we issued while in this system call
* delay in order to mitigate the impact of this
* task on the normal operation of the system
*/
- IOSleep(uthread->uu_lowpri_delay);
- uthread->uu_lowpri_delay = 0;
+ throttle_lowpri_io(TRUE);
}
if (kdebug_enable && (code != 180)) {
if (callp->sy_return_type == _SYSCALL_RET_SSIZE_T)
/* NOTREACHED */
}
-/*
- * Time of day and interval timer support.
- *
- * These routines provide the kernel entry points to get and set
- * the time-of-day and per-process interval timers. Subroutines
- * here provide support for adding and subtracting timeval structures
- * and decrementing interval timers, optionally reloading the interval
- * timers when they expire.
- */
-/* NOTE THIS implementation is for ppc architectures only.
- * It is infrequently called, since the commpage intercepts
- * most calls in user mode.
- *
- * XXX Y2038 bug because of assumed return of 32 bit seconds value, and
- * XXX first parameter to clock_gettimeofday()
- */
-int
-ppc_gettimeofday(__unused struct proc *p,
- register struct ppc_gettimeofday_args *uap,
- register_t *retval)
-{
- int error = 0;
- extern lck_spin_t * tz_slock;
-
- if (uap->tp)
- clock_gettimeofday(&retval[0], &retval[1]);
-
- if (uap->tzp) {
- struct timezone ltz;
-
- lck_spin_lock(tz_slock);
- ltz = tz;
- lck_spin_unlock(tz_slock);
- error = copyout((caddr_t)&ltz, uap->tzp, sizeof (tz));
- }
-
- return (error);
-}
-
#ifdef JOE_DEBUG
joe_debug(char *p) {
printf("%s\n", p);
}
#endif
-
-
-/*
- * WARNING - this is a temporary workaround for binary compatibility issues
- * with anti-piracy software that relies on patching ptrace (3928003).
- * This KPI will be removed in the system release after Tiger.
- */
-uintptr_t temp_patch_ptrace(uintptr_t new_ptrace)
-{
- struct sysent * callp;
- sy_call_t * old_ptrace;
-
- if (new_ptrace == 0)
- return(0);
-
- enter_funnel_section(kernel_flock);
- callp = &sysent[26];
- old_ptrace = callp->sy_call;
-
- /* only allow one patcher of ptrace */
- if (old_ptrace == (sy_call_t *) ptrace) {
- callp->sy_call = (sy_call_t *) new_ptrace;
- }
- else {
- old_ptrace = NULL;
- }
- exit_funnel_section( );
-
- return((uintptr_t)old_ptrace);
-}
-
-void temp_unpatch_ptrace(void)
-{
- struct sysent * callp;
-
- enter_funnel_section(kernel_flock);
- callp = &sysent[26];
- callp->sy_call = (sy_call_t *) ptrace;
- exit_funnel_section( );
-
- return;
-}