/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <sys/ubc.h>
#include <sys/stat.h>
+#include <bsm/audit_kernel.h>
+#include <bsm/audit_kevents.h>
+
#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
kern_return_t err = KERN_SUCCESS;
boolean_t funnel_state;
+ AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
+ AUDIT_ARG(mach_port1, t);
+
funnel_state = thread_funnel_set(kernel_flock, TRUE);
t1 = port_name_to_task(t);
}
task_deallocate(t1);
pftout:
+ AUDIT_ARG(pid, pid);
(void) copyout((char *) &pid, (char *) x, sizeof(*x));
thread_funnel_set(kernel_flock, funnel_state);
+ AUDIT_MACH_SYSCALL_EXIT(err);
return(err);
}
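For reference, the auditing added in this change uses one bracketing pattern throughout: AUDIT_MACH_SYSCALL_ENTER() opens the record with the event code, AUDIT_ARG() attaches each argument as soon as its value is known, and AUDIT_MACH_SYSCALL_EXIT() records the result on every exit path. The following is a minimal sketch of that shape for a hypothetical Mach trap; example_trap() and AUE_EXAMPLE are invented for illustration, while the AUDIT_* macros are the ones included above from <bsm/audit_kernel.h>.

/*
 * Sketch only, not part of this change.  AUE_EXAMPLE is a made-up
 * event code used to illustrate the enter/arg/exit bracketing.
 */
kern_return_t
example_trap(mach_port_t port, int *outp)
{
	kern_return_t	err = KERN_SUCCESS;
	int		value = 0;

	AUDIT_MACH_SYSCALL_ENTER(AUE_EXAMPLE);	/* open the audit record   */
	AUDIT_ARG(mach_port1, port);		/* log args once known     */

	if (port == MACH_PORT_NULL) {
		err = KERN_INVALID_ARGUMENT;
		goto out;
	}
	(void) copyout((char *)&value, (char *)outp, sizeof(*outp));
out:
	AUDIT_MACH_SYSCALL_EXIT(err);		/* close it on every path  */
	return (err);
}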
int error = 0;
boolean_t funnel_state;
+ AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
+ AUDIT_ARG(pid, pid);
+ AUDIT_ARG(mach_port1, target_tport);
+
t1 = port_name_to_task(target_tport);
if (t1 == TASK_NULL) {
(void ) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
+ AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
return(KERN_FAILURE);
}
restart:
p1 = get_bsdtask_info(t1);
+ p = pfind(pid);
+ AUDIT_ARG(process, p);
if (
- ((p = pfind(pid)) != (struct proc *) 0)
+ (p != (struct proc *) 0)
&& (p1 != (struct proc *) 0)
&& (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
get_task_ipcspace(current_task()));
} else
tret = MACH_PORT_NULL;
+ AUDIT_ARG(mach_port2, tret);
(void ) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
task_deallocate(t1);
error = KERN_SUCCESS;
error = KERN_FAILURE;
tfpout:
thread_funnel_set(kernel_flock, funnel_state);
+ AUDIT_MACH_SYSCALL_EXIT(error);
return(error);
}
ndp = &nd;
-
+ AUDIT_ARG(addr, base_address);
/* Retrieve the base address */
if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
goto lsf_bailout;
/*
* Get a vnode for the target file
*/
- NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
+ NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE,
filename_str, p);
if ((error = namei(ndp))) {
int i;
kern_return_t kret;
+ AUDIT_ARG(addr, base_address);
/* Retrieve the base address */
if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
goto rsf_bailout;
}
lru = global_user_profile_cache.age;
+ *profile = NULL;
for(i = 0; i<global_user_profile_cache.max_ele; i++) {
+ /* Skip entry if it is in the process of being reused */
+ if(global_user_profile_cache.profiles[i].data_vp ==
+ (struct vnode *)0xFFFFFFFF)
+ continue;
+ /* Otherwise grab the first empty entry */
if(global_user_profile_cache.profiles[i].data_vp == NULL) {
*profile = &global_user_profile_cache.profiles[i];
(*profile)->age = global_user_profile_cache.age;
- global_user_profile_cache.age+=1;
break;
}
+ /* Otherwise grab the oldest entry */
if(global_user_profile_cache.profiles[i].age < lru) {
lru = global_user_profile_cache.profiles[i].age;
*profile = &global_user_profile_cache.profiles[i];
}
}
+ /* Did we set it? */
+ if (*profile == NULL) {
+ /*
+ * No entries are available; this can only happen if all of
+ * them are currently in the process of being reused.  If this
+ * happens, we sleep on the address of the first element and
+ * restart.  This is less than ideal, but it works, because any
+ * entry currently in the process of being reused is guaranteed
+ * to generate a wakeup.
+ *
+ * XXX Recommend a two-handed clock and more than 3 total
+ * XXX cache entries at some point in the future.
+ */
+ /*
+ * drop funnel and wait
+ */
+ (void)tsleep((void *)
+ &global_user_profile_cache.profiles[0],
+ PRIBIO, "app_profile", 0);
+ goto restart;
+ }
+
+ /*
+ * If the entry we picked is busy, we took the one at the end of
+ * the LRU list but it is still actively in use.  We sleep on its
+ * address and restart.
+ */
if ((*profile)->busy) {
/*
* drop funnel and wait
*/
(void)tsleep((void *)
- &(global_user_profile_cache),
+ *profile,
PRIBIO, "app_profile", 0);
goto restart;
}
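Taken together, the selection logic above reduces to one loop: scan for a free slot, otherwise remember the least recently used one, and if nothing can be claimed right now, tsleep() on an agreed-upon address and rescan after the current holder's wakeup(). Below is a condensed sketch of that pattern using invented names (struct slot, cache, NSLOTS, claim_slot()) in place of the real global_user_profile_cache fields; tsleep(), wakeup(), and PRIBIO are the real BSD primitives used by the code above.

/*
 * Sketch only: invented types and names, illustrating the
 * scan-or-sleep slot acquisition used by the profile cache.
 */
#define NSLOTS	3	/* the real cache currently has 3 entries */

struct slot { struct vnode *vp; vm_offset_t buf_ptr; unsigned int age; int busy; };
extern struct { struct slot slot[NSLOTS]; unsigned int age; } cache;

static struct slot *
claim_slot(void)
{
	struct slot	*pick;
	unsigned int	 lru;
	int		 i;
restart:
	pick = NULL;
	lru = cache.age;
	for (i = 0; i < NSLOTS; i++) {
		if (cache.slot[i].vp == (struct vnode *)0xFFFFFFFF)
			continue;			/* mid-reuse: skip  */
		if (cache.slot[i].vp == NULL) {
			pick = &cache.slot[i];		/* free: take it    */
			break;
		}
		if (cache.slot[i].age < lru) {		/* track the oldest */
			lru = cache.slot[i].age;
			pick = &cache.slot[i];
		}
	}
	if (pick == NULL) {
		/* every slot is mid-reuse: sleep on slot 0 and rescan */
		(void) tsleep((void *)&cache.slot[0], PRIBIO, "app_profile", 0);
		goto restart;
	}
	if (pick->busy) {
		/* the LRU slot is actively in use: sleep on it and rescan */
		(void) tsleep((void *)pick, PRIBIO, "app_profile", 0);
		goto restart;
	}
	pick->busy = 1;					/* claimed */
	return (pick);
}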
(*profile)->busy = 1;
(*profile)->user = user;
- if((*profile)->data_vp != NULL) {
+ /*
+ * Put a dummy value in for now, so that competing requests wait
+ * (at the check above) until we are finished.
+ *
+ * Save the old data_vp before overwriting it, so that the dummy
+ * value is already in place before we kmem_free() or vrele();
+ * without this we would have a potential funnel race condition
+ * to deal with.
+ */
+ data_vp = (*profile)->data_vp;
+ (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
+
+ /*
+ * Age the cache here in all cases; this guarantees that we won't
+ * be reusing only one entry over and over, once the system reaches
+ * steady-state.
+ */
+ global_user_profile_cache.age+=1;
+
+ if(data_vp != NULL) {
kmem_free(kernel_map,
(*profile)->buf_ptr, 4 * PAGE_SIZE);
if ((*profile)->names_vp) {
vrele((*profile)->names_vp);
(*profile)->names_vp = NULL;
}
- if ((*profile)->data_vp) {
- vrele((*profile)->data_vp);
- (*profile)->data_vp = NULL;
- }
+ vrele(data_vp);
}
-
- /* put dummy value in for now to get */
- /* competing request to wait above */
- /* until we are finished */
- (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
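The 0xFFFFFFFF assignment above acts as a reservation: it makes competing requests take the "mid-reuse" skip in the scan, and because the old data_vp is saved into a local first, the reservation is published before kmem_free() or vrele() can block and drop the funnel. Below is a hedged sketch of that ordering, reusing the invented names from the previous sketch; it is an illustration of the technique, not the literal code.

/*
 * Sketch only: reserve the slot before any call that can block, then
 * tear down the old state through the saved pointer.
 */
struct vnode	*old_vp;

old_vp = pick->vp;			/* save before overwriting     */
pick->vp = (struct vnode *)0xFFFFFFFF;	/* sentinel: slot is mid-reuse */
cache.age += 1;				/* age the cache in every case */

if (old_vp != NULL) {
	kmem_free(kernel_map, pick->buf_ptr, 4 * PAGE_SIZE);	/* may block */
	vrele(old_vp);						/* may block */
}
/* ... open the new vnodes, publish them in pick->vp, wakeup() waiters ... */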
/* Try to open the appropriate users profile files */
/* If neither file is present, try to create them */
/* If the files do exist, check them for the app_file */
/* requested and read it in if present */
-
ret = kmem_alloc(kernel_map,
(vm_offset_t *)&profile_data_string, PATH_MAX);
resid_off = 0;
while(size) {
error = vn_rdwr(UIO_READ, vp,
- (caddr_t)(local_buf + resid_off),
+ CAST_DOWN(caddr_t, (local_buf + resid_off)),
size, file_off + resid_off, UIO_SYSSPACE,
IO_NODELOCKED, p->p_ucred, &resid, p);
if((error) || (size == resid)) {