*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
/*
*/
+
+
#include <meta_features.h>
#include <kern/task.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>
+#include <sys/stat.h>
#include <kern/kalloc.h>
-#include <kern/parallel.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <machine/spl.h>
+
#include <mach/shared_memory_server.h>
+#include <vm/vm_shared_memory_server.h>
+
+
+extern shared_region_mapping_t system_shared_region;
+extern zone_t lsf_zone;
useracc(addr, len, prot)
caddr_t addr;
if (
((p = pfind(pid)) != (struct proc *) 0)
&& (p1 != (struct proc *) 0)
- && ((p->p_ucred->cr_uid == p1->p_ucred->cr_uid)
+ && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
+ ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
|| !(suser(p1->p_ucred, &p1->p_acflag)))
&& (p->p_stat != SZOMB)
) {
mutex_pause(); /* temp loss of funnel */
goto restart;
}
- sright = convert_task_to_port(p->task);
- tret = ipc_port_copyout_send(sright, get_task_ipcspace(current_task()));
+ sright = (void *)convert_task_to_port(p->task);
+ tret = (void *)
+ ipc_port_copyout_send(sright,
+ get_task_ipcspace(current_task()));
} else
tret = MACH_PORT_NULL;
(void ) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
int local_flags;
int caller_flags;
int i;
+ int default_regions = 0;
vm_size_t dummy;
kern_return_t kret;
ndp = &nd;
- unix_master();
/* Retrieve the base address */
if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
if (error = copyin(flags, &local_flags, sizeof (int))) {
goto lsf_bailout;
}
+
+ if(local_flags & QUERY_IS_SYSTEM_REGION) {
+ vm_get_shared_region(current_task(), &shared_region);
+ if (shared_region == system_shared_region) {
+ local_flags = SYSTEM_REGION_BACKED;
+ } else {
+ local_flags = 0;
+ }
+		error = copyout(&local_flags, flags, sizeof (int));
+ goto lsf_bailout;
+ }
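+
+	/*
+	 * Query sketch: in this mode the caller passes
+	 * QUERY_IS_SYSTEM_REGION in *flags and reads back either
+	 * SYSTEM_REGION_BACKED (task still backed by the system
+	 * shared region) or 0 (task has private regions); no file
+	 * is mapped and no other argument is touched.
+	 */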
caller_flags = local_flags;
kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
(vm_size_t)(MAXPATHLEN));
goto lsf_bailout_free_vput;
}
+ vm_get_shared_region(current_task(), &shared_region);
+ if(shared_region == system_shared_region) {
+ default_regions = 1;
+ }
+ if(((vp->v_mount != rootvnode->v_mount)
+ && (shared_region == system_shared_region))
+ && (lsf_mapping_pool_gauge() < 75)) {
+ /* We don't want to run out of shared memory */
+ /* map entries by starting too many private versions */
+ /* of the shared library structures */
+ if(p->p_flag & P_NOSHLIB) {
+ error = clone_system_shared_regions(FALSE);
+ } else {
+ error = clone_system_shared_regions(TRUE);
+ }
+ if (error) {
+ goto lsf_bailout_free_vput;
+ }
+ local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
+ vm_get_shared_region(current_task(), &shared_region);
+ }
#ifdef notdef
if(vattr.va_size != mapped_file_size) {
error = EINVAL;
goto lsf_bailout_free_vput;
}
#endif
+ if(p->p_flag & P_NOSHLIB) {
+ p->p_flag = p->p_flag & ~P_NOSHLIB;
+ }
+
+	/* load alternate regions if the caller has requested them. */
+ /* Note: the new regions are "clean slates" */
+ if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
+ error = clone_system_shared_regions(FALSE);
+ if (error) {
+ goto lsf_bailout_free_vput;
+ }
+ vm_get_shared_region(current_task(), &shared_region);
+ }
- vm_get_shared_region(current_task(), &shared_region);
task_mapping_info.self = (vm_offset_t)shared_region;
shared_region_mapping_info(shared_region,
}
}
- /* load alternate regions if the caller has requested. */
- /* Note: the new regions are "clean slates" */
-
- if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
-
- shared_region_mapping_t new_shared_region;
- shared_region_mapping_t old_shared_region;
- struct shared_region_task_mappings old_info;
- struct shared_region_task_mappings new_info;
-
- if(shared_file_create_system_region(&new_shared_region)) {
- error = ENOMEM;
- goto lsf_bailout_free_vput;
- }
- vm_get_shared_region(current_task(), &old_shared_region);
-
- old_info.self = (vm_offset_t)old_shared_region;
- shared_region_mapping_info(old_shared_region,
- &(old_info.text_region),
- &(old_info.text_size),
- &(old_info.data_region),
- &(old_info.data_size),
- &(old_info.region_mappings),
- &(old_info.client_base),
- &(old_info.alternate_base),
- &(old_info.alternate_next),
- &(old_info.flags), &next);
- new_info.self = (vm_offset_t)new_shared_region;
- shared_region_mapping_info(new_shared_region,
- &(new_info.text_region),
- &(new_info.text_size),
- &(new_info.data_region),
- &(new_info.data_size),
- &(new_info.region_mappings),
- &(new_info.client_base),
- &(new_info.alternate_base),
- &(new_info.alternate_next),
- &(new_info.flags), &next);
- if (vm_map_region_replace(current_map(), old_info.text_region,
- new_info.text_region, old_info.client_base,
- old_info.client_base+old_info.text_size)) {
- panic("load_shared_file: shared region mis-alignment");
- shared_region_mapping_dealloc(new_shared_region);
- error = EINVAL;
- goto lsf_bailout_free_vput;
- }
- if(vm_map_region_replace(current_map(), old_info.data_region,
- new_info.data_region,
- old_info.client_base + old_info.text_size,
- old_info.client_base
- + old_info.text_size + old_info.data_size)) {
- panic("load_shared_file: shared region mis-alignment 1");
- shared_region_mapping_dealloc(new_shared_region);
- error = EINVAL;
- goto lsf_bailout_free_vput;
- }
- vm_set_shared_region(current_task(), new_shared_region);
- task_mapping_info = new_info;
- shared_region_mapping_dealloc(old_shared_region);
- }
if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
mapped_file_size,
}
}
} else {
+ if(default_regions)
+ local_flags |= SYSTEM_REGION_BACKED;
if(!(error = copyout(&local_flags, flags, sizeof (int)))) {
error = copyout(&local_base,
base_address, sizeof (caddr_t));
(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
lsf_bailout:
- unix_release();
return error;
}
int i;
kern_return_t kret;
-
-
-
- unix_master();
-
/* Retrieve the base address */
if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
goto rsf_bailout;
(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
rsf_bailout:
- unix_release();
return error;
}
+struct new_system_shared_regions_args {
+ int dummy;
+};
+
+int
+new_system_shared_regions(
+ struct proc *p,
+ struct new_system_shared_regions_args *uap,
+	register int *retval)
+{
+ shared_region_mapping_t regions;
+ shared_region_mapping_t new_regions;
+
+ if(!(is_suser())) {
+ *retval = EINVAL;
+ return EINVAL;
+ }
+
+ /* get current shared region info for */
+ /* restoration after new system shared */
+ /* regions are in place */
+	vm_get_shared_region(current_task(), &regions);
+
+ /* usually only called at boot time */
+ /* shared_file_boot_time_init creates */
+ /* a new set of system shared regions */
+ /* and places them as the system */
+ /* shared regions. */
+ shared_file_boot_time_init();
+
+ /* set current task back to its */
+ /* original regions. */
+ vm_get_shared_region(current_task(), &new_regions);
+ shared_region_mapping_dealloc(new_regions);
+
+ vm_set_shared_region(current_task(), regions);
+
+ *retval = 0;
+ return 0;
+}
int
-clone_system_shared_regions()
+clone_system_shared_regions(shared_regions_active)
+	int shared_regions_active;
{
shared_region_mapping_t new_shared_region;
shared_region_mapping_t next;
struct shared_region_task_mappings old_info;
struct shared_region_task_mappings new_info;
if (shared_file_create_system_region(&new_shared_region))
return (ENOMEM);
vm_get_shared_region(current_task(), &old_shared_region);
&(new_info.alternate_base),
&(new_info.alternate_next),
&(new_info.flags), &next);
- if(vm_region_clone(old_info.text_region, new_info.text_region)) {
- panic("clone_system_shared_regions: shared region mis-alignment 1");
+ if(shared_regions_active) {
+ if(vm_region_clone(old_info.text_region, new_info.text_region)) {
+ panic("clone_system_shared_regions: shared region mis-alignment 1");
shared_region_mapping_dealloc(new_shared_region);
return(EINVAL);
- }
- if (vm_region_clone(old_info.data_region, new_info.data_region)) {
- panic("clone_system_shared_regions: shared region mis-alignment 2");
+ }
+ if (vm_region_clone(old_info.data_region, new_info.data_region)) {
+ panic("clone_system_shared_regions: shared region mis-alignment 2");
shared_region_mapping_dealloc(new_shared_region);
return(EINVAL);
+ }
+ shared_region_object_chain_attach(
+ new_shared_region, old_shared_region);
}
if (vm_map_region_replace(current_map(), old_info.text_region,
new_info.text_region, old_info.client_base,
return(EINVAL);
}
vm_set_shared_region(current_task(), new_shared_region);
- shared_region_object_chain_attach(new_shared_region, old_shared_region);
+
+ /* consume the reference which wasn't accounted for in object */
+ /* chain attach */
+ if(!shared_regions_active)
+ shared_region_mapping_dealloc(old_shared_region);
+
return(0);
}
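+
+/*
+ * Note on the two modes above: clone_system_shared_regions(TRUE) is
+ * used when the task's live shared regions must be preserved, so the
+ * text and data regions are cloned and chained to the old region's
+ * object chain; FALSE asks for a clean slate, skips the clone and the
+ * chain attach, and instead drops the old region's reference directly.
+ */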
+
+extern vm_map_t bsd_pageable_map;
+
+/* Header for the profile names file.  The profiled app info is held */
+/* in the data file and pointed to by elements in the names file.    */
+
+struct profile_names_header {
+ unsigned int number_of_profiles;
+ unsigned int user_id;
+ unsigned int version;
+ off_t element_array;
+ unsigned int spare1;
+ unsigned int spare2;
+ unsigned int spare3;
+};
+
+struct profile_element {
+ off_t addr;
+ vm_size_t size;
+ unsigned int mod_date;
+ unsigned int inode;
+ char name[12];
+};
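+
+/* On-disk layout implied by the two structures above: the names     */
+/* file starts with one profile_names_header, followed at offset     */
+/* element_array by number_of_profiles packed profile_element        */
+/* entries; each element's (addr, size) pair locates that profile's  */
+/* raw blob in the companion data file:                              */
+/*                                                                   */
+/*	names file:  [header][elem 0][elem 1] ... [elem n-1]         */
+/*	data file:   [blob 0][blob 1] ... [blob n-1]                 */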
+
+struct global_profile {
+ struct vnode *names_vp;
+ struct vnode *data_vp;
+ vm_offset_t buf_ptr;
+ unsigned int user;
+ unsigned int age;
+ unsigned int busy;
+};
+
+struct global_profile_cache {
+ int max_ele;
+ unsigned int age;
+ struct global_profile profiles[3];
+};
+
+struct global_profile_cache global_user_profile_cache =
+	{3, 0, { {NULL, NULL, 0, 0, 0, 0},
+		 {NULL, NULL, 0, 0, 0, 0},
+		 {NULL, NULL, 0, 0, 0, 0} } };
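+
+/* The cache above holds at most max_ele (3) per-user file pairs.    */
+/* Entries are stamped with the global age counter on every hit or   */
+/* fill, and the entry with the smallest age is the LRU victim for   */
+/* recycling.  The busy flag, together with tsleep()/wakeup(), is    */
+/* what serializes competing openers of the same slot.               */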
+
+/* BSD_OPEN_PAGE_CACHE_FILES: */
+/* Caller provides a user id. This id was used in */
+/* prepare_profile_database to create two unique absolute */
+/* file paths to the associated profile files. These files */
+/* are either opened or bsd_open_page_cache_files returns an */
+/* error. The header of the names file is then consulted. */
+/* The header and the vnodes for the names and data files are */
+/* returned. */
+
+int
+bsd_open_page_cache_files(
+ unsigned int user,
+ struct global_profile **profile)
+{
+ char *cache_path = "/var/vm/app_profile/";
+ struct proc *p;
+ int error;
+ int resid;
+ off_t resid_off;
+ unsigned int lru;
+ vm_size_t size;
+
+ struct vnode *names_vp;
+ struct vnode *data_vp;
+ vm_offset_t names_buf;
+ vm_offset_t buf_ptr;
+
+ int profile_names_length;
+ int profile_data_length;
+ char *profile_data_string;
+ char *profile_names_string;
+ char *substring;
+
+ struct vattr vattr;
+
+ kern_return_t ret;
+
+ struct nameidata nd_names;
+ struct nameidata nd_data;
+
+ int i;
+
+
+ p = current_proc();
+
+restart:
+ for(i = 0; i<global_user_profile_cache.max_ele; i++) {
+ if((global_user_profile_cache.profiles[i].user == user)
+ && (global_user_profile_cache.profiles[i].data_vp
+ != NULL)) {
+ *profile = &global_user_profile_cache.profiles[i];
+ /* already in cache, we're done */
+ if ((*profile)->busy) {
+ /*
+ * drop funnel and wait
+ */
+ (void)tsleep((void *)
+ *profile,
+ PRIBIO, "app_profile", 0);
+ goto restart;
+ }
+ (*profile)->busy = 1;
+ (*profile)->age = global_user_profile_cache.age;
+ global_user_profile_cache.age+=1;
+ return 0;
+ }
+ }
+
+ lru = global_user_profile_cache.age;
+ for(i = 0; i<global_user_profile_cache.max_ele; i++) {
+ if(global_user_profile_cache.profiles[i].data_vp == NULL) {
+ *profile = &global_user_profile_cache.profiles[i];
+ (*profile)->age = global_user_profile_cache.age;
+ global_user_profile_cache.age+=1;
+ break;
+ }
+ if(global_user_profile_cache.profiles[i].age < lru) {
+ lru = global_user_profile_cache.profiles[i].age;
+ *profile = &global_user_profile_cache.profiles[i];
+ }
+ }
+
+ if ((*profile)->busy) {
+		/*
+		 * drop funnel and wait; sleep on the victim entry itself
+		 * so that the wakeup(*profile) in
+		 * bsd_close_page_cache_files pairs with this sleep
+		 */
+		(void)tsleep((void *)
+			*profile,
+			PRIBIO, "app_profile", 0);
+ goto restart;
+ }
+ (*profile)->busy = 1;
+ (*profile)->user = user;
+
+ if((*profile)->data_vp != NULL) {
+ kmem_free(kernel_map,
+ (*profile)->buf_ptr, 4 * PAGE_SIZE);
+ if ((*profile)->names_vp) {
+ vrele((*profile)->names_vp);
+ (*profile)->names_vp = NULL;
+ }
+ if ((*profile)->data_vp) {
+ vrele((*profile)->data_vp);
+ (*profile)->data_vp = NULL;
+ }
+ }
+
+	/* put a dummy value in for now so that competing requests */
+	/* wait (above) until we are finished */
+ (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
+
+	/* Try to open the appropriate user's profile files */
+ /* If neither file is present, try to create them */
+ /* If one file is present and the other not, fail. */
+ /* If the files do exist, check them for the app_file */
+ /* requested and read it in if present */
+
+
+ ret = kmem_alloc(kernel_map,
+ (vm_offset_t *)&profile_data_string, PATH_MAX);
+
+ if(ret) {
+ (*profile)->data_vp = NULL;
+ (*profile)->busy = 0;
+ wakeup(*profile);
+ return ENOMEM;
+ }
+
+	/* Split the buffer in half: we know the size of our file */
+	/* paths, so one PATH_MAX allocation is adequate for both  */
+	/* file path names                                         */
+ profile_names_string = profile_data_string + (PATH_MAX/2);
+
+
+ strcpy(profile_data_string, cache_path);
+ strcpy(profile_names_string, cache_path);
+ profile_names_length = profile_data_length
+ = strlen(profile_data_string);
+ substring = profile_data_string + profile_data_length;
+ sprintf(substring, "%x_data", user);
+ substring = profile_names_string + profile_names_length;
+ sprintf(substring, "%x_names", user);
+
+ /* We now have the absolute file names */
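+
+	/* For example, for a hypothetical uid 0x1f5 the two buffers */
+	/* now hold "/var/vm/app_profile/1f5_data" and               */
+	/* "/var/vm/app_profile/1f5_names".                          */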
+
+ ret = kmem_alloc(kernel_map,
+ (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
+ if(ret) {
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+ (*profile)->data_vp = NULL;
+ (*profile)->busy = 0;
+ wakeup(*profile);
+ return ENOMEM;
+ }
+
+ NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
+ UIO_SYSSPACE, profile_names_string, p);
+ NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
+ UIO_SYSSPACE, profile_data_string, p);
+ if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
+#ifdef notdef
+ printf("bsd_open_page_cache_files: CacheData file not found %s\n",
+ profile_data_string);
+#endif
+ kmem_free(kernel_map,
+ (vm_offset_t)names_buf, 4 * PAGE_SIZE);
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+ (*profile)->data_vp = NULL;
+ (*profile)->busy = 0;
+ wakeup(*profile);
+ return error;
+ }
+
+ data_vp = nd_data.ni_vp;
+ VOP_UNLOCK(data_vp, 0, p);
+
+	if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
+		printf("bsd_open_page_cache_files: CacheNames file not found %s\n",
+			profile_names_string);
+ kmem_free(kernel_map,
+ (vm_offset_t)names_buf, 4 * PAGE_SIZE);
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+ vrele(data_vp);
+ (*profile)->data_vp = NULL;
+ (*profile)->busy = 0;
+ wakeup(*profile);
+ return error;
+ }
+ names_vp = nd_names.ni_vp;
+
+ if(error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
+ printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+ kmem_free(kernel_map,
+ (vm_offset_t)names_buf, 4 * PAGE_SIZE);
+ vput(names_vp);
+ vrele(data_vp);
+ (*profile)->data_vp = NULL;
+ (*profile)->busy = 0;
+ wakeup(*profile);
+ return error;
+ }
+
+ size = vattr.va_size;
+ if(size > 4 * PAGE_SIZE)
+ size = 4 * PAGE_SIZE;
+ buf_ptr = names_buf;
+ resid_off = 0;
+
+ while(size) {
+ error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
+ size, resid_off,
+ UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
+ if((error) || (size == resid)) {
+ if(!error) {
+ error = EINVAL;
+ }
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+ kmem_free(kernel_map,
+ (vm_offset_t)names_buf, 4 * PAGE_SIZE);
+ vput(names_vp);
+ vrele(data_vp);
+ (*profile)->data_vp = NULL;
+ (*profile)->busy = 0;
+ wakeup(*profile);
+ return error;
+ }
+ buf_ptr += size-resid;
+ resid_off += size-resid;
+ size = resid;
+ }
+
+ VOP_UNLOCK(names_vp, 0, p);
+ kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
+ (*profile)->names_vp = names_vp;
+ (*profile)->data_vp = data_vp;
+ (*profile)->buf_ptr = names_buf;
+ return 0;
+
+}
+
+void
+bsd_close_page_cache_files(
+ struct global_profile *profile)
+{
+ profile->busy = 0;
+ wakeup(profile);
+}
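+
+/* A minimal usage sketch of the open/close pair above; not compiled */
+/* (same idiom as the other notdef blocks here) and the uid and the  */
+/* function name are hypothetical.                                   */
+#ifdef notdef
+static int
+page_cache_files_example(void)
+{
+	struct global_profile *pf;
+	struct profile_names_header *hdr;
+	int error;
+
+	/* open the profile file pair for uid 503 (assumed) */
+	error = bsd_open_page_cache_files(503, &pf);
+	if (error)
+		return error;
+	/* the names-file header is cached at pf->buf_ptr */
+	hdr = (struct profile_names_header *)pf->buf_ptr;
+	printf("user %x has %d profiles\n",
+		pf->user, hdr->number_of_profiles);
+	/* clear busy and wake any waiters */
+	bsd_close_page_cache_files(pf);
+	return 0;
+}
+#endif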
+
+int
+bsd_read_page_cache_file(
+ unsigned int user,
+ int *fid,
+ int *mod,
+ char *app_name,
+ struct vnode *app_vp,
+ vm_offset_t *buffer,
+ vm_offset_t *buf_size)
+{
+
+ boolean_t funnel_state;
+
+ struct proc *p;
+ int error;
+ int resid;
+
+ off_t profile;
+ unsigned int profile_size;
+
+ vm_offset_t names_buf;
+ struct vattr vattr;
+
+ kern_return_t ret;
+
+ struct vnode *names_vp;
+ struct vnode *data_vp;
+ struct vnode *vp1;
+ struct vnode *vp2;
+
+ struct global_profile *uid_files;
+
+ funnel_state = thread_funnel_set(kernel_flock, TRUE);
+
+	/* Try to open the appropriate user's profile files */
+ /* If neither file is present, try to create them */
+ /* If one file is present and the other not, fail. */
+ /* If the files do exist, check them for the app_file */
+ /* requested and read it in if present */
+
+
+ error = bsd_open_page_cache_files(user, &uid_files);
+ if(error) {
+ thread_funnel_set(kernel_flock, funnel_state);
+ return EINVAL;
+ }
+
+ p = current_proc();
+
+ names_vp = uid_files->names_vp;
+ data_vp = uid_files->data_vp;
+ names_buf = uid_files->buf_ptr;
+
+
+	/*
+	 * Get locks on both files; always lock the vnode with the
+	 * lower address first so all lockers agree on the order and
+	 * cannot deadlock against one another.
+	 */
+
+ if((unsigned int)names_vp < (unsigned int)data_vp) {
+ vp1 = names_vp;
+ vp2 = data_vp;
+ } else {
+ vp1 = data_vp;
+ vp2 = names_vp;
+ }
+ error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
+ if(error) {
+ printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return error;
+ }
+ error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
+ if(error) {
+ printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
+ VOP_UNLOCK(vp1, 0, p);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return error;
+ }
+
+ if(error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return error;
+ }
+
+ *fid = vattr.va_fileid;
+ *mod = vattr.va_mtime.tv_sec;
+
+
+	if (bsd_search_page_cache_data_base(names_vp,
+			(struct profile_names_header *)names_buf, app_name,
+ (unsigned int) vattr.va_mtime.tv_sec,
+ vattr.va_fileid, &profile, &profile_size) == 0) {
+ /* profile is an offset in the profile data base */
+ /* It is zero if no profile data was found */
+
+ if(profile_size == 0) {
+ *buffer = NULL;
+ *buf_size = 0;
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return 0;
+ }
+		ret = kmem_alloc(kernel_map, buffer, profile_size);
+ if(ret) {
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return ENOMEM;
+ }
+ *buf_size = profile_size;
+ while(profile_size) {
+ error = vn_rdwr(UIO_READ, data_vp,
+ (caddr_t) *buffer, profile_size,
+ profile, UIO_SYSSPACE, IO_NODELOCKED,
+ p->p_ucred, &resid, p);
+ if((error) || (profile_size == resid)) {
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ bsd_close_page_cache_files(uid_files);
+				kmem_free(kernel_map, (vm_offset_t)*buffer, *buf_size);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return EINVAL;
+ }
+ profile += profile_size - resid;
+ profile_size = resid;
+ }
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return 0;
+ } else {
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return EINVAL;
+ }
+
+}
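+
+/* On a hit above, *buffer is kmem_alloc'd from kernel_map and        */
+/* *buf_size is its length; the caller presumably owns the buffer     */
+/* and must kmem_free(kernel_map, *buffer, *buf_size) once the        */
+/* profile has been consumed.  On a miss, *buffer and *buf_size are   */
+/* returned as zero.                                                  */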
+
+int
+bsd_search_page_cache_data_base(
+ struct vnode *vp,
+ struct profile_names_header *database,
+ char *app_name,
+ unsigned int mod_date,
+ unsigned int inode,
+ off_t *profile,
+ unsigned int *profile_size)
+{
+
+ struct proc *p;
+
+ unsigned int i;
+ struct profile_element *element;
+ unsigned int ele_total;
+ unsigned int extended_list = 0;
+ off_t file_off = 0;
+ unsigned int size;
+ off_t resid_off;
+ int resid;
+ vm_offset_t local_buf = NULL;
+
+ int error;
+ kern_return_t ret;
+
+ p = current_proc();
+
+ if(((vm_offset_t)database->element_array) !=
+ sizeof(struct profile_names_header)) {
+ return EINVAL;
+ }
+ element = (struct profile_element *)(
+ (vm_offset_t)database->element_array +
+ (vm_offset_t)database);
+
+ ele_total = database->number_of_profiles;
+
+ *profile = 0;
+ *profile_size = 0;
+ while(ele_total) {
+ /* note: code assumes header + n*ele comes out on a page boundary */
+ if(((local_buf == 0) && (sizeof(struct profile_names_header) +
+ (ele_total * sizeof(struct profile_element)))
+ > (PAGE_SIZE * 4)) ||
+ ((local_buf != 0) &&
+ (ele_total * sizeof(struct profile_element))
+ > (PAGE_SIZE * 4))) {
+ extended_list = ele_total;
+ if(element == (struct profile_element *)
+ ((vm_offset_t)database->element_array +
+ (vm_offset_t)database)) {
+ ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
+ } else {
+ ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
+ }
+ extended_list -= ele_total;
+ }
+ for (i=0; i<ele_total; i++) {
+ if((mod_date == element[i].mod_date)
+ && (inode == element[i].inode)) {
+ if(strncmp(element[i].name, app_name, 12) == 0) {
+ *profile = element[i].addr;
+ *profile_size = element[i].size;
+ if(local_buf != NULL) {
+ kmem_free(kernel_map,
+ (vm_offset_t)local_buf, 4 * PAGE_SIZE);
+ }
+ return 0;
+ }
+ }
+ }
+ if(extended_list == 0)
+ break;
+ if(local_buf == NULL) {
+ ret = kmem_alloc(kernel_map,
+ (vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
+ if(ret != KERN_SUCCESS) {
+ return ENOMEM;
+ }
+ }
+ element = (struct profile_element *)local_buf;
+ ele_total = extended_list;
+ extended_list = 0;
+ file_off += 4 * PAGE_SIZE;
+ if((ele_total * sizeof(struct profile_element)) >
+ (PAGE_SIZE * 4)) {
+ size = PAGE_SIZE * 4;
+ } else {
+ size = ele_total * sizeof(struct profile_element);
+ }
+ resid_off = 0;
+ while(size) {
+ error = vn_rdwr(UIO_READ, vp,
+ (caddr_t)(local_buf + resid_off),
+ size, file_off + resid_off, UIO_SYSSPACE,
+ IO_NODELOCKED, p->p_ucred, &resid, p);
+ if((error) || (size == resid)) {
+ if(local_buf != NULL) {
+ kmem_free(kernel_map,
+ (vm_offset_t)local_buf,
+ 4 * PAGE_SIZE);
+ }
+ return EINVAL;
+ }
+ resid_off += size-resid;
+ size = resid;
+ }
+ }
+ if(local_buf != NULL) {
+ kmem_free(kernel_map,
+ (vm_offset_t)local_buf, 4 * PAGE_SIZE);
+ }
+ return 0;
+}
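+
+/* A worked example of the paging arithmetic above, assuming 4-byte   */
+/* ints, an 8-byte off_t, no struct padding, and PAGE_SIZE == 4096:   */
+/* both structures are then 32 bytes, so the 16K names buffer holds   */
+/* the header plus exactly 511 elements (hence the "page boundary"    */
+/* note and the - 1 in the first batch).  A database of 600 profiles  */
+/* is scanned as 511 elements from the cached buffer, then one read   */
+/* at file offset 16384 for the remaining 89 (89 * 32 = 2848 bytes).  */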
+
+int
+bsd_write_page_cache_file(
+ unsigned int user,
+ char *file_name,
+ caddr_t buffer,
+ vm_size_t size,
+ int mod,
+ int fid)
+{
+ struct proc *p;
+ int resid;
+ off_t resid_off;
+ int error;
+ boolean_t funnel_state;
+ struct vattr data_vattr;
+
+ off_t profile;
+ unsigned int profile_size;
+
+ vm_offset_t names_buf;
+ struct vnode *names_vp;
+ struct vnode *data_vp;
+ struct vnode *vp1;
+ struct vnode *vp2;
+
+ struct profile_names_header *profile_header;
+ off_t name_offset;
+
+ struct global_profile *uid_files;
+
+
+ funnel_state = thread_funnel_set(kernel_flock, TRUE);
+
+
+
+ error = bsd_open_page_cache_files(user, &uid_files);
+ if(error) {
+ thread_funnel_set(kernel_flock, funnel_state);
+ return EINVAL;
+ }
+
+ p = current_proc();
+
+ names_vp = uid_files->names_vp;
+ data_vp = uid_files->data_vp;
+ names_buf = uid_files->buf_ptr;
+
+	/*
+	 * Get locks on both files; always lock the vnode with the
+	 * lower address first so all lockers agree on the order and
+	 * cannot deadlock against one another.
+	 */
+
+ if((unsigned int)names_vp < (unsigned int)data_vp) {
+ vp1 = names_vp;
+ vp2 = data_vp;
+ } else {
+ vp1 = data_vp;
+ vp2 = names_vp;
+ }
+
+ error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
+ if(error) {
+ printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return error;
+ }
+ error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
+ if(error) {
+ printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
+ VOP_UNLOCK(vp1, 0, p);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return error;
+ }
+
+ /* Stat data file for size */
+
+ if(error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return error;
+ }
+
+ if (bsd_search_page_cache_data_base(names_vp,
+ (struct profile_names_header *)names_buf,
+ file_name, (unsigned int) mod,
+ fid, &profile, &profile_size) == 0) {
+ /* profile is an offset in the profile data base */
+ /* It is zero if no profile data was found */
+
+ if(profile_size == 0) {
+ unsigned int header_size;
+ vm_offset_t buf_ptr;
+
+ /* Our Write case */
+
+ /* read header for last entry */
+ profile_header =
+ (struct profile_names_header *)names_buf;
+ name_offset = sizeof(struct profile_names_header) +
+ (sizeof(struct profile_element)
+ * profile_header->number_of_profiles);
+ profile_header->number_of_profiles += 1;
+
+ if(name_offset < PAGE_SIZE * 4) {
+ struct profile_element *name;
+ /* write new entry */
+ name = (struct profile_element *)
+ (names_buf + (vm_offset_t)name_offset);
+ name->addr = data_vattr.va_size;
+ name->size = size;
+ name->mod_date = mod;
+ name->inode = fid;
+ strncpy (name->name, file_name, 12);
+ } else {
+ unsigned int ele_size;
+ struct profile_element name;
+ /* write new entry */
+ name.addr = data_vattr.va_size;
+ name.size = size;
+ name.mod_date = mod;
+ name.inode = fid;
+ strncpy (name.name, file_name, 12);
+ /* write element out separately */
+ ele_size = sizeof(struct profile_element);
+ buf_ptr = (vm_offset_t)&name;
+ resid_off = name_offset;
+
+ while(ele_size) {
+ error = vn_rdwr(UIO_WRITE, names_vp,
+ (caddr_t)buf_ptr,
+ ele_size, resid_off,
+ UIO_SYSSPACE, IO_NODELOCKED,
+ p->p_ucred, &resid, p);
+ if(error) {
+ printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ bsd_close_page_cache_files(
+ uid_files);
+ thread_funnel_set(
+ kernel_flock,
+ funnel_state);
+ return error;
+ }
+ buf_ptr += (vm_offset_t)
+ ele_size-resid;
+ resid_off += ele_size-resid;
+ ele_size = resid;
+ }
+ }
+
+ if(name_offset < PAGE_SIZE * 4) {
+ header_size = name_offset +
+ sizeof(struct profile_element);
+
+ } else {
+ header_size =
+ sizeof(struct profile_names_header);
+ }
+ buf_ptr = (vm_offset_t)profile_header;
+ resid_off = 0;
+
+ /* write names file header */
+ while(header_size) {
+ error = vn_rdwr(UIO_WRITE, names_vp,
+ (caddr_t)buf_ptr,
+ header_size, resid_off,
+ UIO_SYSSPACE, IO_NODELOCKED,
+ p->p_ucred, &resid, p);
+ if(error) {
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ printf("bsd_write_page_cache_file: Can't write header %x\n", user);
+ bsd_close_page_cache_files(
+ uid_files);
+ thread_funnel_set(
+ kernel_flock, funnel_state);
+ return error;
+ }
+ buf_ptr += (vm_offset_t)header_size-resid;
+ resid_off += header_size-resid;
+ header_size = resid;
+ }
+ /* write profile to data file */
+ resid_off = data_vattr.va_size;
+ while(size) {
+ error = vn_rdwr(UIO_WRITE, data_vp,
+ (caddr_t)buffer, size, resid_off,
+ UIO_SYSSPACE, IO_NODELOCKED,
+ p->p_ucred, &resid, p);
+ if(error) {
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ printf("bsd_write_page_cache_file: Can't write header %x\n", user);
+ bsd_close_page_cache_files(
+ uid_files);
+ thread_funnel_set(
+ kernel_flock, funnel_state);
+ return error;
+ }
+ buffer += size-resid;
+ resid_off += size-resid;
+ size = resid;
+ }
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return 0;
+ }
+ /* Someone else wrote a twin profile before us */
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return 0;
+ } else {
+ VOP_UNLOCK(names_vp, 0, p);
+ VOP_UNLOCK(data_vp, 0, p);
+ bsd_close_page_cache_files(uid_files);
+ thread_funnel_set(kernel_flock, funnel_state);
+ return EINVAL;
+ }
+
+}
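+
+/* Write protocol summary: profiles are append-only.  The blob is     */
+/* written at the data file's current EOF (data_vattr.va_size), a     */
+/* matching profile_element is appended to the names file, and the    */
+/* bumped number_of_profiles count makes it visible to searches.  A   */
+/* prior search hit means a twin profile already exists, so the       */
+/* write is silently dropped and 0 is returned.                       */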
+
+int
+prepare_profile_database(int user)
+{
+ char *cache_path = "/var/vm/app_profile/";
+ struct proc *p;
+ int error;
+ int resid;
+ off_t resid_off;
+ vm_size_t size;
+
+ struct vnode *names_vp;
+ struct vnode *data_vp;
+ vm_offset_t names_buf;
+ vm_offset_t buf_ptr;
+
+ int profile_names_length;
+ int profile_data_length;
+ char *profile_data_string;
+ char *profile_names_string;
+ char *substring;
+
+ struct vattr vattr;
+
+ struct profile_names_header *profile_header;
+ kern_return_t ret;
+
+ struct nameidata nd_names;
+ struct nameidata nd_data;
+
+ p = current_proc();
+
+ ret = kmem_alloc(kernel_map,
+ (vm_offset_t *)&profile_data_string, PATH_MAX);
+
+ if(ret) {
+ return ENOMEM;
+ }
+
+	/* Split the buffer in half: we know the size of our file */
+	/* paths, so one PATH_MAX allocation is adequate for both  */
+	/* file path names                                         */
+ profile_names_string = profile_data_string + (PATH_MAX/2);
+
+
+ strcpy(profile_data_string, cache_path);
+ strcpy(profile_names_string, cache_path);
+ profile_names_length = profile_data_length
+ = strlen(profile_data_string);
+ substring = profile_data_string + profile_data_length;
+ sprintf(substring, "%x_data", user);
+ substring = profile_names_string + profile_names_length;
+ sprintf(substring, "%x_names", user);
+
+ /* We now have the absolute file names */
+
+ ret = kmem_alloc(kernel_map,
+ (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
+ if(ret) {
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+ return ENOMEM;
+ }
+
+ NDINIT(&nd_names, LOOKUP, FOLLOW,
+ UIO_SYSSPACE, profile_names_string, p);
+ NDINIT(&nd_data, LOOKUP, FOLLOW,
+ UIO_SYSSPACE, profile_data_string, p);
+
+ if (error = vn_open(&nd_data,
+ O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
+ kmem_free(kernel_map,
+ (vm_offset_t)names_buf, 4 * PAGE_SIZE);
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+		/* the O_EXCL create failed, presumably because the */
+		/* database files already exist; report success */
+		return 0;
+ }
+
+ data_vp = nd_data.ni_vp;
+ VOP_UNLOCK(data_vp, 0, p);
+
+ if (error = vn_open(&nd_names,
+ O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
+ printf("prepare_profile_database: Can't create CacheNames %s\n",
+			profile_names_string);
+ kmem_free(kernel_map,
+ (vm_offset_t)names_buf, 4 * PAGE_SIZE);
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+ vrele(data_vp);
+ return error;
+ }
+
+ names_vp = nd_names.ni_vp;
+
+
+ /* Write Header for new names file */
+
+ profile_header = (struct profile_names_header *)names_buf;
+
+ profile_header->number_of_profiles = 0;
+ profile_header->user_id = user;
+ profile_header->version = 1;
+ profile_header->element_array =
+ sizeof(struct profile_names_header);
+ profile_header->spare1 = 0;
+ profile_header->spare2 = 0;
+ profile_header->spare3 = 0;
+
+ size = sizeof(struct profile_names_header);
+ buf_ptr = (vm_offset_t)profile_header;
+ resid_off = 0;
+
+ while(size) {
+ error = vn_rdwr(UIO_WRITE, names_vp,
+ (caddr_t)buf_ptr, size, resid_off,
+ UIO_SYSSPACE, IO_NODELOCKED,
+ p->p_ucred, &resid, p);
+ if(error) {
+ printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
+ kmem_free(kernel_map,
+ (vm_offset_t)names_buf, 4 * PAGE_SIZE);
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string,
+ PATH_MAX);
+ vput(names_vp);
+ vrele(data_vp);
+ return error;
+ }
+ buf_ptr += size-resid;
+ resid_off += size-resid;
+ size = resid;
+ }
+
+ VATTR_NULL(&vattr);
+ vattr.va_uid = user;
+ error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);
+ if(error) {
+ printf("prepare_profile_database: "
+ "Can't set user %s\n", profile_names_string);
+ }
+ vput(names_vp);
+
+ error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);
+ if(error) {
+ vrele(data_vp);
+ printf("prepare_profile_database: cannot lock data file %s\n",
+ profile_data_string);
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+ kmem_free(kernel_map,
+ (vm_offset_t)names_buf, 4 * PAGE_SIZE);
+		return error;
+	}
+ VATTR_NULL(&vattr);
+ vattr.va_uid = user;
+ error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);
+ if(error) {
+ printf("prepare_profile_database: "
+ "Can't set user %s\n", profile_data_string);
+ }
+
+ vput(data_vp);
+ kmem_free(kernel_map,
+ (vm_offset_t)profile_data_string, PATH_MAX);
+ kmem_free(kernel_map,
+ (vm_offset_t)names_buf, 4 * PAGE_SIZE);
+ return 0;
+
+}
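+
+/* For a hypothetical uid 0x1f5, prepare_profile_database leaves:     */
+/*                                                                    */
+/*	/var/vm/app_profile/1f5_names  header only, zero profiles,    */
+/*	                               owned by the uid, mode 0600    */
+/*	/var/vm/app_profile/1f5_data   empty, owned by the uid, 0600  */
+/*                                                                    */
+/* bsd_open_page_cache_files() later opens the pair by rebuilding     */
+/* the same "%x_names" / "%x_data" paths.                             */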