X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..5eebf7385fedb1517b66b53c28e5aa6bb0a2be50:/bsd/vm/vm_unix.c?ds=sidebyside diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c index 41d4714bc..a322679ae 100644 --- a/bsd/vm/vm_unix.c +++ b/bsd/vm/vm_unix.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -28,6 +28,8 @@ /* */ + + #include #include @@ -52,14 +54,22 @@ #include #include #include +#include + +#include +#include #include -#include #include #include #include + #include +#include + + +extern zone_t lsf_zone; useracc(addr, len, prot) caddr_t addr; @@ -68,7 +78,7 @@ useracc(addr, len, prot) { return (vm_map_check_protection( current_map(), - trunc_page(addr), round_page(addr+len), + trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)), prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE)); } @@ -76,9 +86,22 @@ vslock(addr, len) caddr_t addr; int len; { - vm_map_wire(current_map(), trunc_page(addr), - round_page(addr+len), +kern_return_t kret; + kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr), + round_page_32((unsigned int)(addr+len)), VM_PROT_READ | VM_PROT_WRITE ,FALSE); + + switch (kret) { + case KERN_SUCCESS: + return (0); + case KERN_INVALID_ADDRESS: + case KERN_NO_SPACE: + return (ENOMEM); + case KERN_PROTECTION_FAILURE: + return (EACCES); + default: + return (EINVAL); + } } vsunlock(addr, len, dirtied) @@ -91,11 +114,12 @@ vsunlock(addr, len, dirtied) vm_page_t pg; #endif /* FIXME ] */ vm_offset_t vaddr, paddr; + kern_return_t kret; #if FIXME /* [ */ if (dirtied) { pmap = get_task_pmap(current_task()); - for (vaddr = trunc_page(addr); vaddr < round_page(addr+len); + for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len)); vaddr += PAGE_SIZE) { paddr = pmap_extract(pmap, vaddr); pg = PHYS_TO_VM_PAGE(paddr); @@ -106,8 +130,19 @@ vsunlock(addr, len, dirtied) #ifdef lint dirtied++; #endif /* lint */ - vm_map_unwire(current_map(), trunc_page(addr), - round_page(addr+len), FALSE); + kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)), + round_page_32((unsigned int)(addr+len)), FALSE); + switch (kret) { + case KERN_SUCCESS: + return (0); + case KERN_INVALID_ADDRESS: + case KERN_NO_SPACE: + return (ENOMEM); + case KERN_PROTECTION_FAILURE: + return (EACCES); + default: + return (EINVAL); + } } #if defined(sun) || BALANCE || defined(m88k) @@ -195,39 +230,6 @@ swapon() return(EOPNOTSUPP); } -thread_t -procdup( - struct proc *child, - struct proc *parent) -{ - thread_t thread; - task_t task; - kern_return_t result; - - if (parent->task == kernel_task) - result = task_create_local(TASK_NULL, FALSE, FALSE, &task); - else - result = task_create_local(parent->task, TRUE, FALSE, &task); - if (result != KERN_SUCCESS) - printf("fork/procdup: task_create failed. Code: 0x%x\n", result); - child->task = task; - /* task->proc = child; */ - set_bsdtask_info(task, child); - result = thread_create(task, &thread); - if (result != KERN_SUCCESS) - printf("fork/procdup: thread_create failed. Code: 0x%x\n", result); - -#if FIXME /* [ */ - thread_deallocate(thread); // extra ref - - /* - * Don't need to lock thread here because it can't - * possibly execute and no one else knows about it. 
- */ - /* compute_priority(thread, FALSE); */ -#endif /* ] */ - return(thread); -} kern_return_t pid_for_task(t, x) @@ -238,14 +240,18 @@ pid_for_task(t, x) task_t t1; extern task_t port_name_to_task(mach_port_t t); int pid = -1; - kern_return_t err; + kern_return_t err = KERN_SUCCESS; boolean_t funnel_state; + AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK); + AUDIT_ARG(mach_port1, t); + funnel_state = thread_funnel_set(kernel_flock, TRUE); t1 = port_name_to_task(t); if (t1 == TASK_NULL) { err = KERN_FAILURE; + goto pftout; } else { p = get_bsdtask_info(t1); if (p) { @@ -256,9 +262,11 @@ pid_for_task(t, x) } } task_deallocate(t1); - (void) copyout((char *) &pid, (char *) x, sizeof(*x)); pftout: + AUDIT_ARG(pid, pid); + (void) copyout((char *) &pid, (char *) x, sizeof(*x)); thread_funnel_set(kernel_flock, funnel_state); + AUDIT_MACH_SYSCALL_EXIT(err); return(err); } @@ -286,21 +294,28 @@ task_for_pid(target_tport, pid, t) int error = 0; boolean_t funnel_state; + AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID); + AUDIT_ARG(pid, pid); + AUDIT_ARG(mach_port1, target_tport); + t1 = port_name_to_task(target_tport); if (t1 == TASK_NULL) { (void ) copyout((char *)&t1, (char *)t, sizeof(mach_port_t)); - error = KERN_FAILURE; - goto tfpout; + AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE); + return(KERN_FAILURE); } funnel_state = thread_funnel_set(kernel_flock, TRUE); restart: p1 = get_bsdtask_info(t1); + p = pfind(pid); + AUDIT_ARG(process, p); if ( - ((p = pfind(pid)) != (struct proc *) 0) + (p != (struct proc *) 0) && (p1 != (struct proc *) 0) - && ((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) + && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) && + ((p->p_cred->p_ruid == p1->p_cred->p_ruid))) || !(suser(p1->p_ucred, &p1->p_acflag))) && (p->p_stat != SZOMB) ) { @@ -309,10 +324,13 @@ task_for_pid(target_tport, pid, t) mutex_pause(); /* temp loss of funnel */ goto restart; } - sright = convert_task_to_port(p->task); - tret = ipc_port_copyout_send(sright, get_task_ipcspace(current_task())); + sright = (void *)convert_task_to_port(p->task); + tret = (void *) + ipc_port_copyout_send(sright, + get_task_ipcspace(current_task())); } else tret = MACH_PORT_NULL; + AUDIT_ARG(mach_port2, tret); (void ) copyout((char *)&tret, (char *) t, sizeof(mach_port_t)); task_deallocate(t1); error = KERN_SUCCESS; @@ -324,6 +342,7 @@ task_for_pid(target_tport, pid, t) error = KERN_FAILURE; tfpout: thread_funnel_set(kernel_flock, funnel_state); + AUDIT_MACH_SYSCALL_EXIT(error); return(error); } @@ -338,6 +357,7 @@ struct load_shared_file_args { int *flags; }; +int ws_disabled = 1; int load_shared_file( @@ -359,13 +379,13 @@ load_shared_file( kern_return_t kr; struct vattr vattr; - void *object; - void *file_object; + memory_object_control_t file_control; sf_mapping_t *map_list; caddr_t local_base; int local_flags; int caller_flags; int i; + int default_regions = 0; vm_size_t dummy; kern_return_t kret; @@ -375,8 +395,7 @@ load_shared_file( ndp = &nd; - unix_master(); - + AUDIT_ARG(addr, base_address); /* Retrieve the base address */ if (error = copyin(base_address, &local_base, sizeof (caddr_t))) { goto lsf_bailout; @@ -384,6 +403,39 @@ load_shared_file( if (error = copyin(flags, &local_flags, sizeof (int))) { goto lsf_bailout; } + + if(local_flags & QUERY_IS_SYSTEM_REGION) { + shared_region_mapping_t default_shared_region; + vm_get_shared_region(current_task(), &shared_region); + task_mapping_info.self = (vm_offset_t)shared_region; + + shared_region_mapping_info(shared_region, + &(task_mapping_info.text_region), + &(task_mapping_info.text_size), 
+ &(task_mapping_info.data_region), + &(task_mapping_info.data_size), + &(task_mapping_info.region_mappings), + &(task_mapping_info.client_base), + &(task_mapping_info.alternate_base), + &(task_mapping_info.alternate_next), + &(task_mapping_info.fs_base), + &(task_mapping_info.system), + &(task_mapping_info.flags), &next); + + default_shared_region = + lookup_default_shared_region( + ENV_DEFAULT_ROOT, + task_mapping_info.system); + if (shared_region == default_shared_region) { + local_flags = SYSTEM_REGION_BACKED; + } else { + local_flags = 0; + } + shared_region_mapping_dealloc(default_shared_region); + error = 0; + error = copyout(&local_flags, flags, sizeof (int)); + goto lsf_bailout; + } caller_flags = local_flags; kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str, (vm_size_t)(MAXPATHLEN)); @@ -413,7 +465,7 @@ load_shared_file( /* * Get a vnode for the target file */ - NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, + NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE, filename_str, p); if ((error = namei(ndp))) { @@ -434,8 +486,8 @@ load_shared_file( } - file_object = ubc_getobject(vp, (UBC_NOREACTIVATE|UBC_HOLDOBJECT)); - if (file_object == (void *)NULL) { + file_control = ubc_getobject(vp, UBC_HOLDOBJECT); + if (file_control == MEMORY_OBJECT_CONTROL_NULL) { error = EINVAL; goto lsf_bailout_free_vput; } @@ -446,6 +498,18 @@ load_shared_file( goto lsf_bailout_free_vput; } #endif + if(p->p_flag & P_NOSHLIB) { + p->p_flag = p->p_flag & ~P_NOSHLIB; + } + + /* load alternate regions if the caller has requested. */ + /* Note: the new regions are "clean slates" */ + if (local_flags & NEW_LOCAL_SHARED_REGIONS) { + error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT); + if (error) { + goto lsf_bailout_free_vput; + } + } vm_get_shared_region(current_task(), &shared_region); task_mapping_info.self = (vm_offset_t)shared_region; @@ -459,8 +523,54 @@ load_shared_file( &(task_mapping_info.client_base), &(task_mapping_info.alternate_base), &(task_mapping_info.alternate_next), + &(task_mapping_info.fs_base), + &(task_mapping_info.system), &(task_mapping_info.flags), &next); + { + shared_region_mapping_t default_shared_region; + default_shared_region = + lookup_default_shared_region( + ENV_DEFAULT_ROOT, + task_mapping_info.system); + if(shared_region == default_shared_region) { + default_regions = 1; + } + shared_region_mapping_dealloc(default_shared_region); + } + /* If we are running on a removable file system we must not */ + /* be in a set of shared regions or the file system will not */ + /* be removable. 
*/ + if(((vp->v_mount != rootvnode->v_mount) && (default_regions)) + && (lsf_mapping_pool_gauge() < 75)) { + /* We don't want to run out of shared memory */ + /* map entries by starting too many private versions */ + /* of the shared library structures */ + int error; + if(p->p_flag & P_NOSHLIB) { + error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT); + } else { + error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT); + } + if (error) { + goto lsf_bailout_free_vput; + } + local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS; + vm_get_shared_region(current_task(), &shared_region); + shared_region_mapping_info(shared_region, + &(task_mapping_info.text_region), + &(task_mapping_info.text_size), + &(task_mapping_info.data_region), + &(task_mapping_info.data_size), + &(task_mapping_info.region_mappings), + &(task_mapping_info.client_base), + &(task_mapping_info.alternate_base), + &(task_mapping_info.alternate_next), + &(task_mapping_info.fs_base), + &(task_mapping_info.system), + &(task_mapping_info.flags), &next); + } + /* This is a work-around to allow executables which have been */ /* built without knowledge of the proper shared segment to */ /* load. This code has been architected as a shared region */ @@ -487,71 +597,11 @@ load_shared_file( } } - /* load alternate regions if the caller has requested. */ - /* Note: the new regions are "clean slates" */ - - if (local_flags & NEW_LOCAL_SHARED_REGIONS) { - - shared_region_mapping_t new_shared_region; - shared_region_mapping_t old_shared_region; - struct shared_region_task_mappings old_info; - struct shared_region_task_mappings new_info; - - if(shared_file_create_system_region(&new_shared_region)) { - error = ENOMEM; - goto lsf_bailout_free_vput; - } - vm_get_shared_region(current_task(), &old_shared_region); - - old_info.self = (vm_offset_t)old_shared_region; - shared_region_mapping_info(old_shared_region, - &(old_info.text_region), - &(old_info.text_size), - &(old_info.data_region), - &(old_info.data_size), - &(old_info.region_mappings), - &(old_info.client_base), - &(old_info.alternate_base), - &(old_info.alternate_next), - &(old_info.flags), &next); - new_info.self = (vm_offset_t)new_shared_region; - shared_region_mapping_info(new_shared_region, - &(new_info.text_region), - &(new_info.text_size), - &(new_info.data_region), - &(new_info.data_size), - &(new_info.region_mappings), - &(new_info.client_base), - &(new_info.alternate_base), - &(new_info.alternate_next), - &(new_info.flags), &next); - if (vm_map_region_replace(current_map(), old_info.text_region, - new_info.text_region, old_info.client_base, - old_info.client_base+old_info.text_size)) { - panic("load_shared_file: shared region mis-alignment"); - shared_region_mapping_dealloc(new_shared_region); - error = EINVAL; - goto lsf_bailout_free_vput; - } - if(vm_map_region_replace(current_map(), old_info.data_region, - new_info.data_region, - old_info.client_base + old_info.text_size, - old_info.client_base - + old_info.text_size + old_info.data_size)) { - panic("load_shared_file: shared region mis-alignment 1"); - shared_region_mapping_dealloc(new_shared_region); - error = EINVAL; - goto lsf_bailout_free_vput; - } - vm_set_shared_region(current_task(), new_shared_region); - task_mapping_info = new_info; - shared_region_mapping_dealloc(old_shared_region); - } if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr, mapped_file_size, (vm_offset_t *)&local_base, - map_cnt, map_list, file_object, + map_cnt, map_list, file_control, &task_mapping_info, &local_flags))) { switch 
(kr) { case KERN_FAILURE: @@ -575,7 +625,7 @@ load_shared_file( error = EINVAL; }; if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) { - printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_object 0x%x\n", error, local_base, map_cnt, file_object); + printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control); for(i=0; ibusy) { + /* + * drop funnel and wait + */ + (void)tsleep((void *) + *profile, + PRIBIO, "app_profile", 0); + goto restart; + } + (*profile)->busy = 1; + (*profile)->age = global_user_profile_cache.age; + global_user_profile_cache.age+=1; + return 0; + } + } + + lru = global_user_profile_cache.age; + *profile = NULL; + for(i = 0; iage = global_user_profile_cache.age; + break; + } + /* Otherwise grab the oldest entry */ + if(global_user_profile_cache.profiles[i].age < lru) { + lru = global_user_profile_cache.profiles[i].age; + *profile = &global_user_profile_cache.profiles[i]; + } + } + + /* Did we set it? */ + if (*profile == NULL) { + /* + * No entries are available; this can only happen if all + * of them are currently in the process of being reused; + * if this happens, we sleep on the address of the first + * element, and restart. This is less than ideal, but we + * know it will work because we know that there will be a + * wakeup on any entry currently in the process of being + * reused. + * + * XXX Reccomend a two handed clock and more than 3 total + * XXX cache entries at some point in the future. + */ + /* + * drop funnel and wait + */ + (void)tsleep((void *) + &global_user_profile_cache.profiles[0], + PRIBIO, "app_profile", 0); + goto restart; + } + + /* + * If it's currently busy, we've picked the one at the end of the + * LRU list, but it's currently being actively used. We sleep on + * its address and restart. + */ + if ((*profile)->busy) { + /* + * drop funnel and wait + */ + (void)tsleep((void *) + *profile, + PRIBIO, "app_profile", 0); + goto restart; + } + (*profile)->busy = 1; + (*profile)->user = user; + + /* + * put dummy value in for now to get competing request to wait + * above until we are finished + * + * Save the data_vp before setting it, so we can set it before + * we kmem_free() or vrele(). If we don't do this, then we + * have a potential funnel race condition we have to deal with. + */ + data_vp = (*profile)->data_vp; + (*profile)->data_vp = (struct vnode *)0xFFFFFFFF; + + /* + * Age the cache here in all cases; this guarantees that we won't + * be reusing only one entry over and over, once the system reaches + * steady-state. + */ + global_user_profile_cache.age+=1; + + if(data_vp != NULL) { + kmem_free(kernel_map, + (*profile)->buf_ptr, 4 * PAGE_SIZE); + if ((*profile)->names_vp) { + vrele((*profile)->names_vp); + (*profile)->names_vp = NULL; + } + vrele(data_vp); + } + + /* Try to open the appropriate users profile files */ + /* If neither file is present, try to create them */ + /* If one file is present and the other not, fail. 
*/ + /* If the files do exist, check them for the app_file */ + /* requested and read it in if present */ + + ret = kmem_alloc(kernel_map, + (vm_offset_t *)&profile_data_string, PATH_MAX); + + if(ret) { + (*profile)->data_vp = NULL; + (*profile)->busy = 0; + wakeup(*profile); + return ENOMEM; + } + + /* Split the buffer in half since we know the size of */ + /* our file path and our allocation is adequate for */ + /* both file path names */ + profile_names_string = profile_data_string + (PATH_MAX/2); + + + strcpy(profile_data_string, cache_path); + strcpy(profile_names_string, cache_path); + profile_names_length = profile_data_length + = strlen(profile_data_string); + substring = profile_data_string + profile_data_length; + sprintf(substring, "%x_data", user); + substring = profile_names_string + profile_names_length; + sprintf(substring, "%x_names", user); + + /* We now have the absolute file names */ + + ret = kmem_alloc(kernel_map, + (vm_offset_t *)&names_buf, 4 * PAGE_SIZE); + if(ret) { + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + (*profile)->data_vp = NULL; + (*profile)->busy = 0; + wakeup(*profile); + return ENOMEM; + } + + NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF, + UIO_SYSSPACE, profile_names_string, p); + NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF, + UIO_SYSSPACE, profile_data_string, p); + if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) { +#ifdef notdef + printf("bsd_open_page_cache_files: CacheData file not found %s\n", + profile_data_string); +#endif + kmem_free(kernel_map, + (vm_offset_t)names_buf, 4 * PAGE_SIZE); + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + (*profile)->data_vp = NULL; + (*profile)->busy = 0; + wakeup(*profile); + return error; + } + + data_vp = nd_data.ni_vp; + VOP_UNLOCK(data_vp, 0, p); + + if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) { + printf("bsd_open_page_cache_files: NamesData file not found %s\n", + profile_data_string); + kmem_free(kernel_map, + (vm_offset_t)names_buf, 4 * PAGE_SIZE); + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + vrele(data_vp); + (*profile)->data_vp = NULL; + (*profile)->busy = 0; + wakeup(*profile); + return error; + } + names_vp = nd_names.ni_vp; + + if(error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) { + printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string); + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + kmem_free(kernel_map, + (vm_offset_t)names_buf, 4 * PAGE_SIZE); + vput(names_vp); + vrele(data_vp); + (*profile)->data_vp = NULL; + (*profile)->busy = 0; + wakeup(*profile); + return error; + } + + size = vattr.va_size; + if(size > 4 * PAGE_SIZE) + size = 4 * PAGE_SIZE; + buf_ptr = names_buf; + resid_off = 0; + + while(size) { + error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr, + size, resid_off, + UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p); + if((error) || (size == resid)) { + if(!error) { + error = EINVAL; + } + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + kmem_free(kernel_map, + (vm_offset_t)names_buf, 4 * PAGE_SIZE); + vput(names_vp); + vrele(data_vp); + (*profile)->data_vp = NULL; + (*profile)->busy = 0; + wakeup(*profile); + return error; + } + buf_ptr += size-resid; + resid_off += size-resid; + size = resid; + } + + VOP_UNLOCK(names_vp, 0, p); + kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX); + (*profile)->names_vp = names_vp; + (*profile)->data_vp = data_vp; + (*profile)->buf_ptr = names_buf; + return 0; + +} + 
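The bsd_open_page_cache_files() routine added above relies on the per-slot busy flag and a monotonically increasing age stamp to decide which cache entry to hand out: reuse the caller's own slot if present, otherwise take a free slot, otherwise recycle the least-recently-used one. The fragment below is a minimal user-space sketch of that selection order only; the slot layout and names are simplified stand-ins, and the tsleep()/wakeup() synchronization of the real routine is deliberately left out.

/*
 * Standalone sketch (not kernel code) of the age-based LRU selection used
 * by bsd_open_page_cache_files() for its per-user profile cache.  Types,
 * sizes and names here are illustrative assumptions.
 */
#include <stdio.h>

#define NSLOTS 3                 /* the kernel cache is similarly small */

struct slot {
	unsigned int user;           /* uid that owns the cached profile files */
	int          in_use;         /* stands in for data_vp != NULL          */
	unsigned int age;            /* global age stamp at last use           */
};

static struct slot cache[NSLOTS];
static unsigned int global_age = 1;

static struct slot *
pick_slot(unsigned int user)
{
	struct slot *victim = NULL;
	unsigned int lru = global_age;
	int i;

	/* First preference: a live entry already owned by this user. */
	for (i = 0; i < NSLOTS; i++) {
		if (cache[i].in_use && cache[i].user == user) {
			cache[i].age = global_age++;   /* refresh its age on a hit */
			return &cache[i];
		}
	}
	/* Otherwise a free slot wins outright; failing that, the oldest. */
	for (i = 0; i < NSLOTS; i++) {
		if (!cache[i].in_use)
			return &cache[i];
		if (cache[i].age < lru) {
			lru = cache[i].age;
			victim = &cache[i];
		}
	}
	return victim;                         /* least-recently-used entry */
}

int
main(void)
{
	struct slot *s = pick_slot(501);
	s->user = 501; s->in_use = 1; s->age = global_age++;
	s = pick_slot(502);
	s->user = 502; s->in_use = 1; s->age = global_age++;
	printf("slot for uid 501: %ld\n", (long)(pick_slot(501) - cache));
	printf("slot for uid 502: %ld\n", (long)(pick_slot(502) - cache));
	return 0;
}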
+void +bsd_close_page_cache_files( + struct global_profile *profile) +{ + profile->busy = 0; + wakeup(profile); +} + +int +bsd_read_page_cache_file( + unsigned int user, + int *fid, + int *mod, + char *app_name, + struct vnode *app_vp, + vm_offset_t *buffer, + vm_offset_t *buf_size) +{ + + boolean_t funnel_state; + + struct proc *p; + int error; + int resid; + vm_size_t size; + + off_t profile; + unsigned int profile_size; + + vm_offset_t names_buf; + struct vattr vattr; + + kern_return_t ret; + + struct vnode *names_vp; + struct vnode *data_vp; + struct vnode *vp1; + struct vnode *vp2; + + struct global_profile *uid_files; + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + /* Try to open the appropriate users profile files */ + /* If neither file is present, try to create them */ + /* If one file is present and the other not, fail. */ + /* If the files do exist, check them for the app_file */ + /* requested and read it in if present */ + + + error = bsd_open_page_cache_files(user, &uid_files); + if(error) { + thread_funnel_set(kernel_flock, funnel_state); + return EINVAL; + } + + p = current_proc(); + + names_vp = uid_files->names_vp; + data_vp = uid_files->data_vp; + names_buf = uid_files->buf_ptr; + + + /* + * Get locks on both files, get the vnode with the lowest address first + */ + + if((unsigned int)names_vp < (unsigned int)data_vp) { + vp1 = names_vp; + vp2 = data_vp; + } else { + vp1 = data_vp; + vp2 = names_vp; + } + error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p); + if(error) { + printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return error; + } + error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p); + if(error) { + printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user); + VOP_UNLOCK(vp1, 0, p); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return error; + } + + if(error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) { + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + printf("bsd_read_cache_file: Can't stat app file %s\n", app_name); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return error; + } + + *fid = vattr.va_fileid; + *mod = vattr.va_mtime.tv_sec; + + + if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name, + (unsigned int) vattr.va_mtime.tv_sec, + vattr.va_fileid, &profile, &profile_size) == 0) { + /* profile is an offset in the profile data base */ + /* It is zero if no profile data was found */ + + if(profile_size == 0) { + *buffer = NULL; + *buf_size = 0; + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return 0; + } + ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size)); + if(ret) { + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return ENOMEM; + } + *buf_size = profile_size; + while(profile_size) { + error = vn_rdwr(UIO_READ, data_vp, + (caddr_t) *buffer, profile_size, + profile, UIO_SYSSPACE, IO_NODELOCKED, + p->p_ucred, &resid, p); + if((error) || (profile_size == resid)) { + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + bsd_close_page_cache_files(uid_files); + kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size); + thread_funnel_set(kernel_flock, funnel_state); + return EINVAL; + } 
+ profile += profile_size - resid; + profile_size = resid; + } + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return 0; + } else { + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return EINVAL; + } + +} + +int +bsd_search_page_cache_data_base( + struct vnode *vp, + struct profile_names_header *database, + char *app_name, + unsigned int mod_date, + unsigned int inode, + off_t *profile, + unsigned int *profile_size) +{ + + struct proc *p; + + unsigned int i; + struct profile_element *element; + unsigned int ele_total; + unsigned int extended_list = 0; + off_t file_off = 0; + unsigned int size; + off_t resid_off; + int resid; + vm_offset_t local_buf = NULL; + + int error; + kern_return_t ret; + + p = current_proc(); + + if(((vm_offset_t)database->element_array) != + sizeof(struct profile_names_header)) { + return EINVAL; + } + element = (struct profile_element *)( + (vm_offset_t)database->element_array + + (vm_offset_t)database); + + ele_total = database->number_of_profiles; + + *profile = 0; + *profile_size = 0; + while(ele_total) { + /* note: code assumes header + n*ele comes out on a page boundary */ + if(((local_buf == 0) && (sizeof(struct profile_names_header) + + (ele_total * sizeof(struct profile_element))) + > (PAGE_SIZE * 4)) || + ((local_buf != 0) && + (ele_total * sizeof(struct profile_element)) + > (PAGE_SIZE * 4))) { + extended_list = ele_total; + if(element == (struct profile_element *) + ((vm_offset_t)database->element_array + + (vm_offset_t)database)) { + ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1; + } else { + ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element); + } + extended_list -= ele_total; + } + for (i=0; i + (PAGE_SIZE * 4)) { + size = PAGE_SIZE * 4; + } else { + size = ele_total * sizeof(struct profile_element); + } + resid_off = 0; + while(size) { + error = vn_rdwr(UIO_READ, vp, + CAST_DOWN(caddr_t, (local_buf + resid_off)), + size, file_off + resid_off, UIO_SYSSPACE, + IO_NODELOCKED, p->p_ucred, &resid, p); + if((error) || (size == resid)) { + if(local_buf != NULL) { + kmem_free(kernel_map, + (vm_offset_t)local_buf, + 4 * PAGE_SIZE); + } + return EINVAL; + } + resid_off += size-resid; + size = resid; + } + } + if(local_buf != NULL) { + kmem_free(kernel_map, + (vm_offset_t)local_buf, 4 * PAGE_SIZE); + } + return 0; +} + +int +bsd_write_page_cache_file( + unsigned int user, + char *file_name, + caddr_t buffer, + vm_size_t size, + int mod, + int fid) +{ + struct proc *p; + struct nameidata nd; + struct vnode *vp = 0; + int resid; + off_t resid_off; + int error; + boolean_t funnel_state; + struct vattr vattr; + struct vattr data_vattr; + + off_t profile; + unsigned int profile_size; + + vm_offset_t names_buf; + struct vnode *names_vp; + struct vnode *data_vp; + struct vnode *vp1; + struct vnode *vp2; + + struct profile_names_header *profile_header; + off_t name_offset; + + struct global_profile *uid_files; + + + funnel_state = thread_funnel_set(kernel_flock, TRUE); + + + + error = bsd_open_page_cache_files(user, &uid_files); + if(error) { + thread_funnel_set(kernel_flock, funnel_state); + return EINVAL; + } + + p = current_proc(); + + names_vp = uid_files->names_vp; + data_vp = uid_files->data_vp; + names_buf = uid_files->buf_ptr; + + /* + * Get locks on both files, get the vnode with the lowest address first + */ + + if((unsigned 
int)names_vp < (unsigned int)data_vp) { + vp1 = names_vp; + vp2 = data_vp; + } else { + vp1 = data_vp; + vp2 = names_vp; + } + + error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p); + if(error) { + printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return error; + } + error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p); + if(error) { + printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user); + VOP_UNLOCK(vp1, 0, p); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return error; + } + + /* Stat data file for size */ + + if(error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) { + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return error; + } + + if (bsd_search_page_cache_data_base(names_vp, + (struct profile_names_header *)names_buf, + file_name, (unsigned int) mod, + fid, &profile, &profile_size) == 0) { + /* profile is an offset in the profile data base */ + /* It is zero if no profile data was found */ + + if(profile_size == 0) { + unsigned int header_size; + vm_offset_t buf_ptr; + + /* Our Write case */ + + /* read header for last entry */ + profile_header = + (struct profile_names_header *)names_buf; + name_offset = sizeof(struct profile_names_header) + + (sizeof(struct profile_element) + * profile_header->number_of_profiles); + profile_header->number_of_profiles += 1; + + if(name_offset < PAGE_SIZE * 4) { + struct profile_element *name; + /* write new entry */ + name = (struct profile_element *) + (names_buf + (vm_offset_t)name_offset); + name->addr = data_vattr.va_size; + name->size = size; + name->mod_date = mod; + name->inode = fid; + strncpy (name->name, file_name, 12); + } else { + unsigned int ele_size; + struct profile_element name; + /* write new entry */ + name.addr = data_vattr.va_size; + name.size = size; + name.mod_date = mod; + name.inode = fid; + strncpy (name.name, file_name, 12); + /* write element out separately */ + ele_size = sizeof(struct profile_element); + buf_ptr = (vm_offset_t)&name; + resid_off = name_offset; + + while(ele_size) { + error = vn_rdwr(UIO_WRITE, names_vp, + (caddr_t)buf_ptr, + ele_size, resid_off, + UIO_SYSSPACE, IO_NODELOCKED, + p->p_ucred, &resid, p); + if(error) { + printf("bsd_write_page_cache_file: Can't write name_element %x\n", user); + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + bsd_close_page_cache_files( + uid_files); + thread_funnel_set( + kernel_flock, + funnel_state); + return error; + } + buf_ptr += (vm_offset_t) + ele_size-resid; + resid_off += ele_size-resid; + ele_size = resid; + } + } + + if(name_offset < PAGE_SIZE * 4) { + header_size = name_offset + + sizeof(struct profile_element); + + } else { + header_size = + sizeof(struct profile_names_header); + } + buf_ptr = (vm_offset_t)profile_header; + resid_off = 0; + + /* write names file header */ + while(header_size) { + error = vn_rdwr(UIO_WRITE, names_vp, + (caddr_t)buf_ptr, + header_size, resid_off, + UIO_SYSSPACE, IO_NODELOCKED, + p->p_ucred, &resid, p); + if(error) { + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + printf("bsd_write_page_cache_file: Can't write header %x\n", user); + bsd_close_page_cache_files( + uid_files); + thread_funnel_set( + kernel_flock, funnel_state); + return error; + } + 
buf_ptr += (vm_offset_t)header_size-resid; + resid_off += header_size-resid; + header_size = resid; + } + /* write profile to data file */ + resid_off = data_vattr.va_size; + while(size) { + error = vn_rdwr(UIO_WRITE, data_vp, + (caddr_t)buffer, size, resid_off, + UIO_SYSSPACE, IO_NODELOCKED, + p->p_ucred, &resid, p); + if(error) { + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + printf("bsd_write_page_cache_file: Can't write header %x\n", user); + bsd_close_page_cache_files( + uid_files); + thread_funnel_set( + kernel_flock, funnel_state); + return error; + } + buffer += size-resid; + resid_off += size-resid; + size = resid; + } + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return 0; + } + /* Someone else wrote a twin profile before us */ + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return 0; + } else { + VOP_UNLOCK(names_vp, 0, p); + VOP_UNLOCK(data_vp, 0, p); + bsd_close_page_cache_files(uid_files); + thread_funnel_set(kernel_flock, funnel_state); + return EINVAL; + } + +} + +int +prepare_profile_database(int user) +{ + char *cache_path = "/var/vm/app_profile/"; + struct proc *p; + int error; + int resid; + off_t resid_off; + unsigned int lru; + vm_size_t size; + + struct vnode *names_vp; + struct vnode *data_vp; + vm_offset_t names_buf; + vm_offset_t buf_ptr; + + int profile_names_length; + int profile_data_length; + char *profile_data_string; + char *profile_names_string; + char *substring; + + struct vattr vattr; + + struct profile_names_header *profile_header; + kern_return_t ret; + + struct nameidata nd_names; + struct nameidata nd_data; + + int i; + + p = current_proc(); + + ret = kmem_alloc(kernel_map, + (vm_offset_t *)&profile_data_string, PATH_MAX); + + if(ret) { + return ENOMEM; + } + + /* Split the buffer in half since we know the size of */ + /* our file path and our allocation is adequate for */ + /* both file path names */ + profile_names_string = profile_data_string + (PATH_MAX/2); + + + strcpy(profile_data_string, cache_path); + strcpy(profile_names_string, cache_path); + profile_names_length = profile_data_length + = strlen(profile_data_string); + substring = profile_data_string + profile_data_length; + sprintf(substring, "%x_data", user); + substring = profile_names_string + profile_names_length; + sprintf(substring, "%x_names", user); + + /* We now have the absolute file names */ + + ret = kmem_alloc(kernel_map, + (vm_offset_t *)&names_buf, 4 * PAGE_SIZE); + if(ret) { + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + return ENOMEM; + } + + NDINIT(&nd_names, LOOKUP, FOLLOW, + UIO_SYSSPACE, profile_names_string, p); + NDINIT(&nd_data, LOOKUP, FOLLOW, + UIO_SYSSPACE, profile_data_string, p); + + if (error = vn_open(&nd_data, + O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) { + kmem_free(kernel_map, + (vm_offset_t)names_buf, 4 * PAGE_SIZE); + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + return 0; + } + + data_vp = nd_data.ni_vp; + VOP_UNLOCK(data_vp, 0, p); + + if (error = vn_open(&nd_names, + O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) { + printf("prepare_profile_database: Can't create CacheNames %s\n", + profile_data_string); + kmem_free(kernel_map, + (vm_offset_t)names_buf, 4 * PAGE_SIZE); + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + vrele(data_vp); + return error; + } + + 
names_vp = nd_names.ni_vp; + + + /* Write Header for new names file */ + + profile_header = (struct profile_names_header *)names_buf; + + profile_header->number_of_profiles = 0; + profile_header->user_id = user; + profile_header->version = 1; + profile_header->element_array = + sizeof(struct profile_names_header); + profile_header->spare1 = 0; + profile_header->spare2 = 0; + profile_header->spare3 = 0; + + size = sizeof(struct profile_names_header); + buf_ptr = (vm_offset_t)profile_header; + resid_off = 0; + + while(size) { + error = vn_rdwr(UIO_WRITE, names_vp, + (caddr_t)buf_ptr, size, resid_off, + UIO_SYSSPACE, IO_NODELOCKED, + p->p_ucred, &resid, p); + if(error) { + printf("prepare_profile_database: Can't write header %s\n", profile_names_string); + kmem_free(kernel_map, + (vm_offset_t)names_buf, 4 * PAGE_SIZE); + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, + PATH_MAX); + vput(names_vp); + vrele(data_vp); + return error; + } + buf_ptr += size-resid; + resid_off += size-resid; + size = resid; + } + + VATTR_NULL(&vattr); + vattr.va_uid = user; + error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p); + if(error) { + printf("prepare_profile_database: " + "Can't set user %s\n", profile_names_string); + } + vput(names_vp); + + error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p); + if(error) { + vrele(data_vp); + printf("prepare_profile_database: cannot lock data file %s\n", + profile_data_string); + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + kmem_free(kernel_map, + (vm_offset_t)names_buf, 4 * PAGE_SIZE); + } + VATTR_NULL(&vattr); + vattr.va_uid = user; + error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p); + if(error) { + printf("prepare_profile_database: " + "Can't set user %s\n", profile_data_string); + } + + vput(data_vp); + kmem_free(kernel_map, + (vm_offset_t)profile_data_string, PATH_MAX); + kmem_free(kernel_map, + (vm_offset_t)names_buf, 4 * PAGE_SIZE); + return 0; + +}
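A recurring pattern in this revision is translating Mach kern_return_t codes into BSD errnos, as the reworked vslock() and vsunlock() bodies now do for the vm_map_wire() and vm_map_unwire() results. The sketch below shows that mapping in isolation as a user-space program compiled against the Mach headers; the helper name is hypothetical, and only the return codes handled by the diff are covered.

/*
 * Sketch of the Mach-to-BSD error mapping applied by the new
 * vslock()/vsunlock() bodies.  Constants come from <mach/kern_return.h>
 * and <errno.h>; the helper name is illustrative only.
 */
#include <mach/kern_return.h>
#include <errno.h>
#include <stdio.h>

static int
mach_to_bsd_errno(kern_return_t kret)
{
	switch (kret) {
	case KERN_SUCCESS:
		return 0;
	case KERN_INVALID_ADDRESS:      /* bad user address range           */
	case KERN_NO_SPACE:
		return ENOMEM;
	case KERN_PROTECTION_FAILURE:   /* wiring would violate protections */
		return EACCES;
	default:
		return EINVAL;
	}
}

int
main(void)
{
	printf("%d %d %d\n",
	    mach_to_bsd_errno(KERN_SUCCESS),
	    mach_to_bsd_errno(KERN_INVALID_ADDRESS),
	    mach_to_bsd_errno(KERN_PROTECTION_FAILURE));
	return 0;
}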