X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/43866e378188c25dd1e2208016ab3cbeb086ae6c..5eebf7385fedb1517b66b53c28e5aa6bb0a2be50:/bsd/vm/vm_unix.c

diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c
index 7922e830f..a322679ae 100644
--- a/bsd/vm/vm_unix.c
+++ b/bsd/vm/vm_unix.c
@@ -1,24 +1,21 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
  *
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
  *
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -59,6 +56,9 @@
 #include
 #include

+#include
+#include
+
 #include
 #include
 #include
@@ -69,7 +69,6 @@
 #include

-extern shared_region_mapping_t system_shared_region;
 extern zone_t lsf_zone;

 useracc(addr, len, prot)
@@ -79,7 +78,7 @@ useracc(addr, len, prot)
 {
        return (vm_map_check_protection(
                        current_map(),
-                       trunc_page(addr), round_page(addr+len),
+                       trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)),
                        prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
 }

@@ -88,8 +87,8 @@ vslock(addr, len)
        int     len;
 {
        kern_return_t kret;
-       kret = vm_map_wire(current_map(), trunc_page(addr),
-                       round_page(addr+len),
+       kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr),
+                       round_page_32((unsigned int)(addr+len)),
                        VM_PROT_READ | VM_PROT_WRITE ,FALSE);

        switch (kret) {
@@ -120,7 +119,7 @@ vsunlock(addr, len, dirtied)
 #if FIXME /* [ */
        if (dirtied) {
                pmap = get_task_pmap(current_task());
-               for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
+               for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len));
                                vaddr += PAGE_SIZE) {
                        paddr = pmap_extract(pmap, vaddr);
                        pg = PHYS_TO_VM_PAGE(paddr);
@@ -131,8 +130,8 @@ vsunlock(addr, len, dirtied)
 #ifdef lint
        dirtied++;
 #endif /* lint */
-       kret = vm_map_unwire(current_map(), trunc_page(addr),
-                       round_page(addr+len), FALSE);
+       kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)),
+                       round_page_32((unsigned int)(addr+len)), FALSE);
        switch (kret) {
        case KERN_SUCCESS:
                return (0);
@@ -244,6 +243,9 @@ pid_for_task(t, x)
        kern_return_t   err = KERN_SUCCESS;
        boolean_t funnel_state;

+       AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
+       AUDIT_ARG(mach_port1, t);
+
        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        t1 = port_name_to_task(t);

@@ -261,8 +263,10 @@ pid_for_task(t, x)
        }
        task_deallocate(t1);
 pftout:
+       AUDIT_ARG(pid, pid);
        (void) copyout((char *) &pid, (char *) x, sizeof(*x));
        thread_funnel_set(kernel_flock, funnel_state);
+       AUDIT_MACH_SYSCALL_EXIT(err);
        return(err);
 }

@@ -290,9 +294,14 @@ task_for_pid(target_tport, pid, t)
        int error = 0;
        boolean_t funnel_state;

+       AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
+       AUDIT_ARG(pid, pid);
+       AUDIT_ARG(mach_port1, target_tport);
+
        t1 = port_name_to_task(target_tport);
        if (t1 == TASK_NULL) {
                (void ) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
+               AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
                return(KERN_FAILURE);
        }

@@ -300,8 +309,10 @@ task_for_pid(target_tport, pid, t)

 restart:
        p1 = get_bsdtask_info(t1);
+       p = pfind(pid);
+       AUDIT_ARG(process, p);
        if (
-               ((p = pfind(pid)) != (struct proc *) 0)
+               (p != (struct proc *) 0)
                && (p1 != (struct proc *) 0)
                && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid)
                && ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
@@ -319,6 +330,7 @@ task_for_pid(target_tport, pid, t)
                                        get_task_ipcspace(current_task()));
                } else
                        tret = MACH_PORT_NULL;
+               AUDIT_ARG(mach_port2, tret);
                (void ) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
                task_deallocate(t1);
                error = KERN_SUCCESS;
@@ -330,6 +342,7 @@ task_for_pid(target_tport, pid, t)
        error = KERN_FAILURE;
 tfpout:
        thread_funnel_set(kernel_flock, funnel_state);
+       AUDIT_MACH_SYSCALL_EXIT(error);
        return(error);
 }

@@ -382,7 +395,7 @@ load_shared_file(
        ndp = &nd;

-
+       AUDIT_ARG(addr, base_address);
        /* Retrieve the base address */
        if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
                goto lsf_bailout;
        }
@@ -392,12 +405,33 @@ load_shared_file(
        }

        if(local_flags & QUERY_IS_SYSTEM_REGION) {
+               shared_region_mapping_t default_shared_region;
                vm_get_shared_region(current_task(), &shared_region);
-               if (shared_region == system_shared_region) {
+               task_mapping_info.self = (vm_offset_t)shared_region;
+
+               shared_region_mapping_info(shared_region,
+                       &(task_mapping_info.text_region),
+                       &(task_mapping_info.text_size),
+                       &(task_mapping_info.data_region),
+                       &(task_mapping_info.data_size),
+                       &(task_mapping_info.region_mappings),
+                       &(task_mapping_info.client_base),
+                       &(task_mapping_info.alternate_base),
+                       &(task_mapping_info.alternate_next),
+                       &(task_mapping_info.fs_base),
+                       &(task_mapping_info.system),
+                       &(task_mapping_info.flags), &next);
+
+               default_shared_region =
+                       lookup_default_shared_region(
+                               ENV_DEFAULT_ROOT,
+                               task_mapping_info.system);
+               if (shared_region == default_shared_region) {
                        local_flags = SYSTEM_REGION_BACKED;
                } else {
                        local_flags = 0;
                }
+               shared_region_mapping_dealloc(default_shared_region);
                error = 0;
                error = copyout(&local_flags, flags, sizeof (int));
                goto lsf_bailout;
@@ -431,7 +465,7 @@ load_shared_file(
        /*
         * Get a vnode for the target file
         */
-       NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
+       NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE,
            filename_str, p);

        if ((error = namei(ndp))) {
@@ -458,28 +492,6 @@ load_shared_file(
                goto lsf_bailout_free_vput;
        }

-       vm_get_shared_region(current_task(), &shared_region);
-       if(shared_region == system_shared_region) {
-               default_regions = 1;
-       }
-       if(((vp->v_mount != rootvnode->v_mount)
-               && (shared_region == system_shared_region))
-               && (lsf_mapping_pool_gauge() < 75)) {
-               /* We don't want to run out of shared memory */
-               /* map entries by starting too many private versions */
-               /* of the shared library structures */
-               int error;
-               if(p->p_flag & P_NOSHLIB) {
-                       error = clone_system_shared_regions(FALSE);
-               } else {
-                       error = clone_system_shared_regions(TRUE);
-               }
-               if (error) {
-                       goto lsf_bailout_free_vput;
-               }
-               local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
-               vm_get_shared_region(current_task(), &shared_region);
-       }
 #ifdef notdef
        if(vattr.va_size != mapped_file_size) {
                error = EINVAL;
@@ -493,13 +505,13 @@ load_shared_file(

        /* load alternate regions if the caller has requested. */
        /* Note: the new regions are "clean slates" */
        if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
-               error = clone_system_shared_regions(FALSE);
+               error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
                if (error) {
                        goto lsf_bailout_free_vput;
                }
-               vm_get_shared_region(current_task(), &shared_region);
        }

+       vm_get_shared_region(current_task(), &shared_region);
        task_mapping_info.self = (vm_offset_t)shared_region;

        shared_region_mapping_info(shared_region,
@@ -511,8 +523,54 @@ load_shared_file(
                &(task_mapping_info.client_base),
                &(task_mapping_info.alternate_base),
                &(task_mapping_info.alternate_next),
+               &(task_mapping_info.fs_base),
+               &(task_mapping_info.system),
                &(task_mapping_info.flags), &next);

+       {
+               shared_region_mapping_t default_shared_region;
+               default_shared_region =
+                       lookup_default_shared_region(
+                               ENV_DEFAULT_ROOT,
+                               task_mapping_info.system);
+               if(shared_region == default_shared_region) {
+                       default_regions = 1;
+               }
+               shared_region_mapping_dealloc(default_shared_region);
+       }
+       /* If we are running on a removable file system we must not */
+       /* be in a set of shared regions or the file system will not */
+       /* be removable. */
+       if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
+               && (lsf_mapping_pool_gauge() < 75)) {
+               /* We don't want to run out of shared memory */
+               /* map entries by starting too many private versions */
+               /* of the shared library structures */
+               int error;
+               if(p->p_flag & P_NOSHLIB) {
+                       error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
+               } else {
+                       error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);
+               }
+               if (error) {
+                       goto lsf_bailout_free_vput;
+               }
+               local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
+               vm_get_shared_region(current_task(), &shared_region);
+               shared_region_mapping_info(shared_region,
+                       &(task_mapping_info.text_region),
+                       &(task_mapping_info.text_size),
+                       &(task_mapping_info.data_region),
+                       &(task_mapping_info.data_size),
+                       &(task_mapping_info.region_mappings),
+                       &(task_mapping_info.client_base),
+                       &(task_mapping_info.alternate_base),
+                       &(task_mapping_info.alternate_next),
+                       &(task_mapping_info.fs_base),
+                       &(task_mapping_info.system),
+                       &(task_mapping_info.flags), &next);
+       }
+
        /* This is a work-around to allow executables which have been */
        /* built without knowledge of the proper shared segment to */
        /* load. This code has been architected as a shared region */
@@ -622,6 +680,7 @@ reset_shared_file(
        int             i;
        kern_return_t   kret;
+       AUDIT_ARG(addr, base_address);
        /* Retrieve the base address */
        if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
                goto rsf_bailout;
        }
@@ -692,24 +751,8 @@ new_system_shared_regions(
                return EINVAL;
        }

-       /* get current shared region info for */
-       /* restoration after new system shared */
-       /* regions are in place */
-       vm_get_shared_region(current_task(), &regions);
-
-       /* usually only called at boot time */
-       /* shared_file_boot_time_init creates */
-       /* a new set of system shared regions */
-       /* and places them as the system */
-       /* shared regions. */
-       shared_file_boot_time_init();
-
-       /* set current task back to its */
-       /* original regions. */
-       vm_get_shared_region(current_task(), &new_regions);
-       shared_region_mapping_dealloc(new_regions);
-
-       vm_set_shared_region(current_task(), regions);
+       /* clear all of our existing defaults */
+       remove_all_shared_regions();

        *retval = 0;
        return 0;
@@ -718,7 +761,7 @@ new_system_shared_regions(

 int
-clone_system_shared_regions(shared_regions_active)
+clone_system_shared_regions(shared_regions_active, base_vnode)
 {
        shared_region_mapping_t new_shared_region;
        shared_region_mapping_t next;
@@ -728,8 +771,6 @@ clone_system_shared_regions(shared_regions_active)
        struct proc     *p;

-       if (shared_file_create_system_region(&new_shared_region))
-               return (ENOMEM);
        vm_get_shared_region(current_task(), &old_shared_region);
        old_info.self = (vm_offset_t)old_shared_region;
        shared_region_mapping_info(old_shared_region,
@@ -741,7 +782,27 @@ clone_system_shared_regions(shared_regions_active)
                &(old_info.client_base),
                &(old_info.alternate_base),
                &(old_info.alternate_next),
+               &(old_info.fs_base),
+               &(old_info.system),
                &(old_info.flags), &next);
+       if ((shared_regions_active) ||
+               (base_vnode == ENV_DEFAULT_ROOT)) {
+               if (shared_file_create_system_region(&new_shared_region))
+                       return (ENOMEM);
+       } else {
+               new_shared_region =
+                       lookup_default_shared_region(
+                               base_vnode, old_info.system);
+               if(new_shared_region == NULL) {
+                       shared_file_boot_time_init(
+                               base_vnode, old_info.system);
+                       vm_get_shared_region(current_task(), &new_shared_region);
+               } else {
+                       vm_set_shared_region(current_task(), new_shared_region);
+               }
+               if(old_shared_region)
+                       shared_region_mapping_dealloc(old_shared_region);
+       }
        new_info.self = (vm_offset_t)new_shared_region;
        shared_region_mapping_info(new_shared_region,
                &(new_info.text_region),
@@ -752,6 +813,8 @@ clone_system_shared_regions(shared_regions_active)
                &(new_info.client_base),
                &(new_info.alternate_base),
                &(new_info.alternate_next),
+               &(new_info.fs_base),
+               &(new_info.system),
                &(new_info.flags), &next);
        if(shared_regions_active) {
                if(vm_region_clone(old_info.text_region, new_info.text_region)) {
@@ -907,48 +970,92 @@ restart:
        }

        lru = global_user_profile_cache.age;
+       *profile = NULL;
        for(i = 0; i<global_user_profile_cache.max_ele; i++) {
                        (*profile)->age = global_user_profile_cache.age;
-                       global_user_profile_cache.age+=1;
                        break;
                }
+               /* Otherwise grab the oldest entry */
                if(global_user_profile_cache.profiles[i].age < lru) {
                        lru = global_user_profile_cache.profiles[i].age;
                        *profile = &global_user_profile_cache.profiles[i];
                }
        }
+       /* Did we set it? */
+       if (*profile == NULL) {
+               /*
+                * No entries are available; this can only happen if all
+                * of them are currently in the process of being reused;
+                * if this happens, we sleep on the address of the first
+                * element, and restart. This is less than ideal, but we
+                * know it will work because we know that there will be a
+                * wakeup on any entry currently in the process of being
+                * reused.
+                *
+                * XXX Reccomend a two handed clock and more than 3 total
+                * XXX cache entries at some point in the future.
+                */
+               /*
+                * drop funnel and wait
+                */
+               (void)tsleep((void *)
+                       &global_user_profile_cache.profiles[0],
+                       PRIBIO, "app_profile", 0);
+               goto restart;
+       }
+
+       /*
+        * If it's currently busy, we've picked the one at the end of the
+        * LRU list, but it's currently being actively used. We sleep on
+        * its address and restart.
+        */
        if ((*profile)->busy) {
                /*
                 * drop funnel and wait
                 */
                (void)tsleep((void *)
-                       &(global_user_profile_cache),
+                       *profile,
                        PRIBIO, "app_profile", 0);
                goto restart;
        }
        (*profile)->busy = 1;
        (*profile)->user = user;

-       if((*profile)->data_vp != NULL) {
+       /*
+        * put dummy value in for now to get competing request to wait
+        * above until we are finished
+        *
+        * Save the data_vp before setting it, so we can set it before
+        * we kmem_free() or vrele(). If we don't do this, then we
+        * have a potential funnel race condition we have to deal with.
+        */
+       data_vp = (*profile)->data_vp;
+       (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
+
+       /*
+        * Age the cache here in all cases; this guarantees that we won't
+        * be reusing only one entry over and over, once the system reaches
+        * steady-state.
+        */
+       global_user_profile_cache.age+=1;
+
+       if(data_vp != NULL) {
                kmem_free(kernel_map,
                                (*profile)->buf_ptr, 4 * PAGE_SIZE);
                if ((*profile)->names_vp) {
                        vrele((*profile)->names_vp);
                        (*profile)->names_vp = NULL;
                }
-               if ((*profile)->data_vp) {
-                       vrele((*profile)->data_vp);
-                       (*profile)->data_vp = NULL;
-               }
+               vrele(data_vp);
        }
-
-       /* put dummy value in for now to get */
-       /* competing request to wait above */
-       /* until we are finished */
-       (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

        /* Try to open the appropriate users profile files */
        /* If neither file is present, try to create them */
@@ -956,7 +1063,6 @@ restart:
        /* If the files do exist, check them for the app_file */
        /* requested and read it in if present */

-
        ret = kmem_alloc(kernel_map,
                (vm_offset_t *)&profile_data_string, PATH_MAX);

@@ -1337,7 +1443,7 @@ bsd_search_page_cache_data_base(
        resid_off = 0;
        while(size) {
                error = vn_rdwr(UIO_READ, vp,
-                       (caddr_t)(local_buf + resid_off),
+                       CAST_DOWN(caddr_t, (local_buf + resid_off)),
                        size, file_off + resid_off, UIO_SYSSPACE,
                        IO_NODELOCKED, p->p_ucred, &resid, p);
                if((error) || (size == resid)) {