#include <vm/vm_shared_memory_server.h>
-extern shared_region_mapping_t system_shared_region;
extern zone_t lsf_zone;
useracc(addr, len, prot)
{
return (vm_map_check_protection(
current_map(),
- trunc_page(addr), round_page(addr+len),
+ trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)),
prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
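/*
 * Illustrative usage sketch (assumed caller, not part of this file):
 * a kernel client typically checks, wires, accesses, and then unwires
 * a user buffer with these routines, e.g.
 *
 *	if (!useracc(uaddr, len, B_READ))
 *		return (EFAULT);
 *	vslock(uaddr, len);
 *	error = copyin(uaddr, kbuf, len);
 *	vsunlock(uaddr, len, 0);	(0 => pages were not dirtied)
 */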
int len;
{
kern_return_t kret;
- kret = vm_map_wire(current_map(), trunc_page(addr),
- round_page(addr+len),
+ kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr),
+ round_page_32((unsigned int)(addr+len)),
VM_PROT_READ | VM_PROT_WRITE ,FALSE);
switch (kret) {
#if FIXME /* [ */
if (dirtied) {
pmap = get_task_pmap(current_task());
- for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
+ for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len));
vaddr += PAGE_SIZE) {
paddr = pmap_extract(pmap, vaddr);
pg = PHYS_TO_VM_PAGE(paddr);
#ifdef lint
dirtied++;
#endif /* lint */
- kret = vm_map_unwire(current_map(), trunc_page(addr),
- round_page(addr+len), FALSE);
+ kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)),
+ round_page_32((unsigned int)(addr+len)), FALSE);
switch (kret) {
case KERN_SUCCESS:
return (0);
}
if(local_flags & QUERY_IS_SYSTEM_REGION) {
+ shared_region_mapping_t default_shared_region;
vm_get_shared_region(current_task(), &shared_region);
- if (shared_region == system_shared_region) {
+ task_mapping_info.self = (vm_offset_t)shared_region;
+
+ shared_region_mapping_info(shared_region,
+ &(task_mapping_info.text_region),
+ &(task_mapping_info.text_size),
+ &(task_mapping_info.data_region),
+ &(task_mapping_info.data_size),
+ &(task_mapping_info.region_mappings),
+ &(task_mapping_info.client_base),
+ &(task_mapping_info.alternate_base),
+ &(task_mapping_info.alternate_next),
+ &(task_mapping_info.fs_base),
+ &(task_mapping_info.system),
+ &(task_mapping_info.flags), &next);
+
+ default_shared_region =
+ lookup_default_shared_region(
+ ENV_DEFAULT_ROOT,
+ task_mapping_info.system);
+ if (shared_region == default_shared_region) {
local_flags = SYSTEM_REGION_BACKED;
} else {
local_flags = 0;
}
+ shared_region_mapping_dealloc(default_shared_region);
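/*
 * lookup_default_shared_region() hands back its result with a
 * reference held, so once the identity comparison above is done
 * that reference is dropped via shared_region_mapping_dealloc().
 */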
error = 0;
error = copyout(&local_flags, flags, sizeof (int));
goto lsf_bailout;
goto lsf_bailout_free_vput;
}
- vm_get_shared_region(current_task(), &shared_region);
- if(shared_region == system_shared_region) {
- default_regions = 1;
- }
- if(((vp->v_mount != rootvnode->v_mount)
- && (shared_region == system_shared_region))
- && (lsf_mapping_pool_gauge() < 75)) {
- /* We don't want to run out of shared memory */
- /* map entries by starting too many private versions */
- /* of the shared library structures */
- int error;
- if(p->p_flag & P_NOSHLIB) {
- error = clone_system_shared_regions(FALSE);
- } else {
- error = clone_system_shared_regions(TRUE);
- }
- if (error) {
- goto lsf_bailout_free_vput;
- }
- local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
- vm_get_shared_region(current_task(), &shared_region);
- }
#ifdef notdef
if(vattr.va_size != mapped_file_size) {
error = EINVAL;
/* load alternate regions if the caller has requested. */
/* Note: the new regions are "clean slates" */
if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
- error = clone_system_shared_regions(FALSE);
+ error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
if (error) {
goto lsf_bailout_free_vput;
}
- vm_get_shared_region(current_task(), &shared_region);
}
+ vm_get_shared_region(current_task(), &shared_region);
task_mapping_info.self = (vm_offset_t)shared_region;
shared_region_mapping_info(shared_region,
&(task_mapping_info.text_region),
&(task_mapping_info.text_size),
&(task_mapping_info.data_region),
&(task_mapping_info.data_size),
&(task_mapping_info.region_mappings),
&(task_mapping_info.client_base),
&(task_mapping_info.alternate_base),
&(task_mapping_info.alternate_next),
+ &(task_mapping_info.fs_base),
+ &(task_mapping_info.system),
+ &(task_mapping_info.flags), &next);
+
+ {
+ shared_region_mapping_t default_shared_region;
+ default_shared_region =
+ lookup_default_shared_region(
+ ENV_DEFAULT_ROOT,
+ task_mapping_info.system);
+ if(shared_region == default_shared_region) {
+ default_regions = 1;
+ }
+ shared_region_mapping_dealloc(default_shared_region);
+ }
+ /* If we are running on a removable file system we must not */
+ /* be in a set of shared regions or the file system will not */
+ /* be removable. */
+ if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
+ && (lsf_mapping_pool_gauge() < 75)) {
+ /* We don't want to run out of shared memory */
+ /* map entries by starting too many private versions */
+ /* of the shared library structures */
+ int error;
+ if(p->p_flag & P_NOSHLIB) {
+ error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
+ } else {
+ error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);
+ }
+ if (error) {
+ goto lsf_bailout_free_vput;
+ }
+ local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
+ vm_get_shared_region(current_task(), &shared_region);
+ shared_region_mapping_info(shared_region,
+ &(task_mapping_info.text_region),
+ &(task_mapping_info.text_size),
+ &(task_mapping_info.data_region),
+ &(task_mapping_info.data_size),
+ &(task_mapping_info.region_mappings),
+ &(task_mapping_info.client_base),
+ &(task_mapping_info.alternate_base),
+ &(task_mapping_info.alternate_next),
+ &(task_mapping_info.fs_base),
+ &(task_mapping_info.system),
&(task_mapping_info.flags), &next);
+ }
/* This is a work-around to allow executables which have been */
/* built without knowledge of the proper shared segment to */
/* be executed anyway. */
return EINVAL;
}
- /* get current shared region info for */
- /* restoration after new system shared */
- /* regions are in place */
- vm_get_shared_region(current_task(), &regions);
-
- /* usually only called at boot time */
- /* shared_file_boot_time_init creates */
- /* a new set of system shared regions */
- /* and places them as the system */
- /* shared regions. */
- shared_file_boot_time_init();
-
- /* set current task back to its */
- /* original regions. */
- vm_get_shared_region(current_task(), &new_regions);
- shared_region_mapping_dealloc(new_regions);
-
- vm_set_shared_region(current_task(), regions);
+ /* clear all of our existing defaults */
+ remove_all_shared_regions();
*retval = 0;
return 0;
int
-clone_system_shared_regions(shared_regions_active)
+clone_system_shared_regions(shared_regions_active, base_vnode)
{
shared_region_mapping_t new_shared_region;
shared_region_mapping_t next;
struct proc *p;
- if (shared_file_create_system_region(&new_shared_region))
- return (ENOMEM);
vm_get_shared_region(current_task(), &old_shared_region);
old_info.self = (vm_offset_t)old_shared_region;
shared_region_mapping_info(old_shared_region,
&(old_info.text_region),
&(old_info.text_size),
&(old_info.data_region),
&(old_info.data_size),
&(old_info.region_mappings),
&(old_info.client_base),
&(old_info.alternate_base),
&(old_info.alternate_next),
+ &(old_info.fs_base),
+ &(old_info.system),
&(old_info.flags), &next);
+ if ((shared_regions_active) ||
+ (base_vnode == ENV_DEFAULT_ROOT)) {
+ if (shared_file_create_system_region(&new_shared_region))
+ return (ENOMEM);
+ } else {
+ new_shared_region =
+ lookup_default_shared_region(
+ base_vnode, old_info.system);
+ if(new_shared_region == NULL) {
+ shared_file_boot_time_init(
+ base_vnode, old_info.system);
+ vm_get_shared_region(current_task(), &new_shared_region);
+ } else {
+ vm_set_shared_region(current_task(), new_shared_region);
+ }
+ if(old_shared_region)
+ shared_region_mapping_dealloc(old_shared_region);
+ }
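/*
 * At this point new_shared_region is either a freshly created
 * system region (the active-regions / default-root case above) or
 * the default region for base_vnode, built on demand by
 * shared_file_boot_time_init() when the lookup found none.
 */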
new_info.self = (vm_offset_t)new_shared_region;
shared_region_mapping_info(new_shared_region,
&(new_info.text_region),
&(new_info.text_size),
&(new_info.data_region),
&(new_info.data_size),
&(new_info.region_mappings),
&(new_info.client_base),
&(new_info.alternate_base),
&(new_info.alternate_next),
+ &(new_info.fs_base),
+ &(new_info.system),
&(new_info.flags), &next);
if(shared_regions_active) {
if(vm_region_clone(old_info.text_region, new_info.text_region)) {
}
lru = global_user_profile_cache.age;
+ *profile = NULL;
for(i = 0; i<global_user_profile_cache.max_ele; i++) {
+ /* Skip entry if it is in the process of being reused */
+ if(global_user_profile_cache.profiles[i].data_vp ==
+ (struct vnode *)0xFFFFFFFF)
+ continue;
+ /* Otherwise grab the first empty entry */
if(global_user_profile_cache.profiles[i].data_vp == NULL) {
*profile = &global_user_profile_cache.profiles[i];
(*profile)->age = global_user_profile_cache.age;
- global_user_profile_cache.age+=1;
break;
}
+ /* Otherwise grab the oldest entry */
if(global_user_profile_cache.profiles[i].age < lru) {
lru = global_user_profile_cache.profiles[i].age;
*profile = &global_user_profile_cache.profiles[i];
}
}
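/*
 * The scan above takes the first empty slot if one exists; otherwise
 * it settles on the least recently used entry, skipping any slot
 * whose data_vp holds the 0xFFFFFFFF sentinel (an entry that is
 * still mid-recycle).
 */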
+ /* Did the scan find a usable entry? */
+ if (*profile == NULL) {
+ /*
+ * No entries are available; this can only happen if all
+ * of them are currently in the process of being reused;
+ * if this happens, we sleep on the address of the first
+ * element, and restart. This is less than ideal, but we
+ * know it will work because a wakeup is issued on any entry
+ * currently in the process of being reused.
+ *
+ * XXX Recommend a two-handed clock and more than 3 total
+ * XXX cache entries at some point in the future.
+ */
+ /*
+ * drop funnel and wait
+ */
+ (void)tsleep((void *)
+ &global_user_profile_cache.profiles[0],
+ PRIBIO, "app_profile", 0);
+ goto restart;
+ }
+
+ /*
+ * If it's busy, we've picked the entry at the end of the
+ * LRU list, but it's currently being actively used. We sleep on
+ * its address and restart.
+ */
if ((*profile)->busy) {
/*
* drop funnel and wait
*/
(void)tsleep((void *)
- &(global_user_profile_cache),
+ *profile,
PRIBIO, "app_profile", 0);
goto restart;
}
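/*
 * Because the sleep channel is the entry itself, only the threads
 * waiting on this particular profile are resumed; the path that
 * clears the busy flag is assumed to issue the matching wakeup()
 * on the same profile pointer.
 */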
(*profile)->busy = 1;
(*profile)->user = user;
- if((*profile)->data_vp != NULL) {
+ /*
+ * put a dummy value in for now to get competing requests to wait
+ * above until we are finished
+ *
+ * Save the data_vp before setting it, so we can set it before
+ * we kmem_free() or vrele(). If we don't do this, then we
+ * have a potential funnel race condition we have to deal with.
+ */
+ data_vp = (*profile)->data_vp;
+ (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
+
+ /*
+ * Age the cache here in all cases; this guarantees that we won't
+ * be reusing only one entry over and over, once the system reaches
+ * steady-state.
+ */
+ global_user_profile_cache.age+=1;
+
+ if(data_vp != NULL) {
kmem_free(kernel_map,
(*profile)->buf_ptr, 4 * PAGE_SIZE);
if ((*profile)->names_vp) {
vrele((*profile)->names_vp);
(*profile)->names_vp = NULL;
}
- if ((*profile)->data_vp) {
- vrele((*profile)->data_vp);
- (*profile)->data_vp = NULL;
- }
+ vrele(data_vp);
}
-
- /* put dummy value in for now to get */
- /* competing request to wait above */
- /* until we are finished */
- (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
/* Try to open the appropriate users profile files */
/* If neither file is present, try to create them */
/* If the files do exist, check them for the app_file */
/* requested and read it in if present */
-
ret = kmem_alloc(kernel_map,
(vm_offset_t *)&profile_data_string, PATH_MAX);
resid_off = 0;
while(size) {
error = vn_rdwr(UIO_READ, vp,
- (caddr_t)(local_buf + resid_off),
+ CAST_DOWN(caddr_t, (local_buf + resid_off)),
size, file_off + resid_off, UIO_SYSSPACE,
IO_NODELOCKED, p->p_ucred, &resid, p);
if((error) || (size == resid)) {