diff --git a/bsd/vm/vm_unix.c b/bsd/vm/vm_unix.c
index 7922e830f6c3e5125c30d65d4428b5e7e8be7ceb..f84e8b4c54e02cdeac902636688a0fd48a829fa4 100644
--- a/bsd/vm/vm_unix.c
+++ b/bsd/vm/vm_unix.c
@@ -69,7 +69,6 @@
 #include <vm/vm_shared_memory_server.h>
 
 
-extern shared_region_mapping_t system_shared_region;
 extern zone_t lsf_zone;
 
 useracc(addr, len, prot)
@@ -79,7 +78,7 @@ useracc(addr, len, prot)
 {
        return (vm_map_check_protection(
                        current_map(),
-                       trunc_page(addr), round_page(addr+len),
+                       trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)),
                        prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
 }
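This hunk (and the two that follow) swaps trunc_page()/round_page() for explicitly 32-bit variants with unsigned casts. A minimal user-space sketch of what such page-rounding macros compute, assuming a 4 KB page; the macro definitions here are illustrative stand-ins for the kernel's machine-dependent ones:

    #include <stdio.h>

    #define PAGE_SIZE_32  4096u                /* assumed 4 KB page */
    #define PAGE_MASK_32  (PAGE_SIZE_32 - 1)
    #define trunc_page_32(x)  ((unsigned int)(x) & ~PAGE_MASK_32)
    #define round_page_32(x)  trunc_page_32((unsigned int)(x) + PAGE_MASK_32)

    int main(void)
    {
        unsigned int addr = 0x1234, len = 0x2000;
        /* the checked/wired range always covers whole pages */
        printf("0x%x..0x%x\n",
               trunc_page_32(addr), round_page_32(addr + len));
        return 0;                              /* prints 0x1000..0x4000 */
    }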
 
@@ -88,8 +87,8 @@ vslock(addr, len)
        int     len;
 {
 kern_return_t kret;
-       kret = vm_map_wire(current_map(), trunc_page(addr),
-                       round_page(addr+len), 
+       kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr),
+                       round_page_32((unsigned int)(addr+len)), 
                        VM_PROT_READ | VM_PROT_WRITE ,FALSE);
 
        switch (kret) {
@@ -120,7 +119,7 @@ vsunlock(addr, len, dirtied)
 #if FIXME  /* [ */
        if (dirtied) {
                pmap = get_task_pmap(current_task());
-               for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
+               for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len));
                                vaddr += PAGE_SIZE) {
                        paddr = pmap_extract(pmap, vaddr);
                        pg = PHYS_TO_VM_PAGE(paddr);
@@ -131,8 +130,8 @@ vsunlock(addr, len, dirtied)
 #ifdef lint
        dirtied++;
 #endif /* lint */
-       kret = vm_map_unwire(current_map(), trunc_page(addr),
-                               round_page(addr+len), FALSE);
+       kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)),
+                               round_page_32((unsigned int)(addr+len)), FALSE);
        switch (kret) {
        case KERN_SUCCESS:
                return (0);
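Both vslock() and vsunlock() finish by translating the Mach kern_return_t from the wire/unwire call into a BSD errno, as in the switch whose top is visible above. A sketch of that mapping as it is commonly written in these routines; the enumerators' numeric values are illustrative (the kernel takes them from <mach/kern_return.h>):

    #include <errno.h>

    enum kern_return { KERN_SUCCESS, KERN_INVALID_ADDRESS, KERN_NO_SPACE,
                       KERN_PROTECTION_FAILURE };

    /* Mach VM status -> BSD errno, vslock()-style */
    static int mach_to_errno(enum kern_return kret)
    {
        switch (kret) {
        case KERN_SUCCESS:            return 0;
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:           return ENOMEM;  /* could not wire */
        case KERN_PROTECTION_FAILURE: return EACCES;  /* page protection */
        default:                      return EINVAL;
        }
    }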
@@ -392,12 +391,33 @@ load_shared_file(
         }
 
        if(local_flags & QUERY_IS_SYSTEM_REGION) {
+                       shared_region_mapping_t default_shared_region;
                        vm_get_shared_region(current_task(), &shared_region);
-                       if (shared_region == system_shared_region) {
+                       task_mapping_info.self = (vm_offset_t)shared_region;
+
+                       shared_region_mapping_info(shared_region, 
+                                       &(task_mapping_info.text_region), 
+                                       &(task_mapping_info.text_size),
+                                       &(task_mapping_info.data_region), 
+                                       &(task_mapping_info.data_size), 
+                                       &(task_mapping_info.region_mappings),
+                                       &(task_mapping_info.client_base), 
+                                       &(task_mapping_info.alternate_base),
+                                       &(task_mapping_info.alternate_next), 
+                                       &(task_mapping_info.fs_base),
+                                       &(task_mapping_info.system),
+                                       &(task_mapping_info.flags), &next);
+
+                       default_shared_region =
+                               lookup_default_shared_region(
+                                       ENV_DEFAULT_ROOT, 
+                                       task_mapping_info.system);
+                       if (shared_region == default_shared_region) {
                                local_flags = SYSTEM_REGION_BACKED;
                        } else {
                                local_flags = 0;
                        }
+                       shared_region_mapping_dealloc(default_shared_region);
                        error = 0;
                        error = copyout(&local_flags, flags, sizeof (int));
                        goto lsf_bailout;
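The QUERY_IS_SYSTEM_REGION path no longer compares against the single global system_shared_region; it asks lookup_default_shared_region() for the default region matching the task's filesystem root and system setting, and must then drop the reference the lookup returned. A minimal sketch of that lookup/compare/release pattern; the region type and the refcounting here are simplified stand-ins:

    /* simplified stand-in for shared_region_mapping_t */
    typedef struct region { int ref; } *region_t;

    static struct region the_default = { 1 };

    static region_t lookup_default_region(void)
    {
        the_default.ref++;              /* lookup returns a new reference */
        return &the_default;
    }

    static void region_dealloc(region_t r)
    {
        r->ref--;                       /* caller must drop that reference */
    }

    /* compare a task's region against the per-root default */
    static int region_is_default(region_t mine)
    {
        region_t def = lookup_default_region();
        int is_default = (mine == def);
        region_dealloc(def);            /* matches the hunk's dealloc call */
        return is_default;
    }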
@@ -458,28 +478,6 @@ load_shared_file(
                goto lsf_bailout_free_vput;
        }
 
-       vm_get_shared_region(current_task(), &shared_region);
-       if(shared_region == system_shared_region) {
-               default_regions = 1;
-       }
-       if(((vp->v_mount != rootvnode->v_mount)
-                       && (shared_region == system_shared_region)) 
-               && (lsf_mapping_pool_gauge() < 75)) {
-                               /* We don't want to run out of shared memory */
-                               /* map entries by starting too many private versions */
-                               /* of the shared library structures */
-               int     error;
-                       if(p->p_flag & P_NOSHLIB) {
-                               error = clone_system_shared_regions(FALSE);
-                       } else {
-                               error = clone_system_shared_regions(TRUE);
-                       }
-               if (error) {
-                       goto lsf_bailout_free_vput;
-               }
-               local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
-               vm_get_shared_region(current_task(), &shared_region);
-       }
 #ifdef notdef
        if(vattr.va_size != mapped_file_size) {
                error = EINVAL;
@@ -493,13 +491,13 @@ load_shared_file(
        /* load alternate regions if the caller has requested.  */
        /* Note: the new regions are "clean slates" */
        if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
-               error = clone_system_shared_regions(FALSE);
+               error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
                if (error) {
                        goto lsf_bailout_free_vput;
                }
-               vm_get_shared_region(current_task(), &shared_region);
        }
 
+       vm_get_shared_region(current_task(), &shared_region);
        task_mapping_info.self = (vm_offset_t)shared_region;
 
        shared_region_mapping_info(shared_region, 
@@ -511,7 +509,53 @@ load_shared_file(
                        &(task_mapping_info.client_base), 
                        &(task_mapping_info.alternate_base),
                        &(task_mapping_info.alternate_next), 
+                       &(task_mapping_info.fs_base),
+                       &(task_mapping_info.system),
+                       &(task_mapping_info.flags), &next);
+
+       {
+               shared_region_mapping_t default_shared_region;
+               default_shared_region =
+                       lookup_default_shared_region(
+                               ENV_DEFAULT_ROOT, 
+                               task_mapping_info.system);
+               if(shared_region == default_shared_region) {
+                       default_regions = 1;
+               }
+               shared_region_mapping_dealloc(default_shared_region);
+       }
+       /* If we are running on a removable file system we must not */
+       /* be in a set of shared regions or the file system will not */
+       /* be removable. */
+       if(((vp->v_mount != rootvnode->v_mount) && (default_regions)) 
+               && (lsf_mapping_pool_gauge() < 75)) {
+                               /* We don't want to run out of shared memory */
+                               /* map entries by starting too many private versions */
+                               /* of the shared library structures */
+               int     error;
+                       if(p->p_flag & P_NOSHLIB) {
+                               error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
+                       } else {
+                               error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);
+                       }
+               if (error) {
+                       goto lsf_bailout_free_vput;
+               }
+               local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
+               vm_get_shared_region(current_task(), &shared_region);
+               shared_region_mapping_info(shared_region, 
+                       &(task_mapping_info.text_region), 
+                       &(task_mapping_info.text_size),
+                       &(task_mapping_info.data_region), 
+                       &(task_mapping_info.data_size), 
+                       &(task_mapping_info.region_mappings),
+                       &(task_mapping_info.client_base), 
+                       &(task_mapping_info.alternate_base),
+                       &(task_mapping_info.alternate_next), 
+                       &(task_mapping_info.fs_base),
+                       &(task_mapping_info.system),
                        &(task_mapping_info.flags), &next);
+       }
 
        /*  This is a work-around to allow executables which have been */
        /*  built without knowledge of the proper shared segment to    */
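The removable-file-system block relocated above keeps a task that maps a library from a non-root (possibly removable) volume out of the default shared regions, but only while the shared mapping pool has headroom. Its gating condition, restated as a sketch; the helper and its parameters are illustrative, while the 75% threshold mirrors the lsf_mapping_pool_gauge() check in the hunk:

    #include <stdbool.h>

    static bool should_clone_private_regions(bool file_on_root_mount,
                                             bool using_default_regions,
                                             int pool_used_percent)
    {
        /* cloning consumes shared-memory map entries, so stop once
         * the pool is 75% used rather than risk exhausting it */
        return !file_on_root_mount && using_default_regions
               && pool_used_percent < 75;
    }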
@@ -692,24 +736,8 @@ new_system_shared_regions(
                return EINVAL;
        }
 
-       /* get current shared region info for  */
-       /* restoration after new system shared */
-       /* regions are in place */
-       vm_get_shared_region(current_task(), &regions);
-
-       /* usually only called at boot time    */
-       /* shared_file_boot_time_init creates  */
-       /* a new set of system shared regions  */
-       /* and places them as the system       */
-       /* shared regions.                     */
-       shared_file_boot_time_init();
-
-       /* set current task back to its        */
-       /* original regions.                   */
-       vm_get_shared_region(current_task(), &new_regions);
-       shared_region_mapping_dealloc(new_regions);
-
-       vm_set_shared_region(current_task(), regions);
+       /* clear all of our existing defaults */
+       remove_all_shared_regions();
 
        *retval = 0;
        return 0;
@@ -718,7 +746,7 @@ new_system_shared_regions(
 
 
 int
-clone_system_shared_regions(shared_regions_active)
+clone_system_shared_regions(shared_regions_active, base_vnode)
 {
        shared_region_mapping_t new_shared_region;
        shared_region_mapping_t next;
@@ -728,8 +756,6 @@ clone_system_shared_regions(shared_regions_active)
 
        struct proc     *p;
 
-       if (shared_file_create_system_region(&new_shared_region))
-               return (ENOMEM);
        vm_get_shared_region(current_task(), &old_shared_region);
        old_info.self = (vm_offset_t)old_shared_region;
        shared_region_mapping_info(old_shared_region,
@@ -741,7 +767,27 @@ clone_system_shared_regions(shared_regions_active)
                &(old_info.client_base),
                &(old_info.alternate_base),
                &(old_info.alternate_next), 
+               &(old_info.fs_base),
+               &(old_info.system),
                &(old_info.flags), &next);
+       if ((shared_regions_active) ||
+               (base_vnode == ENV_DEFAULT_ROOT)) {
+          if (shared_file_create_system_region(&new_shared_region))
+               return (ENOMEM);
+       } else {
+          new_shared_region = 
+               lookup_default_shared_region(
+                       base_vnode, old_info.system);
+          if(new_shared_region == NULL) {
+               shared_file_boot_time_init(
+                       base_vnode, old_info.system);
+               vm_get_shared_region(current_task(), &new_shared_region);
+          } else {
+               vm_set_shared_region(current_task(), new_shared_region);
+          }
+          if(old_shared_region)
+               shared_region_mapping_dealloc(old_shared_region);
+       }
        new_info.self = (vm_offset_t)new_shared_region;
        shared_region_mapping_info(new_shared_region,
                &(new_info.text_region),   
@@ -752,6 +798,8 @@ clone_system_shared_regions(shared_regions_active)
                &(new_info.client_base),
                &(new_info.alternate_base),
                &(new_info.alternate_next), 
+               &(new_info.fs_base),
+               &(new_info.system),
                &(new_info.flags), &next);
        if(shared_regions_active) {
           if(vm_region_clone(old_info.text_region, new_info.text_region)) {
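With the new base_vnode argument, clone_system_shared_regions() picks the task's regions in one of three ways. A compact restatement of the branch added above (the enum and helper are illustrative): callers with active regions, or working from the default root, get a freshly created system region; any other root adopts its per-root default, bootstrapping one on first use.

    #include <stdbool.h>

    enum region_source {
        FRESH_SYSTEM_REGION,  /* shared_file_create_system_region() */
        ADOPT_ROOT_DEFAULT,   /* lookup succeeded: vm_set_shared_region() */
        BOOTSTRAP_ROOT        /* lookup failed: shared_file_boot_time_init() */
    };

    static enum region_source pick_region_source(bool shared_regions_active,
                                                 bool base_is_default_root,
                                                 bool root_default_exists)
    {
        if (shared_regions_active || base_is_default_root)
            return FRESH_SYSTEM_REGION;
        return root_default_exists ? ADOPT_ROOT_DEFAULT : BOOTSTRAP_ROOT;
    }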
@@ -907,48 +955,92 @@ restart:
        }
 
        lru = global_user_profile_cache.age;
+       *profile = NULL;
        for(i = 0; i<global_user_profile_cache.max_ele; i++) {
+               /* Skip entry if it is in the process of being reused */
+               if(global_user_profile_cache.profiles[i].data_vp ==
+                                               (struct vnode *)0xFFFFFFFF)
+                       continue;
+               /* Otherwise grab the first empty entry */
                if(global_user_profile_cache.profiles[i].data_vp == NULL) {
                        *profile = &global_user_profile_cache.profiles[i];
                        (*profile)->age = global_user_profile_cache.age;
-                       global_user_profile_cache.age+=1;
                        break;
                }
+               /* Otherwise grab the oldest entry */
                if(global_user_profile_cache.profiles[i].age < lru) {
                        lru = global_user_profile_cache.profiles[i].age;
                        *profile = &global_user_profile_cache.profiles[i];
                }
        }
 
+       /* Did we set it? */
+       if (*profile == NULL) {
+               /*
+                * No entries are available; this can only happen if all
+                * of them are currently in the process of being reused;
+                * if this happens, we sleep on the address of the first
+                * element, and restart.  This is less than ideal, but we
+                * know it will work because we know that there will be a
+                * wakeup on any entry currently in the process of being
+                * reused.
+                *
+                * XXX Recommend a two-handed clock and more than 3 total
+                * XXX cache entries at some point in the future.
+                */
+                       /*
+                       * drop funnel and wait 
+                       */
+               (void)tsleep((void *)
+                &global_user_profile_cache.profiles[0],
+                       PRIBIO, "app_profile", 0);
+               goto restart;
+       }
+
+       /*
+        * If it's currently busy, we've picked the one at the end of the
+        * LRU list, but it's currently being actively used.  We sleep on
+        * its address and restart.
+        */
        if ((*profile)->busy) {
                        /*
                        * drop funnel and wait 
                        */
                (void)tsleep((void *)
-                       &(global_user_profile_cache),
+                       *profile,
                        PRIBIO, "app_profile", 0);
                goto restart;
        }
        (*profile)->busy = 1;
        (*profile)->user = user;
 
-       if((*profile)->data_vp != NULL) {
+       /*
+        * put dummy value in for now to get competing request to wait
+        * above until we are finished
+        *
+        * Save the data_vp before setting it, so we can set it before
+        * we kmem_free() or vrele().  If we don't do this, then we
+        * have a potential funnel race condition we have to deal with.
+        */
+       data_vp = (*profile)->data_vp;
+       (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
+
+       /*
+        * Age the cache here in all cases; this guarantees that we won't
+        * be reusing only one entry over and over, once the system reaches
+        * steady-state.
+        */
+       global_user_profile_cache.age+=1;
+
+       if(data_vp != NULL) {
                kmem_free(kernel_map, 
                                (*profile)->buf_ptr, 4 * PAGE_SIZE);
                if ((*profile)->names_vp) {
                        vrele((*profile)->names_vp);
                        (*profile)->names_vp = NULL;
                }
-               if ((*profile)->data_vp) {
-                       vrele((*profile)->data_vp);
-                       (*profile)->data_vp = NULL;
-               }
+               vrele(data_vp);
        }
-
-       /* put dummy value in for now to get */
-       /* competing request to wait above   */
-       /* until we are finished */
-       (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
        
        /* Try to open the appropriate users profile files */
        /* If neither file is present, try to create them  */
@@ -956,7 +1048,6 @@ restart:
        /* If the files do exist, check them for the app_file */
        /* requested and read it in if present */
 
-
        ret = kmem_alloc(kernel_map,
                (vm_offset_t *)&profile_data_string, PATH_MAX);
 
@@ -1337,7 +1428,7 @@ bsd_search_page_cache_data_base(
                resid_off = 0;
                while(size) {
                        error = vn_rdwr(UIO_READ, vp, 
-                               (caddr_t)(local_buf + resid_off),
+                               CAST_DOWN(caddr_t, (local_buf + resid_off)),
                                size, file_off + resid_off, UIO_SYSSPACE, 
                                IO_NODELOCKED, p->p_ucred, &resid, p);
                        if((error) || (size == resid)) {
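The final hunk replaces a plain cast with CAST_DOWN when handing the buffer address to vn_rdwr(). The assumption behind the sketch below is that a CAST_DOWN-style macro exists to make narrowing a possibly wider address value to a 32-bit pointer type explicit and uniform; the macro name here is hypothetical:

    #include <stdint.h>

    /* assumed shape of a CAST_DOWN-style macro: route the value
     * through uintptr_t so the narrowing conversion is explicit */
    #define MY_CAST_DOWN(type, addr)  ((type)(uintptr_t)(addr))

    /* usage mirroring the hunk (local_buf, resid_off as in the code):
     *   caddr_t p = MY_CAST_DOWN(caddr_t, local_buf + resid_off);
     */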