diff --git a/osfmk/vm/vm_shared_memory_server.c b/osfmk/vm/vm_shared_memory_server.c
index c685c5418d13e1a528a7781d13fc9b887927a0b2..e13fd2742f695fc6136e97f2c447bba7ec337879 100644
--- a/osfmk/vm/vm_shared_memory_server.c
+++ b/osfmk/vm/vm_shared_memory_server.c
@@ -1,23 +1,31 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
- * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
- * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- * 
- * @APPLE_LICENSE_HEADER_END@
+ * This file contains Original Code and/or Modifications of Original Code 
+ * as defined in and that are subject to the Apple Public Source License 
+ * Version 2.0 (the 'License'). You may not use this file except in 
+ * compliance with the License.  The rights granted to you under the 
+ * License may not be used to create, or enable the creation or 
+ * redistribution of, unlawful or unlicensed copies of an Apple operating 
+ * system, or to circumvent, violate, or enable the circumvention or 
+ * violation of, any terms of an Apple operating system software license 
+ * agreement.
+ *
+ * Please obtain a copy of the License at 
+ * http://www.opensource.apple.com/apsl/ and read it before using this 
+ * file.
+ *
+ * The Original Code and all software distributed under the License are 
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and 
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
  */
 /*
  *
  *      Support routines for an in-kernel shared memory allocator
  */
 
-#include <ipc/ipc_port.h>
-#include <kern/thread.h>
-#include <kern/zalloc.h>
+#include <debug.h>
+
+#include <mach/mach_types.h>
 #include <mach/kern_return.h>
 #include <mach/vm_inherit.h>
+#include <mach/vm_map.h>
+#include <machine/cpu_capabilities.h>
+
+#include <kern/kern_types.h>
+#include <kern/ipc_kobject.h>
+#include <kern/thread.h>
+#include <kern/zalloc.h>
+#include <kern/kalloc.h>
+
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_port.h>
+
 #include <vm/vm_kern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_page.h>
 
+#include <mach/mach_vm.h>
 #include <mach/shared_memory_server.h>
 #include <vm/vm_shared_memory_server.h>
 
+#if DEBUG
+int lsf_debug = 0;
+int lsf_alloc_debug = 0;
+#define LSF_DEBUG(args)                                \
+       MACRO_BEGIN                             \
+       if (lsf_debug) {                        \
+               kprintf args;                   \
+       }                                       \
+       MACRO_END
+#define LSF_ALLOC_DEBUG(args)                  \
+       MACRO_BEGIN                             \
+       if (lsf_alloc_debug) {                  \
+               kprintf args;                   \
+       }                                       \
+       MACRO_END
+#else /* DEBUG */
+#define LSF_DEBUG(args)
+#define LSF_ALLOC_DEBUG(args)
+#endif /* DEBUG */
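+/*
+ * Usage sketch (illustrative, not part of this change): each macro takes
+ * the entire kprintf() argument list as one parenthesized token, so call
+ * sites use double parentheses:
+ *
+ *     LSF_DEBUG(("lsf_unload: base_offset=0x%x\n", base_offset));
+ *
+ * On DEBUG kernels, setting lsf_debug or lsf_alloc_debug non-zero (e.g.
+ * from a debugger) enables the corresponding output; on release kernels
+ * both macros compile to nothing.
+ */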
+
 /* forward declarations */
+static kern_return_t
+shared_region_object_create(
+       vm_size_t               size,
+       ipc_port_t              *object_handle);
+
+static kern_return_t
+shared_region_mapping_dealloc_lock(
+       shared_region_mapping_t shared_region,
+       int need_sfh_lock,
+       int need_drl_lock);
+
+
 static kern_return_t           
 shared_file_init(               
-        ipc_port_t      *shared_text_region_handle,
+        ipc_port_t      *text_region_handle,
         vm_size_t       text_region_size,
-        ipc_port_t      *shared_data_region_handle,
+        ipc_port_t      *data_region_handle,
         vm_size_t       data_region_size, 
-        vm_offset_t     *shared_file_mapping_array);
+        vm_offset_t     *file_mapping_array);
+
+static kern_return_t
+shared_file_header_init(
+       shared_file_info_t      *shared_file_header);
 
 static load_struct_t  *
 lsf_hash_lookup(   
         queue_head_t                   *hash_table,
         void                           *file_object,  
+        vm_offset_t                     recognizableOffset,
         int                            size,
+       boolean_t                       regular,
        boolean_t                       alternate,
        shared_region_task_mappings_t   sm_info);
 
@@ -67,16 +126,22 @@ lsf_hash_insert(
         load_struct_t   *entry,
        shared_region_task_mappings_t   sm_info);
 
-static kern_return_t                   
-lsf_load(
-        vm_offset_t                    mapped_file,
-        vm_size_t                              mapped_file_size,
-        vm_offset_t                            *base_address,
-        sf_mapping_t                           *mappings,
-        int                                    map_cnt,
-        void                                   *file_object,
-        int                            flags,
-       shared_region_task_mappings_t   sm_info);
+static kern_return_t
+lsf_slide(
+       unsigned int                    map_cnt,
+       struct shared_file_mapping_np   *mappings,
+       shared_region_task_mappings_t   sm_info,
+       mach_vm_offset_t                *base_offset_p);
+
+static kern_return_t
+lsf_map(
+       struct shared_file_mapping_np   *mappings,
+       int                             map_cnt,
+       void                            *file_control,
+       memory_object_size_t            file_size,
+       shared_region_task_mappings_t   sm_info,
+       mach_vm_offset_t                base_offset,
+       mach_vm_offset_t                *slide_p);
 
 static void
 lsf_unload(
@@ -84,6 +149,13 @@ lsf_unload(
        vm_offset_t                     base_offset,
        shared_region_task_mappings_t   sm_info);
 
+static void
+lsf_deallocate(
+        void                           *file_object,
+       vm_offset_t                     base_offset,
+       shared_region_task_mappings_t   sm_info,
+       boolean_t                       unload);
+
 
 #define load_file_hash(file_object, size) \
                ((((natural_t)file_object) & 0xffffff) % size)
@@ -95,13 +167,383 @@ vm_offset_t               shared_file_data_region;
 ipc_port_t             shared_text_region_handle;
 ipc_port_t             shared_data_region_handle;
 vm_offset_t            shared_file_mapping_array = 0;
-shared_region_mapping_t        system_shared_region = NULL;
+
+shared_region_mapping_t default_environment_shared_regions = NULL;
+static decl_mutex_data(,default_regions_list_lock_data)
+
+#define default_regions_list_lock()            \
+               mutex_lock(&default_regions_list_lock_data)
+#define default_regions_list_lock_try()        \
+               mutex_try(&default_regions_list_lock_data)
+#define default_regions_list_unlock()  \
+               mutex_unlock(&default_regions_list_lock_data)
+
 
 ipc_port_t             sfma_handle = NULL;
 zone_t                 lsf_zone;
 
 int            shared_file_available_hash_ele;
 
+/* com region support */
+ipc_port_t             com_region_handle32 = NULL;
+ipc_port_t             com_region_handle64 = NULL;
+vm_map_t               com_region_map32 = NULL;
+vm_map_t               com_region_map64 = NULL;
+vm_size_t              com_region_size = _COMM_PAGE_AREA_LENGTH;
+shared_region_mapping_t        com_mapping_resource = NULL;
+
+
+#if DEBUG
+int shared_region_debug = 0;
+#endif /* DEBUG */
+
+
+kern_return_t
+vm_get_shared_region(
+       task_t  task,
+       shared_region_mapping_t *shared_region)
+{
+       *shared_region = (shared_region_mapping_t) task->system_shared_region;
+       if (*shared_region) {
+               assert((*shared_region)->ref_count > 0);
+       }
+       SHARED_REGION_DEBUG(("vm_get_shared_region(task=%p) -> %p\n",
+                            task, *shared_region));
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+vm_set_shared_region(
+       task_t  task,
+       shared_region_mapping_t shared_region)
+{
+       SHARED_REGION_DEBUG(("vm_set_shared_region(task=%p, "
+                            "shared_region=%p)\n",
+                            task, shared_region));
+       if (shared_region) {
+               assert(shared_region->ref_count > 0);
+       }
+       task->system_shared_region = shared_region;
+       return KERN_SUCCESS;
+}
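+
+/*
+ * Illustrative note (not part of the original change): neither accessor
+ * above touches the region's reference count.  A caller that publishes a
+ * region with vm_set_shared_region() is expected to already hold a
+ * reference on the task's behalf, e.g.:
+ *
+ *     shared_region_mapping_ref(region);      /- reference for the task -/
+ *     vm_set_shared_region(task, region);
+ */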
+
+/*
+ * shared_region_object_chain_detached:
+ *
+ * Mark the shared region as being detached or standalone.  This means
+ * that we won't keep track of which file is mapped and how, for this shared
+ * region.  And we don't have a "shadow" shared region.
+ * This is used when we clone a private shared region and we intend to remove
+ * some mappings from it.  It won't need to maintain mappings info because it's
+ * now private.  It can't have a "shadow" shared region because we don't want
+ * to see the shadow of the mappings we're about to remove.
+ */
+void
+shared_region_object_chain_detached(
+       shared_region_mapping_t         target_region)
+{
+       shared_region_mapping_lock(target_region);
+       target_region->flags |= SHARED_REGION_STANDALONE;
+       shared_region_mapping_unlock(target_region);
+}
+
+/*
+ * shared_region_object_chain_attach:
+ *
+ * Link "target_region" to "object_chain_region".  "object_chain_region"
+ * is treated as a shadow of "target_region" for the purpose of looking up
+ * mappings.  Since the "target_region" preserves all the mappings of the
+ * older "object_chain_region", we won't duplicate all the mappings info and
+ * we'll just lookup the next region in the "object_chain" if we can't find
+ * what we're looking for in the "target_region".  See lsf_hash_lookup().
+ */
+kern_return_t
+shared_region_object_chain_attach(
+       shared_region_mapping_t         target_region,
+       shared_region_mapping_t         object_chain_region)
+{
+       shared_region_object_chain_t    object_ele;
+       
+       SHARED_REGION_DEBUG(("shared_region_object_chain_attach("
+                            "target_region=%p, object_chain_region=%p\n",
+                            target_region, object_chain_region));
+       assert(target_region->ref_count > 0);
+       assert(object_chain_region->ref_count > 0);
+       if(target_region->object_chain)
+               return KERN_FAILURE;
+       object_ele = (shared_region_object_chain_t)
+                       kalloc(sizeof (struct shared_region_object_chain));
+       if (object_ele == NULL)
+               return KERN_FAILURE;
+       shared_region_mapping_lock(object_chain_region);
+       target_region->object_chain = object_ele;
+       object_ele->object_chain_region = object_chain_region;
+       object_ele->next = object_chain_region->object_chain;
+       object_ele->depth = object_chain_region->depth;
+       object_chain_region->depth++;
+       target_region->alternate_next = object_chain_region->alternate_next;
+       shared_region_mapping_unlock(object_chain_region);
+       return KERN_SUCCESS;
+}
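+
+/*
+ * Usage sketch (illustrative, not from the original source): a caller
+ * that clones a private region typically attaches the region it was
+ * cloned from, so that lsf_hash_lookup() falls through to the older
+ * mappings:
+ *
+ *     kern_return_t kr;
+ *
+ *     kr = shared_region_object_chain_attach(clone_region, base_region);
+ *     if (kr != KERN_SUCCESS) {
+ *             ... clone_region already had an object chain ...
+ *     }
+ */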
+
+/* LP64todo - need 64-bit safe version */
+kern_return_t
+shared_region_mapping_create(
+       ipc_port_t              text_region,
+       vm_size_t               text_size,
+       ipc_port_t              data_region,
+       vm_size_t               data_size,
+       vm_offset_t             region_mappings,
+       vm_offset_t             client_base,
+       shared_region_mapping_t *shared_region,
+       vm_offset_t             alt_base,
+       vm_offset_t             alt_next)
+{
+       SHARED_REGION_DEBUG(("shared_region_mapping_create()\n"));
+       *shared_region = (shared_region_mapping_t) 
+                       kalloc(sizeof (struct shared_region_mapping));
+       if(*shared_region == NULL) {
+               SHARED_REGION_DEBUG(("shared_region_mapping_create: "
+                                    "failure\n"));
+               return KERN_FAILURE;
+       }
+       shared_region_mapping_lock_init((*shared_region));
+       (*shared_region)->text_region = text_region;
+       (*shared_region)->text_size = text_size;
+       (*shared_region)->fs_base = ENV_DEFAULT_ROOT;
+       (*shared_region)->system = cpu_type();
+       (*shared_region)->data_region = data_region;
+       (*shared_region)->data_size = data_size;
+       (*shared_region)->region_mappings = region_mappings;
+       (*shared_region)->client_base = client_base;
+       (*shared_region)->ref_count = 1;
+       (*shared_region)->next = NULL;
+       (*shared_region)->object_chain = NULL;
+       (*shared_region)->self = *shared_region;
+       (*shared_region)->flags = 0;
+       (*shared_region)->depth = 0;
+       (*shared_region)->default_env_list = NULL;
+       (*shared_region)->alternate_base = alt_base;
+       (*shared_region)->alternate_next = alt_next;
+       SHARED_REGION_DEBUG(("shared_region_mapping_create -> %p\n",
+                            *shared_region));
+       return KERN_SUCCESS;
+}
+
+/* LP64todo - need 64-bit safe version */
+kern_return_t
+shared_region_mapping_info(
+       shared_region_mapping_t shared_region,
+       ipc_port_t              *text_region,
+       vm_size_t               *text_size,
+       ipc_port_t              *data_region,
+       vm_size_t               *data_size,
+       vm_offset_t             *region_mappings,
+       vm_offset_t             *client_base,
+       vm_offset_t             *alt_base,
+       vm_offset_t             *alt_next,
+       unsigned int            *fs_base,
+       unsigned int            *system,
+       int                     *flags,
+       shared_region_mapping_t *next)
+{
+       shared_region_mapping_lock(shared_region);
+
+       SHARED_REGION_DEBUG(("shared_region_mapping_info(shared_region=%p)\n",
+                            shared_region));
+       assert(shared_region->ref_count > 0);
+       *text_region = shared_region->text_region;
+       *text_size = shared_region->text_size;
+       *data_region = shared_region->data_region;
+       *data_size = shared_region->data_size;
+       *region_mappings = shared_region->region_mappings;
+       *client_base = shared_region->client_base;
+       *alt_base = shared_region->alternate_base;
+       *alt_next = shared_region->alternate_next;
+       *flags = shared_region->flags;
+       *fs_base = shared_region->fs_base;
+       *system = shared_region->system;
+       *next = shared_region->next;
+
+       shared_region_mapping_unlock(shared_region);
+
+       return KERN_SUCCESS;
+}
+
+kern_return_t
+shared_region_mapping_ref(
+       shared_region_mapping_t shared_region)
+{
+       SHARED_REGION_DEBUG(("shared_region_mapping_ref(shared_region=%p): "
+                            "ref_count=%d + 1\n",
+                            shared_region,
+                            shared_region ? shared_region->ref_count : 0));
+       if(shared_region == NULL)
+               return KERN_SUCCESS;
+       assert(shared_region->ref_count > 0);
+       hw_atomic_add(&shared_region->ref_count, 1);
+       return KERN_SUCCESS;
+}
+
+static kern_return_t
+shared_region_mapping_dealloc_lock(
+       shared_region_mapping_t shared_region,
+       int need_sfh_lock,
+       int need_drl_lock)
+{
+       struct shared_region_task_mappings sm_info;
+       shared_region_mapping_t next = NULL;
+       int ref_count;
+
+       SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock"
+                            "(shared_region=%p,%d,%d) ref_count=%d\n",
+                            shared_region, need_sfh_lock, need_drl_lock,
+                            shared_region ? shared_region->ref_count : 0));
+       while (shared_region) {
+               SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): "
+                                    "ref_count=%d\n",
+                                    shared_region, shared_region->ref_count));
+               assert(shared_region->ref_count > 0);
+               if ((ref_count = 
+                         hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
+                       shared_region_mapping_lock(shared_region);
+
+                       sm_info.text_region = shared_region->text_region;
+                       sm_info.text_size = shared_region->text_size;
+                       sm_info.data_region = shared_region->data_region;
+                       sm_info.data_size = shared_region->data_size;
+                       sm_info.region_mappings = shared_region->region_mappings;
+                       sm_info.client_base = shared_region->client_base;
+                       sm_info.alternate_base = shared_region->alternate_base;
+                       sm_info.alternate_next = shared_region->alternate_next;
+                       sm_info.flags = shared_region->flags;
+                       sm_info.self = (vm_offset_t)shared_region;
+
+                       if(shared_region->region_mappings) {
+                               lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_sfh_lock);
+                       }
+                       if(((vm_named_entry_t)
+                               (shared_region->text_region->ip_kobject))
+                                                        ->backing.map->pmap) {
+                           pmap_remove(((vm_named_entry_t)
+                               (shared_region->text_region->ip_kobject))
+                                                       ->backing.map->pmap, 
+                               sm_info.client_base, 
+                               sm_info.client_base + sm_info.text_size);
+                       }
+                       ipc_port_release_send(shared_region->text_region);
+                       if(shared_region->data_region)
+                               ipc_port_release_send(shared_region->data_region);
+                       if (shared_region->object_chain) {
+                               next = shared_region->object_chain->object_chain_region;
+                               kfree(shared_region->object_chain,
+                                     sizeof (struct shared_region_object_chain));
+                       } else {
+                               next = NULL;
+                       }
+                       shared_region_mapping_unlock(shared_region);
+                       SHARED_REGION_DEBUG(
+                               ("shared_region_mapping_dealloc_lock(%p): "
+                                "freeing\n",
+                                shared_region));
+                       bzero((void *)shared_region,
+                             sizeof (*shared_region)); /* FBDP debug */
+                       kfree(shared_region,
+                               sizeof (struct shared_region_mapping));
+                       shared_region = next;
+               } else {
+                       /* Stale indicates that a system region is no */
+                       /* longer in the default environment list.    */
+                       if((ref_count == 1) && 
+                         (shared_region->flags & SHARED_REGION_SYSTEM)
+                         && !(shared_region->flags & SHARED_REGION_STALE)) {
+                               SHARED_REGION_DEBUG(
+                                       ("shared_region_mapping_dealloc_lock"
+                                        "(%p): removing stale\n",
+                                        shared_region));
+                               remove_default_shared_region_lock(shared_region,need_sfh_lock, need_drl_lock);
+                       }
+                       break;
+               }
+       }
+       SHARED_REGION_DEBUG(("shared_region_mapping_dealloc_lock(%p): done\n",
+                            shared_region));
+       return KERN_SUCCESS;
+}
+
+/*
+ * Stub function; always indicates that the lock needs to be taken in the
+ * call to lsf_remove_regions_mappings_lock().
+ */
+kern_return_t
+shared_region_mapping_dealloc(
+       shared_region_mapping_t shared_region)
+{
+       SHARED_REGION_DEBUG(("shared_region_mapping_dealloc"
+                            "(shared_region=%p)\n",
+                            shared_region));
+       if (shared_region) {
+               assert(shared_region->ref_count > 0);
+       }
+       return shared_region_mapping_dealloc_lock(shared_region, 1, 1);
+}
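+
+/*
+ * Reference-counting sketch (illustrative): every
+ * shared_region_mapping_ref() must eventually be balanced by a
+ * shared_region_mapping_dealloc(); dropping the last reference frees
+ * the region and cascades down its object chain:
+ *
+ *     shared_region_mapping_ref(region);      /- ref_count + 1 -/
+ *     ... use region ...
+ *     shared_region_mapping_dealloc(region);  /- ref_count - 1, may free -/
+ */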
+
+static kern_return_t
+shared_region_object_create(
+       vm_size_t               size,
+       ipc_port_t              *object_handle)
+{
+       vm_named_entry_t        user_entry;
+       ipc_port_t              user_handle;
+
+       ipc_port_t      previous;
+       vm_map_t        new_map;
+       
+       user_entry = (vm_named_entry_t) 
+                       kalloc(sizeof (struct vm_named_entry));
+       if(user_entry == NULL) {
+               return KERN_FAILURE;
+       }
+       named_entry_lock_init(user_entry);
+       user_handle = ipc_port_alloc_kernel();
+       if (user_handle == IP_NULL) {
+               kfree(user_entry, sizeof (struct vm_named_entry));
+               return KERN_FAILURE;
+       }
+
+       ip_lock(user_handle);
+
+       /* make a sonce right */
+       user_handle->ip_sorights++;
+       ip_reference(user_handle);
+
+       user_handle->ip_destination = IP_NULL;
+       user_handle->ip_receiver_name = MACH_PORT_NULL;
+       user_handle->ip_receiver = ipc_space_kernel;
+
+       /* make a send right */
+        user_handle->ip_mscount++;
+        user_handle->ip_srights++;
+        ip_reference(user_handle);
+
+       ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
+       /* nsrequest unlocks user_handle */
+
+       /* Create a named object based on a submap of specified size */
+
+       new_map = vm_map_create(pmap_create(0), 0, size, TRUE);
+       user_entry->backing.map = new_map;
+       user_entry->internal = TRUE;
+       user_entry->is_sub_map = TRUE;
+       user_entry->is_pager = FALSE;
+       user_entry->offset = 0;
+       user_entry->protection = VM_PROT_ALL;
+       user_entry->size = size;
+       user_entry->ref_count = 1;
+
+       ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
+                                                       IKOT_NAMED_ENTRY);
+       *object_handle = user_handle;
+       return KERN_SUCCESS;
+}
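+
+/*
+ * Usage sketch (illustrative): the caller receives a named-entry send
+ * right backed by a fresh pmap/submap of the requested size, as
+ * shared_file_init() does for the text and data regions:
+ *
+ *     ipc_port_t handle;
+ *
+ *     if (shared_region_object_create(0x10000000, &handle)
+ *         != KERN_SUCCESS) {
+ *             ... no memory for the named entry ...
+ *     }
+ */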
+
+/* called for the non-default, private branch shared region support */
+/* system default fields for fs_base and system supported are not   */
+/* relevant as the system default flag is not set */
 kern_return_t
 shared_file_create_system_region(
                shared_region_mapping_t *shared_region)
@@ -113,55 +555,410 @@ shared_file_create_system_region(
        vm_offset_t             mapping_array;
        kern_return_t           kret;
 
+       SHARED_REGION_DEBUG(("shared_file_create_system_region()\n"));
+
        text_size = 0x10000000;
        data_size = 0x10000000;
 
        kret = shared_file_init(&text_handle,
                        text_size, &data_handle, data_size, &mapping_array);
-       if(kret)
+       if(kret) {
+               SHARED_REGION_DEBUG(("shared_file_create_system_region: "
+                                    "shared_file_init failed kret=0x%x\n",
+                                    kret));
                return kret;
+       }
        kret = shared_region_mapping_create(text_handle,
                        text_size, data_handle, data_size, mapping_array,
                        GLOBAL_SHARED_TEXT_SEGMENT, shared_region, 
-                       0x9000000, 0x9000000);
-       if(kret)
+                       SHARED_ALTERNATE_LOAD_BASE, SHARED_ALTERNATE_LOAD_BASE);
+       if(kret) {
+               SHARED_REGION_DEBUG(("shared_file_create_system_region: "
+                                    "shared_region_mapping_create failed "
+                                    "kret=0x%x\n",
+                                    kret));
                return kret;
+       }
        (*shared_region)->flags = 0;
+       if(com_mapping_resource) {
+               shared_region_mapping_ref(com_mapping_resource);
+               (*shared_region)->next = com_mapping_resource;
+       }
+
+       SHARED_REGION_DEBUG(("shared_file_create_system_region() "
+                            "-> shared_region=%p\n",
+                            *shared_region));
        return KERN_SUCCESS;
 }
 
+/*
+ * Load a new default for a specified environment into the default shared
+ * regions list.  If a previous default exists for the environment
+ * specification, it is returned along with its reference.  The new
+ * system region structure is expected to donate a reference.
+ */
+
+shared_region_mapping_t
+update_default_shared_region(
+               shared_region_mapping_t new_system_region)
+{
+       shared_region_mapping_t old_system_region;
+       unsigned int fs_base;
+       unsigned int system;
+
+       SHARED_REGION_DEBUG(("update_default_shared_region(new=%p)\n",
+                            new_system_region));
+       assert(new_system_region->ref_count > 0);
+       fs_base = new_system_region->fs_base;
+       system = new_system_region->system;
+       new_system_region->flags |= SHARED_REGION_SYSTEM;
+       default_regions_list_lock();
+       old_system_region = default_environment_shared_regions;
+
+       if((old_system_region != NULL) && 
+               (old_system_region->fs_base == fs_base) &&
+                       (old_system_region->system == system)) {
+               new_system_region->default_env_list =
+                       old_system_region->default_env_list;
+               old_system_region->default_env_list = NULL;
+               default_environment_shared_regions = new_system_region;
+               old_system_region->flags |= SHARED_REGION_STALE;
+               default_regions_list_unlock();
+               SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
+                                    "old=%p stale 1\n",
+                                    new_system_region, old_system_region));
+               assert(old_system_region->ref_count > 0);
+               return old_system_region;
+       }
+       if (old_system_region) {
+          while(old_system_region->default_env_list != NULL) {
+               if((old_system_region->default_env_list->fs_base == fs_base) &&
+                     (old_system_region->default_env_list->system == system)) {
+                       shared_region_mapping_t tmp_system_region;
+
+                       tmp_system_region =
+                               old_system_region->default_env_list;
+                       new_system_region->default_env_list =
+                                       tmp_system_region->default_env_list;
+                       tmp_system_region->default_env_list = NULL;
+                       old_system_region->default_env_list = 
+                                       new_system_region;
+                       old_system_region = tmp_system_region;
+                       old_system_region->flags |= SHARED_REGION_STALE;
+                       default_regions_list_unlock();
+                       SHARED_REGION_DEBUG(("update_default_shared_region(%p)"
+                                            ": old=%p stale 2\n",
+                                            new_system_region,
+                                            old_system_region));
+                       assert(old_system_region->ref_count > 0);
+                       return old_system_region;
+               }
+               old_system_region = old_system_region->default_env_list;
+          }
+       }
+       /* If we get here, we are at the end of the system list and we */
+       /* did not find a pre-existing entry */
+       if(old_system_region) {
+               SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
+                                    "adding after old=%p\n",
+                                    new_system_region, old_system_region));
+               assert(old_system_region->ref_count > 0);
+               old_system_region->default_env_list = new_system_region;
+       } else {
+               SHARED_REGION_DEBUG(("update_default_shared_region(%p): "
+                                    "new default\n",
+                                    new_system_region));
+               default_environment_shared_regions = new_system_region;
+       }
+       assert(new_system_region->ref_count > 0);
+       default_regions_list_unlock();
+       return NULL;
+}
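+
+/*
+ * Usage sketch (illustrative): the caller donates one reference with
+ * new_system_region and must drop the reference that comes back with
+ * any displaced default, exactly as shared_file_boot_time_init() does:
+ *
+ *     old = update_default_shared_region(new_region);
+ *     if (old != NULL)
+ *             shared_region_mapping_dealloc(old);
+ */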
+
+/* 
+ * Look up a system shared region for the specified environment.  If one
+ * is found, it is returned along with a reference against the structure.
+ */
+
+shared_region_mapping_t
+lookup_default_shared_region(
+               unsigned int fs_base,
+               unsigned int system)
+{
+       shared_region_mapping_t system_region;
+       default_regions_list_lock();
+       system_region = default_environment_shared_regions;
+
+       SHARED_REGION_DEBUG(("lookup_default_shared_region"
+                            "(base=0x%x, system=0x%x)\n",
+                            fs_base, system));
+       while(system_region != NULL) {
+               SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x, 0x%x)"
+                                    ": system_region=%p base=0x%x system=0x%x"
+                                    " ref_count=%d\n",
+                                    fs_base, system, system_region,
+                                    system_region->fs_base,
+                                    system_region->system,
+                                    system_region->ref_count));
+               assert(system_region->ref_count > 0);
+               if((system_region->fs_base == fs_base) &&
+                       (system_region->system == system)) {
+                       break;
+               }
+               system_region = system_region->default_env_list;
+       }
+       if(system_region)
+               shared_region_mapping_ref(system_region);
+       default_regions_list_unlock();
+       SHARED_REGION_DEBUG(("lookup_default_shared_region(0x%x,0x%x) -> %p\n",
+                            fs_base, system, system_region));
+       return system_region;
+}
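+
+/*
+ * Usage sketch (illustrative): the returned region, if any, carries an
+ * extra reference that the caller must drop when done with it:
+ *
+ *     region = lookup_default_shared_region(ENV_DEFAULT_ROOT, cpu_type());
+ *     if (region != NULL) {
+ *             ... use region ...
+ *             shared_region_mapping_dealloc(region);
+ *     }
+ */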
+
+/*
+ * remove a system_region default if it appears in the default regions list. 
+ * Drop a reference on removal.
+ */
+
+__private_extern__ void
+remove_default_shared_region_lock(
+               shared_region_mapping_t system_region,
+               int need_sfh_lock,
+               int need_drl_lock)
+{
+       shared_region_mapping_t old_system_region;
+
+       SHARED_REGION_DEBUG(("remove_default_shared_region_lock"
+                            "(system_region=%p, %d, %d)\n",
+                            system_region, need_sfh_lock, need_drl_lock));
+       if (need_drl_lock) {
+               default_regions_list_lock();
+       }
+       old_system_region = default_environment_shared_regions;
+
+       if(old_system_region == NULL) {
+               SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p)"
+                                    "-> default_env=NULL\n",
+                                    system_region));
+               if (need_drl_lock) {
+                       default_regions_list_unlock();
+               }
+               return;
+       }
+
+       SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
+                            "default_env=%p\n",
+                            system_region, old_system_region));
+       assert(old_system_region->ref_count > 0);
+       if (old_system_region == system_region) {
+               default_environment_shared_regions 
+                       = old_system_region->default_env_list;
+               old_system_region->default_env_list = NULL;
+               old_system_region->flags |= SHARED_REGION_STALE;
+               SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
+                                    "old=%p ref_count=%d STALE\n",
+                                    system_region, old_system_region,
+                                    old_system_region->ref_count));
+                       shared_region_mapping_dealloc_lock(old_system_region,
+                                                  need_sfh_lock,
+                                                  0);
+               if (need_drl_lock) {
+                       default_regions_list_unlock();
+               }
+               return;
+       }
+
+       while(old_system_region->default_env_list != NULL) {
+               SHARED_REGION_DEBUG(("remove_default_shared_region_lock(%p): "
+                                    "old=%p->default_env=%p\n",
+                                    system_region, old_system_region,
+                                    old_system_region->default_env_list));
+               assert(old_system_region->default_env_list->ref_count > 0);
+               if(old_system_region->default_env_list == system_region) {
+                       shared_region_mapping_t dead_region;
+                       dead_region = old_system_region->default_env_list;
+                       old_system_region->default_env_list = 
+                               dead_region->default_env_list;
+                       dead_region->default_env_list = NULL;
+                       dead_region->flags |= SHARED_REGION_STALE;
+                       SHARED_REGION_DEBUG(
+                               ("remove_default_shared_region_lock(%p): "
+                                "dead=%p ref_count=%d stale\n",
+                                system_region, dead_region,
+                                dead_region->ref_count));
+                               shared_region_mapping_dealloc_lock(dead_region,
+                                                          need_sfh_lock,
+                                                          0);
+                       if (need_drl_lock) {
+                               default_regions_list_unlock();
+                       }
+                       return;
+               }
+               old_system_region = old_system_region->default_env_list;
+       }
+       if (need_drl_lock) {
+               default_regions_list_unlock();
+       }
+}
+
+/*
+ * Symbol compatibility; we believe shared_region_mapping_dealloc_lock() is
+ * the only caller.  Remove this stub function and the corresponding symbol
+ * export for Merlot.
+ */
+void
+remove_default_shared_region(
+               shared_region_mapping_t system_region)
+{
+       SHARED_REGION_DEBUG(("remove_default_shared_region(%p)\n",
+                            system_region));
+       if (system_region) {
+               assert(system_region->ref_count > 0);
+       }
+       remove_default_shared_region_lock(system_region, 1, 1);
+}
+
+void
+remove_all_shared_regions(void)
+{
+       shared_region_mapping_t system_region;
+       shared_region_mapping_t next_system_region;
+
+       SHARED_REGION_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
+       LSF_ALLOC_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
+       LSF_DEBUG(("***** REMOVE_ALL_SHARED_REGIONS()\n"));
+       default_regions_list_lock();
+       system_region = default_environment_shared_regions;
+
+       if(system_region == NULL) {
+               default_regions_list_unlock();
+               return;
+       }
+
+       while(system_region != NULL) {
+               next_system_region = system_region->default_env_list;
+               system_region->default_env_list = NULL;
+               system_region->flags |= SHARED_REGION_STALE;
+               SHARED_REGION_DEBUG(("remove_all_shared_regions(): "
+                                    "%p ref_count=%d stale\n",
+                                    system_region, system_region->ref_count));
+               assert(system_region->ref_count > 0);
+                       shared_region_mapping_dealloc_lock(system_region, 1, 0);
+               system_region = next_system_region;
+       }
+       default_environment_shared_regions = NULL;
+       default_regions_list_unlock();
+       SHARED_REGION_DEBUG(("***** remove_all_shared_regions() done\n"));
+       LSF_ALLOC_DEBUG(("***** remove_all_shared_regions() done\n"));
+       LSF_DEBUG(("***** remove_all_shared_regions() done\n"));
+}
+               
+/* shared_com_boot_time_init initializes the common page shared data and */
+/* text region.  This region is semi-independent of the split libs       */
+/* and so its policies have to be handled differently by the code that   */
+/* manipulates the mapping of shared region environments.  However,      */
+/* the shared region delivery system supports both.                      */
+void shared_com_boot_time_init(void);  /* forward */
+void
+shared_com_boot_time_init(void)
+{
+       kern_return_t            kret;
+       vm_named_entry_t        named_entry;
+
+       SHARED_REGION_DEBUG(("shared_com_boot_time_init()\n"));
+       if(com_region_handle32) {
+               panic("shared_com_boot_time_init: "
+                       "com_region_handle32 already set\n");
+       }
+       if(com_region_handle64) {
+               panic("shared_com_boot_time_init: "
+                       "com_region_handle64 already set\n");
+       }
+
+       /* create com page regions, 1 each for 32 and 64-bit code  */
+       if((kret = shared_region_object_create(
+                       com_region_size, 
+                       &com_region_handle32))) {
+               panic("shared_com_boot_time_init: "
+                               "unable to create 32-bit comm page\n");
+               return;
+       }
+       if((kret = shared_region_object_create(
+                       com_region_size, 
+                       &com_region_handle64))) {
+               panic("shared_com_boot_time_init: "
+                               "unable to create 64-bit comm page\n");
+               return;
+       }
+       
+       /* now export the underlying region/map */
+       named_entry = (vm_named_entry_t)com_region_handle32->ip_kobject;
+       com_region_map32 = named_entry->backing.map;
+       named_entry = (vm_named_entry_t)com_region_handle64->ip_kobject;
+       com_region_map64 = named_entry->backing.map;
+       
+       /* wrap the com region in its own shared file mapping structure */
+       /* 64-bit todo: call "shared_region_mapping_create" on com_region_handle64 */
+       kret = shared_region_mapping_create(com_region_handle32,
+               com_region_size, NULL, 0, 0,
+               _COMM_PAGE_BASE_ADDRESS, &com_mapping_resource,
+               0, 0);
+       if (kret) {
+         panic("shared_region_mapping_create failed for commpage");
+       }
+}
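+
+/*
+ * Illustrative note: once com_mapping_resource is set up, every newly
+ * created shared region takes a reference on it and chains it through
+ * its "next" field, as shared_file_create_system_region() does:
+ *
+ *     shared_region_mapping_ref(com_mapping_resource);
+ *     (*shared_region)->next = com_mapping_resource;
+ */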
+
+void
 shared_file_boot_time_init(
-)
+               unsigned int fs_base, 
+               unsigned int system)
 {
-       long                    shared_text_region_size;
-       long                    shared_data_region_size;
+       long                    text_region_size;
+       long                    data_region_size;
        shared_region_mapping_t new_system_region;
-       shared_region_mapping_t old_system_region;
+       shared_region_mapping_t old_default_env;
 
-       shared_text_region_size = 0x10000000;
-       shared_data_region_size = 0x10000000;
+       SHARED_REGION_DEBUG(("shared_file_boot_time_init"
+                            "(base=0x%x,system=0x%x)\n",
+                            fs_base, system));
+       text_region_size = 0x10000000;
+       data_region_size = 0x10000000;
        shared_file_init(&shared_text_region_handle,
-               shared_text_region_size, &shared_data_region_handle,
-               shared_data_region_size, &shared_file_mapping_array);
+                        text_region_size,
+                        &shared_data_region_handle,
+                        data_region_size,
+                        &shared_file_mapping_array);
        
        shared_region_mapping_create(shared_text_region_handle,
-               shared_text_region_size, shared_data_region_handle,
-               shared_data_region_size, shared_file_mapping_array,
-               GLOBAL_SHARED_TEXT_SEGMENT, &new_system_region,
-               0x9000000, 0x9000000);
-       old_system_region = system_shared_region;
-       system_shared_region = new_system_region;
-       system_shared_region->flags = SHARED_REGION_SYSTEM;
-        /* consume the reference held because this is the  */
-        /* system shared region */
-       if(old_system_region) {
-                shared_region_mapping_dealloc(old_system_region);
-       }
+                                    text_region_size,
+                                    shared_data_region_handle,
+                                    data_region_size,
+                                    shared_file_mapping_array,
+                                    GLOBAL_SHARED_TEXT_SEGMENT,
+                                    &new_system_region,
+                                    SHARED_ALTERNATE_LOAD_BASE,
+                                    SHARED_ALTERNATE_LOAD_BASE);
+
+       new_system_region->fs_base = fs_base;
+       new_system_region->system = system;
+       new_system_region->flags = SHARED_REGION_SYSTEM;
+
+       /* grab an extra reference for the caller */
+       /* remember to grab before call to update */
+       shared_region_mapping_ref(new_system_region);
+       old_default_env = update_default_shared_region(new_system_region);
        /* hold an extra reference because these are the system */
        /* shared regions. */
-       shared_region_mapping_ref(system_shared_region);
-       vm_set_shared_region(current_task(), system_shared_region);
-
+       if(old_default_env)
+               shared_region_mapping_dealloc(old_default_env);
+       if(com_mapping_resource == NULL) {
+               shared_com_boot_time_init();
+       }
+       shared_region_mapping_ref(com_mapping_resource);
+       new_system_region->next = com_mapping_resource;
+       vm_set_shared_region(current_task(), new_system_region);
+       SHARED_REGION_DEBUG(("shared_file_boot_time_init(0x%x,0x%x) done\n",
+                            fs_base, system));
 }
 
 
@@ -174,18 +971,16 @@ shared_file_boot_time_init(
 
 static kern_return_t
 shared_file_init(
-       ipc_port_t      *shared_text_region_handle,
+       ipc_port_t      *text_region_handle,
        vm_size_t       text_region_size, 
-       ipc_port_t      *shared_data_region_handle,
+       ipc_port_t      *data_region_handle,
        vm_size_t       data_region_size,
-       vm_offset_t     *mapping_array)
+       vm_offset_t     *file_mapping_array)
 {
-       vm_offset_t             aligned_address;
        shared_file_info_t      *sf_head;
        vm_offset_t             table_mapping_address;
        int                     data_table_size;
        int                     hash_size;
-       int                     i;
        kern_return_t           kret;
 
        vm_object_t             buf_object;
@@ -194,17 +989,19 @@ shared_file_init(
        vm_offset_t             b;
        vm_page_t               p;
 
+       SHARED_REGION_DEBUG(("shared_file_init()\n"));
        /* create text and data maps/regions */
-       if(kret = vm_region_object_create(kernel_map, 
-                               text_region_size, 
-                               shared_text_region_handle)) {
-               
+       kret = shared_region_object_create(
+                                      text_region_size, 
+                                      text_region_handle);
+       if (kret) {
                return kret;
        }
-       if(kret = vm_region_object_create(kernel_map, 
-                               data_region_size, 
-                               shared_data_region_handle)) {
-               ipc_port_release_send(*shared_text_region_handle);
+       kret = shared_region_object_create(
+                                      data_region_size, 
+                                      data_region_handle);
+       if (kret) {
+               ipc_port_release_send(*text_region_handle);
                return kret;
        }
 
@@ -213,18 +1010,21 @@ shared_file_init(
        table_mapping_address = data_region_size - data_table_size;
 
        if(shared_file_mapping_array == 0) {
+               vm_map_address_t map_addr;
                buf_object = vm_object_allocate(data_table_size);
 
-               if(vm_map_find_space(kernel_map, &shared_file_mapping_array, 
-                               data_table_size, 0, &entry) != KERN_SUCCESS) {
+               if(vm_map_find_space(kernel_map, &map_addr,
+                                    data_table_size, 0, &entry)
+                  != KERN_SUCCESS) {
                        panic("shared_file_init: no space");
                }
-               *mapping_array = shared_file_mapping_array;
+               shared_file_mapping_array = CAST_DOWN(vm_offset_t, map_addr);
+               *file_mapping_array = shared_file_mapping_array;
                vm_map_unlock(kernel_map);
                entry->object.vm_object = buf_object;
                entry->offset = 0;
 
-               for (b = *mapping_array, alloced = 0; 
+               for (b = *file_mapping_array, alloced = 0; 
                           alloced < (hash_size +
                                round_page(sizeof(struct sf_mapping)));
                           alloced += PAGE_SIZE,  b += PAGE_SIZE) {
@@ -235,88 +1035,144 @@ shared_file_init(
                        }       
                        p->busy = FALSE;
                        vm_object_unlock(buf_object);
-                       pmap_enter(kernel_pmap, b, p->phys_addr,
+                       pmap_enter(kernel_pmap, b, p->phys_page,
                                VM_PROT_READ | VM_PROT_WRITE, 
-                               VM_WIMG_USE_DEFAULT, TRUE);
+                               ((unsigned int)(p->object->wimg_bits)) 
+                                                       & VM_WIMG_MASK,
+                               TRUE);
                }
 
 
                /* initialize loaded file array */
-               sf_head = (shared_file_info_t *)*mapping_array;
+               sf_head = (shared_file_info_t *)*file_mapping_array;
                sf_head->hash = (queue_head_t *) 
-                               (((int)*mapping_array) + 
+                               (((int)*file_mapping_array) + 
                                        sizeof(struct shared_file_info));
                sf_head->hash_size = hash_size/sizeof(queue_head_t);
-               mutex_init(&(sf_head->lock), (ETAP_VM_MAP));
+               mutex_init(&(sf_head->lock), 0);
                sf_head->hash_init = FALSE;
 
 
                mach_make_memory_entry(kernel_map, &data_table_size, 
-                       *mapping_array, VM_PROT_READ, &sfma_handle,
+                       *file_mapping_array, VM_PROT_READ, &sfma_handle,
                        NULL);
 
-               if (vm_map_wire(kernel_map, *mapping_array, 
-                       *mapping_array + 
-                          (hash_size + round_page(sizeof(struct sf_mapping))),
+               if (vm_map_wire(kernel_map, 
+                       vm_map_trunc_page(*file_mapping_array),
+                       vm_map_round_page(*file_mapping_array + 
+                                         hash_size + 
+                                         round_page(sizeof(struct sf_mapping))),
                        VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
                        panic("shared_file_init: No memory for data table");
                }
 
                lsf_zone = zinit(sizeof(struct load_file_ele), 
                        data_table_size - 
-                          (hash_size + round_page(sizeof(struct sf_mapping))),
+                          (hash_size + round_page_32(sizeof(struct sf_mapping))),
                        0, "load_file_server"); 
 
                zone_change(lsf_zone, Z_EXHAUST, TRUE);
                zone_change(lsf_zone, Z_COLLECT, FALSE);
                zone_change(lsf_zone, Z_EXPAND, FALSE);
                zone_change(lsf_zone, Z_FOREIGN, TRUE);
+
+               /* initialize the global default environment lock */
+               mutex_init(&default_regions_list_lock_data, 0);
+
        } else {
-               *mapping_array = shared_file_mapping_array;
+               *file_mapping_array = shared_file_mapping_array;
        }
 
-       vm_map(((vm_named_entry_t)
-                       (*shared_data_region_handle)->ip_kobject)->backing.map,
-                       &table_mapping_address,
-                       data_table_size, 0, SHARED_LIB_ALIAS, 
-                       sfma_handle, 0, FALSE, 
-                       VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
+       kret = vm_map(((vm_named_entry_t)
+                      (*data_region_handle)->ip_kobject)->backing.map,
+                     &table_mapping_address,
+                     data_table_size, 0,
+                     SHARED_LIB_ALIAS | VM_FLAGS_FIXED, 
+                     sfma_handle, 0, FALSE, 
+                     VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
 
+       SHARED_REGION_DEBUG(("shared_file_init() done\n"));
+       return kret;
 }
 
-/* A call made from user space, copyin_shared_file requires the user to */
-/* provide the address and size of a mapped file, the full path name of */
-/* that file and a list of offsets to be mapped into shared memory.     */
-/* By requiring that the file be pre-mapped, copyin_shared_file can     */
-/* guarantee that the file is neither deleted nor changed after the user */
-/* begins the call.  */
+static kern_return_t
+shared_file_header_init(
+       shared_file_info_t              *shared_file_header)
+{
+       vm_size_t               hash_table_size;
+       vm_size_t               hash_table_offset;
+       int                     i;
+       /* wire hash entry pool only as needed, since we are the only */
+       /* users, we take a few liberties with the population of our  */
+       /* zone. */
+       static int              allocable_hash_pages;
+       static vm_offset_t      hash_cram_address;
+       
+               
+       hash_table_size = shared_file_header->hash_size 
+               * sizeof (struct queue_entry);
+       hash_table_offset = hash_table_size + 
+               round_page(sizeof (struct sf_mapping));
+       for (i = 0; i < shared_file_header->hash_size; i++)
+               queue_init(&shared_file_header->hash[i]);
+
+       allocable_hash_pages = (((hash_table_size << 5) - hash_table_offset)
+                               / PAGE_SIZE);
+       hash_cram_address = ((vm_offset_t) shared_file_header)
+               + hash_table_offset;
+       shared_file_available_hash_ele = 0;
+
+       shared_file_header->hash_init = TRUE;
+
+       if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
+               int cram_pages, cram_size;
 
+               cram_pages = allocable_hash_pages > 3 ? 
+                                       3 : allocable_hash_pages;
+               cram_size = cram_pages * PAGE_SIZE;
+               if (vm_map_wire(kernel_map, hash_cram_address,
+                               hash_cram_address + cram_size, 
+                               VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
+                       printf("shared_file_header_init: "
+                              "No memory for data table\n");
+                       return KERN_NO_SPACE;
+               }
+               allocable_hash_pages -= cram_pages;
+               zcram(lsf_zone, (void *) hash_cram_address, cram_size);
+               shared_file_available_hash_ele 
+                               += cram_size/sizeof(struct load_file_ele);
+               hash_cram_address += cram_size;
+       }
+
+       return KERN_SUCCESS;
+}
+
+
+/*
+ * map_shared_file:
+ *
+ * Attempt to map a split library into the shared region.  Check if the mappings
+ * are already in place.
+ */
 kern_return_t
-copyin_shared_file(
-       vm_offset_t     mapped_file,
-       vm_size_t       mapped_file_size,
-       vm_offset_t     *base_address, 
-       int             map_cnt,
-       sf_mapping_t    *mappings,
-       memory_object_control_t file_control,
+map_shared_file(
+       int                             map_cnt,
+       struct shared_file_mapping_np   *mappings,
+       memory_object_control_t         file_control,
+       memory_object_size_t            file_size,
        shared_region_task_mappings_t   sm_info,
-       int             *flags)
+       mach_vm_offset_t                base_offset,
+       mach_vm_offset_t                *slide_p)
 {
-       vm_object_t     file_object;
-       vm_map_entry_t          entry;
+       vm_object_t             file_object;
        shared_file_info_t      *shared_file_header;
        load_struct_t           *file_entry;
        loaded_mapping_t        *file_mapping;
-       boolean_t               alternate;
        int                     i;
        kern_return_t           ret;
+       mach_vm_offset_t        slide;
 
-       /* wire hash entry pool only as needed, since we are the only */
-       /* users, we take a few liberties with the population of our  */
-       /* zone. */
-       static int                      allocable_hash_pages;
-       static vm_offset_t              hash_cram_address;
-       
+       SHARED_REGION_DEBUG(("map_shared_file()\n"));
 
        shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
 
@@ -327,89 +1183,29 @@ copyin_shared_file(
        /* mappings based on the file object */ 
 
        if(shared_file_header->hash_init == FALSE) {
-               vm_size_t       hash_table_size;
-               vm_size_t       hash_table_offset;
-               
-               hash_table_size = (shared_file_header->hash_size) 
-                                               * sizeof(struct queue_entry);
-               hash_table_offset = hash_table_size + 
-                                       round_page(sizeof(struct sf_mapping));
-               for (i = 0; i < shared_file_header->hash_size; i++)
-                       queue_init(&shared_file_header->hash[i]);
-
-               allocable_hash_pages = 
-                       ((hash_table_size<<5) - hash_table_offset)/PAGE_SIZE;
-               hash_cram_address = 
-                       sm_info->region_mappings + hash_table_offset;
-               shared_file_available_hash_ele = 0;
-
-               shared_file_header->hash_init = TRUE;
-       }
-
-       if ((shared_file_available_hash_ele < 20) && (allocable_hash_pages)) {
-               int cram_size;
-
-               cram_size = allocable_hash_pages > 3 ? 
-                                       3 : allocable_hash_pages;
-               allocable_hash_pages -= cram_size;
-               cram_size = cram_size * PAGE_SIZE;
-               if (vm_map_wire(kernel_map, hash_cram_address,
-                               hash_cram_address+cram_size, 
-                               VM_PROT_DEFAULT, FALSE) != KERN_SUCCESS) {
-                       panic("shared_file_init: No memory for data table");
+               ret = shared_file_header_init(shared_file_header);
+               if (ret != KERN_SUCCESS) {
+                       mutex_unlock(&shared_file_header->lock);
+                       return KERN_NO_SPACE;
                }
-               zcram(lsf_zone, hash_cram_address, cram_size);
-               shared_file_available_hash_ele 
-                               += cram_size/sizeof(struct load_file_ele);
-               hash_cram_address += cram_size;
        }
 
        
        /* Find the entry in the map associated with the current mapping */
        /* of the file object */
        file_object = memory_object_control_to_vm_object(file_control);
-       if(vm_map_lookup_entry(current_map(), mapped_file, &entry)) {
-               vm_object_t     mapped_object;
-               if(entry->is_sub_map) {
-                       mutex_unlock(&shared_file_header->lock);
-                       return KERN_INVALID_ADDRESS;
-               }
-               mapped_object = entry->object.vm_object;
-               while(mapped_object->shadow != NULL) {
-                       mapped_object = mapped_object->shadow;
-               }
-               /* check to see that the file object passed is indeed the */
-               /* same as the mapped object passed */
-               if(file_object != mapped_object) {
-                       if(sm_info->flags & SHARED_REGION_SYSTEM) {
-                               mutex_unlock(&shared_file_header->lock);
-                               return KERN_PROTECTION_FAILURE;
-                       } else {
-                               file_object = mapped_object;
-                       }
-               }
-       } else {
-               mutex_unlock(&shared_file_header->lock);
-               return KERN_INVALID_ADDRESS;
-       }
-
-       alternate = (*flags & ALTERNATE_LOAD_SITE) ? TRUE : FALSE;
 
-       if (file_entry = lsf_hash_lookup(shared_file_header->hash, 
-                       (void *) file_object, shared_file_header->hash_size, 
-                       alternate, sm_info)) {
+       file_entry = lsf_hash_lookup(shared_file_header->hash, 
+                                    (void *) file_object,
+                                    mappings[0].sfm_file_offset,
+                                    shared_file_header->hash_size, 
+                                    TRUE, TRUE, sm_info);
+       if (file_entry) {
                /* File is loaded, check the load manifest for exact match */
                /* we simplify by requiring that the elements be the same  */
                /* size and in the same order rather than checking for     */
                /* semantic equivalence. */
 
-               /* If the file is being loaded in the alternate        */
-               /* area, one load to alternate is allowed per mapped   */
-               /* object the base address is passed back to the       */
-               /* caller and the mappings field is filled in.  If the */
-               /* caller does not pass the precise mappings_cnt       */
-               /* and the Alternate is already loaded, an error       */
-               /* is returned.  */
                i = 0;
                file_mapping = file_entry->mappings;
                while(file_mapping != NULL) {
@@ -417,15 +1213,12 @@ copyin_shared_file(
                                mutex_unlock(&shared_file_header->lock);
                                return KERN_INVALID_ARGUMENT;
                        }
-                       if(((mappings[i].mapping_offset)
-                                               & SHARED_DATA_REGION_MASK) !=
-                                               file_mapping->mapping_offset ||
-                                       mappings[i].size != 
-                                               file_mapping->size ||   
-                                       mappings[i].file_offset != 
-                                               file_mapping->file_offset ||    
-                                       mappings[i].protection != 
-                                               file_mapping->protection) {
+                       if(((mappings[i].sfm_address)
+                           & SHARED_DATA_REGION_MASK) !=
+                          file_mapping->mapping_offset ||
+                          mappings[i].sfm_size != file_mapping->size ||        
+                          mappings[i].sfm_file_offset != file_mapping->file_offset ||  
+                          mappings[i].sfm_init_prot != file_mapping->protection) {
                                break;
                        }
                        file_mapping = file_mapping->next;
@@ -435,31 +1228,70 @@ copyin_shared_file(
                        mutex_unlock(&shared_file_header->lock);
                        return KERN_INVALID_ARGUMENT;
                }
-               *base_address = (*base_address & ~SHARED_TEXT_REGION_MASK) 
-                                               + file_entry->base_address;
-               *flags = SF_PREV_LOADED;
+
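+               /*
+                * The file is already mapped at file_entry->base_address;
+                * the caller asked for it at base_offset.  "slide" is how
+                * far the existing mapping is from the requested location
+                * (zero if they coincide).
+                */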
+               slide = file_entry->base_address - base_offset; 
+               if (slide_p != NULL) {
+                       /*
+                        * File already mapped but at different address,
+                        * and the caller is OK with the sliding.
+                        */
+                       *slide_p = slide;
+                       ret = KERN_SUCCESS;
+               } else {
+                       /*
+                        * The caller doesn't want any sliding.  The file needs
+                        * to be mapped at the requested address or not mapped.
+                        */
+                       if (slide != 0) {
+                               /*
+                                * The file is already mapped but at a different
+                                * address.
+                                * We fail.
+                                * XXX should we attempt to load at
+                                * requested address too?
+                                */
+                               ret = KERN_FAILURE;
+                       } else {
+                               /*
+                                * The file is already mapped at the correct
+                                * address.
+                                * We're done!
+                                */
+                               ret = KERN_SUCCESS;
+                       }
+               }
                mutex_unlock(&shared_file_header->lock);
-               return KERN_SUCCESS;
+               return ret;
        } else {
                /* File is not loaded, let's attempt to load it */
-               ret = lsf_load(mapped_file, mapped_file_size, base_address,
-                                            mappings, map_cnt, 
-                                            (void *)file_object, 
-                                            *flags, sm_info);
-               *flags = 0;
+               ret = lsf_map(mappings, map_cnt, 
+                             (void *)file_control, 
+                             file_size,
+                             sm_info,
+                             base_offset,
+                             slide_p);
                if(ret == KERN_NO_SPACE) {
                        shared_region_mapping_t regions;
+                       shared_region_mapping_t system_region;
                        regions = (shared_region_mapping_t)sm_info->self;
                        regions->flags |= SHARED_REGION_FULL;
-                       if(regions == system_shared_region) {
-                               shared_region_mapping_t new_system_shared_regions;
-                               shared_file_boot_time_init();
+                       system_region = lookup_default_shared_region(
+                               regions->fs_base, regions->system);
+                       if (system_region == regions) {
+                               shared_region_mapping_t new_system_shared_region;
+                               shared_file_boot_time_init(
+                                       regions->fs_base, regions->system);
                                /* current task must stay with its current */
                                /* regions, drop count on system_shared_region */
                                /* and put back our original set */
-                               vm_get_shared_region(current_task(), &new_system_shared_regions);
-                               shared_region_mapping_dealloc(new_system_shared_regions);
+                               vm_get_shared_region(current_task(), 
+                                               &new_system_shared_region);
+                               shared_region_mapping_dealloc_lock(
+                                       new_system_shared_region, 0, 1);
                                vm_set_shared_region(current_task(), regions);
+                       } else if (system_region != NULL) {
+                               shared_region_mapping_dealloc_lock(
+                                       system_region, 0, 1);
                        }
                }
                mutex_unlock(&shared_file_header->lock);
@@ -467,6 +1299,175 @@ copyin_shared_file(
        }
 }
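+/*
+ * Slide-contract sketch (hypothetical caller fragment; the other
+ * arguments to map_shared_file() are elided here):
+ *
+ *	mach_vm_offset_t slide;
+ *
+ *	kr = map_shared_file(..., base_offset, &slide);
+ *	if (kr == KERN_SUCCESS && slide != 0) {
+ *		// file got mapped "slide" bytes away from base_offset
+ *	}
+ *	kr = map_shared_file(..., base_offset, NULL);
+ *	// NULL slide_p: mapped exactly at base_offset, or an error
+ */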
 
+/*
+ * shared_region_cleanup:
+ *
+ * Deallocates all the mappings in the shared region, except those explicitly
+ * specified in the "ranges" set of address ranges.
+ */
+kern_return_t
+shared_region_cleanup(
+       unsigned int                    range_count,
+       struct shared_region_range_np   *ranges,
+       shared_region_task_mappings_t   sm_info)
+{
+       kern_return_t           kr;
+       ipc_port_t              region_handle;
+       vm_named_entry_t        region_named_entry;
+       vm_map_t                text_submap, data_submap, submap, next_submap;
+       unsigned int            i_range;
+       vm_map_offset_t         range_start, range_end;
+       vm_map_offset_t         submap_base, submap_end, submap_offset;
+       vm_map_size_t           delete_size;
+
+       struct shared_region_range_np   tmp_range;
+       unsigned int                    sort_index, sorted_index;
+       vm_map_offset_t                 sort_min_address;
+       unsigned int                    sort_min_index;
+
+       /*
+        * Since we want to deallocate the holes between the "ranges",
+        * sort the array by increasing addresses.
+        */
+       for (sorted_index = 0;
+            sorted_index < range_count;
+            sorted_index++) {
+
+               /* first remaining entry is our new starting point */
+               sort_min_index = sorted_index;
+               sort_min_address = ranges[sort_min_index].srr_address;
+
+               /* find the lowest srr_address in the remaining entries */
+               for (sort_index = sorted_index + 1;
+                    sort_index < range_count;
+                    sort_index++) {
+                       if (ranges[sort_index].srr_address < sort_min_address) {
+                               /* lowest address so far... */
+                               sort_min_index = sort_index;
+                               sort_min_address =
+                                       ranges[sort_min_index].srr_address;
+                       }
+               }
+
+               if (sort_min_index != sorted_index) {
+                       /* swap entries */
+                       tmp_range = ranges[sort_min_index];
+                       ranges[sort_min_index] = ranges[sorted_index];
+                       ranges[sorted_index] = tmp_range;
+               }
+       }
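+       /*
+        * Hypothetical example: ranges {0x9000,0x1000}, {0x1000,0x2000},
+        * {0x5000,0x1000} are reordered to start at 0x1000, 0x5000, 0x9000,
+        * so the loop below can walk each submap once and deallocate the
+        * gaps between consecutive kept ranges.
+        */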
+
+       region_handle = (ipc_port_t) sm_info->text_region;
+       region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
+       text_submap = region_named_entry->backing.map;
+
+       region_handle = (ipc_port_t) sm_info->data_region;
+       region_named_entry = (vm_named_entry_t) region_handle->ip_kobject;
+       data_submap = region_named_entry->backing.map;
+
+       submap = text_submap;
+       next_submap = submap;
+       submap_base = sm_info->client_base;
+       submap_offset = 0;
+       submap_end = submap_base + sm_info->text_size;
+       for (i_range = 0;
+            i_range < range_count;
+            i_range++) {
+
+               /* get the next range of addresses to keep */
+               range_start = ranges[i_range].srr_address;
+               range_end = range_start + ranges[i_range].srr_size;
+               /* align them to page boundaries */
+               range_start = vm_map_trunc_page(range_start);
+               range_end = vm_map_round_page(range_end);
+
+               /* make sure we don't go beyond the submap's boundaries */
+               if (range_start < submap_base) {
+                       range_start = submap_base;
+               } else if (range_start >= submap_end) {
+                       range_start = submap_end;
+               }
+               if (range_end < submap_base) {
+                       range_end = submap_base;
+               } else if (range_end >= submap_end) {
+                       range_end = submap_end;
+               }
+
+               if (range_start > submap_base + submap_offset) {
+                       /*
+                        * Deallocate everything between the last offset in the
+                        * submap and the start of this range.
+                        */
+                       delete_size = range_start -
+                               (submap_base + submap_offset);
+                       (void) vm_deallocate(submap,
+                                            submap_offset,
+                                            delete_size);
+               } else {
+                       delete_size = 0;
+               }
+
+               /* skip to the end of the range */
+               submap_offset += delete_size + (range_end - range_start);
+
+               if (submap_base + submap_offset >= submap_end) {
+                       /* get to next submap */
+
+                       if (submap == data_submap) {
+                               /* no other submap after data: done! */
+                               break;
+                       }
+
+                       /* get original range again */
+                       range_start = ranges[i_range].srr_address;
+                       range_end = range_start + ranges[i_range].srr_size;
+                       range_start = vm_map_trunc_page(range_start);
+                       range_end = vm_map_round_page(range_end);
+
+                       if (range_end > submap_end) {
+                               /*
+                                * This last range overlaps with the next
+                                * submap.  We need to process it again
+                                * after switching submaps.  Otherwise, we'll
+                                * just continue with the next range.
+                                */
+                               i_range--;
+                       }
+
+                       if (submap == text_submap) {
+                               /*
+                                * Switch to the data submap.
+                                */
+                               submap = data_submap;
+                               submap_offset = 0;
+                               submap_base = sm_info->client_base + 
+                                       sm_info->text_size;
+                               submap_end = submap_base + sm_info->data_size;
+                       }
+               }
+       }
+
+       if (submap_base + submap_offset < submap_end) {
+               /* delete remainder of this submap, from "offset" to the end */
+               (void) vm_deallocate(submap,
+                                    submap_offset,
+                                    submap_end - submap_base - submap_offset);
+               /* if we never reached the data submap, delete all of it */
+               if (submap == text_submap) {
+                       submap = data_submap;
+                       submap_offset = 0;
+                       submap_base = sm_info->client_base + sm_info->text_size;
+                       submap_end = submap_base + sm_info->data_size;
+                       (void) vm_deallocate(data_submap,
+                                            0,
+                                            submap_end - submap_base);
+               }
+       }
+
+       kr = KERN_SUCCESS;
+       return kr;
+}
+
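+/*
+ * Usage sketch (hypothetical caller values; the srr_* fields are the ones
+ * declared for struct shared_region_range_np):
+ *
+ *	struct shared_region_range_np keep[1];
+ *	keep[0].srr_address = base;	// start of the range to preserve
+ *	keep[0].srr_size = size;	// its length in bytes
+ *	kr = shared_region_cleanup(1, keep, sm_info);
+ *
+ * Note that the sort above reorders the caller's "ranges" array in place.
+ */
+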
 /* A hash lookup function for the list of loaded files in      */
 /* shared_memory_server space.  */
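 /* The bucket index comes from load_file_hash(), defined earlier in this */
 /* file; it is (roughly) the low bits of the file object's address taken */
 /* modulo the hash table size, so lookups key on the identity of the     */
 /* file's backing VM object and, below, on its file offset.              */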
 
@@ -474,7 +1475,9 @@ static load_struct_t  *
 lsf_hash_lookup(
        queue_head_t                    *hash_table,
        void                            *file_object,
+       vm_offset_t                     recognizableOffset,
        int                             size,
+       boolean_t                       regular,
        boolean_t                       alternate,
        shared_region_task_mappings_t   sm_info)
 {
@@ -483,25 +1486,45 @@ lsf_hash_lookup(
        shared_region_mapping_t target_region;
        int                     depth;
        
+       LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
+                  "reg=%d alt=%d sm_info=%p\n",
+                  hash_table, file_object, recognizableOffset, size,
+                  regular, alternate, sm_info));
+
        bucket = &(hash_table[load_file_hash((int)file_object, size)]);
        for (entry = (load_struct_t *)queue_first(bucket);
                !queue_end(bucket, &entry->links);
                entry = (load_struct_t *)queue_next(&entry->links)) {
-               if (entry->file_object == (int)file_object) {
+
+               if ((entry->file_object == (int)file_object) &&
+                    (entry->file_offset == recognizableOffset)) {
                   target_region = (shared_region_mapping_t)sm_info->self;
                   depth = target_region->depth;
                   while(target_region) {
                      if((!(sm_info->self)) ||
                                ((target_region == entry->regions_instance) &&
                                (target_region->depth >= entry->depth))) {
-                       if(alternate) {
-                               if (entry->base_address >= 
-                                               sm_info->alternate_base) 
-                                       return entry;
-                       } else {
-                               if (entry->base_address < 
-                                               sm_info->alternate_base) 
-                                       return entry;
+                       if(alternate &&
+                          entry->base_address >= sm_info->alternate_base) {
+                               LSF_DEBUG(("lsf_hash_lookup: "
+                                          "alt=%d found entry %p "
+                                          "(base=0x%x "
+                                          "alt_base=0x%x)\n",
+                                          alternate, entry,
+                                          entry->base_address,
+                                          sm_info->alternate_base));
+                               return entry;
+                       }
+                       if (regular &&
+                           entry->base_address < sm_info->alternate_base) {
+                               LSF_DEBUG(("lsf_hash_lookup: "
+                                          "reg=%d found entry %p "
+                                          "(base=0x%x "
+                                          "alt_base=0x%x)\n",
+                                          regular, entry,
+                                          entry->base_address,
+                                          sm_info->alternate_base));
+                               return entry;
                        }
                      }
                      if(target_region->object_chain) {
@@ -515,26 +1538,38 @@ lsf_hash_lookup(
                }
        }
 
+       LSF_DEBUG(("lsf_hash_lookup: table=%p, file=%p, offset=0x%x size=0x%x "
+                  "reg=%d alt=%d sm_info=%p NOT FOUND\n",
+                  hash_table, file_object, recognizableOffset, size,
+                  regular, alternate, sm_info));
        return (load_struct_t *)0;
 }
 
-load_struct_t *
-lsf_remove_regions_mappings(
+__private_extern__ load_struct_t *
+lsf_remove_regions_mappings_lock(
        shared_region_mapping_t region,
-       shared_region_task_mappings_t   sm_info)
+       shared_region_task_mappings_t   sm_info,
+       int need_sfh_lock)
 {
        int                     i;
        register queue_t        bucket;
        shared_file_info_t      *shared_file_header;
        load_struct_t           *entry;
        load_struct_t           *next_entry;
-       load_struct_t           *prev_entry;
 
        shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
 
-       mutex_lock(&shared_file_header->lock);
+       LSF_DEBUG(("lsf_remove_regions_mappings_lock(region=%p,sm_info=%p) "
+                  "sfh=%p\n",
+                  region, sm_info, shared_file_header));
+       if (need_sfh_lock)
+               mutex_lock(&shared_file_header->lock);
        if(shared_file_header->hash_init == FALSE) {
-               mutex_unlock(&shared_file_header->lock);
+               if (need_sfh_lock)
+                       mutex_unlock(&shared_file_header->lock);
+               LSF_DEBUG(("lsf_remove_regions_mappings_lock"
+                          "(region=%p,sm_info=%p): not inited\n",
+                          region, sm_info));
                return NULL;
        }
        for(i = 0;  i<shared_file_header->hash_size; i++) {
@@ -543,13 +1578,40 @@ lsf_remove_regions_mappings(
                        !queue_end(bucket, &entry->links);) {
                   next_entry = (load_struct_t *)queue_next(&entry->links);
                   if(region == entry->regions_instance) {
-                       lsf_unload((void *)entry->file_object, 
+                          LSF_DEBUG(("lsf_remove_regions_mappings_lock: "
+                                     "entry %p region %p: "
+                                     "unloading\n",
+                                     entry, region));
+                          lsf_unload((void *)entry->file_object, 
                                        entry->base_address, sm_info);
+                  } else {
+                          LSF_DEBUG(("lsf_remove_regions_mappings_lock: "
+                                     "entry %p region %p target region %p: "
+                                     "not unloading\n",
+                                     entry, entry->regions_instance, region));
                   }
+                          
                   entry = next_entry;
                }
        }
-       mutex_unlock(&shared_file_header->lock);
+       if (need_sfh_lock)
+               mutex_unlock(&shared_file_header->lock);
+       LSF_DEBUG(("lsf_remove_regions_mappings_lock: done\n"));
+
+       return NULL;    /* XXX */
+}
+
+/*
+ * Symbol compatibility; we believe shared_region_mapping_dealloc() is the
+ * only caller.  Remove this stub function and the corresponding symbol
+ * export for Merlot.
+ */
+load_struct_t *
+lsf_remove_regions_mappings(
+       shared_region_mapping_t region,
+       shared_region_task_mappings_t   sm_info)
+{
+       return lsf_remove_regions_mappings_lock(region, sm_info, 1);
 }
 
 /* Removes a map_list (list of loaded extents) for a file from      */
@@ -564,7 +1626,9 @@ lsf_hash_delete(
        register queue_t        bucket;
        shared_file_info_t      *shared_file_header;
        load_struct_t           *entry;
-       load_struct_t           *prev_entry;
+
+       LSF_DEBUG(("lsf_hash_delete(file=%p,base=0x%x,sm_info=%p)\n",
+                  file_object, base_offset, sm_info));
 
        shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
 
@@ -580,11 +1644,13 @@ lsf_hash_delete(
                                (entry->base_address == base_offset)) {
                                queue_remove(bucket, entry, 
                                                load_struct_ptr_t, links);
+                               LSF_DEBUG(("lsf_hash_delete: found it\n"));
                                return entry;
                        }
                }
        }
 
+       LSF_DEBUG(("lsf_hash_delete: not found\n"));
        return (load_struct_t *)0;
 }
 
@@ -598,6 +1664,9 @@ lsf_hash_insert(
 {
        shared_file_info_t *shared_file_header;
 
+       LSF_DEBUG(("lsf_hash_insert(entry=%p,sm_info=%p): file=%p base=0x%x\n",
+                  entry, sm_info, entry->file_object, entry->base_address));
+
        shared_file_header = (shared_file_info_t *)sm_info->region_mappings;
        queue_enter(&shared_file_header->hash
                        [load_file_hash(entry->file_object, 
@@ -605,34 +1674,328 @@ lsf_hash_insert(
                        entry, load_struct_ptr_t, links);
 }
        
-/* Looks up the file type requested.  If already loaded and the */
-/* file extents are an exact match, returns Success.  If not    */
-/* loaded attempts to load the file extents at the given offsets */
-/* if any extent fails to load or if the file was already loaded */
-/* in a different configuration, lsf_load fails.                 */
 
+
+/*
+ * lsf_slide:
+ *
+ * Look in the shared region, starting from the end, for a place to fit all the
+ * mappings while respecting their relative offsets.
+ */
 static kern_return_t
-lsf_load(
-       vm_offset_t     mapped_file,
-       vm_size_t       mapped_file_size,
-       vm_offset_t     *base_address, 
-       sf_mapping_t    *mappings,
-       int             map_cnt,
-       void            *file_object,
-       int             flags,
-       shared_region_task_mappings_t   sm_info)
+lsf_slide(
+       unsigned int                    map_cnt,
+       struct shared_file_mapping_np   *mappings_in,
+       shared_region_task_mappings_t   sm_info,
+       mach_vm_offset_t                *base_offset_p)
 {
+       mach_vm_offset_t                max_mapping_offset;
+       int                             i;
+       vm_map_entry_t                  map_entry, prev_entry, next_entry;
+       mach_vm_offset_t                prev_hole_start, prev_hole_end;
+       mach_vm_offset_t                mapping_offset, mapping_end_offset;
+       mach_vm_offset_t                base_offset;
+       mach_vm_size_t                  mapping_size;
+       mach_vm_offset_t                wiggle_room, wiggle;
+       vm_map_t                        text_map, data_map, map;
+       vm_named_entry_t                region_entry;
+       ipc_port_t                      region_handle;
+       kern_return_t                   kr;
+
+       struct shared_file_mapping_np   *mappings, tmp_mapping;
+       unsigned int                    sort_index, sorted_index;
+       vm_map_offset_t                 sort_min_address;
+       unsigned int                    sort_min_index;
+
+       /*
+        * Sort the mappings array, so that we can try and fit them in
+        * the right order as we progress along the VM maps.
+        *
+        * We can't modify the original array (the original order is
+        * important when doing lookups of the mappings), so copy it first.
+        */
+
+       kr = kmem_alloc(kernel_map,
+                       (vm_offset_t *) &mappings,
+                       (vm_size_t) (map_cnt * sizeof (mappings[0])));
+       if (kr != KERN_SUCCESS) {
+               return KERN_NO_SPACE;
+       }
 
+       bcopy(mappings_in, mappings, map_cnt * sizeof (mappings[0]));
+
+       max_mapping_offset = 0;
+       for (sorted_index = 0;
+            sorted_index < map_cnt;
+            sorted_index++) {
+
+               /* first remaining entry is our new starting point */
+               sort_min_index = sorted_index;
+               mapping_end_offset = ((mappings[sort_min_index].sfm_address &
+                                      SHARED_TEXT_REGION_MASK) +
+                                     mappings[sort_min_index].sfm_size);
+               sort_min_address = mapping_end_offset;
+               /* compute the highest mapping end offset as well... */
+               if (mapping_end_offset > max_mapping_offset) {
+                       max_mapping_offset = mapping_end_offset;
+               }
+               /* find the lowest mapping end offset in the remaining entries */
+               for (sort_index = sorted_index + 1;
+                    sort_index < map_cnt;
+                    sort_index++) {
+
+                       mapping_end_offset =
+                               ((mappings[sort_index].sfm_address &
+                                 SHARED_TEXT_REGION_MASK) +
+                                mappings[sort_index].sfm_size);
+
+                       if (mapping_end_offset < sort_min_address) {
+                               /* lowest mapping end offset so far... */
+                               sort_min_index = sort_index;
+                               sort_min_address = mapping_end_offset;
+                       }
+               }
+               if (sort_min_index != sorted_index) {
+                       /* swap entries */
+                       tmp_mapping = mappings[sort_min_index];
+                       mappings[sort_min_index] = mappings[sorted_index];
+                       mappings[sorted_index] = tmp_mapping;
+               }
+
+       }
+
+       max_mapping_offset = vm_map_round_page(max_mapping_offset);
+
+       /* start from the end of the shared area */
+       base_offset = sm_info->text_size;
+
+       /* can all the mappings fit ? */
+       if (max_mapping_offset > base_offset) {
+               kmem_free(kernel_map,
+                         (vm_offset_t) mappings,
+                         map_cnt * sizeof (mappings[0]));
+               return KERN_FAILURE;
+       }
+
+       /*
+        * Align the last mapping to the end of the submaps
+        * and start from there.
+        */
+       base_offset -= max_mapping_offset;
+
+       region_handle = (ipc_port_t) sm_info->text_region;
+       region_entry = (vm_named_entry_t) region_handle->ip_kobject;
+       text_map = region_entry->backing.map;
+
+       region_handle = (ipc_port_t) sm_info->data_region;
+       region_entry = (vm_named_entry_t) region_handle->ip_kobject;
+       data_map = region_entry->backing.map;
+
+       vm_map_lock_read(text_map);
+       vm_map_lock_read(data_map);
+
+start_over:
+       /*
+        * At first, we can wiggle all the way from our starting point
+        * (base_offset) towards the start of the map (0), if needed.
+        */
+       wiggle_room = base_offset;
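+       /*
+        * Hypothetical example: with base_offset = 0x1000, a mapping at
+        * submap offset 0 whose start address 0x1000 is already mapped,
+        * and a hole below it that ends at 0x800, the wiggle computed
+        * below is (0x1000 - 0x800) + mapping_size, which slides the
+        * whole set down so that this mapping ends exactly at 0x800.
+        * Every wiggle consumes wiggle_room; needing more than is left
+        * would invalidate the mappings already placed, so we go back
+        * to start_over with the new, lower base_offset.
+        */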
+
+       for (i = (signed) map_cnt - 1; i >= 0; i--) {
+               if (mappings[i].sfm_init_prot & VM_PROT_COW) {
+                       /* copy-on-write mappings are in the data submap */
+                       map = data_map;
+               } else {
+                       /* other mappings are in the text submap */
+                       map = text_map;
+               }
+               /* get the offset within the appropriate submap */
+               mapping_offset = (mappings[i].sfm_address &
+                                 SHARED_TEXT_REGION_MASK);
+               mapping_size = mappings[i].sfm_size;
+               mapping_end_offset = mapping_offset + mapping_size;
+               mapping_offset = vm_map_trunc_page(mapping_offset);
+               mapping_end_offset = vm_map_round_page(mapping_end_offset);
+               mapping_size = mapping_end_offset - mapping_offset;
+
+               for (;;) {
+                       if (vm_map_lookup_entry(map,
+                                               base_offset + mapping_offset,
+                                               &map_entry)) {
+                               /*
+                                * The start address for that mapping
+                                * is already mapped: no fit.
+                                * Locate the hole immediately before this map
+                                * entry.
+                                */
+                               prev_hole_end = map_entry->vme_start;
+                               prev_entry = map_entry->vme_prev;
+                               if (prev_entry == vm_map_to_entry(map)) {
+                                       /* no previous entry */
+                                       prev_hole_start = map->min_offset;
+                               } else {
+                                       /* previous entry ends here */
+                                       prev_hole_start = prev_entry->vme_end;
+                               }
+                       } else {
+                               /*
+                                * The start address for that mapping is not
+                                * mapped.
+                                * Locate the start and end of the hole
+                                * at that location.
+                                */
+                               /* map_entry is the previous entry */
+                               if (map_entry == vm_map_to_entry(map)) {
+                                       /* no previous entry */
+                                       prev_hole_start = map->min_offset;
+                               } else {
+                                       /* previous entry ends there */
+                                       prev_hole_start = map_entry->vme_end;
+                               }
+                               next_entry = map_entry->vme_next;
+                               if (next_entry == vm_map_to_entry(map)) {
+                                       /* no next entry */
+                                       prev_hole_end = map->max_offset;
+                               } else {
+                                       prev_hole_end = next_entry->vme_start;
+                               }
+                       }
+
+                       if (prev_hole_end <= base_offset + mapping_offset) {
+                               /* hole is to our left: try and wiggle to fit */
+                               wiggle = base_offset + mapping_offset
+                                       - prev_hole_end + mapping_size;
+                               if (wiggle > base_offset) {
+                                       /* we're getting out of the map */
+                                       kr = KERN_FAILURE;
+                                       goto done;
+                               }
+                               base_offset -= wiggle;
+                               if (wiggle > wiggle_room) {
+                                       /* can't wiggle that much: start over */
+                                       goto start_over;
+                               }
+                               /* account for the wiggling done */
+                               wiggle_room -= wiggle;
+                       }
+
+                       if (prev_hole_end >
+                           base_offset + mapping_offset + mapping_size) {
+                               /*
+                                * The hole extends further to the right
+                                * than what we need.  Ignore the extra space.
+                                */
+                               prev_hole_end = (base_offset + mapping_offset +
+                                                mapping_size);
+                       }
+
+                       if (prev_hole_end <
+                           base_offset + mapping_offset + mapping_size) {
+                               /*
+                                * The hole is not big enough to establish
+                                * the mapping right there:  wiggle towards
+                                * the beginning of the hole so that the end
+                                * of our mapping fits in the hole...
+                                */
+                               wiggle = base_offset + mapping_offset
+                                       + mapping_size - prev_hole_end;
+                               if (wiggle > base_offset) {
+                                       /* we're getting out of the map */
+                                       kr = KERN_FAILURE;
+                                       goto done;
+                               }
+                               base_offset -= wiggle;
+                               if (wiggle > wiggle_room) {
+                                       /* can't wiggle that much: start over */
+                                       goto start_over;
+                               }
+                               /* account for the wiggling done */
+                               wiggle_room -= wiggle;
+
+                               /* keep searching from this new base */
+                               continue;
+                       }
+
+                       if (prev_hole_start > base_offset + mapping_offset) {
+                               /* no hole found: keep looking */
+                               continue;
+                       }
+
+                       /* compute wiggling room at this hole */
+                       wiggle = base_offset + mapping_offset - prev_hole_start;
+                       if (wiggle < wiggle_room) {
+                               /* less wiggle room than before... */
+                               wiggle_room = wiggle;
+                       }
+
+                       /* found a hole that fits: skip to next mapping */
+                       break;
+               } /* while we look for a hole */
+       } /* for each mapping */
+
+       *base_offset_p = base_offset;
+       kr = KERN_SUCCESS;
+
+done:
+       vm_map_unlock_read(text_map);
+       vm_map_unlock_read(data_map);
+
+       kmem_free(kernel_map,
+                 (vm_offset_t) mappings,
+                 map_cnt * sizeof (mappings[0]));
+
+       return kr;
+}
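+
+/*
+ * On KERN_SUCCESS, *base_offset_p holds an offset such that, for every
+ * mapping i,
+ *	base_offset + (mappings_in[i].sfm_address & SHARED_TEXT_REGION_MASK)
+ * lies in an unmapped hole of the appropriate submap; lsf_map() then
+ * retries the whole set of mappings from that new base (see its
+ * "restart_after_slide" label).
+ */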
+
+/*
+ * lsf_map:
+ *
+ * Attempt to map a split library into the shared region.
+ */
+static kern_return_t
+lsf_map(
+       struct shared_file_mapping_np   *mappings,
+       int                             map_cnt,
+       void                            *file_control,
+       memory_object_offset_t          file_size,
+       shared_region_task_mappings_t   sm_info,
+       mach_vm_offset_t                base_offset,
+       mach_vm_offset_t                *slide_p)
+{
        load_struct_t           *entry;
-       vm_map_copy_t           copy_object;
        loaded_mapping_t        *file_mapping;
        loaded_mapping_t        **tptr;
+       ipc_port_t              region_handle;
+       vm_named_entry_t        region_entry;
+       mach_port_t             map_port;
+       vm_object_t             file_object;
+       kern_return_t           kr;
        int                     i;
-       ipc_port_t      local_map;
-       vm_offset_t     original_alt_load_next;
-       vm_offset_t     alternate_load_next;
+       mach_vm_offset_t        original_base_offset;
+
+       /* get the VM object from the file's memory object handle */
+       file_object = memory_object_control_to_vm_object(file_control);
 
+       original_base_offset = base_offset;
+
+       LSF_DEBUG(("lsf_map"
+                  "(cnt=%d,file=%p,sm_info=%p)"
+                  "\n",
+                  map_cnt, file_object,
+                  sm_info));
+
+restart_after_slide:
+       /* get a new "load_struct_t" to describe the mappings for that file */
        entry = (load_struct_t *)zalloc(lsf_zone);
+       LSF_ALLOC_DEBUG(("lsf_map: entry=%p map_cnt=%d\n", entry, map_cnt));
+       LSF_DEBUG(("lsf_map"
+                  "(cnt=%d,file=%p,sm_info=%p) "
+                  "entry=%p\n",
+                  map_cnt, file_object,
+                  sm_info, entry));
+       if (entry == NULL) {
+               printf("lsf_map: unable to allocate memory\n");
+               return KERN_NO_SPACE;
+       }
        shared_file_available_hash_ele--;
        entry->file_object = (int)file_object;
        entry->mapping_cnt = map_cnt;
@@ -641,135 +2004,141 @@ lsf_load(
        entry->links.next = (queue_entry_t) 0;
        entry->regions_instance = (shared_region_mapping_t)sm_info->self;
        entry->depth=((shared_region_mapping_t)sm_info->self)->depth;
+       entry->file_offset = mappings[0].sfm_file_offset;
 
+       /* insert the new file entry in the hash table, for later lookups */
        lsf_hash_insert(entry, sm_info);
-       tptr = &(entry->mappings);
-
-
-       alternate_load_next = sm_info->alternate_next;
-       original_alt_load_next = alternate_load_next;
-       if (flags & ALTERNATE_LOAD_SITE) {
-               int     max_loadfile_offset;
-
-               *base_address = ((*base_address) & ~SHARED_TEXT_REGION_MASK) +
-                                               sm_info->alternate_next;
-               max_loadfile_offset = 0;
-               for(i = 0; i<map_cnt; i++) {
-                       if(((mappings[i].mapping_offset 
-                               & SHARED_TEXT_REGION_MASK)+ mappings[i].size) >
-                               max_loadfile_offset) {
-                               max_loadfile_offset = 
-                                       (mappings[i].mapping_offset 
-                                               & SHARED_TEXT_REGION_MASK)
-                                               + mappings[i].size;
-                       }
-               }
-               if((alternate_load_next + round_page(max_loadfile_offset)) >=
-                       (sm_info->data_size - (sm_info->data_size>>9))) {
 
-                       return KERN_NO_SPACE;
-               }
-               alternate_load_next += round_page(max_loadfile_offset);
+       /* where we should add the next mapping description for that file */
+       tptr = &(entry->mappings);
 
-       } else {
-               if (((*base_address) & SHARED_TEXT_REGION_MASK) > 
-                                       sm_info->alternate_base) {
-                       entry->base_address = 
-                               (*base_address) & SHARED_TEXT_REGION_MASK;
-                       lsf_unload(file_object, entry->base_address, sm_info);
-                       return KERN_INVALID_ARGUMENT;
-               } 
-       }
+       entry->base_address = base_offset;
 
-       entry->base_address = (*base_address) & SHARED_TEXT_REGION_MASK;
 
-       /* copyin mapped file data */
-       for(i = 0; i<map_cnt; i++) {
-               vm_offset_t     target_address;
-               vm_offset_t     region_mask;
+       /* establish each requested mapping */
+       for (i = 0; i < map_cnt; i++) {
+               mach_vm_offset_t        target_address;
+               mach_vm_offset_t        region_mask;
 
-               if(mappings[i].protection & VM_PROT_COW) {
-                       local_map = (ipc_port_t)sm_info->data_region;
+               if (mappings[i].sfm_init_prot & VM_PROT_COW) {
+                       region_handle = (ipc_port_t)sm_info->data_region;
                        region_mask = SHARED_DATA_REGION_MASK;
-                       if((mappings[i].mapping_offset 
-                               & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) {
+                       if ((((mappings[i].sfm_address + base_offset)
+                             & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000) ||
+                           (((mappings[i].sfm_address + base_offset +
+                              mappings[i].sfm_size - 1)
+                             & GLOBAL_SHARED_SEGMENT_MASK) != 0x10000000)) {
                                lsf_unload(file_object, 
                                        entry->base_address, sm_info);
                                return KERN_INVALID_ARGUMENT;
                        }
                } else {
                        region_mask = SHARED_TEXT_REGION_MASK;
-                       local_map = (ipc_port_t)sm_info->text_region;
-                       if(mappings[i].mapping_offset 
-                                       & GLOBAL_SHARED_SEGMENT_MASK)  {
+                       region_handle = (ipc_port_t)sm_info->text_region;
+                       if (((mappings[i].sfm_address + base_offset)
+                            & GLOBAL_SHARED_SEGMENT_MASK) ||
+                           ((mappings[i].sfm_address + base_offset +
+                             mappings[i].sfm_size - 1)
+                            & GLOBAL_SHARED_SEGMENT_MASK)) {
                                lsf_unload(file_object, 
                                        entry->base_address, sm_info);
                                return KERN_INVALID_ARGUMENT;
                        }
                }
-               if(!(mappings[i].protection & VM_PROT_ZF)
-                               && ((mapped_file + mappings[i].file_offset + 
-                               mappings[i].size) > 
-                               (mapped_file + mapped_file_size))) {
+               if (!(mappings[i].sfm_init_prot & VM_PROT_ZF) &&
+                   ((mappings[i].sfm_file_offset + mappings[i].sfm_size) >
+                    (file_size))) {
                        lsf_unload(file_object, entry->base_address, sm_info);
                        return KERN_INVALID_ARGUMENT;
                }
-               target_address = ((mappings[i].mapping_offset) & region_mask)
-                                       + entry->base_address;
-               if(vm_allocate(((vm_named_entry_t)local_map->ip_kobject)
-                               ->backing.map, &target_address,
-                               mappings[i].size, FALSE)) {
-                       lsf_unload(file_object, entry->base_address, sm_info);
-                       return KERN_FAILURE;
+               target_address = entry->base_address +
+                       ((mappings[i].sfm_address) & region_mask);
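+               /*
+                * Zero-fill mappings pass MACH_PORT_NULL so that the
+                * mach_vm_map() call below backs the range with anonymous
+                * zero-filled memory; other mappings are backed by the
+                * file's pager.
+                */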
+               if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
+                       map_port = MACH_PORT_NULL;
+               } else {
+                       map_port = (ipc_port_t) file_object->pager;
                }
-               target_address = ((mappings[i].mapping_offset) & region_mask)
-                                       + entry->base_address;
-               if(!(mappings[i].protection & VM_PROT_ZF)) {
-                  if(vm_map_copyin(current_map(), 
-                       mapped_file + mappings[i].file_offset, 
-                       round_page(mappings[i].size), FALSE, &copy_object)) {
-                       vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
-                             ->backing.map, target_address, mappings[i].size);
-                       lsf_unload(file_object, entry->base_address, sm_info);
-                       return KERN_FAILURE;
-                  }
-                  if(vm_map_copy_overwrite(((vm_named_entry_t)
-                       local_map->ip_kobject)->backing.map, target_address,
-                       copy_object, FALSE)) {
-                       vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
-                            ->backing.map, target_address, mappings[i].size);
+               region_entry = (vm_named_entry_t) region_handle->ip_kobject;
+
+               if (mach_vm_map(region_entry->backing.map,
+                               &target_address,
+                               vm_map_round_page(mappings[i].sfm_size),
+                               0,
+                               VM_FLAGS_FIXED,
+                               map_port,
+                               mappings[i].sfm_file_offset,
+                               TRUE,
+                               (mappings[i].sfm_init_prot &
+                                (VM_PROT_READ|VM_PROT_EXECUTE)),
+                               (mappings[i].sfm_max_prot &
+                                (VM_PROT_READ|VM_PROT_EXECUTE)),
+                               VM_INHERIT_DEFAULT) != KERN_SUCCESS) {
                        lsf_unload(file_object, entry->base_address, sm_info);
+
+                       if (slide_p != NULL) {
+                               /*
+                                * Requested mapping failed but the caller
+                                * is OK with sliding the library in the
+                                * shared region, so let's try and slide it...
+                                */
+
+                               /* lookup an appropriate spot */
+                               kr = lsf_slide(map_cnt, mappings,
+                                              sm_info, &base_offset);
+                               if (kr == KERN_SUCCESS) {
+                                       /* try and map it there ... */
+                                       entry->base_address = base_offset;
+                                       goto restart_after_slide;
+                               }
+                               /* couldn't slide ... */
+                       }
+
                        return KERN_FAILURE;
-                  }
                }
-               vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
-                               ->backing.map, target_address,
-                               round_page(target_address + mappings[i].size),
-                               (mappings[i].protection & 
-                                       (VM_PROT_READ | VM_PROT_EXECUTE)),
-                               TRUE);
-               vm_map_protect(((vm_named_entry_t)local_map->ip_kobject)
-                               ->backing.map, target_address,
-                               round_page(target_address + mappings[i].size),
-                               (mappings[i].protection & 
-                                       (VM_PROT_READ | VM_PROT_EXECUTE)),
-                               FALSE);
+
+               /* record this mapping */
                file_mapping = (loaded_mapping_t *)zalloc(lsf_zone);
-               if(file_mapping == 0) 
-                       panic("lsf_load: OUT OF MAPPINGS!");
+               if (file_mapping == NULL) {
+                       lsf_unload(file_object, entry->base_address, sm_info);
+                       printf("lsf_map: unable to allocate memory\n");
+                       return KERN_NO_SPACE;
+               }
                shared_file_available_hash_ele--;
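+               /* paired with the ++ done in lsf_deallocate() on free */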
-               file_mapping->mapping_offset = (mappings[i].mapping_offset
+               file_mapping->mapping_offset = (mappings[i].sfm_address
                                                                & region_mask);
-               file_mapping->size = mappings[i].size;
-               file_mapping->file_offset = mappings[i].file_offset;
-               file_mapping->protection = mappings[i].protection;
+               file_mapping->size = mappings[i].sfm_size;
+               file_mapping->file_offset = mappings[i].sfm_file_offset;
+               file_mapping->protection = mappings[i].sfm_init_prot;
                file_mapping->next = NULL;
+               LSF_DEBUG(("lsf_map: file_mapping %p "
+                          "for offset=0x%x size=0x%x\n",
+                          file_mapping, file_mapping->mapping_offset,
+                          file_mapping->size));
+
+               /* and link it to the file entry */
                *tptr = file_mapping;
+
+               /* where to put the next mapping's description */
                tptr = &(file_mapping->next);
        }
-       shared_region_mapping_set_alt_next(sm_info->self, alternate_load_next);
-       return KERN_SUCCESS;
-                       
+
+       if (slide_p != NULL) {
+               *slide_p = base_offset - original_base_offset;
+       }
+
+       if (sm_info->flags & SHARED_REGION_STANDALONE) {
+               /*
+                * We have a standalone and private shared region, so we
+                * don't really need to keep the information about each file
+                * and each mapping.  Just deallocate it all.
+                * XXX we still have the hash table, though...
+                */
+               lsf_deallocate(file_object, entry->base_address, sm_info,
+                              FALSE);
+       }
+
+       LSF_DEBUG(("lsf_map: done\n"));
+       return KERN_SUCCESS;
 }
 
 
@@ -782,38 +2151,73 @@ lsf_unload(
        void                    *file_object,
        vm_offset_t             base_offset,
        shared_region_task_mappings_t   sm_info)
+{
+       lsf_deallocate(file_object, base_offset, sm_info, TRUE);
+}
+
+/*
+ * lsf_deallocate:
+ *
+ * Deallocates all the "shared region" internal data structures describing
+ * the file and its mappings.
+ * Also deallocate the actual file mappings if requested ("unload" arg).
+ */
+static void
+lsf_deallocate(
+       void                    *file_object,
+       vm_offset_t             base_offset,
+       shared_region_task_mappings_t   sm_info,
+       boolean_t               unload)
 {
        load_struct_t           *entry;
-       ipc_port_t              local_map;
        loaded_mapping_t        *map_ele;
        loaded_mapping_t        *back_ptr;
 
+       LSF_DEBUG(("lsf_deallocate(file=%p,base=0x%x,sm_info=%p,unload=%d)\n",
+                  file_object, base_offset, sm_info, unload));
        entry = lsf_hash_delete(file_object, base_offset, sm_info);
        if(entry) {
                map_ele = entry->mappings;
                while(map_ele != NULL) {
-                       if(map_ele->protection & VM_PROT_COW) {
-                               local_map = (ipc_port_t)sm_info->data_region;
-                       } else {
-                               local_map = (ipc_port_t)sm_info->text_region;
+                       if (unload) {
+                               ipc_port_t              region_handle;
+                               vm_named_entry_t        region_entry;
+
+                               if(map_ele->protection & VM_PROT_COW) {
+                                       region_handle = (ipc_port_t)
+                                               sm_info->data_region;
+                               } else {
+                                       region_handle = (ipc_port_t)
+                                               sm_info->text_region;
+                               }
+                               region_entry = (vm_named_entry_t)
+                                       region_handle->ip_kobject;
+                               
+                               vm_deallocate(region_entry->backing.map,
+                                             (entry->base_address + 
+                                              map_ele->mapping_offset),
+                                             map_ele->size);
                        }
-                       vm_deallocate(((vm_named_entry_t)local_map->ip_kobject)
-                                       ->backing.map, entry->base_address + 
-                                       map_ele->mapping_offset,
-                                       map_ele->size);
                        back_ptr = map_ele;
                        map_ele = map_ele->next;
-                       zfree(lsf_zone, (vm_offset_t)back_ptr);
+                       LSF_DEBUG(("lsf_deallocate: freeing mapping %p "
+                                  "offset 0x%x size 0x%x\n",
+                                  back_ptr, back_ptr->mapping_offset,
+                                  back_ptr->size));
+                       zfree(lsf_zone, back_ptr);
                        shared_file_available_hash_ele++;
                }
-               zfree(lsf_zone, (vm_offset_t)entry);
+               LSF_DEBUG(("lsf_deallocate: freeing entry %p\n", entry));
+               LSF_ALLOC_DEBUG(("lsf_deallocate: entry=%p", entry));
+               zfree(lsf_zone, entry);
                shared_file_available_hash_ele++;
        }
+       LSF_DEBUG(("lsf_deallocate: done\n"));
 }
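 
 /*
  * Design note: lsf_unload() is lsf_deallocate() with unload==TRUE; the
  * unload==FALSE case (standalone regions, see lsf_map()) frees only the
  * bookkeeping structures and leaves the actual VM mappings in place.
  */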
 
 /* returns an integer from 0 to 100 representing how full the pool is */
 unsigned int
-lsf_mapping_pool_gauge()
+lsf_mapping_pool_gauge(void)
 {
        return ((lsf_zone->count * lsf_zone->elem_size) * 100)/lsf_zone->max_size;
 }
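 
 /*
  * Worked example with hypothetical zone numbers: 600 elements of 40 bytes
  * each in a zone whose max_size is 96000 bytes gauges
  * (600 * 40 * 100) / 96000 = 25, i.e. the pool is 25 percent full.
  */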