diff --git a/osfmk/vm/vm_user.c b/osfmk/vm/vm_user.c
index 371898e0284b638088c6efe972d5d8ad869f86c4..a1f18add39aa57a7952c63af7de707082df70939 100644
--- a/osfmk/vm/vm_user.c
+++ b/osfmk/vm/vm_user.c
@@ -1,21 +1,24 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
  * 
  *     User-exported virtual memory functions.
  */
-#ifdef MACH_BSD
-/* remove after component interface available */
-extern int     vnode_pager_workaround;
-#endif
 
 #include <vm_cpm.h>
 #include <mach/boolean.h>
@@ -71,7 +70,9 @@ extern int    vnode_pager_workaround;
 #include <mach/vm_statistics.h>
 #include <mach/vm_map_server.h>
 #include <mach/mach_syscalls.h>
+
 #include <mach/shared_memory_server.h>
+#include <vm/vm_shared_memory_server.h>
 
 #include <kern/host.h>
 #include <kern/task.h>
@@ -82,6 +83,11 @@ extern int   vnode_pager_workaround;
 #include <vm/memory_object.h>
 #include <vm/vm_pageout.h>
 
+__private_extern__ load_struct_t *
+lsf_remove_regions_mappings_lock(
+       shared_region_mapping_t region,
+       shared_region_task_mappings_t   sm_info,
+       int need_lock);
 
 
 vm_size_t        upl_offset_to_pagelist = 0;
@@ -116,8 +122,8 @@ vm_allocate(
        if (anywhere)
                *addr = vm_map_min(map);
        else
-               *addr = trunc_page(*addr);
-       size = round_page(size);
+               *addr = trunc_page_32(*addr);
+       size = round_page_32(size);
        if (size == 0) {
          return(KERN_INVALID_ARGUMENT);
        }
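
The hunk above moves vm_allocate from the generic trunc_page()/round_page() macros to the explicitly 32-bit trunc_page_32()/round_page_32() variants; the same substitution recurs throughout this patch. A minimal sketch of what such mask-based page macros compute, assuming 4 KB pages; the definitions below are illustrative stand-ins, not the kernel's:

#include <stdio.h>

#define PAGE_SIZE_32  4096u                       /* assumed 4 KB pages */
#define PAGE_MASK_32  (PAGE_SIZE_32 - 1)

/* round an address down / a size up to a page boundary, 32-bit arithmetic */
#define trunc_page_32(x)  ((unsigned int)(x) & ~PAGE_MASK_32)
#define round_page_32(x)  (((unsigned int)(x) + PAGE_MASK_32) & ~PAGE_MASK_32)

int main(void)
{
        unsigned int addr = 0x12345;   /* unaligned caller-supplied address */
        unsigned int size = 0x1001;    /* unaligned caller-supplied size    */

        /* vm_allocate-style normalization: start rounds down, size rounds up */
        printf("start 0x%x size 0x%x\n", trunc_page_32(addr), round_page_32(size));
        return 0;                      /* prints: start 0x12000 size 0x2000 */
}
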
@@ -154,8 +160,8 @@ vm_deallocate(
        if (size == (vm_offset_t) 0)
                return(KERN_SUCCESS);
 
-       return(vm_map_remove(map, trunc_page(start),
-                            round_page(start+size), VM_MAP_NO_FLAGS));
+       return(vm_map_remove(map, trunc_page_32(start),
+                            round_page_32(start+size), VM_MAP_NO_FLAGS));
 }
 
 /*
@@ -176,8 +182,8 @@ vm_inherit(
                 return(KERN_INVALID_ARGUMENT);
 
        return(vm_map_inherit(map,
-                             trunc_page(start),
-                             round_page(start+size),
+                             trunc_page_32(start),
+                             round_page_32(start+size),
                              new_inheritance));
 }
 
@@ -199,8 +205,8 @@ vm_protect(
                return(KERN_INVALID_ARGUMENT);
 
        return(vm_map_protect(map,
-                             trunc_page(start),
-                             round_page(start+size),
+                             trunc_page_32(start),
+                             round_page_32(start+size),
                              new_protection,
                              set_maximum));
 }
@@ -457,8 +463,8 @@ vm_map_64(
                        vm_map_entry_t          map_entry;
 
                        named_entry_unlock(named_entry);
-                       *address = trunc_page(*address);
-                       size = round_page(size);
+                       *address = trunc_page_32(*address);
+                       size = round_page_64(size);
                        vm_object_reference(vm_submap_object);
                        if ((result = vm_map_enter(target_map,
                                address, size, mask, flags,
@@ -503,38 +509,102 @@ vm_map_64(
                        vm_object_reference(named_entry->object);
                        object = named_entry->object;
                } else {
-                       object = vm_object_enter(named_entry->backing.pager, 
-                                       named_entry->size, 
-                                       named_entry->internal, 
-                                       FALSE,
-                                       FALSE);
+                       unsigned int            access;
+                       vm_prot_t               protections;
+                       unsigned int            wimg_mode;
+                       boolean_t       cache_attr;
+
+                       protections = named_entry->protection 
+                                                       & VM_PROT_ALL;
+                       access = GET_MAP_MEM(named_entry->protection);
+
+                       object = vm_object_enter(
+                               named_entry->backing.pager, 
+                               named_entry->size, 
+                               named_entry->internal, 
+                               FALSE,
+                               FALSE);
                        if (object == VM_OBJECT_NULL) {
                                named_entry_unlock(named_entry);
                                return(KERN_INVALID_OBJECT);
                        }
+
+                       vm_object_lock(object);
+
+                       /* create an extra ref for the named entry */
+                       vm_object_reference_locked(object);
                        named_entry->object = object;
                        named_entry_unlock(named_entry);
-                       /* create an extra reference for the named entry */
-                       vm_object_reference(named_entry->object);
-                       /* wait for object (if any) to be ready */
-                       if (object != VM_OBJECT_NULL) {
-                               vm_object_lock(object);
+
+                       wimg_mode = object->wimg_bits;
+                       if(access == MAP_MEM_IO) {
+                               wimg_mode = VM_WIMG_IO;
+                       } else if (access == MAP_MEM_COPYBACK) {
+                               wimg_mode = VM_WIMG_USE_DEFAULT;
+                       } else if (access == MAP_MEM_WTHRU) {
+                               wimg_mode = VM_WIMG_WTHRU;
+                       } else if (access == MAP_MEM_WCOMB) {
+                               wimg_mode = VM_WIMG_WCOMB;
+                       }
+                       if ((wimg_mode == VM_WIMG_IO)
+                               || (wimg_mode == VM_WIMG_WCOMB))
+                               cache_attr = TRUE;
+                       else 
+                               cache_attr = FALSE;
+
+                       if (named_entry->backing.pager) {
+                               /* wait for object (if any) to be ready */
                                while (!object->pager_ready) {
                                        vm_object_wait(object,
-                                               VM_OBJECT_EVENT_PAGER_READY,
-                                               THREAD_UNINT);
+                                                  VM_OBJECT_EVENT_PAGER_READY,
+                                                  THREAD_UNINT);
                                        vm_object_lock(object);
                                }
-                               vm_object_unlock(object);
                        }
+                       if(object->wimg_bits != wimg_mode) {
+                               vm_page_t p;
+
+                               vm_object_paging_wait(object, THREAD_UNINT);
+
+                               object->wimg_bits = wimg_mode;
+                               queue_iterate(&object->memq, p, vm_page_t, listq) {
+                                       if (!p->fictitious) {
+                                               pmap_page_protect(
+                                                       p->phys_page, 
+                                                       VM_PROT_NONE);
+                                               if(cache_attr)
+                                                       pmap_sync_caches_phys(
+                                                               p->phys_page);
+                                       }
+                               }
+                       }
+                       object->true_share = TRUE;
+                       if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
+                               object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+                       vm_object_unlock(object);
                }
-       } else {
-               if ((object = vm_object_enter(port, size, FALSE, FALSE, FALSE))
+       } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) {
+               /*
+                * JMM - This is temporary until we unify named entries
+                * and raw memory objects.
+                *
+                * Detected fake ip_kotype for a memory object.  In
+                * this case, the port isn't really a port at all, but
+                * instead is just a raw memory object.
+                */
+                
+               if ((object = vm_object_enter((memory_object_t)port,
+                                             size, FALSE, FALSE, FALSE))
                        == VM_OBJECT_NULL)
                        return(KERN_INVALID_OBJECT);
 
                /* wait for object (if any) to be ready */
                if (object != VM_OBJECT_NULL) {
+                       if(object == kernel_object) {
+                               printf("Warning: Attempt to map kernel object"
+                                       " by a non-private kernel entity\n");
+                               return(KERN_INVALID_OBJECT);
+                       }
                        vm_object_lock(object);
                        while (!object->pager_ready) {
                                vm_object_wait(object,
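
In the named-entry branch of the hunk above, vm_map_64() now derives the backing object's cache policy (wimg_bits) from the MAP_MEM_* access mode carried in the entry's protection bits, and marks I/O and write-combined mappings as needing cache maintenance before the attribute is switched. A minimal sketch of that decision, using illustrative enums in place of the kernel's MAP_MEM_* and VM_WIMG_* constants:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's MAP_MEM_* access modes and
 * VM_WIMG_* cache policies referenced in the hunk above. */
enum access_mode { MAP_MEM_NOOP, MAP_MEM_IO, MAP_MEM_COPYBACK,
                   MAP_MEM_WTHRU, MAP_MEM_WCOMB };
enum wimg_mode   { VM_WIMG_USE_DEFAULT, VM_WIMG_IO, VM_WIMG_WTHRU,
                   VM_WIMG_WCOMB };

struct wimg_choice {
        enum wimg_mode wimg;        /* cache policy to stamp on the object  */
        int            cache_attr;  /* nonzero: caches must be synchronized */
};

struct wimg_choice
wimg_for_access(enum access_mode access, enum wimg_mode current)
{
        struct wimg_choice c;

        c.wimg = current;                        /* MAP_MEM_NOOP: leave as-is */
        if (access == MAP_MEM_IO)
                c.wimg = VM_WIMG_IO;
        else if (access == MAP_MEM_COPYBACK)
                c.wimg = VM_WIMG_USE_DEFAULT;
        else if (access == MAP_MEM_WTHRU)
                c.wimg = VM_WIMG_WTHRU;
        else if (access == MAP_MEM_WCOMB)
                c.wimg = VM_WIMG_WCOMB;

        /* uncached or write-combined memory requires explicit cache flushes */
        c.cache_attr = (c.wimg == VM_WIMG_IO || c.wimg == VM_WIMG_WCOMB);
        return c;
}

int main(void)
{
        struct wimg_choice c = wimg_for_access(MAP_MEM_WCOMB, VM_WIMG_USE_DEFAULT);
        printf("wimg=%d cache_attr=%d\n", c.wimg, c.cache_attr);
        return 0;
}
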
@@ -544,10 +614,12 @@ vm_map_64(
                        }
                        vm_object_unlock(object);
                }
+       } else {
+               return (KERN_INVALID_OBJECT);
        }
 
-       *address = trunc_page(*address);
-       size = round_page(size);
+       *address = trunc_page_32(*address);
+       size = round_page_64(size);
 
        /*
         *      Perform the copy if requested
@@ -610,6 +682,7 @@ vm_map_64(
 }
 
 /* temporary, until world build */
+kern_return_t
 vm_map(
        vm_map_t                target_map,
        vm_offset_t             *address,
@@ -623,7 +696,7 @@ vm_map(
        vm_prot_t               max_protection,
        vm_inherit_t            inheritance)
 {
-       vm_map_64(target_map, address, size, mask, flags, 
+       return vm_map_64(target_map, address, size, mask, flags, 
                        port, (vm_object_offset_t)offset, copy,
                        cur_protection, max_protection, inheritance);
 }
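
The one-line change above fixes vm_map(), the narrow-offset wrapper kept "until world build": it computed the mapping via vm_map_64() but dropped the kern_return_t, so callers could not observe failures. A toy illustration of the pattern, with hypothetical names standing in for the real routines:

#include <stdio.h>

typedef int kern_return_t;
#define KERN_SUCCESS          0
#define KERN_INVALID_ARGUMENT 4          /* illustrative error code */

/* stands in for vm_map_64(): takes a wide offset, reports errors */
static kern_return_t do_map_64(unsigned long long offset)
{
        return (offset & 0xfff) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;
}

/* stands in for vm_map(): before the fix the call's result was discarded,
 * leaving the wrapper's return value undefined; now it is forwarded while
 * the narrow offset is widened on the way through. */
static kern_return_t do_map(unsigned int offset)
{
        return do_map_64((unsigned long long)offset);
}

int main(void)
{
        printf("%d\n", do_map(0x1001));  /* the error is now visible to the caller */
        return 0;
}
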
@@ -663,11 +736,11 @@ vm_wire(
                return KERN_INVALID_ARGUMENT;
 
        if (access != VM_PROT_NONE) {
-               rc = vm_map_wire(map, trunc_page(start),
-                                round_page(start+size), access, TRUE);
+               rc = vm_map_wire(map, trunc_page_32(start),
+                                round_page_32(start+size), access, TRUE);
        } else {
-               rc = vm_map_unwire(map, trunc_page(start),
-                                  round_page(start+size), TRUE);
+               rc = vm_map_unwire(map, trunc_page_32(start),
+                                  round_page_32(start+size), TRUE);
        }
        return rc;
 }
@@ -728,8 +801,8 @@ vm_msync(
        /*
         * align address and size on page boundaries
         */
-       size = round_page(address + size) - trunc_page(address);
-       address = trunc_page(address);
+       size = round_page_32(address + size) - trunc_page_32(address);
+       address = trunc_page_32(address);
 
         if (map == VM_MAP_NULL)
                 return(KERN_INVALID_TASK);
@@ -837,7 +910,7 @@ vm_msync(
                                        kill_pages = -1;
                        }
                        if (kill_pages != -1)
-                               memory_object_deactivate_pages(object, offset, 
+                               vm_object_deactivate_pages(object, offset, 
                                                               (vm_object_size_t)flush_size, kill_pages);
                        vm_object_unlock(object);
                        vm_map_unlock(map);
@@ -848,8 +921,8 @@ vm_msync(
                 * Don't bother to sync internal objects, since there can't
                 * be any "permanent" storage for these objects anyway.
                 */
-               if ((object->pager == IP_NULL) || (object->internal) ||
-                   (object->private)) {
+               if ((object->pager == MEMORY_OBJECT_NULL) ||
+                   (object->internal) || (object->private)) {
                        vm_object_unlock(object);
                        vm_map_unlock(map);
                        continue;
@@ -864,7 +937,7 @@ vm_msync(
 
                vm_map_unlock(map);
 
-               do_sync_req = memory_object_sync(object,
+               do_sync_req = vm_object_sync(object,
                                        offset,
                                        flush_size,
                                        sync_flags & VM_SYNC_INVALIDATE,
@@ -917,31 +990,11 @@ re_iterate:
 
                queue_enter(&req_q, new_msr, msync_req_t, req_q);
 
-#ifdef MACH_BSD
-               if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) ==
-               ((rpc_subsystem_t) &vnode_pager_workaround)) {
-                       (void) vnode_pager_synchronize(
-                               object->pager,
-                               object->pager_request,
-                               offset,
-                               flush_size,
-                               sync_flags);
-               } else {
-                       (void) memory_object_synchronize(
-                               object->pager,
-                               object->pager_request,
-                               offset,
-                               flush_size,
-                               sync_flags);
-               }
-#else
                (void) memory_object_synchronize(
                                object->pager,
-                               object->pager_request,
                                offset,
                                flush_size,
                                sync_flags);
-#endif
        }/* while */
 
        /*
@@ -1007,8 +1060,8 @@ vm_behavior_set(
        if (map == VM_MAP_NULL)
                return(KERN_INVALID_ARGUMENT);
 
-       return(vm_map_behavior_set(map, trunc_page(start), 
-                                  round_page(start+size), new_behavior));
+       return(vm_map_behavior_set(map, trunc_page_32(start), 
+                                  round_page_32(start+size), new_behavior));
 }
 
 #if    VM_CPM
@@ -1075,8 +1128,8 @@ vm_allocate_cpm(
        if (anywhere)
                *addr = vm_map_min(map);
        else
-               *addr = trunc_page(*addr);
-       size = round_page(size);
+               *addr = trunc_page_32(*addr);
+       size = round_page_32(size);
 
        if ((kr = cpm_allocate(size, &pages, TRUE)) != KERN_SUCCESS)
                return kr;
@@ -1104,7 +1157,7 @@ vm_allocate_cpm(
                assert(!m->pageout);
                assert(!m->tabled);
                assert(m->busy);
-               assert(m->phys_addr>=avail_start && m->phys_addr<=avail_end);
+               assert(m->phys_page>=avail_start && m->phys_page<=avail_end);
 
                m->busy = FALSE;
                vm_page_insert(m, cpm_obj, offset);
@@ -1178,7 +1231,9 @@ vm_allocate_cpm(
                m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset);
                vm_object_unlock(cpm_obj);
                assert(m != VM_PAGE_NULL);
-               PMAP_ENTER(pmap, va, m, VM_PROT_ALL, TRUE);
+               PMAP_ENTER(pmap, va, m, VM_PROT_ALL,    
+                       ((unsigned int)(m->object->wimg_bits)) & VM_WIMG_MASK,
+                       TRUE);
        }
 
 #if    MACH_ASSERT
@@ -1203,7 +1258,7 @@ vm_allocate_cpm(
                assert(!m->precious);
                assert(!m->clustered);
                if (offset != 0) {
-                       if (m->phys_addr != prev_addr + PAGE_SIZE) {
+                       if (m->phys_page != prev_addr + 1) {
                                printf("start 0x%x end 0x%x va 0x%x\n",
                                       start, end, va);
                                printf("obj 0x%x off 0x%x\n", cpm_obj, offset);
@@ -1212,7 +1267,7 @@ vm_allocate_cpm(
                                panic("vm_allocate_cpm:  pages not contig!");
                        }
                }
-               prev_addr = m->phys_addr;
+               prev_addr = m->phys_page;
        }
 #endif /* MACH_ASSERT */
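
The contiguity assertion above changes because vm_page's physical byte address field (phys_addr) has been replaced by phys_page, a physical page number, so adjacent pages now differ by 1 rather than by PAGE_SIZE; the same phys_addr to phys_page substitution appears in the other hunks of this patch. A minimal sketch of the relationship, assuming 4 KB pages:

#include <stdint.h>
#include <assert.h>

#define PAGE_SHIFT 12                      /* assumed 4 KB pages */
#define PAGE_SIZE  (1u << PAGE_SHIFT)

/* Old scheme: track the physical byte address; contiguous pages differ by
 * PAGE_SIZE.  New scheme: track the physical page number (address >>
 * PAGE_SHIFT); contiguous pages differ by exactly 1, which is what the
 * reworked assertion in vm_allocate_cpm checks. */
int main(void)
{
        uint32_t phys_addr_prev = 0x00200000;
        uint32_t phys_addr_next = phys_addr_prev + PAGE_SIZE;

        uint32_t phys_page_prev = phys_addr_prev >> PAGE_SHIFT;
        uint32_t phys_page_next = phys_addr_next >> PAGE_SHIFT;

        assert(phys_addr_next == phys_addr_prev + PAGE_SIZE); /* old check */
        assert(phys_page_next == phys_page_prev + 1);         /* new check */
        return 0;
}
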
 
@@ -1249,9 +1304,10 @@ mach_memory_object_memory_entry_64(
        boolean_t               internal,
        vm_object_offset_t      size,
        vm_prot_t               permission,
-       ipc_port_t              pager,
+       memory_object_t         pager,
        ipc_port_t              *entry_handle)
 {
+       unsigned int            access;
        vm_named_entry_t        user_object;
        ipc_port_t              user_handle;
        ipc_port_t              previous;
@@ -1288,7 +1344,9 @@ mach_memory_object_memory_entry_64(
        user_object->size = size;
        user_object->offset = 0;
        user_object->backing.pager = pager;
-       user_object->protection = permission;
+       user_object->protection = permission & VM_PROT_ALL;
+       access = GET_MAP_MEM(permission);
+       SET_MAP_MEM(access, user_object->protection);
        user_object->internal = internal;
        user_object->is_sub_map = FALSE;
        user_object->ref_count = 1;
@@ -1305,7 +1363,7 @@ mach_memory_object_memory_entry(
        boolean_t       internal,
        vm_size_t       size,
        vm_prot_t       permission,
-       ipc_port_t      pager,
+       memory_object_t pager,
        ipc_port_t      *entry_handle)
 {
        return mach_memory_object_memory_entry_64( host, internal, 
@@ -1334,19 +1392,102 @@ mach_make_memory_entry_64(
        vm_map_t                pmap_map;
 
        /* needed for call to vm_map_lookup_locked */
-       boolean_t               wired;
+       boolean_t                       wired;
        vm_object_offset_t      obj_off;
-       vm_prot_t               prot;
+       vm_prot_t                       prot;
        vm_object_offset_t      lo_offset, hi_offset;
        vm_behavior_t           behavior;
-       vm_object_t             object;
+       vm_object_t                     object;
+       vm_object_t                     shadow_object;
 
        /* needed for direct map entry manipulation */
        vm_map_entry_t          map_entry;
-       vm_map_t                local_map;
+       vm_map_entry_t          next_entry;
+       vm_map_t                        local_map;
+       vm_map_t                        original_map = target_map;
+       vm_offset_t                     local_offset;
        vm_object_size_t        mappable_size;
+       vm_object_size_t        total_size;
+
+       unsigned int                    access;
+       vm_prot_t                       protections;
+       unsigned int                    wimg_mode;
+       boolean_t                       cache_attr;
+
+       protections = permission & VM_PROT_ALL;
+       access = GET_MAP_MEM(permission);
+
+
+       offset = trunc_page_64(offset);
+       *size = round_page_64(*size);
+
+       if((parent_entry != NULL)
+               && (permission & MAP_MEM_ONLY)) {
+               vm_named_entry_t        parent_object;
+               if(ip_kotype(parent_entry) != IKOT_NAMED_ENTRY) {
+                       return KERN_INVALID_ARGUMENT;
+               }
+               parent_object = (vm_named_entry_t)parent_entry->ip_kobject;
+               object = parent_object->object;
+               if(object != VM_OBJECT_NULL)
+                       wimg_mode = object->wimg_bits;
+               if((access != GET_MAP_MEM(parent_object->protection)) &&
+                               !(parent_object->protection & VM_PROT_WRITE)) { 
+                       return KERN_INVALID_RIGHT;
+               }
+               if(access == MAP_MEM_IO) {
+                  SET_MAP_MEM(access, parent_object->protection);
+                  wimg_mode = VM_WIMG_IO;
+               } else if (access == MAP_MEM_COPYBACK) {
+                  SET_MAP_MEM(access, parent_object->protection);
+                  wimg_mode = VM_WIMG_DEFAULT;
+               } else if (access == MAP_MEM_WTHRU) {
+                  SET_MAP_MEM(access, parent_object->protection);
+                  wimg_mode = VM_WIMG_WTHRU;
+               } else if (access == MAP_MEM_WCOMB) {
+                  SET_MAP_MEM(access, parent_object->protection);
+                  wimg_mode = VM_WIMG_WCOMB;
+               }
+               if(object &&
+                       (access != MAP_MEM_NOOP) && 
+                       (!(object->nophyscache))) {
+                       if(object->wimg_bits != wimg_mode) {
+                          vm_page_t p;
+                          if ((wimg_mode == VM_WIMG_IO)
+                               || (wimg_mode == VM_WIMG_WCOMB))
+                               cache_attr = TRUE;
+                          else 
+                               cache_attr = FALSE;
+                          vm_object_lock(object);
+                          while(object->paging_in_progress) {
+                               vm_object_unlock(object);
+                               vm_object_wait(object,
+                                  VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
+                                  THREAD_UNINT);
+                               vm_object_lock(object);
+                          }
+                          object->wimg_bits = wimg_mode;
+                          queue_iterate(&object->memq, 
+                                               p, vm_page_t, listq) {
+                               if (!p->fictitious) {
+                                       pmap_page_protect(
+                                               p->phys_page, 
+                                               VM_PROT_NONE);
+                                        if(cache_attr)
+                                           pmap_sync_caches_phys(
+                                                       p->phys_page);
+                               }
+                          }
+                          vm_object_unlock(object);
+                       }
+               }
+               return KERN_SUCCESS;
+       }
+
+       if(permission & MAP_MEM_ONLY) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       
        user_object = (vm_named_entry_t) 
                        kalloc(sizeof (struct vm_named_entry));
        if(user_object == NULL)
@@ -1374,11 +1515,28 @@ mach_make_memory_entry_64(
        user_object->backing.pager = NULL;
        user_object->ref_count = 1;
 
+       if(permission & MAP_MEM_NAMED_CREATE) {
+               user_object->object = NULL;
+               user_object->internal = TRUE;
+               user_object->is_sub_map = FALSE;
+               user_object->offset = 0;
+               user_object->protection = protections;
+               SET_MAP_MEM(access, user_object->protection);
+               user_object->size = *size;
+
+               /* user_object pager and internal fields are not used */
+               /* when the object field is filled in.                */
+
+               ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
+                                                       IKOT_NAMED_ENTRY);
+               *object_handle = user_handle;
+               return KERN_SUCCESS;
+       }
+
        if(parent_entry == NULL) {
        /* Create a named object based on address range within the task map */
        /* Go find the object at given address */
 
-               permission &= VM_PROT_ALL;
                vm_map_lock_read(target_map);
 
                /* get the object associated with the target address */
@@ -1386,29 +1544,44 @@ mach_make_memory_entry_64(
                /* that requested by the caller */
 
                kr = vm_map_lookup_locked(&target_map, offset, 
-                               permission, &version,
+                               protections, &version,
                                &object, &obj_off, &prot, &wired, &behavior,
                                &lo_offset, &hi_offset, &pmap_map);
                if (kr != KERN_SUCCESS) {
                        vm_map_unlock_read(target_map);
                        goto make_mem_done;
                }
-               if ((prot & permission) != permission) {
+               if (((prot & protections) != protections) 
+                                       || (object == kernel_object)) {
                        kr = KERN_INVALID_RIGHT;
                        vm_object_unlock(object);
                        vm_map_unlock_read(target_map);
                        if(pmap_map != target_map)
                                vm_map_unlock_read(pmap_map);
+                       if(object == kernel_object) {
+                               printf("Warning: Attempt to create a named"
+                                       " entry from the kernel_object\n");
+                       }
                        goto make_mem_done;
                }
 
                /* We have an object, now check to see if this object */
                /* is suitable.  If not, create a shadow and share that */
                
-               local_map = target_map;
 redo_lookup:
+               local_map = original_map;
+               local_offset = offset;
+               if(target_map != local_map) {
+                       vm_map_unlock_read(target_map);
+                       if(pmap_map != target_map)
+                               vm_map_unlock_read(pmap_map);
+                       vm_map_lock_read(local_map);
+                       target_map = local_map;
+                       pmap_map = local_map;
+               }
                while(TRUE) {
-                  if(!vm_map_lookup_entry(local_map, offset, &map_entry)) {
+                  if(!vm_map_lookup_entry(local_map, 
+                                               local_offset, &map_entry)) {
                        kr = KERN_INVALID_ARGUMENT;
                         vm_object_unlock(object);
                         vm_map_unlock_read(target_map);
@@ -1425,17 +1598,25 @@ redo_lookup:
                                vm_map_unlock_read(pmap_map);
                          goto make_mem_done;
                      }
+                     if(map_entry->wired_count) {
+                        /* JMM - The check below should be reworked instead. */
+                        object->true_share = TRUE;
+                     }
                      break;
                   } else {
+                       vm_map_t        tmap;
+                       tmap = local_map;
                        local_map = map_entry->object.sub_map;
+                       
                        vm_map_lock_read(local_map);
-                       vm_map_unlock_read(target_map);
-                       if(pmap_map != target_map)
-                               vm_map_unlock_read(pmap_map);
+                       vm_map_unlock_read(tmap);
                        target_map = local_map;
+                       pmap_map = local_map;
+                       local_offset = local_offset - map_entry->vme_start;
+                       local_offset += map_entry->offset;
                   }
                }
-               if(((map_entry->max_protection) & permission) != permission) {
+               if(((map_entry->max_protection) & protections) != protections) {
                         kr = KERN_INVALID_RIGHT;
                          vm_object_unlock(object);
                          vm_map_unlock_read(target_map);
@@ -1443,6 +1624,43 @@ redo_lookup:
                                vm_map_unlock_read(pmap_map);
                          goto make_mem_done;
                }
+
+               mappable_size  =  hi_offset - obj_off;
+               total_size = map_entry->vme_end - map_entry->vme_start;
+               if(*size > mappable_size) {
+                       /* try to extend mappable size if the entries */
+                       /* following are from the same object and are */
+                       /* compatible */
+                       next_entry = map_entry->vme_next;
+                       /* lets see if the next map entry is still   */
+                       /* pointing at this object and is contiguous */
+                       while(*size > mappable_size) {
+                               if((next_entry->object.vm_object == object) &&
+                                       (next_entry->vme_start == 
+                                               next_entry->vme_prev->vme_end) &&
+                                       (next_entry->offset == 
+                                          next_entry->vme_prev->offset + 
+                                          (next_entry->vme_prev->vme_end - 
+                                          next_entry->vme_prev->vme_start))) {
+                                       if(((next_entry->max_protection) 
+                                               & protections) != protections) {
+                                               break;
+                                       }
+                                       if (next_entry->needs_copy !=
+                                           map_entry->needs_copy)
+                                               break;
+                                       mappable_size += next_entry->vme_end
+                                               - next_entry->vme_start;
+                                       total_size += next_entry->vme_end
+                                               - next_entry->vme_start;
+                                       next_entry = next_entry->vme_next;
+                               } else {
+                                       break;
+                               }
+                       
+                       }
+               }
+
                if(object->internal) {
                        /* vm_map_lookup_locked will create a shadow if   */
                        /* needs_copy is set but does not check for the   */
@@ -1450,36 +1668,67 @@ redo_lookup:
                        /* set up an object which will not be pulled from */
                        /* under us.  */
 
-                       if (map_entry->needs_copy  || object->shadowed ||
-                            (object->size > 
-                                      ((vm_object_size_t)map_entry->vme_end -
-                                                     map_entry->vme_start))) {
+                       if ((map_entry->needs_copy  || object->shadowed ||
+                            (object->size > total_size))
+                                       && !object->true_share) {
                                if (vm_map_lock_read_to_write(target_map)) {
                                        vm_map_lock_read(target_map);
                                        goto redo_lookup;
                                }
 
-
+                               /* 
+                                * JMM - We need to avoid coming here when the object
+                                * is wired by anybody, not just the current map.  Why
+                                * couldn't we use the standard vm_object_copy_quickly()
+                                * approach here?
+                                */
+                                
                                /* create a shadow object */
+                               vm_object_shadow(&map_entry->object.vm_object,
+                                               &map_entry->offset, total_size);
+                               shadow_object = map_entry->object.vm_object;
+                               vm_object_unlock(object);
+                               vm_object_pmap_protect(
+                                       object, map_entry->offset,
+                                       total_size,
+                                       ((map_entry->is_shared 
+                                               || target_map->mapped)
+                                                       ? PMAP_NULL :
+                                                       target_map->pmap),
+                                       map_entry->vme_start,
+                                       map_entry->protection & ~VM_PROT_WRITE);
+                               total_size -= (map_entry->vme_end 
+                                               - map_entry->vme_start);
+                               next_entry = map_entry->vme_next;
+                               map_entry->needs_copy = FALSE;
+                               while (total_size) {
+                                  if(next_entry->object.vm_object == object) {
+                                       shadow_object->ref_count++; 
+                                       vm_object_res_reference(shadow_object);
+                                       next_entry->object.vm_object 
+                                                       = shadow_object;
+                                       vm_object_deallocate(object);
+                                       next_entry->offset 
+                                               = next_entry->vme_prev->offset +
+                                               (next_entry->vme_prev->vme_end 
+                                               - next_entry->vme_prev->vme_start);
+                                               next_entry->needs_copy = FALSE;
+                                       } else {
+                                               panic("mach_make_memory_entry_64:"
+                                                 " map entries out of sync\n");
+                                       }
+                                       total_size -= 
+                                               next_entry->vme_end 
+                                                       - next_entry->vme_start;
+                                       next_entry = next_entry->vme_next;
+                               }
+
+                               object = shadow_object;
+                               vm_object_lock(object);
+                               obj_off = (local_offset - map_entry->vme_start)
+                                                        + map_entry->offset;
+                               vm_map_lock_write_to_read(target_map);
 
-                               vm_object_shadow(&map_entry->object.vm_object, 
-                                       &map_entry->offset, 
-                                       (map_entry->vme_end
-                                        - map_entry->vme_start));
-                               map_entry->needs_copy = FALSE;
-                               vm_object_unlock(object);
-                               object = map_entry->object.vm_object;
-                               vm_object_lock(object);
-                               object->size = map_entry->vme_end 
-                                               - map_entry->vme_start;
-                               obj_off = (offset - map_entry->vme_start) + 
-                                                       map_entry->offset;
-                               lo_offset = map_entry->offset;
-                               hi_offset = (map_entry->vme_end -
-                                       map_entry->vme_start) +
-                                       map_entry->offset;
-
-                               vm_map_lock_write_to_read(target_map);
 
                        }
                }
@@ -1494,9 +1743,47 @@ redo_lookup:
                /* target of ipc's, etc.  The code above, protecting    */
                /* against delayed copy, etc. is mostly defensive.      */
 
-
+               wimg_mode = object->wimg_bits;
+               if(!(object->nophyscache)) {
+                       if(access == MAP_MEM_IO) {
+                               wimg_mode = VM_WIMG_IO;
+                       } else if (access == MAP_MEM_COPYBACK) {
+                               wimg_mode = VM_WIMG_USE_DEFAULT;
+                       } else if (access == MAP_MEM_WTHRU) {
+                               wimg_mode = VM_WIMG_WTHRU;
+                       } else if (access == MAP_MEM_WCOMB) {
+                               wimg_mode = VM_WIMG_WCOMB;
+                       }
+               }
 
                object->true_share = TRUE;
+               if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
+                       object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
+
+               /* we now point to this object, hold on to it */
+               vm_object_reference_locked(object);
+               vm_map_unlock_read(target_map);
+               if(pmap_map != target_map)
+                       vm_map_unlock_read(pmap_map);
+
+               if(object->wimg_bits != wimg_mode) {
+                       vm_page_t p;
+
+                       vm_object_paging_wait(object, THREAD_UNINT);
+
+                       queue_iterate(&object->memq, 
+                                               p, vm_page_t, listq) {
+                               if (!p->fictitious) {
+                                       pmap_page_protect(
+                                               p->phys_page, 
+                                               VM_PROT_NONE);
+                                        if(cache_attr)
+                                           pmap_sync_caches_phys(
+                                                       p->phys_page);
+                               }
+                       }
+                       object->wimg_bits = wimg_mode;
+               }
                user_object->object = object;
                user_object->internal = object->internal;
                user_object->is_sub_map = FALSE;
@@ -1509,7 +1796,6 @@ redo_lookup:
                /*                   offset of our beg addr within entry  */
                /* it corresponds to this:                                */
 
-               mappable_size  =  hi_offset - obj_off;
                if(*size > mappable_size)
                        *size = mappable_size;
 
@@ -1518,16 +1804,10 @@ redo_lookup:
                /* user_object pager and internal fields are not used */
                /* when the object field is filled in.                */
 
-               object->ref_count++; /* we now point to this object, hold on */
-               vm_object_res_reference(object);
                vm_object_unlock(object);
                ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
                                                        IKOT_NAMED_ENTRY);
-               *size = user_object->size;
                *object_handle = user_handle;
-               vm_map_unlock_read(target_map);
-               if(pmap_map != target_map)
-                       vm_map_unlock_read(pmap_map);
                return KERN_SUCCESS;
        } else {
 
@@ -1539,10 +1819,6 @@ redo_lookup:
                        goto make_mem_done;
                }
                parent_object =  (vm_named_entry_t)parent_entry->ip_kobject;
-               if(permission & parent_object->protection != permission) {
-                       kr = KERN_INVALID_ARGUMENT;
-                       goto make_mem_done;
-               }
                if((offset + *size) > parent_object->size) {
                        kr = KERN_INVALID_ARGUMENT;
                        goto make_mem_done;
@@ -1551,7 +1827,12 @@ redo_lookup:
                user_object->object = parent_object->object;
                user_object->size = *size;
                user_object->offset = parent_object->offset + offset;
-               user_object->protection = permission;
+               user_object->protection = parent_object->protection;
+               user_object->protection &= ~VM_PROT_ALL;
+               user_object->protection = permission & VM_PROT_ALL;
+               if(access != MAP_MEM_NOOP) {
+                       SET_MAP_MEM(access, user_object->protection);
+               }
                if(parent_object->is_sub_map) {
                   user_object->backing.map = parent_object->backing.map;
                   vm_map_lock(user_object->backing.map);
@@ -1569,6 +1850,10 @@ redo_lookup:
                        vm_object_reference(parent_object->object); 
                        vm_object_lock(parent_object->object);
                        parent_object->object->true_share = TRUE;
+                       if (parent_object->object->copy_strategy == 
+                           MEMORY_OBJECT_COPY_SYMMETRIC)
+                               parent_object->object->copy_strategy =
+                                       MEMORY_OBJECT_COPY_DELAY;
                        vm_object_unlock(parent_object->object);
                }
                ipc_kobject_set(user_handle, (ipc_kobject_t) user_object,
@@ -1618,16 +1903,12 @@ vm_region_object_create(
        ipc_port_t              user_handle;
        kern_return_t           kr;
 
-       pmap_t          new_pmap = pmap_create((vm_size_t) 0);
        ipc_port_t      previous;
        vm_map_t        new_map;
        
-       if(new_pmap == PMAP_NULL)
-               return KERN_FAILURE;
        user_object = (vm_named_entry_t) 
                        kalloc(sizeof (struct vm_named_entry));
        if(user_object == NULL) {
-               pmap_destroy(new_pmap);
                return KERN_FAILURE;
        }
        named_entry_lock_init(user_object);
@@ -1654,7 +1935,7 @@ vm_region_object_create(
 
        /* Create a named object based on a submap of specified size */
 
-       new_map = vm_map_create(new_pmap, 0, size, TRUE);
+       new_map = vm_map_create(0, 0, size, TRUE);
        user_object->backing.map = new_map;
 
 
@@ -1717,7 +1998,15 @@ kern_return_t vm_map_region_replace(
                        vm_map_unlock(target_map);
                        return KERN_SUCCESS;
                }
-               vm_map_lookup_entry(target_map, addr, &entry);
+       }
+       if ((entry->use_pmap) && 
+                       (new_submap->pmap == NULL)) {
+               new_submap->pmap = pmap_create((vm_size_t) 0);
+               if(new_submap->pmap == PMAP_NULL) {
+                       vm_map_unlock(old_submap);
+                       vm_map_unlock(target_map);
+                       return(KERN_NO_SPACE);
+               }
        }
        addr = entry->vme_start;
        vm_map_reference(old_submap);
@@ -1725,16 +2014,17 @@ kern_return_t vm_map_region_replace(
                                        (entry->vme_start < end)) {
                if((entry->is_sub_map) && 
                        (entry->object.sub_map == old_submap)) {
-                       entry->object.sub_map = new_submap;
                        if(entry->use_pmap) {
-                               if((start & 0xfffffff) || 
+                               if((start & 0x0fffffff) || 
                                        ((end - start) != 0x10000000)) {
                                        vm_map_unlock(old_submap);
+                                       vm_map_deallocate(old_submap);
                                        vm_map_unlock(target_map);
                                        return  KERN_INVALID_ARGUMENT;
                                }
                                nested_pmap = 1;
                        }
+                       entry->object.sub_map = new_submap;
                        vm_map_reference(new_submap);
                        vm_map_deallocate(old_submap);
                }
@@ -1743,14 +2033,21 @@ kern_return_t vm_map_region_replace(
        }
        if(nested_pmap) {
 #ifndef i386
-               pmap_unnest(target_map->pmap, start, end - start);
+               pmap_unnest(target_map->pmap, (addr64_t)start);
+               if(target_map->mapped) {
+                       vm_map_submap_pmap_clean(target_map,
+                               start, end, old_submap, 0);
+               }
                pmap_nest(target_map->pmap, new_submap->pmap, 
-                                               start, end - start);
-#endif i386
+                               (addr64_t)start, (addr64_t)start, 
+                               (addr64_t)(end - start));
+#endif /* i386 */
        } else {
-               pmap_remove(target_map->pmap, start, end);
+               vm_map_submap_pmap_clean(target_map,
+                               start, end, old_submap, 0);
        }
        vm_map_unlock(old_submap);
+       vm_map_deallocate(old_submap);
        vm_map_unlock(target_map);
        return KERN_SUCCESS;
 }
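
In the hunks above, vm_map_region_replace() now creates the replacement submap's pmap lazily, cleans the old submap's translations out of the parent pmap with vm_map_submap_pmap_clean(), and drops the extra reference it took on the old submap; it keeps the requirement that a region backed by a nested pmap span exactly 0x10000000 bytes (256 MB) and start on that alignment. A small sketch of that alignment check, with the constants taken from the code above:

#include <stdio.h>

#define NESTED_REGION_SIZE 0x10000000u   /* 256 MB, from the check above */
#define NESTED_REGION_MASK 0x0fffffffu

/* A submap with a nested (shared) pmap can only be swapped in pmap_nest()
 * units: the range must start on a 256 MB boundary and span exactly 256 MB.
 * This mirrors the validation in vm_map_region_replace. */
static int nested_range_ok(unsigned int start, unsigned int end)
{
        if (start & NESTED_REGION_MASK)
                return 0;                         /* misaligned start */
        if ((end - start) != NESTED_REGION_SIZE)
                return 0;                         /* wrong span       */
        return 1;
}

int main(void)
{
        printf("%d\n", nested_range_ok(0x90000000u, 0xa0000000u)); /* 1 */
        printf("%d\n", nested_range_ok(0x90001000u, 0xa0001000u)); /* 0 */
        return 0;
}
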
@@ -1855,12 +2152,12 @@ restart_page_query:
 
        if (m->dirty)
                *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
-       else if(pmap_is_modified(m->phys_addr))
+       else if(pmap_is_modified(m->phys_page))
                *disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
 
        if (m->reference)
                *disposition |= VM_PAGE_QUERY_PAGE_REF;
-       else if(pmap_is_referenced(m->phys_addr))
+       else if(pmap_is_referenced(m->phys_page))
                *disposition |= VM_PAGE_QUERY_PAGE_REF;
 
        vm_object_unlock(object);
@@ -1875,6 +2172,10 @@ set_dp_control_port(
 {
         if (host_priv == HOST_PRIV_NULL)
                 return (KERN_INVALID_HOST);
+
+       if (IP_VALID(dynamic_pager_control_port))
+               ipc_port_release_send(dynamic_pager_control_port);
+
        dynamic_pager_control_port = control_port;
        return KERN_SUCCESS;
 }
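
set_dp_control_port() above now releases the send right it held on the previous dynamic pager control port before caching the new one, and (in the next hunk) get_dp_control_port() hands callers a copied send right instead of the raw cached port. A minimal sketch of that reference discipline; the port type and helpers below are stand-ins, not the Mach IPC primitives:

#include <stdio.h>

/* Stand-ins for a port and its send-right count; not the real ipc_port_t /
 * ipc_port_copy_send() / ipc_port_release_send() interfaces. */
typedef struct port { int srights; } port_t;

static port_t *cached;          /* plays the role of dynamic_pager_control_port */

static void   release_send(port_t *p) { if (p) p->srights--; }
static port_t *copy_send(port_t *p)   { if (p) p->srights++; return p; }

/* set: drop the right held on the old port before caching the new one;
 * without the release, every call leaked one send right on the old port. */
void set_control_port(port_t *newp)
{
        release_send(cached);
        cached = newp;          /* keep the right donated by the caller */
}

/* get: give the caller its own right; returning the cached pointer raw
 * would let the caller consume the kernel's only reference. */
port_t *get_control_port(void)
{
        return copy_send(cached);
}

int main(void)
{
        port_t a = { 1 }, b = { 1 };     /* each arrives with one send right */

        set_control_port(&a);
        set_control_port(&b);            /* releases the right held on a     */
        get_control_port();              /* adds a right to b for the caller */
        printf("a=%d b=%d\n", a.srights, b.srights);   /* prints: a=0 b=2    */
        return 0;
}
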
@@ -1886,54 +2187,68 @@ get_dp_control_port(
 {
         if (host_priv == HOST_PRIV_NULL)
                 return (KERN_INVALID_HOST);
-       *control_port = dynamic_pager_control_port;
+
+       *control_port = ipc_port_copy_send(dynamic_pager_control_port);
        return KERN_SUCCESS;
        
 }
 
-void
-mach_destroy_upl(
-       ipc_port_t      port)
-{
-       upl_t   upl;
-#if MACH_ASSERT
-       assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
-#endif /* MACH_ASSERT */
-       upl = (upl_t)port->ip_kobject;
-       mutex_lock(&(upl)->Lock);
-       upl->ref_count-=1;
-       if(upl->ref_count == 0) {
-               mutex_unlock(&(upl)->Lock);
-               uc_upl_abort(upl, UPL_ABORT_ERROR);
-       } else
-               mutex_unlock(&(upl)->Lock);
-}
 
 /* Retrieve a upl for an object underlying an address range in a map */
 
 kern_return_t
 vm_map_get_upl(
-       vm_map_t        map,
-       vm_offset_t     offset,
-       vm_size_t       *upl_size,
-       upl_t           *upl,
-       upl_page_info_t **page_list,
-       int             *count,
-       int             *flags,
-       int             force_data_sync)
+       vm_map_t                map,
+       vm_address_t            offset,
+       vm_size_t               *upl_size,
+       upl_t                   *upl,
+       upl_page_info_array_t   page_list,
+       unsigned int            *count,
+       int                     *flags,
+       int                     force_data_sync)
 {
        vm_map_entry_t  entry;
        int             caller_flags;
+       int             sync_cow_data = FALSE;
+       vm_object_t     local_object;
+       vm_offset_t     local_offset;
+       vm_offset_t     local_start;
+       kern_return_t   ret;
 
        caller_flags = *flags;
+       if (!(caller_flags & UPL_COPYOUT_FROM)) {
+               sync_cow_data = TRUE;
+       }
        if(upl == NULL)
                return KERN_INVALID_ARGUMENT;
+
+
 REDISCOVER_ENTRY:
        vm_map_lock(map);
        if (vm_map_lookup_entry(map, offset, &entry)) {
+               if (entry->object.vm_object == VM_OBJECT_NULL ||
+                       !entry->object.vm_object->phys_contiguous) {
+                       if((*upl_size/page_size) > MAX_UPL_TRANSFER) {
+                                       *upl_size = MAX_UPL_TRANSFER * page_size;
+                       }
+               }
                if((entry->vme_end - offset) < *upl_size) {
                        *upl_size = entry->vme_end - offset;
                }
+               if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
+                       if (entry->object.vm_object == VM_OBJECT_NULL) {
+                               *flags = 0;
+                       } else if (entry->object.vm_object->private) {
+                               *flags = UPL_DEV_MEMORY;
+                               if (entry->object.vm_object->phys_contiguous) {
+                                       *flags |= UPL_PHYS_CONTIG;
+                               }
+                       } else  {
+                               *flags = 0;
+                       }
+                       vm_map_unlock(map);
+                       return KERN_SUCCESS;
+               }
                /*
                 *      Create an object if necessary.
                 */
@@ -1943,8 +2258,11 @@ REDISCOVER_ENTRY:
                        entry->offset = 0;
                }
                if (!(caller_flags & UPL_COPYOUT_FROM)) {
-                       if (entry->needs_copy
-                                   || entry->object.vm_object->copy) {
+                       if (!(entry->protection & VM_PROT_WRITE)) {
+                               vm_map_unlock(map);
+                               return KERN_PROTECTION_FAILURE;
+                       }
+                       if (entry->needs_copy)  {
                                vm_map_t                local_map;
                                vm_object_t             object;
                                vm_object_offset_t      offset_hi;
@@ -1977,49 +2295,76 @@ REDISCOVER_ENTRY:
                        }
                }
                if (entry->is_sub_map) {
+                       vm_map_t        submap;
+
+                       submap = entry->object.sub_map;
+                       local_start = entry->vme_start;
+                       local_offset = entry->offset;
+                       vm_map_reference(submap);
                        vm_map_unlock(map);
-                       return (vm_map_get_upl(entry->object.sub_map, 
-                               entry->offset + (offset - entry->vme_start), 
+
+                       ret = (vm_map_get_upl(submap, 
+                               local_offset + (offset - local_start), 
                                upl_size, upl, page_list, count, 
                                flags, force_data_sync));
+
+                       vm_map_deallocate(submap);
+                       return ret;
                }
                                        
-               if (!(caller_flags & UPL_COPYOUT_FROM)) {
-                       if (entry->object.vm_object->shadow) {
-                               int     flags;
+               if (sync_cow_data) {
+                       if (entry->object.vm_object->shadow
+                                   || entry->object.vm_object->copy) {
+                               int             flags;
+
+                               local_object = entry->object.vm_object;
+                               local_start = entry->vme_start;
+                               local_offset = entry->offset;
+                               vm_object_reference(local_object);
                                vm_map_unlock(map);
 
-                               vm_object_reference(entry->object.vm_object);
-                               if(entry->object.vm_object->copy == NULL) {
+                               if(local_object->copy == NULL) {
                                        flags = MEMORY_OBJECT_DATA_SYNC;
                                } else {
                                        flags = MEMORY_OBJECT_COPY_SYNC;
                                }
-                                       
-                               memory_object_lock_request(
-                                       entry->object.vm_object,
-                                       (offset - entry->vme_start) 
-                                               + entry->offset,
-                                       (vm_object_size_t)*upl_size, FALSE, 
-                                       flags,
-                                       VM_PROT_NO_CHANGE, NULL, 0);
-                               vm_map_lock(map);
+
+                               if (entry->object.vm_object->shadow && 
+                                          entry->object.vm_object->copy) {
+                                  vm_object_lock_request(
+                                       local_object->shadow,
+                                       (vm_object_offset_t)
+                                       ((offset - local_start) +
+                                        local_offset) +
+                                       local_object->shadow_offset,
+                                       *upl_size, FALSE, 
+                                       MEMORY_OBJECT_DATA_SYNC,
+                                       VM_PROT_NO_CHANGE);
+                               }
+                               sync_cow_data = FALSE;
+                               vm_object_deallocate(local_object);
+                               goto REDISCOVER_ENTRY;
                        }
                }
 
                if (force_data_sync) {
+
+                       local_object = entry->object.vm_object;
+                       local_start = entry->vme_start;
+                       local_offset = entry->offset;
+                       vm_object_reference(local_object);
                        vm_map_unlock(map);
-                       vm_object_reference(entry->object.vm_object);
-
-                       memory_object_lock_request(
-                                                  entry->object.vm_object,
-                                                  (offset - entry->vme_start) 
-                                                  + entry->offset,
-                                                  (vm_object_size_t)*upl_size, FALSE, 
-                                                  MEMORY_OBJECT_DATA_SYNC,
-                                                  VM_PROT_NO_CHANGE, 
-                                                  NULL, 0);
-                       vm_map_lock(map);
+
+                       vm_object_lock_request(
+                                  local_object,
+                                  (vm_object_offset_t)
+                                  ((offset - local_start) + local_offset),
+                                  (vm_object_size_t)*upl_size, FALSE, 
+                                  MEMORY_OBJECT_DATA_SYNC,
+                                  VM_PROT_NO_CHANGE);
+                       force_data_sync = FALSE;
+                       vm_object_deallocate(local_object);
+                       goto REDISCOVER_ENTRY;
                }
 
                if(!(entry->object.vm_object->private)) {
@@ -2033,14 +2378,34 @@ REDISCOVER_ENTRY:
                } else {
                        *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
                }
+               local_object = entry->object.vm_object;
+               local_offset = entry->offset;
+               local_start = entry->vme_start;
+               vm_object_reference(local_object);
                vm_map_unlock(map);
-               return(vm_fault_list_request(entry->object.vm_object, 
-                       ((offset - entry->vme_start) + entry->offset),
-                       *upl_size,
-                       upl,
-                       page_list,
-                       *count,
-                       caller_flags));
+               if(caller_flags & UPL_SET_IO_WIRE) {
+                       ret = (vm_object_iopl_request(local_object, 
+                               (vm_object_offset_t)
+                                  ((offset - local_start) 
+                                               + local_offset),
+                               *upl_size,
+                               upl,
+                               page_list,
+                               count,
+                               caller_flags));
+               } else {
+                       ret = (vm_object_upl_request(local_object, 
+                               (vm_object_offset_t)
+                                  ((offset - local_start) 
+                                               + local_offset),
+                               *upl_size,
+                               upl,
+                               page_list,
+                               count,
+                               caller_flags));
+               }
+               vm_object_deallocate(local_object);
+               return(ret);
        } 
 
        vm_map_unlock(map);
@@ -2048,227 +2413,6 @@ REDISCOVER_ENTRY:
 
 }
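
/*
 * Editor's sketch (not part of the diff): the REDISCOVER_ENTRY path above
 * now takes its own reference on the map entry's object, drops the map lock,
 * and asks the object layer directly for the UPL -- vm_object_iopl_request()
 * when UPL_SET_IO_WIRE is set, vm_object_upl_request() otherwise.  In both
 * cases the offset handed to the object layer is the map offset rebased into
 * the backing object, as illustrated by this purely hypothetical helper:
 */
static inline vm_object_offset_t
upl_object_offset_sketch(
	vm_offset_t		offset,		/* offset within the map */
	vm_offset_t		entry_start,	/* entry->vme_start */
	vm_object_offset_t	entry_offset)	/* entry->offset */
{
	/* rebase the map offset onto the backing object, as the code above does */
	return (vm_object_offset_t)((offset - entry_start) + entry_offset);
}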
 
-
-kern_return_t
-vm_object_upl_request(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       vm_size_t               size,
-       ipc_port_t              *upl,
-       upl_page_info_t         *page_list,
-       mach_msg_type_number_t  *count,
-       int                     cntrl_flags)
-{
-       upl_t   upl_object;
-       ipc_port_t      upl_port;
-       ipc_port_t      previous;
-       upl_page_info_t *pl;
-       kern_return_t   kr;
-
-       pl = page_list;
-       kr = vm_fault_list_request(object, offset, size, &upl_object,
-                                               &pl, *count, cntrl_flags);
-
-       
-       if(kr != KERN_SUCCESS) {
-               *upl = MACH_PORT_NULL;
-               return KERN_FAILURE;
-       }
-
-       upl_port = ipc_port_alloc_kernel();
-
-
-       ip_lock(upl_port);
-
-       /* make a sonce right */
-       upl_port->ip_sorights++;
-       ip_reference(upl_port);
-
-       upl_port->ip_destination = IP_NULL;
-       upl_port->ip_receiver_name = MACH_PORT_NULL;
-       upl_port->ip_receiver = ipc_space_kernel;
-
-       /* make a send right */
-        upl_port->ip_mscount++;
-        upl_port->ip_srights++;
-        ip_reference(upl_port);
-
-       ipc_port_nsrequest(upl_port, 1, upl_port, &previous);
-       /* nsrequest unlocks user_handle */
-
-       /* Create a named object based on a submap of specified size */
-
-
-       ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL);
-       *upl = upl_port;
-       return KERN_SUCCESS;
-}
-
-kern_return_t
-vm_pager_upl_request(
-       vm_object_t             object,
-       vm_object_offset_t      offset,
-       vm_size_t               size,
-       vm_size_t               super_size,
-       ipc_port_t              *upl,
-       upl_page_info_t         *page_list,
-       mach_msg_type_number_t  *count,
-       int                     cntrl_flags)
-{
-       upl_t   upl_object;
-       ipc_port_t      upl_port;
-       ipc_port_t      previous;
-       upl_page_info_t *pl;
-       kern_return_t   kr;
-
-       pl = page_list;
-       kr = upl_system_list_request(object, offset, size, super_size, 
-                                       &upl_object, &pl, *count, cntrl_flags);
-
-       if(kr != KERN_SUCCESS) {
-               *upl = MACH_PORT_NULL;
-               return KERN_FAILURE;
-       }
-
-       
-       upl_port = ipc_port_alloc_kernel();
-
-
-       ip_lock(upl_port);
-
-       /* make a sonce right */
-       upl_port->ip_sorights++;
-       ip_reference(upl_port);
-
-       upl_port->ip_destination = IP_NULL;
-       upl_port->ip_receiver_name = MACH_PORT_NULL;
-       upl_port->ip_receiver = ipc_space_kernel;
-
-       /* make a send right */
-        upl_port->ip_mscount++;
-        upl_port->ip_srights++;
-        ip_reference(upl_port);
-
-       ipc_port_nsrequest(upl_port, 1, upl_port, &previous);
-       /* nsrequest unlocks user_handle */
-
-       /* Create a named object based on a submap of specified size */
-
-
-       ipc_kobject_set(upl_port, (ipc_kobject_t) upl_object, IKOT_UPL);
-       *upl = upl_port;
-       return KERN_SUCCESS;
-}
-
-kern_return_t
-vm_upl_map(
-       vm_map_t        map,
-       ipc_port_t      upl_port,
-       vm_offset_t     *dst_addr)
-{
-       upl_t           upl;
-       kern_return_t   kr;
-
-       if (!IP_VALID(upl_port)) {
-               return KERN_INVALID_ARGUMENT;
-       } else if (ip_kotype(upl_port) == IKOT_UPL) {
-               upl_lock(upl);
-               upl = (upl_t)upl_port->ip_kobject;
-               kr = uc_upl_map(map, upl, dst_addr);
-               upl_unlock(upl);
-               return kr;
-       } else {
-               return KERN_FAILURE;
-       }
-}
-
-
-kern_return_t
-vm_upl_unmap(
-       vm_map_t        map,
-       ipc_port_t      upl_port)
-{
-       upl_t           upl;
-       kern_return_t   kr;
-
-       if (!IP_VALID(upl_port)) {
-               return KERN_INVALID_ARGUMENT;
-       } else if (ip_kotype(upl_port) == IKOT_UPL) {
-               upl_lock(upl);
-               upl = (upl_t)upl_port->ip_kobject;
-               kr = uc_upl_un_map(map, upl);
-               upl_unlock(upl);
-               return kr;
-       } else {
-               return KERN_FAILURE;
-       }
-}
-
-kern_return_t
-vm_upl_commit(
-       upl_t                   upl,
-       upl_page_list_ptr_t     page_list,
-       mach_msg_type_number_t  count)
-{
-       kern_return_t kr;
-       upl_lock(upl);
-       if(count) {
-               kr = uc_upl_commit(upl, (upl_page_info_t *)page_list);
-       } else {
-               kr = uc_upl_commit(upl, (upl_page_info_t *) NULL);
-       }
-       upl_unlock(upl);
-       return kr;
-}
-
-kern_return_t
-vm_upl_commit_range(
-       upl_t                   upl,
-       vm_offset_t             offset,
-       vm_size_t               size,
-       upl_page_list_ptr_t     page_list,
-       int                     flags,
-       mach_msg_type_number_t  count)
-{
-       kern_return_t kr;
-       upl_lock(upl);
-       if(count) {
-               kr = uc_upl_commit_range(upl, offset, size, flags, 
-                                       (upl_page_info_t *)page_list);
-       } else {
-               kr = uc_upl_commit_range(upl, offset, size, flags,
-                                       (upl_page_info_t *) NULL);
-       }
-       upl_unlock(upl);
-       return kr;
-}
-       
-kern_return_t
-vm_upl_abort_range(
-       upl_t           upl,
-       vm_offset_t     offset,
-       vm_size_t       size,
-       int             abort_flags)
-{
-       kern_return_t kr;
-       upl_lock(upl);
-       kr = uc_upl_abort_range(upl, offset, size, abort_flags);
-       upl_unlock(upl);
-       return kr;
-}
-
-kern_return_t
-vm_upl_abort(
-       upl_t           upl,
-       int             abort_type)
-{
-       kern_return_t kr;
-       upl_lock(upl);
-       kr = uc_upl_abort(upl, abort_type);
-       upl_unlock(upl);
-       return kr;
-}
-
 /* ******* Temporary Internal calls to UPL for BSD ***** */
 kern_return_t
 kernel_upl_map(
@@ -2276,209 +2420,88 @@ kernel_upl_map(
        upl_t           upl,
        vm_offset_t     *dst_addr)
 {
-       kern_return_t   kr;
-
-       upl_lock(upl);
-       kr = uc_upl_map(map, upl, dst_addr);
-       if(kr ==  KERN_SUCCESS) {
-               upl->ref_count += 1;
-       }
-       upl_unlock(upl);
-       return kr;
+       return (vm_upl_map(map, upl, dst_addr));
 }
 
 
 kern_return_t
 kernel_upl_unmap(
        vm_map_t        map,
-       upl_t   upl)
+       upl_t           upl)
 {
-       kern_return_t   kr;
-
-               upl_lock(upl);
-               kr = uc_upl_un_map(map, upl);
-               if(kr ==  KERN_SUCCESS) {
-                       if(upl->ref_count == 1) {
-                               upl_dealloc(upl);
-                       } else {
-                               upl->ref_count -= 1;
-                               upl_unlock(upl);
-                       }
-               } else {
-                       upl_unlock(upl);
-               }
-               return kr;
+       return(vm_upl_unmap(map, upl));
 }
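
/*
 * Editor's sketch (not part of the diff): kernel_upl_map()/kernel_upl_unmap()
 * are now thin wrappers over vm_upl_map()/vm_upl_unmap(), so a hypothetical
 * in-kernel user simply maps the UPL into a kernel map, touches the pages,
 * and unmaps when done:
 */
static kern_return_t
map_touch_unmap_sketch(vm_map_t kmap, upl_t upl)
{
	vm_offset_t	dst_addr;
	kern_return_t	kr;

	kr = kernel_upl_map(kmap, upl, &dst_addr);
	if (kr != KERN_SUCCESS)
		return kr;
	/* ... operate on the pages mapped at dst_addr ... */
	return kernel_upl_unmap(kmap, upl);
}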
 
 kern_return_t
 kernel_upl_commit(
        upl_t                   upl,
-       upl_page_list_ptr_t     page_list,
-       mach_msg_type_number_t  count)
+       upl_page_info_t         *pl,
+       mach_msg_type_number_t  count)
 {
-       kern_return_t kr;
-       upl_lock(upl);
-       upl->ref_count += 1;
-       if(count) {
-               kr = uc_upl_commit(upl, (upl_page_info_t *)page_list);
-       } else {
-               kr = uc_upl_commit(upl, (upl_page_info_t *) NULL);
-       }
-       if(upl->ref_count == 1) {
-               upl_dealloc(upl);
-       } else {
-               upl->ref_count -= 1;
-               upl_unlock(upl);
-       }
+       kern_return_t   kr;
+
+       kr = upl_commit(upl, pl, count);
+       upl_deallocate(upl);
        return kr;
 }
 
+
 kern_return_t
 kernel_upl_commit_range(
        upl_t                   upl,
        vm_offset_t             offset,
        vm_size_t               size,
        int                     flags,
-       upl_page_list_ptr_t     page_list,
-       mach_msg_type_number_t  count)
+       upl_page_info_array_t   pl,
+       mach_msg_type_number_t  count)
 {
-       kern_return_t kr;
-       upl_lock(upl);
-       upl->ref_count += 1;
-       if(count) {
-               kr = uc_upl_commit_range(upl, offset, size, flags, 
-                                       (upl_page_info_t *)page_list);
-       } else {
-               kr = uc_upl_commit_range(upl, offset, size, flags,
-                                       (upl_page_info_t *) NULL);
-       }
-       if(upl->ref_count == 1) {
-               upl_dealloc(upl);
-       } else {
-               upl->ref_count -= 1;
-               upl_unlock(upl);
-       }
+       boolean_t               finished = FALSE;
+       kern_return_t           kr;
+
+       if (flags & UPL_COMMIT_FREE_ON_EMPTY)
+               flags |= UPL_COMMIT_NOTIFY_EMPTY;
+
+       kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);
+
+       if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
+               upl_deallocate(upl);
+
        return kr;
 }
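
/*
 * Editor's sketch (not part of the diff): with the rewrite above,
 * kernel_upl_commit_range() forwards to upl_commit_range() and, when the
 * caller passes UPL_COMMIT_FREE_ON_EMPTY and the commit leaves the UPL
 * empty, drops the UPL reference itself via upl_deallocate().  A
 * hypothetical caller can therefore commit page-sized subranges and must
 * not touch the UPL again once the last page has been committed:
 */
static void
commit_whole_upl_sketch(
	upl_t			upl,
	vm_size_t		upl_size,
	upl_page_info_t		*pl,
	mach_msg_type_number_t	count)
{
	vm_offset_t	offset;
	kern_return_t	kr;

	for (offset = 0; offset < upl_size; offset += PAGE_SIZE) {
		/* FREE_ON_EMPTY asks the UPL layer to free the UPL when the
		 * final page is committed; the UPL is invalid afterwards. */
		kr = kernel_upl_commit_range(upl, offset, PAGE_SIZE,
			UPL_COMMIT_FREE_ON_EMPTY, pl, count);
		if (kr != KERN_SUCCESS)
			break;
	}
}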
        
 kern_return_t
 kernel_upl_abort_range(
-       upl_t           upl,
-       vm_offset_t     offset,
-       vm_size_t       size,
-       int             abort_flags)
+       upl_t                   upl,
+       vm_offset_t             offset,
+       vm_size_t               size,
+       int                     abort_flags)
 {
-       kern_return_t kr;
-       upl_lock(upl);
-       upl->ref_count += 1;
-       kr = uc_upl_abort_range(upl, offset, size, abort_flags);
-       if(upl->ref_count == 1) {
-               upl_dealloc(upl);
-       } else {
-               upl->ref_count -= 1;
-               upl_unlock(upl);
-       }
-       return kr;
-}
+       kern_return_t           kr;
+       boolean_t               finished = FALSE;
 
-kern_return_t
-kernel_upl_abort(
-       upl_t           upl,
-       int             abort_type)
-{
-       kern_return_t kr;
-       upl_lock(upl);
-       upl->ref_count += 1;
-       kr = uc_upl_abort(upl, abort_type);
-       if(upl->ref_count == 1) {
-               upl_dealloc(upl);
-       } else {
-               upl->ref_count -= 1;
-               upl_unlock(upl);
-       }
-       return kr;
-}
+       if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
+               abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
 
+       kr = upl_abort_range(upl, offset, size, abort_flags, &finished);
 
+       if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
+               upl_deallocate(upl);
 
-/* code snippet from vm_map */
-kern_return_t   
-vm_object_create_nomap(ipc_port_t  port, vm_object_size_t   size)
-{
-       vm_object_t     object_ptr;
-       return memory_object_create_named(port, size, &object_ptr);
+       return kr;
 }
 
-
-/* 
- * Temporary interface to overcome old style ipc artifacts, and allow
- * ubc to call this routine directly.  Will disappear with new RPC
- * component architecture.
- * NOTE: call to memory_object_destroy removes the vm_object's association
- * with its abstract memory object and hence the named flag is set to false.
- */
 kern_return_t
-memory_object_destroy_named(
-       vm_object_t     object,
-       kern_return_t   reason)
+kernel_upl_abort(
+       upl_t                   upl,
+       int                     abort_type)
 {
-       vm_object_lock(object);
-       if(object->named == FALSE) {
-               panic("memory_object_destroy_named called by party which doesn't hold right");
-       }
-       object->ref_count++;
-       vm_object_res_reference(object);
-       vm_object_unlock(object);
-       return (memory_object_destroy(object, reason));
-}
+       kern_return_t   kr;
 
-/* 
- * Temporary interface to overcome old style ipc artifacts, and allow
- * ubc to call this routine directly.  Will disappear with new RPC
- * component architecture.
- * Note: No change is made in the named flag.
- */
-kern_return_t
-memory_object_lock_request_named(
-       vm_object_t                     object,
-       vm_object_offset_t              offset,
-       vm_object_size_t                size,
-       memory_object_return_t          should_return,
-       boolean_t                       should_flush,
-       vm_prot_t                       prot,
-       ipc_port_t                      reply_to)
-{
-       vm_object_lock(object);
-       if(object->named == FALSE) {
-               panic("memory_object_lock_request_named called by party which doesn't hold right");
-       }
-       object->ref_count++;
-       vm_object_res_reference(object);
-       vm_object_unlock(object);
-       return (memory_object_lock_request(object,
-                       offset, size, should_return, should_flush, prot,
-                       reply_to, 0));
+       kr = upl_abort(upl, abort_type);
+       upl_deallocate(upl);
+       return kr;
 }
 
-kern_return_t
-memory_object_change_attributes_named(
-        vm_object_t             object,
-        memory_object_flavor_t  flavor,
-       memory_object_info_t    attributes,
-       mach_msg_type_number_t  count,
-        ipc_port_t              reply_to,
-        mach_msg_type_name_t    reply_to_type)
-{
-       vm_object_lock(object);
-       if(object->named == FALSE) {
-               panic("memory_object_lock_request_named called by party which doesn't hold right");
-       }
-       object->ref_count++;
-       vm_object_res_reference(object);
-       vm_object_unlock(object);
-       return (memory_object_change_attributes(object, 
-                       flavor, attributes, count, reply_to, reply_to_type));
-}
 
 kern_return_t
 vm_get_shared_region(
@@ -2509,6 +2532,8 @@ shared_region_mapping_info(
        vm_offset_t             *client_base,
        vm_offset_t             *alt_base,
        vm_offset_t             *alt_next,
+       unsigned int            *fs_base,
+       unsigned int            *system,
        int                     *flags,
        shared_region_mapping_t *next)
 {
@@ -2523,6 +2548,8 @@ shared_region_mapping_info(
        *alt_base = shared_region->alternate_base;
        *alt_next = shared_region->alternate_next;
        *flags = shared_region->flags;
+       *fs_base = shared_region->fs_base;
+       *system = shared_region->system;
        *next = shared_region->next;
 
        shared_region_mapping_unlock(shared_region);
@@ -2569,6 +2596,8 @@ shared_region_mapping_create(
        shared_region_mapping_lock_init((*shared_region));
        (*shared_region)->text_region = text_region;
        (*shared_region)->text_size = text_size;
+       (*shared_region)->fs_base = ENV_DEFAULT_ROOT;
+       (*shared_region)->system = machine_slot[cpu_number()].cpu_type;
        (*shared_region)->data_region = data_region;
        (*shared_region)->data_size = data_size;
        (*shared_region)->region_mappings = region_mappings;
@@ -2579,6 +2608,7 @@ shared_region_mapping_create(
        (*shared_region)->self = *shared_region;
        (*shared_region)->flags = 0;
        (*shared_region)->depth = 0;
+       (*shared_region)->default_env_list = NULL;
        (*shared_region)->alternate_base = alt_base;
        (*shared_region)->alternate_next = alt_next;
        return KERN_SUCCESS;
@@ -2599,59 +2629,87 @@ shared_region_mapping_ref(
 {
        if(shared_region == NULL)
                return KERN_SUCCESS;
-       shared_region_mapping_lock(shared_region);
-       shared_region->ref_count++;
-       shared_region_mapping_unlock(shared_region);
+       hw_atomic_add(&shared_region->ref_count, 1);
        return KERN_SUCCESS;
 }
 
-kern_return_t
-shared_region_mapping_dealloc(
-       shared_region_mapping_t shared_region)
+__private_extern__ kern_return_t
+shared_region_mapping_dealloc_lock(
+       shared_region_mapping_t shared_region,
+       int need_lock)
 {
        struct shared_region_task_mappings sm_info;
-       shared_region_mapping_t         next;
-
-       if(shared_region == NULL)
-               return KERN_SUCCESS;
-       shared_region_mapping_lock(shared_region);
-
-       if((--shared_region->ref_count) == 0) {
-
-               sm_info.text_region = shared_region->text_region;
-               sm_info.text_size = shared_region->text_size;
-               sm_info.data_region = shared_region->data_region;
-               sm_info.data_size = shared_region->data_size;
-               sm_info.region_mappings = shared_region->region_mappings;
-               sm_info.client_base = shared_region->client_base;
-               sm_info.alternate_base = shared_region->alternate_base;
-               sm_info.alternate_next = shared_region->alternate_next;
-               sm_info.flags = shared_region->flags;
-               sm_info.self = shared_region;
-
-               lsf_remove_regions_mappings(shared_region, &sm_info);
-               pmap_remove(((vm_named_entry_t)
-                       (shared_region->text_region->ip_kobject))
-                                               ->backing.map->pmap, 
-                       sm_info.client_base, 
-                       sm_info.client_base + sm_info.text_size);
-               ipc_port_release_send(shared_region->text_region);
-               ipc_port_release_send(shared_region->data_region);
-               if(shared_region->object_chain) {
-                       shared_region_mapping_dealloc(
-                            shared_region->object_chain->object_chain_region);
-                       kfree((vm_offset_t)shared_region->object_chain,
-                               sizeof (struct shared_region_object_chain));
-               }
-               kfree((vm_offset_t)shared_region,
+       shared_region_mapping_t next = NULL;
+       int ref_count;
+
+       while (shared_region) {
+               if ((ref_count = 
+                         hw_atomic_sub(&shared_region->ref_count, 1)) == 0) {
+                       shared_region_mapping_lock(shared_region);
+
+                       sm_info.text_region = shared_region->text_region;
+                       sm_info.text_size = shared_region->text_size;
+                       sm_info.data_region = shared_region->data_region;
+                       sm_info.data_size = shared_region->data_size;
+                       sm_info.region_mappings = shared_region->region_mappings;
+                       sm_info.client_base = shared_region->client_base;
+                       sm_info.alternate_base = shared_region->alternate_base;
+                       sm_info.alternate_next = shared_region->alternate_next;
+                       sm_info.flags = shared_region->flags;
+                       sm_info.self = (vm_offset_t)shared_region;
+
+                       if(shared_region->region_mappings) {
+                               lsf_remove_regions_mappings_lock(shared_region, &sm_info, need_lock);
+                       }
+                       if(((vm_named_entry_t)
+                               (shared_region->text_region->ip_kobject))
+                                                        ->backing.map->pmap) {
+                           pmap_remove(((vm_named_entry_t)
+                               (shared_region->text_region->ip_kobject))
+                                                       ->backing.map->pmap, 
+                               sm_info.client_base, 
+                               sm_info.client_base + sm_info.text_size);
+                       }
+                       ipc_port_release_send(shared_region->text_region);
+                       if(shared_region->data_region)
+                               ipc_port_release_send(shared_region->data_region);
+                       if (shared_region->object_chain) {
+                               next = shared_region->object_chain->object_chain_region;
+                               kfree((vm_offset_t)shared_region->object_chain,
+                                       sizeof (struct shared_region_object_chain));
+                       } else {
+                               next = NULL;
+                       }
+                       shared_region_mapping_unlock(shared_region);
+                       kfree((vm_offset_t)shared_region,
                                sizeof (struct shared_region_mapping));
-               return KERN_SUCCESS;
+                       shared_region = next;
+               } else {
+                       /* Stale indicates that a system region is no */
+                       /* longer in the default environment list.    */
+                       if((ref_count == 1) && 
+                         (shared_region->flags & SHARED_REGION_SYSTEM)
+                         && (shared_region->flags & ~SHARED_REGION_STALE)) {
+                               remove_default_shared_region_lock(shared_region,need_lock);
+                       }
+                       break;
+               }
        }
-       shared_region_mapping_unlock(shared_region);
        return KERN_SUCCESS;
 }
 
-vm_offset_t
+/*
+ * Stub function; always indicates that the lock needs to be taken in the
+ * call to lsf_remove_regions_mappings_lock().
+ */
+kern_return_t
+shared_region_mapping_dealloc(
+       shared_region_mapping_t shared_region)
+{
+       return shared_region_mapping_dealloc_lock(shared_region, 1);
+}
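
/*
 * Editor's sketch (not part of the diff): shared region reference counting
 * is now done with hw_atomic_add()/hw_atomic_sub(), so takers and releasers
 * simply pair shared_region_mapping_ref() with shared_region_mapping_dealloc();
 * the final release tears the region down and walks the object chain as shown
 * above.  A hypothetical pairing:
 */
static void
shared_region_ref_pairing_sketch(shared_region_mapping_t region)
{
	shared_region_mapping_ref(region);	/* take a reference */
	/* ... consult or install the region's mappings ... */
	shared_region_mapping_dealloc(region);	/* drop it; may free on last ref */
}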
+
+ppnum_t
 vm_map_get_phys_page(
        vm_map_t        map,
        vm_offset_t     offset)
@@ -2659,7 +2717,7 @@ vm_map_get_phys_page(
        vm_map_entry_t  entry;
        int             ops;
        int             flags;
-       vm_offset_t     phys_addr = 0;
+       ppnum_t         phys_page = 0;
        vm_object_t     object;
 
        vm_map_lock(map);
@@ -2678,6 +2736,26 @@ vm_map_get_phys_page(
                        vm_map_unlock(old_map);
                        continue;
                }
+               if (entry->object.vm_object->phys_contiguous) {
+                       /* These are not standard pageable memory mappings. */
+                       /* If they are not present in the object, they will */
+                       /* have to be picked up from the pager through the  */
+                       /* fault mechanism.  */
+                       if(entry->object.vm_object->shadow_offset == 0) {
+                               /* need to call vm_fault */
+                               vm_map_unlock(map);
+                               vm_fault(map, offset, VM_PROT_NONE, 
+                                       FALSE, THREAD_UNINT, NULL, 0);
+                               vm_map_lock(map);
+                               continue;
+                       }
+                       offset = entry->offset + (offset - entry->vme_start);
+                       phys_page = (ppnum_t)
+                               ((entry->object.vm_object->shadow_offset 
+                                                       + offset) >> 12);
+                       break;
+                       
+               }
                offset = entry->offset + (offset - entry->vme_start);
                object = entry->object.vm_object;
                vm_object_lock(object);
@@ -2696,7 +2774,7 @@ vm_map_get_phys_page(
                                        break;
                                }
                        } else {
-                               phys_addr = dst_page->phys_addr;
+                               phys_page = (ppnum_t)(dst_page->phys_page);
                                vm_object_unlock(object);
                                break;
                        }
@@ -2706,6 +2784,116 @@ vm_map_get_phys_page(
        } 
 
        vm_map_unlock(map);
-       return phys_addr;
+       return phys_page;
 }
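
/*
 * Editor's sketch (not part of the diff): vm_map_get_phys_page() now returns
 * a physical page number (ppnum_t), mirroring vm_page's phys_page field,
 * rather than a physical byte address.  A hypothetical caller that still
 * wants a byte address shifts the page number back up by the page shift
 * (12, matching the ">> 12" for the 4K pages assumed above):
 */
static vm_offset_t
phys_addr_from_ppnum_sketch(vm_map_t map, vm_offset_t offset)
{
	ppnum_t	pn;

	pn = vm_map_get_phys_page(map, offset);
	if (pn == 0)
		return 0;	/* no physical page found for this offset */
	return ((vm_offset_t)pn << 12) | (offset & (PAGE_SIZE - 1));
}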
+
+
+
+kern_return_t
+kernel_object_iopl_request(
+       vm_named_entry_t        named_entry,
+       memory_object_offset_t  offset,
+       vm_size_t               *upl_size,
+       upl_t                   *upl_ptr,
+       upl_page_info_array_t   user_page_list,
+       unsigned int            *page_list_count,
+       int                     *flags)
+{
+       vm_object_t             object;
+       kern_return_t           ret;
+
+       int                     caller_flags;
+
+       caller_flags = *flags;
+
+       /* a few checks to make sure the caller is obeying the rules */
+       if(*upl_size == 0) {
+               if(offset >= named_entry->size)
+                       return(KERN_INVALID_RIGHT);
+               *upl_size = named_entry->size - offset;
+       }
+       if(caller_flags & UPL_COPYOUT_FROM) {
+               if((named_entry->protection & VM_PROT_READ) 
+                                       != VM_PROT_READ) {
+                       return(KERN_INVALID_RIGHT);
+               }
+       } else {
+               if((named_entry->protection & 
+                       (VM_PROT_READ | VM_PROT_WRITE)) 
+                       != (VM_PROT_READ | VM_PROT_WRITE)) {
+                       return(KERN_INVALID_RIGHT);
+               }
+       }
+       if(named_entry->size < (offset + *upl_size))
+               return(KERN_INVALID_ARGUMENT);
+
+       /* the caller's offset parameter is defined to be the offset */
+       /* from the beginning of the named entry, within the object  */
+       offset = offset + named_entry->offset;
+
+       if(named_entry->is_sub_map) 
+               return (KERN_INVALID_ARGUMENT);
+               
+       named_entry_lock(named_entry);
+
+       if(named_entry->object) {
+               /* This is the case where we are going to map an  */
+               /* already mapped object.  If the object is not   */
+               /* ready, it must be internal: an external object */
+               /* cannot be mapped until it is ready, so we can  */
+               /* skip the ready check in this case.             */
+               vm_object_reference(named_entry->object);
+               object = named_entry->object;
+               named_entry_unlock(named_entry);
+       } else {
+               object = vm_object_enter(named_entry->backing.pager, 
+                               named_entry->offset + named_entry->size, 
+                               named_entry->internal, 
+                               FALSE,
+                               FALSE);
+               if (object == VM_OBJECT_NULL) {
+                       named_entry_unlock(named_entry);
+                       return(KERN_INVALID_OBJECT);
+               }
+               vm_object_lock(object);
+
+               /* create an extra reference for the named entry */
+               vm_object_reference_locked(object);
+               named_entry->object = object;
+               named_entry_unlock(named_entry);
+
+               /* wait for object (if any) to be ready */
+               while (!object->pager_ready) {
+                       vm_object_wait(object,
+                               VM_OBJECT_EVENT_PAGER_READY,
+                               THREAD_UNINT);
+                       vm_object_lock(object);
+               }
+               vm_object_unlock(object);
+       }
+
+       if (!object->private) {
+               if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
+                       *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
+               if (object->phys_contiguous) {
+                       *flags = UPL_PHYS_CONTIG;
+               } else {
+                       *flags = 0;
+               }
+       } else {
+               *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
+       }
+
+       ret = vm_object_iopl_request(object,
+                                    offset,
+                                    *upl_size,
+                                    upl_ptr,
+                                    user_page_list,
+                                    page_list_count,
+                                    caller_flags);
+       vm_object_deallocate(object);
+       return ret;
+}
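
/*
 * Editor's sketch (not part of the diff): kernel_object_iopl_request()
 * validates the named entry (size, protection, not a submap), resolves or
 * creates the backing VM object, and then hands off to
 * vm_object_iopl_request().  A hypothetical in-kernel caller wiring the
 * first page of a named entry for read access might look like this (all
 * names other than those in the function above are illustrative):
 */
static kern_return_t
wire_named_entry_page_sketch(vm_named_entry_t named_entry, upl_t *upl)
{
	vm_size_t		upl_size = PAGE_SIZE;
	upl_page_info_t		page_info[1];
	unsigned int		count = 1;
	int			flags = UPL_COPYOUT_FROM;  /* read access suffices */

	/* on return, flags describes the memory (e.g. UPL_PHYS_CONTIG) */
	return kernel_object_iopl_request(named_entry, 0, &upl_size,
			upl, page_info, &count, &flags);
}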
+
 #endif /* VM_CPM */