git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/vm/vm_map.c
xnu-517.3.7.tar.gz
[apple/xnu.git] / osfmk / vm / vm_map.c
index 40e3da3fa35c077acdb9ceeea6c0ea88a9bc51fc..aa82856e826fd361e492c2fc8823a62ceea73418 100644 (file)
@@ -1,21 +1,24 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -66,6 +69,7 @@
 #include <mach/vm_attributes.h>
 #include <mach/vm_param.h>
 #include <mach/vm_behavior.h>
+#include <mach/vm_statistics.h>
 #include <kern/assert.h>
 #include <kern/counters.h>
 #include <kern/zalloc.h>
@@ -81,6 +85,7 @@
 #include <mach/vm_map_server.h>
 #include <mach/mach_host_server.h>
 #include <ddb/tr.h>
+#include <machine/db_machdep.h>
 #include <kern/xpr.h>
 
 /* Internal prototypes
@@ -299,6 +304,9 @@ int         kentry_count = 2048;            /* to init kentry_data_size */
  */
 vm_size_t vm_map_aggressive_enter_max;         /* set by bootstrap */
 
+/* Skip acquiring locks if we're in the midst of a kernel core dump */
+extern unsigned int not_in_kdp;
+
 void
 vm_map_init(
        void)
@@ -332,7 +340,7 @@ void
 vm_map_steal_memory(
        void)
 {
-       map_data_size = round_page(10 * sizeof(struct vm_map));
+       map_data_size = round_page_32(10 * sizeof(struct vm_map));
        map_data = pmap_steal_memory(map_data_size);
 
 #if 0
@@ -348,7 +356,7 @@ vm_map_steal_memory(
 
 
        kentry_data_size =
-               round_page(kentry_count * sizeof(struct vm_map_entry));
+               round_page_32(kentry_count * sizeof(struct vm_map_entry));
        kentry_data = pmap_steal_memory(kentry_data_size);
 }
 
@@ -388,6 +396,7 @@ vm_map_create(
        result->max_offset = max;
        result->wiring_required = FALSE;
        result->no_zero_fill = FALSE;
+       result->mapped = FALSE;
        result->wait_for_space = FALSE;
        result->first_free = vm_map_to_entry(result);
        result->hint = vm_map_to_entry(result);
@@ -473,8 +482,8 @@ first_free_is_valid(
                
        entry = vm_map_to_entry(map);
        next = entry->vme_next;
-       while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) ||
-              (trunc_page(next->vme_start) == trunc_page(entry->vme_start) &&
+       while (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_end) ||
+              (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_start) &&
                next != vm_map_to_entry(map))) {
                entry = next;
                next = entry->vme_next;
@@ -504,10 +513,10 @@ MACRO_BEGIN                                                               \
        UFF_map = (map);                                                \
        UFF_first_free = (new_first_free);                              \
        UFF_next_entry = UFF_first_free->vme_next;                      \
-       while (trunc_page(UFF_next_entry->vme_start) ==                 \
-              trunc_page(UFF_first_free->vme_end) ||                   \
-              (trunc_page(UFF_next_entry->vme_start) ==                \
-               trunc_page(UFF_first_free->vme_start) &&                \
+       while (trunc_page_32(UFF_next_entry->vme_start) ==              \
+              trunc_page_32(UFF_first_free->vme_end) ||                        \
+              (trunc_page_32(UFF_next_entry->vme_start) ==             \
+               trunc_page_32(UFF_first_free->vme_start) &&             \
                UFF_next_entry != vm_map_to_entry(UFF_map))) {          \
                UFF_first_free = UFF_next_entry;                        \
                UFF_next_entry = UFF_first_free->vme_next;              \
@@ -570,53 +579,7 @@ MACRO_END
        (entry)->vme_prev->vme_next = (entry)->vme_next;                \
        MACRO_END
 
-/*
- *     kernel_vm_map_reference:
- *
- *     kernel internal export version for iokit and bsd components
- *     in lieu of component interface semantics.
- *
- */
-void
-kernel_vm_map_reference(
-       register vm_map_t       map)
-{
-       if (map == VM_MAP_NULL)
-               return;
-
-       mutex_lock(&map->s_lock);
-#if    TASK_SWAPPER
-       assert(map->res_count > 0);
-       assert(map->ref_count >= map->res_count);
-       map->res_count++;
-#endif
-       map->ref_count++;
-       mutex_unlock(&map->s_lock);
-}
-
 #if    MACH_ASSERT && TASK_SWAPPER
-/*
- *     vm_map_reference:
- *
- *     Adds valid reference and residence counts to the given map.
- *     The map must be in memory (i.e. non-zero residence count).
- *
- */
-void
-vm_map_reference(
-       register vm_map_t       map)
-{
-       if (map == VM_MAP_NULL)
-               return;
-
-       mutex_lock(&map->s_lock);
-       assert(map->res_count > 0);
-       assert(map->ref_count >= map->res_count);
-       map->ref_count++;
-       map->res_count++;
-       mutex_unlock(&map->s_lock);
-}
-
 /*
  *     vm_map_res_reference:
  *
@@ -685,44 +648,6 @@ void vm_map_res_deallocate(register vm_map_t map)
 }
 #endif /* MACH_ASSERT && TASK_SWAPPER */
 
-/*
- *     vm_map_deallocate:
- *
- *     Removes a reference from the specified map,
- *     destroying it if no references remain.
- *     The map should not be locked.
- */
-void
-vm_map_deallocate(
-       register vm_map_t       map)
-{
-       unsigned int            ref;
-
-       if (map == VM_MAP_NULL)
-               return;
-
-       mutex_lock(&map->s_lock);
-       ref = --map->ref_count;
-       if (ref > 0) {
-               vm_map_res_deallocate(map);
-               mutex_unlock(&map->s_lock);
-               return;
-       }
-       assert(map->ref_count == 0);
-       mutex_unlock(&map->s_lock);
-
-#if    TASK_SWAPPER
-       /*
-        * The map residence count isn't decremented here because
-        * the vm_map_delete below will traverse the entire map, 
-        * deleting entries, and the residence counts on objects
-        * and sharing maps will go away then.
-        */
-#endif
-
-       vm_map_destroy(map);
-}
-
 /*
  *     vm_map_destroy:
  *
@@ -737,7 +662,8 @@ vm_map_destroy(
                             map->max_offset, VM_MAP_NO_FLAGS);
        vm_map_unlock(map);
 
-       pmap_destroy(map->pmap);
+       if(map->pmap)
+               pmap_destroy(map->pmap);
 
        zfree(vm_map_zone, (vm_offset_t) map);
 }
@@ -933,9 +859,11 @@ void vm_map_swapout(vm_map_t map)
  *     future lookups.  Performs necessary interlocks.
  */
 #define        SAVE_HINT(map,value) \
+MACRO_BEGIN \
                mutex_lock(&(map)->s_lock); \
                (map)->hint = (value); \
-               mutex_unlock(&(map)->s_lock);
+               mutex_unlock(&(map)->s_lock); \
+MACRO_END
 
 /*
  *     vm_map_lookup_entry:    [ internal use only ]
@@ -960,10 +888,11 @@ vm_map_lookup_entry(
         *      Start looking either from the head of the
         *      list, or from the hint.
         */
-
-       mutex_lock(&map->s_lock);
+       if (not_in_kdp)
+         mutex_lock(&map->s_lock);
        cur = map->hint;
-       mutex_unlock(&map->s_lock);
+       if (not_in_kdp)
+         mutex_unlock(&map->s_lock);
 
        if (cur == vm_map_to_entry(map))
                cur = cur->vme_next;
@@ -1007,7 +936,8 @@ vm_map_lookup_entry(
                                 */
 
                                *entry = cur;
-                               SAVE_HINT(map, cur);
+                               if (not_in_kdp)
+                                 SAVE_HINT(map, cur);
                                return(TRUE);
                        }
                        break;
@@ -1015,7 +945,8 @@ vm_map_lookup_entry(
                cur = cur->vme_next;
        }
        *entry = cur->vme_prev;
-       SAVE_HINT(map, *entry);
+       if (not_in_kdp)
+         SAVE_HINT(map, *entry);
        return(FALSE);
 }
 
@@ -1190,9 +1121,11 @@ vm_map_pmap_enter(
        vm_object_offset_t      offset,
        vm_prot_t               protection)
 {
+       unsigned int            cache_attr;
+
+       if(map->pmap == 0)
+               return;
 
-       vm_machine_attribute_val_t mv_cache_sync = MATTR_VAL_CACHE_SYNC;
-       
        while (addr < end_addr) {
                register vm_page_t      m;
 
@@ -1216,24 +1149,21 @@ vm_map_pmap_enter(
                        printf("map: %x, addr: %x, object: %x, offset: %x\n",
                                map, addr, object, offset);
                }
-
                m->busy = TRUE;
+
+               if (m->no_isync == TRUE) {
+                       pmap_sync_caches_phys(m->phys_page);
+                       m->no_isync = FALSE;
+               }
+
+               cache_attr = ((unsigned int)object->wimg_bits) & VM_WIMG_MASK;
                vm_object_unlock(object);
 
-               PMAP_ENTER(map->pmap, addr, m,
-                          protection, FALSE);
+               PMAP_ENTER(map->pmap, addr, m, 
+                               protection, cache_attr, FALSE);
 
-               if (m->no_isync) {
-                       pmap_attribute(map->pmap,
-                              addr,
-                              PAGE_SIZE,
-                              MATTR_CACHE,
-                              &mv_cache_sync);
-               }
                vm_object_lock(object);
 
-               m->no_isync = FALSE;
-
                PAGE_WAKEUP_DONE(m);
                vm_page_lock_queues();
                if (!m->active && !m->inactive)
@@ -1448,7 +1378,7 @@ vm_map_enter(
            (entry->max_protection == max_protection) &&
            (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
            (entry->in_transition == 0) &&
-           ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) &&
+           ((alias == VM_MEMORY_REALLOC) || ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) &&
            (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
                if (vm_object_coalesce(entry->object.vm_object,
                                VM_OBJECT_NULL,
@@ -1530,13 +1460,16 @@ MACRO_BEGIN                                                             \
        if (VMCS_startaddr > VMCS_entry->vme_start) {                   \
                if(entry->use_pmap) {                                   \
                        vm_offset_t     pmap_base_addr;                 \
-                       vm_offset_t     pmap_end_addr;                  \
                                                                        \
                        pmap_base_addr = 0xF0000000 & entry->vme_start; \
-                       pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
-                       pmap_unnest(map->pmap, pmap_base_addr,          \
-                               (pmap_end_addr - pmap_base_addr) + 1);  \
+                       pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
                        entry->use_pmap = FALSE;                        \
+               } else if(entry->object.vm_object                       \
+                       && !entry->is_sub_map                           \
+                       && entry->object.vm_object->phys_contiguous) {  \
+                       pmap_remove(map->pmap,                          \
+                               (addr64_t)(entry->vme_start),           \
+                               (addr64_t)(entry->vme_end));            \
                }                                                       \
                _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
        }                                                               \
@@ -1619,13 +1552,16 @@ MACRO_BEGIN                                                             \
        if (VMCE_endaddr < VMCE_entry->vme_end) {                       \
                if(entry->use_pmap) {                                   \
                        vm_offset_t     pmap_base_addr;                 \
-                       vm_offset_t     pmap_end_addr;                  \
                                                                        \
                        pmap_base_addr = 0xF0000000 & entry->vme_start; \
-                       pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; \
-                       pmap_unnest(map->pmap, pmap_base_addr,          \
-                               (pmap_end_addr - pmap_base_addr) + 1);  \
+                       pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
                        entry->use_pmap = FALSE;                        \
+               } else if(entry->object.vm_object                       \
+                       && !entry->is_sub_map                           \
+                       && entry->object.vm_object->phys_contiguous) {  \
+                       pmap_remove(map->pmap,                          \
+                               (addr64_t)(entry->vme_start),           \
+                               (addr64_t)(entry->vme_end));            \
                }                                                       \
                _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
        }                                                               \
@@ -1795,6 +1731,8 @@ vm_map_submap(
 
        vm_map_lock(map);
 
+       submap->mapped = TRUE;
+
        VM_MAP_RANGE_CHECK(map, start, end);
 
        if (vm_map_lookup_entry(map, start, &entry)) {
@@ -1817,25 +1755,32 @@ vm_map_submap(
            (object->copy == VM_OBJECT_NULL) &&
            (object->shadow == VM_OBJECT_NULL) &&
            (!object->pager_created)) {
-               entry->offset = (vm_object_offset_t)offset;
-               entry->object.vm_object = VM_OBJECT_NULL;
-               vm_object_deallocate(object);
-               entry->is_sub_map = TRUE;
-               vm_map_reference(entry->object.sub_map = submap);
+                       entry->offset = (vm_object_offset_t)offset;
+                       entry->object.vm_object = VM_OBJECT_NULL;
+                       vm_object_deallocate(object);
+                       entry->is_sub_map = TRUE;
+                       entry->object.sub_map = submap;
+                       vm_map_reference(submap);
 #ifndef i386
-               if ((use_pmap) && (offset == 0)) {
-                       /* nest if platform code will allow */
-                       result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap, 
-                                                       start, end - start);
-                       if(result)
-                               panic("pmap_nest failed!");
-                       entry->use_pmap = TRUE;
-               }
+                       if ((use_pmap) && (offset == 0)) {
+                               /* nest if platform code will allow */
+                               if(submap->pmap == NULL) {
+                                       submap->pmap = pmap_create((vm_size_t) 0);
+                                       if(submap->pmap == PMAP_NULL) {
+                                               return(KERN_NO_SPACE);
+                                       }
+                               }
+                               result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap, 
+                                                               (addr64_t)start, (addr64_t)start, (uint64_t)(end - start));
+                               if(result)
+                                       panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result);
+                               entry->use_pmap = TRUE;
+                       }
 #endif
 #ifdef i386
-               pmap_remove(map->pmap, start, end);
+                       pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end);
 #endif
-               result = KERN_SUCCESS;
+                       result = KERN_SUCCESS;
        }
        vm_map_unlock(map);
 
@@ -1987,8 +1932,7 @@ vm_map_protect(
                                local_entry->use_pmap = FALSE;
                                local_entry = local_entry->vme_next;
                           }
-                          pmap_unnest(map->pmap, pmap_base_addr,
-                                       (pmap_end_addr - pmap_base_addr) + 1);
+                          pmap_unnest(map->pmap, (addr64_t)pmap_base_addr);
 #endif
                   }
                   if (!(current->protection & VM_PROT_WRITE)) {
@@ -2090,7 +2034,8 @@ vm_map_wire_nested(
        register vm_offset_t    end,
        register vm_prot_t      access_type,
        boolean_t               user_wire,
-       pmap_t                  map_pmap)
+       pmap_t                  map_pmap, 
+       vm_offset_t             pmap_addr)
 {
        register vm_map_entry_t entry;
        struct vm_map_entry     *first_entry, tmp_entry;
@@ -2099,7 +2044,7 @@ vm_map_wire_nested(
        kern_return_t           rc;
        boolean_t               need_wakeup;
        boolean_t               main_map = FALSE;
-       boolean_t               interruptible_state;
+       wait_interrupt_t        interruptible_state;
        thread_t                cur_thread;
        unsigned int            last_timestamp;
        vm_size_t               size;
@@ -2136,6 +2081,8 @@ vm_map_wire_nested(
                 * block after informing other thread to wake us up.
                 */
                if (entry->in_transition) {
+                       wait_result_t wait_result;
+
                        /*
                         * We have not clipped the entry.  Make sure that
                         * the start address is in range so that the lookup
@@ -2156,22 +2103,21 @@ vm_map_wire_nested(
                        /*
                         * User wiring is interruptible
                         */
-                       vm_map_entry_wait(map, 
+                       wait_result = vm_map_entry_wait(map, 
                                          (user_wire) ? THREAD_ABORTSAFE :
                                                        THREAD_UNINT);
-                       if (user_wire && cur_thread->wait_result ==
-                                                       THREAD_INTERRUPTED) {
+                       if (user_wire && wait_result == THREAD_INTERRUPTED) {
                                /*
                                 * undo the wirings we have done so far
                                 * We do not clear the needs_wakeup flag,
                                 * because we cannot tell if we were the
                                 * only one waiting.
                                 */
+                               vm_map_unlock(map);
                                vm_map_unwire(map, start, s, user_wire);
                                return(KERN_FAILURE);
                        }
 
-                       vm_map_lock(map);
                        /*
                         * Cannot avoid a lookup here. reset timestamp.
                         */
@@ -2201,13 +2147,14 @@ vm_map_wire_nested(
                if(entry->is_sub_map) {
                        vm_offset_t     sub_start;
                        vm_offset_t     sub_end;
+                       vm_offset_t     local_start;
                        vm_offset_t     local_end;
                        pmap_t          pmap;
                        
                        vm_map_clip_start(map, entry, start);
                        vm_map_clip_end(map, entry, end);
 
-                       sub_start += entry->offset;
+                       sub_start = entry->offset;
                        sub_end = entry->vme_end - entry->vme_start;
                        sub_end += entry->offset;
                        
@@ -2215,8 +2162,16 @@ vm_map_wire_nested(
                        if(map_pmap == NULL) {
                                if(entry->use_pmap) {
                                        pmap = entry->object.sub_map->pmap;
+                                       /* ppc implementation requires that */
+                                       /* submaps pmap address ranges line */
+                                       /* up with parent map */
+#ifdef notdef
+                                       pmap_addr = sub_start;
+#endif
+                                       pmap_addr = start;
                                } else {
                                        pmap = map->pmap;
+                                       pmap_addr = start;
                                }
                                if (entry->wired_count) {
                                        if (entry->wired_count 
@@ -2231,8 +2186,10 @@ vm_map_wire_nested(
                                                entry->vme_start, user_wire);
                                           return(KERN_FAILURE);
                                        }
-                                       if (!user_wire || 
-                                             (entry->user_wired_count++ == 0))
+                                       if(user_wire)
+                                               entry->user_wired_count++;
+                                       if((!user_wire) ||
+                                            (entry->user_wired_count == 0))
                                                entry->wired_count++;
                                        entry = entry->vme_next;
                                        continue;
@@ -2245,7 +2202,6 @@ vm_map_wire_nested(
                                        vm_prot_t               prot;
                                        boolean_t               wired;
                                        vm_behavior_t           behavior;
-                                       vm_offset_t             local_start;
                                        vm_map_entry_t          local_entry;
                                        vm_map_version_t         version;
                                        vm_map_t                lookup_map;
@@ -2258,7 +2214,7 @@ vm_map_wire_nested(
                                        vm_map_lock_write_to_read(map);
                                        if(vm_map_lookup_locked(
                                                &lookup_map, local_start, 
-                                               VM_PROT_WRITE,
+                                               access_type,
                                                &version, &object,
                                                &offset, &prot, &wired,
                                                &behavior, &offset_lo,
@@ -2271,17 +2227,11 @@ vm_map_wire_nested(
                                        }
                                        if(pmap_map != lookup_map)
                                                vm_map_unlock(pmap_map);
-                                       if(lookup_map != map) {
-                                               vm_map_unlock(lookup_map);
-                                               vm_map_lock(map);
-                                       } else {
-                                               vm_map_unlock(map);
-                                               vm_map_lock(map);
-                                       }
-                                       last_timestamp = 
-                                               version.main_timestamp;
+                                       vm_map_unlock_read(lookup_map);
+                                       vm_map_lock(map);
                                        vm_object_unlock(object);
-                                       if (vm_map_lookup_entry(map, 
+
+                                       if (!vm_map_lookup_entry(map, 
                                                local_start, &local_entry)) {
                                                vm_map_unlock(map);
                                                vm_map_unwire(map, start,
@@ -2289,11 +2239,15 @@ vm_map_wire_nested(
                                                return(KERN_FAILURE);
                                        }
                                        /* did we have a change of type? */
-                                       if (!local_entry->is_sub_map)
+                                       if (!local_entry->is_sub_map) {
+                                               last_timestamp = map->timestamp;
                                                continue;
+                                       }
                                        entry = local_entry;
                                        if (user_wire)
                                                entry->user_wired_count++;
+                                       if((!user_wire) || 
+                                               (entry->user_wired_count == 1))
                                        entry->wired_count++;
 
                                        entry->in_transition = TRUE;
@@ -2303,32 +2257,34 @@ vm_map_wire_nested(
                                                entry->object.sub_map, 
                                                sub_start, sub_end,
                                                access_type, 
-                                               user_wire, pmap);
+                                               user_wire, pmap, pmap_addr);
                                        vm_map_lock(map);
-                                       last_timestamp = map->timestamp;
                                }
                        } else {
+                               local_start = entry->vme_start;
+                               if (user_wire)
+                                       entry->user_wired_count++;
+                               if((!user_wire) || 
+                                       (entry->user_wired_count == 1))
+                                       entry->wired_count++;
                                vm_map_unlock(map);
                                rc = vm_map_wire_nested(entry->object.sub_map, 
                                                sub_start, sub_end,
                                                access_type, 
-                                               user_wire, pmap);
+                                               user_wire, map_pmap, pmap_addr);
                                vm_map_lock(map);
-                               last_timestamp = map->timestamp;
                        }
                        s = entry->vme_start;
                        e = entry->vme_end;
-                       if (last_timestamp+1 != map->timestamp) {
+
                        /*
                         * Find the entry again.  It could have been clipped
                         * after we unlocked the map.
                         */
-                               if (!vm_map_lookup_entry(map, local_end,
-                                                        &first_entry))
-                                       panic("vm_map_wire: re-lookup failed");
-
-                               entry = first_entry;
-                       }
+                       if (!vm_map_lookup_entry(map, local_start,
+                                                &first_entry))
+                               panic("vm_map_wire: re-lookup failed");
+                       entry = first_entry;
 
                        last_timestamp = map->timestamp;
                        while ((entry != vm_map_to_entry(map)) &&
@@ -2340,11 +2296,11 @@ vm_map_wire_nested(
                                        need_wakeup = TRUE;
                                }
                                if (rc != KERN_SUCCESS) {/* from vm_*_wire */
-                                  if(main_map) {
                                        if (user_wire)
                                                entry->user_wired_count--;
-                                       entry->wired_count--;
-                                  }
+                                       if ((!user_wire) || 
+                                               (entry->user_wired_count == 0))
+                                               entry->wired_count--;
                                }
                                entry = entry->vme_next;
                        }
@@ -2365,7 +2321,7 @@ vm_map_wire_nested(
                 * If this entry is already wired then increment
                 * the appropriate wire reference count.
                 */
-               if (entry->wired_count && main_map) {
+               if (entry->wired_count) {
                        /* sanity check: wired_count is a short */
                        if (entry->wired_count >= MAX_WIRE_COUNT)
                                panic("vm_map_wire: too many wirings");
@@ -2383,7 +2339,9 @@ vm_map_wire_nested(
                         */
                        vm_map_clip_start(map, entry, start);
                        vm_map_clip_end(map, entry, end);
-                       if (!user_wire || (entry->user_wired_count++ == 0))
+                       if (user_wire)
+                               entry->user_wired_count++;
+                       if ((!user_wire) || (entry->user_wired_count == 1))
                                entry->wired_count++;
 
                        entry = entry->vme_next;
@@ -2449,11 +2407,10 @@ vm_map_wire_nested(
 
                assert(entry->wired_count == 0 && entry->user_wired_count == 0);
 
-               if(main_map) {
-                       if (user_wire)
-                               entry->user_wired_count++;
+               if (user_wire)
+                       entry->user_wired_count++;
+               if ((!user_wire) || (entry->user_wired_count == 1))
                        entry->wired_count++;
-               }
 
                entry->in_transition = TRUE;
 
@@ -2475,18 +2432,19 @@ vm_map_wire_nested(
                 */
                vm_map_unlock(map);
 
-               if (!user_wire && cur_thread != THREAD_NULL) {
-                       interruptible_state = cur_thread->interruptible;
-                       cur_thread->interruptible = FALSE;
-               }
-                 
+               if (!user_wire && cur_thread != THREAD_NULL)
+                       interruptible_state = thread_interrupt_level(THREAD_UNINT);
+
                if(map_pmap)
-                       rc = vm_fault_wire(map, &tmp_entry, map_pmap);
+                       rc = vm_fault_wire(map, 
+                                       &tmp_entry, map_pmap, pmap_addr);
                else
-                       rc = vm_fault_wire(map, &tmp_entry, map->pmap);
+                       rc = vm_fault_wire(map, 
+                                       &tmp_entry, map->pmap, 
+                                       tmp_entry.vme_start);
 
                if (!user_wire && cur_thread != THREAD_NULL)
-                       cur_thread->interruptible = interruptible_state;
+                       thread_interrupt_level(interruptible_state);
 
                vm_map_lock(map);
 
@@ -2513,11 +2471,11 @@ vm_map_wire_nested(
                                need_wakeup = TRUE;
                        }
                        if (rc != KERN_SUCCESS) {       /* from vm_*_wire */
-                               if(main_map) {
-                                       if (user_wire)
-                                               entry->user_wired_count--;
+                               if (user_wire)
+                                       entry->user_wired_count--;
+                               if ((!user_wire) || 
+                                               (entry->user_wired_count == 0))
                                        entry->wired_count--;
-                               }
                        }
                        entry = entry->vme_next;
                }
@@ -2570,7 +2528,7 @@ vm_map_wire(
         mapping_prealloc(end - start);
 #endif
        kret = vm_map_wire_nested(map, start, end, access_type, 
-                                               user_wire, (pmap_t)NULL);
+                                               user_wire, (pmap_t)NULL, 0);
 #ifdef ppc
        mapping_relpre();
 #endif
@@ -2596,7 +2554,8 @@ vm_map_unwire_nested(
        register vm_offset_t    start,
        register vm_offset_t    end,
        boolean_t               user_wire,
-       pmap_t                  map_pmap)
+       pmap_t                  map_pmap,
+       vm_offset_t             pmap_addr)
 {
        register vm_map_entry_t entry;
        struct vm_map_entry     *first_entry, tmp_entry;
@@ -2663,8 +2622,10 @@ vm_map_unwire_nested(
                        if(map_pmap == NULL) {
                           if(entry->use_pmap) {
                                        pmap = entry->object.sub_map->pmap;
+                                       pmap_addr = sub_start;
                           } else {
                                        pmap = map->pmap;
+                                       pmap_addr = start;
                           }
                           if (entry->wired_count == 0 ||
                               (user_wire && entry->user_wired_count == 0)) {
@@ -2708,7 +2669,7 @@ vm_map_unwire_nested(
                            */
                           vm_map_unlock(map);
                           vm_map_unwire_nested(entry->object.sub_map, 
-                                       sub_start, sub_end, user_wire, pmap);
+                               sub_start, sub_end, user_wire, pmap, pmap_addr);
                           vm_map_lock(map);
 
                           if (last_timestamp+1 != map->timestamp) {
@@ -2745,8 +2706,9 @@ vm_map_unwire_nested(
                           continue;
                        } else {
                           vm_map_unlock(map);
-                          vm_map_unwire_nested(entry->object.sub_map, 
-                                       sub_start, sub_end, user_wire, pmap);
+                          vm_map_unwire_nested(entry->object.sub_map,
+                               sub_start, sub_end, user_wire, map_pmap,
+                               pmap_addr);
                           vm_map_lock(map);
 
                           if (last_timestamp+1 != map->timestamp) {
@@ -2768,8 +2730,8 @@ vm_map_unwire_nested(
                }
 
 
-               if (main_map && (entry->wired_count == 0 ||
-                  (user_wire && entry->user_wired_count == 0))) {
+               if ((entry->wired_count == 0) ||
+                  (user_wire && entry->user_wired_count == 0)) {
                        if (!user_wire)
                                panic("vm_map_unwire: entry is unwired");
 
@@ -2798,14 +2760,12 @@ vm_map_unwire_nested(
                        continue;
                }
 
-               if(main_map) {
-                  if (!user_wire || (--entry->user_wired_count == 0))
+               if (!user_wire || (--entry->user_wired_count == 0))
                        entry->wired_count--;
 
-                  if (entry->wired_count != 0) {
+               if (entry->wired_count != 0) {
                        entry = entry->vme_next;
                        continue;
-                  }
                }
 
                entry->in_transition = TRUE;
@@ -2817,9 +2777,12 @@ vm_map_unwire_nested(
                 */
                vm_map_unlock(map);
                if(map_pmap) {
-                       vm_fault_unwire(map, &tmp_entry, FALSE, map_pmap);
+                       vm_fault_unwire(map, 
+                               &tmp_entry, FALSE, map_pmap, pmap_addr);
                } else {
-                       vm_fault_unwire(map, &tmp_entry, FALSE, map->pmap);
+                       vm_fault_unwire(map, 
+                               &tmp_entry, FALSE, map->pmap, 
+                               tmp_entry.vme_start);
                }
                vm_map_lock(map);
 
@@ -2871,7 +2834,8 @@ vm_map_unwire(
        register vm_offset_t    end,
        boolean_t               user_wire)
 {
-       return vm_map_unwire_nested(map, start, end, user_wire, (pmap_t)NULL);
+       return vm_map_unwire_nested(map, start, end, 
+                                       user_wire, (pmap_t)NULL, 0);
 }
 
 
@@ -2957,7 +2921,21 @@ vm_map_submap_pmap_clean(
                                entry->object.sub_map,
                                entry->offset);
                } else {
-                       pmap_remove(map->pmap, start, start + remove_size);
+
+                       if((map->mapped) && (map->ref_count)
+                               && (entry->object.vm_object != NULL)) {
+                               vm_object_pmap_protect(
+                                       entry->object.vm_object,
+                                       entry->offset,
+                                       remove_size,
+                                       PMAP_NULL,
+                                       entry->vme_start,
+                                       VM_PROT_NONE);
+                       } else {
+                               pmap_remove(map->pmap, 
+                                       (addr64_t)start, 
+                                       (addr64_t)(start + remove_size));
+                       }
                }
        }
 
@@ -2977,9 +2955,22 @@ vm_map_submap_pmap_clean(
                                entry->object.sub_map,
                                entry->offset);
                } else {
-                       pmap_remove(map->pmap, 
-                               (start + entry->vme_start) - offset,
-                               ((start + entry->vme_start) - offset) + remove_size);
+                       if((map->mapped) && (map->ref_count)
+                               && (entry->object.vm_object != NULL)) {
+                               vm_object_pmap_protect(
+                                       entry->object.vm_object,
+                                       entry->offset,
+                                       remove_size,
+                                       PMAP_NULL,
+                                       entry->vme_start,
+                                       VM_PROT_NONE);
+                       } else {
+                               pmap_remove(map->pmap, 
+                                  (addr64_t)((start + entry->vme_start) 
+                                                               - offset),
+                                  (addr64_t)(((start + entry->vme_start) 
+                                       - offset) + remove_size));
+                       }
                }
                entry = entry->vme_next;
        } 
@@ -3053,6 +3044,8 @@ vm_map_delete(
 
                vm_map_clip_end(map, entry, end);
                if (entry->in_transition) {
+                       wait_result_t wait_result;
+
                        /*
                         * Another thread is wiring/unwiring this entry.
                         * Let the other thread know we are waiting.
@@ -3069,21 +3062,17 @@ vm_map_delete(
                                need_wakeup = FALSE;
                        }
 
-                       vm_map_entry_wait(map, interruptible);
+                       wait_result = vm_map_entry_wait(map, interruptible);
 
                        if (interruptible &&
-                          current_thread()->wait_result == THREAD_INTERRUPTED)
+                           wait_result == THREAD_INTERRUPTED) {
                                /*
                                 * We do not clear the needs_wakeup flag,
                                 * since we cannot tell if we were the only one.
                                 */
+                               vm_map_unlock(map);
                                return KERN_ABORTED;
-
-                       vm_map_lock(map);
-                       /*
-                        * Cannot avoid a lookup here. reset timestamp.
-                        */
-                       last_timestamp = map->timestamp;
+                       }
 
                        /*
                         * The entry could have been clipped or it
@@ -3100,6 +3089,7 @@ vm_map_delete(
                                entry = first_entry;
                                SAVE_HINT(map, entry->vme_prev);
                        }
+                       last_timestamp = map->timestamp;
                        continue;
                } /* end in_transition */
 
@@ -3126,28 +3116,24 @@ vm_map_delete(
                                 * error.
                                 */
                                if (flags & VM_MAP_REMOVE_WAIT_FOR_KWIRE) {
+                                       wait_result_t wait_result;
 
                                        s = entry->vme_start;
                                        entry->needs_wakeup = TRUE;
-                                       vm_map_entry_wait(map, interruptible);
+                                       wait_result = vm_map_entry_wait(map,
+                                                               interruptible);
 
                                        if (interruptible &&
-                                           current_thread()->wait_result == 
-                                                       THREAD_INTERRUPTED)
+                                           wait_result == THREAD_INTERRUPTED) {
                                                /*
                                                 * We do not clear the 
                                                 * needs_wakeup flag, since we 
                                                 * cannot tell if we were the 
                                                 * only one.
                                                 */
+                                               vm_map_unlock(map);
                                                return KERN_ABORTED;
-
-                                       vm_map_lock(map);
-                                       /*
-                                        * Cannot avoid a lookup here. reset 
-                                        * timestamp.
-                                        */
-                                       last_timestamp = map->timestamp;
+                                       }
 
                                        /*
                                         * The entry could have been clipped or
@@ -3166,6 +3152,7 @@ vm_map_delete(
                                                entry = first_entry;
                                                SAVE_HINT(map, entry->vme_prev);
                                        }
+                                       last_timestamp = map->timestamp;
                                        continue;
                                }
                                else {
@@ -3188,7 +3175,7 @@ vm_map_delete(
                        vm_map_unlock(map);
                        vm_fault_unwire(map, &tmp_entry,
                                tmp_entry.object.vm_object == kernel_object,
-                               map->pmap);
+                               map->pmap, tmp_entry.vme_start);
                        vm_map_lock(map);
 
                        if (last_timestamp+1 != map->timestamp) {
@@ -3239,9 +3226,16 @@ vm_map_delete(
                        if(entry->is_sub_map) {
                           if(entry->use_pmap) {
 #ifndef i386
-                               pmap_unnest(map->pmap, entry->vme_start,
-                                       entry->vme_end - entry->vme_start);
+                               pmap_unnest(map->pmap, (addr64_t)entry->vme_start);
 #endif
+                               if((map->mapped) && (map->ref_count)) {
+                                       /* clean up parent map/maps */
+                                       vm_map_submap_pmap_clean(
+                                               map, entry->vme_start,
+                                               entry->vme_end,
+                                               entry->object.sub_map,
+                                               entry->offset);
+                               }
                           } else {
                                vm_map_submap_pmap_clean(
                                        map, entry->vme_start, entry->vme_end,
@@ -3249,8 +3243,46 @@ vm_map_delete(
                                        entry->offset);
                           }
                        } else {
-                               pmap_remove(map->pmap, 
-                                       entry->vme_start, entry->vme_end);
+                          object = entry->object.vm_object;
+                          if((map->mapped) && (map->ref_count)) {
+                             vm_object_pmap_protect(
+                                       object, entry->offset,
+                                       entry->vme_end - entry->vme_start,
+                                       PMAP_NULL,
+                                       entry->vme_start,
+                                       VM_PROT_NONE);
+                          } else if(object != NULL) {
+                             if ((object->shadow != NULL) || 
+                               (object->phys_contiguous) ||
+                               (object->resident_page_count > 
+                               atop((entry->vme_end - entry->vme_start)/4))) {
+                                       pmap_remove(map->pmap, 
+                                               (addr64_t)(entry->vme_start), 
+                                               (addr64_t)(entry->vme_end));
+                             } else {
+                               vm_page_t p;
+                               vm_object_offset_t start_off;
+                               vm_object_offset_t end_off;
+                               start_off = entry->offset;
+                               end_off = start_off + 
+                                          (entry->vme_end - entry->vme_start);
+                               vm_object_lock(object);
+                               queue_iterate(&object->memq,
+                                                p, vm_page_t, listq) {
+                                  if ((!p->fictitious) && 
+                                       (p->offset >= start_off) &&
+                                       (p->offset < end_off)) {
+                                       vm_offset_t start;
+                                       start = entry->vme_start;
+                                       start += p->offset - start_off;
+                                       pmap_remove(
+                                               map->pmap, start, 
+                                               start + PAGE_SIZE);
+                                  }
+                               }
+                               vm_object_unlock(object);
+                            }
+                         }
                        }
                }
 
@@ -3316,12 +3348,25 @@ vm_map_remove(
        register boolean_t      flags)
 {
        register kern_return_t  result;
+       boolean_t       funnel_set = FALSE;
+       funnel_t        *curflock;
+       thread_t        cur_thread;
 
+       cur_thread = current_thread();
+
+       if ((cur_thread->funnel_state & TH_FN_OWNED) == TH_FN_OWNED) {
+               funnel_set = TRUE;
+               curflock = cur_thread->funnel_lock;
+               thread_funnel_set( curflock , FALSE);
+       }
        vm_map_lock(map);
        VM_MAP_RANGE_CHECK(map, start, end);
        result = vm_map_delete(map, start, end, flags);
        vm_map_unlock(map);
-
+       if (funnel_set) {
+               thread_funnel_set( curflock, TRUE);
+               funnel_set = FALSE;
+       }
        return(result);
 }
 
@@ -3450,16 +3495,16 @@ vm_map_overwrite_submap_recurse(
         *      splitting entries in strange ways.
         */
 
-       dst_end = round_page(dst_addr + dst_size);
+       dst_end = round_page_32(dst_addr + dst_size);
+       vm_map_lock(dst_map);
 
 start_pass_1:
-       vm_map_lock(dst_map);
        if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
                vm_map_unlock(dst_map);
                return(KERN_INVALID_ADDRESS);
        }
 
-       vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
+       vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));
 
        for (entry = tmp_entry;;) {
                vm_map_entry_t  next;
@@ -3680,18 +3725,19 @@ vm_map_copy_overwrite_nested(
                !page_aligned (dst_addr))
        {
                aligned = FALSE;
-               dst_end = round_page(dst_addr + copy->size);
+               dst_end = round_page_32(dst_addr + copy->size);
        } else {
                dst_end = dst_addr + copy->size;
        }
 
-start_pass_1:
        vm_map_lock(dst_map);
+
+start_pass_1:
        if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
                vm_map_unlock(dst_map);
                return(KERN_INVALID_ADDRESS);
        }
-       vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
+       vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));
        for (entry = tmp_entry;;) {
                vm_map_entry_t  next = entry->vme_next;
 
@@ -3860,7 +3906,6 @@ start_overwrite:
                                        entry->needs_wakeup = TRUE;
                                        vm_map_entry_wait(dst_map, THREAD_UNINT);
 
-                               vm_map_lock(dst_map);
                                if(!vm_map_lookup_entry(dst_map, base_addr, 
                                                                &tmp_entry)) {
                                        vm_map_unlock(dst_map);
@@ -4177,7 +4222,7 @@ start_overwrite:
                                break;
                        }
                }
-               vm_map_clip_start(dst_map, tmp_entry, trunc_page(base_addr));
+               vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(base_addr));
 
                entry = tmp_entry;
        } /* while */
@@ -4542,32 +4587,48 @@ vm_map_copy_overwrite_aligned(
 
                        if (old_object != VM_OBJECT_NULL) {
                                if(entry->is_sub_map) {
-                                  if(entry->use_pmap) {
+                                       if(entry->use_pmap) {
 #ifndef i386
-                                     pmap_unnest(dst_map->pmap, 
-                                       entry->vme_start,
-                                       entry->vme_end - entry->vme_start);
+                                               pmap_unnest(dst_map->pmap, 
+                                                       entry->vme_start,
+                                                       entry->vme_end 
+                                                         - entry->vme_start);
 #endif
-                                  } else {
-                                     vm_map_submap_pmap_clean(
-                                       dst_map, entry->vme_start, 
-                                       entry->vme_end,
-                                       entry->object.sub_map,
-                                       entry->offset);
-                                  }
-                                  vm_map_deallocate(
+                                               if(dst_map->mapped) {
+                                                       /* clean up parent */
+                                                       /* map/maps */
+                                                  vm_map_submap_pmap_clean(
+                                                       dst_map, entry->vme_start,
+                                                       entry->vme_end,
+                                                       entry->object.sub_map,
+                                                       entry->offset);
+                                               }
+                                       } else {
+                                               vm_map_submap_pmap_clean(
+                                                       dst_map, entry->vme_start, 
+                                                       entry->vme_end,
+                                                       entry->object.sub_map,
+                                                       entry->offset);
+                                       }
+                                       vm_map_deallocate(
                                                entry->object.sub_map);
-                               } else {
-                                       vm_object_pmap_protect(
-                                               old_object,
-                                               old_offset,
-                                               size,
-                                               pmap,
-                                               tmp_entry->vme_start,
-                                               VM_PROT_NONE);
-
+                               } else {
+                                       if(dst_map->mapped) {
+                                               vm_object_pmap_protect(
+                                                       entry->object.vm_object,
+                                                       entry->offset,
+                                                       entry->vme_end 
+                                                               - entry->vme_start,
+                                                       PMAP_NULL,
+                                                       entry->vme_start,
+                                                       VM_PROT_NONE);
+                                       } else {
+                                          pmap_remove(dst_map->pmap, 
+                                            (addr64_t)(entry->vme_start), 
+                                            (addr64_t)(entry->vme_end));
+                                       }
                                        vm_object_deallocate(old_object);
-                               }
+                               }
                        }
 
                        entry->is_sub_map = FALSE;
@@ -4630,8 +4691,11 @@ vm_map_copy_overwrite_aligned(
                                        /* a pc to execute it.         */
                                        /* No isync here */
 
-                                       PMAP_ENTER(pmap, va, m,
-                                                  prot, FALSE);
+                                       PMAP_ENTER(pmap, va, m, prot, 
+                                               ((unsigned int)
+                                                       (m->object->wimg_bits)) 
+                                                       & VM_WIMG_MASK,
+                                               FALSE);
                
                                        vm_object_lock(object);
                                        vm_page_lock_queues();
@@ -4672,7 +4736,8 @@ vm_map_copy_overwrite_aligned(
 
                        vm_object_reference(dst_object);
 
-                       version.main_timestamp = dst_map->timestamp;
+                       /* account for unlock bumping up timestamp */
+                       version.main_timestamp = dst_map->timestamp + 1;
 
                        vm_map_unlock(dst_map);
 
@@ -4725,7 +4790,7 @@ vm_map_copy_overwrite_aligned(
 
                        start += copy_size;
                        vm_map_lock(dst_map);
-                       if ((version.main_timestamp + 1) == dst_map->timestamp) {
+                       if (version.main_timestamp == dst_map->timestamp) {
                                /* We can safely use saved tmp_entry value */
 
                                vm_map_clip_end(dst_map, tmp_entry, start);
@@ -4792,8 +4857,8 @@ vm_map_copyin_kernel_buffer(
                        VM_MAP_REMOVE_INTERRUPTIBLE;
        }
        if (src_destroy) {
-               (void) vm_map_remove(src_map, trunc_page(src_addr), 
-                                    round_page(src_addr + len),
+               (void) vm_map_remove(src_map, trunc_page_32(src_addr), 
+                                    round_page_32(src_addr + len),
                                     flags);
        }
        *copy_result = copy;
@@ -4829,7 +4894,7 @@ vm_map_copyout_kernel_buffer(
                *addr = 0;
                kr = vm_map_enter(map, 
                                  addr, 
-                                 round_page(copy->size),
+                                 round_page_32(copy->size),
                                  (vm_offset_t) 0, 
                                  TRUE,
                                  VM_OBJECT_NULL, 
@@ -4953,7 +5018,7 @@ vm_map_copyout(
                vm_object_offset_t      offset;
 
                offset = trunc_page_64(copy->offset);
-               size = round_page(copy->size + 
+               size = round_page_32(copy->size + 
                                (vm_size_t)(copy->offset - offset));
                *dst_addr = 0;
                kr = vm_map_enter(dst_map, dst_addr, size,
@@ -4984,7 +5049,7 @@ vm_map_copyout(
         */
 
        vm_copy_start = trunc_page_64(copy->offset);
-       size =  round_page((vm_size_t)copy->offset + copy->size) 
+       size =  round_page_32((vm_size_t)copy->offset + copy->size) 
                                                        - vm_copy_start;
 
  StartAgain: ;
@@ -5134,8 +5199,11 @@ vm_map_copyout(
                        m->busy = TRUE;
                        vm_object_unlock(object);
 
-                       PMAP_ENTER(dst_map->pmap, va, m,
-                                  entry->protection, TRUE);
+                       PMAP_ENTER(dst_map->pmap, va, m, entry->protection, 
+                                               ((unsigned int)
+                                                       (m->object->wimg_bits)) 
+                                                       & VM_WIMG_MASK,
+                                               TRUE);
 
                        vm_object_lock(object);
                        PAGE_WAKEUP_DONE(m);
@@ -5184,7 +5252,11 @@ vm_map_copyout(
                                                        prot &= ~VM_PROT_WRITE;
 
                                                PMAP_ENTER(dst_map->pmap, va, 
-                                                          m, prot, FALSE);
+                                                       m, prot, 
+                                                       ((unsigned int)
+                                                       (m->object->wimg_bits)) 
+                                                          & VM_WIMG_MASK,
+                                                       FALSE);
 
                                                vm_object_lock(object);
                                                vm_page_lock_queues();
@@ -5321,8 +5393,8 @@ vm_map_copyin_common(
         *      Compute start and end of region
         */
 
-       src_start = trunc_page(src_addr);
-       src_end = round_page(src_addr + len);
+       src_start = trunc_page_32(src_addr);
+       src_end = round_page_32(src_addr + len);
 
        XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0);
 
@@ -5356,6 +5428,8 @@ vm_map_copyin_common(
 #define        RETURN(x)                                               \
        MACRO_BEGIN                                             \
        vm_map_unlock(src_map);                                 \
+       if(src_map != base_map)                                 \
+               vm_map_deallocate(src_map);                     \
        if (new_entry != VM_MAP_ENTRY_NULL)                     \
                vm_map_copy_entry_dispose(copy,new_entry);      \
        vm_map_copy_discard(copy);                              \
@@ -5364,6 +5438,8 @@ vm_map_copyin_common(
                                                                \
                for(ptr = parent_maps; ptr != NULL; ptr = parent_maps) { \
                        parent_maps=parent_maps->next;          \
+                       if (ptr->parent_map != base_map)        \
+                               vm_map_deallocate(ptr->parent_map);     \
                        kfree((vm_offset_t)ptr, sizeof(submap_map_t));  \
                }                                               \
        }                                                       \
@@ -5434,6 +5510,9 @@ vm_map_copyin_common(
                        src_end = src_start + submap_len;
                        src_map = tmp_entry->object.sub_map;
                        vm_map_lock(src_map);
+                       /* keep an outstanding reference for all maps in */
+                       /* the parents tree except the base map */
+                       vm_map_reference(src_map);
                        vm_map_unlock(ptr->parent_map);
                        if (!vm_map_lookup_entry(
                                        src_map, src_start, &tmp_entry))
@@ -5445,11 +5524,13 @@ vm_map_copyin_common(
                }
                if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) && 
                    (tmp_entry->object.vm_object->phys_contiguous)) {
-                       /* This is not, cannot be supported for now */
-                       /* we need a description of the caching mode */
-                       /* reflected in the object before we can     */
-                       /* support copyin, and then the support will */
-                       /* be for direct copy */
+                       /* This is not supported for now. In the    */
+                       /* future we will need to detect the        */
+                       /* phys_contig condition and then upgrade   */
+                       /* copy_slowly to do a physical copy from   */
+                       /* the device-memory-based object. We can   */
+                       /* piggy-back off of the was_wired boolean  */
+                       /* to set up the proper handling.           */
                        RETURN(KERN_PROTECTION_FAILURE);
                }
                /*
@@ -5528,8 +5609,9 @@ RestartCopy:
                XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
                    src_object, new_entry, new_entry->object.vm_object,
                    was_wired, 0);
-               if (!was_wired &&
-                   vm_object_copy_quickly(
+               if ((src_object == VM_OBJECT_NULL ||
+                       (!was_wired && !map_share && !tmp_entry->is_shared)) &&
+                       vm_object_copy_quickly(
                                &new_entry->object.vm_object,
                                src_offset,
                                src_size,
@@ -5543,49 +5625,17 @@ RestartCopy:
                         */
 
                        if (src_needs_copy && !tmp_entry->needs_copy) {
-                               if (tmp_entry->is_shared  || 
-                                    tmp_entry->object.vm_object->true_share ||
-                                    map_share) {
-                                       vm_map_unlock(src_map);
-                                       new_entry->object.vm_object = 
-                                               vm_object_copy_delayed(
-                                                       src_object,
-                                                       src_offset,     
-                                                       src_size);
-                                       /* dec ref gained in copy_quickly */
-                                       vm_object_lock(src_object);
-                                       src_object->ref_count--;
-                                       assert(src_object->ref_count > 0);
-                                       vm_object_res_deallocate(src_object);
-                                       vm_object_unlock(src_object);
-                                       vm_map_lock(src_map);
-                                       /* 
-                                        * it turns out that we have
-                                        * finished our copy. No matter
-                                        * what the state of the map
-                                        * we will lock it again here
-                                        * knowing that if there is
-                                        * additional data to copy
-                                        * it will be checked at
-                                        * the top of the loop
-                                        *
-                                        * Don't do timestamp check
-                                        */
-                                       
-                               } else {
-                                       vm_object_pmap_protect(
-                                               src_object,
-                                               src_offset,
-                                               src_size,
-                                               (src_entry->is_shared ? 
-                                                       PMAP_NULL
-                                                       : src_map->pmap),
-                                               src_entry->vme_start,
-                                               src_entry->protection &
-                                                       ~VM_PROT_WRITE);
-
-                                       tmp_entry->needs_copy = TRUE;
-                               }
+                               vm_object_pmap_protect(
+                                       src_object,
+                                       src_offset,
+                                       src_size,
+                                       (src_entry->is_shared ? 
+                                               PMAP_NULL
+                                               : src_map->pmap),
+                                       src_entry->vme_start,
+                                       src_entry->protection &
+                                               ~VM_PROT_WRITE);
+                               tmp_entry->needs_copy = TRUE;
                        }
 
                        /*
@@ -5597,8 +5647,6 @@ RestartCopy:
                        goto CopySuccessful;
                }
 
-               new_entry->needs_copy = FALSE;
-
                /*
                 *      Take an object reference, so that we may
                 *      release the map lock(s).
@@ -5613,13 +5661,14 @@ RestartCopy:
                 */
 
                version.main_timestamp = src_map->timestamp;
-               vm_map_unlock(src_map);
+               vm_map_unlock(src_map); /* Increments timestamp once! */
 
                /*
                 *      Perform the copy
                 */
 
                if (was_wired) {
+               CopySlowly:
                        vm_object_lock(src_object);
                        result = vm_object_copy_slowly(
                                        src_object,
@@ -5629,6 +5678,24 @@ RestartCopy:
                                        &new_entry->object.vm_object);
                        new_entry->offset = 0;
                        new_entry->needs_copy = FALSE;
+
+               }
+               else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
+                       (tmp_entry->is_shared  || map_share)) {
+                       vm_object_t new_object;
+
+                       vm_object_lock(src_object);
+                       new_object = vm_object_copy_delayed(
+                                       src_object,
+                                       src_offset,     
+                                       src_size);
+                       if (new_object == VM_OBJECT_NULL)
+                               goto CopySlowly;
+
+                       new_entry->object.vm_object = new_object;
+                       new_entry->needs_copy = TRUE;
+                       result = KERN_SUCCESS;
+
                } else {
                        result = vm_object_copy_strategically(src_object,
                                src_offset,
@@ -5638,7 +5705,6 @@ RestartCopy:
                                &new_entry_needs_copy);
 
                        new_entry->needs_copy = new_entry_needs_copy;
-                       
                }
 
                if (result != KERN_SUCCESS &&
@@ -5658,7 +5724,7 @@ RestartCopy:
                 *      changed while the copy was being made.
                 */
 
-               vm_map_lock(src_map);   /* Increments timestamp once! */
+               vm_map_lock(src_map);
 
                if ((version.main_timestamp + 1) == src_map->timestamp)
                        goto VerificationSuccessful;
@@ -5741,8 +5807,9 @@ RestartCopy:
                                ptr = parent_maps;
                                assert(ptr != NULL);
                                parent_maps = parent_maps->next;
-                               vm_map_lock(ptr->parent_map);
                                vm_map_unlock(src_map);
+                               vm_map_deallocate(src_map);
+                               vm_map_lock(ptr->parent_map);
                                src_map = ptr->parent_map;
                                src_start = ptr->base_start;
                                src_end = ptr->base_end;
@@ -5776,7 +5843,7 @@ RestartCopy:
         */
        if (src_destroy) {
                (void) vm_map_delete(src_map,
-                                    trunc_page(src_addr),
+                                    trunc_page_32(src_addr),
                                     src_end,
                                     (src_map == kernel_map) ?
                                        VM_MAP_REMOVE_KUNWIRE :
@@ -5863,8 +5930,9 @@ vm_map_fork_share(
                if(old_entry->use_pmap) {
                        result = pmap_nest(new_map->pmap, 
                                (old_entry->object.sub_map)->pmap, 
-                               old_entry->vme_start,
-                               old_entry->vme_end - old_entry->vme_start);
+                               (addr64_t)old_entry->vme_start,
+                               (addr64_t)old_entry->vme_start,
+                               (uint64_t)(old_entry->vme_end - old_entry->vme_start));
                        if(result)
                                panic("vm_map_fork_share: pmap_nest failed!");
                }
@@ -5983,16 +6051,19 @@ vm_map_fork_share(
                 *      to remove write permission.
                 */
 
-/* CDY FIX this! page_protect! */
                if (!old_entry->needs_copy &&
                    (old_entry->protection & VM_PROT_WRITE)) {
-                       if(old_entry->is_sub_map && old_entry->use_pmap) {
-                               pmap_protect(old_entry->object.sub_map->pmap,
-                                    old_entry->vme_start,
-                                    old_entry->vme_end,
-                                    old_entry->protection & ~VM_PROT_WRITE);
+                       if(old_map->mapped) {
+                               vm_object_pmap_protect(
+                                       old_entry->object.vm_object,
+                                       old_entry->offset,
+                                       (old_entry->vme_end -
+                                                       old_entry->vme_start),
+                                       PMAP_NULL,
+                                       old_entry->vme_start,
+                                       old_entry->protection & ~VM_PROT_WRITE);
                        } else {
-                               pmap_protect(vm_map_pmap(old_map),
+                               pmap_protect(old_map->pmap,
                                     old_entry->vme_start,
                                     old_entry->vme_end,
                                     old_entry->protection & ~VM_PROT_WRITE);
@@ -6086,8 +6157,7 @@ vm_map_fork_copy(
                 */
                vm_map_lock(old_map);
                if (!vm_map_lookup_entry(old_map, start, &last) ||
-                   last->max_protection & VM_PROT_READ ==
-                                        VM_PROT_NONE) {
+                   (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
                        last = last->vme_next;
                }
                *old_entry_p = last;
@@ -6180,7 +6250,9 @@ vm_map_fork(
 
                        if(old_entry->is_sub_map)
                                break;
-                       if (old_entry->wired_count != 0) {
+                       if ((old_entry->wired_count != 0) ||
+                               ((old_entry->object.vm_object != NULL) &&
+                               (old_entry->object.vm_object->true_share))) {
                                goto slow_vm_map_fork_copy;
                        }
 
@@ -6211,7 +6283,7 @@ vm_map_fork(
                                        (old_entry->vme_end -
                                                        old_entry->vme_start),
                                        ((old_entry->is_shared 
-                                               || old_entry->is_sub_map)
+                                               || old_map->mapped)
                                                        ? PMAP_NULL :
                                                        old_map->pmap),
                                        old_entry->vme_start,
@@ -6495,7 +6567,8 @@ RetrySubMap:
                                        submap_entry->offset,
                                        submap_entry->vme_end - 
                                                submap_entry->vme_start,
-                                       submap_entry->is_shared ?
+                                       (submap_entry->is_shared 
+                                               || map->mapped) ?
                                                PMAP_NULL : map->pmap,
                                        submap_entry->vme_start,
                                        submap_entry->protection &
@@ -7119,11 +7192,13 @@ vm_region_recurse_64(
        recurse_count = *nesting_depth;
 
 LOOKUP_NEXT_BASE_ENTRY:
-       vm_map_lock_read(map);
+       if (not_in_kdp)
+         vm_map_lock_read(map);
         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
                if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
-                       vm_map_unlock_read(map);
-                       return(KERN_INVALID_ADDRESS);
+                 if (not_in_kdp)
+                   vm_map_unlock_read(map);
+                 return(KERN_INVALID_ADDRESS);
                }
        } else {
                entry = tmp_entry;
@@ -7137,7 +7212,8 @@ LOOKUP_NEXT_BASE_ENTRY:
 
        while(entry->is_sub_map && recurse_count) {
                recurse_count--;
-               vm_map_lock_read(entry->object.sub_map);
+               if (not_in_kdp)
+                 vm_map_lock_read(entry->object.sub_map);
 
 
                if(entry == base_entry) {
@@ -7146,13 +7222,15 @@ LOOKUP_NEXT_BASE_ENTRY:
                }
 
                submap = entry->object.sub_map;
-               vm_map_unlock_read(map);
+               if (not_in_kdp)
+                 vm_map_unlock_read(map);
                map = submap;
 
                if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
                        if ((entry = tmp_entry->vme_next) 
                                                == vm_map_to_entry(map)) {
-                               vm_map_unlock_read(map);
+                               if (not_in_kdp)
+                                 vm_map_unlock_read(map);
                                map = base_map;
                                start = base_next;
                                recurse_count = 0;
@@ -7172,7 +7250,8 @@ LOOKUP_NEXT_BASE_ENTRY:
                        }
                        if(base_next <= 
                                (base_addr += (entry->vme_start - start))) {
-                               vm_map_unlock_read(map);
+                               if (not_in_kdp)
+                                 vm_map_unlock_read(map);
                                map = base_map;
                                start = base_next;
                                recurse_count = 0;
@@ -7198,7 +7277,8 @@ LOOKUP_NEXT_BASE_ENTRY:
                        }
                        base_addr += entry->vme_start;
                        if(base_addr >= base_next) {
-                               vm_map_unlock_read(map);
+                               if (not_in_kdp)
+                                 vm_map_unlock_read(map);
                                map = base_map;
                                start = base_next;
                                recurse_count = 0;
@@ -7238,7 +7318,8 @@ LOOKUP_NEXT_BASE_ENTRY:
        extended.pages_dirtied = 0;
        extended.external_pager = 0;
        extended.shadow_depth = 0;
-
+       
+       if (not_in_kdp)
        if(!entry->is_sub_map) {
                vm_region_walk(entry, &extended, entry->offset, 
                                entry->vme_end - start, map, start);
@@ -7262,8 +7343,8 @@ LOOKUP_NEXT_BASE_ENTRY:
        submap_info->pages_dirtied = extended.pages_dirtied;
        submap_info->external_pager = extended.external_pager;
        submap_info->shadow_depth = extended.shadow_depth;
-
-       vm_map_unlock_read(map);
+       if (not_in_kdp)
+         vm_map_unlock_read(map);
        return(KERN_SUCCESS);
 }
 
@@ -7434,15 +7515,13 @@ vm_region_top_walk(
         register struct vm_object *obj, *tmp_obj;
        register int    ref_count;
 
-       if (entry->object.vm_object == 0) {
+       if (entry->object.vm_object == 0 || entry->is_sub_map) {
            top->share_mode = SM_EMPTY;
            top->ref_count = 0;
            top->obj_id = 0;
            return;
        }
-        if (entry->is_sub_map)
-           vm_region_top_walk((vm_map_entry_t)entry->object.sub_map, top);
-       else {
+       {
            obj = entry->object.vm_object;
 
            vm_object_lock(obj);
@@ -7506,16 +7585,14 @@ vm_region_walk(
        register int               ref_count;
        void vm_region_look_for_page();
 
-       if ((entry->object.vm_object == 0) || 
+       if ((entry->object.vm_object == 0) ||
+               (entry->is_sub_map) ||
                (entry->object.vm_object->phys_contiguous)) {
            extended->share_mode = SM_EMPTY;
            extended->ref_count = 0;
            return;
        }
-        if (entry->is_sub_map)
-           vm_region_walk((vm_map_entry_t)entry->object.sub_map, extended, offset + entry->offset,
-                          range, map, va);
-       else {
+       {
            obj = entry->object.vm_object;
 
            vm_object_lock(obj);
@@ -7608,12 +7685,13 @@ vm_region_look_for_page(
                        if (shadow && (max_refcnt == 1))
                                extended->pages_shared_now_private++;
 
-                       if (p->dirty || pmap_is_modified(p->phys_addr))
+                       if (!p->fictitious && 
+                               (p->dirty || pmap_is_modified(p->phys_page)))
                                extended->pages_dirtied++;
                        extended->pages_resident++;
 
                        if(object != caller_object)
-                               vm_object_unlock(object);
+                            vm_object_unlock(object);
 
                        return;
                }
@@ -7623,13 +7701,13 @@ vm_region_look_for_page(
                                extended->pages_swapped_out++;
 
                                if(object != caller_object)
-                                       vm_object_unlock(object);
+                                    vm_object_unlock(object);
 
                                return;
                        }
                }
                if (shadow) {
-                       vm_object_lock(shadow);
+                   vm_object_lock(shadow);
 
                        if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
                                ref_count--;
@@ -7641,7 +7719,7 @@ vm_region_look_for_page(
                                max_refcnt = ref_count;
                        
                        if(object != caller_object)
-                               vm_object_unlock(object);
+                            vm_object_unlock(object);
 
                        object = shadow;
                        shadow = object->shadow;
@@ -7649,7 +7727,7 @@ vm_region_look_for_page(
                        continue;
                }
                if(object != caller_object)
-                       vm_object_unlock(object);
+                    vm_object_unlock(object);
                break;
        }
 }
@@ -7667,7 +7745,7 @@ vm_region_count_obj_refs(
            return(0);
 
         if (entry->is_sub_map)
-           ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
+           return(0);
        else {
            ref_count = 0;
 
@@ -7678,9 +7756,9 @@ vm_region_count_obj_refs(
                if (chk_obj == object)
                    ref_count++;
                if (tmp_obj = chk_obj->shadow)
-                   vm_object_lock(tmp_obj);
+                  vm_object_lock(tmp_obj);
                vm_object_unlock(chk_obj);
-               
+
                chk_obj = tmp_obj;
            }
        }
@@ -7810,14 +7888,96 @@ vm_map_machine_attribute(
        vm_machine_attribute_val_t* value)              /* IN/OUT */
 {
        kern_return_t   ret;
-
+       vm_size_t sync_size;
+       vm_offset_t     start;
+       vm_map_entry_t entry;
+       
        if (address < vm_map_min(map) ||
            (address + size) > vm_map_max(map))
                return KERN_INVALID_ADDRESS;
 
        vm_map_lock(map);
+       
+       if (attribute != MATTR_CACHE) { 
+               /* If we don't have to find physical addresses, we */
+               /* don't have to do an explicit traversal here.    */
+               ret = pmap_attribute(map->pmap, 
+                               address, size, attribute, value);
+               vm_map_unlock(map);
+               return ret;
+       }
+
+       /* Get the starting address */
+       start = trunc_page_32(address);
+       /* Figure how much memory we need to flush (in page increments) */
+       sync_size = round_page_32(start + size) - start;        
+
 
-       ret = pmap_attribute(map->pmap, address, size, attribute, value);
+       ret = KERN_SUCCESS;                                                                             /* Assume it all worked */
+
+       while(sync_size) {
+               if (vm_map_lookup_entry(map, start, &entry)) {
+                       vm_size_t       sub_size;
+                       if((entry->vme_end - start) > sync_size) {
+                               sub_size = sync_size;
+                               sync_size = 0;
+                       } else {
+                               sub_size = entry->vme_end - start;
+                                       sync_size -= sub_size;
+                       }
+                       if(entry->is_sub_map) {
+                               vm_map_machine_attribute(
+                                       entry->object.sub_map, 
+                                       (start - entry->vme_start) 
+                                                       + entry->offset,
+                                       sub_size,
+                                       attribute, value);
+                       } else {
+                               if(entry->object.vm_object) {
+                                       vm_page_t               m;
+                                       vm_object_t             object;
+                                       vm_object_t             base_object;
+                                       vm_object_offset_t      offset;
+                                       vm_object_offset_t      base_offset;
+                                       vm_size_t               range;
+                                       range = sub_size;
+                                       offset = (start - entry->vme_start)
+                                                       + entry->offset;
+                                       base_offset = offset;
+                                       object = entry->object.vm_object;
+                                       base_object = object;
+                                       while(range) {
+                                               m = vm_page_lookup(
+                                                       object, offset);
+                                               if(m && !m->fictitious) {
+       
+                                                 ret = 
+                                                    pmap_attribute_cache_sync(
+                                                       m->phys_page,   
+                                                       PAGE_SIZE, 
+                                                       attribute, value);
+                                               } else if (object->shadow) {
+                                                  offset = offset + 
+                                                       object->shadow_offset;
+                                                  object = object->shadow;
+                                                  continue;
+                                               }
+                                               range -= PAGE_SIZE;
+                                               /* Bump to the next page */
+                                               base_offset += PAGE_SIZE;
+                                               offset = base_offset;
+                                               object = base_object;
+                                               
+                                       }
+                               }
+                       }
+                       start += sub_size;
+               } else {
+                       vm_map_unlock(map);
+                       return KERN_FAILURE;
+               }
+               
+       }
 
        vm_map_unlock(map);
 
@@ -7852,6 +8012,10 @@ vm_map_behavior_set(
        case VM_BEHAVIOR_SEQUENTIAL:
        case VM_BEHAVIOR_RSEQNTL:
                break;
+       case VM_BEHAVIOR_WILLNEED:
+       case VM_BEHAVIOR_DONTNEED:
+               new_behavior = VM_BEHAVIOR_DEFAULT;
+               break;
        default:
                return(KERN_INVALID_ARGUMENT);
        }
@@ -7919,7 +8083,7 @@ void
 vm_map_links_print(
        struct vm_map_links     *links)
 {
-       iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n",
+       iprintf("prev = %08X  next = %08X  start = %08X  end = %08X\n",
                links->prev,
                links->next,
                links->start,
@@ -7934,7 +8098,7 @@ vm_map_header_print(
        struct vm_map_header    *header)
 {
        vm_map_links_print(&header->links);
-       iprintf("nentries=0x%x, %sentries_pageable\n",
+       iprintf("nentries = %08X, %sentries_pageable\n",
                header->nentries,
                (header->entries_pageable ? "" : "!"));
 }
@@ -7949,7 +8113,7 @@ vm_follow_entry(
        extern int db_indent;
        int shadows;
 
-       iprintf("map entry 0x%x:\n", entry);
+       iprintf("map entry %08X\n", entry);
 
        db_indent += 2;
 
@@ -7970,20 +8134,20 @@ vm_map_entry_print(
        static char *inheritance_name[4] = { "share", "copy", "none", "?"};
        static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };
        
-       iprintf("map entry 0x%x:\n", entry);
+       iprintf("map entry %08X\n", entry);
 
        db_indent += 2;
 
        vm_map_links_print(&entry->links);
 
-       iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n",
+       iprintf("start = %08X  end = %08X, prot=%x/%x/%s\n",
                entry->vme_start,
                entry->vme_end,
                entry->protection,
                entry->max_protection,
                inheritance_name[(entry->inheritance & 0x3)]);
 
-       iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n",
+       iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
                behavior_name[(entry->behavior & 0x3)],
                entry->wired_count,
                entry->user_wired_count);
@@ -7992,11 +8156,11 @@ vm_map_entry_print(
                (entry->needs_wakeup ? "" : "!"));
 
        if (entry->is_sub_map) {
-               iprintf("submap=0x%x, offset=0x%x\n",
+               iprintf("submap = %08X - offset=%08X\n",
                       entry->object.sub_map,
                       entry->offset);
        } else {
-               iprintf("object=0x%x, offset=0x%x, ",
+               iprintf("object=%08X, offset=%08X, ",
                        entry->object.vm_object,
                        entry->offset);
                printf("%sis_shared, %sneeds_copy\n",
@@ -8017,7 +8181,7 @@ vm_follow_map(
        register vm_map_entry_t entry;
        extern int db_indent;
 
-       iprintf("task map 0x%x:\n", map);
+       iprintf("task map %08X\n", map);
 
        db_indent += 2;
 
@@ -8035,26 +8199,29 @@ vm_follow_map(
  */
 void
 vm_map_print(
-       register vm_map_t       map)
+       db_addr_t inmap)
 {
        register vm_map_entry_t entry;
+       vm_map_t map;
        extern int db_indent;
        char *swstate;
 
-       iprintf("task map 0x%x:\n", map);
+       map = (vm_map_t)inmap;                          /* Make sure we have the right type */
+
+       iprintf("task map %08X\n", map);
 
        db_indent += 2;
 
        vm_map_header_print(&map->hdr);
 
-       iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n",
+       iprintf("pmap = %08X, size = %08X, ref = %d, hint = %08X, first_free = %08X\n",
                map->pmap,
                map->size,
                map->ref_count,
                map->hint,
                map->first_free);
 
-       iprintf("%swait_for_space, %swiring_required, timestamp=%d\n",
+       iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
                (map->wait_for_space ? "" : "!"),
                (map->wiring_required ? "" : "!"),
                map->timestamp);
@@ -8071,7 +8238,7 @@ vm_map_print(
                swstate = "????";
                break;
        }
-       iprintf("res=%d, sw_state=%s\n", map->res_count, swstate);
+       iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
 #endif /* TASK_SWAPPER */
 
        for (entry = vm_map_first_entry(map);
@@ -8091,12 +8258,15 @@ vm_map_print(
 
 void
 vm_map_copy_print(
-       vm_map_copy_t   copy)
+       db_addr_t       incopy)
 {
        extern int db_indent;
+       vm_map_copy_t copy;
        int i, npages;
        vm_map_entry_t entry;
 
+       copy = (vm_map_copy_t)incopy;   /* Make sure we have the right type */
+
        printf("copy object 0x%x\n", copy);
 
        db_indent += 2;
@@ -8153,10 +8323,13 @@ vm_map_copy_print(
  */
 vm_size_t
 db_vm_map_total_size(
-       vm_map_t        map)
+       db_addr_t       inmap)
 {
        vm_map_entry_t  entry;
        vm_size_t       total;
+       vm_map_t map;
+
+       map = (vm_map_t)inmap;  /* Make sure we have the right type */
 
        total = 0;
        for (entry = vm_map_first_entry(map);
@@ -8266,7 +8439,7 @@ vm_remap_extract(
        boolean_t               new_entry_needs_copy;
 
        assert(map != VM_MAP_NULL);
-       assert(size != 0 && size == round_page(size));
+       assert(size != 0 && size == round_page_32(size));
        assert(inheritance == VM_INHERIT_NONE ||
               inheritance == VM_INHERIT_COPY ||
               inheritance == VM_INHERIT_SHARE);
@@ -8274,8 +8447,8 @@ vm_remap_extract(
        /*
         *      Compute start and end of region.
         */
-       src_start = trunc_page(addr);
-       src_end = round_page(src_start + size);
+       src_start = trunc_page_32(addr);
+       src_end = round_page_32(src_start + size);
 
        /*
         *      Initialize map_header.
@@ -8354,11 +8527,22 @@ vm_remap_extract(
 
                                if (!src_entry->needs_copy &&
                                    (src_entry->protection & VM_PROT_WRITE)) {
-                                       pmap_protect(vm_map_pmap(map),
-                                            src_entry->vme_start,
-                                            src_entry->vme_end,
-                                            src_entry->protection &
+                                       if(map->mapped) {
+                                          vm_object_pmap_protect(
+                                               src_entry->object.vm_object,
+                                               src_entry->offset,
+                                               entry_size,
+                                               PMAP_NULL,
+                                               src_entry->vme_start,
+                                               src_entry->protection &
+                                                      ~VM_PROT_WRITE);
+                                       } else {
+                                          pmap_protect(vm_map_pmap(map),
+                                                src_entry->vme_start,
+                                               src_entry->vme_end,
+                                               src_entry->protection &
                                                     ~VM_PROT_WRITE);
+                                       }
                                }
 
                                object = src_entry->object.vm_object;
@@ -8419,7 +8603,8 @@ vm_remap_extract(
                                vm_object_pmap_protect(object,
                                                       offset,
                                                       entry_size,
-                                                      (src_entry->is_shared ?
+                                                      ((src_entry->is_shared 
+                                                         || map->mapped) ?
                                                        PMAP_NULL : map->pmap),
                                                       src_entry->vme_start,
                                                       src_entry->protection &
@@ -8443,7 +8628,7 @@ vm_remap_extract(
                         * verification, and unlock the map.
                         */
                        version.main_timestamp = map->timestamp;
-                       vm_map_unlock(map);
+                       vm_map_unlock(map);     /* Increments timestamp once! */
 
                        /*
                         * Perform the copy.
@@ -8487,7 +8672,7 @@ vm_remap_extract(
                         * changed while the copy was being made.
                         */
 
-                       vm_map_lock(map);       /* Increments timestamp once! */
+                       vm_map_lock(map);
                        if (version.main_timestamp + 1 != map->timestamp) {
                                /*
                                 * Simple version comparison failed.
@@ -8583,7 +8768,7 @@ vm_remap(
                return KERN_INVALID_ARGUMENT;
        }
 
-       size = round_page(size);
+       size = round_page_32(size);
 
        result = vm_remap_extract(src_map, memory_address,
                                  size, copy, &map_header,
@@ -8601,7 +8786,7 @@ vm_remap(
         * Allocate/check a range of free virtual address
         * space for the target
         */
-       *address = trunc_page(*address);
+       *address = trunc_page_32(*address);
        vm_map_lock(target_map);
        result = vm_remap_range_allocate(target_map, address, size,
                                         mask, anywhere, &insp_entry);
@@ -9059,3 +9244,77 @@ boolean_t vm_map_check_protection(map, start, end, protection)
        vm_map_unlock(map);
        return(TRUE);
 }
+
+/*
+ *      kernel_vm_map_reference:
+ *
+ *      This routine is obsolete, but included for backward
+ *      compatibility for older drivers.  It simply takes an
+ *      additional reference on the map by forwarding to
+ *      vm_map_reference() below.
+ */
+void
+kernel_vm_map_reference(
+       vm_map_t map)
+{
+       vm_map_reference(map);  /* delegate; NULL maps are handled there */
+}
+
+/*
+ *     vm_map_reference:
+ *
+ *     Takes an additional reference on the given map (no-op for
+ *     VM_MAP_NULL).  The counts are updated under the map's s_lock.
+ *
+ *     Most code internal to the osfmk will go through a
+ *     macro defining this.  This out-of-line version is always here
+ *     for the use of other kernel components, hence the #undef of
+ *     the macro before the definition.
+ */
+#undef vm_map_reference
+void
+vm_map_reference(
+       register vm_map_t       map)
+{
+       if (map == VM_MAP_NULL)
+               return;
+
+       /* s_lock guards ref_count (and res_count when TASK_SWAPPER). */
+       mutex_lock(&map->s_lock);
+#if    TASK_SWAPPER
+       /*
+        * Taking a new reference also takes a residence reference;
+        * the invariant ref_count >= res_count > 0 must already hold.
+        */
+       assert(map->res_count > 0);
+       assert(map->ref_count >= map->res_count);
+       map->res_count++;
+#endif
+       map->ref_count++;
+       mutex_unlock(&map->s_lock);
+}
+
+/*
+ *     vm_map_deallocate:
+ *
+ *     Removes a reference from the specified map,
+ *     destroying it if no references remain.
+ *     The map should not be locked.
+ */
+void
+vm_map_deallocate(
+       register vm_map_t       map)
+{
+       unsigned int            ref;
+
+       if (map == VM_MAP_NULL)
+               return;
+
+       /* Drop one reference under s_lock; remember the new count. */
+       mutex_lock(&map->s_lock);
+       ref = --map->ref_count;
+       if (ref > 0) {
+               /*
+                * Other references remain: just drop a residence
+                * count (a no-op unless TASK_SWAPPER) and return.
+                */
+               vm_map_res_deallocate(map);
+               mutex_unlock(&map->s_lock);
+               return;
+       }
+       assert(map->ref_count == 0);
+       /*
+        * Last reference is gone, so no one else can find this map;
+        * it is safe to unlock before tearing it down.
+        */
+       mutex_unlock(&map->s_lock);
+
+#if    TASK_SWAPPER
+       /*
+        * The map residence count isn't decremented here because
+        * the vm_map_delete below will traverse the entire map, 
+        * deleting entries, and the residence counts on objects
+        * and sharing maps will go away then.
+        */
+#endif
+
+       vm_map_destroy(map);
+}