]> git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/vm/vm_map.c
xnu-517.3.7.tar.gz
[apple/xnu.git] / osfmk / vm / vm_map.c
index 87b0b30db23af06e254b0e5903dcbea691790d51..aa82856e826fd361e492c2fc8823a62ceea73418 100644 (file)
@@ -3,19 +3,22 @@
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
  * @APPLE_LICENSE_HEADER_END@
  */
@@ -66,6 +69,7 @@
 #include <mach/vm_attributes.h>
 #include <mach/vm_param.h>
 #include <mach/vm_behavior.h>
+#include <mach/vm_statistics.h>
 #include <kern/assert.h>
 #include <kern/counters.h>
 #include <kern/zalloc.h>
@@ -81,6 +85,7 @@
 #include <mach/vm_map_server.h>
 #include <mach/mach_host_server.h>
 #include <ddb/tr.h>
+#include <machine/db_machdep.h>
 #include <kern/xpr.h>
 
 /* Internal prototypes
@@ -299,6 +304,9 @@ int         kentry_count = 2048;            /* to init kentry_data_size */
  */
 vm_size_t vm_map_aggressive_enter_max;         /* set by bootstrap */
 
+/* Skip acquiring locks if we're in the midst of a kernel core dump */
+extern unsigned int not_in_kdp;
+
 void
 vm_map_init(
        void)
@@ -332,7 +340,7 @@ void
 vm_map_steal_memory(
        void)
 {
-       map_data_size = round_page(10 * sizeof(struct vm_map));
+       map_data_size = round_page_32(10 * sizeof(struct vm_map));
        map_data = pmap_steal_memory(map_data_size);
 
 #if 0
@@ -348,7 +356,7 @@ vm_map_steal_memory(
 
 
        kentry_data_size =
-               round_page(kentry_count * sizeof(struct vm_map_entry));
+               round_page_32(kentry_count * sizeof(struct vm_map_entry));
        kentry_data = pmap_steal_memory(kentry_data_size);
 }
 
@@ -474,8 +482,8 @@ first_free_is_valid(
                
        entry = vm_map_to_entry(map);
        next = entry->vme_next;
-       while (trunc_page(next->vme_start) == trunc_page(entry->vme_end) ||
-              (trunc_page(next->vme_start) == trunc_page(entry->vme_start) &&
+       while (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_end) ||
+              (trunc_page_32(next->vme_start) == trunc_page_32(entry->vme_start) &&
                next != vm_map_to_entry(map))) {
                entry = next;
                next = entry->vme_next;
@@ -505,10 +513,10 @@ MACRO_BEGIN                                                               \
        UFF_map = (map);                                                \
        UFF_first_free = (new_first_free);                              \
        UFF_next_entry = UFF_first_free->vme_next;                      \
-       while (trunc_page(UFF_next_entry->vme_start) ==                 \
-              trunc_page(UFF_first_free->vme_end) ||                   \
-              (trunc_page(UFF_next_entry->vme_start) ==                \
-               trunc_page(UFF_first_free->vme_start) &&                \
+       while (trunc_page_32(UFF_next_entry->vme_start) ==              \
+              trunc_page_32(UFF_first_free->vme_end) ||                        \
+              (trunc_page_32(UFF_next_entry->vme_start) ==             \
+               trunc_page_32(UFF_first_free->vme_start) &&             \
                UFF_next_entry != vm_map_to_entry(UFF_map))) {          \
                UFF_first_free = UFF_next_entry;                        \
                UFF_next_entry = UFF_first_free->vme_next;              \
@@ -654,7 +662,8 @@ vm_map_destroy(
                             map->max_offset, VM_MAP_NO_FLAGS);
        vm_map_unlock(map);
 
-       pmap_destroy(map->pmap);
+       if(map->pmap)
+               pmap_destroy(map->pmap);
 
        zfree(vm_map_zone, (vm_offset_t) map);
 }
@@ -850,9 +859,11 @@ void vm_map_swapout(vm_map_t map)
  *     future lookups.  Performs necessary interlocks.
  */
 #define        SAVE_HINT(map,value) \
+MACRO_BEGIN \
                mutex_lock(&(map)->s_lock); \
                (map)->hint = (value); \
-               mutex_unlock(&(map)->s_lock);
+               mutex_unlock(&(map)->s_lock); \
+MACRO_END
 
 /*
  *     vm_map_lookup_entry:    [ internal use only ]
@@ -877,10 +888,11 @@ vm_map_lookup_entry(
         *      Start looking either from the head of the
         *      list, or from the hint.
         */
-
-       mutex_lock(&map->s_lock);
+       if (not_in_kdp)
+         mutex_lock(&map->s_lock);
        cur = map->hint;
-       mutex_unlock(&map->s_lock);
+       if (not_in_kdp)
+         mutex_unlock(&map->s_lock);
 
        if (cur == vm_map_to_entry(map))
                cur = cur->vme_next;
@@ -924,7 +936,8 @@ vm_map_lookup_entry(
                                 */
 
                                *entry = cur;
-                               SAVE_HINT(map, cur);
+                               if (not_in_kdp)
+                                 SAVE_HINT(map, cur);
                                return(TRUE);
                        }
                        break;
@@ -932,7 +945,8 @@ vm_map_lookup_entry(
                cur = cur->vme_next;
        }
        *entry = cur->vme_prev;
-       SAVE_HINT(map, *entry);
+       if (not_in_kdp)
+         SAVE_HINT(map, *entry);
        return(FALSE);
 }
 
@@ -1109,6 +1123,9 @@ vm_map_pmap_enter(
 {
        unsigned int            cache_attr;
 
+       if(map->pmap == 0)
+               return;
+
        while (addr < end_addr) {
                register vm_page_t      m;
 
@@ -1135,7 +1152,7 @@ vm_map_pmap_enter(
                m->busy = TRUE;
 
                if (m->no_isync == TRUE) {
-                       pmap_sync_caches_phys(m->phys_addr);
+                       pmap_sync_caches_phys(m->phys_page);
                        m->no_isync = FALSE;
                }
 
@@ -1361,7 +1378,7 @@ vm_map_enter(
            (entry->max_protection == max_protection) &&
            (entry->behavior == VM_BEHAVIOR_DEFAULT) &&
            (entry->in_transition == 0) &&
-           ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT) &&
+           ((alias == VM_MEMORY_REALLOC) || ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) &&
            (entry->wired_count == 0)) { /* implies user_wired_count == 0 */
                if (vm_object_coalesce(entry->object.vm_object,
                                VM_OBJECT_NULL,
@@ -1445,14 +1462,14 @@ MACRO_BEGIN                                                             \
                        vm_offset_t     pmap_base_addr;                 \
                                                                        \
                        pmap_base_addr = 0xF0000000 & entry->vme_start; \
-                       pmap_unnest(map->pmap, pmap_base_addr,          \
-                                                       0x10000000);    \
+                       pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
                        entry->use_pmap = FALSE;                        \
                } else if(entry->object.vm_object                       \
                        && !entry->is_sub_map                           \
                        && entry->object.vm_object->phys_contiguous) {  \
                        pmap_remove(map->pmap,                          \
-                               entry->vme_start, entry->vme_end);      \
+                               (addr64_t)(entry->vme_start),           \
+                               (addr64_t)(entry->vme_end));            \
                }                                                       \
                _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\
        }                                                               \
@@ -1537,14 +1554,14 @@ MACRO_BEGIN                                                             \
                        vm_offset_t     pmap_base_addr;                 \
                                                                        \
                        pmap_base_addr = 0xF0000000 & entry->vme_start; \
-                       pmap_unnest(map->pmap, pmap_base_addr,          \
-                                                       0x10000000);    \
+                       pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \
                        entry->use_pmap = FALSE;                        \
                } else if(entry->object.vm_object                       \
                        && !entry->is_sub_map                           \
                        && entry->object.vm_object->phys_contiguous) {  \
                        pmap_remove(map->pmap,                          \
-                               entry->vme_start, entry->vme_end);      \
+                               (addr64_t)(entry->vme_start),           \
+                               (addr64_t)(entry->vme_end));            \
                }                                                       \
                _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \
        }                                                               \
@@ -1738,25 +1755,32 @@ vm_map_submap(
            (object->copy == VM_OBJECT_NULL) &&
            (object->shadow == VM_OBJECT_NULL) &&
            (!object->pager_created)) {
-               entry->offset = (vm_object_offset_t)offset;
-               entry->object.vm_object = VM_OBJECT_NULL;
-               vm_object_deallocate(object);
-               entry->is_sub_map = TRUE;
-               vm_map_reference(entry->object.sub_map = submap);
+                       entry->offset = (vm_object_offset_t)offset;
+                       entry->object.vm_object = VM_OBJECT_NULL;
+                       vm_object_deallocate(object);
+                       entry->is_sub_map = TRUE;
+                       entry->object.sub_map = submap;
+                       vm_map_reference(submap);
 #ifndef i386
-               if ((use_pmap) && (offset == 0)) {
-                       /* nest if platform code will allow */
-                       result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap, 
-                                                       start, end - start);
-                       if(result)
-                               panic("pmap_nest failed!");
-                       entry->use_pmap = TRUE;
-               }
+                       if ((use_pmap) && (offset == 0)) {
+                               /* nest if platform code will allow */
+                               if(submap->pmap == NULL) {
+                                       submap->pmap = pmap_create((vm_size_t) 0);
+                                       if(submap->pmap == PMAP_NULL) {
+                                               return(KERN_NO_SPACE);
+                                       }
+                               }
+                               result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap, 
+                                                               (addr64_t)start, (addr64_t)start, (uint64_t)(end - start));
+                               if(result)
+                                       panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result);
+                               entry->use_pmap = TRUE;
+                       }
 #endif
 #ifdef i386
-               pmap_remove(map->pmap, start, end);
+                       pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end);
 #endif
-               result = KERN_SUCCESS;
+                       result = KERN_SUCCESS;
        }
        vm_map_unlock(map);
 
@@ -1908,8 +1932,7 @@ vm_map_protect(
                                local_entry->use_pmap = FALSE;
                                local_entry = local_entry->vme_next;
                           }
-                          pmap_unnest(map->pmap, pmap_base_addr,
-                                       (pmap_end_addr - pmap_base_addr) + 1);
+                          pmap_unnest(map->pmap, (addr64_t)pmap_base_addr);
 #endif
                   }
                   if (!(current->protection & VM_PROT_WRITE)) {
@@ -2248,7 +2271,7 @@ vm_map_wire_nested(
                                rc = vm_map_wire_nested(entry->object.sub_map, 
                                                sub_start, sub_end,
                                                access_type, 
-                                               user_wire, pmap, pmap_addr);
+                                               user_wire, map_pmap, pmap_addr);
                                vm_map_lock(map);
                        }
                        s = entry->vme_start;
@@ -2683,8 +2706,9 @@ vm_map_unwire_nested(
                           continue;
                        } else {
                           vm_map_unlock(map);
-                          vm_map_unwire_nested(entry->object.sub_map, 
-                               sub_start, sub_end, user_wire, pmap, pmap_addr);
+                          vm_map_unwire_nested(entry->object.sub_map,
+                               sub_start, sub_end, user_wire, map_pmap,
+                               pmap_addr);
                           vm_map_lock(map);
 
                           if (last_timestamp+1 != map->timestamp) {
@@ -2909,7 +2933,8 @@ vm_map_submap_pmap_clean(
                                        VM_PROT_NONE);
                        } else {
                                pmap_remove(map->pmap, 
-                                       start, start + remove_size);
+                                       (addr64_t)start, 
+                                       (addr64_t)(start + remove_size));
                        }
                }
        }
@@ -2941,9 +2966,10 @@ vm_map_submap_pmap_clean(
                                        VM_PROT_NONE);
                        } else {
                                pmap_remove(map->pmap, 
-                                  (start + entry->vme_start) - offset,
-                                  ((start + entry->vme_start) 
-                                       - offset) + remove_size);
+                                  (addr64_t)((start + entry->vme_start) 
+                                                               - offset),
+                                  (addr64_t)(((start + entry->vme_start) 
+                                       - offset) + remove_size));
                        }
                }
                entry = entry->vme_next;
@@ -3200,8 +3226,7 @@ vm_map_delete(
                        if(entry->is_sub_map) {
                           if(entry->use_pmap) {
 #ifndef i386
-                               pmap_unnest(map->pmap, entry->vme_start,
-                                       entry->vme_end - entry->vme_start);
+                               pmap_unnest(map->pmap, (addr64_t)entry->vme_start);
 #endif
                                if((map->mapped) && (map->ref_count)) {
                                        /* clean up parent map/maps */
@@ -3218,19 +3243,46 @@ vm_map_delete(
                                        entry->offset);
                           }
                        } else {
-                               if((map->mapped) && (map->ref_count)) {
-                                       vm_object_pmap_protect(
-                                               entry->object.vm_object,
-                                               entry->offset,
-                                               entry->vme_end - entry->vme_start,
-                                               PMAP_NULL,
-                                               entry->vme_start,
-                                               VM_PROT_NONE);
-                               } else {
+                          object = entry->object.vm_object;
+                          if((map->mapped) && (map->ref_count)) {
+                             vm_object_pmap_protect(
+                                       object, entry->offset,
+                                       entry->vme_end - entry->vme_start,
+                                       PMAP_NULL,
+                                       entry->vme_start,
+                                       VM_PROT_NONE);
+                          } else if(object != NULL) {
+                             if ((object->shadow != NULL) || 
+                               (object->phys_contiguous) ||
+                               (object->resident_page_count > 
+                               atop((entry->vme_end - entry->vme_start)/4))) {
                                        pmap_remove(map->pmap, 
-                                               entry->vme_start, 
-                                               entry->vme_end);
+                                               (addr64_t)(entry->vme_start), 
+                                               (addr64_t)(entry->vme_end));
+                             } else {
+                               vm_page_t p;
+                               vm_object_offset_t start_off;
+                               vm_object_offset_t end_off;
+                               start_off = entry->offset;
+                               end_off = start_off + 
+                                          (entry->vme_end - entry->vme_start);
+                               vm_object_lock(object);
+                               queue_iterate(&object->memq,
+                                                p, vm_page_t, listq) {
+                                  if ((!p->fictitious) && 
+                                       (p->offset >= start_off) &&
+                                       (p->offset < end_off)) {
+                                       vm_offset_t start;
+                                       start = entry->vme_start;
+                                       start += p->offset - start_off;
+                                       pmap_remove(
+                                               map->pmap, start, 
+                                               start + PAGE_SIZE);
+                                  }
                                }
+                               vm_object_unlock(object);
+                            }
+                         }
                        }
                }
 
@@ -3443,7 +3495,7 @@ vm_map_overwrite_submap_recurse(
         *      splitting entries in strange ways.
         */
 
-       dst_end = round_page(dst_addr + dst_size);
+       dst_end = round_page_32(dst_addr + dst_size);
        vm_map_lock(dst_map);
 
 start_pass_1:
@@ -3452,7 +3504,7 @@ start_pass_1:
                return(KERN_INVALID_ADDRESS);
        }
 
-       vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
+       vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));
 
        for (entry = tmp_entry;;) {
                vm_map_entry_t  next;
@@ -3673,7 +3725,7 @@ vm_map_copy_overwrite_nested(
                !page_aligned (dst_addr))
        {
                aligned = FALSE;
-               dst_end = round_page(dst_addr + copy->size);
+               dst_end = round_page_32(dst_addr + copy->size);
        } else {
                dst_end = dst_addr + copy->size;
        }
@@ -3685,7 +3737,7 @@ start_pass_1:
                vm_map_unlock(dst_map);
                return(KERN_INVALID_ADDRESS);
        }
-       vm_map_clip_start(dst_map, tmp_entry, trunc_page(dst_addr));
+       vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(dst_addr));
        for (entry = tmp_entry;;) {
                vm_map_entry_t  next = entry->vme_next;
 
@@ -4170,7 +4222,7 @@ start_overwrite:
                                break;
                        }
                }
-               vm_map_clip_start(dst_map, tmp_entry, trunc_page(base_addr));
+               vm_map_clip_start(dst_map, tmp_entry, trunc_page_32(base_addr));
 
                entry = tmp_entry;
        } /* while */
@@ -4571,9 +4623,9 @@ vm_map_copy_overwrite_aligned(
                                                        entry->vme_start,
                                                        VM_PROT_NONE);
                                        } else {
-                                               pmap_remove(dst_map->pmap, 
-                                                       entry->vme_start
-                                                       entry->vme_end);
+                                          pmap_remove(dst_map->pmap, 
+                                            (addr64_t)(entry->vme_start)
+                                            (addr64_t)(entry->vme_end));
                                        }
                                        vm_object_deallocate(old_object);
                                }
@@ -4640,7 +4692,10 @@ vm_map_copy_overwrite_aligned(
                                        /* No isync here */
 
                                        PMAP_ENTER(pmap, va, m, prot, 
-                                               VM_WIMG_USE_DEFAULT, FALSE);
+                                               ((unsigned int)
+                                                       (m->object->wimg_bits)) 
+                                                       & VM_WIMG_MASK,
+                                               FALSE);
                
                                        vm_object_lock(object);
                                        vm_page_lock_queues();
@@ -4802,8 +4857,8 @@ vm_map_copyin_kernel_buffer(
                        VM_MAP_REMOVE_INTERRUPTIBLE;
        }
        if (src_destroy) {
-               (void) vm_map_remove(src_map, trunc_page(src_addr), 
-                                    round_page(src_addr + len),
+               (void) vm_map_remove(src_map, trunc_page_32(src_addr), 
+                                    round_page_32(src_addr + len),
                                     flags);
        }
        *copy_result = copy;
@@ -4839,7 +4894,7 @@ vm_map_copyout_kernel_buffer(
                *addr = 0;
                kr = vm_map_enter(map, 
                                  addr, 
-                                 round_page(copy->size),
+                                 round_page_32(copy->size),
                                  (vm_offset_t) 0, 
                                  TRUE,
                                  VM_OBJECT_NULL, 
@@ -4963,7 +5018,7 @@ vm_map_copyout(
                vm_object_offset_t      offset;
 
                offset = trunc_page_64(copy->offset);
-               size = round_page(copy->size + 
+               size = round_page_32(copy->size + 
                                (vm_size_t)(copy->offset - offset));
                *dst_addr = 0;
                kr = vm_map_enter(dst_map, dst_addr, size,
@@ -4994,7 +5049,7 @@ vm_map_copyout(
         */
 
        vm_copy_start = trunc_page_64(copy->offset);
-       size =  round_page((vm_size_t)copy->offset + copy->size) 
+       size =  round_page_32((vm_size_t)copy->offset + copy->size) 
                                                        - vm_copy_start;
 
  StartAgain: ;
@@ -5145,7 +5200,10 @@ vm_map_copyout(
                        vm_object_unlock(object);
 
                        PMAP_ENTER(dst_map->pmap, va, m, entry->protection, 
-                                               VM_WIMG_USE_DEFAULT, TRUE);
+                                               ((unsigned int)
+                                                       (m->object->wimg_bits)) 
+                                                       & VM_WIMG_MASK,
+                                               TRUE);
 
                        vm_object_lock(object);
                        PAGE_WAKEUP_DONE(m);
@@ -5195,7 +5253,9 @@ vm_map_copyout(
 
                                                PMAP_ENTER(dst_map->pmap, va, 
                                                        m, prot, 
-                                                       VM_WIMG_USE_DEFAULT, 
+                                                       ((unsigned int)
+                                                       (m->object->wimg_bits)) 
+                                                          & VM_WIMG_MASK,
                                                        FALSE);
 
                                                vm_object_lock(object);
@@ -5333,8 +5393,8 @@ vm_map_copyin_common(
         *      Compute start and end of region
         */
 
-       src_start = trunc_page(src_addr);
-       src_end = round_page(src_addr + len);
+       src_start = trunc_page_32(src_addr);
+       src_end = round_page_32(src_addr + len);
 
        XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0);
 
@@ -5463,13 +5523,14 @@ vm_map_copyin_common(
                        src_entry = tmp_entry;
                }
                if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) && 
-                   ((tmp_entry->object.vm_object->wimg_bits != VM_WIMG_DEFAULT) ||
-                   (tmp_entry->object.vm_object->phys_contiguous))) {
-                       /* This is not, cannot be supported for now */
-                       /* we need a description of the caching mode */
-                       /* reflected in the object before we can     */
-                       /* support copyin, and then the support will */
-                       /* be for direct copy */
+                   (tmp_entry->object.vm_object->phys_contiguous)) {
+                       /* This is not, supported for now.In future */
+                       /* we will need to detect the phys_contig   */
+                       /* condition and then upgrade copy_slowly   */
+                       /* to do physical copy from the device mem  */
+                       /* based object. We can piggy-back off of   */
+                       /* the was wired boolean to set-up the      */
+                       /* proper handling */
                        RETURN(KERN_PROTECTION_FAILURE);
                }
                /*
@@ -5548,8 +5609,9 @@ RestartCopy:
                XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
                    src_object, new_entry, new_entry->object.vm_object,
                    was_wired, 0);
-               if (!was_wired &&
-                   vm_object_copy_quickly(
+               if ((src_object == VM_OBJECT_NULL ||
+                       (!was_wired && !map_share && !tmp_entry->is_shared)) &&
+                       vm_object_copy_quickly(
                                &new_entry->object.vm_object,
                                src_offset,
                                src_size,
@@ -5563,49 +5625,17 @@ RestartCopy:
                         */
 
                        if (src_needs_copy && !tmp_entry->needs_copy) {
-                               if (tmp_entry->is_shared  || 
-                                    tmp_entry->object.vm_object->true_share ||
-                                    map_share) {
-                                       vm_map_unlock(src_map);
-                                       new_entry->object.vm_object = 
-                                               vm_object_copy_delayed(
-                                                       src_object,
-                                                       src_offset,     
-                                                       src_size);
-                                       /* dec ref gained in copy_quickly */
-                                       vm_object_lock(src_object);
-                                       src_object->ref_count--;
-                                       assert(src_object->ref_count > 0);
-                                       vm_object_res_deallocate(src_object);
-                                       vm_object_unlock(src_object);
-                                       vm_map_lock(src_map);
-                                       /* 
-                                        * it turns out that we have
-                                        * finished our copy. No matter
-                                        * what the state of the map
-                                        * we will lock it again here
-                                        * knowing that if there is
-                                        * additional data to copy
-                                        * it will be checked at
-                                        * the top of the loop
-                                        *
-                                        * Don't do timestamp check
-                                        */
-                                       
-                               } else {
-                                       vm_object_pmap_protect(
-                                               src_object,
-                                               src_offset,
-                                               src_size,
-                                               (src_entry->is_shared ? 
-                                                       PMAP_NULL
-                                                       : src_map->pmap),
-                                               src_entry->vme_start,
-                                               src_entry->protection &
-                                                       ~VM_PROT_WRITE);
-
-                                       tmp_entry->needs_copy = TRUE;
-                               }
+                               vm_object_pmap_protect(
+                                       src_object,
+                                       src_offset,
+                                       src_size,
+                                       (src_entry->is_shared ? 
+                                               PMAP_NULL
+                                               : src_map->pmap),
+                                       src_entry->vme_start,
+                                       src_entry->protection &
+                                               ~VM_PROT_WRITE);
+                               tmp_entry->needs_copy = TRUE;
                        }
 
                        /*
@@ -5617,8 +5647,6 @@ RestartCopy:
                        goto CopySuccessful;
                }
 
-               new_entry->needs_copy = FALSE;
-
                /*
                 *      Take an object reference, so that we may
                 *      release the map lock(s).
@@ -5640,6 +5668,7 @@ RestartCopy:
                 */
 
                if (was_wired) {
+               CopySlowly:
                        vm_object_lock(src_object);
                        result = vm_object_copy_slowly(
                                        src_object,
@@ -5649,6 +5678,24 @@ RestartCopy:
                                        &new_entry->object.vm_object);
                        new_entry->offset = 0;
                        new_entry->needs_copy = FALSE;
+
+               }
+               else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
+                       (tmp_entry->is_shared  || map_share)) {
+                       vm_object_t new_object;
+
+                       vm_object_lock(src_object);
+                       new_object = vm_object_copy_delayed(
+                                       src_object,
+                                       src_offset,     
+                                       src_size);
+                       if (new_object == VM_OBJECT_NULL)
+                               goto CopySlowly;
+
+                       new_entry->object.vm_object = new_object;
+                       new_entry->needs_copy = TRUE;
+                       result = KERN_SUCCESS;
+
                } else {
                        result = vm_object_copy_strategically(src_object,
                                src_offset,
@@ -5658,7 +5705,6 @@ RestartCopy:
                                &new_entry_needs_copy);
 
                        new_entry->needs_copy = new_entry_needs_copy;
-                       
                }
 
                if (result != KERN_SUCCESS &&
@@ -5797,7 +5843,7 @@ RestartCopy:
         */
        if (src_destroy) {
                (void) vm_map_delete(src_map,
-                                    trunc_page(src_addr),
+                                    trunc_page_32(src_addr),
                                     src_end,
                                     (src_map == kernel_map) ?
                                        VM_MAP_REMOVE_KUNWIRE :
@@ -5884,8 +5930,9 @@ vm_map_fork_share(
                if(old_entry->use_pmap) {
                        result = pmap_nest(new_map->pmap, 
                                (old_entry->object.sub_map)->pmap, 
-                               old_entry->vme_start,
-                               old_entry->vme_end - old_entry->vme_start);
+                               (addr64_t)old_entry->vme_start,
+                               (addr64_t)old_entry->vme_start,
+                               (uint64_t)(old_entry->vme_end - old_entry->vme_start));
                        if(result)
                                panic("vm_map_fork_share: pmap_nest failed!");
                }
@@ -6110,8 +6157,7 @@ vm_map_fork_copy(
                 */
                vm_map_lock(old_map);
                if (!vm_map_lookup_entry(old_map, start, &last) ||
-                   last->max_protection & VM_PROT_READ ==
-                                        VM_PROT_NONE) {
+                   (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
                        last = last->vme_next;
                }
                *old_entry_p = last;
@@ -7146,11 +7192,13 @@ vm_region_recurse_64(
        recurse_count = *nesting_depth;
 
 LOOKUP_NEXT_BASE_ENTRY:
-       vm_map_lock_read(map);
+       if (not_in_kdp)
+         vm_map_lock_read(map);
         if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
                if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
-                       vm_map_unlock_read(map);
-                       return(KERN_INVALID_ADDRESS);
+                 if (not_in_kdp)
+                   vm_map_unlock_read(map);
+                 return(KERN_INVALID_ADDRESS);
                }
        } else {
                entry = tmp_entry;
@@ -7164,7 +7212,8 @@ LOOKUP_NEXT_BASE_ENTRY:
 
        while(entry->is_sub_map && recurse_count) {
                recurse_count--;
-               vm_map_lock_read(entry->object.sub_map);
+               if (not_in_kdp)
+                 vm_map_lock_read(entry->object.sub_map);
 
 
                if(entry == base_entry) {
@@ -7173,13 +7222,15 @@ LOOKUP_NEXT_BASE_ENTRY:
                }
 
                submap = entry->object.sub_map;
-               vm_map_unlock_read(map);
+               if (not_in_kdp)
+                 vm_map_unlock_read(map);
                map = submap;
 
                if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
                        if ((entry = tmp_entry->vme_next) 
                                                == vm_map_to_entry(map)) {
-                               vm_map_unlock_read(map);
+                               if (not_in_kdp)
+                                 vm_map_unlock_read(map);
                                map = base_map;
                                start = base_next;
                                recurse_count = 0;
@@ -7199,7 +7250,8 @@ LOOKUP_NEXT_BASE_ENTRY:
                        }
                        if(base_next <= 
                                (base_addr += (entry->vme_start - start))) {
-                               vm_map_unlock_read(map);
+                               if (not_in_kdp)
+                                 vm_map_unlock_read(map);
                                map = base_map;
                                start = base_next;
                                recurse_count = 0;
@@ -7225,7 +7277,8 @@ LOOKUP_NEXT_BASE_ENTRY:
                        }
                        base_addr += entry->vme_start;
                        if(base_addr >= base_next) {
-                               vm_map_unlock_read(map);
+                               if (not_in_kdp)
+                                 vm_map_unlock_read(map);
                                map = base_map;
                                start = base_next;
                                recurse_count = 0;
@@ -7265,7 +7318,8 @@ LOOKUP_NEXT_BASE_ENTRY:
        extended.pages_dirtied = 0;
        extended.external_pager = 0;
        extended.shadow_depth = 0;
-
+       
+       if (not_in_kdp)
        if(!entry->is_sub_map) {
                vm_region_walk(entry, &extended, entry->offset, 
                                entry->vme_end - start, map, start);
@@ -7289,8 +7343,8 @@ LOOKUP_NEXT_BASE_ENTRY:
        submap_info->pages_dirtied = extended.pages_dirtied;
        submap_info->external_pager = extended.external_pager;
        submap_info->shadow_depth = extended.shadow_depth;
-
-       vm_map_unlock_read(map);
+       if (not_in_kdp)
+         vm_map_unlock_read(map);
        return(KERN_SUCCESS);
 }
 
@@ -7631,12 +7685,13 @@ vm_region_look_for_page(
                        if (shadow && (max_refcnt == 1))
                                extended->pages_shared_now_private++;
 
-                       if (p->dirty || pmap_is_modified(p->phys_addr))
+                       if (!p->fictitious && 
+                               (p->dirty || pmap_is_modified(p->phys_page)))
                                extended->pages_dirtied++;
                        extended->pages_resident++;
 
                        if(object != caller_object)
-                               vm_object_unlock(object);
+                            vm_object_unlock(object);
 
                        return;
                }
@@ -7646,13 +7701,13 @@ vm_region_look_for_page(
                                extended->pages_swapped_out++;
 
                                if(object != caller_object)
-                                       vm_object_unlock(object);
+                                    vm_object_unlock(object);
 
                                return;
                        }
                }
                if (shadow) {
-                       vm_object_lock(shadow);
+                   vm_object_lock(shadow);
 
                        if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
                                ref_count--;
@@ -7664,7 +7719,7 @@ vm_region_look_for_page(
                                max_refcnt = ref_count;
                        
                        if(object != caller_object)
-                               vm_object_unlock(object);
+                            vm_object_unlock(object);
 
                        object = shadow;
                        shadow = object->shadow;
@@ -7672,7 +7727,7 @@ vm_region_look_for_page(
                        continue;
                }
                if(object != caller_object)
-                       vm_object_unlock(object);
+                    vm_object_unlock(object);
                break;
        }
 }
@@ -7690,7 +7745,7 @@ vm_region_count_obj_refs(
            return(0);
 
         if (entry->is_sub_map)
-           ref_count = vm_region_count_obj_refs((vm_map_entry_t)entry->object.sub_map, object);
+           return(0);
        else {
            ref_count = 0;
 
@@ -7701,9 +7756,9 @@ vm_region_count_obj_refs(
                if (chk_obj == object)
                    ref_count++;
                if (tmp_obj = chk_obj->shadow)
-                   vm_object_lock(tmp_obj);
+                  vm_object_lock(tmp_obj);
                vm_object_unlock(chk_obj);
-               
+
                chk_obj = tmp_obj;
            }
        }
@@ -7853,9 +7908,9 @@ vm_map_machine_attribute(
        }
 
        /* Get the starting address */
-       start = trunc_page(address);
+       start = trunc_page_32(address);
        /* Figure how much memory we need to flush (in page increments) */
-       sync_size = round_page(start + size) - start;   
+       sync_size = round_page_32(start + size) - start;        
 
 
        ret = KERN_SUCCESS;                                                                             /* Assume it all worked */
@@ -7898,7 +7953,7 @@ vm_map_machine_attribute(
        
                                                  ret = 
                                                     pmap_attribute_cache_sync(
-                                                       m->phys_addr, 
+                                                       m->phys_page,   
                                                        PAGE_SIZE, 
                                                        attribute, value);
                                                } else if (object->shadow) {
@@ -8028,7 +8083,7 @@ void
 vm_map_links_print(
        struct vm_map_links     *links)
 {
-       iprintf("prev=0x%x, next=0x%x, start=0x%x, end=0x%x\n",
+       iprintf("prev = %08X  next = %08X  start = %08X  end = %08X\n",
                links->prev,
                links->next,
                links->start,
@@ -8043,7 +8098,7 @@ vm_map_header_print(
        struct vm_map_header    *header)
 {
        vm_map_links_print(&header->links);
-       iprintf("nentries=0x%x, %sentries_pageable\n",
+       iprintf("nentries = %08X, %sentries_pageable\n",
                header->nentries,
                (header->entries_pageable ? "" : "!"));
 }
@@ -8058,7 +8113,7 @@ vm_follow_entry(
        extern int db_indent;
        int shadows;
 
-       iprintf("map entry 0x%x:\n", entry);
+       iprintf("map entry %08X\n", entry);
 
        db_indent += 2;
 
@@ -8079,20 +8134,20 @@ vm_map_entry_print(
        static char *inheritance_name[4] = { "share", "copy", "none", "?"};
        static char *behavior_name[4] = { "dflt", "rand", "seqtl", "rseqntl" };
        
-       iprintf("map entry 0x%x:\n", entry);
+                       iprintf("map entry %08X\n", entry);
 
        db_indent += 2;
 
        vm_map_links_print(&entry->links);
 
-       iprintf("start=0x%x, end=0x%x, prot=%x/%x/%s\n",
+       iprintf("start = %08X  end = %08X, prot=%x/%x/%s\n",
                entry->vme_start,
                entry->vme_end,
                entry->protection,
                entry->max_protection,
                inheritance_name[(entry->inheritance & 0x3)]);
 
-       iprintf("behavior=%s, wired_count=%d, user_wired_count=%d\n",
+       iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
                behavior_name[(entry->behavior & 0x3)],
                entry->wired_count,
                entry->user_wired_count);
@@ -8101,11 +8156,11 @@ vm_map_entry_print(
                (entry->needs_wakeup ? "" : "!"));
 
        if (entry->is_sub_map) {
-               iprintf("submap=0x%x, offset=0x%x\n",
+               iprintf("submap = %08X - offset=%08X\n",
                       entry->object.sub_map,
                       entry->offset);
        } else {
-               iprintf("object=0x%x, offset=0x%x, ",
+               iprintf("object=%08X, offset=%08X, ",
                        entry->object.vm_object,
                        entry->offset);
                printf("%sis_shared, %sneeds_copy\n",
@@ -8126,7 +8181,7 @@ vm_follow_map(
        register vm_map_entry_t entry;
        extern int db_indent;
 
-       iprintf("task map 0x%x:\n", map);
+       iprintf("task map %08X\n", map);
 
        db_indent += 2;
 
@@ -8144,26 +8199,29 @@ vm_follow_map(
  */
 void
 vm_map_print(
-       register vm_map_t       map)
+       db_addr_t inmap)
 {
        register vm_map_entry_t entry;
+       vm_map_t map;
        extern int db_indent;
        char *swstate;
 
-       iprintf("task map 0x%x:\n", map);
+       map = (vm_map_t)inmap;                          /* Make sure we have the right type */
+
+       iprintf("task map %08X\n", map);
 
        db_indent += 2;
 
        vm_map_header_print(&map->hdr);
 
-       iprintf("pmap=0x%x, size=%d, ref=%d, hint=0x%x, first_free=0x%x\n",
+       iprintf("pmap = %08X, size = %08X, ref = %d, hint = %08X, first_free = %08X\n",
                map->pmap,
                map->size,
                map->ref_count,
                map->hint,
                map->first_free);
 
-       iprintf("%swait_for_space, %swiring_required, timestamp=%d\n",
+       iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
                (map->wait_for_space ? "" : "!"),
                (map->wiring_required ? "" : "!"),
                map->timestamp);
@@ -8180,7 +8238,7 @@ vm_map_print(
                swstate = "????";
                break;
        }
-       iprintf("res=%d, sw_state=%s\n", map->res_count, swstate);
+       iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
 #endif /* TASK_SWAPPER */
 
        for (entry = vm_map_first_entry(map);
@@ -8200,12 +8258,15 @@ vm_map_print(
 
 void
 vm_map_copy_print(
-       vm_map_copy_t   copy)
+       db_addr_t       incopy)
 {
        extern int db_indent;
+       vm_map_copy_t copy;
        int i, npages;
        vm_map_entry_t entry;
 
+       copy = (vm_map_copy_t)incopy;   /* Make sure we have the right type */
+
        printf("copy object 0x%x\n", copy);
 
        db_indent += 2;
@@ -8262,10 +8323,13 @@ vm_map_copy_print(
  */
 vm_size_t
 db_vm_map_total_size(
-       vm_map_t        map)
+       db_addr_t       inmap)
 {
        vm_map_entry_t  entry;
        vm_size_t       total;
+       vm_map_t map;
+
+       map = (vm_map_t)inmap;  /* Make sure we have the right type */
 
        total = 0;
        for (entry = vm_map_first_entry(map);
@@ -8375,7 +8439,7 @@ vm_remap_extract(
        boolean_t               new_entry_needs_copy;
 
        assert(map != VM_MAP_NULL);
-       assert(size != 0 && size == round_page(size));
+       assert(size != 0 && size == round_page_32(size));
        assert(inheritance == VM_INHERIT_NONE ||
               inheritance == VM_INHERIT_COPY ||
               inheritance == VM_INHERIT_SHARE);
@@ -8383,8 +8447,8 @@ vm_remap_extract(
        /*
         *      Compute start and end of region.
         */
-       src_start = trunc_page(addr);
-       src_end = round_page(src_start + size);
+       src_start = trunc_page_32(addr);
+       src_end = round_page_32(src_start + size);
 
        /*
         *      Initialize map_header.
@@ -8704,7 +8768,7 @@ vm_remap(
                return KERN_INVALID_ARGUMENT;
        }
 
-       size = round_page(size);
+       size = round_page_32(size);
 
        result = vm_remap_extract(src_map, memory_address,
                                  size, copy, &map_header,
@@ -8722,7 +8786,7 @@ vm_remap(
         * Allocate/check a range of free virtual address
         * space for the target
         */
-       *address = trunc_page(*address);
+       *address = trunc_page_32(*address);
        vm_map_lock(target_map);
        result = vm_remap_range_allocate(target_map, address, size,
                                         mask, anywhere, &insp_entry);