git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/vm/vm_debug.c (xnu-3789.31.2)
diff --git a/osfmk/vm/vm_debug.c b/osfmk/vm/vm_debug.c
index d786d081a39dab4b6ebc2cba3c6550c8926821a2..e29eed60f307d52cd1b135ff27c5b1a031d142e4 100644
--- a/osfmk/vm/vm_debug.c
+++ b/osfmk/vm/vm_debug.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -59,7 +65,6 @@
 #include <mach_vm_debug.h>
 #include <mach/kern_return.h>
 #include <mach/mach_host_server.h>
-#include <mach/vm_map_server.h>
 #include <mach_debug/vm_info.h>
 #include <mach_debug/page_info.h>
 #include <mach_debug/hash_info.h>
 #define __DEBUG_ONLY
 #endif /* !MACH_VM_DEBUG */
 
+#ifdef VM32_SUPPORT
+
+#include <mach/vm32_map_server.h>
+#include <mach/vm_map.h>
+
 /*
  *     Routine:        mach_vm_region_info [kernel call]
  *     Purpose:
  */
 
 kern_return_t
-mach_vm_region_info(
+vm32_region_info(
        __DEBUG_ONLY vm_map_t                   map,
-       __DEBUG_ONLY vm_offset_t                address,
+       __DEBUG_ONLY vm32_offset_t              address,
        __DEBUG_ONLY vm_info_region_t           *regionp,
        __DEBUG_ONLY vm_info_object_array_t     *objectsp,
        __DEBUG_ONLY mach_msg_type_number_t     *objectsCntp)
@@ -112,7 +122,7 @@ mach_vm_region_info(
         return KERN_FAILURE;
 #else
        vm_map_copy_t copy;
-       vm_offset_t addr;       /* memory for OOL data */
+       vm_offset_t addr = 0;   /* memory for OOL data */
        vm_size_t size;         /* size of the memory */
        unsigned int room;      /* room for this many objects */
        unsigned int used;      /* actually this many objects */
@@ -150,7 +160,7 @@ mach_vm_region_info(
                        }
 
                        if (entry->is_sub_map)
-                               nmap = entry->object.sub_map;
+                               nmap = VME_SUBMAP(entry);
                        else
                                break;
 
@@ -162,11 +172,11 @@ mach_vm_region_info(
 
                /* cmap is read-locked; we have a real entry */
 
-               object = entry->object.vm_object;
-               region.vir_start = entry->vme_start;
-               region.vir_end = entry->vme_end;
-               region.vir_object = (vm_offset_t) object;
-               region.vir_offset = entry->offset;
+               object = VME_OBJECT(entry);
+               region.vir_start = (natural_t) entry->vme_start;
+               region.vir_end = (natural_t) entry->vme_end;
+               region.vir_object = (natural_t)(uintptr_t) object;
+               region.vir_offset = (natural_t) VME_OFFSET(entry);
                region.vir_needs_copy = entry->needs_copy;
                region.vir_protection = entry->protection;
                region.vir_max_protection = entry->max_protection;
@@ -175,7 +185,7 @@ mach_vm_region_info(
                region.vir_user_wired_count = entry->user_wired_count;
 
                used = 0;
-               room = size / sizeof(vm_info_object_t);
+               room = (unsigned int) (size / sizeof(vm_info_object_t));
 
                if (object == VM_OBJECT_NULL) {
                        vm_map_unlock_read(cmap);
@@ -194,29 +204,28 @@ mach_vm_region_info(
                                        &((vm_info_object_t *) addr)[used];
 
                                vio->vio_object =
-                                       (vm_offset_t) cobject;
+                                       (natural_t)(uintptr_t) cobject;
                                vio->vio_size =
-                                       cobject->size;
+                                       (natural_t) cobject->vo_size;
                                vio->vio_ref_count =
                                        cobject->ref_count;
                                vio->vio_resident_page_count =
                                        cobject->resident_page_count;
-                               vio->vio_absent_count =
-                                       cobject->absent_count;
                                vio->vio_copy =
-                                       (vm_offset_t) cobject->copy;
+                                       (natural_t)(uintptr_t) cobject->copy;
                                vio->vio_shadow =
-                                       (vm_offset_t) cobject->shadow;
+                                       (natural_t)(uintptr_t) cobject->shadow;
                                vio->vio_shadow_offset =
-                                       cobject->shadow_offset;
+                                       (natural_t) cobject->vo_shadow_offset;
                                vio->vio_paging_offset =
-                                       cobject->paging_offset;
+                                       (natural_t) cobject->paging_offset;
                                vio->vio_copy_strategy =
                                        cobject->copy_strategy;
                                vio->vio_last_alloc =
-                                       cobject->last_alloc;
+                                       (vm_offset_t) cobject->last_alloc;
                                vio->vio_paging_in_progress =
-                                       cobject->paging_in_progress;
+                                       cobject->paging_in_progress +
+                                       cobject->activity_in_progress;
                                vio->vio_pager_created =
                                        cobject->pager_created;
                                vio->vio_pager_initialized =
@@ -232,10 +241,10 @@ mach_vm_region_info(
                                vio->vio_alive =
                                        cobject->alive;
                                vio->vio_purgable =
-                                       (cobject->purgable != VM_OBJECT_NONPURGABLE);
+                                       (cobject->purgable != VM_PURGABLE_DENY);
                                vio->vio_purgable_volatile =
-                                       (cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
-                                        cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
+                                       (cobject->purgable == VM_PURGABLE_VOLATILE ||
+                                        cobject->purgable == VM_PURGABLE_EMPTY);
                        }
 
                        used++;
@@ -258,15 +267,21 @@ mach_vm_region_info(
 
                if (size != 0)
                        kmem_free(ipc_kernel_map, addr, size);
-               size = round_page_32(2 * used * sizeof(vm_info_object_t));
+               size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
+                                        VM_MAP_PAGE_MASK(ipc_kernel_map));
 
-               kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
+               kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
                if (kr != KERN_SUCCESS)
                        return KERN_RESOURCE_SHORTAGE;
 
-               kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
-                                vm_map_round_page(addr + size),
-                                VM_PROT_READ|VM_PROT_WRITE, FALSE);
+               kr = vm_map_wire(
+                       ipc_kernel_map,
+                       vm_map_trunc_page(addr,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                       vm_map_round_page(addr + size,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                       VM_PROT_READ|VM_PROT_WRITE,
+                       FALSE);
                assert(kr == KERN_SUCCESS);
        }
 
@@ -278,20 +293,26 @@ mach_vm_region_info(
                if (size != 0)
                        kmem_free(ipc_kernel_map, addr, size);
        } else {
-               vm_size_t size_used =
-                       round_page_32(used * sizeof(vm_info_object_t));
-
-               kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
-                                  vm_map_round_page(addr + size_used), FALSE);
+               vm_size_t size_used = (used * sizeof(vm_info_object_t));
+               vm_size_t vmsize_used = vm_map_round_page(size_used,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map));
+
+               kr = vm_map_unwire(
+                       ipc_kernel_map,
+                       vm_map_trunc_page(addr,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                       vm_map_round_page(addr + size_used,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                       FALSE);
                assert(kr == KERN_SUCCESS);
 
                kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                                   (vm_map_size_t)size_used, TRUE, &copy);
                assert(kr == KERN_SUCCESS);
 
-               if (size != size_used)
+               if (size != vmsize_used)
                        kmem_free(ipc_kernel_map,
-                                 addr + size_used, size - size_used);
+                                 addr + vmsize_used, size - vmsize_used);
        }
 
        *regionp = region;
@@ -306,9 +327,9 @@ mach_vm_region_info(
  */
 
 kern_return_t
-mach_vm_region_info_64(
+vm32_region_info_64(
        __DEBUG_ONLY vm_map_t                   map,
-       __DEBUG_ONLY vm_offset_t                address,
+       __DEBUG_ONLY vm32_offset_t              address,
        __DEBUG_ONLY vm_info_region_64_t        *regionp,
        __DEBUG_ONLY vm_info_object_array_t     *objectsp,
        __DEBUG_ONLY mach_msg_type_number_t     *objectsCntp)
@@ -317,7 +338,7 @@ mach_vm_region_info_64(
         return KERN_FAILURE;
 #else
        vm_map_copy_t copy;
-       vm_offset_t addr;       /* memory for OOL data */
+       vm_offset_t addr = 0;   /* memory for OOL data */
        vm_size_t size;         /* size of the memory */
        unsigned int room;      /* room for this many objects */
        unsigned int used;      /* actually this many objects */
@@ -353,7 +374,7 @@ mach_vm_region_info_64(
                        }
 
                        if (entry->is_sub_map)
-                               nmap = entry->object.sub_map;
+                               nmap = VME_SUBMAP(entry);
                        else
                                break;
 
@@ -365,11 +386,11 @@ mach_vm_region_info_64(
 
                /* cmap is read-locked; we have a real entry */
 
-               object = entry->object.vm_object;
-               region.vir_start = entry->vme_start;
-               region.vir_end = entry->vme_end;
-               region.vir_object = (vm_offset_t) object;
-               region.vir_offset = entry->offset;
+               object = VME_OBJECT(entry);
+               region.vir_start = (natural_t) entry->vme_start;
+               region.vir_end = (natural_t) entry->vme_end;
+               region.vir_object = (natural_t)(uintptr_t) object;
+               region.vir_offset = VME_OFFSET(entry);
                region.vir_needs_copy = entry->needs_copy;
                region.vir_protection = entry->protection;
                region.vir_max_protection = entry->max_protection;
@@ -378,7 +399,7 @@ mach_vm_region_info_64(
                region.vir_user_wired_count = entry->user_wired_count;
 
                used = 0;
-               room = size / sizeof(vm_info_object_t);
+               room = (unsigned int) (size / sizeof(vm_info_object_t));
 
                if (object == VM_OBJECT_NULL) {
                        vm_map_unlock_read(cmap);
@@ -397,29 +418,28 @@ mach_vm_region_info_64(
                                        &((vm_info_object_t *) addr)[used];
 
                                vio->vio_object =
-                                       (vm_offset_t) cobject;
+                                       (natural_t)(uintptr_t) cobject;
                                vio->vio_size =
-                                       cobject->size;
+                                       (natural_t) cobject->vo_size;
                                vio->vio_ref_count =
                                        cobject->ref_count;
                                vio->vio_resident_page_count =
                                        cobject->resident_page_count;
-                               vio->vio_absent_count =
-                                       cobject->absent_count;
                                vio->vio_copy =
-                                       (vm_offset_t) cobject->copy;
+                                       (natural_t)(uintptr_t) cobject->copy;
                                vio->vio_shadow =
-                                       (vm_offset_t) cobject->shadow;
+                                       (natural_t)(uintptr_t) cobject->shadow;
                                vio->vio_shadow_offset =
-                                       cobject->shadow_offset;
+                                       (natural_t) cobject->vo_shadow_offset;
                                vio->vio_paging_offset =
-                                       cobject->paging_offset;
+                                       (natural_t) cobject->paging_offset;
                                vio->vio_copy_strategy =
                                        cobject->copy_strategy;
                                vio->vio_last_alloc =
-                                       cobject->last_alloc;
+                                       (vm_offset_t) cobject->last_alloc;
                                vio->vio_paging_in_progress =
-                                       cobject->paging_in_progress;
+                                       cobject->paging_in_progress +
+                                       cobject->activity_in_progress;
                                vio->vio_pager_created =
                                        cobject->pager_created;
                                vio->vio_pager_initialized =
@@ -435,10 +455,10 @@ mach_vm_region_info_64(
                                vio->vio_alive =
                                        cobject->alive;
                                vio->vio_purgable =
-                                       (cobject->purgable != VM_OBJECT_NONPURGABLE);
+                                       (cobject->purgable != VM_PURGABLE_DENY);
                                vio->vio_purgable_volatile =
-                                       (cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
-                                        cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
+                                       (cobject->purgable == VM_PURGABLE_VOLATILE ||
+                                        cobject->purgable == VM_PURGABLE_EMPTY);
                        }
 
                        used++;
@@ -461,15 +481,21 @@ mach_vm_region_info_64(
 
                if (size != 0)
                        kmem_free(ipc_kernel_map, addr, size);
-               size = round_page_32(2 * used * sizeof(vm_info_object_t));
+               size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
+                                        VM_MAP_PAGE_MASK(ipc_kernel_map));
 
-               kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
+               kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
                if (kr != KERN_SUCCESS)
                        return KERN_RESOURCE_SHORTAGE;
 
-               kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
-                                vm_map_round_page(addr + size),
-                                VM_PROT_READ|VM_PROT_WRITE, FALSE);
+               kr = vm_map_wire(
+                       ipc_kernel_map,
+                       vm_map_trunc_page(addr,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                       vm_map_round_page(addr + size,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                       VM_PROT_READ|VM_PROT_WRITE,
+                       FALSE);
                assert(kr == KERN_SUCCESS);
        }
 
@@ -481,20 +507,26 @@ mach_vm_region_info_64(
                if (size != 0)
                        kmem_free(ipc_kernel_map, addr, size);
        } else {
-               vm_size_t size_used =
-                       round_page_32(used * sizeof(vm_info_object_t));
-
-               kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
-                                  vm_map_round_page(addr + size_used), FALSE);
+               vm_size_t size_used = (used * sizeof(vm_info_object_t));
+               vm_size_t vmsize_used = vm_map_round_page(size_used,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map));
+
+               kr = vm_map_unwire(
+                       ipc_kernel_map,
+                       vm_map_trunc_page(addr,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                       vm_map_round_page(addr + size_used,
+                                         VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                       FALSE);
                assert(kr == KERN_SUCCESS);
 
                kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                                   (vm_map_size_t)size_used, TRUE, &copy);
                assert(kr == KERN_SUCCESS);
 
-               if (size != size_used)
+               if (size != vmsize_used)
                        kmem_free(ipc_kernel_map,
-                                 addr + size_used, size - size_used);
+                                 addr + vmsize_used, size - vmsize_used);
        }
 
        *regionp = region;
@@ -507,7 +539,7 @@ mach_vm_region_info_64(
  * Return an array of virtual pages that are mapped to a task.
  */
 kern_return_t
-vm_mapped_pages_info(
+vm32_mapped_pages_info(
        __DEBUG_ONLY vm_map_t                   map,
        __DEBUG_ONLY page_address_array_t       *pages,
        __DEBUG_ONLY mach_msg_type_number_t     *pages_count)
@@ -519,22 +551,28 @@ vm_mapped_pages_info(
        vm_size_t       size, size_used;
        unsigned int    actual, space;
        page_address_array_t list;
-       vm_offset_t     addr;
+       vm_offset_t     addr = 0;
 
        if (map == VM_MAP_NULL)
            return (KERN_INVALID_ARGUMENT);
 
        pmap = map->pmap;
        size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
-       size = round_page_32(size);
+       size = vm_map_round_page(size,
+                                VM_MAP_PAGE_MASK(ipc_kernel_map));
 
        for (;;) {
-           (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
-           (void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
-                                vm_map_round_page(addr + size), FALSE);
+           (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+           (void) vm_map_unwire(
+                   ipc_kernel_map,
+                   vm_map_trunc_page(addr,
+                                     VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                   vm_map_round_page(addr + size,
+                                     VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                   FALSE);
 
            list = (page_address_array_t) addr;
-           space = size / sizeof(vm_offset_t);
+           space = (unsigned int) (size / sizeof(vm_offset_t));
 
            actual = pmap_list_resident_pages(pmap,
                                        list,
@@ -550,7 +588,8 @@ vm_mapped_pages_info(
            /*
             * Try again, doubling the size
             */
-           size = round_page_32(actual * sizeof(vm_offset_t));
+           size = vm_map_round_page(actual * sizeof(vm_offset_t),
+                                    VM_MAP_PAGE_MASK(ipc_kernel_map));
        }
        if (actual == 0) {
            *pages = 0;
@@ -558,20 +597,28 @@ vm_mapped_pages_info(
            (void) kmem_free(ipc_kernel_map, addr, size);
        }
        else {
+           vm_size_t vmsize_used;
            *pages_count = actual;
-           size_used = round_page_32(actual * sizeof(vm_offset_t));
-           (void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
-                               vm_map_round_page(addr + size), 
-                               VM_PROT_READ|VM_PROT_WRITE, FALSE);
+           size_used = (actual * sizeof(vm_offset_t));
+           vmsize_used = vm_map_round_page(size_used,
+                                           VM_MAP_PAGE_MASK(ipc_kernel_map));
+           (void) vm_map_wire(
+                   ipc_kernel_map,
+                   vm_map_trunc_page(addr,
+                                     VM_MAP_PAGE_MASK(ipc_kernel_map)),
+                   vm_map_round_page(addr + size,
+                                     VM_MAP_PAGE_MASK(ipc_kernel_map)), 
+                   VM_PROT_READ|VM_PROT_WRITE,
+                   FALSE);
            (void) vm_map_copyin(ipc_kernel_map,
                                (vm_map_address_t)addr,
                                (vm_map_size_t)size_used,
                                TRUE,
                                (vm_map_copy_t *)pages);
-           if (size_used != size) {
+           if (vmsize_used != size) {
                (void) kmem_free(ipc_kernel_map,
-                               addr + size_used,
-                               size - size_used);
+                               addr + vmsize_used,
+                               size - vmsize_used);
            }
        }
 
@@ -579,6 +626,8 @@ vm_mapped_pages_info(
 #endif /* MACH_VM_DEBUG */
 }
 
+#endif /* VM32_SUPPORT */
+
 /*
  *     Routine:        host_virtual_physical_table_info
  *     Purpose:
@@ -600,7 +649,7 @@ host_virtual_physical_table_info(
 #if !MACH_VM_DEBUG
         return KERN_FAILURE;
 #else
-       vm_offset_t addr;
+       vm_offset_t addr = 0;
        vm_size_t size = 0;
        hash_info_bucket_t *info;
        unsigned int potential, actual;
@@ -624,13 +673,15 @@ host_virtual_physical_table_info(
                if (info != *infop)
                        kmem_free(ipc_kernel_map, addr, size);
 
-               size = round_page_32(actual * sizeof *info);
-               kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+               size = vm_map_round_page(actual * sizeof *info,
+                                        VM_MAP_PAGE_MASK(ipc_kernel_map));
+               kr = vm_allocate(ipc_kernel_map, &addr, size,
+                                VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
                if (kr != KERN_SUCCESS)
                        return KERN_RESOURCE_SHORTAGE;
 
                info = (hash_info_bucket_t *) addr;
-               potential = size/sizeof *info;
+               potential = (unsigned int) (size/sizeof (*info));
        }
 
        if (info == *infop) {
@@ -643,12 +694,13 @@ host_virtual_physical_table_info(
                *countp = 0;
        } else {
                vm_map_copy_t copy;
-               vm_size_t used;
+               vm_size_t used, vmused;
 
-               used = round_page_32(actual * sizeof *info);
+               used = (actual * sizeof(*info));
+               vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));
 
-               if (used != size)
-                       kmem_free(ipc_kernel_map, addr + used, size - used);
+               if (vmused != size)
+                       kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
 
                kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                                   (vm_map_size_t)used, TRUE, &copy);
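
A note on the size accounting introduced throughout this diff (illustrative only, not part of the change): the new code allocates and wires the out-of-line buffer in whole ipc_kernel_map pages, hands only the bytes actually used to vm_map_copyin(), and frees any full pages left past the rounded-up used size. The minimal user-space sketch below models that arithmetic with an assumed 4 KiB page mask standing in for VM_MAP_PAGE_MASK(ipc_kernel_map); the entry size and count are hypothetical.

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK_4K 0xFFFull   /* assumed stand-in for VM_MAP_PAGE_MASK(ipc_kernel_map) */

/* Round a byte count up to the next page boundary, as vm_map_round_page() does. */
static uint64_t
round_page_mask(uint64_t size, uint64_t mask)
{
	return (size + mask) & ~mask;
}

int
main(void)
{
	uint64_t entry_size  = 84;   /* hypothetical sizeof(vm_info_object_t) */
	uint64_t used        = 37;   /* objects actually written into the buffer */

	/* The allocation is over-sized (2x) and page-rounded, mirroring the retry loop. */
	uint64_t size        = round_page_mask(2 * used * entry_size, PAGE_MASK_4K);
	/* Only the bytes actually used are handed to vm_map_copyin() ... */
	uint64_t size_used   = used * entry_size;
	/* ... but wiring and freeing happen on page-rounded boundaries. */
	uint64_t vmsize_used = round_page_mask(size_used, PAGE_MASK_4K);

	printf("alloc %llu bytes, copy out %llu, keep %llu wired\n",
	    (unsigned long long)size, (unsigned long long)size_used,
	    (unsigned long long)vmsize_used);
	if (size != vmsize_used)
		printf("free tail: %llu bytes starting at offset %llu\n",
		    (unsigned long long)(size - vmsize_used),
		    (unsigned long long)vmsize_used);
	return 0;
}

The split between size_used (bytes given to vm_map_copyin) and vmsize_used (pages retained) is what lets the updated code free the unused tail with the map's own page mask instead of the old fixed round_page_32() rounding.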