diff --git a/osfmk/vm/vm_debug.c b/osfmk/vm/vm_debug.c
index 87b81f23c6358792c5abfecade7942047bfd7c1f..1dfa947efcbc20394e6e364d1e239d599ac089f0 100644
--- a/osfmk/vm/vm_debug.c
+++ b/osfmk/vm/vm_debug.c
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ * 
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  * 
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -62,7 +65,6 @@
 #include <mach_vm_debug.h>
 #include <mach/kern_return.h>
 #include <mach/mach_host_server.h>
-#include <mach/vm_map_server.h>
 #include <mach_debug/vm_info.h>
 #include <mach_debug/page_info.h>
 #include <mach_debug/hash_info.h>
 #include <vm/vm_debug.h>
 #endif
 
+#if !MACH_VM_DEBUG
+#define __DEBUG_ONLY __unused
+#else /* !MACH_VM_DEBUG */
+#define __DEBUG_ONLY
+#endif /* !MACH_VM_DEBUG */
+
+#if VM32_SUPPORT
+
+#include <mach/vm32_map_server.h>
+#include <mach/vm_map.h>
+
 /*
  *     Routine:        mach_vm_region_info [kernel call]
  *     Purpose:
  */
 
 kern_return_t
-mach_vm_region_info(
-       vm_map_t                map,
-       vm_offset_t             address,
-       vm_info_region_t        *regionp,
-       vm_info_object_array_t  *objectsp,
-       mach_msg_type_number_t  *objectsCntp)
+vm32_region_info(
+       __DEBUG_ONLY vm_map_t                   map,
+       __DEBUG_ONLY vm32_offset_t              address,
+       __DEBUG_ONLY vm_info_region_t           *regionp,
+       __DEBUG_ONLY vm_info_object_array_t     *objectsp,
+       __DEBUG_ONLY mach_msg_type_number_t     *objectsCntp)
 {
 #if !MACH_VM_DEBUG
         return KERN_FAILURE;
@@ -133,7 +146,9 @@ mach_vm_region_info(
                for (cmap = map;; cmap = nmap) {
                        /* cmap is read-locked */
 
-                       if (!vm_map_lookup_entry(cmap, address, &entry)) {
+                       if (!vm_map_lookup_entry(cmap, 
+                               (vm_map_address_t)address, &entry)) {
+
                                entry = entry->vme_next;
                                if (entry == vm_map_to_entry(cmap)) {
                                        vm_map_unlock_read(cmap);
@@ -158,10 +173,10 @@ mach_vm_region_info(
                /* cmap is read-locked; we have a real entry */
 
                object = entry->object.vm_object;
-               region.vir_start = entry->vme_start;
-               region.vir_end = entry->vme_end;
-               region.vir_object = (vm_offset_t) object;
-               region.vir_offset = entry->offset;
+               region.vir_start = (natural_t) entry->vme_start;
+               region.vir_end = (natural_t) entry->vme_end;
+               region.vir_object = (natural_t)(uintptr_t) object;
+               region.vir_offset = (natural_t) entry->offset;
                region.vir_needs_copy = entry->needs_copy;
                region.vir_protection = entry->protection;
                region.vir_max_protection = entry->max_protection;
@@ -170,7 +185,7 @@ mach_vm_region_info(
                region.vir_user_wired_count = entry->user_wired_count;
 
                used = 0;
-               room = size / sizeof(vm_info_object_t);
+               room = (unsigned int) (size / sizeof(vm_info_object_t));
 
                if (object == VM_OBJECT_NULL) {
                        vm_map_unlock_read(cmap);
@@ -189,29 +204,28 @@ mach_vm_region_info(
                                        &((vm_info_object_t *) addr)[used];
 
                                vio->vio_object =
-                                       (vm_offset_t) cobject;
+                                       (natural_t)(uintptr_t) cobject;
                                vio->vio_size =
-                                       cobject->size;
+                                       (natural_t) cobject->vo_size;
                                vio->vio_ref_count =
                                        cobject->ref_count;
                                vio->vio_resident_page_count =
                                        cobject->resident_page_count;
-                               vio->vio_absent_count =
-                                       cobject->absent_count;
                                vio->vio_copy =
-                                       (vm_offset_t) cobject->copy;
+                                       (natural_t)(uintptr_t) cobject->copy;
                                vio->vio_shadow =
-                                       (vm_offset_t) cobject->shadow;
+                                       (natural_t)(uintptr_t) cobject->shadow;
                                vio->vio_shadow_offset =
-                                       cobject->shadow_offset;
+                                       (natural_t) cobject->vo_shadow_offset;
                                vio->vio_paging_offset =
-                                       cobject->paging_offset;
+                                       (natural_t) cobject->paging_offset;
                                vio->vio_copy_strategy =
                                        cobject->copy_strategy;
                                vio->vio_last_alloc =
-                                       cobject->last_alloc;
+                                       (vm_offset_t) cobject->last_alloc;
                                vio->vio_paging_in_progress =
-                                       cobject->paging_in_progress;
+                                       cobject->paging_in_progress +
+                                       cobject->activity_in_progress;
                                vio->vio_pager_created =
                                        cobject->pager_created;
                                vio->vio_pager_initialized =
@@ -226,10 +240,11 @@ mach_vm_region_info(
                                        cobject->temporary;
                                vio->vio_alive =
                                        cobject->alive;
-                               vio->vio_lock_in_progress =
-                                       cobject->lock_in_progress;
-                               vio->vio_lock_restart =
-                                       cobject->lock_restart;
+                               vio->vio_purgable =
+                                       (cobject->purgable != VM_PURGABLE_DENY);
+                               vio->vio_purgable_volatile =
+                                       (cobject->purgable == VM_PURGABLE_VOLATILE ||
+                                        cobject->purgable == VM_PURGABLE_EMPTY);
                        }
 
                        used++;
@@ -252,14 +267,15 @@ mach_vm_region_info(
 
                if (size != 0)
                        kmem_free(ipc_kernel_map, addr, size);
-               size = round_page_32(2 * used * sizeof(vm_info_object_t));
+               size = round_page(2 * used * sizeof(vm_info_object_t));
 
-               kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
+               kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
                if (kr != KERN_SUCCESS)
                        return KERN_RESOURCE_SHORTAGE;
 
-               kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
-                                    VM_PROT_READ|VM_PROT_WRITE, FALSE);
+               kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
+                                vm_map_round_page(addr + size),
+                                VM_PROT_READ|VM_PROT_WRITE, FALSE);
                assert(kr == KERN_SUCCESS);
        }
 
@@ -272,13 +288,14 @@ mach_vm_region_info(
                        kmem_free(ipc_kernel_map, addr, size);
        } else {
                vm_size_t size_used =
-                       round_page_32(used * sizeof(vm_info_object_t));
+                       round_page(used * sizeof(vm_info_object_t));
 
-               kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
+               kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
+                                  vm_map_round_page(addr + size_used), FALSE);
                assert(kr == KERN_SUCCESS);
 
-               kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
-                                  TRUE, &copy);
+               kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
+                                  (vm_map_size_t)size_used, TRUE, &copy);
                assert(kr == KERN_SUCCESS);
 
                if (size != size_used)
@@ -292,17 +309,18 @@ mach_vm_region_info(
        return KERN_SUCCESS;
 #endif /* MACH_VM_DEBUG */
 }
+
 /*
 *  Temporary call for 64 bit data path interface transition
  */
 
 kern_return_t
-mach_vm_region_info_64(
-       vm_map_t                map,
-       vm_offset_t             address,
-       vm_info_region_64_t     *regionp,
-       vm_info_object_array_t  *objectsp,
-       mach_msg_type_number_t  *objectsCntp)
+vm32_region_info_64(
+       __DEBUG_ONLY vm_map_t                   map,
+       __DEBUG_ONLY vm32_offset_t              address,
+       __DEBUG_ONLY vm_info_region_64_t        *regionp,
+       __DEBUG_ONLY vm_info_object_array_t     *objectsp,
+       __DEBUG_ONLY mach_msg_type_number_t     *objectsCntp)
 {
 #if !MACH_VM_DEBUG
         return KERN_FAILURE;
@@ -357,9 +375,9 @@ mach_vm_region_info_64(
                /* cmap is read-locked; we have a real entry */
 
                object = entry->object.vm_object;
-               region.vir_start = entry->vme_start;
-               region.vir_end = entry->vme_end;
-               region.vir_object = (vm_offset_t) object;
+               region.vir_start = (natural_t) entry->vme_start;
+               region.vir_end = (natural_t) entry->vme_end;
+               region.vir_object = (natural_t)(uintptr_t) object;
                region.vir_offset = entry->offset;
                region.vir_needs_copy = entry->needs_copy;
                region.vir_protection = entry->protection;
@@ -369,7 +387,7 @@ mach_vm_region_info_64(
                region.vir_user_wired_count = entry->user_wired_count;
 
                used = 0;
-               room = size / sizeof(vm_info_object_t);
+               room = (unsigned int) (size / sizeof(vm_info_object_t));
 
                if (object == VM_OBJECT_NULL) {
                        vm_map_unlock_read(cmap);
@@ -388,29 +406,28 @@ mach_vm_region_info_64(
                                        &((vm_info_object_t *) addr)[used];
 
                                vio->vio_object =
-                                       (vm_offset_t) cobject;
+                                       (natural_t)(uintptr_t) cobject;
                                vio->vio_size =
-                                       cobject->size;
+                                       (natural_t) cobject->vo_size;
                                vio->vio_ref_count =
                                        cobject->ref_count;
                                vio->vio_resident_page_count =
                                        cobject->resident_page_count;
-                               vio->vio_absent_count =
-                                       cobject->absent_count;
                                vio->vio_copy =
-                                       (vm_offset_t) cobject->copy;
+                                       (natural_t)(uintptr_t) cobject->copy;
                                vio->vio_shadow =
-                                       (vm_offset_t) cobject->shadow;
+                                       (natural_t)(uintptr_t) cobject->shadow;
                                vio->vio_shadow_offset =
-                                       cobject->shadow_offset;
+                                       (natural_t) cobject->vo_shadow_offset;
                                vio->vio_paging_offset =
-                                       cobject->paging_offset;
+                                       (natural_t) cobject->paging_offset;
                                vio->vio_copy_strategy =
                                        cobject->copy_strategy;
                                vio->vio_last_alloc =
-                                       cobject->last_alloc;
+                                       (vm_offset_t) cobject->last_alloc;
                                vio->vio_paging_in_progress =
-                                       cobject->paging_in_progress;
+                                       cobject->paging_in_progress +
+                                       cobject->activity_in_progress;
                                vio->vio_pager_created =
                                        cobject->pager_created;
                                vio->vio_pager_initialized =
@@ -425,10 +442,11 @@ mach_vm_region_info_64(
                                        cobject->temporary;
                                vio->vio_alive =
                                        cobject->alive;
-                               vio->vio_lock_in_progress =
-                                       cobject->lock_in_progress;
-                               vio->vio_lock_restart =
-                                       cobject->lock_restart;
+                               vio->vio_purgable =
+                                       (cobject->purgable != VM_PURGABLE_DENY);
+                               vio->vio_purgable_volatile =
+                                       (cobject->purgable == VM_PURGABLE_VOLATILE ||
+                                        cobject->purgable == VM_PURGABLE_EMPTY);
                        }
 
                        used++;
@@ -451,14 +469,15 @@ mach_vm_region_info_64(
 
                if (size != 0)
                        kmem_free(ipc_kernel_map, addr, size);
-               size = round_page_32(2 * used * sizeof(vm_info_object_t));
+               size = round_page(2 * used * sizeof(vm_info_object_t));
 
-               kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
+               kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
                if (kr != KERN_SUCCESS)
                        return KERN_RESOURCE_SHORTAGE;
 
-               kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
-                                    VM_PROT_READ|VM_PROT_WRITE, FALSE);
+               kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
+                                vm_map_round_page(addr + size),
+                                VM_PROT_READ|VM_PROT_WRITE, FALSE);
                assert(kr == KERN_SUCCESS);
        }
 
@@ -471,13 +490,14 @@ mach_vm_region_info_64(
                        kmem_free(ipc_kernel_map, addr, size);
        } else {
                vm_size_t size_used =
-                       round_page_32(used * sizeof(vm_info_object_t));
+                       round_page(used * sizeof(vm_info_object_t));
 
-               kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
+               kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
+                                  vm_map_round_page(addr + size_used), FALSE);
                assert(kr == KERN_SUCCESS);
 
-               kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
-                                  TRUE, &copy);
+               kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
+                                  (vm_map_size_t)size_used, TRUE, &copy);
                assert(kr == KERN_SUCCESS);
 
                if (size != size_used)
@@ -495,10 +515,10 @@ mach_vm_region_info_64(
  * Return an array of virtual pages that are mapped to a task.
  */
 kern_return_t
-vm_mapped_pages_info(
-       vm_map_t                map,
-       page_address_array_t    *pages,
-       mach_msg_type_number_t  *pages_count)
+vm32_mapped_pages_info(
+       __DEBUG_ONLY vm_map_t                   map,
+       __DEBUG_ONLY page_address_array_t       *pages,
+       __DEBUG_ONLY mach_msg_type_number_t     *pages_count)
 {
 #if !MACH_VM_DEBUG
         return KERN_FAILURE;
@@ -514,14 +534,15 @@ vm_mapped_pages_info(
 
        pmap = map->pmap;
        size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
-       size = round_page_32(size);
+       size = round_page(size);
 
        for (;;) {
-           (void) vm_allocate(ipc_kernel_map, &addr, size, TRUE);
-           (void) vm_map_unwire(ipc_kernel_map, addr, addr + size, FALSE);
+           (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
+           (void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
+                                vm_map_round_page(addr + size), FALSE);
 
            list = (page_address_array_t) addr;
-           space = size / sizeof(vm_offset_t);
+           space = (unsigned int) (size / sizeof(vm_offset_t));
 
            actual = pmap_list_resident_pages(pmap,
                                        list,
@@ -537,7 +558,7 @@ vm_mapped_pages_info(
            /*
             * Try again, doubling the size
             */
-           size = round_page_32(actual * sizeof(vm_offset_t));
+           size = round_page(actual * sizeof(vm_offset_t));
        }
        if (actual == 0) {
            *pages = 0;
@@ -546,14 +567,13 @@ vm_mapped_pages_info(
        }
        else {
            *pages_count = actual;
-           size_used = round_page_32(actual * sizeof(vm_offset_t));
-           (void) vm_map_wire(ipc_kernel_map,
-                               addr, addr + size
+           size_used = round_page(actual * sizeof(vm_offset_t));
+           (void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
+                               vm_map_round_page(addr + size),
                                VM_PROT_READ|VM_PROT_WRITE, FALSE);
-           (void) vm_map_copyin(
-                               ipc_kernel_map,
-                               addr,
-                               size_used,
+           (void) vm_map_copyin(ipc_kernel_map,
+                               (vm_map_address_t)addr,
+                               (vm_map_size_t)size_used,
                                TRUE,
                                (vm_map_copy_t *)pages);
            if (size_used != size) {
@@ -567,6 +587,8 @@ vm_mapped_pages_info(
 #endif /* MACH_VM_DEBUG */
 }
 
+#endif /* VM32_SUPPORT */
+
 /*
  *     Routine:        host_virtual_physical_table_info
  *     Purpose:
@@ -581,15 +603,15 @@ vm_mapped_pages_info(
 
 kern_return_t
 host_virtual_physical_table_info(
-       host_t                          host,
-       hash_info_bucket_array_t        *infop,
-       mach_msg_type_number_t          *countp)
+       __DEBUG_ONLY host_t                     host,
+       __DEBUG_ONLY hash_info_bucket_array_t   *infop,
+       __DEBUG_ONLY mach_msg_type_number_t     *countp)
 {
 #if !MACH_VM_DEBUG
         return KERN_FAILURE;
 #else
        vm_offset_t addr;
-       vm_size_t size;
+       vm_size_t size = 0;
        hash_info_bucket_t *info;
        unsigned int potential, actual;
        kern_return_t kr;
@@ -612,13 +634,13 @@ host_virtual_physical_table_info(
                if (info != *infop)
                        kmem_free(ipc_kernel_map, addr, size);
 
-               size = round_page_32(actual * sizeof *info);
+               size = round_page(actual * sizeof *info);
                kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
                if (kr != KERN_SUCCESS)
                        return KERN_RESOURCE_SHORTAGE;
 
                info = (hash_info_bucket_t *) addr;
-               potential = size/sizeof *info;
+               potential = (unsigned int) (size/sizeof (*info));
        }
 
        if (info == *infop) {
@@ -633,13 +655,13 @@ host_virtual_physical_table_info(
                vm_map_copy_t copy;
                vm_size_t used;
 
-               used = round_page_32(actual * sizeof *info);
+               used = round_page(actual * sizeof *info);
 
                if (used != size)
                        kmem_free(ipc_kernel_map, addr + used, size - used);
 
-               kr = vm_map_copyin(ipc_kernel_map, addr, used,
-                                  TRUE, &copy);
+               kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
+                                  (vm_map_size_t)used, TRUE, &copy);
                assert(kr == KERN_SUCCESS);
 
                *infop = (hash_info_bucket_t *) copy;