/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_debug.c.
 *
 *	Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif	/* MACH_VM_DEBUG */

#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */
/*
 *	Routine:	mach_vm_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
 */
kern_return_t
mach_vm_region_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_t		*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
				(vm_map_address_t)address, &entry)) {

				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_OBJECT_NONPURGABLE);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
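
/*
 *	Illustrative sketch (not part of this file's build): a user-space
 *	client normally reaches mach_vm_region_info through the MIG-generated
 *	stub from mach_debug/mach_debug.defs, and only on kernels built with
 *	MACH_VM_DEBUG.  The names below mirror the server-side signature
 *	above; the exact user-level prototype is an assumption, so check the
 *	generated header on your system.
 *
 *		vm_info_region_t        region;
 *		vm_info_object_array_t  objects;
 *		mach_msg_type_number_t  objectsCnt;
 *		kern_return_t           kr;
 *
 *		kr = mach_vm_region_info(mach_task_self(), some_address,
 *					 &region, &objects, &objectsCnt);
 *		if (kr == KERN_SUCCESS && objectsCnt != 0) {
 *			/- objects arrives as out-of-line memory; release it -/
 *			(void) vm_deallocate(mach_task_self(),
 *					     (vm_address_t) objects,
 *					     objectsCnt * sizeof (vm_info_object_t));
 *		}
 */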
/*
 *	Temporary call for 64 bit data path interface transition
 */
kern_return_t
mach_vm_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_OBJECT_NONPURGABLE);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
/*
 *	Return an array of virtual pages that are mapped to a task.
 */
kern_return_t
vm_mapped_pages_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY page_address_array_t	*pages,
	__DEBUG_ONLY mach_msg_type_number_t	*pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t		pmap;
	vm_offset_t	addr;
	vm_size_t	size, size_used;
	unsigned int	actual, space;
	page_address_array_t list;

	if (map == VM_MAP_NULL)
		return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = round_page_32(size);

	for (;;) {
		(void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		(void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				     vm_map_round_page(addr + size), FALSE);

		list = (page_address_array_t) addr;
		space = size / sizeof(vm_offset_t);

		actual = pmap_list_resident_pages(pmap,
						  list,
						  space);
		if (actual <= space)
			break;

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again, doubling the size
		 */
		size = round_page_32(actual * sizeof(vm_offset_t));
	}

	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	} else {
		*pages_count = actual;
		size_used = round_page_32(actual * sizeof(vm_offset_t));
		(void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size),
				   VM_PROT_READ|VM_PROT_WRITE, FALSE);
		(void) vm_map_copyin(ipc_kernel_map,
				     (vm_map_address_t)addr,
				     (vm_map_size_t)size_used,
				     TRUE,
				     (vm_map_copy_t *)pages);
		if (size_used != size) {
			(void) kmem_free(ipc_kernel_map,
					 addr + size_used,
					 size - size_used);
		}
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}
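
/*
 *	Illustrative sketch (assumption, not kernel code): the returned
 *	page_address_array_t arrives as out-of-line memory in the caller's
 *	address space, so a user-space client would typically walk it and
 *	then release it.  Exact user-level prototype may differ; check the
 *	MIG-generated header.
 *
 *		page_address_array_t    pages;
 *		mach_msg_type_number_t  pages_count;
 *
 *		if (vm_mapped_pages_info(task, &pages, &pages_count) == KERN_SUCCESS) {
 *			for (mach_msg_type_number_t i = 0; i < pages_count; i++) {
 *				/- pages[i] is the start address of a resident page -/
 *			}
 *			(void) vm_deallocate(mach_task_self(),
 *					     (vm_address_t) pages,
 *					     pages_count * sizeof (vm_offset_t));
 *		}
 */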
/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t			host,
	__DEBUG_ONLY hash_info_bucket_array_t	*infop,
	__DEBUG_ONLY mach_msg_type_number_t	*countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr;
	vm_size_t size = 0;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page_32(actual * sizeof *info);
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = size/sizeof *info;
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = round_page_32(actual * sizeof *info);

		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
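
/*
 *	Illustrative sketch (assumption): under the CountInOut protocol the
 *	caller passes an in-line buffer and its capacity; the kernel either
 *	fills it in place or substitutes out-of-line memory and updates the
 *	count.  Buffer size and prototype below are hypothetical; check the
 *	MIG-generated header for the real user-level interface.
 *
 *		hash_info_bucket_t       buckets[64];	/- hypothetical in-line buffer -/
 *		hash_info_bucket_array_t info = buckets;
 *		mach_msg_type_number_t   count = 64;
 *
 *		if (host_virtual_physical_table_info(host, &info, &count) == KERN_SUCCESS) {
 *			/- count buckets are valid -/
 *			if (info != buckets) {
 *				/- kernel returned out-of-line data -/
 *				(void) vm_deallocate(mach_task_self(),
 *						     (vm_address_t) info,
 *						     count * sizeof (hash_info_bucket_t));
 *			}
 *		}
 */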