/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	vm/vm_debug.c.
 *
 * Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif

/*
 * When MACH_VM_DEBUG is off, every parameter of these calls is unused;
 * __DEBUG_ONLY suppresses the unused-parameter warnings in that build.
 */
#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */
90 * Routine: mach_vm_region_info [kernel call]
92 * Retrieve information about a VM region,
93 * including info about the object chain.
97 * KERN_SUCCESS Retrieve region/object info.
98 * KERN_INVALID_TASK The map is null.
99 * KERN_NO_SPACE There is no entry at/after the address.
100 * KERN_RESOURCE_SHORTAGE Can't allocate memory.
105 __DEBUG_ONLY vm_map_t map
,
106 __DEBUG_ONLY vm_offset_t address
,
107 __DEBUG_ONLY vm_info_region_t
*regionp
,
108 __DEBUG_ONLY vm_info_object_array_t
*objectsp
,
109 __DEBUG_ONLY mach_msg_type_number_t
*objectsCntp
)
115 vm_offset_t addr
; /* memory for OOL data */
116 vm_size_t size
; /* size of the memory */
117 unsigned int room
; /* room for this many objects */
118 unsigned int used
; /* actually this many objects */
119 vm_info_region_t region
;
122 if (map
== VM_MAP_NULL
)
123 return KERN_INVALID_TASK
;
125 size
= 0; /* no memory allocated yet */
128 vm_map_t cmap
; /* current map in traversal */
129 vm_map_t nmap
; /* next map to look at */
130 vm_map_entry_t entry
;
131 vm_object_t object
, cobject
, nobject
;
133 /* nothing is locked */
135 vm_map_lock_read(map
);
136 for (cmap
= map
;; cmap
= nmap
) {
137 /* cmap is read-locked */
139 if (!vm_map_lookup_entry(cmap
,
140 (vm_map_address_t
)address
, &entry
)) {
142 entry
= entry
->vme_next
;
143 if (entry
== vm_map_to_entry(cmap
)) {
144 vm_map_unlock_read(cmap
);
146 kmem_free(ipc_kernel_map
,
148 return KERN_NO_SPACE
;
152 if (entry
->is_sub_map
)
153 nmap
= entry
->object
.sub_map
;
157 /* move down to the lower map */
159 vm_map_lock_read(nmap
);
160 vm_map_unlock_read(cmap
);
163 /* cmap is read-locked; we have a real entry */
165 object
= entry
->object
.vm_object
;
166 region
.vir_start
= entry
->vme_start
;
167 region
.vir_end
= entry
->vme_end
;
168 region
.vir_object
= (vm_offset_t
) object
;
169 region
.vir_offset
= entry
->offset
;
170 region
.vir_needs_copy
= entry
->needs_copy
;
171 region
.vir_protection
= entry
->protection
;
172 region
.vir_max_protection
= entry
->max_protection
;
173 region
.vir_inheritance
= entry
->inheritance
;
174 region
.vir_wired_count
= entry
->wired_count
;
175 region
.vir_user_wired_count
= entry
->user_wired_count
;
178 room
= size
/ sizeof(vm_info_object_t
);
180 if (object
== VM_OBJECT_NULL
) {
181 vm_map_unlock_read(cmap
);
182 /* no memory needed */
186 vm_object_lock(object
);
187 vm_map_unlock_read(cmap
);
189 for (cobject
= object
;; cobject
= nobject
) {
190 /* cobject is locked */
193 vm_info_object_t
*vio
=
194 &((vm_info_object_t
*) addr
)[used
];
197 (vm_offset_t
) cobject
;
202 vio
->vio_resident_page_count
=
203 cobject
->resident_page_count
;
204 vio
->vio_absent_count
=
205 cobject
->absent_count
;
207 (vm_offset_t
) cobject
->copy
;
209 (vm_offset_t
) cobject
->shadow
;
210 vio
->vio_shadow_offset
=
211 cobject
->shadow_offset
;
212 vio
->vio_paging_offset
=
213 cobject
->paging_offset
;
214 vio
->vio_copy_strategy
=
215 cobject
->copy_strategy
;
216 vio
->vio_last_alloc
=
218 vio
->vio_paging_in_progress
=
219 cobject
->paging_in_progress
;
220 vio
->vio_pager_created
=
221 cobject
->pager_created
;
222 vio
->vio_pager_initialized
=
223 cobject
->pager_initialized
;
224 vio
->vio_pager_ready
=
225 cobject
->pager_ready
;
226 vio
->vio_can_persist
=
227 cobject
->can_persist
;
235 (cobject
->purgable
!= VM_OBJECT_NONPURGABLE
);
236 vio
->vio_purgable_volatile
=
237 (cobject
->purgable
== VM_OBJECT_PURGABLE_VOLATILE
||
238 cobject
->purgable
== VM_OBJECT_PURGABLE_EMPTY
);
242 nobject
= cobject
->shadow
;
243 if (nobject
== VM_OBJECT_NULL
) {
244 vm_object_unlock(cobject
);
248 vm_object_lock(nobject
);
249 vm_object_unlock(cobject
);
257 /* must allocate more memory */
260 kmem_free(ipc_kernel_map
, addr
, size
);
261 size
= round_page_32(2 * used
* sizeof(vm_info_object_t
));
263 kr
= vm_allocate(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
);
264 if (kr
!= KERN_SUCCESS
)
265 return KERN_RESOURCE_SHORTAGE
;
267 kr
= vm_map_wire(ipc_kernel_map
, vm_map_trunc_page(addr
),
268 vm_map_round_page(addr
+ size
),
269 VM_PROT_READ
|VM_PROT_WRITE
, FALSE
);
270 assert(kr
== KERN_SUCCESS
);
273 /* free excess memory; make remaining memory pageable */
276 copy
= VM_MAP_COPY_NULL
;
279 kmem_free(ipc_kernel_map
, addr
, size
);
281 vm_size_t size_used
=
282 round_page_32(used
* sizeof(vm_info_object_t
));
284 kr
= vm_map_unwire(ipc_kernel_map
, vm_map_trunc_page(addr
),
285 vm_map_round_page(addr
+ size_used
), FALSE
);
286 assert(kr
== KERN_SUCCESS
);
288 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
289 (vm_map_size_t
)size_used
, TRUE
, ©
);
290 assert(kr
== KERN_SUCCESS
);
292 if (size
!= size_used
)
293 kmem_free(ipc_kernel_map
,
294 addr
+ size_used
, size
- size_used
);
298 *objectsp
= (vm_info_object_array_t
) copy
;
301 #endif /* MACH_VM_DEBUG */
305 * Temporary call for 64 bit data path interface transiotion
309 mach_vm_region_info_64(
310 __DEBUG_ONLY vm_map_t map
,
311 __DEBUG_ONLY vm_offset_t address
,
312 __DEBUG_ONLY vm_info_region_64_t
*regionp
,
313 __DEBUG_ONLY vm_info_object_array_t
*objectsp
,
314 __DEBUG_ONLY mach_msg_type_number_t
*objectsCntp
)
320 vm_offset_t addr
; /* memory for OOL data */
321 vm_size_t size
; /* size of the memory */
322 unsigned int room
; /* room for this many objects */
323 unsigned int used
; /* actually this many objects */
324 vm_info_region_64_t region
;
327 if (map
== VM_MAP_NULL
)
328 return KERN_INVALID_TASK
;
330 size
= 0; /* no memory allocated yet */
333 vm_map_t cmap
; /* current map in traversal */
334 vm_map_t nmap
; /* next map to look at */
335 vm_map_entry_t entry
;
336 vm_object_t object
, cobject
, nobject
;
338 /* nothing is locked */
340 vm_map_lock_read(map
);
341 for (cmap
= map
;; cmap
= nmap
) {
342 /* cmap is read-locked */
344 if (!vm_map_lookup_entry(cmap
, address
, &entry
)) {
345 entry
= entry
->vme_next
;
346 if (entry
== vm_map_to_entry(cmap
)) {
347 vm_map_unlock_read(cmap
);
349 kmem_free(ipc_kernel_map
,
351 return KERN_NO_SPACE
;
355 if (entry
->is_sub_map
)
356 nmap
= entry
->object
.sub_map
;
360 /* move down to the lower map */
362 vm_map_lock_read(nmap
);
363 vm_map_unlock_read(cmap
);
366 /* cmap is read-locked; we have a real entry */
368 object
= entry
->object
.vm_object
;
369 region
.vir_start
= entry
->vme_start
;
370 region
.vir_end
= entry
->vme_end
;
371 region
.vir_object
= (vm_offset_t
) object
;
372 region
.vir_offset
= entry
->offset
;
373 region
.vir_needs_copy
= entry
->needs_copy
;
374 region
.vir_protection
= entry
->protection
;
375 region
.vir_max_protection
= entry
->max_protection
;
376 region
.vir_inheritance
= entry
->inheritance
;
377 region
.vir_wired_count
= entry
->wired_count
;
378 region
.vir_user_wired_count
= entry
->user_wired_count
;
381 room
= size
/ sizeof(vm_info_object_t
);
383 if (object
== VM_OBJECT_NULL
) {
384 vm_map_unlock_read(cmap
);
385 /* no memory needed */
389 vm_object_lock(object
);
390 vm_map_unlock_read(cmap
);
392 for (cobject
= object
;; cobject
= nobject
) {
393 /* cobject is locked */
396 vm_info_object_t
*vio
=
397 &((vm_info_object_t
*) addr
)[used
];
400 (vm_offset_t
) cobject
;
405 vio
->vio_resident_page_count
=
406 cobject
->resident_page_count
;
407 vio
->vio_absent_count
=
408 cobject
->absent_count
;
410 (vm_offset_t
) cobject
->copy
;
412 (vm_offset_t
) cobject
->shadow
;
413 vio
->vio_shadow_offset
=
414 cobject
->shadow_offset
;
415 vio
->vio_paging_offset
=
416 cobject
->paging_offset
;
417 vio
->vio_copy_strategy
=
418 cobject
->copy_strategy
;
419 vio
->vio_last_alloc
=
421 vio
->vio_paging_in_progress
=
422 cobject
->paging_in_progress
;
423 vio
->vio_pager_created
=
424 cobject
->pager_created
;
425 vio
->vio_pager_initialized
=
426 cobject
->pager_initialized
;
427 vio
->vio_pager_ready
=
428 cobject
->pager_ready
;
429 vio
->vio_can_persist
=
430 cobject
->can_persist
;
438 (cobject
->purgable
!= VM_OBJECT_NONPURGABLE
);
439 vio
->vio_purgable_volatile
=
440 (cobject
->purgable
== VM_OBJECT_PURGABLE_VOLATILE
||
441 cobject
->purgable
== VM_OBJECT_PURGABLE_EMPTY
);
445 nobject
= cobject
->shadow
;
446 if (nobject
== VM_OBJECT_NULL
) {
447 vm_object_unlock(cobject
);
451 vm_object_lock(nobject
);
452 vm_object_unlock(cobject
);
460 /* must allocate more memory */
463 kmem_free(ipc_kernel_map
, addr
, size
);
464 size
= round_page_32(2 * used
* sizeof(vm_info_object_t
));
466 kr
= vm_allocate(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
);
467 if (kr
!= KERN_SUCCESS
)
468 return KERN_RESOURCE_SHORTAGE
;
470 kr
= vm_map_wire(ipc_kernel_map
, vm_map_trunc_page(addr
),
471 vm_map_round_page(addr
+ size
),
472 VM_PROT_READ
|VM_PROT_WRITE
, FALSE
);
473 assert(kr
== KERN_SUCCESS
);
476 /* free excess memory; make remaining memory pageable */
479 copy
= VM_MAP_COPY_NULL
;
482 kmem_free(ipc_kernel_map
, addr
, size
);
484 vm_size_t size_used
=
485 round_page_32(used
* sizeof(vm_info_object_t
));
487 kr
= vm_map_unwire(ipc_kernel_map
, vm_map_trunc_page(addr
),
488 vm_map_round_page(addr
+ size_used
), FALSE
);
489 assert(kr
== KERN_SUCCESS
);
491 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
492 (vm_map_size_t
)size_used
, TRUE
, ©
);
493 assert(kr
== KERN_SUCCESS
);
495 if (size
!= size_used
)
496 kmem_free(ipc_kernel_map
,
497 addr
+ size_used
, size
- size_used
);
501 *objectsp
= (vm_info_object_array_t
) copy
;
504 #endif /* MACH_VM_DEBUG */
507 * Return an array of virtual pages that are mapped to a task.
510 vm_mapped_pages_info(
511 __DEBUG_ONLY vm_map_t map
,
512 __DEBUG_ONLY page_address_array_t
*pages
,
513 __DEBUG_ONLY mach_msg_type_number_t
*pages_count
)
519 vm_size_t size
, size_used
;
520 unsigned int actual
, space
;
521 page_address_array_t list
;
524 if (map
== VM_MAP_NULL
)
525 return (KERN_INVALID_ARGUMENT
);
528 size
= pmap_resident_count(pmap
) * sizeof(vm_offset_t
);
529 size
= round_page_32(size
);
532 (void) vm_allocate(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
);
533 (void) vm_map_unwire(ipc_kernel_map
, vm_map_trunc_page(addr
),
534 vm_map_round_page(addr
+ size
), FALSE
);
536 list
= (page_address_array_t
) addr
;
537 space
= size
/ sizeof(vm_offset_t
);
539 actual
= pmap_list_resident_pages(pmap
,
546 * Free memory if not enough
548 (void) kmem_free(ipc_kernel_map
, addr
, size
);
551 * Try again, doubling the size
553 size
= round_page_32(actual
* sizeof(vm_offset_t
));
558 (void) kmem_free(ipc_kernel_map
, addr
, size
);
561 *pages_count
= actual
;
562 size_used
= round_page_32(actual
* sizeof(vm_offset_t
));
563 (void) vm_map_wire(ipc_kernel_map
, vm_map_trunc_page(addr
),
564 vm_map_round_page(addr
+ size
),
565 VM_PROT_READ
|VM_PROT_WRITE
, FALSE
);
566 (void) vm_map_copyin(ipc_kernel_map
,
567 (vm_map_address_t
)addr
,
568 (vm_map_size_t
)size_used
,
570 (vm_map_copy_t
*)pages
);
571 if (size_used
!= size
) {
572 (void) kmem_free(ipc_kernel_map
,
578 return (KERN_SUCCESS
);
579 #endif /* MACH_VM_DEBUG */
583 * Routine: host_virtual_physical_table_info
585 * Return information about the VP table.
587 * Nothing locked. Obeys CountInOut protocol.
589 * KERN_SUCCESS Returned information.
590 * KERN_INVALID_HOST The host is null.
591 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
595 host_virtual_physical_table_info(
596 __DEBUG_ONLY host_t host
,
597 __DEBUG_ONLY hash_info_bucket_array_t
*infop
,
598 __DEBUG_ONLY mach_msg_type_number_t
*countp
)
605 hash_info_bucket_t
*info
;
606 unsigned int potential
, actual
;
609 if (host
== HOST_NULL
)
610 return KERN_INVALID_HOST
;
612 /* start with in-line data */
618 actual
= vm_page_info(info
, potential
);
619 if (actual
<= potential
)
622 /* allocate more memory */
625 kmem_free(ipc_kernel_map
, addr
, size
);
627 size
= round_page_32(actual
* sizeof *info
);
628 kr
= kmem_alloc_pageable(ipc_kernel_map
, &addr
, size
);
629 if (kr
!= KERN_SUCCESS
)
630 return KERN_RESOURCE_SHORTAGE
;
632 info
= (hash_info_bucket_t
*) addr
;
633 potential
= size
/sizeof *info
;
636 if (info
== *infop
) {
637 /* data fit in-line; nothing to deallocate */
640 } else if (actual
== 0) {
641 kmem_free(ipc_kernel_map
, addr
, size
);
648 used
= round_page_32(actual
* sizeof *info
);
651 kmem_free(ipc_kernel_map
, addr
+ used
, size
- used
);
653 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
654 (vm_map_size_t
)used
, TRUE
, ©
);
655 assert(kr
== KERN_SUCCESS
);
657 *infop
= (hash_info_bucket_t
*) copy
;
662 #endif /* MACH_VM_DEBUG */