2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
27 * Mach Operating System
28 * Copyright (c) 1991,1990 Carnegie Mellon University
29 * All Rights Reserved.
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
41 * Carnegie Mellon requests users of this software to return to
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
54 * File: vm/vm_debug.c.
58 * Exported kernel calls. See mach_debug/mach_debug.defs.
60 #include <mach_vm_debug.h>
61 #include <mach/kern_return.h>
62 #include <mach/mach_host_server.h>
63 #include <mach/vm_map_server.h>
64 #include <mach_debug/vm_info.h>
65 #include <mach_debug/page_info.h>
66 #include <mach_debug/hash_info.h>
69 #include <mach/machine/vm_types.h>
70 #include <mach/memory_object_types.h>
71 #include <mach/vm_prot.h>
72 #include <mach/vm_inherit.h>
73 #include <mach/vm_param.h>
74 #include <kern/thread.h>
75 #include <vm/vm_map.h>
76 #include <vm/vm_kern.h>
77 #include <vm/vm_object.h>
78 #include <kern/task.h>
79 #include <kern/host.h>
80 #include <ipc/ipc_port.h>
81 #include <vm/vm_debug.h>
85 #define __DEBUG_ONLY __unused
86 #else /* !MACH_VM_DEBUG */
88 #endif /* !MACH_VM_DEBUG */
91 * Routine: mach_vm_region_info [kernel call]
93 * Retrieve information about a VM region,
94 * including info about the object chain.
98 * KERN_SUCCESS Retrieve region/object info.
99 * KERN_INVALID_TASK The map is null.
100 * KERN_NO_SPACE There is no entry at/after the address.
101 * KERN_RESOURCE_SHORTAGE Can't allocate memory.
106 __DEBUG_ONLY vm_map_t map
,
107 __DEBUG_ONLY vm_offset_t address
,
108 __DEBUG_ONLY vm_info_region_t
*regionp
,
109 __DEBUG_ONLY vm_info_object_array_t
*objectsp
,
110 __DEBUG_ONLY mach_msg_type_number_t
*objectsCntp
)
116 vm_offset_t addr
; /* memory for OOL data */
117 vm_size_t size
; /* size of the memory */
118 unsigned int room
; /* room for this many objects */
119 unsigned int used
; /* actually this many objects */
120 vm_info_region_t region
;
123 if (map
== VM_MAP_NULL
)
124 return KERN_INVALID_TASK
;
126 size
= 0; /* no memory allocated yet */
129 vm_map_t cmap
; /* current map in traversal */
130 vm_map_t nmap
; /* next map to look at */
131 vm_map_entry_t entry
;
132 vm_object_t object
, cobject
, nobject
;
134 /* nothing is locked */
136 vm_map_lock_read(map
);
137 for (cmap
= map
;; cmap
= nmap
) {
138 /* cmap is read-locked */
140 if (!vm_map_lookup_entry(cmap
,
141 (vm_map_address_t
)address
, &entry
)) {
143 entry
= entry
->vme_next
;
144 if (entry
== vm_map_to_entry(cmap
)) {
145 vm_map_unlock_read(cmap
);
147 kmem_free(ipc_kernel_map
,
149 return KERN_NO_SPACE
;
153 if (entry
->is_sub_map
)
154 nmap
= entry
->object
.sub_map
;
158 /* move down to the lower map */
160 vm_map_lock_read(nmap
);
161 vm_map_unlock_read(cmap
);
164 /* cmap is read-locked; we have a real entry */
166 object
= entry
->object
.vm_object
;
167 region
.vir_start
= entry
->vme_start
;
168 region
.vir_end
= entry
->vme_end
;
169 region
.vir_object
= (vm_offset_t
) object
;
170 region
.vir_offset
= entry
->offset
;
171 region
.vir_needs_copy
= entry
->needs_copy
;
172 region
.vir_protection
= entry
->protection
;
173 region
.vir_max_protection
= entry
->max_protection
;
174 region
.vir_inheritance
= entry
->inheritance
;
175 region
.vir_wired_count
= entry
->wired_count
;
176 region
.vir_user_wired_count
= entry
->user_wired_count
;
179 room
= size
/ sizeof(vm_info_object_t
);
181 if (object
== VM_OBJECT_NULL
) {
182 vm_map_unlock_read(cmap
);
183 /* no memory needed */
187 vm_object_lock(object
);
188 vm_map_unlock_read(cmap
);
190 for (cobject
= object
;; cobject
= nobject
) {
191 /* cobject is locked */
194 vm_info_object_t
*vio
=
195 &((vm_info_object_t
*) addr
)[used
];
198 (vm_offset_t
) cobject
;
203 vio
->vio_resident_page_count
=
204 cobject
->resident_page_count
;
205 vio
->vio_absent_count
=
206 cobject
->absent_count
;
208 (vm_offset_t
) cobject
->copy
;
210 (vm_offset_t
) cobject
->shadow
;
211 vio
->vio_shadow_offset
=
212 cobject
->shadow_offset
;
213 vio
->vio_paging_offset
=
214 cobject
->paging_offset
;
215 vio
->vio_copy_strategy
=
216 cobject
->copy_strategy
;
217 vio
->vio_last_alloc
=
219 vio
->vio_paging_in_progress
=
220 cobject
->paging_in_progress
;
221 vio
->vio_pager_created
=
222 cobject
->pager_created
;
223 vio
->vio_pager_initialized
=
224 cobject
->pager_initialized
;
225 vio
->vio_pager_ready
=
226 cobject
->pager_ready
;
227 vio
->vio_can_persist
=
228 cobject
->can_persist
;
236 (cobject
->purgable
!= VM_OBJECT_NONPURGABLE
);
237 vio
->vio_purgable_volatile
=
238 (cobject
->purgable
== VM_OBJECT_PURGABLE_VOLATILE
||
239 cobject
->purgable
== VM_OBJECT_PURGABLE_EMPTY
);
243 nobject
= cobject
->shadow
;
244 if (nobject
== VM_OBJECT_NULL
) {
245 vm_object_unlock(cobject
);
249 vm_object_lock(nobject
);
250 vm_object_unlock(cobject
);
258 /* must allocate more memory */
261 kmem_free(ipc_kernel_map
, addr
, size
);
262 size
= round_page_32(2 * used
* sizeof(vm_info_object_t
));
264 kr
= vm_allocate(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
);
265 if (kr
!= KERN_SUCCESS
)
266 return KERN_RESOURCE_SHORTAGE
;
268 kr
= vm_map_wire(ipc_kernel_map
, vm_map_trunc_page(addr
),
269 vm_map_round_page(addr
+ size
),
270 VM_PROT_READ
|VM_PROT_WRITE
, FALSE
);
271 assert(kr
== KERN_SUCCESS
);
274 /* free excess memory; make remaining memory pageable */
277 copy
= VM_MAP_COPY_NULL
;
280 kmem_free(ipc_kernel_map
, addr
, size
);
282 vm_size_t size_used
=
283 round_page_32(used
* sizeof(vm_info_object_t
));
285 kr
= vm_map_unwire(ipc_kernel_map
, vm_map_trunc_page(addr
),
286 vm_map_round_page(addr
+ size_used
), FALSE
);
287 assert(kr
== KERN_SUCCESS
);
289 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
290 (vm_map_size_t
)size_used
, TRUE
, ©
);
291 assert(kr
== KERN_SUCCESS
);
293 if (size
!= size_used
)
294 kmem_free(ipc_kernel_map
,
295 addr
+ size_used
, size
- size_used
);
299 *objectsp
= (vm_info_object_array_t
) copy
;
302 #endif /* MACH_VM_DEBUG */
306  *	Temporary call for 64 bit data path interface transition
310 mach_vm_region_info_64(
311 __DEBUG_ONLY vm_map_t map
,
312 __DEBUG_ONLY vm_offset_t address
,
313 __DEBUG_ONLY vm_info_region_64_t
*regionp
,
314 __DEBUG_ONLY vm_info_object_array_t
*objectsp
,
315 __DEBUG_ONLY mach_msg_type_number_t
*objectsCntp
)
321 vm_offset_t addr
; /* memory for OOL data */
322 vm_size_t size
; /* size of the memory */
323 unsigned int room
; /* room for this many objects */
324 unsigned int used
; /* actually this many objects */
325 vm_info_region_64_t region
;
328 if (map
== VM_MAP_NULL
)
329 return KERN_INVALID_TASK
;
331 size
= 0; /* no memory allocated yet */
334 vm_map_t cmap
; /* current map in traversal */
335 vm_map_t nmap
; /* next map to look at */
336 vm_map_entry_t entry
;
337 vm_object_t object
, cobject
, nobject
;
339 /* nothing is locked */
341 vm_map_lock_read(map
);
342 for (cmap
= map
;; cmap
= nmap
) {
343 /* cmap is read-locked */
345 if (!vm_map_lookup_entry(cmap
, address
, &entry
)) {
346 entry
= entry
->vme_next
;
347 if (entry
== vm_map_to_entry(cmap
)) {
348 vm_map_unlock_read(cmap
);
350 kmem_free(ipc_kernel_map
,
352 return KERN_NO_SPACE
;
356 if (entry
->is_sub_map
)
357 nmap
= entry
->object
.sub_map
;
361 /* move down to the lower map */
363 vm_map_lock_read(nmap
);
364 vm_map_unlock_read(cmap
);
367 /* cmap is read-locked; we have a real entry */
369 object
= entry
->object
.vm_object
;
370 region
.vir_start
= entry
->vme_start
;
371 region
.vir_end
= entry
->vme_end
;
372 region
.vir_object
= (vm_offset_t
) object
;
373 region
.vir_offset
= entry
->offset
;
374 region
.vir_needs_copy
= entry
->needs_copy
;
375 region
.vir_protection
= entry
->protection
;
376 region
.vir_max_protection
= entry
->max_protection
;
377 region
.vir_inheritance
= entry
->inheritance
;
378 region
.vir_wired_count
= entry
->wired_count
;
379 region
.vir_user_wired_count
= entry
->user_wired_count
;
382 room
= size
/ sizeof(vm_info_object_t
);
384 if (object
== VM_OBJECT_NULL
) {
385 vm_map_unlock_read(cmap
);
386 /* no memory needed */
390 vm_object_lock(object
);
391 vm_map_unlock_read(cmap
);
393 for (cobject
= object
;; cobject
= nobject
) {
394 /* cobject is locked */
397 vm_info_object_t
*vio
=
398 &((vm_info_object_t
*) addr
)[used
];
401 (vm_offset_t
) cobject
;
406 vio
->vio_resident_page_count
=
407 cobject
->resident_page_count
;
408 vio
->vio_absent_count
=
409 cobject
->absent_count
;
411 (vm_offset_t
) cobject
->copy
;
413 (vm_offset_t
) cobject
->shadow
;
414 vio
->vio_shadow_offset
=
415 cobject
->shadow_offset
;
416 vio
->vio_paging_offset
=
417 cobject
->paging_offset
;
418 vio
->vio_copy_strategy
=
419 cobject
->copy_strategy
;
420 vio
->vio_last_alloc
=
422 vio
->vio_paging_in_progress
=
423 cobject
->paging_in_progress
;
424 vio
->vio_pager_created
=
425 cobject
->pager_created
;
426 vio
->vio_pager_initialized
=
427 cobject
->pager_initialized
;
428 vio
->vio_pager_ready
=
429 cobject
->pager_ready
;
430 vio
->vio_can_persist
=
431 cobject
->can_persist
;
439 (cobject
->purgable
!= VM_OBJECT_NONPURGABLE
);
440 vio
->vio_purgable_volatile
=
441 (cobject
->purgable
== VM_OBJECT_PURGABLE_VOLATILE
||
442 cobject
->purgable
== VM_OBJECT_PURGABLE_EMPTY
);
446 nobject
= cobject
->shadow
;
447 if (nobject
== VM_OBJECT_NULL
) {
448 vm_object_unlock(cobject
);
452 vm_object_lock(nobject
);
453 vm_object_unlock(cobject
);
461 /* must allocate more memory */
464 kmem_free(ipc_kernel_map
, addr
, size
);
465 size
= round_page_32(2 * used
* sizeof(vm_info_object_t
));
467 kr
= vm_allocate(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
);
468 if (kr
!= KERN_SUCCESS
)
469 return KERN_RESOURCE_SHORTAGE
;
471 kr
= vm_map_wire(ipc_kernel_map
, vm_map_trunc_page(addr
),
472 vm_map_round_page(addr
+ size
),
473 VM_PROT_READ
|VM_PROT_WRITE
, FALSE
);
474 assert(kr
== KERN_SUCCESS
);
477 /* free excess memory; make remaining memory pageable */
480 copy
= VM_MAP_COPY_NULL
;
483 kmem_free(ipc_kernel_map
, addr
, size
);
485 vm_size_t size_used
=
486 round_page_32(used
* sizeof(vm_info_object_t
));
488 kr
= vm_map_unwire(ipc_kernel_map
, vm_map_trunc_page(addr
),
489 vm_map_round_page(addr
+ size_used
), FALSE
);
490 assert(kr
== KERN_SUCCESS
);
492 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
493 (vm_map_size_t
)size_used
, TRUE
, ©
);
494 assert(kr
== KERN_SUCCESS
);
496 if (size
!= size_used
)
497 kmem_free(ipc_kernel_map
,
498 addr
+ size_used
, size
- size_used
);
502 *objectsp
= (vm_info_object_array_t
) copy
;
505 #endif /* MACH_VM_DEBUG */
508 * Return an array of virtual pages that are mapped to a task.
511 vm_mapped_pages_info(
512 __DEBUG_ONLY vm_map_t map
,
513 __DEBUG_ONLY page_address_array_t
*pages
,
514 __DEBUG_ONLY mach_msg_type_number_t
*pages_count
)
520 vm_size_t size
, size_used
;
521 unsigned int actual
, space
;
522 page_address_array_t list
;
525 if (map
== VM_MAP_NULL
)
526 return (KERN_INVALID_ARGUMENT
);
529 size
= pmap_resident_count(pmap
) * sizeof(vm_offset_t
);
530 size
= round_page_32(size
);
533 (void) vm_allocate(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
);
534 (void) vm_map_unwire(ipc_kernel_map
, vm_map_trunc_page(addr
),
535 vm_map_round_page(addr
+ size
), FALSE
);
537 list
= (page_address_array_t
) addr
;
538 space
= size
/ sizeof(vm_offset_t
);
540 actual
= pmap_list_resident_pages(pmap
,
547 * Free memory if not enough
549 (void) kmem_free(ipc_kernel_map
, addr
, size
);
552 * Try again, doubling the size
554 size
= round_page_32(actual
* sizeof(vm_offset_t
));
559 (void) kmem_free(ipc_kernel_map
, addr
, size
);
562 *pages_count
= actual
;
563 size_used
= round_page_32(actual
* sizeof(vm_offset_t
));
564 (void) vm_map_wire(ipc_kernel_map
, vm_map_trunc_page(addr
),
565 vm_map_round_page(addr
+ size
),
566 VM_PROT_READ
|VM_PROT_WRITE
, FALSE
);
567 (void) vm_map_copyin(ipc_kernel_map
,
568 (vm_map_address_t
)addr
,
569 (vm_map_size_t
)size_used
,
571 (vm_map_copy_t
*)pages
);
572 if (size_used
!= size
) {
573 (void) kmem_free(ipc_kernel_map
,
579 return (KERN_SUCCESS
);
580 #endif /* MACH_VM_DEBUG */
584 * Routine: host_virtual_physical_table_info
586 * Return information about the VP table.
588 * Nothing locked. Obeys CountInOut protocol.
590 * KERN_SUCCESS Returned information.
591 * KERN_INVALID_HOST The host is null.
592 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
596 host_virtual_physical_table_info(
597 __DEBUG_ONLY host_t host
,
598 __DEBUG_ONLY hash_info_bucket_array_t
*infop
,
599 __DEBUG_ONLY mach_msg_type_number_t
*countp
)
606 hash_info_bucket_t
*info
;
607 unsigned int potential
, actual
;
610 if (host
== HOST_NULL
)
611 return KERN_INVALID_HOST
;
613 /* start with in-line data */
619 actual
= vm_page_info(info
, potential
);
620 if (actual
<= potential
)
623 /* allocate more memory */
626 kmem_free(ipc_kernel_map
, addr
, size
);
628 size
= round_page_32(actual
* sizeof *info
);
629 kr
= kmem_alloc_pageable(ipc_kernel_map
, &addr
, size
);
630 if (kr
!= KERN_SUCCESS
)
631 return KERN_RESOURCE_SHORTAGE
;
633 info
= (hash_info_bucket_t
*) addr
;
634 potential
= size
/sizeof *info
;
637 if (info
== *infop
) {
638 /* data fit in-line; nothing to deallocate */
641 } else if (actual
== 0) {
642 kmem_free(ipc_kernel_map
, addr
, size
);
649 used
= round_page_32(actual
* sizeof *info
);
652 kmem_free(ipc_kernel_map
, addr
+ used
, size
- used
);
654 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
655 (vm_map_size_t
)used
, TRUE
, ©
);
656 assert(kr
== KERN_SUCCESS
);
658 *infop
= (hash_info_bucket_t
*) copy
;
663 #endif /* MACH_VM_DEBUG */