/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_debug.c.
 *
 *	Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif
/*
 *	Routine:	mach_vm_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
 */
kern_return_t
mach_vm_region_info(
	vm_map_t			map,
	vm_offset_t			address,
	vm_info_region_t		*regionp,
	vm_info_object_array_t		*objectsp,
	mach_msg_type_number_t		*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object = (vm_offset_t) cobject;
				vio->vio_size = cobject->size;
				vio->vio_ref_count = cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count = cobject->absent_count;
				vio->vio_copy = (vm_offset_t) cobject->copy;
				vio->vio_shadow = (vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset = cobject->shadow_offset;
				vio->vio_paging_offset = cobject->paging_offset;
				vio->vio_copy_strategy = cobject->copy_strategy;
				vio->vio_last_alloc = cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created = cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready = cobject->pager_ready;
				vio->vio_can_persist = cobject->can_persist;
				vio->vio_internal = cobject->internal;
				vio->vio_temporary = cobject->temporary;
				vio->vio_alive = cobject->alive;
				vio->vio_lock_in_progress =
					cobject->lock_in_progress;
				vio->vio_lock_restart = cobject->lock_restart;
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, addr,
				   addr + size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
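/*
 * Illustrative user-space sketch (not part of the original file): walking a
 * task's address space with mach_vm_region_info() via the MIG-generated
 * mach_debug interface.  The header location, the task port acquisition,
 * and dump_regions() itself are assumptions for the example; the kernel
 * must also be configured with MACH_VM_DEBUG for the call to succeed.
 */
#if 0	/* example only; never compiled into the kernel */
#include <stdio.h>
#include <mach/mach.h>
#include <mach_debug/mach_debug.h>	/* assumed MIG user header */

static void
dump_regions(task_t task)		/* e.g. from task_for_pid() */
{
	vm_offset_t addr = 0;
	vm_info_region_t region;
	vm_info_object_array_t objects;
	mach_msg_type_number_t objectsCnt;

	/* the kernel returns KERN_NO_SPACE past the last map entry */
	while (mach_vm_region_info(task, addr, &region,
				   &objects, &objectsCnt) == KERN_SUCCESS) {
		printf("[0x%x, 0x%x) %u object(s) in the shadow chain\n",
		       (unsigned) region.vir_start,
		       (unsigned) region.vir_end, objectsCnt);

		/* the object array arrives as out-of-line memory */
		if (objectsCnt != 0)
			(void) vm_deallocate(mach_task_self(),
			    (vm_offset_t) objects,
			    objectsCnt * sizeof(vm_info_object_t));

		addr = region.vir_end;	/* continue after this entry */
	}
}
#endif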
/*
 *	Temporary call for 64 bit data path interface transition
 */
kern_return_t
mach_vm_region_info_64(
	vm_map_t			map,
	vm_offset_t			address,
	vm_info_region_64_t		*regionp,
	vm_info_object_array_t		*objectsp,
	mach_msg_type_number_t		*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object = (vm_offset_t) cobject;
				vio->vio_size = cobject->size;
				vio->vio_ref_count = cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count = cobject->absent_count;
				vio->vio_copy = (vm_offset_t) cobject->copy;
				vio->vio_shadow = (vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset = cobject->shadow_offset;
				vio->vio_paging_offset = cobject->paging_offset;
				vio->vio_copy_strategy = cobject->copy_strategy;
				vio->vio_last_alloc = cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created = cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready = cobject->pager_ready;
				vio->vio_can_persist = cobject->can_persist;
				vio->vio_internal = cobject->internal;
				vio->vio_temporary = cobject->temporary;
				vio->vio_alive = cobject->alive;
				vio->vio_lock_in_progress =
					cobject->lock_in_progress;
				vio->vio_lock_restart = cobject->lock_restart;
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, addr,
				   addr + size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
/*
 *	Return an array of virtual pages that are mapped to a task.
 */
kern_return_t
vm_mapped_pages_info(
	vm_map_t		map,
	page_address_array_t	*pages,
	mach_msg_type_number_t	*pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t pmap;
	vm_offset_t addr;
	vm_size_t size, size_used;
	unsigned int actual, space;
	page_address_array_t list;

	if (map == VM_MAP_NULL)
		return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = round_page(size);

	for (;;) {
		(void) vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		(void) vm_map_unwire(ipc_kernel_map, addr,
				     addr + size, FALSE);

		list = (page_address_array_t) addr;
		space = size / sizeof(vm_offset_t);

		actual = pmap_list_resident_pages(pmap, list, space);
		if (actual <= space)
			break;

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again with a buffer sized for the actual count
		 */
		size = round_page(actual * sizeof(vm_offset_t));
	}

	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	} else {
		*pages_count = actual;
		size_used = round_page(actual * sizeof(vm_offset_t));
		(void) vm_map_wire(ipc_kernel_map,
				   addr, addr + size,
				   VM_PROT_READ|VM_PROT_WRITE, FALSE);
		(void) vm_map_copyin(ipc_kernel_map,
				     addr,
				     size_used,
				     TRUE,
				     (vm_map_copy_t *)pages);
		if (size_used != size) {
			(void) kmem_free(ipc_kernel_map,
					 addr + size_used,
					 size - size_used);
		}
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}
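/*
 * Illustrative user-space sketch (not part of the original file): consuming
 * vm_mapped_pages_info().  The page list comes back as out-of-line data
 * that the caller owns; the header location and helper name are assumptions
 * for the example only.
 */
#if 0	/* example only; never compiled into the kernel */
#include <stdio.h>
#include <mach/mach.h>
#include <mach_debug/mach_debug.h>	/* assumed MIG user header */

static void
show_resident_pages(task_t task)
{
	page_address_array_t pages;
	mach_msg_type_number_t count, i;

	if (vm_mapped_pages_info(task, &pages, &count) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++)
		printf("resident page at 0x%x\n", (unsigned) pages[i]);

	/* release the buffer the kernel built with vm_map_copyin() */
	(void) vm_deallocate(mach_task_self(), (vm_offset_t) pages,
	    count * sizeof(vm_offset_t));
}
#endif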
/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
host_virtual_physical_table_info(
	host_t				host,
	hash_info_bucket_array_t	*infop,
	mach_msg_type_number_t		*countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr;
	vm_size_t size;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page(actual * sizeof *info);
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = size/sizeof *info;
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = round_page(actual * sizeof *info);

		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		kr = vm_map_copyin(ipc_kernel_map, addr, used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
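/*
 * Illustrative user-space sketch (not part of the original file) of the
 * CountInOut protocol this routine obeys: the caller offers an in-line
 * buffer and its capacity; if the reply fits it is returned in place,
 * otherwise the stub substitutes out-of-line memory that the caller must
 * vm_deallocate().  The capacity and header location are assumptions.
 */
#if 0	/* example only; never compiled into the kernel */
#include <stdio.h>
#include <mach/mach.h>
#include <mach_debug/mach_debug.h>	/* assumed MIG user header */

static void
show_vp_table(void)
{
	hash_info_bucket_t buf[64];	/* arbitrary in-line capacity */
	hash_info_bucket_array_t info = buf;
	mach_msg_type_number_t count = 64;

	if (host_virtual_physical_table_info(mach_host_self(), &info,
					     &count) != KERN_SUCCESS)
		return;

	printf("VP table: %u buckets\n", count);

	/* deallocate only if the kernel switched to out-of-line data */
	if (info != buf)
		(void) vm_deallocate(mach_task_self(), (vm_offset_t) info,
		    count * sizeof(hash_info_bucket_t));
}
#endif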