/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	vm/vm_debug.c.
 *
 * Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
65 #include <mach_vm_debug.h>
66 #include <mach/kern_return.h>
67 #include <mach/mach_host_server.h>
68 #include <mach_debug/vm_info.h>
69 #include <mach_debug/page_info.h>
70 #include <mach_debug/hash_info.h>
73 #include <mach/machine/vm_types.h>
74 #include <mach/memory_object_types.h>
75 #include <mach/vm_prot.h>
76 #include <mach/vm_inherit.h>
77 #include <mach/vm_param.h>
78 #include <kern/thread.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_object.h>
82 #include <kern/task.h>
83 #include <kern/host.h>
84 #include <ipc/ipc_port.h>
85 #include <vm/vm_debug.h>
/*
 * __DEBUG_ONLY marks parameters that are referenced only when
 * MACH_VM_DEBUG is configured in; without it they would trigger
 * unused-parameter warnings in the stub builds of these calls.
 *
 * NOTE(review): the extracted text was missing the opening #if and the
 * #else-branch definition; reconstructed here — verify against the
 * canonical source.
 */
#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */
96 #include <mach/vm32_map_server.h>
97 #include <mach/vm_map.h>
100 * Routine: mach_vm_region_info [kernel call]
102 * Retrieve information about a VM region,
103 * including info about the object chain.
107 * KERN_SUCCESS Retrieve region/object info.
108 * KERN_INVALID_TASK The map is null.
109 * KERN_NO_SPACE There is no entry at/after the address.
110 * KERN_RESOURCE_SHORTAGE Can't allocate memory.
115 __DEBUG_ONLY vm_map_t map
,
116 __DEBUG_ONLY vm32_offset_t address
,
117 __DEBUG_ONLY vm_info_region_t
*regionp
,
118 __DEBUG_ONLY vm_info_object_array_t
*objectsp
,
119 __DEBUG_ONLY mach_msg_type_number_t
*objectsCntp
)
125 vm_offset_t addr
= 0; /* memory for OOL data */
126 vm_size_t size
; /* size of the memory */
127 unsigned int room
; /* room for this many objects */
128 unsigned int used
; /* actually this many objects */
129 vm_info_region_t region
;
132 if (map
== VM_MAP_NULL
) {
133 return KERN_INVALID_TASK
;
136 size
= 0; /* no memory allocated yet */
139 vm_map_t cmap
; /* current map in traversal */
140 vm_map_t nmap
; /* next map to look at */
141 vm_map_entry_t entry
;
142 vm_object_t object
, cobject
, nobject
;
144 /* nothing is locked */
146 vm_map_lock_read(map
);
147 for (cmap
= map
;; cmap
= nmap
) {
148 /* cmap is read-locked */
150 if (!vm_map_lookup_entry(cmap
,
151 (vm_map_address_t
)address
, &entry
)) {
152 entry
= entry
->vme_next
;
153 if (entry
== vm_map_to_entry(cmap
)) {
154 vm_map_unlock_read(cmap
);
156 kmem_free(ipc_kernel_map
,
159 return KERN_NO_SPACE
;
163 if (entry
->is_sub_map
) {
164 nmap
= VME_SUBMAP(entry
);
169 /* move down to the lower map */
171 vm_map_lock_read(nmap
);
172 vm_map_unlock_read(cmap
);
175 /* cmap is read-locked; we have a real entry */
177 object
= VME_OBJECT(entry
);
178 region
.vir_start
= (natural_t
) entry
->vme_start
;
179 region
.vir_end
= (natural_t
) entry
->vme_end
;
180 region
.vir_object
= (natural_t
)(uintptr_t) object
;
181 region
.vir_offset
= (natural_t
) VME_OFFSET(entry
);
182 region
.vir_needs_copy
= entry
->needs_copy
;
183 region
.vir_protection
= entry
->protection
;
184 region
.vir_max_protection
= entry
->max_protection
;
185 region
.vir_inheritance
= entry
->inheritance
;
186 region
.vir_wired_count
= entry
->wired_count
;
187 region
.vir_user_wired_count
= entry
->user_wired_count
;
190 room
= (unsigned int) (size
/ sizeof(vm_info_object_t
));
192 if (object
== VM_OBJECT_NULL
) {
193 vm_map_unlock_read(cmap
);
194 /* no memory needed */
198 vm_object_lock(object
);
199 vm_map_unlock_read(cmap
);
201 for (cobject
= object
;; cobject
= nobject
) {
202 /* cobject is locked */
205 vm_info_object_t
*vio
=
206 &((vm_info_object_t
*) addr
)[used
];
209 (natural_t
)(uintptr_t) cobject
;
211 (natural_t
) cobject
->vo_size
;
214 vio
->vio_resident_page_count
=
215 cobject
->resident_page_count
;
217 (natural_t
)(uintptr_t) cobject
->copy
;
219 (natural_t
)(uintptr_t) cobject
->shadow
;
220 vio
->vio_shadow_offset
=
221 (natural_t
) cobject
->vo_shadow_offset
;
222 vio
->vio_paging_offset
=
223 (natural_t
) cobject
->paging_offset
;
224 vio
->vio_copy_strategy
=
225 cobject
->copy_strategy
;
226 vio
->vio_last_alloc
=
227 (vm_offset_t
) cobject
->last_alloc
;
228 vio
->vio_paging_in_progress
=
229 cobject
->paging_in_progress
+
230 cobject
->activity_in_progress
;
231 vio
->vio_pager_created
=
232 cobject
->pager_created
;
233 vio
->vio_pager_initialized
=
234 cobject
->pager_initialized
;
235 vio
->vio_pager_ready
=
236 cobject
->pager_ready
;
237 vio
->vio_can_persist
=
238 cobject
->can_persist
;
246 (cobject
->purgable
!= VM_PURGABLE_DENY
);
247 vio
->vio_purgable_volatile
=
248 (cobject
->purgable
== VM_PURGABLE_VOLATILE
||
249 cobject
->purgable
== VM_PURGABLE_EMPTY
);
253 nobject
= cobject
->shadow
;
254 if (nobject
== VM_OBJECT_NULL
) {
255 vm_object_unlock(cobject
);
259 vm_object_lock(nobject
);
260 vm_object_unlock(cobject
);
269 /* must allocate more memory */
272 kmem_free(ipc_kernel_map
, addr
, size
);
274 size
= vm_map_round_page(2 * used
* sizeof(vm_info_object_t
),
275 VM_MAP_PAGE_MASK(ipc_kernel_map
));
277 kr
= vm_allocate_kernel(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IPC
);
278 if (kr
!= KERN_SUCCESS
) {
279 return KERN_RESOURCE_SHORTAGE
;
282 kr
= vm_map_wire_kernel(
284 vm_map_trunc_page(addr
,
285 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
286 vm_map_round_page(addr
+ size
,
287 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
288 VM_PROT_READ
| VM_PROT_WRITE
,
291 assert(kr
== KERN_SUCCESS
);
294 /* free excess memory; make remaining memory pageable */
297 copy
= VM_MAP_COPY_NULL
;
300 kmem_free(ipc_kernel_map
, addr
, size
);
303 vm_size_t size_used
= (used
* sizeof(vm_info_object_t
));
304 vm_size_t vmsize_used
= vm_map_round_page(size_used
,
305 VM_MAP_PAGE_MASK(ipc_kernel_map
));
309 vm_map_trunc_page(addr
,
310 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
311 vm_map_round_page(addr
+ size_used
,
312 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
314 assert(kr
== KERN_SUCCESS
);
316 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
317 (vm_map_size_t
)size_used
, TRUE
, ©
);
318 assert(kr
== KERN_SUCCESS
);
320 if (size
!= vmsize_used
) {
321 kmem_free(ipc_kernel_map
,
322 addr
+ vmsize_used
, size
- vmsize_used
);
327 *objectsp
= (vm_info_object_array_t
) copy
;
330 #endif /* MACH_VM_DEBUG */
334 * Temporary call for 64 bit data path interface transiotion
339 __DEBUG_ONLY vm_map_t map
,
340 __DEBUG_ONLY vm32_offset_t address
,
341 __DEBUG_ONLY vm_info_region_64_t
*regionp
,
342 __DEBUG_ONLY vm_info_object_array_t
*objectsp
,
343 __DEBUG_ONLY mach_msg_type_number_t
*objectsCntp
)
349 vm_offset_t addr
= 0; /* memory for OOL data */
350 vm_size_t size
; /* size of the memory */
351 unsigned int room
; /* room for this many objects */
352 unsigned int used
; /* actually this many objects */
353 vm_info_region_64_t region
;
356 if (map
== VM_MAP_NULL
) {
357 return KERN_INVALID_TASK
;
360 size
= 0; /* no memory allocated yet */
363 vm_map_t cmap
; /* current map in traversal */
364 vm_map_t nmap
; /* next map to look at */
365 vm_map_entry_t entry
;
366 vm_object_t object
, cobject
, nobject
;
368 /* nothing is locked */
370 vm_map_lock_read(map
);
371 for (cmap
= map
;; cmap
= nmap
) {
372 /* cmap is read-locked */
374 if (!vm_map_lookup_entry(cmap
, address
, &entry
)) {
375 entry
= entry
->vme_next
;
376 if (entry
== vm_map_to_entry(cmap
)) {
377 vm_map_unlock_read(cmap
);
379 kmem_free(ipc_kernel_map
,
382 return KERN_NO_SPACE
;
386 if (entry
->is_sub_map
) {
387 nmap
= VME_SUBMAP(entry
);
392 /* move down to the lower map */
394 vm_map_lock_read(nmap
);
395 vm_map_unlock_read(cmap
);
398 /* cmap is read-locked; we have a real entry */
400 object
= VME_OBJECT(entry
);
401 region
.vir_start
= (natural_t
) entry
->vme_start
;
402 region
.vir_end
= (natural_t
) entry
->vme_end
;
403 region
.vir_object
= (natural_t
)(uintptr_t) object
;
404 region
.vir_offset
= VME_OFFSET(entry
);
405 region
.vir_needs_copy
= entry
->needs_copy
;
406 region
.vir_protection
= entry
->protection
;
407 region
.vir_max_protection
= entry
->max_protection
;
408 region
.vir_inheritance
= entry
->inheritance
;
409 region
.vir_wired_count
= entry
->wired_count
;
410 region
.vir_user_wired_count
= entry
->user_wired_count
;
413 room
= (unsigned int) (size
/ sizeof(vm_info_object_t
));
415 if (object
== VM_OBJECT_NULL
) {
416 vm_map_unlock_read(cmap
);
417 /* no memory needed */
421 vm_object_lock(object
);
422 vm_map_unlock_read(cmap
);
424 for (cobject
= object
;; cobject
= nobject
) {
425 /* cobject is locked */
428 vm_info_object_t
*vio
=
429 &((vm_info_object_t
*) addr
)[used
];
432 (natural_t
)(uintptr_t) cobject
;
434 (natural_t
) cobject
->vo_size
;
437 vio
->vio_resident_page_count
=
438 cobject
->resident_page_count
;
440 (natural_t
)(uintptr_t) cobject
->copy
;
442 (natural_t
)(uintptr_t) cobject
->shadow
;
443 vio
->vio_shadow_offset
=
444 (natural_t
) cobject
->vo_shadow_offset
;
445 vio
->vio_paging_offset
=
446 (natural_t
) cobject
->paging_offset
;
447 vio
->vio_copy_strategy
=
448 cobject
->copy_strategy
;
449 vio
->vio_last_alloc
=
450 (vm_offset_t
) cobject
->last_alloc
;
451 vio
->vio_paging_in_progress
=
452 cobject
->paging_in_progress
+
453 cobject
->activity_in_progress
;
454 vio
->vio_pager_created
=
455 cobject
->pager_created
;
456 vio
->vio_pager_initialized
=
457 cobject
->pager_initialized
;
458 vio
->vio_pager_ready
=
459 cobject
->pager_ready
;
460 vio
->vio_can_persist
=
461 cobject
->can_persist
;
469 (cobject
->purgable
!= VM_PURGABLE_DENY
);
470 vio
->vio_purgable_volatile
=
471 (cobject
->purgable
== VM_PURGABLE_VOLATILE
||
472 cobject
->purgable
== VM_PURGABLE_EMPTY
);
476 nobject
= cobject
->shadow
;
477 if (nobject
== VM_OBJECT_NULL
) {
478 vm_object_unlock(cobject
);
482 vm_object_lock(nobject
);
483 vm_object_unlock(cobject
);
492 /* must allocate more memory */
495 kmem_free(ipc_kernel_map
, addr
, size
);
497 size
= vm_map_round_page(2 * used
* sizeof(vm_info_object_t
),
498 VM_MAP_PAGE_MASK(ipc_kernel_map
));
500 kr
= vm_allocate_kernel(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IPC
);
501 if (kr
!= KERN_SUCCESS
) {
502 return KERN_RESOURCE_SHORTAGE
;
505 kr
= vm_map_wire_kernel(
507 vm_map_trunc_page(addr
,
508 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
509 vm_map_round_page(addr
+ size
,
510 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
511 VM_PROT_READ
| VM_PROT_WRITE
,
514 assert(kr
== KERN_SUCCESS
);
517 /* free excess memory; make remaining memory pageable */
520 copy
= VM_MAP_COPY_NULL
;
523 kmem_free(ipc_kernel_map
, addr
, size
);
526 vm_size_t size_used
= (used
* sizeof(vm_info_object_t
));
527 vm_size_t vmsize_used
= vm_map_round_page(size_used
,
528 VM_MAP_PAGE_MASK(ipc_kernel_map
));
532 vm_map_trunc_page(addr
,
533 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
534 vm_map_round_page(addr
+ size_used
,
535 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
537 assert(kr
== KERN_SUCCESS
);
539 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
540 (vm_map_size_t
)size_used
, TRUE
, ©
);
541 assert(kr
== KERN_SUCCESS
);
543 if (size
!= vmsize_used
) {
544 kmem_free(ipc_kernel_map
,
545 addr
+ vmsize_used
, size
- vmsize_used
);
550 *objectsp
= (vm_info_object_array_t
) copy
;
553 #endif /* MACH_VM_DEBUG */
556 * Return an array of virtual pages that are mapped to a task.
559 vm32_mapped_pages_info(
560 __DEBUG_ONLY vm_map_t map
,
561 __DEBUG_ONLY page_address_array_t
*pages
,
562 __DEBUG_ONLY mach_msg_type_number_t
*pages_count
)
568 vm_size_t size
, size_used
;
569 unsigned int actual
, space
;
570 page_address_array_t list
;
571 vm_offset_t addr
= 0;
573 if (map
== VM_MAP_NULL
) {
574 return KERN_INVALID_ARGUMENT
;
578 size
= pmap_resident_count(pmap
) * sizeof(vm_offset_t
);
579 size
= vm_map_round_page(size
,
580 VM_MAP_PAGE_MASK(ipc_kernel_map
));
583 (void) vm_allocate_kernel(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IPC
);
584 (void) vm_map_unwire(
586 vm_map_trunc_page(addr
,
587 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
588 vm_map_round_page(addr
+ size
,
589 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
592 list
= (page_address_array_t
) addr
;
593 space
= (unsigned int) (size
/ sizeof(vm_offset_t
));
595 actual
= pmap_list_resident_pages(pmap
,
598 if (actual
<= space
) {
603 * Free memory if not enough
605 (void) kmem_free(ipc_kernel_map
, addr
, size
);
608 * Try again, doubling the size
610 size
= vm_map_round_page(actual
* sizeof(vm_offset_t
),
611 VM_MAP_PAGE_MASK(ipc_kernel_map
));
616 (void) kmem_free(ipc_kernel_map
, addr
, size
);
618 vm_size_t vmsize_used
;
619 *pages_count
= actual
;
620 size_used
= (actual
* sizeof(vm_offset_t
));
621 vmsize_used
= vm_map_round_page(size_used
,
622 VM_MAP_PAGE_MASK(ipc_kernel_map
));
623 (void) vm_map_wire_kernel(
625 vm_map_trunc_page(addr
,
626 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
627 vm_map_round_page(addr
+ size
,
628 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
629 VM_PROT_READ
| VM_PROT_WRITE
,
632 (void) vm_map_copyin(ipc_kernel_map
,
633 (vm_map_address_t
)addr
,
634 (vm_map_size_t
)size_used
,
636 (vm_map_copy_t
*)pages
);
637 if (vmsize_used
!= size
) {
638 (void) kmem_free(ipc_kernel_map
,
645 #endif /* MACH_VM_DEBUG */
648 #endif /* VM32_SUPPORT */
651 * Routine: host_virtual_physical_table_info
653 * Return information about the VP table.
655 * Nothing locked. Obeys CountInOut protocol.
657 * KERN_SUCCESS Returned information.
658 * KERN_INVALID_HOST The host is null.
659 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
663 host_virtual_physical_table_info(
664 __DEBUG_ONLY host_t host
,
665 __DEBUG_ONLY hash_info_bucket_array_t
*infop
,
666 __DEBUG_ONLY mach_msg_type_number_t
*countp
)
671 vm_offset_t addr
= 0;
673 hash_info_bucket_t
*info
;
674 unsigned int potential
, actual
;
677 if (host
== HOST_NULL
) {
678 return KERN_INVALID_HOST
;
681 /* start with in-line data */
687 actual
= vm_page_info(info
, potential
);
688 if (actual
<= potential
) {
692 /* allocate more memory */
694 if (info
!= *infop
) {
695 kmem_free(ipc_kernel_map
, addr
, size
);
698 size
= vm_map_round_page(actual
* sizeof *info
,
699 VM_MAP_PAGE_MASK(ipc_kernel_map
));
700 kr
= vm_allocate_kernel(ipc_kernel_map
, &addr
, size
,
701 VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IPC
);
702 if (kr
!= KERN_SUCCESS
) {
703 return KERN_RESOURCE_SHORTAGE
;
706 info
= (hash_info_bucket_t
*) addr
;
707 potential
= (unsigned int) (size
/ sizeof(*info
));
710 if (info
== *infop
) {
711 /* data fit in-line; nothing to deallocate */
714 } else if (actual
== 0) {
715 kmem_free(ipc_kernel_map
, addr
, size
);
720 vm_size_t used
, vmused
;
722 used
= (actual
* sizeof(*info
));
723 vmused
= vm_map_round_page(used
, VM_MAP_PAGE_MASK(ipc_kernel_map
));
725 if (vmused
!= size
) {
726 kmem_free(ipc_kernel_map
, addr
+ vmused
, size
- vmused
);
729 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
730 (vm_map_size_t
)used
, TRUE
, ©
);
731 assert(kr
== KERN_SUCCESS
);
733 *infop
= (hash_info_bucket_t
*) copy
;
738 #endif /* MACH_VM_DEBUG */