2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
59 * File: vm/vm_debug.c.
63 * Exported kernel calls. See mach_debug/mach_debug.defs.
65 #include <mach_vm_debug.h>
66 #include <mach/kern_return.h>
67 #include <mach/mach_host_server.h>
68 #include <mach_debug/vm_info.h>
69 #include <mach_debug/page_info.h>
70 #include <mach_debug/hash_info.h>
73 #include <mach/machine/vm_types.h>
74 #include <mach/memory_object_types.h>
75 #include <mach/vm_prot.h>
76 #include <mach/vm_inherit.h>
77 #include <mach/vm_param.h>
78 #include <kern/thread.h>
79 #include <vm/vm_map.h>
80 #include <vm/vm_kern.h>
81 #include <vm/vm_object.h>
82 #include <kern/task.h>
83 #include <kern/host.h>
84 #include <ipc/ipc_port.h>
85 #include <vm/vm_debug.h>
89 #define __DEBUG_ONLY __unused
90 #else /* !MACH_VM_DEBUG */
92 #endif /* !MACH_VM_DEBUG */
96 #include <mach/vm32_map_server.h>
97 #include <mach/vm_map.h>
100 * Routine: mach_vm_region_info [kernel call]
102 * Retrieve information about a VM region,
103 * including info about the object chain.
107 * KERN_SUCCESS Retrieve region/object info.
108 * KERN_INVALID_TASK The map is null.
109 * KERN_NO_SPACE There is no entry at/after the address.
110 * KERN_RESOURCE_SHORTAGE Can't allocate memory.
/*
 * vm32_region_info — 32-bit MIG server routine (the `kern_return_t
 * vm32_region_info(` signature line was dropped by the extraction).
 *
 * NOTE(review): this chunk is a corrupted extraction of vm_debug.c. Logical
 * source lines are split across physical lines, the original file's line
 * numbers are embedded in the text, and the jumps in that numbering show
 * that several lines are missing (e.g. the function header, the
 * `#if MACH_VM_DEBUG` guard whose `#endif` survives below, the `kr` and
 * `copy` declarations, and some statements). Do not compile as-is; restore
 * the dropped lines from the canonical source before editing.
 *
 * Purpose (per the routine header above): retrieve information about a VM
 * region, including info about its object shadow chain.
 * Returns: KERN_SUCCESS / KERN_INVALID_TASK (null map) /
 * KERN_NO_SPACE (no entry at/after address) / KERN_RESOURCE_SHORTAGE.
 */
/* Parameters: map + address in; regionp, objectsp (OOL array), objectsCntp out. */
115 __DEBUG_ONLY vm_map_t map
,
116 __DEBUG_ONLY vm32_offset_t address
,
117 __DEBUG_ONLY vm_info_region_t
*regionp
,
118 __DEBUG_ONLY vm_info_object_array_t
*objectsp
,
119 __DEBUG_ONLY mach_msg_type_number_t
*objectsCntp
)
125 vm_offset_t addr
= 0; /* memory for OOL data */
126 vm_size_t size
; /* size of the memory */
127 unsigned int room
; /* room for this many objects */
128 unsigned int used
; /* actually this many objects */
129 vm_info_region_t region
;
/* Reject a null map before touching anything. */
132 if (map
== VM_MAP_NULL
)
133 return KERN_INVALID_TASK
;
135 size
= 0; /* no memory allocated yet */
138 vm_map_t cmap
; /* current map in traversal */
139 vm_map_t nmap
; /* next map to look at */
140 vm_map_entry_t entry
;
141 vm_object_t object
, cobject
, nobject
;
143 /* nothing is locked */
/* Walk down through submaps until a real (non-submap) entry covers `address`. */
145 vm_map_lock_read(map
);
146 for (cmap
= map
;; cmap
= nmap
) {
147 /* cmap is read-locked */
149 if (!vm_map_lookup_entry(cmap
,
150 (vm_map_address_t
)address
, &entry
)) {
152 entry
= entry
->vme_next
;
153 if (entry
== vm_map_to_entry(cmap
)) {
154 vm_map_unlock_read(cmap
);
/* NOTE(review): the kmem_free argument line (orig. 157) was dropped here. */
156 kmem_free(ipc_kernel_map
,
158 return KERN_NO_SPACE
;
162 if (entry
->is_sub_map
)
163 nmap
= VME_SUBMAP(entry
);
167 /* move down to the lower map */
169 vm_map_lock_read(nmap
);
170 vm_map_unlock_read(cmap
);
173 /* cmap is read-locked; we have a real entry */
/* Snapshot the entry's fields into the 32-bit region record. */
175 object
= VME_OBJECT(entry
);
176 region
.vir_start
= (natural_t
) entry
->vme_start
;
177 region
.vir_end
= (natural_t
) entry
->vme_end
;
178 region
.vir_object
= (natural_t
)(uintptr_t) object
;
179 region
.vir_offset
= (natural_t
) VME_OFFSET(entry
);
180 region
.vir_needs_copy
= entry
->needs_copy
;
181 region
.vir_protection
= entry
->protection
;
182 region
.vir_max_protection
= entry
->max_protection
;
183 region
.vir_inheritance
= entry
->inheritance
;
184 region
.vir_wired_count
= entry
->wired_count
;
185 region
.vir_user_wired_count
= entry
->user_wired_count
;
/* How many vm_info_object_t records fit in the current buffer. */
188 room
= (unsigned int) (size
/ sizeof(vm_info_object_t
));
190 if (object
== VM_OBJECT_NULL
) {
191 vm_map_unlock_read(cmap
);
192 /* no memory needed */
196 vm_object_lock(object
);
197 vm_map_unlock_read(cmap
);
/*
 * Walk the shadow chain, filling one vm_info_object_t per object.
 * NOTE(review): the original presumably guards each record write with a
 * `used < room` check — that line was dropped by the extraction; confirm.
 */
199 for (cobject
= object
;; cobject
= nobject
) {
200 /* cobject is locked */
203 vm_info_object_t
*vio
=
204 &((vm_info_object_t
*) addr
)[used
];
207 (natural_t
)(uintptr_t) cobject
;
209 (natural_t
) cobject
->vo_size
;
212 vio
->vio_resident_page_count
=
213 cobject
->resident_page_count
;
215 (natural_t
)(uintptr_t) cobject
->copy
;
217 (natural_t
)(uintptr_t) cobject
->shadow
;
218 vio
->vio_shadow_offset
=
219 (natural_t
) cobject
->vo_shadow_offset
;
220 vio
->vio_paging_offset
=
221 (natural_t
) cobject
->paging_offset
;
222 vio
->vio_copy_strategy
=
223 cobject
->copy_strategy
;
224 vio
->vio_last_alloc
=
225 (vm_offset_t
) cobject
->last_alloc
;
226 vio
->vio_paging_in_progress
=
227 cobject
->paging_in_progress
+
228 cobject
->activity_in_progress
;
229 vio
->vio_pager_created
=
230 cobject
->pager_created
;
231 vio
->vio_pager_initialized
=
232 cobject
->pager_initialized
;
233 vio
->vio_pager_ready
=
234 cobject
->pager_ready
;
235 vio
->vio_can_persist
=
236 cobject
->can_persist
;
244 (cobject
->purgable
!= VM_PURGABLE_DENY
);
245 vio
->vio_purgable_volatile
=
246 (cobject
->purgable
== VM_PURGABLE_VOLATILE
||
247 cobject
->purgable
== VM_PURGABLE_EMPTY
);
/* Advance down the shadow chain with hand-over-hand locking. */
251 nobject
= cobject
->shadow
;
252 if (nobject
== VM_OBJECT_NULL
) {
253 vm_object_unlock(cobject
);
257 vm_object_lock(nobject
);
258 vm_object_unlock(cobject
);
/*
 * Buffer was too small: free it, double it (rounded to map page size),
 * wire the new buffer, and retry the whole lookup. NOTE(review): the
 * retry/`goto` statement itself was dropped by the extraction.
 */
266 /* must allocate more memory */
269 kmem_free(ipc_kernel_map
, addr
, size
);
270 size
= vm_map_round_page(2 * used
* sizeof(vm_info_object_t
),
271 VM_MAP_PAGE_MASK(ipc_kernel_map
));
273 kr
= vm_allocate_kernel(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IPC
);
274 if (kr
!= KERN_SUCCESS
)
275 return KERN_RESOURCE_SHORTAGE
;
277 kr
= vm_map_wire_kernel(
279 vm_map_trunc_page(addr
,
280 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
281 vm_map_round_page(addr
+ size
,
282 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
283 VM_PROT_READ
|VM_PROT_WRITE
,
286 assert(kr
== KERN_SUCCESS
);
289 /* free excess memory; make remaining memory pageable */
292 copy
= VM_MAP_COPY_NULL
;
295 kmem_free(ipc_kernel_map
, addr
, size
);
/* Hand the used portion to the caller as a vm_map_copy_t; free the excess. */
297 vm_size_t size_used
= (used
* sizeof(vm_info_object_t
));
298 vm_size_t vmsize_used
= vm_map_round_page(size_used
,
299 VM_MAP_PAGE_MASK(ipc_kernel_map
));
303 vm_map_trunc_page(addr
,
304 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
305 vm_map_round_page(addr
+ size_used
,
306 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
308 assert(kr
== KERN_SUCCESS
);
310 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
/* NOTE(review): "©" below is mojibake for the original `&copy` argument. */
311 (vm_map_size_t
)size_used
, TRUE
, ©
);
312 assert(kr
== KERN_SUCCESS
);
314 if (size
!= vmsize_used
)
315 kmem_free(ipc_kernel_map
,
316 addr
+ vmsize_used
, size
- vmsize_used
);
320 *objectsp
= (vm_info_object_array_t
) copy
;
323 #endif /* MACH_VM_DEBUG */
327 * Temporary call for 64 bit data path interface transition
/*
 * vm32_region_info_64 — 64-bit-offset variant of the region-info call (the
 * `kern_return_t vm32_region_info_64(` signature line was dropped by the
 * extraction). Differences visible versus the 32-bit sibling: the region
 * record is vm_info_region_64_t, vir_offset is stored without the narrowing
 * (natural_t) cast, and vm_map_lookup_entry is called without the
 * (vm_map_address_t) cast on `address`.
 *
 * NOTE(review): same extraction corruption as above — logical lines split
 * across physical lines, embedded original line numbers, and dropped lines
 * (function header, `#if MACH_VM_DEBUG` guard, `kr`/`copy` declarations,
 * loop-restart statements). Restore from the canonical source before editing.
 */
332 __DEBUG_ONLY vm_map_t map
,
333 __DEBUG_ONLY vm32_offset_t address
,
334 __DEBUG_ONLY vm_info_region_64_t
*regionp
,
335 __DEBUG_ONLY vm_info_object_array_t
*objectsp
,
336 __DEBUG_ONLY mach_msg_type_number_t
*objectsCntp
)
342 vm_offset_t addr
= 0; /* memory for OOL data */
343 vm_size_t size
; /* size of the memory */
344 unsigned int room
; /* room for this many objects */
345 unsigned int used
; /* actually this many objects */
346 vm_info_region_64_t region
;
/* Reject a null map up front. */
349 if (map
== VM_MAP_NULL
)
350 return KERN_INVALID_TASK
;
352 size
= 0; /* no memory allocated yet */
355 vm_map_t cmap
; /* current map in traversal */
356 vm_map_t nmap
; /* next map to look at */
357 vm_map_entry_t entry
;
358 vm_object_t object
, cobject
, nobject
;
360 /* nothing is locked */
/* Descend through submaps until a real entry covers `address`. */
362 vm_map_lock_read(map
);
363 for (cmap
= map
;; cmap
= nmap
) {
364 /* cmap is read-locked */
366 if (!vm_map_lookup_entry(cmap
, address
, &entry
)) {
367 entry
= entry
->vme_next
;
368 if (entry
== vm_map_to_entry(cmap
)) {
369 vm_map_unlock_read(cmap
);
/* NOTE(review): the kmem_free argument line (orig. 372) was dropped here. */
371 kmem_free(ipc_kernel_map
,
373 return KERN_NO_SPACE
;
377 if (entry
->is_sub_map
)
378 nmap
= VME_SUBMAP(entry
);
382 /* move down to the lower map */
384 vm_map_lock_read(nmap
);
385 vm_map_unlock_read(cmap
);
388 /* cmap is read-locked; we have a real entry */
/* Snapshot the entry into the 64-bit region record. */
390 object
= VME_OBJECT(entry
);
391 region
.vir_start
= (natural_t
) entry
->vme_start
;
392 region
.vir_end
= (natural_t
) entry
->vme_end
;
393 region
.vir_object
= (natural_t
)(uintptr_t) object
;
394 region
.vir_offset
= VME_OFFSET(entry
);
395 region
.vir_needs_copy
= entry
->needs_copy
;
396 region
.vir_protection
= entry
->protection
;
397 region
.vir_max_protection
= entry
->max_protection
;
398 region
.vir_inheritance
= entry
->inheritance
;
399 region
.vir_wired_count
= entry
->wired_count
;
400 region
.vir_user_wired_count
= entry
->user_wired_count
;
/* Records that fit in the current buffer. */
403 room
= (unsigned int) (size
/ sizeof(vm_info_object_t
));
405 if (object
== VM_OBJECT_NULL
) {
406 vm_map_unlock_read(cmap
);
407 /* no memory needed */
411 vm_object_lock(object
);
412 vm_map_unlock_read(cmap
);
/*
 * Walk the shadow chain, one vm_info_object_t per object.
 * NOTE(review): the `used < room` guard appears to have been dropped by
 * the extraction — confirm against the canonical source.
 */
414 for (cobject
= object
;; cobject
= nobject
) {
415 /* cobject is locked */
418 vm_info_object_t
*vio
=
419 &((vm_info_object_t
*) addr
)[used
];
422 (natural_t
)(uintptr_t) cobject
;
424 (natural_t
) cobject
->vo_size
;
427 vio
->vio_resident_page_count
=
428 cobject
->resident_page_count
;
430 (natural_t
)(uintptr_t) cobject
->copy
;
432 (natural_t
)(uintptr_t) cobject
->shadow
;
433 vio
->vio_shadow_offset
=
434 (natural_t
) cobject
->vo_shadow_offset
;
435 vio
->vio_paging_offset
=
436 (natural_t
) cobject
->paging_offset
;
437 vio
->vio_copy_strategy
=
438 cobject
->copy_strategy
;
439 vio
->vio_last_alloc
=
440 (vm_offset_t
) cobject
->last_alloc
;
441 vio
->vio_paging_in_progress
=
442 cobject
->paging_in_progress
+
443 cobject
->activity_in_progress
;
444 vio
->vio_pager_created
=
445 cobject
->pager_created
;
446 vio
->vio_pager_initialized
=
447 cobject
->pager_initialized
;
448 vio
->vio_pager_ready
=
449 cobject
->pager_ready
;
450 vio
->vio_can_persist
=
451 cobject
->can_persist
;
459 (cobject
->purgable
!= VM_PURGABLE_DENY
);
460 vio
->vio_purgable_volatile
=
461 (cobject
->purgable
== VM_PURGABLE_VOLATILE
||
462 cobject
->purgable
== VM_PURGABLE_EMPTY
);
/* Hand-over-hand locking down the shadow chain. */
466 nobject
= cobject
->shadow
;
467 if (nobject
== VM_OBJECT_NULL
) {
468 vm_object_unlock(cobject
);
472 vm_object_lock(nobject
);
473 vm_object_unlock(cobject
);
/*
 * Grow path: free, double (page-rounded), wire, retry.
 * NOTE(review): the retry statement itself was dropped by the extraction.
 */
481 /* must allocate more memory */
484 kmem_free(ipc_kernel_map
, addr
, size
);
485 size
= vm_map_round_page(2 * used
* sizeof(vm_info_object_t
),
486 VM_MAP_PAGE_MASK(ipc_kernel_map
));
488 kr
= vm_allocate_kernel(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IPC
);
489 if (kr
!= KERN_SUCCESS
)
490 return KERN_RESOURCE_SHORTAGE
;
492 kr
= vm_map_wire_kernel(
494 vm_map_trunc_page(addr
,
495 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
496 vm_map_round_page(addr
+ size
,
497 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
498 VM_PROT_READ
|VM_PROT_WRITE
,
501 assert(kr
== KERN_SUCCESS
);
504 /* free excess memory; make remaining memory pageable */
507 copy
= VM_MAP_COPY_NULL
;
510 kmem_free(ipc_kernel_map
, addr
, size
);
/* Copy out the used portion as a vm_map_copy_t; free any excess pages. */
512 vm_size_t size_used
= (used
* sizeof(vm_info_object_t
));
513 vm_size_t vmsize_used
= vm_map_round_page(size_used
,
514 VM_MAP_PAGE_MASK(ipc_kernel_map
));
518 vm_map_trunc_page(addr
,
519 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
520 vm_map_round_page(addr
+ size_used
,
521 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
523 assert(kr
== KERN_SUCCESS
);
525 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
/* NOTE(review): "©" below is mojibake for the original `&copy` argument. */
526 (vm_map_size_t
)size_used
, TRUE
, ©
);
527 assert(kr
== KERN_SUCCESS
);
529 if (size
!= vmsize_used
)
530 kmem_free(ipc_kernel_map
,
531 addr
+ vmsize_used
, size
- vmsize_used
);
535 *objectsp
= (vm_info_object_array_t
) copy
;
538 #endif /* MACH_VM_DEBUG */
541 * Return an array of virtual pages that are mapped to a task.
/*
 * vm32_mapped_pages_info — return an array of virtual pages mapped into a
 * task's map (per the routine header above). Output is an OOL
 * page_address_array_t plus its count.
 *
 * NOTE(review): corrupted extraction — logical lines split across physical
 * lines, embedded original line numbers, and dropped lines (the return type
 * line, the `pmap` derivation from `map`, the retry loop around the doubling
 * path, several argument lines). Restore from the canonical source before
 * editing.
 */
544 vm32_mapped_pages_info(
545 __DEBUG_ONLY vm_map_t map
,
546 __DEBUG_ONLY page_address_array_t
*pages
,
547 __DEBUG_ONLY mach_msg_type_number_t
*pages_count
)
553 vm_size_t size
, size_used
;
554 unsigned int actual
, space
;
555 page_address_array_t list
;
556 vm_offset_t addr
= 0;
/* Null map is an argument error here (not KERN_INVALID_TASK as above). */
558 if (map
== VM_MAP_NULL
)
559 return (KERN_INVALID_ARGUMENT
);
/*
 * Size the buffer from the pmap's resident count (page-rounded).
 * NOTE(review): the line binding `pmap` to `map` was dropped — confirm.
 */
562 size
= pmap_resident_count(pmap
) * sizeof(vm_offset_t
);
563 size
= vm_map_round_page(size
,
564 VM_MAP_PAGE_MASK(ipc_kernel_map
));
/* Allocate pageable kernel memory, then unwire it for the listing pass. */
567 (void) vm_allocate_kernel(ipc_kernel_map
, &addr
, size
, VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IPC
);
568 (void) vm_map_unwire(
570 vm_map_trunc_page(addr
,
571 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
572 vm_map_round_page(addr
+ size
,
573 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
576 list
= (page_address_array_t
) addr
;
577 space
= (unsigned int) (size
/ sizeof(vm_offset_t
));
/* Ask the pmap layer to fill the list; `actual` is the true page count. */
579 actual
= pmap_list_resident_pages(pmap
,
586 * Free memory if not enough
588 (void) kmem_free(ipc_kernel_map
, addr
, size
);
591 * Try again, doubling the size
/* NOTE(review): the retry-loop control lines around this resize were dropped. */
593 size
= vm_map_round_page(actual
* sizeof(vm_offset_t
),
594 VM_MAP_PAGE_MASK(ipc_kernel_map
));
599 (void) kmem_free(ipc_kernel_map
, addr
, size
);
/* Success path: report the count, re-wire the used portion, copy it out. */
602 vm_size_t vmsize_used
;
603 *pages_count
= actual
;
604 size_used
= (actual
* sizeof(vm_offset_t
));
605 vmsize_used
= vm_map_round_page(size_used
,
606 VM_MAP_PAGE_MASK(ipc_kernel_map
));
607 (void) vm_map_wire_kernel(
609 vm_map_trunc_page(addr
,
610 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
611 vm_map_round_page(addr
+ size
,
612 VM_MAP_PAGE_MASK(ipc_kernel_map
)),
613 VM_PROT_READ
|VM_PROT_WRITE
,
616 (void) vm_map_copyin(ipc_kernel_map
,
617 (vm_map_address_t
)addr
,
618 (vm_map_size_t
)size_used
,
620 (vm_map_copy_t
*)pages
);
/* Free any page-rounding excess beyond the used portion. */
621 if (vmsize_used
!= size
) {
622 (void) kmem_free(ipc_kernel_map
,
628 return (KERN_SUCCESS
);
629 #endif /* MACH_VM_DEBUG */
632 #endif /* VM32_SUPPORT */
635 * Routine: host_virtual_physical_table_info
637 * Return information about the VP table.
639 * Nothing locked. Obeys CountInOut protocol.
641 * KERN_SUCCESS Returned information.
642 * KERN_INVALID_HOST The host is null.
643 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
/*
 * host_virtual_physical_table_info — return information about the VP table
 * (per the routine header above). Obeys the CountInOut protocol: the caller
 * supplies an in-line buffer via *infop/*countp; if the data does not fit,
 * kernel memory is allocated and returned OOL instead.
 * Returns: KERN_SUCCESS / KERN_INVALID_HOST / KERN_RESOURCE_SHORTAGE.
 *
 * NOTE(review): corrupted extraction — logical lines split across physical
 * lines, embedded original line numbers, dropped lines (the return type,
 * `size`/`kr`/`copy` declarations, the retry loop head, the in-line
 * initialization of `info`/`potential`). Restore from the canonical source
 * before editing.
 */
647 host_virtual_physical_table_info(
648 __DEBUG_ONLY host_t host
,
649 __DEBUG_ONLY hash_info_bucket_array_t
*infop
,
650 __DEBUG_ONLY mach_msg_type_number_t
*countp
)
655 vm_offset_t addr
= 0;
657 hash_info_bucket_t
*info
;
658 unsigned int potential
, actual
;
/* Reject a null host. */
661 if (host
== HOST_NULL
)
662 return KERN_INVALID_HOST
;
664 /* start with in-line data */
/* Probe: ask for the data; `actual` is how many buckets exist. */
670 actual
= vm_page_info(info
, potential
);
671 if (actual
<= potential
)
674 /* allocate more memory */
/* Didn't fit: drop any previous buffer and allocate `actual` buckets' worth. */
677 kmem_free(ipc_kernel_map
, addr
, size
);
679 size
= vm_map_round_page(actual
* sizeof *info
,
680 VM_MAP_PAGE_MASK(ipc_kernel_map
));
681 kr
= vm_allocate_kernel(ipc_kernel_map
, &addr
, size
,
682 VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IPC
);
683 if (kr
!= KERN_SUCCESS
)
684 return KERN_RESOURCE_SHORTAGE
;
/* Retry with the bigger buffer (loop-back line dropped by the extraction). */
686 info
= (hash_info_bucket_t
*) addr
;
687 potential
= (unsigned int) (size
/sizeof (*info
));
/* Three outcomes: fit in-line, empty table, or OOL copy-out. */
690 if (info
== *infop
) {
691 /* data fit in-line; nothing to deallocate */
694 } else if (actual
== 0) {
695 kmem_free(ipc_kernel_map
, addr
, size
);
700 vm_size_t used
, vmused
;
702 used
= (actual
* sizeof(*info
));
703 vmused
= vm_map_round_page(used
, VM_MAP_PAGE_MASK(ipc_kernel_map
));
/* Free rounding excess, then hand the used portion to the caller as OOL. */
706 kmem_free(ipc_kernel_map
, addr
+ vmused
, size
- vmused
);
708 kr
= vm_map_copyin(ipc_kernel_map
, (vm_map_address_t
)addr
,
/* NOTE(review): "©" below is mojibake for the original `&copy` argument. */
709 (vm_map_size_t
)used
, TRUE
, ©
);
710 assert(kr
== KERN_SUCCESS
);
712 *infop
= (hash_info_bucket_t
*) copy
;
717 #endif /* MACH_VM_DEBUG */