/*
 * osfmk/vm/vm_debug.c — from the apple/xnu source tree (xnu-792.17.14).
 */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_debug.c.
60 * Author: Rich Draves
61 * Date: March, 1990
62 *
63 * Exported kernel calls. See mach_debug/mach_debug.defs.
64 */
65 #include <mach_vm_debug.h>
66 #include <mach/kern_return.h>
67 #include <mach/mach_host_server.h>
68 #include <mach/vm_map_server.h>
69 #include <mach_debug/vm_info.h>
70 #include <mach_debug/page_info.h>
71 #include <mach_debug/hash_info.h>
72
73 #if MACH_VM_DEBUG
74 #include <mach/machine/vm_types.h>
75 #include <mach/memory_object_types.h>
76 #include <mach/vm_prot.h>
77 #include <mach/vm_inherit.h>
78 #include <mach/vm_param.h>
79 #include <kern/thread.h>
80 #include <vm/vm_map.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_object.h>
83 #include <kern/task.h>
84 #include <kern/host.h>
85 #include <ipc/ipc_port.h>
86 #include <vm/vm_debug.h>
87 #endif
88
89 #if !MACH_VM_DEBUG
90 #define __DEBUG_ONLY __unused
91 #else /* !MACH_VM_DEBUG */
92 #define __DEBUG_ONLY
93 #endif /* !MACH_VM_DEBUG */
94
95 /*
96 * Routine: mach_vm_region_info [kernel call]
97 * Purpose:
98 * Retrieve information about a VM region,
99 * including info about the object chain.
100 * Conditions:
101 * Nothing locked.
102 * Returns:
103 * KERN_SUCCESS Retrieve region/object info.
104 * KERN_INVALID_TASK The map is null.
105 * KERN_NO_SPACE There is no entry at/after the address.
106 * KERN_RESOURCE_SHORTAGE Can't allocate memory.
107 */
108
kern_return_t
mach_vm_region_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_t		*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
        return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	/*
	 * Retry loop.  Each pass re-finds the entry and walks the
	 * object shadow chain, recording entries into the out-of-line
	 * buffer while they fit.  If the chain was longer than the
	 * buffer (used > room), the buffer is regrown and the whole
	 * walk restarts, because the map/objects may have changed
	 * while nothing was locked.
	 */
	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
				(vm_map_address_t)address, &entry)) {

				/* no entry at `address`; try the next one */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					/* drop any buffer from a prior pass */
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/*
			 * Move down to the lower map, hand-over-hand:
			 * lock the submap BEFORE unlocking its parent so
			 * the entry just followed cannot go away under us.
			 */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		/* snapshot the entry into the caller-visible record */
		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		/* room == 0 on the first pass (size starts at 0) */
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* transfer the lock from the map to the top object */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain, again hand-over-hand.  Entries
		 * are recorded only while used < room, but `used` keeps
		 * counting so the required buffer size is known at the
		 * end.  Since room == 0 on the first pass, `addr` is
		 * never dereferenced before it has been allocated.
		 * The buffer is wired, so these writes cannot fault
		 * while the object lock is held.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_OBJECT_NONPURGABLE);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			/* hand-over-hand down the shadow chain */
			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		/* 2x what was needed, in case the chain grows before retry */
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* wire the buffer so filling it cannot fault under locks */
		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		/* unwire before handing the pages to the copy object */
		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		/* TRUE: the source range is consumed by the copyin */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		/* return the unused tail of the buffer */
		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
309
/*
 * Temporary call for 64-bit data path interface transition.
 */
313
/*
 * 64-bit-region variant of mach_vm_region_info: identical logic,
 * but fills a vm_info_region_64_t.  Same return codes and the same
 * retry/regrow protocol for the out-of-line object array.
 */
kern_return_t
mach_vm_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
        return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	/*
	 * Retry loop: re-find the entry and walk the shadow chain;
	 * restart with a bigger buffer whenever the chain did not fit.
	 */
	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				/* no entry at `address`; try the next one */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					/* drop any buffer from a prior pass */
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/*
			 * Move down to the lower map, hand-over-hand:
			 * lock the submap before unlocking its parent.
			 */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		/* snapshot the entry into the caller-visible record */
		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		/* room == 0 on the first pass (size starts at 0) */
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* transfer the lock from the map to the top object */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain hand-over-hand; record entries
		 * while they fit, but keep counting in `used` so the
		 * required size is known.  `addr` is only dereferenced
		 * once room > 0, i.e. after it has been allocated.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_OBJECT_NONPURGABLE);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			/* hand-over-hand down the shadow chain */
			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		/* 2x what was needed, in case the chain grows before retry */
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* wire the buffer so filling it cannot fault under locks */
		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		/* unwire before handing the pages to the copy object */
		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		/* TRUE: the source range is consumed by the copyin */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		/* return the unused tail of the buffer */
		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
512 /*
513 * Return an array of virtual pages that are mapped to a task.
514 */
515 kern_return_t
516 vm_mapped_pages_info(
517 __DEBUG_ONLY vm_map_t map,
518 __DEBUG_ONLY page_address_array_t *pages,
519 __DEBUG_ONLY mach_msg_type_number_t *pages_count)
520 {
521 #if !MACH_VM_DEBUG
522 return KERN_FAILURE;
523 #else
524 pmap_t pmap;
525 vm_size_t size, size_used;
526 unsigned int actual, space;
527 page_address_array_t list;
528 vm_offset_t addr;
529
530 if (map == VM_MAP_NULL)
531 return (KERN_INVALID_ARGUMENT);
532
533 pmap = map->pmap;
534 size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
535 size = round_page_32(size);
536
537 for (;;) {
538 (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
539 (void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
540 vm_map_round_page(addr + size), FALSE);
541
542 list = (page_address_array_t) addr;
543 space = size / sizeof(vm_offset_t);
544
545 actual = pmap_list_resident_pages(pmap,
546 list,
547 space);
548 if (actual <= space)
549 break;
550
551 /*
552 * Free memory if not enough
553 */
554 (void) kmem_free(ipc_kernel_map, addr, size);
555
556 /*
557 * Try again, doubling the size
558 */
559 size = round_page_32(actual * sizeof(vm_offset_t));
560 }
561 if (actual == 0) {
562 *pages = 0;
563 *pages_count = 0;
564 (void) kmem_free(ipc_kernel_map, addr, size);
565 }
566 else {
567 *pages_count = actual;
568 size_used = round_page_32(actual * sizeof(vm_offset_t));
569 (void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
570 vm_map_round_page(addr + size),
571 VM_PROT_READ|VM_PROT_WRITE, FALSE);
572 (void) vm_map_copyin(ipc_kernel_map,
573 (vm_map_address_t)addr,
574 (vm_map_size_t)size_used,
575 TRUE,
576 (vm_map_copy_t *)pages);
577 if (size_used != size) {
578 (void) kmem_free(ipc_kernel_map,
579 addr + size_used,
580 size - size_used);
581 }
582 }
583
584 return (KERN_SUCCESS);
585 #endif /* MACH_VM_DEBUG */
586 }
587
588 /*
589 * Routine: host_virtual_physical_table_info
590 * Purpose:
591 * Return information about the VP table.
592 * Conditions:
593 * Nothing locked. Obeys CountInOut protocol.
594 * Returns:
595 * KERN_SUCCESS Returned information.
596 * KERN_INVALID_HOST The host is null.
597 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
598 */
599
600 kern_return_t
601 host_virtual_physical_table_info(
602 __DEBUG_ONLY host_t host,
603 __DEBUG_ONLY hash_info_bucket_array_t *infop,
604 __DEBUG_ONLY mach_msg_type_number_t *countp)
605 {
606 #if !MACH_VM_DEBUG
607 return KERN_FAILURE;
608 #else
609 vm_offset_t addr;
610 vm_size_t size = 0;
611 hash_info_bucket_t *info;
612 unsigned int potential, actual;
613 kern_return_t kr;
614
615 if (host == HOST_NULL)
616 return KERN_INVALID_HOST;
617
618 /* start with in-line data */
619
620 info = *infop;
621 potential = *countp;
622
623 for (;;) {
624 actual = vm_page_info(info, potential);
625 if (actual <= potential)
626 break;
627
628 /* allocate more memory */
629
630 if (info != *infop)
631 kmem_free(ipc_kernel_map, addr, size);
632
633 size = round_page_32(actual * sizeof *info);
634 kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
635 if (kr != KERN_SUCCESS)
636 return KERN_RESOURCE_SHORTAGE;
637
638 info = (hash_info_bucket_t *) addr;
639 potential = size/sizeof *info;
640 }
641
642 if (info == *infop) {
643 /* data fit in-line; nothing to deallocate */
644
645 *countp = actual;
646 } else if (actual == 0) {
647 kmem_free(ipc_kernel_map, addr, size);
648
649 *countp = 0;
650 } else {
651 vm_map_copy_t copy;
652 vm_size_t used;
653
654 used = round_page_32(actual * sizeof *info);
655
656 if (used != size)
657 kmem_free(ipc_kernel_map, addr + used, size - used);
658
659 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
660 (vm_map_size_t)used, TRUE, &copy);
661 assert(kr == KERN_SUCCESS);
662
663 *infop = (hash_info_bucket_t *) copy;
664 *countp = actual;
665 }
666
667 return KERN_SUCCESS;
668 #endif /* MACH_VM_DEBUG */
669 }