/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *  File:    vm/vm_debug.c
 *  Author:  Rich Draves
 *  Date:    March, 1990
 *
 *  Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif

#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */

/*
 *  Routine:    mach_vm_region_info [kernel call]
 *  Purpose:
 *      Retrieve information about a VM region,
 *      including info about the object chain.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      KERN_SUCCESS            Retrieved region/object info.
 *      KERN_INVALID_TASK       The map is null.
 *      KERN_NO_SPACE           There is no entry at/after the address.
 *      KERN_RESOURCE_SHORTAGE  Can't allocate memory.
 */
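/*
 * Example (a hypothetical user-side caller via the MIG interface in
 * mach_debug/mach_debug.defs; local names are illustrative only):
 *
 *      vm_info_region_t region;
 *      vm_info_object_array_t objects;     // returned out-of-line
 *      mach_msg_type_number_t objectCount;
 *
 *      kr = mach_vm_region_info(task, address,
 *                               &region, &objects, &objectCount);
 *      if (kr == KERN_SUCCESS) {
 *          // objects[0..objectCount-1] describe the shadow chain,
 *          // starting at the entry's top object; the array is mapped
 *          // into the caller and should be vm_deallocate'd when done.
 *      }
 */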

kern_return_t
mach_vm_region_info(
    __DEBUG_ONLY vm_map_t map,
    __DEBUG_ONLY vm_offset_t address,
    __DEBUG_ONLY vm_info_region_t *regionp,
    __DEBUG_ONLY vm_info_object_array_t *objectsp,
    __DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
    return KERN_FAILURE;
#else
    vm_map_copy_t copy;
    vm_offset_t addr;       /* memory for OOL data */
    vm_size_t size;         /* size of the memory */
    unsigned int room;      /* room for this many objects */
    unsigned int used;      /* actually this many objects */
    vm_info_region_t region;
    kern_return_t kr;

    if (map == VM_MAP_NULL)
        return KERN_INVALID_TASK;

    size = 0;               /* no memory allocated yet */

    for (;;) {
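        /*
         * Guess-and-retry: fill whatever buffer we have (none on the
         * first pass), count the objects actually present, and if they
         * did not all fit, allocate a bigger buffer and start over.
         */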
        vm_map_t cmap;      /* current map in traversal */
        vm_map_t nmap;      /* next map to look at */
        vm_map_entry_t entry;
        vm_object_t object, cobject, nobject;

        /* nothing is locked */

        vm_map_lock_read(map);
        for (cmap = map;; cmap = nmap) {
            /* cmap is read-locked */

            if (!vm_map_lookup_entry(cmap,
                    (vm_map_address_t)address, &entry)) {
                entry = entry->vme_next;
                if (entry == vm_map_to_entry(cmap)) {
                    vm_map_unlock_read(cmap);
                    if (size != 0)
                        kmem_free(ipc_kernel_map,
                                  addr, size);
                    return KERN_NO_SPACE;
                }
            }

            if (entry->is_sub_map)
                nmap = entry->object.sub_map;
            else
                break;

            /* move down to the lower map */

            vm_map_lock_read(nmap);
            vm_map_unlock_read(cmap);
        }

        /* cmap is read-locked; we have a real entry */

        object = entry->object.vm_object;
        region.vir_start = entry->vme_start;
        region.vir_end = entry->vme_end;
        region.vir_object = (vm_offset_t) object;
        region.vir_offset = entry->offset;
        region.vir_needs_copy = entry->needs_copy;
        region.vir_protection = entry->protection;
        region.vir_max_protection = entry->max_protection;
        region.vir_inheritance = entry->inheritance;
        region.vir_wired_count = entry->wired_count;
        region.vir_user_wired_count = entry->user_wired_count;

        used = 0;
        room = size / sizeof(vm_info_object_t);

        if (object == VM_OBJECT_NULL) {
            vm_map_unlock_read(cmap);
            /* no memory needed */
            break;
        }

        vm_object_lock(object);
        vm_map_unlock_read(cmap);

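        /*
         * Walk the shadow chain hand-over-hand: each shadow object is
         * locked before its predecessor is released, so the chain
         * cannot be torn down underneath us.
         */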
        for (cobject = object;; cobject = nobject) {
            /* cobject is locked */

            if (used < room) {
                vm_info_object_t *vio =
                    &((vm_info_object_t *) addr)[used];

                vio->vio_object = (vm_offset_t) cobject;
                vio->vio_size = cobject->size;
                vio->vio_ref_count = cobject->ref_count;
                vio->vio_resident_page_count = cobject->resident_page_count;
                vio->vio_absent_count = cobject->absent_count;
                vio->vio_copy = (vm_offset_t) cobject->copy;
                vio->vio_shadow = (vm_offset_t) cobject->shadow;
                vio->vio_shadow_offset = cobject->shadow_offset;
                vio->vio_paging_offset = cobject->paging_offset;
                vio->vio_copy_strategy = cobject->copy_strategy;
                vio->vio_last_alloc = cobject->last_alloc;
                vio->vio_paging_in_progress = cobject->paging_in_progress;
                vio->vio_pager_created = cobject->pager_created;
                vio->vio_pager_initialized = cobject->pager_initialized;
                vio->vio_pager_ready = cobject->pager_ready;
                vio->vio_can_persist = cobject->can_persist;
                vio->vio_internal = cobject->internal;
                vio->vio_temporary = cobject->temporary;
                vio->vio_alive = cobject->alive;
                vio->vio_purgable =
                    (cobject->purgable != VM_OBJECT_NONPURGABLE);
                vio->vio_purgable_volatile =
                    (cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
                     cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
            }

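            /* count every object in the chain, even those beyond the
               buffer, so a retry pass can size the allocation */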
            used++;
            nobject = cobject->shadow;
            if (nobject == VM_OBJECT_NULL) {
                vm_object_unlock(cobject);
                break;
            }

            vm_object_lock(nobject);
            vm_object_unlock(cobject);
        }

        /* nothing locked */

        if (used <= room)
            break;

        /* must allocate more memory */

        if (size != 0)
            kmem_free(ipc_kernel_map, addr, size);
        size = round_page_32(2 * used * sizeof(vm_info_object_t));

        kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
        if (kr != KERN_SUCCESS)
            return KERN_RESOURCE_SHORTAGE;

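        /*
         * Wire the buffer: it is filled while object locks are held,
         * so it must not fault.
         */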
        kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
                         vm_map_round_page(addr + size),
                         VM_PROT_READ|VM_PROT_WRITE, FALSE);
        assert(kr == KERN_SUCCESS);
    }

    /* free excess memory; make remaining memory pageable */

    if (used == 0) {
        copy = VM_MAP_COPY_NULL;

        if (size != 0)
            kmem_free(ipc_kernel_map, addr, size);
    } else {
        vm_size_t size_used =
            round_page_32(used * sizeof(vm_info_object_t));

        kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
                           vm_map_round_page(addr + size_used), FALSE);
        assert(kr == KERN_SUCCESS);

        kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                           (vm_map_size_t)size_used, TRUE, &copy);
        assert(kr == KERN_SUCCESS);

        if (size != size_used)
            kmem_free(ipc_kernel_map,
                      addr + size_used, size - size_used);
    }

    *regionp = region;
    *objectsp = (vm_info_object_array_t) copy;
    *objectsCntp = used;
    return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

/*
 * Temporary call for the 64-bit data path interface transition.
 * Identical to mach_vm_region_info except that it returns the wider
 * vm_info_region_64_t.
 */

kern_return_t
mach_vm_region_info_64(
    __DEBUG_ONLY vm_map_t map,
    __DEBUG_ONLY vm_offset_t address,
    __DEBUG_ONLY vm_info_region_64_t *regionp,
    __DEBUG_ONLY vm_info_object_array_t *objectsp,
    __DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
    return KERN_FAILURE;
#else
    vm_map_copy_t copy;
    vm_offset_t addr;       /* memory for OOL data */
    vm_size_t size;         /* size of the memory */
    unsigned int room;      /* room for this many objects */
    unsigned int used;      /* actually this many objects */
    vm_info_region_64_t region;
    kern_return_t kr;

    if (map == VM_MAP_NULL)
        return KERN_INVALID_TASK;

    size = 0;               /* no memory allocated yet */

    for (;;) {
        vm_map_t cmap;      /* current map in traversal */
        vm_map_t nmap;      /* next map to look at */
        vm_map_entry_t entry;
        vm_object_t object, cobject, nobject;

        /* nothing is locked */

        vm_map_lock_read(map);
        for (cmap = map;; cmap = nmap) {
            /* cmap is read-locked */

            if (!vm_map_lookup_entry(cmap, address, &entry)) {
                entry = entry->vme_next;
                if (entry == vm_map_to_entry(cmap)) {
                    vm_map_unlock_read(cmap);
                    if (size != 0)
                        kmem_free(ipc_kernel_map,
                                  addr, size);
                    return KERN_NO_SPACE;
                }
            }

            if (entry->is_sub_map)
                nmap = entry->object.sub_map;
            else
                break;

            /* move down to the lower map */

            vm_map_lock_read(nmap);
            vm_map_unlock_read(cmap);
        }

        /* cmap is read-locked; we have a real entry */

        object = entry->object.vm_object;
        region.vir_start = entry->vme_start;
        region.vir_end = entry->vme_end;
        region.vir_object = (vm_offset_t) object;
        region.vir_offset = entry->offset;
        region.vir_needs_copy = entry->needs_copy;
        region.vir_protection = entry->protection;
        region.vir_max_protection = entry->max_protection;
        region.vir_inheritance = entry->inheritance;
        region.vir_wired_count = entry->wired_count;
        region.vir_user_wired_count = entry->user_wired_count;

        used = 0;
        room = size / sizeof(vm_info_object_t);

        if (object == VM_OBJECT_NULL) {
            vm_map_unlock_read(cmap);
            /* no memory needed */
            break;
        }

        vm_object_lock(object);
        vm_map_unlock_read(cmap);

        for (cobject = object;; cobject = nobject) {
            /* cobject is locked */

            if (used < room) {
                vm_info_object_t *vio =
                    &((vm_info_object_t *) addr)[used];

                vio->vio_object = (vm_offset_t) cobject;
                vio->vio_size = cobject->size;
                vio->vio_ref_count = cobject->ref_count;
                vio->vio_resident_page_count = cobject->resident_page_count;
                vio->vio_absent_count = cobject->absent_count;
                vio->vio_copy = (vm_offset_t) cobject->copy;
                vio->vio_shadow = (vm_offset_t) cobject->shadow;
                vio->vio_shadow_offset = cobject->shadow_offset;
                vio->vio_paging_offset = cobject->paging_offset;
                vio->vio_copy_strategy = cobject->copy_strategy;
                vio->vio_last_alloc = cobject->last_alloc;
                vio->vio_paging_in_progress = cobject->paging_in_progress;
                vio->vio_pager_created = cobject->pager_created;
                vio->vio_pager_initialized = cobject->pager_initialized;
                vio->vio_pager_ready = cobject->pager_ready;
                vio->vio_can_persist = cobject->can_persist;
                vio->vio_internal = cobject->internal;
                vio->vio_temporary = cobject->temporary;
                vio->vio_alive = cobject->alive;
                vio->vio_purgable =
                    (cobject->purgable != VM_OBJECT_NONPURGABLE);
                vio->vio_purgable_volatile =
                    (cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
                     cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
            }

            used++;
            nobject = cobject->shadow;
            if (nobject == VM_OBJECT_NULL) {
                vm_object_unlock(cobject);
                break;
            }

            vm_object_lock(nobject);
            vm_object_unlock(cobject);
        }

        /* nothing locked */

        if (used <= room)
            break;

        /* must allocate more memory */

        if (size != 0)
            kmem_free(ipc_kernel_map, addr, size);
        size = round_page_32(2 * used * sizeof(vm_info_object_t));

        kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
        if (kr != KERN_SUCCESS)
            return KERN_RESOURCE_SHORTAGE;

        kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
                         vm_map_round_page(addr + size),
                         VM_PROT_READ|VM_PROT_WRITE, FALSE);
        assert(kr == KERN_SUCCESS);
    }

    /* free excess memory; make remaining memory pageable */

    if (used == 0) {
        copy = VM_MAP_COPY_NULL;

        if (size != 0)
            kmem_free(ipc_kernel_map, addr, size);
    } else {
        vm_size_t size_used =
            round_page_32(used * sizeof(vm_info_object_t));

        kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
                           vm_map_round_page(addr + size_used), FALSE);
        assert(kr == KERN_SUCCESS);

        kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                           (vm_map_size_t)size_used, TRUE, &copy);
        assert(kr == KERN_SUCCESS);

        if (size != size_used)
            kmem_free(ipc_kernel_map,
                      addr + size_used, size - size_used);
    }

    *regionp = region;
    *objectsp = (vm_info_object_array_t) copy;
    *objectsCntp = used;
    return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

/*
 * Return an array of the virtual pages that are mapped into a task's
 * address space.
 */
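/*
 * Example (a hypothetical user-side caller; the page list is returned
 * out-of-line and should be vm_deallocate'd when no longer needed):
 *
 *      page_address_array_t pages;
 *      mach_msg_type_number_t pageCount;
 *
 *      kr = vm_mapped_pages_info(task, &pages, &pageCount);
 */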
kern_return_t
vm_mapped_pages_info(
    __DEBUG_ONLY vm_map_t map,
    __DEBUG_ONLY page_address_array_t *pages,
    __DEBUG_ONLY mach_msg_type_number_t *pages_count)
{
#if !MACH_VM_DEBUG
    return KERN_FAILURE;
#else
    pmap_t pmap;
    vm_size_t size, size_used;
    unsigned int actual, space;
    page_address_array_t list;
    vm_offset_t addr;

    if (map == VM_MAP_NULL)
        return (KERN_INVALID_ARGUMENT);

    pmap = map->pmap;
    size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
    size = round_page_32(size);

    for (;;) {
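        /*
         * The resident count is only a starting guess; pages can become
         * resident while the list is being built, so retry with a larger
         * buffer until the snapshot fits.
         */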
        (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
        (void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
                             vm_map_round_page(addr + size), FALSE);

        list = (page_address_array_t) addr;
        space = size / sizeof(vm_offset_t);

        actual = pmap_list_resident_pages(pmap,
                                          list,
                                          space);
        if (actual <= space)
            break;

        /*
         * Free memory if not enough
         */
        (void) kmem_free(ipc_kernel_map, addr, size);

        /*
         * Try again, with the size that pmap_list_resident_pages reported
         */
        size = round_page_32(actual * sizeof(vm_offset_t));
    }

    if (actual == 0) {
        *pages = 0;
        *pages_count = 0;
        (void) kmem_free(ipc_kernel_map, addr, size);
    } else {
        *pages_count = actual;
        size_used = round_page_32(actual * sizeof(vm_offset_t));
        (void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
                           vm_map_round_page(addr + size),
                           VM_PROT_READ|VM_PROT_WRITE, FALSE);
        (void) vm_map_copyin(ipc_kernel_map,
                             (vm_map_address_t)addr,
                             (vm_map_size_t)size_used,
                             TRUE,
                             (vm_map_copy_t *)pages);
        if (size_used != size) {
            (void) kmem_free(ipc_kernel_map,
                             addr + size_used,
                             size - size_used);
        }
    }

    return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}


/*
 *  Routine:    host_virtual_physical_table_info
 *  Purpose:
 *      Return information about the VP table.
 *  Conditions:
 *      Nothing locked.  Obeys CountInOut protocol.
 *  Returns:
 *      KERN_SUCCESS            Returned information.
 *      KERN_INVALID_HOST       The host is null.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */
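/*
 * CountInOut: the MIG stub passes in the caller's in-line buffer and its
 * capacity in *countp.  If the data fits, it is returned in-line; if not,
 * this routine allocates out-of-line memory and returns it through *infop
 * as a vm_map_copy_t, which the stub maps into the caller.  A hypothetical
 * user-side call might look like:
 *
 *      hash_info_bucket_t buckets[64];      // in-line buffer, example size
 *      hash_info_bucket_array_t info = buckets;
 *      mach_msg_type_number_t count = 64;
 *
 *      kr = host_virtual_physical_table_info(mach_host_self(),
 *                                            &info, &count);
 *      // on success, info points either at buckets or at newly
 *      // mapped out-of-line memory
 */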

kern_return_t
host_virtual_physical_table_info(
    __DEBUG_ONLY host_t host,
    __DEBUG_ONLY hash_info_bucket_array_t *infop,
    __DEBUG_ONLY mach_msg_type_number_t *countp)
{
#if !MACH_VM_DEBUG
    return KERN_FAILURE;
#else
    vm_offset_t addr;
    vm_size_t size = 0;
    hash_info_bucket_t *info;
    unsigned int potential, actual;
    kern_return_t kr;

    if (host == HOST_NULL)
        return KERN_INVALID_HOST;

    /* start with in-line data */

    info = *infop;
    potential = *countp;

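    /*
     * vm_page_info returns the total bucket count; keep growing the
     * buffer until one pass fits completely.
     */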
    for (;;) {
        actual = vm_page_info(info, potential);
        if (actual <= potential)
            break;

        /* allocate more memory */

        if (info != *infop)
            kmem_free(ipc_kernel_map, addr, size);

        size = round_page_32(actual * sizeof *info);
        kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
        if (kr != KERN_SUCCESS)
            return KERN_RESOURCE_SHORTAGE;

        info = (hash_info_bucket_t *) addr;
        potential = size / sizeof *info;
    }

    if (info == *infop) {
        /* data fit in-line; nothing to deallocate */

        *countp = actual;
    } else if (actual == 0) {
        kmem_free(ipc_kernel_map, addr, size);

        *countp = 0;
    } else {
        vm_map_copy_t copy;
        vm_size_t used;

        used = round_page_32(actual * sizeof *info);

        if (used != size)
            kmem_free(ipc_kernel_map, addr + used, size - used);

        kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                           (vm_map_size_t)used, TRUE, &copy);
        assert(kr == KERN_SUCCESS);

        *infop = (hash_info_bucket_t *) copy;
        *countp = actual;
    }

    return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}