/*
 * Source: apple/xnu.git (xnu-792.12.6) — osfmk/vm/vm_debug.c
 */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60 /*
61 * File: vm/vm_debug.c.
62 * Author: Rich Draves
63 * Date: March, 1990
64 *
65 * Exported kernel calls. See mach_debug/mach_debug.defs.
66 */
67 #include <mach_vm_debug.h>
68 #include <mach/kern_return.h>
69 #include <mach/mach_host_server.h>
70 #include <mach/vm_map_server.h>
71 #include <mach_debug/vm_info.h>
72 #include <mach_debug/page_info.h>
73 #include <mach_debug/hash_info.h>
74
75 #if MACH_VM_DEBUG
76 #include <mach/machine/vm_types.h>
77 #include <mach/memory_object_types.h>
78 #include <mach/vm_prot.h>
79 #include <mach/vm_inherit.h>
80 #include <mach/vm_param.h>
81 #include <kern/thread.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_kern.h>
84 #include <vm/vm_object.h>
85 #include <kern/task.h>
86 #include <kern/host.h>
87 #include <ipc/ipc_port.h>
88 #include <vm/vm_debug.h>
89 #endif
90
91 #if !MACH_VM_DEBUG
92 #define __DEBUG_ONLY __unused
93 #else /* !MACH_VM_DEBUG */
94 #define __DEBUG_ONLY
95 #endif /* !MACH_VM_DEBUG */
96
97 /*
98 * Routine: mach_vm_region_info [kernel call]
99 * Purpose:
100 * Retrieve information about a VM region,
101 * including info about the object chain.
102 * Conditions:
103 * Nothing locked.
104 * Returns:
105 * KERN_SUCCESS Retrieve region/object info.
106 * KERN_INVALID_TASK The map is null.
107 * KERN_NO_SPACE There is no entry at/after the address.
108 * KERN_RESOURCE_SHORTAGE Can't allocate memory.
109 */
110
kern_return_t
mach_vm_region_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_t		*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	/*
	 * Retry loop: each pass walks the entry's shadow chain, filling
	 * the out-of-line buffer as far as it goes and counting the
	 * objects actually present ("used").  If the buffer from the
	 * previous pass was too small, a bigger one is allocated and the
	 * whole walk is redone, since the chain may change between
	 * passes while nothing is locked.
	 */
	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		/*
		 * Descend through submaps to the terminal entry covering
		 * (or following) "address", using hand-over-hand read
		 * locks: the lower map is locked before the upper one is
		 * released, so the entry can never go away mid-descent.
		 */
		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
				(vm_map_address_t)address, &entry)) {

				/* no entry at "address"; take the next one */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					/* ran off the end of the map */
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		/* snapshot the region info while the map lock is held */
		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);	/* 0 on first pass */

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/*
		 * Lock the top object before dropping the map lock so the
		 * chain stays pinned as we start the walk.
		 */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk down the shadow chain, again hand-over-hand:
		 * exactly one object is locked at any time.  Entries
		 * beyond "room" are only counted, not stored.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				/* collapse the purgable enum into two bools */
				vio->vio_purgable =
					(cobject->purgable != VM_OBJECT_NONPURGABLE);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		/* everything fit (or there was nothing to store): done */
		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		/* double the count to leave slack for chain growth */
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* wire the buffer so it can be filled with locks held */
		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		/* unwire before handing the pages to the copy object */
		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		/* consume the used pages into an OOL copy object */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
311
/*
 * Temporary call for 64 bit data path interface transition
 */
kern_return_t
mach_vm_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	/*
	 * 64-bit variant of mach_vm_region_info(): identical algorithm,
	 * but snapshots into a vm_info_region_64_t.  See that routine
	 * for the full commentary; only the key points are repeated.
	 *
	 * Retry loop: walk the shadow chain, fill as much of the OOL
	 * buffer as fits, and redo the walk with a larger buffer if it
	 * was too small.
	 */
	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		/*
		 * Descend through submaps with hand-over-hand read locks
		 * until a terminal entry at/after "address" is found.
		 */
		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				/* no entry at "address"; take the next one */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					/* ran off the end of the map */
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		/* snapshot the region info while the map lock is held */
		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);	/* 0 on first pass */

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* lock the top object before dropping the map lock */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain hand-over-hand; objects beyond
		 * "room" are counted but not stored.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				/* collapse the purgable enum into two bools */
				vio->vio_purgable =
					(cobject->purgable != VM_OBJECT_NONPURGABLE);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		/* everything fit (or there was nothing to store): done */
		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		/* double the count to leave slack for chain growth */
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* wire the buffer so it can be filled with locks held */
		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		/* unwire before handing the pages to the copy object */
		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		/* consume the used pages into an OOL copy object */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
514 /*
515 * Return an array of virtual pages that are mapped to a task.
516 */
517 kern_return_t
518 vm_mapped_pages_info(
519 __DEBUG_ONLY vm_map_t map,
520 __DEBUG_ONLY page_address_array_t *pages,
521 __DEBUG_ONLY mach_msg_type_number_t *pages_count)
522 {
523 #if !MACH_VM_DEBUG
524 return KERN_FAILURE;
525 #else
526 pmap_t pmap;
527 vm_size_t size, size_used;
528 unsigned int actual, space;
529 page_address_array_t list;
530 vm_offset_t addr;
531
532 if (map == VM_MAP_NULL)
533 return (KERN_INVALID_ARGUMENT);
534
535 pmap = map->pmap;
536 size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
537 size = round_page_32(size);
538
539 for (;;) {
540 (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
541 (void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
542 vm_map_round_page(addr + size), FALSE);
543
544 list = (page_address_array_t) addr;
545 space = size / sizeof(vm_offset_t);
546
547 actual = pmap_list_resident_pages(pmap,
548 list,
549 space);
550 if (actual <= space)
551 break;
552
553 /*
554 * Free memory if not enough
555 */
556 (void) kmem_free(ipc_kernel_map, addr, size);
557
558 /*
559 * Try again, doubling the size
560 */
561 size = round_page_32(actual * sizeof(vm_offset_t));
562 }
563 if (actual == 0) {
564 *pages = 0;
565 *pages_count = 0;
566 (void) kmem_free(ipc_kernel_map, addr, size);
567 }
568 else {
569 *pages_count = actual;
570 size_used = round_page_32(actual * sizeof(vm_offset_t));
571 (void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
572 vm_map_round_page(addr + size),
573 VM_PROT_READ|VM_PROT_WRITE, FALSE);
574 (void) vm_map_copyin(ipc_kernel_map,
575 (vm_map_address_t)addr,
576 (vm_map_size_t)size_used,
577 TRUE,
578 (vm_map_copy_t *)pages);
579 if (size_used != size) {
580 (void) kmem_free(ipc_kernel_map,
581 addr + size_used,
582 size - size_used);
583 }
584 }
585
586 return (KERN_SUCCESS);
587 #endif /* MACH_VM_DEBUG */
588 }
589
590 /*
591 * Routine: host_virtual_physical_table_info
592 * Purpose:
593 * Return information about the VP table.
594 * Conditions:
595 * Nothing locked. Obeys CountInOut protocol.
596 * Returns:
597 * KERN_SUCCESS Returned information.
598 * KERN_INVALID_HOST The host is null.
599 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
600 */
601
602 kern_return_t
603 host_virtual_physical_table_info(
604 __DEBUG_ONLY host_t host,
605 __DEBUG_ONLY hash_info_bucket_array_t *infop,
606 __DEBUG_ONLY mach_msg_type_number_t *countp)
607 {
608 #if !MACH_VM_DEBUG
609 return KERN_FAILURE;
610 #else
611 vm_offset_t addr;
612 vm_size_t size = 0;
613 hash_info_bucket_t *info;
614 unsigned int potential, actual;
615 kern_return_t kr;
616
617 if (host == HOST_NULL)
618 return KERN_INVALID_HOST;
619
620 /* start with in-line data */
621
622 info = *infop;
623 potential = *countp;
624
625 for (;;) {
626 actual = vm_page_info(info, potential);
627 if (actual <= potential)
628 break;
629
630 /* allocate more memory */
631
632 if (info != *infop)
633 kmem_free(ipc_kernel_map, addr, size);
634
635 size = round_page_32(actual * sizeof *info);
636 kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
637 if (kr != KERN_SUCCESS)
638 return KERN_RESOURCE_SHORTAGE;
639
640 info = (hash_info_bucket_t *) addr;
641 potential = size/sizeof *info;
642 }
643
644 if (info == *infop) {
645 /* data fit in-line; nothing to deallocate */
646
647 *countp = actual;
648 } else if (actual == 0) {
649 kmem_free(ipc_kernel_map, addr, size);
650
651 *countp = 0;
652 } else {
653 vm_map_copy_t copy;
654 vm_size_t used;
655
656 used = round_page_32(actual * sizeof *info);
657
658 if (used != size)
659 kmem_free(ipc_kernel_map, addr + used, size - used);
660
661 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
662 (vm_map_size_t)used, TRUE, &copy);
663 assert(kr == KERN_SUCCESS);
664
665 *infop = (hash_info_bucket_t *) copy;
666 *countp = actual;
667 }
668
669 return KERN_SUCCESS;
670 #endif /* MACH_VM_DEBUG */
671 }