]> git.saurik.com Git - apple/xnu.git/blob - osfmk/vm/vm_debug.c
xnu-1228.5.18.tar.gz
[apple/xnu.git] / osfmk / vm / vm_debug.c
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_debug.c.
60 * Author: Rich Draves
61 * Date: March, 1990
62 *
63 * Exported kernel calls. See mach_debug/mach_debug.defs.
64 */
65 #include <mach_vm_debug.h>
66 #include <mach/kern_return.h>
67 #include <mach/mach_host_server.h>
68 #include <mach/vm_map_server.h>
69 #include <mach_debug/vm_info.h>
70 #include <mach_debug/page_info.h>
71 #include <mach_debug/hash_info.h>
72
73 #if MACH_VM_DEBUG
74 #include <mach/machine/vm_types.h>
75 #include <mach/memory_object_types.h>
76 #include <mach/vm_prot.h>
77 #include <mach/vm_inherit.h>
78 #include <mach/vm_param.h>
79 #include <kern/thread.h>
80 #include <vm/vm_map.h>
81 #include <vm/vm_kern.h>
82 #include <vm/vm_object.h>
83 #include <kern/task.h>
84 #include <kern/host.h>
85 #include <ipc/ipc_port.h>
86 #include <vm/vm_debug.h>
87 #endif
88
89 #if !MACH_VM_DEBUG
90 #define __DEBUG_ONLY __unused
91 #else /* !MACH_VM_DEBUG */
92 #define __DEBUG_ONLY
93 #endif /* !MACH_VM_DEBUG */
94
95 /*
96 * Routine: mach_vm_region_info [kernel call]
97 * Purpose:
98 * Retrieve information about a VM region,
99 * including info about the object chain.
100 * Conditions:
101 * Nothing locked.
102 * Returns:
103 * KERN_SUCCESS Retrieve region/object info.
104 * KERN_INVALID_TASK The map is null.
105 * KERN_NO_SPACE There is no entry at/after the address.
106 * KERN_RESOURCE_SHORTAGE Can't allocate memory.
107 */
108
/*
 * Snapshot one region of "map" plus every object in its shadow chain.
 * The object array is returned out-of-line as a vm_map_copy_t.
 * Strategy: fill a buffer sized from the previous attempt while walking
 * the chain; if the chain is longer than the buffer, grow the buffer
 * and retry the whole walk from scratch.
 */
kern_return_t
mach_vm_region_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_t		*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	/*
	 * Retry loop: each pass re-walks the map and the shadow chain.
	 * On the first pass size == 0, so room == 0 and the (still
	 * uninitialized) addr is never dereferenced -- the pass only
	 * counts objects into "used".
	 */
	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
				(vm_map_address_t)address, &entry)) {

				/* no entry covers "address"; take the next one */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					/* no entry at or after the address */
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/*
			 * Move down to the lower map: hand-over-hand,
			 * lock the child before dropping the parent.
			 */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		/* Copy out the entry fields while the map lock is held. */
		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* Take the first object's lock before dropping the map lock. */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain with lock coupling (next object
		 * locked before the current one is released).  Objects
		 * beyond "room" are only counted, forcing a retry below.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object = (vm_offset_t) cobject;
				vio->vio_size = cobject->size;
				vio->vio_ref_count = cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy = (vm_offset_t) cobject->copy;
				vio->vio_shadow = (vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset = cobject->shadow_offset;
				vio->vio_paging_offset = cobject->paging_offset;
				vio->vio_copy_strategy = cobject->copy_strategy;
				vio->vio_last_alloc = cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created = cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready = cobject->pager_ready;
				vio->vio_can_persist = cobject->can_persist;
				vio->vio_internal = cobject->internal;
				vio->vio_temporary = cobject->temporary;
				vio->vio_alive = cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;	/* everything fit; snapshot is complete */

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		/* double the needed size: slack for chains that grow between passes */
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* wire the buffer so the walk can fill it without faulting */
		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		/* unwire before handing the pages to vm_map_copyin */
		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		/* TRUE: the copy consumes (deallocates) the source range */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
307
/*
 * Temporary call for the 64-bit data path interface transition
 */
311
/*
 * 64-bit variant of mach_vm_region_info: identical walk and retry
 * strategy, but the region snapshot is returned in vm_info_region_64_t.
 */
kern_return_t
mach_vm_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	/*
	 * Retry loop: first pass has room == 0, so it only counts the
	 * shadow chain; addr is not touched until after the allocation
	 * at the bottom of the loop.
	 */
	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				/* no entry covers "address"; take the next one */
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					/* no entry at or after the address */
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/*
			 * Move down to the lower map: hand-over-hand,
			 * lock the child before dropping the parent.
			 */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		/* Copy out the entry fields while the map lock is held. */
		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		/* Take the first object's lock before dropping the map lock. */
		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		/*
		 * Walk the shadow chain with lock coupling; objects past
		 * "room" are only counted, forcing a retry below.
		 */
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object = (vm_offset_t) cobject;
				vio->vio_size = cobject->size;
				vio->vio_ref_count = cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy = (vm_offset_t) cobject->copy;
				vio->vio_shadow = (vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset = cobject->shadow_offset;
				vio->vio_paging_offset = cobject->paging_offset;
				vio->vio_copy_strategy = cobject->copy_strategy;
				vio->vio_last_alloc = cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created = cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready = cobject->pager_ready;
				vio->vio_can_persist = cobject->can_persist;
				vio->vio_internal = cobject->internal;
				vio->vio_temporary = cobject->temporary;
				vio->vio_alive = cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;	/* everything fit; snapshot is complete */

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		/* double the needed size: slack for chains that grow between passes */
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		/* wire the buffer so the walk can fill it without faulting */
		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		/* unwire before handing the pages to vm_map_copyin */
		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		/* TRUE: the copy consumes (deallocates) the source range */
		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
508 /*
509 * Return an array of virtual pages that are mapped to a task.
510 */
/*
 * Return (out-of-line, as a vm_map_copy_t in *pages) the list of
 * virtual page addresses resident in "map"'s pmap, and their count in
 * *pages_count.  The buffer is sized from pmap_resident_count() and
 * doubled-and-retried if pmap_list_resident_pages() reports it needs
 * more room.
 */
kern_return_t
vm_mapped_pages_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY page_address_array_t	*pages,
	__DEBUG_ONLY mach_msg_type_number_t	*pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t pmap;
	vm_size_t size, size_used;
	unsigned int actual, space;
	page_address_array_t list;
	vm_offset_t addr;

	if (map == VM_MAP_NULL)
		return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	/* initial estimate: one slot per currently-resident page */
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = round_page_32(size);

	for (;;) {
		(void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		/*
		 * NOTE(review): vm_allocate returns pageable (unwired)
		 * memory, so this unwire looks redundant -- preserved
		 * as-is; confirm against vm_map semantics before removing.
		 */
		(void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				     vm_map_round_page(addr + size), FALSE);

		list = (page_address_array_t) addr;
		space = size / sizeof(vm_offset_t);

		actual = pmap_list_resident_pages(pmap,
					list,
					space);
		if (actual <= space)
			break;

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again, doubling the size
		 */
		/* "doubling" per the comment above; the computation actually
		 * resizes to exactly what the last pass said it needed */
		size = round_page_32(actual * sizeof(vm_offset_t));
	}
	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	}
	else {
		*pages_count = actual;
		size_used = round_page_32(actual * sizeof(vm_offset_t));
		/*
		 * NOTE(review): the wire range is bounded by "size" even
		 * though only "size_used" bytes survive (the tail is freed
		 * below) -- looks like it should be size_used; TODO confirm.
		 */
		(void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size),
				   VM_PROT_READ|VM_PROT_WRITE, FALSE);
		/* TRUE: the copy consumes (deallocates) the source range */
		(void) vm_map_copyin(ipc_kernel_map,
				     (vm_map_address_t)addr,
				     (vm_map_size_t)size_used,
				     TRUE,
				     (vm_map_copy_t *)pages);
		if (size_used != size) {
			(void) kmem_free(ipc_kernel_map,
					 addr + size_used,
					 size - size_used);
		}
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}
583
584 /*
585 * Routine: host_virtual_physical_table_info
586 * Purpose:
587 * Return information about the VP table.
588 * Conditions:
589 * Nothing locked. Obeys CountInOut protocol.
590 * Returns:
591 * KERN_SUCCESS Returned information.
592 * KERN_INVALID_HOST The host is null.
593 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
594 */
595
596 kern_return_t
597 host_virtual_physical_table_info(
598 __DEBUG_ONLY host_t host,
599 __DEBUG_ONLY hash_info_bucket_array_t *infop,
600 __DEBUG_ONLY mach_msg_type_number_t *countp)
601 {
602 #if !MACH_VM_DEBUG
603 return KERN_FAILURE;
604 #else
605 vm_offset_t addr;
606 vm_size_t size = 0;
607 hash_info_bucket_t *info;
608 unsigned int potential, actual;
609 kern_return_t kr;
610
611 if (host == HOST_NULL)
612 return KERN_INVALID_HOST;
613
614 /* start with in-line data */
615
616 info = *infop;
617 potential = *countp;
618
619 for (;;) {
620 actual = vm_page_info(info, potential);
621 if (actual <= potential)
622 break;
623
624 /* allocate more memory */
625
626 if (info != *infop)
627 kmem_free(ipc_kernel_map, addr, size);
628
629 size = round_page_32(actual * sizeof *info);
630 kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
631 if (kr != KERN_SUCCESS)
632 return KERN_RESOURCE_SHORTAGE;
633
634 info = (hash_info_bucket_t *) addr;
635 potential = size/sizeof *info;
636 }
637
638 if (info == *infop) {
639 /* data fit in-line; nothing to deallocate */
640
641 *countp = actual;
642 } else if (actual == 0) {
643 kmem_free(ipc_kernel_map, addr, size);
644
645 *countp = 0;
646 } else {
647 vm_map_copy_t copy;
648 vm_size_t used;
649
650 used = round_page_32(actual * sizeof *info);
651
652 if (used != size)
653 kmem_free(ipc_kernel_map, addr + used, size - used);
654
655 kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
656 (vm_map_size_t)used, TRUE, &copy);
657 assert(kr == KERN_SUCCESS);
658
659 *infop = (hash_info_bucket_t *) copy;
660 *countp = actual;
661 }
662
663 return KERN_SUCCESS;
664 #endif /* MACH_VM_DEBUG */
665 }