/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_debug.c
 *	Author:	Rich Draves
 *	Date:	March, 1990
 *
 *	Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif

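/*
 * When MACH_VM_DEBUG is configured out, the calls below compile to
 * stubs that return KERN_FAILURE without touching their arguments,
 * so __DEBUG_ONLY marks each argument __unused in that build to keep
 * the compiler quiet.
 */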
#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */

/*
 *	Routine:	mach_vm_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
 */

kern_return_t
mach_vm_region_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_t		*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

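	/*
	 * Each pass of this loop walks the region's object chain with
	 * whatever buffer is on hand; the first pass (size == 0, hence
	 * room == 0) only counts objects.  If the buffer turns out too
	 * small, it is grown and the walk restarted from scratch, since
	 * the map may have changed while nothing was locked.
	 */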
	for (;;) {
		vm_map_t cmap;		/* current map in traversal */
		vm_map_t nmap;		/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
				(vm_map_address_t)address, &entry)) {

				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

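			/*
			 * Hand-over-hand locking: take the submap's read
			 * lock before dropping the parent's, so that some
			 * map in the chain stays locked throughout the
			 * descent.
			 */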
			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

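		/*
		 * Walk the shadow chain from the mapped object toward its
		 * backing objects, again hand-over-hand on the object
		 * locks.  Entries are recorded only while they fit in the
		 * buffer; "used" keeps the true chain length either way.
		 */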
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_OBJECT_NONPURGABLE);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */
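		/*
		 * Ask for twice what this pass needed; the shadow chain
		 * can grow again while nothing is locked.  The buffer is
		 * wired so the walk above can store into it while holding
		 * object locks without taking a page fault.
		 */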

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */
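	/*
	 * vm_map_copyin with src_destroy == TRUE strips the pages from
	 * ipc_kernel_map into a vm_map_copy_t, which MIG returns to the
	 * caller as out-of-line data; only the unused tail of the buffer
	 * still needs an explicit kmem_free.
	 */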

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
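
/*
 * Illustrative only (not in the original source): on a kernel built
 * with MACH_VM_DEBUG, a client holding a task's map port could walk
 * all of its regions through the MIG stub from mach_debug.defs:
 *
 *	vm_offset_t address = 0;
 *	vm_info_region_t region;
 *	vm_info_object_array_t objects;
 *	mach_msg_type_number_t count;
 *
 *	while (mach_vm_region_info(task, address, &region,
 *				   &objects, &count) == KERN_SUCCESS) {
 *		examine region and objects[0] .. objects[count - 1];
 *		if (count != 0)
 *			(void) vm_deallocate(mach_task_self(),
 *					     (vm_offset_t) objects,
 *					     count * sizeof(vm_info_object_t));
 *		address = region.vir_end;
 *	}
 */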

/*
 *	Temporary call for 64 bit data path interface transition.
 *	Identical to mach_vm_region_info except that it returns a
 *	vm_info_region_64_t.
 */

kern_return_t
mach_vm_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;		/* current map in traversal */
		vm_map_t nmap;		/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_OBJECT_NONPURGABLE);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_OBJECT_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_OBJECT_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page_32(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page_32(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
/*
 *	Return an array of virtual pages that are mapped to a task.
 */
kern_return_t
vm_mapped_pages_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY page_address_array_t	*pages,
	__DEBUG_ONLY mach_msg_type_number_t	*pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t pmap;
	vm_size_t size, size_used;
	unsigned int actual, space;
	page_address_array_t list;
	vm_offset_t addr;

	if (map == VM_MAP_NULL)
		return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = round_page_32(size);

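	/*
	 * The resident-page count only estimates the buffer needed: the
	 * pmap can grow while it is being scanned.  If the scan reports
	 * more pages than the buffer holds, the buffer is discarded and
	 * resized from the reported count.
	 */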
	for (;;) {
		(void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		(void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				     vm_map_round_page(addr + size), FALSE);

		list = (page_address_array_t) addr;
		space = size / sizeof(vm_offset_t);

		actual = pmap_list_resident_pages(pmap,
						  list,
						  space);
		if (actual <= space)
			break;

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again, sized to the actual resident count
		 */
		size = round_page_32(actual * sizeof(vm_offset_t));
	}
	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	}
	else {
		*pages_count = actual;
		size_used = round_page_32(actual * sizeof(vm_offset_t));
		(void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size),
				   VM_PROT_READ|VM_PROT_WRITE, FALSE);
		(void) vm_map_copyin(ipc_kernel_map,
				     (vm_map_address_t)addr,
				     (vm_map_size_t)size_used,
				     TRUE,
				     (vm_map_copy_t *)pages);
		if (size_used != size) {
			(void) kmem_free(ipc_kernel_map,
					 addr + size_used,
					 size - size_used);
		}
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}

/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t			host,
	__DEBUG_ONLY hash_info_bucket_array_t	*infop,
	__DEBUG_ONLY mach_msg_type_number_t	*countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr;
	vm_size_t size = 0;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;
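
	/*
	 * CountInOut: the request carries the caller's in-line buffer
	 * and its capacity.  If the table fits, it is returned in-line
	 * in the reply; otherwise a pageable kernel buffer is
	 * substituted and sent back out-of-line.
	 */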

	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page_32(actual * sizeof *info);
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = size / sizeof *info;
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = round_page_32(actual * sizeof *info);

		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}