/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_debug.c.
 *	Author:	Rich Draves
 *	Date:	March, 1990
 *
 *	Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif

/*
 *	Routine:	mach_vm_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
 */

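/*
 * Illustrative usage (a sketch, not part of this interface): a client
 * of a MACH_VM_DEBUG kernel would normally reach this call through the
 * MIG-generated stub declared via mach_debug/mach_debug.defs, roughly:
 *
 *	vm_info_region_t	region;
 *	vm_info_object_array_t	objects;
 *	mach_msg_type_number_t	count;
 *	kern_return_t		kr;
 *
 *	kr = mach_vm_region_info(mach_task_self(), address,
 *				 &region, &objects, &count);
 *	if (kr == KERN_SUCCESS && count != 0)
 *		(void) vm_deallocate(mach_task_self(),
 *				     (vm_offset_t) objects,
 *				     count * sizeof(vm_info_object_t));
 *
 * The object array arrives as out-of-line memory, which is why the
 * caller must deallocate it.  The exact stub name and signature are an
 * assumption from the .defs file; on kernels built without
 * MACH_VM_DEBUG the call simply returns KERN_FAILURE.
 */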
kern_return_t
mach_vm_region_info(
	vm_map_t		map,
	vm_offset_t		address,
	vm_info_region_t	*regionp,
	vm_info_object_array_t	*objectsp,
	mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

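	/*
	 * Sizing protocol: walk the entry's shadow chain once with
	 * whatever buffer is on hand (none, on the first pass), counting
	 * every object.  If the count outgrows the buffer, allocate a
	 * larger one and walk again, since the chain may have changed
	 * while no locks were held.
	 */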
	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

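		/*
		 * Walk the shadow chain hand-over-hand: each object stays
		 * locked while its fields are copied, and the next object
		 * is locked before the current one is dropped.  Objects
		 * beyond "room" are still counted, so the code after the
		 * loop can tell whether the buffer was big enough.
		 */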
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_lock_in_progress =
					cobject->lock_in_progress;
				vio->vio_lock_restart =
					cobject->lock_restart;
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */
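		/*
		 * "used" is only a snapshot; the chain can change once
		 * the locks are dropped, so the doubled allocation below
		 * is headroom, not a guarantee, and the outer loop
		 * retries until a pass fits.
		 */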

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

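		/*
		 * The buffer is wired because it is filled while a
		 * vm_object lock is held, where taking a page fault
		 * would be unsafe.
		 */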
		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
/*
 *	Temporary call for the 64-bit data path interface transition.
 */
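/*
 *	The body below intentionally mirrors mach_vm_region_info();
 *	only the region structure (vm_info_region_64_t) is wider.
 */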

kern_return_t
mach_vm_region_info_64(
	vm_map_t		map,
	vm_offset_t		address,
	vm_info_region_64_t	*regionp,
	vm_info_object_array_t	*objectsp,
	mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(vm_offset_t) cobject;
				vio->vio_size =
					cobject->size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count =
					cobject->absent_count;
				vio->vio_copy =
					(vm_offset_t) cobject->copy;
				vio->vio_shadow =
					(vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset =
					cobject->shadow_offset;
				vio->vio_paging_offset =
					cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_lock_in_progress =
					cobject->lock_in_progress;
				vio->vio_lock_restart =
					cobject->lock_restart;
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
/*
 *	Routine:	vm_mapped_pages_info
 *	Purpose:
 *		Return an array of the virtual pages that are mapped
 *		into a task's address space.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved the page list.
 *		KERN_INVALID_ARGUMENT	The map is null.
 */
kern_return_t
vm_mapped_pages_info(
	vm_map_t		map,
	page_address_array_t	*pages,
	mach_msg_type_number_t	*pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t pmap;
	vm_size_t size, size_used;
	unsigned int actual, space;
	page_address_array_t list;
	vm_offset_t addr;

	if (map == VM_MAP_NULL)
		return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = round_page(size);

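	/*
	 * The resident count is only an initial estimate; pages can
	 * become resident before the list is captured, so the loop
	 * below retries with the count the pmap actually reports.
	 */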
	for (;;) {
		(void) vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		(void) vm_map_unwire(ipc_kernel_map, addr, addr + size, FALSE);

		list = (page_address_array_t) addr;
		space = size / sizeof(vm_offset_t);

		actual = pmap_list_resident_pages(pmap,
					list,
					space);
		if (actual <= space)
			break;

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again, sized for the actual count just returned
		 */
		size = round_page(actual * sizeof(vm_offset_t));
	}
	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	}
	else {
		*pages_count = actual;
		size_used = round_page(actual * sizeof(vm_offset_t));
		(void) vm_map_wire(ipc_kernel_map,
				addr, addr + size,
				VM_PROT_READ|VM_PROT_WRITE, FALSE);
		(void) vm_map_copyin(
			ipc_kernel_map,
			addr,
			size_used,
			TRUE,
			(vm_map_copy_t *)pages);
		if (size_used != size) {
			(void) kmem_free(ipc_kernel_map,
					addr + size_used,
					size - size_used);
		}
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}

/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
host_virtual_physical_table_info(
	host_t				host,
	hash_info_bucket_array_t	*infop,
	mach_msg_type_number_t		*countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr;
	vm_size_t size;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

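	/*
	 * CountInOut: the caller supplies an in-line buffer (*infop)
	 * and its capacity (*countp).  If the data fits, it is returned
	 * in-line; otherwise pageable kernel memory is allocated and
	 * handed back out-of-line via vm_map_copyin().
	 */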
	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page(actual * sizeof *info);
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = size/sizeof *info;
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = round_page(actual * sizeof *info);

		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		kr = vm_map_copyin(ipc_kernel_map, addr, used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}