/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *  File:   vm/vm_debug.c.
 *  Author: Rich Draves
 *  Date:   March, 1990
 *
 *  Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif

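/*
 * When MACH_VM_DEBUG is not configured, the routines below compile as
 * stubs that simply return KERN_FAILURE, so their parameters go unused;
 * __DEBUG_ONLY marks them __unused to keep the compiler quiet.
 */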
#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */

#if VM32_SUPPORT

#include <mach/vm32_map_server.h>
#include <mach/vm_map.h>

/*
 *  Routine:    mach_vm_region_info [kernel call]
 *  Purpose:
 *      Retrieve information about a VM region,
 *      including info about the object chain.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      KERN_SUCCESS            Retrieved region/object info.
 *      KERN_INVALID_TASK       The map is null.
 *      KERN_NO_SPACE           There is no entry at/after the address.
 *      KERN_RESOURCE_SHORTAGE  Can't allocate memory.
 */

kern_return_t
vm32_region_info(
    __DEBUG_ONLY vm_map_t                  map,
    __DEBUG_ONLY vm32_offset_t             address,
    __DEBUG_ONLY vm_info_region_t          *regionp,
    __DEBUG_ONLY vm_info_object_array_t    *objectsp,
    __DEBUG_ONLY mach_msg_type_number_t    *objectsCntp)
{
#if !MACH_VM_DEBUG
    return KERN_FAILURE;
#else
    vm_map_copy_t copy;
    vm_offset_t addr;           /* memory for OOL data */
    vm_size_t size;             /* size of the memory */
    unsigned int room;          /* room for this many objects */
    unsigned int used;          /* actually this many objects */
    vm_info_region_t region;
    kern_return_t kr;

    if (map == VM_MAP_NULL)
        return KERN_INVALID_TASK;

    size = 0;                   /* no memory allocated yet */

    for (;;) {
        vm_map_t cmap;          /* current map in traversal */
        vm_map_t nmap;          /* next map to look at */
        vm_map_entry_t entry;
        vm_object_t object, cobject, nobject;

        /* nothing is locked */

        vm_map_lock_read(map);
        for (cmap = map;; cmap = nmap) {
            /* cmap is read-locked */

            if (!vm_map_lookup_entry(cmap,
                (vm_map_address_t)address, &entry)) {

                entry = entry->vme_next;
                if (entry == vm_map_to_entry(cmap)) {
                    vm_map_unlock_read(cmap);
                    if (size != 0)
                        kmem_free(ipc_kernel_map,
                                  addr, size);
                    return KERN_NO_SPACE;
                }
            }

            if (entry->is_sub_map)
                nmap = entry->object.sub_map;
            else
                break;

            /* move down to the lower map */

            vm_map_lock_read(nmap);
            vm_map_unlock_read(cmap);
        }

        /* cmap is read-locked; we have a real entry */

        object = entry->object.vm_object;
        region.vir_start = (natural_t) entry->vme_start;
        region.vir_end = (natural_t) entry->vme_end;
        region.vir_object = (natural_t)(uintptr_t) object;
        region.vir_offset = (natural_t) entry->offset;
        region.vir_needs_copy = entry->needs_copy;
        region.vir_protection = entry->protection;
        region.vir_max_protection = entry->max_protection;
        region.vir_inheritance = entry->inheritance;
        region.vir_wired_count = entry->wired_count;
        region.vir_user_wired_count = entry->user_wired_count;

        used = 0;
        room = (unsigned int) (size / sizeof(vm_info_object_t));

        if (object == VM_OBJECT_NULL) {
            vm_map_unlock_read(cmap);
            /* no memory needed */
            break;
        }

        vm_object_lock(object);
        vm_map_unlock_read(cmap);

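        /*
         * Walk the object's shadow chain hand-over-hand: lock the next
         * object before releasing the current one, recording info for as
         * many objects as the buffer has room for while counting them all.
         */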
        for (cobject = object;; cobject = nobject) {
            /* cobject is locked */

            if (used < room) {
                vm_info_object_t *vio =
                    &((vm_info_object_t *) addr)[used];

                vio->vio_object =
                    (natural_t)(uintptr_t) cobject;
                vio->vio_size =
                    (natural_t) cobject->vo_size;
                vio->vio_ref_count =
                    cobject->ref_count;
                vio->vio_resident_page_count =
                    cobject->resident_page_count;
                vio->vio_copy =
                    (natural_t)(uintptr_t) cobject->copy;
                vio->vio_shadow =
                    (natural_t)(uintptr_t) cobject->shadow;
                vio->vio_shadow_offset =
                    (natural_t) cobject->vo_shadow_offset;
                vio->vio_paging_offset =
                    (natural_t) cobject->paging_offset;
                vio->vio_copy_strategy =
                    cobject->copy_strategy;
                vio->vio_last_alloc =
                    (vm_offset_t) cobject->last_alloc;
                vio->vio_paging_in_progress =
                    cobject->paging_in_progress +
                    cobject->activity_in_progress;
                vio->vio_pager_created =
                    cobject->pager_created;
                vio->vio_pager_initialized =
                    cobject->pager_initialized;
                vio->vio_pager_ready =
                    cobject->pager_ready;
                vio->vio_can_persist =
                    cobject->can_persist;
                vio->vio_internal =
                    cobject->internal;
                vio->vio_temporary =
                    cobject->temporary;
                vio->vio_alive =
                    cobject->alive;
                vio->vio_purgable =
                    (cobject->purgable != VM_PURGABLE_DENY);
                vio->vio_purgable_volatile =
                    (cobject->purgable == VM_PURGABLE_VOLATILE ||
                     cobject->purgable == VM_PURGABLE_EMPTY);
            }

            used++;
            nobject = cobject->shadow;
            if (nobject == VM_OBJECT_NULL) {
                vm_object_unlock(cobject);
                break;
            }

            vm_object_lock(nobject);
            vm_object_unlock(cobject);
        }

        /* nothing locked */

        if (used <= room)
            break;

        /* must allocate more memory */

        if (size != 0)
            kmem_free(ipc_kernel_map, addr, size);
        size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
                                 VM_MAP_PAGE_MASK(ipc_kernel_map));

        kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
        if (kr != KERN_SUCCESS)
            return KERN_RESOURCE_SHORTAGE;

        kr = vm_map_wire(
            ipc_kernel_map,
            vm_map_trunc_page(addr,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            vm_map_round_page(addr + size,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            VM_PROT_READ|VM_PROT_WRITE,
            FALSE);
        assert(kr == KERN_SUCCESS);
    }

    /* free excess memory; make remaining memory pageable */

    if (used == 0) {
        copy = VM_MAP_COPY_NULL;

        if (size != 0)
            kmem_free(ipc_kernel_map, addr, size);
    } else {
        vm_size_t size_used =
            vm_map_round_page(used * sizeof(vm_info_object_t),
                              VM_MAP_PAGE_MASK(ipc_kernel_map));

        kr = vm_map_unwire(
            ipc_kernel_map,
            vm_map_trunc_page(addr,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            vm_map_round_page(addr + size_used,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            FALSE);
        assert(kr == KERN_SUCCESS);

        kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                           (vm_map_size_t)size_used, TRUE, &copy);
        assert(kr == KERN_SUCCESS);

        if (size != size_used)
            kmem_free(ipc_kernel_map,
                      addr + size_used, size - size_used);
    }

    *regionp = region;
    *objectsp = (vm_info_object_array_t) copy;
    *objectsCntp = used;
    return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

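/*
 * Illustrative only: a minimal user-side sketch of how a debugging tool
 * might consume this call.  It assumes the MIG-generated user stub is named
 * mach_vm_region_info() and mirrors the kernel-side signature above; the
 * stub prototype and the use of the task's map port are assumptions, not
 * something this file defines.  The object array comes back as out-of-line
 * memory, so the caller deallocates it when done.
 */
#if 0   /* user-space sketch, not part of the kernel build */
#include <mach/mach.h>
#include <mach_debug/vm_info.h>
#include <stdio.h>

/* assumed prototype of the MIG user stub (see mach_debug/mach_debug.defs) */
extern kern_return_t mach_vm_region_info(vm_map_t task, vm_address_t address,
    vm_info_region_t *region, vm_info_object_array_t *objects,
    mach_msg_type_number_t *objectsCnt);

static void
dump_region(vm_map_t task, vm_address_t address)
{
    vm_info_region_t region;
    vm_info_object_array_t objects;     /* OOL array from the kernel */
    mach_msg_type_number_t count;
    kern_return_t kr;

    kr = mach_vm_region_info(task, address, &region, &objects, &count);
    if (kr != KERN_SUCCESS)
        return;

    printf("region 0x%x-0x%x, %u object(s) in shadow chain\n",
           region.vir_start, region.vir_end, count);

    /* release the out-of-line buffer the kernel copied out */
    if (count != 0)
        (void) vm_deallocate(mach_task_self(),
            (vm_address_t) objects, count * sizeof(*objects));
}
#endif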
/*
 * Temporary call for the 64-bit data path interface transition.
 */

kern_return_t
vm32_region_info_64(
    __DEBUG_ONLY vm_map_t                  map,
    __DEBUG_ONLY vm32_offset_t             address,
    __DEBUG_ONLY vm_info_region_64_t       *regionp,
    __DEBUG_ONLY vm_info_object_array_t    *objectsp,
    __DEBUG_ONLY mach_msg_type_number_t    *objectsCntp)
{
#if !MACH_VM_DEBUG
    return KERN_FAILURE;
#else
    vm_map_copy_t copy;
    vm_offset_t addr;           /* memory for OOL data */
    vm_size_t size;             /* size of the memory */
    unsigned int room;          /* room for this many objects */
    unsigned int used;          /* actually this many objects */
    vm_info_region_64_t region;
    kern_return_t kr;

    if (map == VM_MAP_NULL)
        return KERN_INVALID_TASK;

    size = 0;                   /* no memory allocated yet */

    for (;;) {
        vm_map_t cmap;          /* current map in traversal */
        vm_map_t nmap;          /* next map to look at */
        vm_map_entry_t entry;
        vm_object_t object, cobject, nobject;

        /* nothing is locked */

        vm_map_lock_read(map);
        for (cmap = map;; cmap = nmap) {
            /* cmap is read-locked */

            if (!vm_map_lookup_entry(cmap, address, &entry)) {
                entry = entry->vme_next;
                if (entry == vm_map_to_entry(cmap)) {
                    vm_map_unlock_read(cmap);
                    if (size != 0)
                        kmem_free(ipc_kernel_map,
                                  addr, size);
                    return KERN_NO_SPACE;
                }
            }

            if (entry->is_sub_map)
                nmap = entry->object.sub_map;
            else
                break;

            /* move down to the lower map */

            vm_map_lock_read(nmap);
            vm_map_unlock_read(cmap);
        }

        /* cmap is read-locked; we have a real entry */

        object = entry->object.vm_object;
        region.vir_start = (natural_t) entry->vme_start;
        region.vir_end = (natural_t) entry->vme_end;
        region.vir_object = (natural_t)(uintptr_t) object;
        region.vir_offset = entry->offset;
        region.vir_needs_copy = entry->needs_copy;
        region.vir_protection = entry->protection;
        region.vir_max_protection = entry->max_protection;
        region.vir_inheritance = entry->inheritance;
        region.vir_wired_count = entry->wired_count;
        region.vir_user_wired_count = entry->user_wired_count;

        used = 0;
        room = (unsigned int) (size / sizeof(vm_info_object_t));

        if (object == VM_OBJECT_NULL) {
            vm_map_unlock_read(cmap);
            /* no memory needed */
            break;
        }

        vm_object_lock(object);
        vm_map_unlock_read(cmap);

        for (cobject = object;; cobject = nobject) {
            /* cobject is locked */

            if (used < room) {
                vm_info_object_t *vio =
                    &((vm_info_object_t *) addr)[used];

                vio->vio_object =
                    (natural_t)(uintptr_t) cobject;
                vio->vio_size =
                    (natural_t) cobject->vo_size;
                vio->vio_ref_count =
                    cobject->ref_count;
                vio->vio_resident_page_count =
                    cobject->resident_page_count;
                vio->vio_copy =
                    (natural_t)(uintptr_t) cobject->copy;
                vio->vio_shadow =
                    (natural_t)(uintptr_t) cobject->shadow;
                vio->vio_shadow_offset =
                    (natural_t) cobject->vo_shadow_offset;
                vio->vio_paging_offset =
                    (natural_t) cobject->paging_offset;
                vio->vio_copy_strategy =
                    cobject->copy_strategy;
                vio->vio_last_alloc =
                    (vm_offset_t) cobject->last_alloc;
                vio->vio_paging_in_progress =
                    cobject->paging_in_progress +
                    cobject->activity_in_progress;
                vio->vio_pager_created =
                    cobject->pager_created;
                vio->vio_pager_initialized =
                    cobject->pager_initialized;
                vio->vio_pager_ready =
                    cobject->pager_ready;
                vio->vio_can_persist =
                    cobject->can_persist;
                vio->vio_internal =
                    cobject->internal;
                vio->vio_temporary =
                    cobject->temporary;
                vio->vio_alive =
                    cobject->alive;
                vio->vio_purgable =
                    (cobject->purgable != VM_PURGABLE_DENY);
                vio->vio_purgable_volatile =
                    (cobject->purgable == VM_PURGABLE_VOLATILE ||
                     cobject->purgable == VM_PURGABLE_EMPTY);
            }

            used++;
            nobject = cobject->shadow;
            if (nobject == VM_OBJECT_NULL) {
                vm_object_unlock(cobject);
                break;
            }

            vm_object_lock(nobject);
            vm_object_unlock(cobject);
        }

        /* nothing locked */

        if (used <= room)
            break;

        /* must allocate more memory */

        if (size != 0)
            kmem_free(ipc_kernel_map, addr, size);
        size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
                                 VM_MAP_PAGE_MASK(ipc_kernel_map));

        kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
        if (kr != KERN_SUCCESS)
            return KERN_RESOURCE_SHORTAGE;

        kr = vm_map_wire(
            ipc_kernel_map,
            vm_map_trunc_page(addr,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            vm_map_round_page(addr + size,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            VM_PROT_READ|VM_PROT_WRITE,
            FALSE);
        assert(kr == KERN_SUCCESS);
    }

    /* free excess memory; make remaining memory pageable */

    if (used == 0) {
        copy = VM_MAP_COPY_NULL;

        if (size != 0)
            kmem_free(ipc_kernel_map, addr, size);
    } else {
        vm_size_t size_used =
            vm_map_round_page(used * sizeof(vm_info_object_t),
                              VM_MAP_PAGE_MASK(ipc_kernel_map));

        kr = vm_map_unwire(
            ipc_kernel_map,
            vm_map_trunc_page(addr,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            vm_map_round_page(addr + size_used,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            FALSE);
        assert(kr == KERN_SUCCESS);

        kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                           (vm_map_size_t)size_used, TRUE, &copy);
        assert(kr == KERN_SUCCESS);

        if (size != size_used)
            kmem_free(ipc_kernel_map,
                      addr + size_used, size - size_used);
    }

    *regionp = region;
    *objectsp = (vm_info_object_array_t) copy;
    *objectsCntp = used;
    return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
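/*
 * Both vm32_region_info() and vm32_region_info_64() size their out-of-line
 * buffer with the same guess-and-retry loop: walk the shadow chain counting
 * every object, fill only as many records as the current buffer holds, and
 * if the count exceeded the room, free the buffer, allocate twice what was
 * needed, and walk again.  A minimal user-level sketch of that pattern with
 * hypothetical names (count_and_fill() stands in for the shadow-chain walk):
 */
#if 0   /* illustrative sketch, not part of the kernel build */
#include <stdlib.h>

typedef struct { int dummy; } record_t;

/* hypothetical: writes at most `room` records, returns how many exist */
extern unsigned int count_and_fill(record_t *buf, unsigned int room);

static record_t *
collect_records(unsigned int *countp)
{
    record_t *buf = NULL;
    size_t size = 0;                /* no memory allocated yet */
    unsigned int room, used;

    for (;;) {
        room = (unsigned int)(size / sizeof(record_t));
        used = count_and_fill(buf, room);
        if (used <= room)
            break;                  /* everything fit */

        /* not enough room: grow to twice what was needed and retry */
        free(buf);
        size = 2 * used * sizeof(record_t);
        buf = malloc(size);
        if (buf == NULL)
            return NULL;
    }
    *countp = used;
    return buf;                     /* may be larger than strictly needed */
}
#endif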
/*
 * Return an array of virtual pages that are mapped to a task.
 */
kern_return_t
vm32_mapped_pages_info(
    __DEBUG_ONLY vm_map_t                  map,
    __DEBUG_ONLY page_address_array_t      *pages,
    __DEBUG_ONLY mach_msg_type_number_t    *pages_count)
{
#if !MACH_VM_DEBUG
    return KERN_FAILURE;
#else
    pmap_t pmap;
    vm_size_t size, size_used;
    unsigned int actual, space;
    page_address_array_t list;
    vm_offset_t addr;

    if (map == VM_MAP_NULL)
        return (KERN_INVALID_ARGUMENT);

    pmap = map->pmap;
    size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
    size = vm_map_round_page(size,
                             VM_MAP_PAGE_MASK(ipc_kernel_map));

    for (;;) {
        (void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
        (void) vm_map_unwire(
            ipc_kernel_map,
            vm_map_trunc_page(addr,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            vm_map_round_page(addr + size,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            FALSE);

        list = (page_address_array_t) addr;
        space = (unsigned int) (size / sizeof(vm_offset_t));

        actual = pmap_list_resident_pages(pmap,
                                          list,
                                          space);
        if (actual <= space)
            break;

        /*
         * Free memory if not enough
         */
        (void) kmem_free(ipc_kernel_map, addr, size);

        /*
         * Try again, doubling the size
         */
        size = vm_map_round_page(actual * sizeof(vm_offset_t),
                                 VM_MAP_PAGE_MASK(ipc_kernel_map));
    }
    if (actual == 0) {
        *pages = 0;
        *pages_count = 0;
        (void) kmem_free(ipc_kernel_map, addr, size);
    }
    else {
        *pages_count = actual;
        size_used = vm_map_round_page(actual * sizeof(vm_offset_t),
                                      VM_MAP_PAGE_MASK(ipc_kernel_map));
        (void) vm_map_wire(
            ipc_kernel_map,
            vm_map_trunc_page(addr,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            vm_map_round_page(addr + size,
                              VM_MAP_PAGE_MASK(ipc_kernel_map)),
            VM_PROT_READ|VM_PROT_WRITE,
            FALSE);
        (void) vm_map_copyin(ipc_kernel_map,
                             (vm_map_address_t)addr,
                             (vm_map_size_t)size_used,
                             TRUE,
                             (vm_map_copy_t *)pages);
        if (size_used != size) {
            (void) kmem_free(ipc_kernel_map,
                             addr + size_used,
                             size - size_used);
        }
    }

    return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}
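/*
 * The routines above return their results through ipc_kernel_map: the
 * buffer is filled while wired, the used (page-rounded) prefix is captured
 * with vm_map_copyin() so MIG can send it out-of-line, and the unused tail
 * is freed.  A minimal kernel-side sketch of that hand-off, following the
 * vm32_region_info() ordering (unwire, then copyin, then free the tail);
 * the helper name is hypothetical:
 */
#if 0   /* illustrative sketch, not part of the build */
static kern_return_t
hand_off_buffer(vm_offset_t addr, vm_size_t size, vm_size_t bytes_used,
    vm_map_copy_t *copyp)
{
    kern_return_t kr;
    vm_size_t size_used = vm_map_round_page(bytes_used,
        VM_MAP_PAGE_MASK(ipc_kernel_map));

    /* make the used portion pageable again before copying it in */
    kr = vm_map_unwire(ipc_kernel_map,
        vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(ipc_kernel_map)),
        vm_map_round_page(addr + size_used,
                          VM_MAP_PAGE_MASK(ipc_kernel_map)),
        FALSE);
    assert(kr == KERN_SUCCESS);

    /* steal the pages into a copy object for the reply message */
    kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                       (vm_map_size_t)size_used, TRUE, copyp);
    assert(kr == KERN_SUCCESS);

    /* the allocation may have been larger than needed; free the tail */
    if (size != size_used)
        kmem_free(ipc_kernel_map, addr + size_used, size - size_used);

    return KERN_SUCCESS;
}
#endif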

#endif /* VM32_SUPPORT */

/*
 *  Routine:    host_virtual_physical_table_info
 *  Purpose:
 *      Return information about the VP table.
 *  Conditions:
 *      Nothing locked.  Obeys CountInOut protocol.
 *  Returns:
 *      KERN_SUCCESS            Returned information.
 *      KERN_INVALID_HOST       The host is null.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */

kern_return_t
host_virtual_physical_table_info(
    __DEBUG_ONLY host_t                        host,
    __DEBUG_ONLY hash_info_bucket_array_t      *infop,
    __DEBUG_ONLY mach_msg_type_number_t        *countp)
{
#if !MACH_VM_DEBUG
    return KERN_FAILURE;
#else
    vm_offset_t addr;
    vm_size_t size = 0;
    hash_info_bucket_t *info;
    unsigned int potential, actual;
    kern_return_t kr;

    if (host == HOST_NULL)
        return KERN_INVALID_HOST;

    /* start with in-line data */

    info = *infop;
    potential = *countp;

    for (;;) {
        actual = vm_page_info(info, potential);
        if (actual <= potential)
            break;

        /* allocate more memory */

        if (info != *infop)
            kmem_free(ipc_kernel_map, addr, size);

        size = vm_map_round_page(actual * sizeof *info,
                                 VM_MAP_PAGE_MASK(ipc_kernel_map));
        kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
        if (kr != KERN_SUCCESS)
            return KERN_RESOURCE_SHORTAGE;

        info = (hash_info_bucket_t *) addr;
        potential = (unsigned int) (size/sizeof (*info));
    }

    if (info == *infop) {
        /* data fit in-line; nothing to deallocate */

        *countp = actual;
    } else if (actual == 0) {
        kmem_free(ipc_kernel_map, addr, size);

        *countp = 0;
    } else {
        vm_map_copy_t copy;
        vm_size_t used;

        used = vm_map_round_page(actual * sizeof *info,
                                 VM_MAP_PAGE_MASK(ipc_kernel_map));

        if (used != size)
            kmem_free(ipc_kernel_map, addr + used, size - used);

        kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
                           (vm_map_size_t)used, TRUE, &copy);
        assert(kr == KERN_SUCCESS);

        *infop = (hash_info_bucket_t *) copy;
        *countp = actual;
    }

    return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
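/*
 * Illustrative only: what the CountInOut protocol looks like from the
 * caller's side.  The caller offers an in-line buffer and its capacity; if
 * the table is small enough the data lands there, otherwise the MIG stub
 * swaps in out-of-line memory that the caller must vm_deallocate().  The
 * user-stub prototype below and the exact deallocation rule are assumptions
 * based on the usual MIG conventions, not something this file defines.
 */
#if 0   /* user-space sketch, not part of the kernel build */
#include <mach/mach.h>
#include <mach_debug/hash_info.h>

/* assumed prototype of the MIG user stub */
extern kern_return_t host_virtual_physical_table_info(host_t host,
    hash_info_bucket_array_t *info, mach_msg_type_number_t *count);

static void
show_vp_table(void)
{
    hash_info_bucket_t inline_buf[64];
    hash_info_bucket_array_t info = inline_buf;
    mach_msg_type_number_t count = 64;  /* capacity offered in-line */
    kern_return_t kr;

    kr = host_virtual_physical_table_info(mach_host_self(), &info, &count);
    if (kr != KERN_SUCCESS)
        return;

    /* ... use info[0..count-1] ... */

    /* if the data did not fit in-line, the stub handed back OOL memory */
    if (info != inline_buf)
        (void) vm_deallocate(mach_task_self(),
            (vm_address_t) info, count * sizeof(*info));
}
#endif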