/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_debug.c.
 *	Author:	Rich Draves
 *	Date:	March, 1990
 *
 *	Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif

#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */

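/*
 * When MACH_VM_DEBUG is configured out, each routine below collapses to a
 * bare "return KERN_FAILURE" and never touches its arguments, so the
 * parameters are tagged __DEBUG_ONLY (__unused) to keep the compiler from
 * warning about unused parameters in that configuration.
 */
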
#if VM32_SUPPORT

#include <mach/vm32_map_server.h>
#include <mach/vm_map.h>

/*
 *	Routine:	mach_vm_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
 */

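/*
 * Illustrative sketch (not part of this file's build): roughly how a
 * user-space debugging tool would reach this routine through the
 * MIG-generated stub declared via mach_debug/mach_debug.defs.  The exact
 * user-side prototype and the name "target_task" are assumptions here,
 * shown only to make the out-of-line reply convention concrete.
 *
 *	vm_info_region_t	region;
 *	vm_info_object_array_t	objects;
 *	mach_msg_type_number_t	count;
 *	kern_return_t		kr;
 *
 *	kr = mach_vm_region_info(target_task, address,
 *				 &region, &objects, &count);
 *	if (kr == KERN_SUCCESS) {
 *		// objects[0..count-1] describes the shadow chain, starting
 *		// with the object mapped by the region.  The array arrives
 *		// as out-of-line memory, so the caller would normally
 *		// vm_deallocate() it when done.
 *	}
 */
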
kern_return_t
vm32_region_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm32_offset_t		address,
	__DEBUG_ONLY vm_info_region_t		*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
				(vm_map_address_t)address, &entry)) {

				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = (natural_t) entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

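		/*
		 * Walk the shadow chain hand-over-hand: each shadow object
		 * is locked before the one above it is dropped, so the
		 * chain cannot be torn down out from under the traversal.
		 */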
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(natural_t)(uintptr_t) cobject;
				vio->vio_size =
					(natural_t) cobject->vo_size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy =
					(natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
					(natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
					(natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
					(natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					(vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress +
					cobject->activity_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

/*
 *	Temporary call for 64 bit data path interface transition
 */

kern_return_t
vm32_region_info_64(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY vm32_offset_t		address,
	__DEBUG_ONLY vm_info_region_64_t	*regionp,
	__DEBUG_ONLY vm_info_object_array_t	*objectsp,
	__DEBUG_ONLY mach_msg_type_number_t	*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object =
					(natural_t)(uintptr_t) cobject;
				vio->vio_size =
					(natural_t) cobject->vo_size;
				vio->vio_ref_count =
					cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_copy =
					(natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
					(natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
					(natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
					(natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
					cobject->copy_strategy;
				vio->vio_last_alloc =
					(vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress +
					cobject->activity_in_progress;
				vio->vio_pager_created =
					cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready =
					cobject->pager_ready;
				vio->vio_can_persist =
					cobject->can_persist;
				vio->vio_internal =
					cobject->internal;
				vio->vio_temporary =
					cobject->temporary;
				vio->vio_alive =
					cobject->alive;
				vio->vio_purgable =
					(cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
					(cobject->purgable == VM_PURGABLE_VOLATILE ||
					 cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				 vm_map_round_page(addr + size),
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size_used), FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
/*
 *	Return an array of virtual pages that are mapped to a task.
 */
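/*
 * The buffer is sized from pmap_resident_count() as an initial estimate;
 * if pmap_list_resident_pages() reports that more entries were needed,
 * the buffer is freed and the loop below retries with the larger count.
 * The final list is handed back to the caller as out-of-line memory via
 * vm_map_copyin().
 */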
kern_return_t
vm32_mapped_pages_info(
	__DEBUG_ONLY vm_map_t			map,
	__DEBUG_ONLY page_address_array_t	*pages,
	__DEBUG_ONLY mach_msg_type_number_t	*pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t		pmap;
	vm_size_t	size, size_used;
	unsigned int	actual, space;
	page_address_array_t list;
	vm_offset_t	addr;

	if (map == VM_MAP_NULL)
		return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = round_page(size);

	for (;;) {
		(void) vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
		(void) vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(addr),
				     vm_map_round_page(addr + size), FALSE);

		list = (page_address_array_t) addr;
		space = (unsigned int) (size / sizeof(vm_offset_t));

		actual = pmap_list_resident_pages(pmap,
					list,
					space);
		if (actual <= space)
			break;

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again with room for the count just reported
		 */
		size = round_page(actual * sizeof(vm_offset_t));
	}
	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	}
	else {
		*pages_count = actual;
		size_used = round_page(actual * sizeof(vm_offset_t));
		(void) vm_map_wire(ipc_kernel_map, vm_map_trunc_page(addr),
				   vm_map_round_page(addr + size),
				   VM_PROT_READ|VM_PROT_WRITE, FALSE);
		(void) vm_map_copyin(ipc_kernel_map,
				     (vm_map_address_t)addr,
				     (vm_map_size_t)size_used,
				     TRUE,
				     (vm_map_copy_t *)pages);
		if (size_used != size) {
			(void) kmem_free(ipc_kernel_map,
					 addr + size_used,
					 size - size_used);
		}
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}

#endif /* VM32_SUPPORT */

/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

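/*
 * A note on the CountInOut convention used here: on entry *infop points
 * at the caller-supplied in-line buffer and *countp gives its capacity.
 * If the table fits, the data is returned in-line and *countp is set to
 * the number of buckets actually filled; otherwise the routine switches
 * the reply to out-of-line memory by storing a vm_map_copy_t through
 * *infop, as the code below does.
 */
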
kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t			host,
	__DEBUG_ONLY hash_info_bucket_array_t	*infop,
	__DEBUG_ONLY mach_msg_type_number_t	*countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr;
	vm_size_t size = 0;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL)
		return KERN_INVALID_HOST;

	/* start with in-line data */

	info = *infop;
	potential = *countp;

	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential)
			break;

		/* allocate more memory */

		if (info != *infop)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page(actual * sizeof *info);
		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		info = (hash_info_bucket_t *) addr;
		potential = (unsigned int) (size/sizeof (*info));
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used;

		used = round_page(actual * sizeof *info);

		if (used != size)
			kmem_free(ipc_kernel_map, addr + used, size - used);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
				   (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}