/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_debug.c.
 *	Author:	Rich Draves
 *	Date:	March, 1990
 *
 *	Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif

#if !MACH_VM_DEBUG
#define __DEBUG_ONLY __unused
#else /* !MACH_VM_DEBUG */
#define __DEBUG_ONLY
#endif /* !MACH_VM_DEBUG */
#ifdef VM32_SUPPORT

#include <mach/vm32_map_server.h>
#include <mach/vm_map.h>

/*
 *	Routine:	vm32_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
 */

kern_return_t
vm32_region_info(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY vm32_offset_t address,
	__DEBUG_ONLY vm_info_region_t *regionp,
	__DEBUG_ONLY vm_info_object_array_t *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;   /* memory for OOL data */
	vm_size_t size;         /* size of the memory */
	unsigned int room;      /* room for this many objects */
	unsigned int used;      /* actually this many objects */
	vm_info_region_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	size = 0;               /* no memory allocated yet */

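	/*
	 * The loop below makes a pass over the region's shadow chain with
	 * whatever buffer is on hand (initially none), counting objects as
	 * it goes.  If the buffer turns out to be too small, it allocates
	 * a larger one and starts over, since the chain may have changed
	 * while nothing was locked.
	 */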
	for (;;) {
		vm_map_t cmap;          /* current map in traversal */
		vm_map_t nmap;          /* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap,
			    (vm_map_address_t)address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0) {
						kmem_free(ipc_kernel_map,
						    addr, size);
					}
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map) {
				nmap = VME_SUBMAP(entry);
			} else {
				break;
			}

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = (natural_t) VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

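		/*
		 * Walk down the shadow chain with hand-over-hand object
		 * locking, filling in one vm_info_object_t per object while
		 * there is room, but counting every object regardless so a
		 * short buffer can be detected afterwards.
		 */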
		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
				    &((vm_info_object_t *) addr)[used];

				vio->vio_object =
				    (natural_t)(uintptr_t) cobject;
				vio->vio_size =
				    (natural_t) cobject->vo_size;
				vio->vio_ref_count =
				    cobject->ref_count;
				vio->vio_resident_page_count =
				    cobject->resident_page_count;
				vio->vio_copy =
				    (natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
				    (natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
				    (natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
				    (natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
				    cobject->copy_strategy;
				vio->vio_last_alloc =
				    (vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
				    cobject->paging_in_progress +
				    cobject->activity_in_progress;
				vio->vio_pager_created =
				    cobject->pager_created;
				vio->vio_pager_initialized =
				    cobject->pager_initialized;
				vio->vio_pager_ready =
				    cobject->pager_ready;
				vio->vio_can_persist =
				    cobject->can_persist;
				vio->vio_internal =
				    cobject->internal;
				vio->vio_temporary =
				    FALSE;
				vio->vio_alive =
				    cobject->alive;
				vio->vio_purgable =
				    (cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
				    cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room) {
			break;
		}

		/* must allocate more memory */

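		/*
		 * Size the new buffer at twice the count just observed,
		 * leaving headroom in case the chain grows before the next
		 * pass.
		 */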
		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}

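		/*
		 * Wire the new buffer; the next pass writes into it while
		 * holding vm_object locks, presumably where taking a page
		 * fault would be unsafe.
		 */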
		kr = vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ | VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

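	/*
	 * Hand the results back out-of-line: unwire the used portion, turn
	 * it into a vm_map_copy_t for the MIG reply, and free any trailing
	 * page-rounded excess.
	 */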
	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used) {
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
		}
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

/*
 * Temporary call for the 64-bit data path interface transition
 */

kern_return_t
vm32_region_info_64(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY vm32_offset_t address,
	__DEBUG_ONLY vm_info_region_64_t *regionp,
	__DEBUG_ONLY vm_info_object_array_t *objectsp,
	__DEBUG_ONLY mach_msg_type_number_t *objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr = 0;   /* memory for OOL data */
	vm_size_t size;         /* size of the memory */
	unsigned int room;      /* room for this many objects */
	unsigned int used;      /* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_TASK;
	}

	size = 0;               /* no memory allocated yet */

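	/*
	 * Same guess-fill-retry structure as vm32_region_info() above; only
	 * the region struct differs (vm_info_region_64_t, whose vir_offset
	 * is not truncated to natural_t).
	 */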
	for (;;) {
		vm_map_t cmap;          /* current map in traversal */
		vm_map_t nmap;          /* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0) {
						kmem_free(ipc_kernel_map,
						    addr, size);
					}
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map) {
				nmap = VME_SUBMAP(entry);
			} else {
				break;
			}

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = VME_OBJECT(entry);
		region.vir_start = (natural_t) entry->vme_start;
		region.vir_end = (natural_t) entry->vme_end;
		region.vir_object = (natural_t)(uintptr_t) object;
		region.vir_offset = VME_OFFSET(entry);
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = (unsigned int) (size / sizeof(vm_info_object_t));

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
				    &((vm_info_object_t *) addr)[used];

				vio->vio_object =
				    (natural_t)(uintptr_t) cobject;
				vio->vio_size =
				    (natural_t) cobject->vo_size;
				vio->vio_ref_count =
				    cobject->ref_count;
				vio->vio_resident_page_count =
				    cobject->resident_page_count;
				vio->vio_copy =
				    (natural_t)(uintptr_t) cobject->copy;
				vio->vio_shadow =
				    (natural_t)(uintptr_t) cobject->shadow;
				vio->vio_shadow_offset =
				    (natural_t) cobject->vo_shadow_offset;
				vio->vio_paging_offset =
				    (natural_t) cobject->paging_offset;
				vio->vio_copy_strategy =
				    cobject->copy_strategy;
				vio->vio_last_alloc =
				    (vm_offset_t) cobject->last_alloc;
				vio->vio_paging_in_progress =
				    cobject->paging_in_progress +
				    cobject->activity_in_progress;
				vio->vio_pager_created =
				    cobject->pager_created;
				vio->vio_pager_initialized =
				    cobject->pager_initialized;
				vio->vio_pager_ready =
				    cobject->pager_ready;
				vio->vio_can_persist =
				    cobject->can_persist;
				vio->vio_internal =
				    cobject->internal;
				vio->vio_temporary =
				    FALSE;
				vio->vio_alive =
				    cobject->alive;
				vio->vio_purgable =
				    (cobject->purgable != VM_PURGABLE_DENY);
				vio->vio_purgable_volatile =
				    (cobject->purgable == VM_PURGABLE_VOLATILE ||
				    cobject->purgable == VM_PURGABLE_EMPTY);
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room) {
			break;
		}

		/* must allocate more memory */

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
		size = vm_map_round_page(2 * used * sizeof(vm_info_object_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}

		kr = vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ | VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0) {
			kmem_free(ipc_kernel_map, addr, size);
		}
	} else {
		vm_size_t size_used = (used * sizeof(vm_info_object_t));
		vm_size_t vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));

		kr = vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size_used,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)size_used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != vmsize_used) {
			kmem_free(ipc_kernel_map,
			    addr + vmsize_used, size - vmsize_used);
		}
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

/*
 *	Return an array of virtual pages that are mapped to a task.
 */
kern_return_t
vm32_mapped_pages_info(
	__DEBUG_ONLY vm_map_t map,
	__DEBUG_ONLY page_address_array_t *pages,
	__DEBUG_ONLY mach_msg_type_number_t *pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t pmap;
	vm_size_t size, size_used;
	unsigned int actual, space;
	page_address_array_t list;
	vm_offset_t addr = 0;

	if (map == VM_MAP_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(ipc_kernel_map));

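	/*
	 * Initial guess: one slot per page currently resident in the pmap.
	 * If more pages become resident before pmap_list_resident_pages()
	 * runs, the loop below frees the buffer and retries with the larger
	 * count that call reported.
	 */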
	for (;;) {
		(void) vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		(void) vm_map_unwire(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			FALSE);

		list = (page_address_array_t) addr;
		space = (unsigned int) (size / sizeof(vm_offset_t));

		actual = pmap_list_resident_pages(pmap,
		    list,
		    space);
		if (actual <= space) {
			break;
		}

		/*
		 * The buffer was too small; free it
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again with a buffer sized for the reported count
		 */
		size = vm_map_round_page(actual * sizeof(vm_offset_t),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
	}
	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t vmsize_used;
		*pages_count = actual;
		size_used = (actual * sizeof(vm_offset_t));
		vmsize_used = vm_map_round_page(size_used,
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
		(void) vm_map_wire_kernel(
			ipc_kernel_map,
			vm_map_trunc_page(addr,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			vm_map_round_page(addr + size,
			    VM_MAP_PAGE_MASK(ipc_kernel_map)),
			VM_PROT_READ | VM_PROT_WRITE,
			VM_KERN_MEMORY_IPC,
			FALSE);
		(void) vm_map_copyin(ipc_kernel_map,
		    (vm_map_address_t)addr,
		    (vm_map_size_t)size_used,
		    TRUE,
		    (vm_map_copy_t *)pages);
		if (vmsize_used != size) {
			(void) kmem_free(ipc_kernel_map,
			    addr + vmsize_used,
			    size - vmsize_used);
		}
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}

#endif /* VM32_SUPPORT */

/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
host_virtual_physical_table_info(
	__DEBUG_ONLY host_t host,
	__DEBUG_ONLY hash_info_bucket_array_t *infop,
	__DEBUG_ONLY mach_msg_type_number_t *countp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_offset_t addr = 0;
	vm_size_t size = 0;
	hash_info_bucket_t *info;
	unsigned int potential, actual;
	kern_return_t kr;

	if (host == HOST_NULL) {
		return KERN_INVALID_HOST;
	}

	/* start with in-line data */

	info = *infop;
	potential = *countp;

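	/*
	 * CountInOut: the caller supplies an in-line buffer whose capacity
	 * arrives in *countp.  Use it if the data fits; otherwise allocate
	 * kernel memory sized to the count vm_page_info() reports, and
	 * retry until it fits.
	 */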
	for (;;) {
		actual = vm_page_info(info, potential);
		if (actual <= potential) {
			break;
		}

		/* allocate more memory */

		if (info != *infop) {
			kmem_free(ipc_kernel_map, addr, size);
		}

		size = vm_map_round_page(actual * sizeof(*info),
		    VM_MAP_PAGE_MASK(ipc_kernel_map));
		kr = vm_allocate_kernel(ipc_kernel_map, &addr, size,
		    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
		if (kr != KERN_SUCCESS) {
			return KERN_RESOURCE_SHORTAGE;
		}

		info = (hash_info_bucket_t *) addr;
		potential = (unsigned int) (size / sizeof(*info));
	}

	if (info == *infop) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_map_copy_t copy;
		vm_size_t used, vmused;

		used = (actual * sizeof(*info));
		vmused = vm_map_round_page(used, VM_MAP_PAGE_MASK(ipc_kernel_map));

		if (vmused != size) {
			kmem_free(ipc_kernel_map, addr + vmused, size - vmused);
		}

		kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
		    (vm_map_size_t)used, TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*infop = (hash_info_bucket_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}