/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_debug.c.
 *	Author:	Rich Draves
 *	Date:	March, 1990
 *
 *	Exported kernel calls.  See mach_debug/mach_debug.defs.
 */
#include <mach_vm_debug.h>
#include <mach/kern_return.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map_server.h>
#include <mach_debug/vm_info.h>
#include <mach_debug/page_info.h>
#include <mach_debug/hash_info.h>

#if MACH_VM_DEBUG
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_param.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <kern/task.h>
#include <kern/host.h>
#include <ipc/ipc_port.h>
#include <vm/vm_debug.h>
#endif

/*
 *	Routine:	mach_vm_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 *		KERN_RESOURCE_SHORTAGE	Can't allocate memory.
 */
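/*
 *	Usage sketch (illustrative assumption, not part of this file): a
 *	debugging client holding a send right for the target task could call
 *	the MIG-generated user stub roughly as follows; the task_port and
 *	address variables are placeholders supplied by the caller.
 *
 *		vm_info_region_t	region;
 *		vm_info_object_array_t	objects;
 *		mach_msg_type_number_t	count;
 *		kern_return_t		kr;
 *
 *		kr = mach_vm_region_info(task_port, address,
 *					 &region, &objects, &count);
 *		if (kr == KERN_SUCCESS) {
 *			... walk objects[0 .. count-1], then deallocate the
 *			    out-of-line array when done ...
 *		}
 */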
96 | ||
97 | kern_return_t | |
98 | mach_vm_region_info( | |
99 | vm_map_t map, | |
100 | vm_offset_t address, | |
101 | vm_info_region_t *regionp, | |
102 | vm_info_object_array_t *objectsp, | |
103 | mach_msg_type_number_t *objectsCntp) | |
104 | { | |
105 | #if !MACH_VM_DEBUG | |
106 | return KERN_FAILURE; | |
107 | #else | |
108 | vm_map_copy_t copy; | |
109 | vm_offset_t addr; /* memory for OOL data */ | |
110 | vm_size_t size; /* size of the memory */ | |
111 | unsigned int room; /* room for this many objects */ | |
112 | unsigned int used; /* actually this many objects */ | |
113 | vm_info_region_t region; | |
114 | kern_return_t kr; | |
115 | ||
116 | if (map == VM_MAP_NULL) | |
117 | return KERN_INVALID_TASK; | |
118 | ||
119 | size = 0; /* no memory allocated yet */ | |
120 | ||
	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
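		/*
		 * Descend through any submaps covering the address, taking
		 * the next map's read lock before dropping the current one so
		 * the map being examined cannot go away underneath us.
		 */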
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object = (vm_offset_t) cobject;
				vio->vio_size = cobject->size;
				vio->vio_ref_count = cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count = cobject->absent_count;
				vio->vio_copy = (vm_offset_t) cobject->copy;
				vio->vio_shadow = (vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset = cobject->shadow_offset;
				vio->vio_paging_offset = cobject->paging_offset;
				vio->vio_copy_strategy = cobject->copy_strategy;
				vio->vio_last_alloc = cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created = cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready = cobject->pager_ready;
				vio->vio_can_persist = cobject->can_persist;
				vio->vio_internal = cobject->internal;
				vio->vio_temporary = cobject->temporary;
				vio->vio_alive = cobject->alive;
				vio->vio_lock_in_progress =
					cobject->lock_in_progress;
				vio->vio_lock_restart = cobject->lock_restart;
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */
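	/*
	 * The wired kernel buffer is unwired and converted to a
	 * vm_map_copy_t, which is handed back to the caller as out-of-line
	 * data; any pages past the portion actually used are released.
	 */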
264 | ||
265 | if (used == 0) { | |
266 | copy = VM_MAP_COPY_NULL; | |
267 | ||
268 | if (size != 0) | |
269 | kmem_free(ipc_kernel_map, addr, size); | |
270 | } else { | |
271 | vm_size_t size_used = | |
272 | round_page(used * sizeof(vm_info_object_t)); | |
273 | ||
274 | kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE); | |
275 | assert(kr == KERN_SUCCESS); | |
276 | ||
277 | kr = vm_map_copyin(ipc_kernel_map, addr, size_used, | |
278 | TRUE, ©); | |
279 | assert(kr == KERN_SUCCESS); | |
280 | ||
281 | if (size != size_used) | |
282 | kmem_free(ipc_kernel_map, | |
283 | addr + size_used, size - size_used); | |
284 | } | |
285 | ||
286 | *regionp = region; | |
287 | *objectsp = (vm_info_object_array_t) copy; | |
288 | *objectsCntp = used; | |
289 | return KERN_SUCCESS; | |
290 | #endif /* MACH_VM_DEBUG */ | |
291 | } | |
/*
 *	Temporary call for 64 bit data path interface transition
 */

kern_return_t
mach_vm_region_info_64(
	vm_map_t			map,
	vm_offset_t			address,
	vm_info_region_64_t		*regionp,
	vm_info_object_array_t		*objectsp,
	mach_msg_type_number_t		*objectsCntp)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	vm_map_copy_t copy;
	vm_offset_t addr;	/* memory for OOL data */
	vm_size_t size;		/* size of the memory */
	unsigned int room;	/* room for this many objects */
	unsigned int used;	/* actually this many objects */
	vm_info_region_64_t region;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	size = 0;		/* no memory allocated yet */

	for (;;) {
		vm_map_t cmap;	/* current map in traversal */
		vm_map_t nmap;	/* next map to look at */
		vm_map_entry_t entry;
		vm_object_t object, cobject, nobject;

		/* nothing is locked */

		vm_map_lock_read(map);
		for (cmap = map;; cmap = nmap) {
			/* cmap is read-locked */

			if (!vm_map_lookup_entry(cmap, address, &entry)) {
				entry = entry->vme_next;
				if (entry == vm_map_to_entry(cmap)) {
					vm_map_unlock_read(cmap);
					if (size != 0)
						kmem_free(ipc_kernel_map,
							  addr, size);
					return KERN_NO_SPACE;
				}
			}

			if (entry->is_sub_map)
				nmap = entry->object.sub_map;
			else
				break;

			/* move down to the lower map */

			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
		}

		/* cmap is read-locked; we have a real entry */

		object = entry->object.vm_object;
		region.vir_start = entry->vme_start;
		region.vir_end = entry->vme_end;
		region.vir_object = (vm_offset_t) object;
		region.vir_offset = entry->offset;
		region.vir_needs_copy = entry->needs_copy;
		region.vir_protection = entry->protection;
		region.vir_max_protection = entry->max_protection;
		region.vir_inheritance = entry->inheritance;
		region.vir_wired_count = entry->wired_count;
		region.vir_user_wired_count = entry->user_wired_count;

		used = 0;
		room = size / sizeof(vm_info_object_t);

		if (object == VM_OBJECT_NULL) {
			vm_map_unlock_read(cmap);
			/* no memory needed */
			break;
		}

		vm_object_lock(object);
		vm_map_unlock_read(cmap);

		for (cobject = object;; cobject = nobject) {
			/* cobject is locked */

			if (used < room) {
				vm_info_object_t *vio =
					&((vm_info_object_t *) addr)[used];

				vio->vio_object = (vm_offset_t) cobject;
				vio->vio_size = cobject->size;
				vio->vio_ref_count = cobject->ref_count;
				vio->vio_resident_page_count =
					cobject->resident_page_count;
				vio->vio_absent_count = cobject->absent_count;
				vio->vio_copy = (vm_offset_t) cobject->copy;
				vio->vio_shadow = (vm_offset_t) cobject->shadow;
				vio->vio_shadow_offset = cobject->shadow_offset;
				vio->vio_paging_offset = cobject->paging_offset;
				vio->vio_copy_strategy = cobject->copy_strategy;
				vio->vio_last_alloc = cobject->last_alloc;
				vio->vio_paging_in_progress =
					cobject->paging_in_progress;
				vio->vio_pager_created = cobject->pager_created;
				vio->vio_pager_initialized =
					cobject->pager_initialized;
				vio->vio_pager_ready = cobject->pager_ready;
				vio->vio_can_persist = cobject->can_persist;
				vio->vio_internal = cobject->internal;
				vio->vio_temporary = cobject->temporary;
				vio->vio_alive = cobject->alive;
				vio->vio_lock_in_progress =
					cobject->lock_in_progress;
				vio->vio_lock_restart = cobject->lock_restart;
			}

			used++;
			nobject = cobject->shadow;
			if (nobject == VM_OBJECT_NULL) {
				vm_object_unlock(cobject);
				break;
			}

			vm_object_lock(nobject);
			vm_object_unlock(cobject);
		}

		/* nothing locked */

		if (used <= room)
			break;

		/* must allocate more memory */

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
		size = round_page(2 * used * sizeof(vm_info_object_t));

		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		if (kr != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;

		kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
				 VM_PROT_READ|VM_PROT_WRITE, FALSE);
		assert(kr == KERN_SUCCESS);
	}

	/* free excess memory; make remaining memory pageable */

	if (used == 0) {
		copy = VM_MAP_COPY_NULL;

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);
	} else {
		vm_size_t size_used =
			round_page(used * sizeof(vm_info_object_t));

		kr = vm_map_unwire(ipc_kernel_map, addr, addr + size_used, FALSE);
		assert(kr == KERN_SUCCESS);

		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		if (size != size_used)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);
	}

	*regionp = region;
	*objectsp = (vm_info_object_array_t) copy;
	*objectsCntp = used;
	return KERN_SUCCESS;
#endif /* MACH_VM_DEBUG */
}
/*
 *	Return an array of virtual pages that are mapped to a task.
 */
kern_return_t
vm_mapped_pages_info(
	vm_map_t			map,
	page_address_array_t		*pages,
	mach_msg_type_number_t		*pages_count)
{
#if !MACH_VM_DEBUG
	return KERN_FAILURE;
#else
	pmap_t pmap;
	vm_size_t size, size_used;
	unsigned int actual, space;
	page_address_array_t list;
	vm_offset_t addr;

	if (map == VM_MAP_NULL)
		return (KERN_INVALID_ARGUMENT);

	pmap = map->pmap;
	size = pmap_resident_count(pmap) * sizeof(vm_offset_t);
	size = round_page(size);

	for (;;) {
		(void) vm_allocate(ipc_kernel_map, &addr, size, TRUE);
		(void) vm_map_unwire(ipc_kernel_map, addr, addr + size, FALSE);

		list = (page_address_array_t) addr;
		space = size / sizeof(vm_offset_t);

		actual = pmap_list_resident_pages(pmap, list, space);
		if (actual <= space)
			break;

		/*
		 * Free memory if not enough
		 */
		(void) kmem_free(ipc_kernel_map, addr, size);

		/*
		 * Try again with a buffer sized for the reported count
		 */
		size = round_page(actual * sizeof(vm_offset_t));
	}
	if (actual == 0) {
		*pages = 0;
		*pages_count = 0;
		(void) kmem_free(ipc_kernel_map, addr, size);
	}
	else {
		*pages_count = actual;
		size_used = round_page(actual * sizeof(vm_offset_t));
		(void) vm_map_wire(ipc_kernel_map,
				   addr, addr + size,
				   VM_PROT_READ|VM_PROT_WRITE, FALSE);
		(void) vm_map_copyin(ipc_kernel_map,
				     addr,
				     size_used,
				     TRUE,
				     (vm_map_copy_t *)pages);
		if (size_used != size) {
			(void) kmem_free(ipc_kernel_map,
					 addr + size_used,
					 size - size_used);
		}
	}

	return (KERN_SUCCESS);
#endif /* MACH_VM_DEBUG */
}

/*
 *	Routine:	host_virtual_physical_table_info
 *	Purpose:
 *		Return information about the VP table.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Returned information.
 *		KERN_INVALID_HOST	The host is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
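/*
 *	CountInOut, as used here: the caller supplies an in-line buffer and
 *	its capacity in *infop and *countp.  If the table information fits,
 *	it is returned in-line; otherwise the code below allocates pageable
 *	kernel memory and returns the data out-of-line as a vm_map_copy_t.
 */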
578 | ||
579 | kern_return_t | |
580 | host_virtual_physical_table_info( | |
581 | host_t host, | |
582 | hash_info_bucket_array_t *infop, | |
583 | mach_msg_type_number_t *countp) | |
584 | { | |
585 | #if !MACH_VM_DEBUG | |
586 | return KERN_FAILURE; | |
587 | #else | |
588 | vm_offset_t addr; | |
589 | vm_size_t size; | |
590 | hash_info_bucket_t *info; | |
591 | unsigned int potential, actual; | |
592 | kern_return_t kr; | |
593 | ||
594 | if (host == HOST_NULL) | |
595 | return KERN_INVALID_HOST; | |
596 | ||
597 | /* start with in-line data */ | |
598 | ||
599 | info = *infop; | |
600 | potential = *countp; | |
601 | ||
602 | for (;;) { | |
603 | actual = vm_page_info(info, potential); | |
604 | if (actual <= potential) | |
605 | break; | |
606 | ||
607 | /* allocate more memory */ | |
608 | ||
609 | if (info != *infop) | |
610 | kmem_free(ipc_kernel_map, addr, size); | |
611 | ||
612 | size = round_page(actual * sizeof *info); | |
613 | kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size); | |
614 | if (kr != KERN_SUCCESS) | |
615 | return KERN_RESOURCE_SHORTAGE; | |
616 | ||
617 | info = (hash_info_bucket_t *) addr; | |
618 | potential = size/sizeof *info; | |
619 | } | |
620 | ||
621 | if (info == *infop) { | |
622 | /* data fit in-line; nothing to deallocate */ | |
623 | ||
624 | *countp = actual; | |
625 | } else if (actual == 0) { | |
626 | kmem_free(ipc_kernel_map, addr, size); | |
627 | ||
628 | *countp = 0; | |
629 | } else { | |
630 | vm_map_copy_t copy; | |
631 | vm_size_t used; | |
632 | ||
633 | used = round_page(actual * sizeof *info); | |
634 | ||
635 | if (used != size) | |
636 | kmem_free(ipc_kernel_map, addr + used, size - used); | |
637 | ||
638 | kr = vm_map_copyin(ipc_kernel_map, addr, used, | |
639 | TRUE, ©); | |
640 | assert(kr == KERN_SUCCESS); | |
641 | ||
642 | *infop = (hash_info_bucket_t *) copy; | |
643 | *countp = actual; | |
644 | } | |
645 | ||
646 | return KERN_SUCCESS; | |
647 | #endif /* MACH_VM_DEBUG */ | |
648 | } |