Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
91447636 | 2 | * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. |
1c79356b A |
3 | * |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
ff6e181a A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. Please obtain a copy of the License at | |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
11 | * file. | |
1c79356b | 12 | * |
ff6e181a A |
13 | * The Original Code and all software distributed under the License are |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
1c79356b A |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
ff6e181a A |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
18 | * Please see the License for the specific language governing rights and | |
19 | * limitations under the License. | |
1c79356b A |
20 | * |
21 | * @APPLE_LICENSE_HEADER_END@ | |
22 | */ | |
23 | /* | |
24 | * @OSF_COPYRIGHT@ | |
25 | */ | |
26 | /* | |
27 | * Mach Operating System | |
28 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
29 | * All Rights Reserved. | |
30 | * | |
31 | * Permission to use, copy, modify and distribute this software and its | |
32 | * documentation is hereby granted, provided that both the copyright | |
33 | * notice and this permission notice appear in all copies of the | |
34 | * software, derivative works or modified versions, and any portions | |
35 | * thereof, and that both notices appear in supporting documentation. | |
36 | * | |
37 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" | |
38 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
39 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
40 | * | |
41 | * Carnegie Mellon requests users of this software to return to | |
42 | * | |
43 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |
44 | * School of Computer Science | |
45 | * Carnegie Mellon University | |
46 | * Pittsburgh PA 15213-3890 | |
47 | * | |
48 | * any improvements or extensions that they make and grant Carnegie Mellon | |
49 | * the rights to redistribute these changes. | |
50 | */ | |
51 | /* | |
52 | */ | |
53 | /* | |
54 | * File: vm/vm_map.c | |
55 | * Author: Avadis Tevanian, Jr., Michael Wayne Young | |
56 | * Date: 1985 | |
57 | * | |
58 | * Virtual memory mapping module. | |
59 | */ | |
60 | ||
1c79356b A |
61 | #include <task_swapper.h> |
62 | #include <mach_assert.h> | |
91447636 | 63 | #include <libkern/OSAtomic.h> |
1c79356b A |
64 | |
65 | #include <mach/kern_return.h> | |
66 | #include <mach/port.h> | |
67 | #include <mach/vm_attributes.h> | |
68 | #include <mach/vm_param.h> | |
69 | #include <mach/vm_behavior.h> | |
55e303ae | 70 | #include <mach/vm_statistics.h> |
91447636 A |
71 | #include <mach/memory_object.h> |
72 | #include <machine/cpu_capabilities.h> | |
73 | ||
1c79356b A |
74 | #include <kern/assert.h> |
75 | #include <kern/counters.h> | |
91447636 | 76 | #include <kern/kalloc.h> |
1c79356b | 77 | #include <kern/zalloc.h> |
91447636 A |
78 | |
79 | #include <vm/cpm.h> | |
1c79356b A |
80 | #include <vm/vm_init.h> |
81 | #include <vm/vm_fault.h> | |
82 | #include <vm/vm_map.h> | |
83 | #include <vm/vm_object.h> | |
84 | #include <vm/vm_page.h> | |
85 | #include <vm/vm_kern.h> | |
86 | #include <ipc/ipc_port.h> | |
87 | #include <kern/sched_prim.h> | |
88 | #include <kern/misc_protos.h> | |
1c79356b | 89 | #include <ddb/tr.h> |
55e303ae | 90 | #include <machine/db_machdep.h> |
1c79356b A |
91 | #include <kern/xpr.h> |
92 | ||
91447636 A |
93 | #include <mach/vm_map_server.h> |
94 | #include <mach/mach_host_server.h> | |
95 | #include <vm/vm_shared_memory_server.h> | |
96 | #include <vm/vm_protos.h> // for vm_map_commpage64 and vm_map_remove_commpage64 | |
97 | ||
98 | #ifdef ppc | |
99 | #include <ppc/mappings.h> | |
100 | #endif /* ppc */ | |
101 | ||
102 | #include <vm/vm_protos.h> | |
103 | ||
1c79356b A |
104 | /* Internal prototypes |
105 | */ | |
91447636 A |
106 | |
107 | static void vm_map_simplify_range( | |
108 | vm_map_t map, | |
109 | vm_map_offset_t start, | |
110 | vm_map_offset_t end); /* forward */ | |
111 | ||
112 | static boolean_t vm_map_range_check( | |
1c79356b | 113 | vm_map_t map, |
91447636 A |
114 | vm_map_offset_t start, |
115 | vm_map_offset_t end, | |
1c79356b A |
116 | vm_map_entry_t *entry); |
117 | ||
91447636 | 118 | static vm_map_entry_t _vm_map_entry_create( |
1c79356b A |
119 | struct vm_map_header *map_header); |
120 | ||
91447636 | 121 | static void _vm_map_entry_dispose( |
1c79356b A |
122 | struct vm_map_header *map_header, |
123 | vm_map_entry_t entry); | |
124 | ||
91447636 | 125 | static void vm_map_pmap_enter( |
1c79356b | 126 | vm_map_t map, |
91447636 A |
127 | vm_map_offset_t addr, |
128 | vm_map_offset_t end_addr, | |
1c79356b A |
129 | vm_object_t object, |
130 | vm_object_offset_t offset, | |
131 | vm_prot_t protection); | |
132 | ||
91447636 A |
133 | static void _vm_map_clip_end( |
134 | struct vm_map_header *map_header, | |
135 | vm_map_entry_t entry, | |
136 | vm_map_offset_t end); | |
137 | ||
138 | static void _vm_map_clip_start( | |
1c79356b A |
139 | struct vm_map_header *map_header, |
140 | vm_map_entry_t entry, | |
91447636 | 141 | vm_map_offset_t start); |
1c79356b | 142 | |
91447636 | 143 | static void vm_map_entry_delete( |
1c79356b A |
144 | vm_map_t map, |
145 | vm_map_entry_t entry); | |
146 | ||
91447636 | 147 | static kern_return_t vm_map_delete( |
1c79356b | 148 | vm_map_t map, |
91447636 A |
149 | vm_map_offset_t start, |
150 | vm_map_offset_t end, | |
151 | int flags, | |
152 | vm_map_t zap_map); | |
1c79356b | 153 | |
91447636 | 154 | static kern_return_t vm_map_copy_overwrite_unaligned( |
1c79356b A |
155 | vm_map_t dst_map, |
156 | vm_map_entry_t entry, | |
157 | vm_map_copy_t copy, | |
91447636 | 158 | vm_map_address_t start); |
1c79356b | 159 | |
91447636 | 160 | static kern_return_t vm_map_copy_overwrite_aligned( |
1c79356b A |
161 | vm_map_t dst_map, |
162 | vm_map_entry_t tmp_entry, | |
163 | vm_map_copy_t copy, | |
91447636 | 164 | vm_map_offset_t start, |
1c79356b A |
165 | pmap_t pmap); |
166 | ||
91447636 | 167 | static kern_return_t vm_map_copyin_kernel_buffer( |
1c79356b | 168 | vm_map_t src_map, |
91447636 A |
169 | vm_map_address_t src_addr, |
170 | vm_map_size_t len, | |
1c79356b A |
171 | boolean_t src_destroy, |
172 | vm_map_copy_t *copy_result); /* OUT */ | |
173 | ||
91447636 | 174 | static kern_return_t vm_map_copyout_kernel_buffer( |
1c79356b | 175 | vm_map_t map, |
91447636 | 176 | vm_map_address_t *addr, /* IN/OUT */ |
1c79356b A |
177 | vm_map_copy_t copy, |
178 | boolean_t overwrite); | |
179 | ||
91447636 | 180 | static void vm_map_fork_share( |
1c79356b A |
181 | vm_map_t old_map, |
182 | vm_map_entry_t old_entry, | |
183 | vm_map_t new_map); | |
184 | ||
91447636 | 185 | static boolean_t vm_map_fork_copy( |
1c79356b A |
186 | vm_map_t old_map, |
187 | vm_map_entry_t *old_entry_p, | |
188 | vm_map_t new_map); | |
189 | ||
91447636 | 190 | static void vm_map_region_top_walk( |
1c79356b A |
191 | vm_map_entry_t entry, |
192 | vm_region_top_info_t top); | |
193 | ||
91447636 A |
194 | static void vm_map_region_walk( |
195 | vm_map_t map, | |
196 | vm_map_offset_t va, | |
1c79356b | 197 | vm_map_entry_t entry, |
1c79356b | 198 | vm_object_offset_t offset, |
91447636 A |
199 | vm_object_size_t range, |
200 | vm_region_extended_info_t extended); | |
201 | ||
202 | static kern_return_t vm_map_wire_nested( | |
1c79356b | 203 | vm_map_t map, |
91447636 A |
204 | vm_map_offset_t start, |
205 | vm_map_offset_t end, | |
206 | vm_prot_t access_type, | |
207 | boolean_t user_wire, | |
208 | pmap_t map_pmap, | |
209 | vm_map_offset_t pmap_addr); | |
210 | ||
211 | static kern_return_t vm_map_unwire_nested( | |
212 | vm_map_t map, | |
213 | vm_map_offset_t start, | |
214 | vm_map_offset_t end, | |
215 | boolean_t user_wire, | |
216 | pmap_t map_pmap, | |
217 | vm_map_offset_t pmap_addr); | |
218 | ||
219 | static kern_return_t vm_map_overwrite_submap_recurse( | |
220 | vm_map_t dst_map, | |
221 | vm_map_offset_t dst_addr, | |
222 | vm_map_size_t dst_size); | |
223 | ||
224 | static kern_return_t vm_map_copy_overwrite_nested( | |
225 | vm_map_t dst_map, | |
226 | vm_map_offset_t dst_addr, | |
227 | vm_map_copy_t copy, | |
228 | boolean_t interruptible, | |
229 | pmap_t pmap); | |
230 | ||
231 | static kern_return_t vm_map_remap_extract( | |
232 | vm_map_t map, | |
233 | vm_map_offset_t addr, | |
234 | vm_map_size_t size, | |
235 | boolean_t copy, | |
236 | struct vm_map_header *map_header, | |
237 | vm_prot_t *cur_protection, | |
238 | vm_prot_t *max_protection, | |
239 | vm_inherit_t inheritance, | |
240 | boolean_t pageable); | |
241 | ||
242 | static kern_return_t vm_map_remap_range_allocate( | |
243 | vm_map_t map, | |
244 | vm_map_address_t *address, | |
245 | vm_map_size_t size, | |
246 | vm_map_offset_t mask, | |
247 | boolean_t anywhere, | |
248 | vm_map_entry_t *map_entry); | |
249 | ||
250 | static void vm_map_region_look_for_page( | |
251 | vm_map_t map, | |
252 | vm_map_offset_t va, | |
253 | vm_object_t object, | |
254 | vm_object_offset_t offset, | |
255 | int max_refcnt, | |
256 | int depth, | |
257 | vm_region_extended_info_t extended); | |
258 | ||
259 | static int vm_map_region_count_obj_refs( | |
260 | vm_map_entry_t entry, | |
261 | vm_object_t object); | |
1c79356b A |
262 | |
263 | /* | |
264 | * Macros to copy a vm_map_entry. We must be careful to correctly | |
265 | * manage the wired page count. vm_map_entry_copy() creates a new | |
266 | * map entry referring to the same memory - the wired count in the new entry | |
267 | * must be set to zero. vm_map_entry_copy_full() creates a new | |
268 | * entry that is identical to the old entry. This preserves the | |
269 | * wire count; it's used for map splitting and zone changing in | |
270 | * vm_map_copyout. | |
271 | */ | |
272 | #define vm_map_entry_copy(NEW,OLD) \ | |
273 | MACRO_BEGIN \ | |
274 | *(NEW) = *(OLD); \ | |
275 | (NEW)->is_shared = FALSE; \ | |
276 | (NEW)->needs_wakeup = FALSE; \ | |
277 | (NEW)->in_transition = FALSE; \ | |
278 | (NEW)->wired_count = 0; \ | |
279 | (NEW)->user_wired_count = 0; \ | |
280 | MACRO_END | |
281 | ||
282 | #define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD)) | |
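/*
 * A minimal sketch of the difference, assuming an existing entry
 * "old" in a map "map" (illustrative names only):
 *
 *	vm_map_entry_t dup = vm_map_entry_create(map);
 *	vm_map_entry_copy(dup, old);		// new mapping: wired_count reset to 0
 *
 *	vm_map_entry_t half = vm_map_entry_create(map);
 *	vm_map_entry_copy_full(half, old);	// splitting: wire counts preserved
 */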
283 | ||
284 | /* | |
285 | * Virtual memory maps provide for the mapping, protection, | |
286 | * and sharing of virtual memory objects. In addition, | |
287 | * this module provides for an efficient virtual copy of | |
288 | * memory from one map to another. | |
289 | * | |
290 | * Synchronization is required prior to most operations. | |
291 | * | |
292 | * Maps consist of an ordered doubly-linked list of simple | |
293 | * entries; a single hint is used to speed up lookups. | |
294 | * | |
295 | * Sharing maps have been deleted from this version of Mach. | |
296 | * All shared objects are now mapped directly into the respective | |
297 | * maps. This requires a change in the copy on write strategy; | |
298 | * the asymmetric (delayed) strategy is used for shared temporary | |
299 | * objects instead of the symmetric (shadow) strategy. All maps | |
300 | * are now "top level" maps (either task map, kernel map or submap | |
301 | * of the kernel map). | |
302 | * | |
303 | * Since portions of maps are specified by start/end addresses, | |
304 | * which may not align with existing map entries, all | |
305 | * routines merely "clip" entries to these start/end values. | |
306 | * [That is, an entry is split into two, bordering at a | |
307 | * start or end value.] Note that these clippings may not | |
308 | * always be necessary (as the two resulting entries are then | |
309 | * not changed); however, the clipping is done for convenience. | |
310 | * No attempt is currently made to "glue back together" two | |
311 | * abutting entries. | |
312 | * | |
313 | * The symmetric (shadow) copy strategy implements virtual copy | |
314 | * by copying VM object references from one map to | |
315 | * another, and then marking both regions as copy-on-write. | |
316 | * It is important to note that only one writeable reference | |
317 | * to a VM object region exists in any map when this strategy | |
318 | * is used -- this means that shadow object creation can be | |
319 | * delayed until a write operation occurs. The asymmetric (delayed) | |
320 | * strategy allows multiple maps to have writeable references to | |
321 | * the same region of a vm object, and hence cannot delay creating | |
322 | * its copy objects. See vm_object_copy_quickly() in vm_object.c. | |
323 | * Copying of permanent objects is completely different; see | |
324 | * vm_object_copy_strategically() in vm_object.c. | |
325 | */ | |
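/*
 * A minimal sketch of walking the ordered entry list described above
 * (illustrative only; the caller is assumed to hold the map lock):
 *
 *	vm_map_entry_t entry;
 *	for (entry = vm_map_first_entry(map);
 *	     entry != vm_map_to_entry(map);
 *	     entry = entry->vme_next) {
 *		// each entry covers [entry->vme_start, entry->vme_end)
 *	}
 */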
326 | ||
91447636 A |
327 | static zone_t vm_map_zone; /* zone for vm_map structures */ |
328 | static zone_t vm_map_entry_zone; /* zone for vm_map_entry structures */ | |
329 | static zone_t vm_map_kentry_zone; /* zone for kernel entry structures */ | |
330 | static zone_t vm_map_copy_zone; /* zone for vm_map_copy structures */ | |
1c79356b A |
331 | |
332 | ||
333 | /* | |
334 | * Placeholder object for submap operations. This object is dropped | |
335 | * into the range by a call to vm_map_find, and removed when | |
336 | * vm_map_submap creates the submap. | |
337 | */ | |
338 | ||
339 | vm_object_t vm_submap_object; | |
340 | ||
341 | /* | |
342 | * vm_map_init: | |
343 | * | |
344 | * Initialize the vm_map module. Must be called before | |
345 | * any other vm_map routines. | |
346 | * | |
347 | * Map and entry structures are allocated from zones -- we must | |
348 | * initialize those zones. | |
349 | * | |
350 | * There are three zones of interest: | |
351 | * | |
352 | * vm_map_zone: used to allocate maps. | |
353 | * vm_map_entry_zone: used to allocate map entries. | |
354 | * vm_map_kentry_zone: used to allocate map entries for the kernel. | |
355 | * | |
356 | * The kernel allocates map entries from a special zone that is initially | |
357 | * "crammed" with memory. It would be difficult (perhaps impossible) for | |
358 | * the kernel to allocate more memory to an entry zone when it became | |
359 | * empty since the very act of allocating memory implies the creation | |
360 | * of a new entry. | |
361 | */ | |
362 | ||
91447636 A |
363 | static void *map_data; |
364 | static vm_map_size_t map_data_size; | |
365 | static void *kentry_data; | |
366 | static vm_map_size_t kentry_data_size; | |
367 | static int kentry_count = 2048; /* to init kentry_data_size */ | |
1c79356b | 368 | |
0b4e3aa0 A |
369 | #define NO_COALESCE_LIMIT (1024 * 128) |
370 | ||
1c79356b A |
371 | /* |
372 | * Threshold for aggressive (eager) page map entering for vm copyout | |
373 | * operations. Any copyout larger will NOT be aggressively entered. | |
374 | */ | |
91447636 | 375 | static vm_map_size_t vm_map_aggressive_enter_max; /* set by bootstrap */ |
1c79356b | 376 | |
55e303ae A |
377 | /* Skip acquiring locks if we're in the midst of a kernel core dump */ |
378 | extern unsigned int not_in_kdp; | |
379 | ||
1c79356b A |
380 | void |
381 | vm_map_init( | |
382 | void) | |
383 | { | |
91447636 | 384 | vm_map_zone = zinit((vm_map_size_t) sizeof(struct vm_map), 40*1024, |
1c79356b A |
385 | PAGE_SIZE, "maps"); |
386 | ||
91447636 | 387 | vm_map_entry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry), |
1c79356b A |
388 | 1024*1024, PAGE_SIZE*5, |
389 | "non-kernel map entries"); | |
390 | ||
91447636 | 391 | vm_map_kentry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry), |
1c79356b A |
392 | kentry_data_size, kentry_data_size, |
393 | "kernel map entries"); | |
394 | ||
91447636 | 395 | vm_map_copy_zone = zinit((vm_map_size_t) sizeof(struct vm_map_copy), |
1c79356b A |
396 | 16*1024, PAGE_SIZE, "map copies"); |
397 | ||
398 | /* | |
399 | * Cram the map and kentry zones with initial data. | |
400 | * Set kentry_zone non-collectible to aid zone_gc(). | |
401 | */ | |
402 | zone_change(vm_map_zone, Z_COLLECT, FALSE); | |
403 | zone_change(vm_map_kentry_zone, Z_COLLECT, FALSE); | |
404 | zone_change(vm_map_kentry_zone, Z_EXPAND, FALSE); | |
405 | zcram(vm_map_zone, map_data, map_data_size); | |
406 | zcram(vm_map_kentry_zone, kentry_data, kentry_data_size); | |
407 | } | |
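/*
 * For reference, the zinit() arguments above are, in order: element
 * size, maximum zone size, allocation increment, and name -- e.g. the
 * map-copy zone is capped at 16K and grows a page at a time.
 */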
408 | ||
409 | void | |
410 | vm_map_steal_memory( | |
411 | void) | |
412 | { | |
91447636 | 413 | map_data_size = vm_map_round_page(10 * sizeof(struct vm_map)); |
1c79356b A |
414 | map_data = pmap_steal_memory(map_data_size); |
415 | ||
416 | #if 0 | |
417 | /* | |
418 | * Limiting worst case: vm_map_kentry_zone needs to map each "available" | |
419 | * physical page (i.e. that beyond the kernel image and page tables) | |
420 | * individually; we guess at most one entry per eight pages in the | |
421 | * real world. This works out to roughly .1 of 1% of physical memory, | |
422 | * or roughly 1900 entries (64K) for a 64M machine with 4K pages. | |
423 | */ | |
424 | #endif | |
425 | kentry_count = pmap_free_pages() / 8; | |
426 | ||
427 | ||
428 | kentry_data_size = | |
91447636 | 429 | vm_map_round_page(kentry_count * sizeof(struct vm_map_entry)); |
1c79356b A |
430 | kentry_data = pmap_steal_memory(kentry_data_size); |
431 | } | |
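/*
 * Worked example of the sizing above: on the 64M/4K machine from the
 * comment, pmap_free_pages() is at most 64M / 4K = 16384, so
 * kentry_count comes out near 16384 / 8 = 2048 -- in line with the
 * static default -- and kentry_data_size is that count times
 * sizeof(struct vm_map_entry), rounded up to a page boundary.
 */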
432 | ||
433 | /* | |
434 | * vm_map_create: | |
435 | * | |
436 | * Creates and returns a new empty VM map with | |
437 | * the given physical map structure, and having | |
438 | * the given lower and upper address bounds. | |
439 | */ | |
440 | vm_map_t | |
441 | vm_map_create( | |
91447636 A |
442 | pmap_t pmap, |
443 | vm_map_offset_t min, | |
444 | vm_map_offset_t max, | |
445 | boolean_t pageable) | |
1c79356b A |
446 | { |
447 | register vm_map_t result; | |
448 | ||
449 | result = (vm_map_t) zalloc(vm_map_zone); | |
450 | if (result == VM_MAP_NULL) | |
451 | panic("vm_map_create"); | |
452 | ||
453 | vm_map_first_entry(result) = vm_map_to_entry(result); | |
454 | vm_map_last_entry(result) = vm_map_to_entry(result); | |
455 | result->hdr.nentries = 0; | |
456 | result->hdr.entries_pageable = pageable; | |
457 | ||
458 | result->size = 0; | |
459 | result->ref_count = 1; | |
460 | #if TASK_SWAPPER | |
461 | result->res_count = 1; | |
462 | result->sw_state = MAP_SW_IN; | |
463 | #endif /* TASK_SWAPPER */ | |
464 | result->pmap = pmap; | |
465 | result->min_offset = min; | |
466 | result->max_offset = max; | |
467 | result->wiring_required = FALSE; | |
468 | result->no_zero_fill = FALSE; | |
9bccf70c | 469 | result->mapped = FALSE; |
1c79356b A |
470 | result->wait_for_space = FALSE; |
471 | result->first_free = vm_map_to_entry(result); | |
472 | result->hint = vm_map_to_entry(result); | |
473 | vm_map_lock_init(result); | |
91447636 | 474 | mutex_init(&result->s_lock, 0); |
1c79356b A |
475 | |
476 | return(result); | |
477 | } | |
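/*
 * A minimal usage sketch (illustrative bounds; the pmap is assumed
 * to have been created elsewhere):
 *
 *	vm_map_t map;
 *	map = vm_map_create(new_pmap,		// hypothetical pmap
 *			    VM_MIN_ADDRESS,	// lower address bound
 *			    VM_MAX_ADDRESS,	// upper address bound
 *			    TRUE);		// pageable entries
 */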
478 | ||
479 | /* | |
480 | * vm_map_entry_create: [ internal use only ] | |
481 | * | |
482 | * Allocates a VM map entry for insertion in the | |
483 | * given map (or map copy). No fields are filled. | |
484 | */ | |
485 | #define vm_map_entry_create(map) \ | |
486 | _vm_map_entry_create(&(map)->hdr) | |
487 | ||
488 | #define vm_map_copy_entry_create(copy) \ | |
489 | _vm_map_entry_create(&(copy)->cpy_hdr) | |
490 | ||
91447636 | 491 | static vm_map_entry_t |
1c79356b A |
492 | _vm_map_entry_create( |
493 | register struct vm_map_header *map_header) | |
494 | { | |
495 | register zone_t zone; | |
496 | register vm_map_entry_t entry; | |
497 | ||
498 | if (map_header->entries_pageable) | |
499 | zone = vm_map_entry_zone; | |
500 | else | |
501 | zone = vm_map_kentry_zone; | |
502 | ||
503 | entry = (vm_map_entry_t) zalloc(zone); | |
504 | if (entry == VM_MAP_ENTRY_NULL) | |
505 | panic("vm_map_entry_create"); | |
506 | ||
507 | return(entry); | |
508 | } | |
509 | ||
510 | /* | |
511 | * vm_map_entry_dispose: [ internal use only ] | |
512 | * | |
513 | * Inverse of vm_map_entry_create. | |
514 | */ | |
515 | #define vm_map_entry_dispose(map, entry) \ | |
516 | MACRO_BEGIN \ | |
517 | if((entry) == (map)->first_free) \ | |
518 | (map)->first_free = vm_map_to_entry(map); \ | |
519 | if((entry) == (map)->hint) \ | |
520 | (map)->hint = vm_map_to_entry(map); \ | |
521 | _vm_map_entry_dispose(&(map)->hdr, (entry)); \ | |
522 | MACRO_END | |
523 | ||
524 | #define vm_map_copy_entry_dispose(map, entry) \ | |
525 | _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry)) | |
526 | ||
91447636 | 527 | static void |
1c79356b A |
528 | _vm_map_entry_dispose( |
529 | register struct vm_map_header *map_header, | |
530 | register vm_map_entry_t entry) | |
531 | { | |
532 | register zone_t zone; | |
533 | ||
534 | if (map_header->entries_pageable) | |
535 | zone = vm_map_entry_zone; | |
536 | else | |
537 | zone = vm_map_kentry_zone; | |
538 | ||
91447636 | 539 | zfree(zone, entry); |
1c79356b A |
540 | } |
541 | ||
91447636 A |
542 | #if MACH_ASSERT |
543 | static boolean_t first_free_is_valid(vm_map_t map); /* forward */ | |
544 | static boolean_t first_free_check = FALSE; | |
545 | static boolean_t | |
1c79356b A |
546 | first_free_is_valid( |
547 | vm_map_t map) | |
548 | { | |
549 | vm_map_entry_t entry, next; | |
550 | ||
551 | if (!first_free_check) | |
552 | return TRUE; | |
553 | ||
554 | entry = vm_map_to_entry(map); | |
555 | next = entry->vme_next; | |
91447636 A |
556 | while (vm_map_trunc_page(next->vme_start) == vm_map_trunc_page(entry->vme_end) || |
557 | (vm_map_trunc_page(next->vme_start) == vm_map_trunc_page(entry->vme_start) && | |
1c79356b A |
558 | next != vm_map_to_entry(map))) { |
559 | entry = next; | |
560 | next = entry->vme_next; | |
561 | if (entry == vm_map_to_entry(map)) | |
562 | break; | |
563 | } | |
564 | if (map->first_free != entry) { | |
565 | printf("Bad first_free for map 0x%x: 0x%x should be 0x%x\n", | |
566 | map, map->first_free, entry); | |
567 | return FALSE; | |
568 | } | |
569 | return TRUE; | |
570 | } | |
91447636 | 571 | #endif /* MACH_ASSERT */ |
1c79356b A |
572 | |
573 | /* | |
574 | * UPDATE_FIRST_FREE: | |
575 | * | |
576 | * Updates the map->first_free pointer to the | |
577 | * entry immediately before the first hole in the map. | |
578 | * The map should be locked. | |
579 | */ | |
580 | #define UPDATE_FIRST_FREE(map, new_first_free) \ | |
581 | MACRO_BEGIN \ | |
582 | vm_map_t UFF_map; \ | |
583 | vm_map_entry_t UFF_first_free; \ | |
584 | vm_map_entry_t UFF_next_entry; \ | |
585 | UFF_map = (map); \ | |
586 | UFF_first_free = (new_first_free); \ | |
587 | UFF_next_entry = UFF_first_free->vme_next; \ | |
91447636 A |
588 | while (vm_map_trunc_page(UFF_next_entry->vme_start) == \ |
589 | vm_map_trunc_page(UFF_first_free->vme_end) || \ | |
590 | (vm_map_trunc_page(UFF_next_entry->vme_start) == \ | |
591 | vm_map_trunc_page(UFF_first_free->vme_start) && \ | |
1c79356b A |
592 | UFF_next_entry != vm_map_to_entry(UFF_map))) { \ |
593 | UFF_first_free = UFF_next_entry; \ | |
594 | UFF_next_entry = UFF_first_free->vme_next; \ | |
595 | if (UFF_first_free == vm_map_to_entry(UFF_map)) \ | |
596 | break; \ | |
597 | } \ | |
598 | UFF_map->first_free = UFF_first_free; \ | |
599 | assert(first_free_is_valid(UFF_map)); \ | |
600 | MACRO_END | |
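/*
 * For example (illustrative addresses): given entries covering
 * [0x1000,0x2000) and [0x2000,0x3000) followed by a gap, first_free
 * ends up pointing at the second entry, since the first hole in the
 * map begins at that entry's vme_end.
 */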
601 | ||
602 | /* | |
603 | * vm_map_entry_{un,}link: | |
604 | * | |
605 | * Insert/remove entries from maps (or map copies). | |
606 | */ | |
607 | #define vm_map_entry_link(map, after_where, entry) \ | |
608 | MACRO_BEGIN \ | |
609 | vm_map_t VMEL_map; \ | |
610 | vm_map_entry_t VMEL_entry; \ | |
611 | VMEL_map = (map); \ | |
612 | VMEL_entry = (entry); \ | |
613 | _vm_map_entry_link(&VMEL_map->hdr, after_where, VMEL_entry); \ | |
614 | UPDATE_FIRST_FREE(VMEL_map, VMEL_map->first_free); \ | |
615 | MACRO_END | |
616 | ||
617 | ||
618 | #define vm_map_copy_entry_link(copy, after_where, entry) \ | |
619 | _vm_map_entry_link(&(copy)->cpy_hdr, after_where, (entry)) | |
620 | ||
621 | #define _vm_map_entry_link(hdr, after_where, entry) \ | |
622 | MACRO_BEGIN \ | |
623 | (hdr)->nentries++; \ | |
624 | (entry)->vme_prev = (after_where); \ | |
625 | (entry)->vme_next = (after_where)->vme_next; \ | |
626 | (entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \ | |
627 | MACRO_END | |
628 | ||
629 | #define vm_map_entry_unlink(map, entry) \ | |
630 | MACRO_BEGIN \ | |
631 | vm_map_t VMEU_map; \ | |
632 | vm_map_entry_t VMEU_entry; \ | |
633 | vm_map_entry_t VMEU_first_free; \ | |
634 | VMEU_map = (map); \ | |
635 | VMEU_entry = (entry); \ | |
636 | if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) \ | |
637 | VMEU_first_free = VMEU_entry->vme_prev; \ | |
638 | else \ | |
639 | VMEU_first_free = VMEU_map->first_free; \ | |
640 | _vm_map_entry_unlink(&VMEU_map->hdr, VMEU_entry); \ | |
641 | UPDATE_FIRST_FREE(VMEU_map, VMEU_first_free); \ | |
642 | MACRO_END | |
643 | ||
644 | #define vm_map_copy_entry_unlink(copy, entry) \ | |
645 | _vm_map_entry_unlink(&(copy)->cpy_hdr, (entry)) | |
646 | ||
647 | #define _vm_map_entry_unlink(hdr, entry) \ | |
648 | MACRO_BEGIN \ | |
649 | (hdr)->nentries--; \ | |
650 | (entry)->vme_next->vme_prev = (entry)->vme_prev; \ | |
651 | (entry)->vme_prev->vme_next = (entry)->vme_next; \ | |
652 | MACRO_END | |
653 | ||
1c79356b | 654 | #if MACH_ASSERT && TASK_SWAPPER |
1c79356b A |
655 | /* |
656 | * vm_map_res_reference: | |
657 | * | |
658 | * Adds another valid residence count to the given map. | |
659 | * | |
660 | * Map is locked so this function can be called from | |
661 | * vm_map_swapin. | |
662 | * | |
663 | */ | |
664 | void vm_map_res_reference(register vm_map_t map) | |
665 | { | |
666 | /* assert map is locked */ | |
667 | assert(map->res_count >= 0); | |
668 | assert(map->ref_count >= map->res_count); | |
669 | if (map->res_count == 0) { | |
670 | mutex_unlock(&map->s_lock); | |
671 | vm_map_lock(map); | |
672 | vm_map_swapin(map); | |
673 | mutex_lock(&map->s_lock); | |
674 | ++map->res_count; | |
675 | vm_map_unlock(map); | |
676 | } else | |
677 | ++map->res_count; | |
678 | } | |
679 | ||
680 | /* | |
681 | * vm_map_reference_swap: | |
682 | * | |
683 | * Adds valid reference and residence counts to the given map. | |
684 | * | |
685 | * The map may not be in memory (i.e. zero residence count). | |
686 | * | |
687 | */ | |
688 | void vm_map_reference_swap(register vm_map_t map) | |
689 | { | |
690 | assert(map != VM_MAP_NULL); | |
691 | mutex_lock(&map->s_lock); | |
692 | assert(map->res_count >= 0); | |
693 | assert(map->ref_count >= map->res_count); | |
694 | map->ref_count++; | |
695 | vm_map_res_reference(map); | |
696 | mutex_unlock(&map->s_lock); | |
697 | } | |
698 | ||
699 | /* | |
700 | * vm_map_res_deallocate: | |
701 | * | |
702 | * Decrement residence count on a map; possibly causing swapout. | |
703 | * | |
704 | * The map must be in memory (i.e. non-zero residence count). | |
705 | * | |
706 | * The map is locked, so this function is callable from vm_map_deallocate. | |
707 | * | |
708 | */ | |
709 | void vm_map_res_deallocate(register vm_map_t map) | |
710 | { | |
711 | assert(map->res_count > 0); | |
712 | if (--map->res_count == 0) { | |
713 | mutex_unlock(&map->s_lock); | |
714 | vm_map_lock(map); | |
715 | vm_map_swapout(map); | |
716 | vm_map_unlock(map); | |
717 | mutex_lock(&map->s_lock); | |
718 | } | |
719 | assert(map->ref_count >= map->res_count); | |
720 | } | |
721 | #endif /* MACH_ASSERT && TASK_SWAPPER */ | |
722 | ||
1c79356b A |
723 | /* |
724 | * vm_map_destroy: | |
725 | * | |
726 | * Actually destroy a map. | |
727 | */ | |
728 | void | |
729 | vm_map_destroy( | |
730 | register vm_map_t map) | |
91447636 | 731 | { |
1c79356b A |
732 | vm_map_lock(map); |
733 | (void) vm_map_delete(map, map->min_offset, | |
91447636 A |
734 | map->max_offset, VM_MAP_NO_FLAGS, |
735 | VM_MAP_NULL); | |
1c79356b | 736 | vm_map_unlock(map); |
91447636 A |
737 | |
738 | #ifdef __PPC__ | |
739 | if (map->hdr.nentries!=0) | |
740 | vm_map_remove_commpage64(map); | |
741 | #endif /* __PPC__ */ | |
742 | ||
743 | assert(map->hdr.nentries==0); | |
1c79356b | 744 | |
55e303ae A |
745 | if(map->pmap) |
746 | pmap_destroy(map->pmap); | |
1c79356b | 747 | |
91447636 | 748 | zfree(vm_map_zone, map); |
1c79356b A |
749 | } |
750 | ||
751 | #if TASK_SWAPPER | |
752 | /* | |
753 | * vm_map_swapin/vm_map_swapout | |
754 | * | |
755 | * Swap a map in and out, either referencing or releasing its resources. | |
756 | * These functions are internal use only; however, they must be exported | |
757 | * because they may be called from macros, which are exported. | |
758 | * | |
759 | * In the case of swapout, there could be races on the residence count, | |
760 | * so if the residence count is up, we return, assuming that a | |
761 | * vm_map_deallocate() call in the near future will bring us back. | |
762 | * | |
763 | * Locking: | |
764 | * -- We use the map write lock for synchronization among races. | |
765 | * -- The map write lock, and not the simple s_lock, protects the | |
766 | * swap state of the map. | |
767 | * -- If a map entry is a share map, then we hold both locks, in | |
768 | * hierarchical order. | |
769 | * | |
770 | * Synchronization Notes: | |
771 | * 1) If a vm_map_swapin() call happens while swapout in progress, it | |
772 | * will block on the map lock and proceed when swapout is through. | |
773 | * 2) A vm_map_reference() call at this time is illegal, and will | |
774 | * cause a panic. vm_map_reference() is only allowed on resident | |
775 | * maps, since it refuses to block. | |
776 | * 3) A vm_map_swapin() call during a swapin will block, and | |
777 | * proceed when the first swapin is done, turning into a nop. | |
778 | * This is the reason the res_count is not incremented until | |
779 | * after the swapin is complete. | |
780 | * 4) There is a timing hole after the checks of the res_count, before | |
781 | * the map lock is taken, during which a swapin may get the lock | |
782 | * before a swapout about to happen. If this happens, the swapin | |
783 | * will detect the state and increment the reference count, causing | |
784 | * the swapout to be a nop, thereby delaying it until a later | |
785 | * vm_map_deallocate. If the swapout gets the lock first, then | |
786 | * the swapin will simply block until the swapout is done, and | |
787 | * then proceed. | |
788 | * | |
789 | * Because vm_map_swapin() is potentially an expensive operation, it | |
790 | * should be used with caution. | |
791 | * | |
792 | * Invariants: | |
793 | * 1) A map with a residence count of zero is either swapped, or | |
794 | * being swapped. | |
795 | * 2) A map with a non-zero residence count is either resident, | |
796 | * or being swapped in. | |
797 | */ | |
798 | ||
799 | int vm_map_swap_enable = 1; | |
800 | ||
801 | void vm_map_swapin (vm_map_t map) | |
802 | { | |
803 | register vm_map_entry_t entry; | |
804 | ||
805 | if (!vm_map_swap_enable) /* debug */ | |
806 | return; | |
807 | ||
808 | /* | |
809 | * Map is locked | |
810 | * First deal with various races. | |
811 | */ | |
812 | if (map->sw_state == MAP_SW_IN) | |
813 | /* | |
814 | * we raced with swapout and won. Returning will incr. | |
815 | * the res_count, turning the swapout into a nop. | |
816 | */ | |
817 | return; | |
818 | ||
819 | /* | |
820 | * The residence count must be zero. If we raced with another | |
821 | * swapin, the state would have been IN; if we raced with a | |
822 | * swapout (after another competing swapin), we must have lost | |
823 | * the race to get here (see above comment), in which case | |
824 | * res_count is still 0. | |
825 | */ | |
826 | assert(map->res_count == 0); | |
827 | ||
828 | /* | |
829 | * There are no intermediate states of a map going out or | |
830 | * coming in, since the map is locked during the transition. | |
831 | */ | |
832 | assert(map->sw_state == MAP_SW_OUT); | |
833 | ||
834 | /* | |
835 | * We now operate upon each map entry. If the entry is a sub- | |
836 | * or share-map, we call vm_map_res_reference upon it. | |
837 | * If the entry is an object, we call vm_object_res_reference | |
838 | * (this may iterate through the shadow chain). | |
839 | * Note that we hold the map locked the entire time, | |
840 | * even if we get back here via a recursive call in | |
841 | * vm_map_res_reference. | |
842 | */ | |
843 | entry = vm_map_first_entry(map); | |
844 | ||
845 | while (entry != vm_map_to_entry(map)) { | |
846 | if (entry->object.vm_object != VM_OBJECT_NULL) { | |
847 | if (entry->is_sub_map) { | |
848 | vm_map_t lmap = entry->object.sub_map; | |
849 | mutex_lock(&lmap->s_lock); | |
850 | vm_map_res_reference(lmap); | |
851 | mutex_unlock(&lmap->s_lock); | |
852 | } else { | |
853 | vm_object_t object = entry->object.vm_object; | |
854 | vm_object_lock(object); | |
855 | /* | |
856 | * This call may iterate through the | |
857 | * shadow chain. | |
858 | */ | |
859 | vm_object_res_reference(object); | |
860 | vm_object_unlock(object); | |
861 | } | |
862 | } | |
863 | entry = entry->vme_next; | |
864 | } | |
865 | assert(map->sw_state == MAP_SW_OUT); | |
866 | map->sw_state = MAP_SW_IN; | |
867 | } | |
868 | ||
869 | void vm_map_swapout(vm_map_t map) | |
870 | { | |
871 | register vm_map_entry_t entry; | |
872 | ||
873 | /* | |
874 | * Map is locked | |
875 | * First deal with various races. | |
876 | * If we raced with a swapin and lost, the residence count | |
877 | * will have been incremented to 1, and we simply return. | |
878 | */ | |
879 | mutex_lock(&map->s_lock); | |
880 | if (map->res_count != 0) { | |
881 | mutex_unlock(&map->s_lock); | |
882 | return; | |
883 | } | |
884 | mutex_unlock(&map->s_lock); | |
885 | ||
886 | /* | |
887 | * There are no intermediate states of a map going out or | |
888 | * coming in, since the map is locked during the transition. | |
889 | */ | |
890 | assert(map->sw_state == MAP_SW_IN); | |
891 | ||
892 | if (!vm_map_swap_enable) | |
893 | return; | |
894 | ||
895 | /* | |
896 | * We now operate upon each map entry. If the entry is a sub- | |
897 | * or share-map, we call vm_map_res_deallocate upon it. | |
898 | * If the entry is an object, we call vm_object_res_deallocate | |
899 | * (this may iterate through the shadow chain). | |
900 | * Note that we hold the map locked the entire time, | |
901 | * even if we get back here via a recursive call in | |
902 | * vm_map_res_deallocate. | |
903 | */ | |
904 | entry = vm_map_first_entry(map); | |
905 | ||
906 | while (entry != vm_map_to_entry(map)) { | |
907 | if (entry->object.vm_object != VM_OBJECT_NULL) { | |
908 | if (entry->is_sub_map) { | |
909 | vm_map_t lmap = entry->object.sub_map; | |
910 | mutex_lock(&lmap->s_lock); | |
911 | vm_map_res_deallocate(lmap); | |
912 | mutex_unlock(&lmap->s_lock); | |
913 | } else { | |
914 | vm_object_t object = entry->object.vm_object; | |
915 | vm_object_lock(object); | |
916 | /* | |
917 | * This call may take a long time, | |
918 | * since it could actively push | |
919 | * out pages (if we implement it | |
920 | * that way). | |
921 | */ | |
922 | vm_object_res_deallocate(object); | |
923 | vm_object_unlock(object); | |
924 | } | |
925 | } | |
926 | entry = entry->vme_next; | |
927 | } | |
928 | assert(map->sw_state == MAP_SW_IN); | |
929 | map->sw_state = MAP_SW_OUT; | |
930 | } | |
931 | ||
932 | #endif /* TASK_SWAPPER */ | |
933 | ||
934 | ||
935 | /* | |
936 | * SAVE_HINT: | |
937 | * | |
938 | * Saves the specified entry as the hint for | |
939 | * future lookups. Performs necessary interlocks. | |
940 | */ | |
941 | #define SAVE_HINT(map,value) \ | |
55e303ae | 942 | MACRO_BEGIN \ |
1c79356b A |
943 | mutex_lock(&(map)->s_lock); \ |
944 | (map)->hint = (value); \ | |
55e303ae A |
945 | mutex_unlock(&(map)->s_lock); \ |
946 | MACRO_END | |
1c79356b A |
947 | |
948 | /* | |
949 | * vm_map_lookup_entry: [ internal use only ] | |
950 | * | |
951 | * Finds the map entry containing (or | |
952 | * immediately preceding) the specified address | |
953 | * in the given map; the entry is returned | |
954 | * in the "entry" parameter. The boolean | |
955 | * result indicates whether the address is | |
956 | * actually contained in the map. | |
957 | */ | |
958 | boolean_t | |
959 | vm_map_lookup_entry( | |
91447636 A |
960 | register vm_map_t map, |
961 | register vm_map_offset_t address, | |
1c79356b A |
962 | vm_map_entry_t *entry) /* OUT */ |
963 | { | |
964 | register vm_map_entry_t cur; | |
965 | register vm_map_entry_t last; | |
966 | ||
967 | /* | |
968 | * Start looking either from the head of the | |
969 | * list, or from the hint. | |
970 | */ | |
55e303ae A |
971 | if (not_in_kdp) |
972 | mutex_lock(&map->s_lock); | |
1c79356b | 973 | cur = map->hint; |
55e303ae A |
974 | if (not_in_kdp) |
975 | mutex_unlock(&map->s_lock); | |
1c79356b A |
976 | |
977 | if (cur == vm_map_to_entry(map)) | |
978 | cur = cur->vme_next; | |
979 | ||
980 | if (address >= cur->vme_start) { | |
981 | /* | |
982 | * Go from hint to end of list. | |
983 | * | |
984 | * But first, make a quick check to see if | |
985 | * we are already looking at the entry we | |
986 | * want (which is usually the case). | |
987 | * Note also that we don't need to save the hint | |
988 | * here... it is the same hint (unless we are | |
989 | * at the header, in which case the hint didn't | |
990 | * buy us anything anyway). | |
991 | */ | |
992 | last = vm_map_to_entry(map); | |
993 | if ((cur != last) && (cur->vme_end > address)) { | |
994 | *entry = cur; | |
995 | return(TRUE); | |
996 | } | |
997 | } | |
998 | else { | |
999 | /* | |
1000 | * Go from start to hint, *inclusively* | |
1001 | */ | |
1002 | last = cur->vme_next; | |
1003 | cur = vm_map_first_entry(map); | |
1004 | } | |
1005 | ||
1006 | /* | |
1007 | * Search linearly | |
1008 | */ | |
1009 | ||
1010 | while (cur != last) { | |
1011 | if (cur->vme_end > address) { | |
1012 | if (address >= cur->vme_start) { | |
1013 | /* | |
1014 | * Save this lookup for future | |
1015 | * hints, and return | |
1016 | */ | |
1017 | ||
1018 | *entry = cur; | |
55e303ae A |
1019 | if (not_in_kdp) |
1020 | SAVE_HINT(map, cur); | |
1c79356b A |
1021 | return(TRUE); |
1022 | } | |
1023 | break; | |
1024 | } | |
1025 | cur = cur->vme_next; | |
1026 | } | |
1027 | *entry = cur->vme_prev; | |
55e303ae A |
1028 | if (not_in_kdp) |
1029 | SAVE_HINT(map, *entry); | |
1c79356b A |
1030 | return(FALSE); |
1031 | } | |
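/*
 * A minimal sketch of the usual calling pattern (the caller holds
 * the map lock; names illustrative):
 *
 *	vm_map_entry_t entry;
 *	if (vm_map_lookup_entry(map, address, &entry)) {
 *		// address lies within [entry->vme_start, entry->vme_end)
 *	} else {
 *		// address falls in a hole; "entry" is the entry (or the
 *		// map header) immediately preceding that hole
 *	}
 */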
1032 | ||
1033 | /* | |
1034 | * Routine: vm_map_find_space | |
1035 | * Purpose: | |
1036 | * Allocate a range in the specified virtual address map, | |
1037 | * returning the entry allocated for that range. | |
1038 | * Used by kmem_alloc, etc. | |
1039 | * | |
1040 | * The map must NOT be locked. It will be returned locked | |
1041 | * on KERN_SUCCESS, unlocked on failure. | |
1042 | * | |
1043 | * If an entry is allocated, the object/offset fields | |
1044 | * are initialized to zero. | |
1045 | */ | |
1046 | kern_return_t | |
1047 | vm_map_find_space( | |
1048 | register vm_map_t map, | |
91447636 A |
1049 | vm_map_offset_t *address, /* OUT */ |
1050 | vm_map_size_t size, | |
1051 | vm_map_offset_t mask, | |
1c79356b A |
1052 | vm_map_entry_t *o_entry) /* OUT */ |
1053 | { | |
1054 | register vm_map_entry_t entry, new_entry; | |
91447636 A |
1055 | register vm_map_offset_t start; |
1056 | register vm_map_offset_t end; | |
1057 | ||
1058 | if (size == 0) { | |
1059 | *address = 0; | |
1060 | return KERN_INVALID_ARGUMENT; | |
1061 | } | |
1c79356b A |
1062 | |
1063 | new_entry = vm_map_entry_create(map); | |
1064 | ||
1065 | /* | |
1066 | * Look for the first possible address; if there's already | |
1067 | * something at this address, we have to start after it. | |
1068 | */ | |
1069 | ||
1070 | vm_map_lock(map); | |
1071 | ||
1072 | assert(first_free_is_valid(map)); | |
1073 | if ((entry = map->first_free) == vm_map_to_entry(map)) | |
1074 | start = map->min_offset; | |
1075 | else | |
1076 | start = entry->vme_end; | |
1077 | ||
1078 | /* | |
1079 | * In any case, the "entry" always precedes | |
1080 | * the proposed new region throughout the loop: | |
1081 | */ | |
1082 | ||
1083 | while (TRUE) { | |
1084 | register vm_map_entry_t next; | |
1085 | ||
1086 | /* | |
1087 | * Find the end of the proposed new region. | |
1088 | * Be sure we didn't go beyond the end, or | |
1089 | * wrap around the address. | |
1090 | */ | |
1091 | ||
1092 | end = ((start + mask) & ~mask); | |
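/* e.g. with mask = 0x3FFF (16K alignment) and start = 0x5000:
 * (0x5000 + 0x3FFF) & ~0x3FFF = 0x8000, i.e. start rounded up
 * to the next 16K boundary (illustrative values). */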
1093 | if (end < start) { | |
1094 | vm_map_entry_dispose(map, new_entry); | |
1095 | vm_map_unlock(map); | |
1096 | return(KERN_NO_SPACE); | |
1097 | } | |
1098 | start = end; | |
1099 | end += size; | |
1100 | ||
1101 | if ((end > map->max_offset) || (end < start)) { | |
1102 | vm_map_entry_dispose(map, new_entry); | |
1103 | vm_map_unlock(map); | |
1104 | return(KERN_NO_SPACE); | |
1105 | } | |
1106 | ||
1107 | /* | |
1108 | * If there are no more entries, we must win. | |
1109 | */ | |
1110 | ||
1111 | next = entry->vme_next; | |
1112 | if (next == vm_map_to_entry(map)) | |
1113 | break; | |
1114 | ||
1115 | /* | |
1116 | * If there is another entry, it must be | |
1117 | * after the end of the potential new region. | |
1118 | */ | |
1119 | ||
1120 | if (next->vme_start >= end) | |
1121 | break; | |
1122 | ||
1123 | /* | |
1124 | * Didn't fit -- move to the next entry. | |
1125 | */ | |
1126 | ||
1127 | entry = next; | |
1128 | start = entry->vme_end; | |
1129 | } | |
1130 | ||
1131 | /* | |
1132 | * At this point, | |
1133 | * "start" and "end" should define the endpoints of the | |
1134 | * available new range, and | |
1135 | * "entry" should refer to the region before the new | |
1136 | * range, and | |
1137 | * | |
1138 | * the map should be locked. | |
1139 | */ | |
1140 | ||
1141 | *address = start; | |
1142 | ||
1143 | new_entry->vme_start = start; | |
1144 | new_entry->vme_end = end; | |
1145 | assert(page_aligned(new_entry->vme_start)); | |
1146 | assert(page_aligned(new_entry->vme_end)); | |
1147 | ||
1148 | new_entry->is_shared = FALSE; | |
1149 | new_entry->is_sub_map = FALSE; | |
1150 | new_entry->use_pmap = FALSE; | |
1151 | new_entry->object.vm_object = VM_OBJECT_NULL; | |
1152 | new_entry->offset = (vm_object_offset_t) 0; | |
1153 | ||
1154 | new_entry->needs_copy = FALSE; | |
1155 | ||
1156 | new_entry->inheritance = VM_INHERIT_DEFAULT; | |
1157 | new_entry->protection = VM_PROT_DEFAULT; | |
1158 | new_entry->max_protection = VM_PROT_ALL; | |
1159 | new_entry->behavior = VM_BEHAVIOR_DEFAULT; | |
1160 | new_entry->wired_count = 0; | |
1161 | new_entry->user_wired_count = 0; | |
1162 | ||
1163 | new_entry->in_transition = FALSE; | |
1164 | new_entry->needs_wakeup = FALSE; | |
1165 | ||
1166 | /* | |
1167 | * Insert the new entry into the list | |
1168 | */ | |
1169 | ||
1170 | vm_map_entry_link(map, entry, new_entry); | |
1171 | ||
1172 | map->size += size; | |
1173 | ||
1174 | /* | |
1175 | * Update the lookup hint | |
1176 | */ | |
1177 | SAVE_HINT(map, new_entry); | |
1178 | ||
1179 | *o_entry = new_entry; | |
1180 | return(KERN_SUCCESS); | |
1181 | } | |
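/*
 * A minimal sketch in the style of the kmem_alloc-type callers noted
 * above (illustrative; error handling and object setup omitted):
 *
 *	vm_map_offset_t	addr;
 *	vm_map_entry_t	entry;
 *
 *	if (vm_map_find_space(kernel_map, &addr, size,
 *			      (vm_map_offset_t) 0, &entry) == KERN_SUCCESS) {
 *		// fill in entry->object.vm_object and entry->offset here
 *		vm_map_unlock(kernel_map);	// map was returned locked
 *	}
 */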
1182 | ||
1183 | int vm_map_pmap_enter_print = FALSE; | |
1184 | int vm_map_pmap_enter_enable = FALSE; | |
1185 | ||
1186 | /* | |
91447636 | 1187 | * Routine: vm_map_pmap_enter [internal only] |
1c79356b A |
1188 | * |
1189 | * Description: | |
1190 | * Force pages from the specified object to be entered into | |
1191 | * the pmap at the specified address if they are present. | |
1192 | * As soon as a page is not found in the object, the scan ends. | |
1193 | * | |
1194 | * Returns: | |
1195 | * Nothing. | |
1196 | * | |
1197 | * In/out conditions: | |
1198 | * The source map should not be locked on entry. | |
1199 | */ | |
91447636 | 1200 | static void |
1c79356b A |
1201 | vm_map_pmap_enter( |
1202 | vm_map_t map, | |
91447636 A |
1203 | register vm_map_offset_t addr, |
1204 | register vm_map_offset_t end_addr, | |
1c79356b A |
1205 | register vm_object_t object, |
1206 | vm_object_offset_t offset, | |
1207 | vm_prot_t protection) | |
1208 | { | |
9bccf70c | 1209 | unsigned int cache_attr; |
0b4e3aa0 | 1210 | |
55e303ae A |
1211 | if(map->pmap == 0) |
1212 | return; | |
1213 | ||
1c79356b A |
1214 | while (addr < end_addr) { |
1215 | register vm_page_t m; | |
1216 | ||
1217 | vm_object_lock(object); | |
1218 | vm_object_paging_begin(object); | |
1219 | ||
1220 | m = vm_page_lookup(object, offset); | |
91447636 A |
1221 | /* |
1222 | * ENCRYPTED SWAP: | |
1223 | * The user should never see encrypted data, so do not | |
1224 | * enter an encrypted page in the page table. | |
1225 | */ | |
1226 | if (m == VM_PAGE_NULL || m->busy || m->encrypted || | |
1c79356b A |
1227 | (m->unusual && ( m->error || m->restart || m->absent || |
1228 | protection & m->page_lock))) { | |
1229 | ||
1230 | vm_object_paging_end(object); | |
1231 | vm_object_unlock(object); | |
1232 | return; | |
1233 | } | |
1234 | ||
1235 | assert(!m->fictitious); /* XXX is this possible ??? */ | |
1236 | ||
1237 | if (vm_map_pmap_enter_print) { | |
1238 | printf("vm_map_pmap_enter:"); | |
91447636 A |
1239 | printf("map: %x, addr: %llx, object: %x, offset: %llx\n", |
1240 | map, (unsigned long long)addr, object, (unsigned long long)offset); | |
1c79356b | 1241 | } |
1c79356b | 1242 | m->busy = TRUE; |
765c9de3 A |
1243 | |
1244 | if (m->no_isync == TRUE) { | |
91447636 | 1245 | pmap_sync_page_data_phys(m->phys_page); |
765c9de3 A |
1246 | m->no_isync = FALSE; |
1247 | } | |
9bccf70c A |
1248 | |
1249 | cache_attr = ((unsigned int)object->wimg_bits) & VM_WIMG_MASK; | |
1c79356b A |
1250 | vm_object_unlock(object); |
1251 | ||
9bccf70c A |
1252 | PMAP_ENTER(map->pmap, addr, m, |
1253 | protection, cache_attr, FALSE); | |
1c79356b A |
1254 | |
1255 | vm_object_lock(object); | |
0b4e3aa0 | 1256 | |
1c79356b A |
1257 | PAGE_WAKEUP_DONE(m); |
1258 | vm_page_lock_queues(); | |
1259 | if (!m->active && !m->inactive) | |
1260 | vm_page_activate(m); | |
1261 | vm_page_unlock_queues(); | |
1262 | vm_object_paging_end(object); | |
1263 | vm_object_unlock(object); | |
1264 | ||
1265 | offset += PAGE_SIZE_64; | |
1266 | addr += PAGE_SIZE; | |
1267 | } | |
1268 | } | |
1269 | ||
91447636 A |
1270 | boolean_t vm_map_pmap_is_empty( |
1271 | vm_map_t map, | |
1272 | vm_map_offset_t start, | |
1273 | vm_map_offset_t end); | |
1274 | boolean_t vm_map_pmap_is_empty( | |
1275 | vm_map_t map, | |
1276 | vm_map_offset_t start, | |
1277 | vm_map_offset_t end) | |
1278 | { | |
1279 | vm_map_offset_t offset; | |
1280 | ppnum_t phys_page; | |
1281 | ||
1282 | if (map->pmap == NULL) { | |
1283 | return TRUE; | |
1284 | } | |
1285 | for (offset = start; | |
1286 | offset < end; | |
1287 | offset += PAGE_SIZE) { | |
1288 | phys_page = pmap_find_phys(map->pmap, offset); | |
1289 | if (phys_page) { | |
1290 | kprintf("vm_map_pmap_is_empty(%p,0x%llx,0x%llx): " | |
1291 | "page %d at 0x%llx\n", | |
1292 | map, start, end, phys_page, offset); | |
1293 | return FALSE; | |
1294 | } | |
1295 | } | |
1296 | return TRUE; | |
1297 | } | |
1298 | ||
1c79356b A |
1299 | /* |
1300 | * Routine: vm_map_enter | |
1301 | * | |
1302 | * Description: | |
1303 | * Allocate a range in the specified virtual address map. | |
1304 | * The resulting range will refer to memory defined by | |
1305 | * the given memory object and offset into that object. | |
1306 | * | |
1307 | * Arguments are as defined in the vm_map call. | |
1308 | */ | |
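/*
 * A minimal sketch: mapping anonymous zero-fill memory anywhere in
 * the map (illustrative values throughout):
 *
 *	vm_map_offset_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = vm_map_enter(map, &addr, size, (vm_map_offset_t) 0,
 *			  VM_FLAGS_ANYWHERE,
 *			  VM_OBJECT_NULL, (vm_object_offset_t) 0, FALSE,
 *			  VM_PROT_DEFAULT, VM_PROT_ALL,
 *			  VM_INHERIT_DEFAULT);
 */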
91447636 A |
1309 | int _map_enter_debug = 0; |
1310 | static unsigned int vm_map_enter_restore_successes = 0; | |
1311 | static unsigned int vm_map_enter_restore_failures = 0; | |
1c79356b A |
1312 | kern_return_t |
1313 | vm_map_enter( | |
91447636 A |
1314 | vm_map_t map, |
1315 | vm_map_offset_t *address, /* IN/OUT */ | |
1316 | vm_map_size_t size, | |
1317 | vm_map_offset_t mask, | |
1c79356b A |
1318 | int flags, |
1319 | vm_object_t object, | |
1320 | vm_object_offset_t offset, | |
1321 | boolean_t needs_copy, | |
1322 | vm_prot_t cur_protection, | |
1323 | vm_prot_t max_protection, | |
1324 | vm_inherit_t inheritance) | |
1325 | { | |
91447636 A |
1326 | vm_map_entry_t entry, new_entry; |
1327 | vm_map_offset_t start, tmp_start; | |
1328 | vm_map_offset_t end, tmp_end; | |
1c79356b | 1329 | kern_return_t result = KERN_SUCCESS; |
91447636 A |
1330 | vm_map_t zap_old_map = VM_MAP_NULL; |
1331 | vm_map_t zap_new_map = VM_MAP_NULL; | |
1332 | boolean_t map_locked = FALSE; | |
1333 | boolean_t pmap_empty = TRUE; | |
1334 | boolean_t new_mapping_established = FALSE; | |
1335 | boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0); | |
1336 | boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0); | |
1337 | boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0); | |
1c79356b A |
1338 | char alias; |
1339 | ||
91447636 A |
1340 | if (size == 0) { |
1341 | *address = 0; | |
1342 | return KERN_INVALID_ARGUMENT; | |
1343 | } | |
1344 | ||
1c79356b A |
1345 | VM_GET_FLAGS_ALIAS(flags, alias); |
1346 | ||
1347 | #define RETURN(value) { result = value; goto BailOut; } | |
1348 | ||
1349 | assert(page_aligned(*address)); | |
1350 | assert(page_aligned(size)); | |
91447636 A |
1351 | |
1352 | /* | |
1353 | * Only zero-fill objects are allowed to be purgable. | |
1354 | * LP64todo - limit purgable objects to 32-bits for now | |
1355 | */ | |
1356 | if (purgable && | |
1357 | (offset != 0 || | |
1358 | (object != VM_OBJECT_NULL && | |
1359 | (object->size != size || | |
1360 | object->purgable == VM_OBJECT_NONPURGABLE)) | |
1361 | || size > VM_MAX_ADDRESS)) /* LP64todo: remove when dp capable */ | |
1362 | return KERN_INVALID_ARGUMENT; | |
1363 | ||
1364 | if (!anywhere && overwrite) { | |
1365 | /* | |
1366 | * Create a temporary VM map to hold the old mappings in the | |
1367 | * affected area while we create the new one. | |
1368 | * This avoids releasing the VM map lock in | |
1369 | * vm_map_entry_delete() and allows atomicity | |
1370 | * when we want to replace some mappings with a new one. | |
1371 | * It also allows us to restore the old VM mappings if the | |
1372 | * new mapping fails. | |
1373 | */ | |
1374 | zap_old_map = vm_map_create(PMAP_NULL, | |
1375 | *address, | |
1376 | *address + size, | |
1377 | TRUE); | |
1378 | } | |
1379 | ||
1c79356b A |
1380 | StartAgain: ; |
1381 | ||
1382 | start = *address; | |
1383 | ||
1384 | if (anywhere) { | |
1385 | vm_map_lock(map); | |
91447636 | 1386 | map_locked = TRUE; |
1c79356b A |
1387 | |
1388 | /* | |
1389 | * Calculate the first possible address. | |
1390 | */ | |
1391 | ||
1392 | if (start < map->min_offset) | |
1393 | start = map->min_offset; | |
1394 | if (start > map->max_offset) | |
1395 | RETURN(KERN_NO_SPACE); | |
1396 | ||
1397 | /* | |
1398 | * Look for the first possible address; | |
1399 | * if there's already something at this | |
1400 | * address, we have to start after it. | |
1401 | */ | |
1402 | ||
1403 | assert(first_free_is_valid(map)); | |
1404 | if (start == map->min_offset) { | |
1405 | if ((entry = map->first_free) != vm_map_to_entry(map)) | |
1406 | start = entry->vme_end; | |
1407 | } else { | |
1408 | vm_map_entry_t tmp_entry; | |
1409 | if (vm_map_lookup_entry(map, start, &tmp_entry)) | |
1410 | start = tmp_entry->vme_end; | |
1411 | entry = tmp_entry; | |
1412 | } | |
1413 | ||
1414 | /* | |
1415 | * In any case, the "entry" always precedes | |
1416 | * the proposed new region throughout the | |
1417 | * loop: | |
1418 | */ | |
1419 | ||
1420 | while (TRUE) { | |
1421 | register vm_map_entry_t next; | |
1422 | ||
1423 | /* | |
1424 | * Find the end of the proposed new region. | |
1425 | * Be sure we didn't go beyond the end, or | |
1426 | * wrap around the address. | |
1427 | */ | |
1428 | ||
1429 | end = ((start + mask) & ~mask); | |
1430 | if (end < start) | |
1431 | RETURN(KERN_NO_SPACE); | |
1432 | start = end; | |
1433 | end += size; | |
1434 | ||
1435 | if ((end > map->max_offset) || (end < start)) { | |
1436 | if (map->wait_for_space) { | |
1437 | if (size <= (map->max_offset - | |
1438 | map->min_offset)) { | |
1439 | assert_wait((event_t)map, | |
1440 | THREAD_ABORTSAFE); | |
1441 | vm_map_unlock(map); | |
91447636 A |
1442 | map_locked = FALSE; |
1443 | thread_block(THREAD_CONTINUE_NULL); | |
1c79356b A |
1444 | goto StartAgain; |
1445 | } | |
1446 | } | |
1447 | RETURN(KERN_NO_SPACE); | |
1448 | } | |
1449 | ||
1450 | /* | |
1451 | * If there are no more entries, we must win. | |
1452 | */ | |
1453 | ||
1454 | next = entry->vme_next; | |
1455 | if (next == vm_map_to_entry(map)) | |
1456 | break; | |
1457 | ||
1458 | /* | |
1459 | * If there is another entry, it must be | |
1460 | * after the end of the potential new region. | |
1461 | */ | |
1462 | ||
1463 | if (next->vme_start >= end) | |
1464 | break; | |
1465 | ||
1466 | /* | |
1467 | * Didn't fit -- move to the next entry. | |
1468 | */ | |
1469 | ||
1470 | entry = next; | |
1471 | start = entry->vme_end; | |
1472 | } | |
1473 | *address = start; | |
1474 | } else { | |
1475 | vm_map_entry_t temp_entry; | |
1476 | ||
1477 | /* | |
1478 | * Verify that: | |
1479 | * the address doesn't itself violate | |
1480 | * the mask requirement. | |
1481 | */ | |
1482 | ||
1483 | vm_map_lock(map); | |
91447636 | 1484 | map_locked = TRUE; |
1c79356b A |
1485 | if ((start & mask) != 0) |
1486 | RETURN(KERN_NO_SPACE); | |
1487 | ||
1488 | /* | |
1489 | * ... the address is within bounds | |
1490 | */ | |
1491 | ||
1492 | end = start + size; | |
1493 | ||
1494 | if ((start < map->min_offset) || | |
1495 | (end > map->max_offset) || | |
1496 | (start >= end)) { | |
1497 | RETURN(KERN_INVALID_ADDRESS); | |
1498 | } | |
1499 | ||
91447636 A |
1500 | if (overwrite && zap_old_map != VM_MAP_NULL) { |
1501 | /* | |
1502 | * Fixed mapping and "overwrite" flag: attempt to | |
1503 | * remove all existing mappings in the specified | |
1504 | * address range, saving them in our "zap_old_map". | |
1505 | */ | |
1506 | (void) vm_map_delete(map, start, end, | |
1507 | VM_MAP_REMOVE_SAVE_ENTRIES, | |
1508 | zap_old_map); | |
1509 | } | |
1510 | ||
1c79356b A |
1511 | /* |
1512 | * ... the starting address isn't allocated | |
1513 | */ | |
1514 | ||
1515 | if (vm_map_lookup_entry(map, start, &temp_entry)) | |
1516 | RETURN(KERN_NO_SPACE); | |
1517 | ||
1518 | entry = temp_entry; | |
1519 | ||
1520 | /* | |
1521 | * ... the next region doesn't overlap the | |
1522 | * end point. | |
1523 | */ | |
1524 | ||
1525 | if ((entry->vme_next != vm_map_to_entry(map)) && | |
1526 | (entry->vme_next->vme_start < end)) | |
1527 | RETURN(KERN_NO_SPACE); | |
1528 | } | |
1529 | ||
1530 | /* | |
1531 | * At this point, | |
1532 | * "start" and "end" should define the endpoints of the | |
1533 | * available new range, and | |
1534 | * "entry" should refer to the region before the new | |
1535 | * range, and | |
1536 | * | |
1537 | * the map should be locked. | |
1538 | */ | |
1539 | ||
1540 | /* | |
1541 | * See whether we can avoid creating a new entry (and object) by | |
1542 | * extending one of our neighbors. [So far, we only attempt to | |
91447636 A |
1543 | * extend from below.] Note that we can never extend/join |
1544 | * purgable objects because they need to remain distinct | |
1545 | * entities in order to implement their "volatile object" | |
1546 | * semantics. | |
1c79356b A |
1547 | */ |
1548 | ||
91447636 A |
1549 | if (purgable) { |
1550 | if (object == VM_OBJECT_NULL) { | |
1551 | object = vm_object_allocate(size); | |
1552 | object->copy_strategy = MEMORY_OBJECT_COPY_NONE; | |
1553 | object->purgable = VM_OBJECT_PURGABLE_NONVOLATILE; | |
1554 | offset = (vm_object_offset_t)0; | |
1555 | } | |
1556 | } else if ((object == VM_OBJECT_NULL) && | |
1c79356b A |
1557 | (entry != vm_map_to_entry(map)) && |
1558 | (entry->vme_end == start) && | |
1559 | (!entry->is_shared) && | |
1560 | (!entry->is_sub_map) && | |
1561 | (entry->alias == alias) && | |
1562 | (entry->inheritance == inheritance) && | |
1563 | (entry->protection == cur_protection) && | |
1564 | (entry->max_protection == max_protection) && | |
1565 | (entry->behavior == VM_BEHAVIOR_DEFAULT) && | |
1566 | (entry->in_transition == 0) && | |
55e303ae | 1567 | ((alias == VM_MEMORY_REALLOC) || ((entry->vme_end - entry->vme_start) + size < NO_COALESCE_LIMIT)) && |
1c79356b A |
1568 | (entry->wired_count == 0)) { /* implies user_wired_count == 0 */ |
1569 | if (vm_object_coalesce(entry->object.vm_object, | |
1570 | VM_OBJECT_NULL, | |
1571 | entry->offset, | |
1572 | (vm_object_offset_t) 0, | |
91447636 A |
1573 | (vm_map_size_t)(entry->vme_end - entry->vme_start), |
1574 | (vm_map_size_t)(end - entry->vme_end))) { | |
1c79356b A |
1575 | |
1576 | /* | |
1577 | * Coalesced the two objects - can extend | |
1578 | * the previous map entry to include the | |
1579 | * new range. | |
1580 | */ | |
1581 | map->size += (end - entry->vme_end); | |
1582 | entry->vme_end = end; | |
1583 | UPDATE_FIRST_FREE(map, map->first_free); | |
1584 | RETURN(KERN_SUCCESS); | |
1585 | } | |
1586 | } | |
1587 | ||
1588 | /* | |
1589 | * Create a new entry | |
91447636 A |
1590 | * LP64todo - for now, we can only allocate 4GB internal objects |
1591 | * because the default pager can't page bigger ones. Remove this | |
1592 | * when it can. | |
1c79356b | 1593 | */ |
91447636 A |
1594 | tmp_start = start; |
1595 | if (object == VM_OBJECT_NULL && size > (vm_map_size_t)VM_MAX_ADDRESS) | |
1596 | tmp_end = tmp_start + (vm_map_size_t)VM_MAX_ADDRESS; | |
1597 | else | |
1598 | tmp_end = end; | |
1599 | do { | |
1600 | new_entry = vm_map_entry_insert(map, entry, tmp_start, tmp_end, | |
1601 | object, offset, needs_copy, FALSE, FALSE, | |
1c79356b A |
1602 | cur_protection, max_protection, |
1603 | VM_BEHAVIOR_DEFAULT, inheritance, 0); | |
91447636 A |
1604 | new_entry->alias = alias; |
1605 | entry = new_entry; | |
1606 | } while (object == VM_OBJECT_NULL && | |
1607 | tmp_end != end && | |
1608 | (tmp_start = tmp_end) && | |
1609 | (tmp_end = (end - tmp_end > (vm_map_size_t)VM_MAX_ADDRESS) ? | |
1610 | tmp_end + (vm_map_size_t)VM_MAX_ADDRESS : end)); | |
1611 | ||
1c79356b | 1612 | vm_map_unlock(map); |
91447636 A |
1613 | map_locked = FALSE; |
1614 | ||
1615 | new_mapping_established = TRUE; | |
1c79356b A |
1616 | |
1617 | /* Wire down the new entry if the user | |
1618 | * requested all new map entries be wired. | |
1619 | */ | |
1620 | if (map->wiring_required) { | |
91447636 | 1621 | pmap_empty = FALSE; /* pmap won't be empty */ |
1c79356b | 1622 | result = vm_map_wire(map, start, end, |
91447636 A |
1623 | new_entry->protection, TRUE); |
1624 | RETURN(result); | |
1c79356b A |
1625 | } |
1626 | ||
1627 | if ((object != VM_OBJECT_NULL) && | |
1628 | (vm_map_pmap_enter_enable) && | |
1629 | (!anywhere) && | |
1630 | (!needs_copy) && | |
1631 | (size < (128*1024))) { | |
91447636 | 1632 | pmap_empty = FALSE; /* pmap won't be empty */ |
1c79356b A |
1633 | vm_map_pmap_enter(map, start, end, |
1634 | object, offset, cur_protection); | |
1635 | } | |
1636 | ||
1c79356b | 1637 | BailOut: ; |
91447636 A |
1638 | if (result == KERN_SUCCESS && |
1639 | pmap_empty && | |
1640 | !(flags & VM_FLAGS_NO_PMAP_CHECK)) { | |
1641 | assert(vm_map_pmap_is_empty(map, *address, *address+size)); | |
1642 | } | |
1643 | ||
1644 | if (result != KERN_SUCCESS) { | |
1645 | if (new_mapping_established) { | |
1646 | /* | |
1647 | * We have to get rid of the new mappings since we | |
1648 | * won't make them available to the user. | |
1649 | * Try to do that atomically, to minimize the risk | |
1650 | * that someone else creates new mappings in that range. | |
1651 | */ | |
1652 | zap_new_map = vm_map_create(PMAP_NULL, | |
1653 | *address, | |
1654 | *address + size, | |
1655 | TRUE); | |
1656 | if (!map_locked) { | |
1657 | vm_map_lock(map); | |
1658 | map_locked = TRUE; | |
1659 | } | |
1660 | (void) vm_map_delete(map, *address, *address+size, | |
1661 | VM_MAP_REMOVE_SAVE_ENTRIES, | |
1662 | zap_new_map); | |
1663 | } | |
1664 | if (zap_old_map != VM_MAP_NULL && | |
1665 | zap_old_map->hdr.nentries != 0) { | |
1666 | vm_map_entry_t entry1, entry2; | |
1667 | ||
1668 | /* | |
1669 | * The new mapping failed. Attempt to restore | |
1670 | * the old mappings, saved in the "zap_old_map". | |
1671 | */ | |
1672 | if (!map_locked) { | |
1673 | vm_map_lock(map); | |
1674 | map_locked = TRUE; | |
1675 | } | |
1676 | ||
1677 | /* first check if the coast is still clear */ | |
1678 | start = vm_map_first_entry(zap_old_map)->vme_start; | |
1679 | end = vm_map_last_entry(zap_old_map)->vme_end; | |
1680 | if (vm_map_lookup_entry(map, start, &entry1) || | |
1681 | vm_map_lookup_entry(map, end, &entry2) || | |
1682 | entry1 != entry2) { | |
1683 | /* | |
1684 | * Part of that range has already been | |
1685 | * re-mapped: we can't restore the old | |
1686 | * mappings... | |
1687 | */ | |
1688 | vm_map_enter_restore_failures++; | |
1689 | } else { | |
1690 | /* | |
1691 | * Transfer the saved map entries from | |
1692 | * "zap_old_map" to the original "map", | |
1693 | * inserting them all after "entry1". | |
1694 | */ | |
1695 | for (entry2 = vm_map_first_entry(zap_old_map); | |
1696 | entry2 != vm_map_to_entry(zap_old_map); | |
1697 | entry2 = vm_map_first_entry(zap_old_map)) { | |
1698 | vm_map_entry_unlink(zap_old_map, | |
1699 | entry2); | |
1700 | vm_map_entry_link(map, entry1, entry2); | |
1701 | entry1 = entry2; | |
1702 | } | |
1703 | if (map->wiring_required) { | |
1704 | /* | |
1705 | * XXX TODO: we should rewire the | |
1706 | * old pages here... | |
1707 | */ | |
1708 | } | |
1709 | vm_map_enter_restore_successes++; | |
1710 | } | |
1711 | } | |
1712 | } | |
1713 | ||
1714 | if (map_locked) { | |
1715 | vm_map_unlock(map); | |
1716 | } | |
1717 | ||
1718 | /* | |
1719 | * Get rid of the "zap_maps" and all the map entries that | |
1720 | * they may still contain. | |
1721 | */ | |
1722 | if (zap_old_map != VM_MAP_NULL) { | |
1723 | vm_map_destroy(zap_old_map); | |
1724 | zap_old_map = VM_MAP_NULL; | |
1725 | } | |
1726 | if (zap_new_map != VM_MAP_NULL) { | |
1727 | vm_map_destroy(zap_new_map); | |
1728 | zap_new_map = VM_MAP_NULL; | |
1729 | } | |
1730 | ||
1731 | return result; | |
1c79356b A |
1732 | |
1733 | #undef RETURN | |
1734 | } | |
1735 | ||
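/*
 * Summary of the failure-recovery protocol above (an explanatory gloss,
 * not original source text): on a fixed-address overwrite, the displaced
 * entries are parked in "zap_old_map" before the new mapping is
 * attempted.  If the mapping then fails, the partially-built entries are
 * moved into "zap_new_map" and the saved old entries are spliced back
 * in, but only if the range is still clear; otherwise
 * vm_map_enter_restore_failures is bumped and the old mappings are lost.
 * Both zap maps are destroyed on the way out, discarding whatever
 * entries they still hold.
 */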
91447636 A |
1736 | |
1737 | #if VM_CPM | |
1738 | ||
1739 | #ifdef MACH_ASSERT | |
1740 | extern vm_offset_t avail_start, avail_end; | |
1741 | #endif | |
1742 | ||
1743 | /* | |
1744 | * Allocate memory in the specified map, with the caveat that | |
1745 | * the memory is physically contiguous. This call may fail | |
1746 | * if the system can't find sufficient contiguous memory. | |
1747 | * This call may cause or lead to heart-stopping amounts of | |
1748 | * paging activity. | |
1749 | * | |
1750 | * Memory obtained from this call should be freed in the | |
1751 | * normal way, viz., via vm_deallocate. | |
1752 | */ | |
1753 | kern_return_t | |
1754 | vm_map_enter_cpm( | |
1755 | vm_map_t map, | |
1756 | vm_map_offset_t *addr, | |
1757 | vm_map_size_t size, | |
1758 | int flags) | |
1759 | { | |
1760 | vm_object_t cpm_obj; | |
1761 | pmap_t pmap; | |
1762 | vm_page_t m, pages; | |
1763 | kern_return_t kr; | |
1764 | vm_map_offset_t va, start, end, offset; | |
1765 | #if MACH_ASSERT | |
1766 | vm_map_offset_t prev_addr; | |
1767 | #endif /* MACH_ASSERT */ | |
1768 | ||
1769 | boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); | |
1770 | ||
1771 | if (!vm_allocate_cpm_enabled) | |
1772 | return KERN_FAILURE; | |
1773 | ||
1774 | if (size == 0) { | |
1775 | *addr = 0; | |
1776 | return KERN_SUCCESS; | |
1777 | } | |
1778 | ||
1779 | if (anywhere) | |
1780 | *addr = vm_map_min(map); | |
1781 | else | |
1782 | *addr = vm_map_trunc_page(*addr); | |
1783 | size = vm_map_round_page(size); | |
1784 | ||
1785 | /* | |
1786 | * LP64todo - cpm_allocate should probably allow | |
1787 | * allocations of >4GB, but not with the current | |
1788 | * algorithm, so just cast down the size for now. | |
1789 | */ | |
1790 | if (size > VM_MAX_ADDRESS) | |
1791 | return KERN_RESOURCE_SHORTAGE; | |
1792 | if ((kr = cpm_allocate(CAST_DOWN(vm_size_t, size), | |
1793 | &pages, TRUE)) != KERN_SUCCESS) | |
1794 | return kr; | |
1795 | ||
1796 | cpm_obj = vm_object_allocate((vm_object_size_t)size); | |
1797 | assert(cpm_obj != VM_OBJECT_NULL); | |
1798 | assert(cpm_obj->internal); | |
1799 | assert(cpm_obj->size == (vm_object_size_t)size); | |
1800 | assert(cpm_obj->can_persist == FALSE); | |
1801 | assert(cpm_obj->pager_created == FALSE); | |
1802 | assert(cpm_obj->pageout == FALSE); | |
1803 | assert(cpm_obj->shadow == VM_OBJECT_NULL); | |
1804 | ||
1805 | /* | |
1806 | * Insert pages into object. | |
1807 | */ | |
1808 | ||
1809 | vm_object_lock(cpm_obj); | |
1810 | for (offset = 0; offset < size; offset += PAGE_SIZE) { | |
1811 | m = pages; | |
1812 | pages = NEXT_PAGE(m); | |
1813 | ||
1814 | assert(!m->gobbled); | |
1815 | assert(!m->wanted); | |
1816 | assert(!m->pageout); | |
1817 | assert(!m->tabled); | |
1818 | /* | |
1819 | * ENCRYPTED SWAP: | |
1820 | * "m" is not supposed to be pageable, so it | |
1821 | * should not be encrypted. It wouldn't be safe | |
1822 | * to enter it in a new VM object while encrypted. | |
1823 | */ | |
1824 | ASSERT_PAGE_DECRYPTED(m); | |
1825 | assert(m->busy); | |
1826 | assert(m->phys_page>=avail_start && m->phys_page<=avail_end); | |
1827 | ||
1828 | m->busy = FALSE; | |
1829 | vm_page_insert(m, cpm_obj, offset); | |
1830 | } | |
1831 | assert(cpm_obj->resident_page_count == size / PAGE_SIZE); | |
1832 | vm_object_unlock(cpm_obj); | |
1833 | ||
1834 | /* | |
1835 | * Hang onto a reference on the object in case a | |
1836 | * multi-threaded application for some reason decides | |
1837 | * to deallocate the portion of the address space into | |
1838 | * which we will insert this object. | |
1839 | * | |
1840 | * Unfortunately, we must insert the object now before | |
1841 | * we can talk to the pmap module about which addresses | |
1842 | * must be wired down. Hence, the race with a multi- | |
1843 | * threaded app. | |
1844 | */ | |
1845 | vm_object_reference(cpm_obj); | |
1846 | ||
1847 | /* | |
1848 | * Insert object into map. | |
1849 | */ | |
1850 | ||
1851 | kr = vm_map_enter( | |
1852 | map, | |
1853 | addr, | |
1854 | size, | |
1855 | (vm_map_offset_t)0, | |
1856 | flags, | |
1857 | cpm_obj, | |
1858 | (vm_object_offset_t)0, | |
1859 | FALSE, | |
1860 | VM_PROT_ALL, | |
1861 | VM_PROT_ALL, | |
1862 | VM_INHERIT_DEFAULT); | |
1863 | ||
1864 | if (kr != KERN_SUCCESS) { | |
1865 | /* | |
1866 | * A CPM object doesn't have can_persist set, | |
1867 | * so all we have to do is deallocate it to | |
1868 | * free up these pages. | |
1869 | */ | |
1870 | assert(cpm_obj->pager_created == FALSE); | |
1871 | assert(cpm_obj->can_persist == FALSE); | |
1872 | assert(cpm_obj->pageout == FALSE); | |
1873 | assert(cpm_obj->shadow == VM_OBJECT_NULL); | |
1874 | vm_object_deallocate(cpm_obj); /* kill acquired ref */ | |
1875 | vm_object_deallocate(cpm_obj); /* kill creation ref */ | |
1876 | } | |
1877 | ||
1878 | /* | |
1879 | * Inform the physical mapping system that the | |
1880 | * range of addresses may not fault, so that | |
1881 | * page tables and such can be locked down as well. | |
1882 | */ | |
1883 | start = *addr; | |
1884 | end = start + size; | |
1885 | pmap = vm_map_pmap(map); | |
1886 | pmap_pageable(pmap, start, end, FALSE); | |
1887 | ||
1888 | /* | |
1889 | * Enter each page into the pmap, to avoid faults. | |
1890 | * Note that this loop could be coded more efficiently, | |
1891 | * if the need arose, rather than looking up each page | |
1892 | * again. | |
1893 | */ | |
1894 | for (offset = 0, va = start; offset < size; | |
1895 | va += PAGE_SIZE, offset += PAGE_SIZE) { | |
1896 | vm_object_lock(cpm_obj); | |
1897 | m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); | |
1898 | vm_object_unlock(cpm_obj); | |
1899 | assert(m != VM_PAGE_NULL); | |
1900 | PMAP_ENTER(pmap, va, m, VM_PROT_ALL, | |
1901 | ((unsigned int)(m->object->wimg_bits)) & VM_WIMG_MASK, | |
1902 | TRUE); | |
1903 | } | |
1904 | ||
1905 | #if MACH_ASSERT | |
1906 | /* | |
1907 | * Verify ordering in address space. | |
1908 | */ | |
1909 | for (offset = 0; offset < size; offset += PAGE_SIZE) { | |
1910 | vm_object_lock(cpm_obj); | |
1911 | m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); | |
1912 | vm_object_unlock(cpm_obj); | |
1913 | if (m == VM_PAGE_NULL) | |
1914 | panic("vm_allocate_cpm: obj 0x%x off 0x%x no page", | |
1915 | cpm_obj, offset); | |
1916 | assert(m->tabled); | |
1917 | assert(!m->busy); | |
1918 | assert(!m->wanted); | |
1919 | assert(!m->fictitious); | |
1920 | assert(!m->private); | |
1921 | assert(!m->absent); | |
1922 | assert(!m->error); | |
1923 | assert(!m->cleaning); | |
1924 | assert(!m->precious); | |
1925 | assert(!m->clustered); | |
1926 | if (offset != 0) { | |
1927 | if (m->phys_page != prev_addr + 1) { | |
1928 | printf("start 0x%x end 0x%x va 0x%x\n", | |
1929 | start, end, va); | |
1930 | printf("obj 0x%x off 0x%x\n", cpm_obj, offset); | |
1931 | printf("m 0x%x prev_address 0x%x\n", m, | |
1932 | prev_addr); | |
1933 | panic("vm_allocate_cpm: pages not contig!"); | |
1934 | } | |
1935 | } | |
1936 | prev_addr = m->phys_page; | |
1937 | } | |
1938 | #endif /* MACH_ASSERT */ | |
1939 | ||
1940 | vm_object_deallocate(cpm_obj); /* kill extra ref */ | |
1941 | ||
1942 | return kr; | |
1943 | } | |
1944 | ||
1945 | ||
1946 | #else /* VM_CPM */ | |
1947 | ||
1948 | /* | |
1949 | * Interface is defined in all cases, but unless the kernel | |
1950 | * is built explicitly for this option, the interface does | |
1951 | * nothing. | |
1952 | */ | |
1953 | ||
1954 | kern_return_t | |
1955 | vm_map_enter_cpm( | |
1956 | __unused vm_map_t map, | |
1957 | __unused vm_map_offset_t *addr, | |
1958 | __unused vm_map_size_t size, | |
1959 | __unused int flags) | |
1960 | { | |
1961 | return KERN_FAILURE; | |
1962 | } | |
1963 | #endif /* VM_CPM */ | |
1964 | ||
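/*
 * Usage sketch for vm_map_enter_cpm() (illustrative only; the
 * kernel_map calling context and the wrapper function are assumptions,
 * not original source).  Contiguous memory is requested with
 * VM_FLAGS_ANYWHERE and, per the block comment above, freed in the
 * normal way via vm_deallocate().
 */
#if 0
static kern_return_t
cpm_alloc_example(vm_map_size_t size, vm_map_offset_t *addrp)
{
	vm_map_offset_t	addr = 0;
	kern_return_t	kr;

	/* let vm_map_enter_cpm() choose the virtual address */
	kr = vm_map_enter_cpm(kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;	/* e.g. KERN_RESOURCE_SHORTAGE */

	*addrp = addr;
	/* when done: vm_deallocate(kernel_map, addr, size); */
	return KERN_SUCCESS;
}
#endif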
1c79356b A |
1965 | /* |
1966 | * vm_map_clip_start: [ internal use only ] | |
1967 | * | |
1968 | * Asserts that the given entry begins at or after | |
1969 | * the specified address; if necessary, | |
1970 | * it splits the entry into two. | |
1971 | */ | |
1972 | #ifndef i386 | |
1973 | #define vm_map_clip_start(map, entry, startaddr) \ | |
1974 | MACRO_BEGIN \ | |
1975 | vm_map_t VMCS_map; \ | |
1976 | vm_map_entry_t VMCS_entry; \ | |
91447636 | 1977 | vm_map_offset_t VMCS_startaddr; \ |
1c79356b A |
1978 | VMCS_map = (map); \ |
1979 | VMCS_entry = (entry); \ | |
1980 | VMCS_startaddr = (startaddr); \ | |
1981 | if (VMCS_startaddr > VMCS_entry->vme_start) { \ | |
1982 | if(entry->use_pmap) { \ | |
91447636 | 1983 | vm_map_offset_t pmap_base_addr; \ |
1c79356b A |
1984 | \ |
1985 | pmap_base_addr = 0xF0000000 & entry->vme_start; \ | |
55e303ae | 1986 | pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \ |
1c79356b | 1987 | entry->use_pmap = FALSE; \ |
9bccf70c A |
1988 | } else if(entry->object.vm_object \ |
1989 | && !entry->is_sub_map \ | |
1990 | && entry->object.vm_object->phys_contiguous) { \ | |
1991 | pmap_remove(map->pmap, \ | |
55e303ae A |
1992 | (addr64_t)(entry->vme_start), \ |
1993 | (addr64_t)(entry->vme_end)); \ | |
1c79356b A |
1994 | } \ |
1995 | _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\ | |
1996 | } \ | |
1997 | UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \ | |
1998 | MACRO_END | |
1999 | #else | |
2000 | #define vm_map_clip_start(map, entry, startaddr) \ | |
2001 | MACRO_BEGIN \ | |
2002 | vm_map_t VMCS_map; \ | |
2003 | vm_map_entry_t VMCS_entry; \ | |
91447636 | 2004 | vm_map_offset_t VMCS_startaddr; \ |
1c79356b A |
2005 | VMCS_map = (map); \ |
2006 | VMCS_entry = (entry); \ | |
2007 | VMCS_startaddr = (startaddr); \ | |
2008 | if (VMCS_startaddr > VMCS_entry->vme_start) { \ | |
2009 | _vm_map_clip_start(&VMCS_map->hdr,VMCS_entry,VMCS_startaddr);\ | |
2010 | } \ | |
2011 | UPDATE_FIRST_FREE(VMCS_map, VMCS_map->first_free); \ | |
2012 | MACRO_END | |
2013 | #endif | |
2014 | ||
2015 | #define vm_map_copy_clip_start(copy, entry, startaddr) \ | |
2016 | MACRO_BEGIN \ | |
2017 | if ((startaddr) > (entry)->vme_start) \ | |
2018 | _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \ | |
2019 | MACRO_END | |
2020 | ||
2021 | /* | |
2022 | * This routine is called only when it is known that | |
2023 | * the entry must be split. | |
2024 | */ | |
91447636 | 2025 | static void |
1c79356b A |
2026 | _vm_map_clip_start( |
2027 | register struct vm_map_header *map_header, | |
2028 | register vm_map_entry_t entry, | |
91447636 | 2029 | register vm_map_offset_t start) |
1c79356b A |
2030 | { |
2031 | register vm_map_entry_t new_entry; | |
2032 | ||
2033 | /* | |
2034 | * Split off the front portion -- | |
2035 | * note that we must insert the new | |
2036 | * entry BEFORE this one, so that | |
2037 | * this entry has the specified starting | |
2038 | * address. | |
2039 | */ | |
2040 | ||
2041 | new_entry = _vm_map_entry_create(map_header); | |
2042 | vm_map_entry_copy_full(new_entry, entry); | |
2043 | ||
2044 | new_entry->vme_end = start; | |
2045 | entry->offset += (start - entry->vme_start); | |
2046 | entry->vme_start = start; | |
2047 | ||
2048 | _vm_map_entry_link(map_header, entry->vme_prev, new_entry); | |
2049 | ||
2050 | if (entry->is_sub_map) | |
2051 | vm_map_reference(new_entry->object.sub_map); | |
2052 | else | |
2053 | vm_object_reference(new_entry->object.vm_object); | |
2054 | } | |
2055 | ||
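/*
 * Worked example for _vm_map_clip_start() (illustrative, not original
 * source): clipping an entry that spans [0x1000, 0x5000) at start
 * address 0x3000 inserts a new entry covering [0x1000, 0x3000) before
 * it and shrinks the original to [0x3000, 0x5000).  The surviving
 * entry's "offset" advances by 0x2000 so the same object data remains
 * mapped at the same addresses, and the object (or submap) gains one
 * reference for the new entry.
 */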
2056 | ||
2057 | /* | |
2058 | * vm_map_clip_end: [ internal use only ] | |
2059 | * | |
2060 | * Asserts that the given entry ends at or before | |
2061 | * the specified address; if necessary, | |
2062 | * it splits the entry into two. | |
2063 | */ | |
2064 | #ifndef i386 | |
2065 | #define vm_map_clip_end(map, entry, endaddr) \ | |
2066 | MACRO_BEGIN \ | |
2067 | vm_map_t VMCE_map; \ | |
2068 | vm_map_entry_t VMCE_entry; \ | |
91447636 | 2069 | vm_map_offset_t VMCE_endaddr; \ |
1c79356b A |
2070 | VMCE_map = (map); \ |
2071 | VMCE_entry = (entry); \ | |
2072 | VMCE_endaddr = (endaddr); \ | |
2073 | if (VMCE_endaddr < VMCE_entry->vme_end) { \ | |
2074 | if(entry->use_pmap) { \ | |
91447636 | 2075 | vm_map_offset_t pmap_base_addr; \ |
1c79356b A |
2076 | \ |
2077 | pmap_base_addr = 0xF0000000 & entry->vme_start; \ | |
55e303ae | 2078 | pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); \ |
1c79356b | 2079 | entry->use_pmap = FALSE; \ |
9bccf70c A |
2080 | } else if(entry->object.vm_object \ |
2081 | && !entry->is_sub_map \ | |
2082 | && entry->object.vm_object->phys_contiguous) { \ | |
2083 | pmap_remove(map->pmap, \ | |
55e303ae A |
2084 | (addr64_t)(entry->vme_start), \ |
2085 | (addr64_t)(entry->vme_end)); \ | |
1c79356b A |
2086 | } \ |
2087 | _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \ | |
2088 | } \ | |
2089 | UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \ | |
2090 | MACRO_END | |
2091 | #else | |
2092 | #define vm_map_clip_end(map, entry, endaddr) \ | |
2093 | MACRO_BEGIN \ | |
2094 | vm_map_t VMCE_map; \ | |
2095 | vm_map_entry_t VMCE_entry; \ | |
91447636 | 2096 | vm_map_offset_t VMCE_endaddr; \ |
1c79356b A |
2097 | VMCE_map = (map); \ |
2098 | VMCE_entry = (entry); \ | |
2099 | VMCE_endaddr = (endaddr); \ | |
2100 | if (VMCE_endaddr < VMCE_entry->vme_end) { \ | |
2101 | _vm_map_clip_end(&VMCE_map->hdr,VMCE_entry,VMCE_endaddr); \ | |
2102 | } \ | |
2103 | UPDATE_FIRST_FREE(VMCE_map, VMCE_map->first_free); \ | |
2104 | MACRO_END | |
2105 | #endif | |
2106 | ||
2107 | #define vm_map_copy_clip_end(copy, entry, endaddr) \ | |
2108 | MACRO_BEGIN \ | |
2109 | if ((endaddr) < (entry)->vme_end) \ | |
2110 | _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \ | |
2111 | MACRO_END | |
2112 | ||
2113 | /* | |
2114 | * This routine is called only when it is known that | |
2115 | * the entry must be split. | |
2116 | */ | |
91447636 | 2117 | static void |
1c79356b A |
2118 | _vm_map_clip_end( |
2119 | register struct vm_map_header *map_header, | |
2120 | register vm_map_entry_t entry, | |
91447636 | 2121 | register vm_map_offset_t end) |
1c79356b A |
2122 | { |
2123 | register vm_map_entry_t new_entry; | |
2124 | ||
2125 | /* | |
2126 | * Create a new entry and insert it | |
2127 | * AFTER the specified entry | |
2128 | */ | |
2129 | ||
2130 | new_entry = _vm_map_entry_create(map_header); | |
2131 | vm_map_entry_copy_full(new_entry, entry); | |
2132 | ||
2133 | new_entry->vme_start = entry->vme_end = end; | |
2134 | new_entry->offset += (end - entry->vme_start); | |
2135 | ||
2136 | _vm_map_entry_link(map_header, entry, new_entry); | |
2137 | ||
2138 | if (entry->is_sub_map) | |
2139 | vm_map_reference(new_entry->object.sub_map); | |
2140 | else | |
2141 | vm_object_reference(new_entry->object.vm_object); | |
2142 | } | |
2143 | ||
2144 | ||
2145 | /* | |
2146 | * VM_MAP_RANGE_CHECK: [ internal use only ] | |
2147 | * | |
2148 | * Asserts that the starting and ending region | |
2149 | * addresses fall within the valid range of the map. | |
2150 | */ | |
2151 | #define VM_MAP_RANGE_CHECK(map, start, end) \ | |
2152 | { \ | |
2153 | if (start < vm_map_min(map)) \ | |
2154 | start = vm_map_min(map); \ | |
2155 | if (end > vm_map_max(map)) \ | |
2156 | end = vm_map_max(map); \ | |
2157 | if (start > end) \ | |
2158 | start = end; \ | |
2159 | } | |
2160 | ||
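/*
 * Behavior sketch for VM_MAP_RANGE_CHECK (illustrative): the macro
 * clamps rather than fails.  With vm_map_min(map) == 0x1000 and
 * vm_map_max(map) == 0x9000, a request for [0x0, 0xF000) is trimmed to
 * [0x1000, 0x9000), and a range lying entirely outside the map
 * collapses to an empty one (start == end), which the caller then
 * handles as a trivial or invalid range.
 */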
2161 | /* | |
2162 | * vm_map_range_check: [ internal use only ] | |
2163 | * | |
2164 | * Check that the region defined by the specified start and | |
2165 | * end addresses is wholly contained within a single map | |
2166 | * entry or set of adjacent map entries of the specified map, | |
2167 | * i.e. the specified region contains no unmapped space. | |
2168 | * If any or all of the region is unmapped, FALSE is returned. | |
2169 | * Otherwise, TRUE is returned and if the output argument 'entry' | |
2170 | * is not NULL it points to the map entry containing the start | |
2171 | * of the region. | |
2172 | * | |
2173 | * The map is locked for reading on entry and is left locked. | |
2174 | */ | |
91447636 | 2175 | static boolean_t |
1c79356b A |
2176 | vm_map_range_check( |
2177 | register vm_map_t map, | |
91447636 A |
2178 | register vm_map_offset_t start, |
2179 | register vm_map_offset_t end, | |
1c79356b A |
2180 | vm_map_entry_t *entry) |
2181 | { | |
2182 | vm_map_entry_t cur; | |
91447636 | 2183 | register vm_map_offset_t prev; |
1c79356b A |
2184 | |
2185 | /* | |
2186 | * Basic sanity checks first | |
2187 | */ | |
2188 | if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) | |
2189 | return (FALSE); | |
2190 | ||
2191 | /* | |
2192 | * Check first if the region starts within a valid | |
2193 | * mapping for the map. | |
2194 | */ | |
2195 | if (!vm_map_lookup_entry(map, start, &cur)) | |
2196 | return (FALSE); | |
2197 | ||
2198 | /* | |
2199 | * Optimize for the case that the region is contained | |
2200 | * in a single map entry. | |
2201 | */ | |
2202 | if (entry != (vm_map_entry_t *) NULL) | |
2203 | *entry = cur; | |
2204 | if (end <= cur->vme_end) | |
2205 | return (TRUE); | |
2206 | ||
2207 | /* | |
2208 | * If the region is not wholly contained within a | |
2209 | * single entry, walk the entries looking for holes. | |
2210 | */ | |
2211 | prev = cur->vme_end; | |
2212 | cur = cur->vme_next; | |
2213 | while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) { | |
2214 | if (end <= cur->vme_end) | |
2215 | return (TRUE); | |
2216 | prev = cur->vme_end; | |
2217 | cur = cur->vme_next; | |
2218 | } | |
2219 | return (FALSE); | |
2220 | } | |
2221 | ||
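/*
 * Worked example for vm_map_range_check() (illustrative): given
 * adjacent entries [0x1000, 0x3000) and [0x3000, 0x6000), a check of
 * [0x2000, 0x5000) returns TRUE with *entry pointing at the first
 * entry.  If the second entry instead began at 0x4000, the hole at
 * [0x3000, 0x4000) would end the prev == cur->vme_start walk and the
 * check would return FALSE.
 */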
2222 | /* | |
2223 | * vm_map_submap: [ kernel use only ] | |
2224 | * | |
2225 | * Mark the given range as handled by a subordinate map. | |
2226 | * | |
2227 | * This range must have been created with vm_map_find using | |
2228 | * the vm_submap_object, and no other operations may have been | |
2229 | * performed on this range prior to calling vm_map_submap. | |
2230 | * | |
2231 | * Only a limited number of operations can be performed | |
2232 | * within this range after calling vm_map_submap: | |
2233 | * vm_fault | |
2234 | * [Don't try vm_map_copyin!] | |
2235 | * | |
2236 | * To remove a submapping, one must first remove the | |
2237 | * range from the superior map, and then destroy the | |
2238 | * submap (if desired). [Better yet, don't try it.] | |
2239 | */ | |
2240 | kern_return_t | |
2241 | vm_map_submap( | |
91447636 A |
2242 | vm_map_t map, |
2243 | vm_map_offset_t start, | |
2244 | vm_map_offset_t end, | |
1c79356b | 2245 | vm_map_t submap, |
91447636 A |
2246 | vm_map_offset_t offset, |
2247 | #ifdef i386 | |
2248 | __unused | |
2249 | #endif | |
1c79356b A |
2250 | boolean_t use_pmap) |
2251 | { | |
2252 | vm_map_entry_t entry; | |
2253 | register kern_return_t result = KERN_INVALID_ARGUMENT; | |
2254 | register vm_object_t object; | |
2255 | ||
2256 | vm_map_lock(map); | |
2257 | ||
9bccf70c A |
2258 | submap->mapped = TRUE; |
2259 | ||
1c79356b A |
2260 | VM_MAP_RANGE_CHECK(map, start, end); |
2261 | ||
2262 | if (vm_map_lookup_entry(map, start, &entry)) { | |
2263 | vm_map_clip_start(map, entry, start); | |
2264 | } | |
2265 | else | |
2266 | entry = entry->vme_next; | |
2267 | ||
2268 | if(entry == vm_map_to_entry(map)) { | |
2269 | vm_map_unlock(map); | |
2270 | return KERN_INVALID_ARGUMENT; | |
2271 | } | |
2272 | ||
2273 | vm_map_clip_end(map, entry, end); | |
2274 | ||
2275 | if ((entry->vme_start == start) && (entry->vme_end == end) && | |
2276 | (!entry->is_sub_map) && | |
2277 | ((object = entry->object.vm_object) == vm_submap_object) && | |
2278 | (object->resident_page_count == 0) && | |
2279 | (object->copy == VM_OBJECT_NULL) && | |
2280 | (object->shadow == VM_OBJECT_NULL) && | |
2281 | (!object->pager_created)) { | |
55e303ae A |
2282 | entry->offset = (vm_object_offset_t)offset; |
2283 | entry->object.vm_object = VM_OBJECT_NULL; | |
2284 | vm_object_deallocate(object); | |
2285 | entry->is_sub_map = TRUE; | |
2286 | entry->object.sub_map = submap; | |
2287 | vm_map_reference(submap); | |
1c79356b | 2288 | #ifndef i386 |
55e303ae A |
2289 | if ((use_pmap) && (offset == 0)) { |
2290 | /* nest if platform code will allow */ | |
2291 | if(submap->pmap == NULL) { | |
91447636 | 2292 | submap->pmap = pmap_create((vm_map_size_t) 0); |
55e303ae | 2293 | if(submap->pmap == PMAP_NULL) { |
91447636 | 2294 | vm_map_unlock(map); |
55e303ae A |
2295 | return(KERN_NO_SPACE); |
2296 | } | |
2297 | } | |
2298 | result = pmap_nest(map->pmap, (entry->object.sub_map)->pmap, | |
91447636 A |
2299 | (addr64_t)start, |
2300 | (addr64_t)start, | |
2301 | (uint64_t)(end - start)); | |
55e303ae A |
2302 | if(result) |
2303 | panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result); | |
2304 | entry->use_pmap = TRUE; | |
2305 | } | |
1c79356b A |
2306 | #endif |
2307 | #ifdef i386 | |
55e303ae | 2308 | pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end); |
1c79356b | 2309 | #endif |
55e303ae | 2310 | result = KERN_SUCCESS; |
1c79356b A |
2311 | } |
2312 | vm_map_unlock(map); | |
2313 | ||
2314 | return(result); | |
2315 | } | |
2316 | ||
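/*
 * Usage sketch for vm_map_submap() (illustrative; assumes [start, end)
 * was previously mapped with the pristine vm_submap_object, as the
 * comment above requires, and that parent_map/child_map exist in the
 * calling context):
 */
#if 0
	kern_return_t kr;

	/*
	 * offset 0 together with use_pmap == TRUE lets non-i386
	 * platforms nest the submap's pmap directly into the parent's.
	 */
	kr = vm_map_submap(parent_map, start, end, child_map,
			   (vm_map_offset_t)0, TRUE);
	if (kr != KERN_SUCCESS) {
		/* the range was not a pristine vm_submap_object mapping */
	}
#endif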
2317 | /* | |
2318 | * vm_map_protect: | |
2319 | * | |
2320 | * Sets the protection of the specified address | |
2321 | * region in the target map. If "set_max" is | |
2322 | * specified, the maximum protection is to be set; | |
2323 | * otherwise, only the current protection is affected. | |
2324 | */ | |
2325 | kern_return_t | |
2326 | vm_map_protect( | |
2327 | register vm_map_t map, | |
91447636 A |
2328 | register vm_map_offset_t start, |
2329 | register vm_map_offset_t end, | |
1c79356b A |
2330 | register vm_prot_t new_prot, |
2331 | register boolean_t set_max) | |
2332 | { | |
2333 | register vm_map_entry_t current; | |
91447636 | 2334 | register vm_map_offset_t prev; |
1c79356b A |
2335 | vm_map_entry_t entry; |
2336 | vm_prot_t new_max; | |
2337 | boolean_t clip; | |
2338 | ||
2339 | XPR(XPR_VM_MAP, | |
2340 | "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d", | |
2341 | (integer_t)map, start, end, new_prot, set_max); | |
2342 | ||
2343 | vm_map_lock(map); | |
2344 | ||
91447636 A |
2345 | /* LP64todo - remove this check when vm_map_commpage64() |
2346 | * no longer has to stuff in a map_entry for the commpage | |
2347 | * above the map's max_offset. | |
2348 | */ | |
2349 | if (start >= map->max_offset) { | |
2350 | vm_map_unlock(map); | |
2351 | return(KERN_INVALID_ADDRESS); | |
2352 | } | |
2353 | ||
1c79356b A |
2354 | /* |
2355 | * Lookup the entry. If it doesn't start in a valid | |
2356 | * entry, return an error. Remember if we need to | |
2357 | * clip the entry. We don't do it here because we don't | |
2358 | * want to make any changes until we've scanned the | |
2359 | * entire range below for address and protection | |
2360 | * violations. | |
2361 | */ | |
2362 | if (!(clip = vm_map_lookup_entry(map, start, &entry))) { | |
2363 | vm_map_unlock(map); | |
2364 | return(KERN_INVALID_ADDRESS); | |
2365 | } | |
2366 | ||
2367 | /* | |
2368 | * Make a first pass to check for protection and address | |
2369 | * violations. | |
2370 | */ | |
2371 | ||
2372 | current = entry; | |
2373 | prev = current->vme_start; | |
2374 | while ((current != vm_map_to_entry(map)) && | |
2375 | (current->vme_start < end)) { | |
2376 | ||
2377 | /* | |
2378 | * If there is a hole, return an error. | |
2379 | */ | |
2380 | if (current->vme_start != prev) { | |
2381 | vm_map_unlock(map); | |
2382 | return(KERN_INVALID_ADDRESS); | |
2383 | } | |
2384 | ||
2385 | new_max = current->max_protection; | |
2386 | if(new_prot & VM_PROT_COPY) { | |
2387 | new_max |= VM_PROT_WRITE; | |
2388 | if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) { | |
2389 | vm_map_unlock(map); | |
2390 | return(KERN_PROTECTION_FAILURE); | |
2391 | } | |
2392 | } else { | |
2393 | if ((new_prot & new_max) != new_prot) { | |
2394 | vm_map_unlock(map); | |
2395 | return(KERN_PROTECTION_FAILURE); | |
2396 | } | |
2397 | } | |
2398 | ||
2399 | prev = current->vme_end; | |
2400 | current = current->vme_next; | |
2401 | } | |
2402 | if (end > prev) { | |
2403 | vm_map_unlock(map); | |
2404 | return(KERN_INVALID_ADDRESS); | |
2405 | } | |
2406 | ||
2407 | /* | |
2408 | * Go back and fix up protections. | |
2409 | * Clip to start here if the range starts within | |
2410 | * the entry. | |
2411 | */ | |
2412 | ||
2413 | current = entry; | |
2414 | if (clip) { | |
2415 | vm_map_clip_start(map, entry, start); | |
2416 | } | |
2417 | while ((current != vm_map_to_entry(map)) && | |
2418 | (current->vme_start < end)) { | |
2419 | ||
2420 | vm_prot_t old_prot; | |
2421 | ||
2422 | vm_map_clip_end(map, current, end); | |
2423 | ||
2424 | old_prot = current->protection; | |
2425 | ||
2426 | if(new_prot & VM_PROT_COPY) { | |
2427 | /* caller is asking specifically to copy the */ | |
2428 | /* mapped data, this implies that max protection */ | |
2429 | /* will include write. Caller must be prepared */ | |
2430 | /* for loss of shared memory communication in the */ | |
2431 | /* target area after taking this step */ | |
2432 | current->needs_copy = TRUE; | |
2433 | current->max_protection |= VM_PROT_WRITE; | |
2434 | } | |
2435 | ||
2436 | if (set_max) | |
2437 | current->protection = | |
2438 | (current->max_protection = | |
2439 | new_prot & ~VM_PROT_COPY) & | |
2440 | old_prot; | |
2441 | else | |
2442 | current->protection = new_prot & ~VM_PROT_COPY; | |
2443 | ||
2444 | /* | |
2445 | * Update physical map if necessary. | |
2446 | * If the request is to turn off write protection, | |
2447 | * we won't do it for real (in pmap). This is because | |
2448 | * it would cause copy-on-write to fail. We've already | |
2449 | * set the new protection in the map, so if a | |
2450 | * write-protect fault occurs, it will be fixed up | |
2451 | * properly, COW or not. | |
2452 | */ | |
2453 | /* the 256M hack for existing hardware limitations */ | |
2454 | if (current->protection != old_prot) { | |
2455 | if(current->is_sub_map && current->use_pmap) { | |
91447636 A |
2456 | vm_map_offset_t pmap_base_addr; |
2457 | vm_map_offset_t pmap_end_addr; | |
2458 | #ifdef i386 | |
2459 | __unused | |
2460 | #endif | |
1c79356b A |
2461 | vm_map_entry_t local_entry; |
2462 | ||
2463 | pmap_base_addr = 0xF0000000 & current->vme_start; | |
2464 | pmap_end_addr = (pmap_base_addr + 0x10000000) - 1; | |
2465 | #ifndef i386 | |
2466 | if(!vm_map_lookup_entry(map, | |
2467 | pmap_base_addr, &local_entry)) | |
2468 | panic("vm_map_protect: nested pmap area is missing"); | |
2469 | while ((local_entry != vm_map_to_entry(map)) && | |
2470 | (local_entry->vme_start < pmap_end_addr)) { | |
2471 | local_entry->use_pmap = FALSE; | |
2472 | local_entry = local_entry->vme_next; | |
2473 | } | |
55e303ae | 2474 | pmap_unnest(map->pmap, (addr64_t)pmap_base_addr); |
1c79356b A |
2475 | #endif |
2476 | } | |
2477 | if (!(current->protection & VM_PROT_WRITE)) { | |
2478 | /* Look one level in: we support nested pmaps */ | |
2479 | /* from mapped submaps which are direct entries */ | |
2480 | /* in our map */ | |
2481 | if(current->is_sub_map && current->use_pmap) { | |
2482 | pmap_protect(current->object.sub_map->pmap, | |
2483 | current->vme_start, | |
2484 | current->vme_end, | |
2485 | current->protection); | |
2486 | } else { | |
2487 | pmap_protect(map->pmap, current->vme_start, | |
2488 | current->vme_end, | |
2489 | current->protection); | |
2490 | } | |
2491 | } | |
2492 | } | |
2493 | current = current->vme_next; | |
2494 | } | |
2495 | ||
5353443c | 2496 | current = entry; |
91447636 A |
2497 | while ((current != vm_map_to_entry(map)) && |
2498 | (current->vme_start <= end)) { | |
5353443c A |
2499 | vm_map_simplify_entry(map, current); |
2500 | current = current->vme_next; | |
2501 | } | |
2502 | ||
1c79356b A |
2503 | vm_map_unlock(map); |
2504 | return(KERN_SUCCESS); | |
2505 | } | |
2506 | ||
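/*
 * Usage sketch for vm_map_protect() (illustrative, not original
 * source): making a range read-only.  With set_max == FALSE only the
 * current protection changes and may later be raised again up to
 * max_protection; set_max == TRUE would lower the maximum as well,
 * which cannot be undone.
 */
#if 0
	kern_return_t kr;

	kr = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
	if (kr == KERN_PROTECTION_FAILURE) {
		/* some entry's max_protection does not include READ */
	}
#endif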
2507 | /* | |
2508 | * vm_map_inherit: | |
2509 | * | |
2510 | * Sets the inheritance of the specified address | |
2511 | * range in the target map. Inheritance | |
2512 | * affects how the map will be shared with | |
2513 | * child maps at the time of vm_map_fork. | |
2514 | */ | |
2515 | kern_return_t | |
2516 | vm_map_inherit( | |
2517 | register vm_map_t map, | |
91447636 A |
2518 | register vm_map_offset_t start, |
2519 | register vm_map_offset_t end, | |
1c79356b A |
2520 | register vm_inherit_t new_inheritance) |
2521 | { | |
2522 | register vm_map_entry_t entry; | |
2523 | vm_map_entry_t temp_entry; | |
2524 | ||
2525 | vm_map_lock(map); | |
2526 | ||
2527 | VM_MAP_RANGE_CHECK(map, start, end); | |
2528 | ||
2529 | if (vm_map_lookup_entry(map, start, &temp_entry)) { | |
2530 | entry = temp_entry; | |
2531 | vm_map_clip_start(map, entry, start); | |
2532 | } | |
2533 | else { | |
2534 | temp_entry = temp_entry->vme_next; | |
2535 | entry = temp_entry; | |
2536 | } | |
2537 | ||
2538 | /* first check entire range for submaps which can't support the */ | |
2539 | /* given inheritance. */ | |
2540 | while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { | |
2541 | if(entry->is_sub_map) { | |
91447636 A |
2542 | if(new_inheritance == VM_INHERIT_COPY) { |
2543 | vm_map_unlock(map); | |
1c79356b | 2544 | return(KERN_INVALID_ARGUMENT); |
91447636 | 2545 | } |
1c79356b A |
2546 | } |
2547 | ||
2548 | entry = entry->vme_next; | |
2549 | } | |
2550 | ||
2551 | entry = temp_entry; | |
2552 | ||
2553 | while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { | |
2554 | vm_map_clip_end(map, entry, end); | |
2555 | ||
2556 | entry->inheritance = new_inheritance; | |
2557 | ||
2558 | entry = entry->vme_next; | |
2559 | } | |
2560 | ||
2561 | vm_map_unlock(map); | |
2562 | return(KERN_SUCCESS); | |
2563 | } | |
2564 | ||
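/*
 * Usage sketch for vm_map_inherit() (illustrative): marking a range to
 * be shared with children created at vm_map_fork() time.  The whole
 * range is scanned before any entry is modified, which is why a submap
 * entry anywhere in the range makes VM_INHERIT_COPY fail without
 * side effects.
 */
#if 0
	kern_return_t kr;

	kr = vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
#endif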
2565 | /* | |
2566 | * vm_map_wire: | |
2567 | * | |
2568 | * Sets the pageability of the specified address range in the | |
2569 | * target map as wired. Regions specified as not pageable require | |
2570 | * locked-down physical memory and physical page maps. The | |
2571 | * access_type variable indicates types of accesses that must not | |
2572 | * generate page faults. This is checked against protection of | |
2573 | * memory being locked-down. | |
2574 | * | |
2575 | * The map must not be locked, but a reference must remain to the | |
2576 | * map throughout the call. | |
2577 | */ | |
91447636 | 2578 | static kern_return_t |
1c79356b A |
2579 | vm_map_wire_nested( |
2580 | register vm_map_t map, | |
91447636 A |
2581 | register vm_map_offset_t start, |
2582 | register vm_map_offset_t end, | |
1c79356b A |
2583 | register vm_prot_t access_type, |
2584 | boolean_t user_wire, | |
9bccf70c | 2585 | pmap_t map_pmap, |
91447636 | 2586 | vm_map_offset_t pmap_addr) |
1c79356b A |
2587 | { |
2588 | register vm_map_entry_t entry; | |
2589 | struct vm_map_entry *first_entry, tmp_entry; | |
91447636 A |
2590 | vm_map_t real_map; |
2591 | register vm_map_offset_t s,e; | |
1c79356b A |
2592 | kern_return_t rc; |
2593 | boolean_t need_wakeup; | |
2594 | boolean_t main_map = FALSE; | |
9bccf70c | 2595 | wait_interrupt_t interruptible_state; |
0b4e3aa0 | 2596 | thread_t cur_thread; |
1c79356b | 2597 | unsigned int last_timestamp; |
91447636 | 2598 | vm_map_size_t size; |
1c79356b A |
2599 | |
2600 | vm_map_lock(map); | |
2601 | if(map_pmap == NULL) | |
2602 | main_map = TRUE; | |
2603 | last_timestamp = map->timestamp; | |
2604 | ||
2605 | VM_MAP_RANGE_CHECK(map, start, end); | |
2606 | assert(page_aligned(start)); | |
2607 | assert(page_aligned(end)); | |
0b4e3aa0 A |
2608 | if (start == end) { |
2609 | /* We wired what the caller asked for, zero pages */ | |
2610 | vm_map_unlock(map); | |
2611 | return KERN_SUCCESS; | |
2612 | } | |
1c79356b A |
2613 | |
2614 | if (vm_map_lookup_entry(map, start, &first_entry)) { | |
2615 | entry = first_entry; | |
2616 | /* vm_map_clip_start will be done later. */ | |
2617 | } else { | |
2618 | /* Start address is not in map */ | |
2619 | vm_map_unlock(map); | |
2620 | return(KERN_INVALID_ADDRESS); | |
2621 | } | |
2622 | ||
2623 | s=start; | |
2624 | need_wakeup = FALSE; | |
0b4e3aa0 | 2625 | cur_thread = current_thread(); |
1c79356b A |
2626 | while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { |
2627 | /* | |
2628 | * If another thread is wiring/unwiring this entry then | |
2629 | * block after informing the other thread to wake us up. | |
2630 | */ | |
2631 | if (entry->in_transition) { | |
9bccf70c A |
2632 | wait_result_t wait_result; |
2633 | ||
1c79356b A |
2634 | /* |
2635 | * We have not clipped the entry. Make sure that | |
2636 | * the start address is in range so that the lookup | |
2637 | * below will succeed. | |
2638 | */ | |
2639 | s = entry->vme_start < start? start: entry->vme_start; | |
2640 | ||
2641 | entry->needs_wakeup = TRUE; | |
2642 | ||
2643 | /* | |
2644 | * wake up anybody waiting on entries that we have | |
2645 | * already wired. | |
2646 | */ | |
2647 | if (need_wakeup) { | |
2648 | vm_map_entry_wakeup(map); | |
2649 | need_wakeup = FALSE; | |
2650 | } | |
2651 | /* | |
2652 | * User wiring is interruptible | |
2653 | */ | |
9bccf70c | 2654 | wait_result = vm_map_entry_wait(map, |
1c79356b A |
2655 | (user_wire) ? THREAD_ABORTSAFE : |
2656 | THREAD_UNINT); | |
9bccf70c | 2657 | if (user_wire && wait_result == THREAD_INTERRUPTED) { |
1c79356b A |
2658 | /* |
2659 | * undo the wirings we have done so far. | |
2660 | * We do not clear the needs_wakeup flag, | |
2661 | * because we cannot tell if we were the | |
2662 | * only one waiting. | |
2663 | */ | |
9bccf70c | 2664 | vm_map_unlock(map); |
1c79356b A |
2665 | vm_map_unwire(map, start, s, user_wire); |
2666 | return(KERN_FAILURE); | |
2667 | } | |
2668 | ||
1c79356b A |
2669 | /* |
2670 | * Cannot avoid a lookup here. Reset the timestamp. | |
2671 | */ | |
2672 | last_timestamp = map->timestamp; | |
2673 | ||
2674 | /* | |
2675 | * The entry could have been clipped, look it up again. | |
2676 | * The worst that can happen is that it no longer exists. | |
2677 | */ | |
2678 | if (!vm_map_lookup_entry(map, s, &first_entry)) { | |
2679 | if (!user_wire) | |
2680 | panic("vm_map_wire: re-lookup failed"); | |
2681 | ||
2682 | /* | |
2683 | * User: undo everything up to the previous | |
2684 | * entry. Let vm_map_unwire worry about | |
2685 | * checking the validity of the range. | |
2686 | */ | |
2687 | vm_map_unlock(map); | |
2688 | vm_map_unwire(map, start, s, user_wire); | |
2689 | return(KERN_FAILURE); | |
2690 | } | |
2691 | entry = first_entry; | |
2692 | continue; | |
2693 | } | |
2694 | ||
2695 | if(entry->is_sub_map) { | |
91447636 A |
2696 | vm_map_offset_t sub_start; |
2697 | vm_map_offset_t sub_end; | |
2698 | vm_map_offset_t local_start; | |
2699 | vm_map_offset_t local_end; | |
1c79356b A |
2700 | pmap_t pmap; |
2701 | ||
2702 | vm_map_clip_start(map, entry, start); | |
2703 | vm_map_clip_end(map, entry, end); | |
2704 | ||
9bccf70c | 2705 | sub_start = entry->offset; |
1c79356b A |
2706 | sub_end = entry->vme_end - entry->vme_start; |
2707 | sub_end += entry->offset; | |
2708 | ||
2709 | local_end = entry->vme_end; | |
2710 | if(map_pmap == NULL) { | |
2711 | if(entry->use_pmap) { | |
2712 | pmap = entry->object.sub_map->pmap; | |
9bccf70c A |
2713 | /* ppc implementation requires that */ |
2714 | /* a submap's pmap address ranges line */ | |
2715 | /* up with the parent map */ | |
2716 | #ifdef notdef | |
2717 | pmap_addr = sub_start; | |
2718 | #endif | |
2719 | pmap_addr = start; | |
1c79356b A |
2720 | } else { |
2721 | pmap = map->pmap; | |
9bccf70c | 2722 | pmap_addr = start; |
1c79356b A |
2723 | } |
2724 | if (entry->wired_count) { | |
2725 | if (entry->wired_count | |
2726 | >= MAX_WIRE_COUNT) | |
2727 | panic("vm_map_wire: too many wirings"); | |
2728 | ||
2729 | if (user_wire && | |
2730 | entry->user_wired_count | |
2731 | >= MAX_WIRE_COUNT) { | |
2732 | vm_map_unlock(map); | |
2733 | vm_map_unwire(map, start, | |
2734 | entry->vme_start, user_wire); | |
2735 | return(KERN_FAILURE); | |
2736 | } | |
9bccf70c A |
2737 | if(user_wire) |
2738 | entry->user_wired_count++; | |
2739 | if((!user_wire) || | |
2740 | (entry->user_wired_count == 0)) | |
1c79356b A |
2741 | entry->wired_count++; |
2742 | entry = entry->vme_next; | |
2743 | continue; | |
2744 | ||
2745 | } else { | |
2746 | vm_object_t object; | |
91447636 A |
2747 | vm_map_offset_t offset_hi; |
2748 | vm_map_offset_t offset_lo; | |
1c79356b A |
2749 | vm_object_offset_t offset; |
2750 | vm_prot_t prot; | |
2751 | boolean_t wired; | |
2752 | vm_behavior_t behavior; | |
1c79356b A |
2753 | vm_map_entry_t local_entry; |
2754 | vm_map_version_t version; | |
2755 | vm_map_t lookup_map; | |
2756 | ||
2757 | /* call vm_map_lookup_locked to */ | |
2758 | /* cause any needs copy to be */ | |
2759 | /* evaluated */ | |
2760 | local_start = entry->vme_start; | |
2761 | lookup_map = map; | |
2762 | vm_map_lock_write_to_read(map); | |
2763 | if(vm_map_lookup_locked( | |
2764 | &lookup_map, local_start, | |
9bccf70c | 2765 | access_type, |
1c79356b A |
2766 | &version, &object, |
2767 | &offset, &prot, &wired, | |
2768 | &behavior, &offset_lo, | |
91447636 | 2769 | &offset_hi, &real_map)) { |
1c79356b | 2770 | |
91447636 | 2771 | vm_map_unlock_read(lookup_map); |
1c79356b A |
2772 | vm_map_unwire(map, start, |
2773 | entry->vme_start, user_wire); | |
2774 | return(KERN_FAILURE); | |
2775 | } | |
91447636 A |
2776 | if(real_map != lookup_map) |
2777 | vm_map_unlock(real_map); | |
9bccf70c A |
2778 | vm_map_unlock_read(lookup_map); |
2779 | vm_map_lock(map); | |
1c79356b | 2780 | vm_object_unlock(object); |
9bccf70c A |
2781 | |
2782 | if (!vm_map_lookup_entry(map, | |
1c79356b A |
2783 | local_start, &local_entry)) { |
2784 | vm_map_unlock(map); | |
2785 | vm_map_unwire(map, start, | |
2786 | entry->vme_start, user_wire); | |
2787 | return(KERN_FAILURE); | |
2788 | } | |
2789 | /* did we have a change of type? */ | |
9bccf70c A |
2790 | if (!local_entry->is_sub_map) { |
2791 | last_timestamp = map->timestamp; | |
1c79356b | 2792 | continue; |
9bccf70c | 2793 | } |
1c79356b A |
2794 | entry = local_entry; |
2795 | if (user_wire) | |
2796 | entry->user_wired_count++; | |
9bccf70c A |
2797 | if((!user_wire) || |
2798 | (entry->user_wired_count == 1)) | |
1c79356b A |
2799 | entry->wired_count++; |
2800 | ||
2801 | entry->in_transition = TRUE; | |
2802 | ||
2803 | vm_map_unlock(map); | |
2804 | rc = vm_map_wire_nested( | |
2805 | entry->object.sub_map, | |
2806 | sub_start, sub_end, | |
2807 | access_type, | |
9bccf70c | 2808 | user_wire, pmap, pmap_addr); |
1c79356b | 2809 | vm_map_lock(map); |
1c79356b A |
2810 | } |
2811 | } else { | |
9bccf70c A |
2812 | local_start = entry->vme_start; |
2813 | if (user_wire) | |
2814 | entry->user_wired_count++; | |
2815 | if((!user_wire) || | |
2816 | (entry->user_wired_count == 1)) | |
2817 | entry->wired_count++; | |
1c79356b A |
2818 | vm_map_unlock(map); |
2819 | rc = vm_map_wire_nested(entry->object.sub_map, | |
2820 | sub_start, sub_end, | |
2821 | access_type, | |
55e303ae | 2822 | user_wire, map_pmap, pmap_addr); |
1c79356b | 2823 | vm_map_lock(map); |
1c79356b A |
2824 | } |
2825 | s = entry->vme_start; | |
2826 | e = entry->vme_end; | |
9bccf70c | 2827 | |
1c79356b A |
2828 | /* |
2829 | * Find the entry again. It could have been clipped | |
2830 | * after we unlocked the map. | |
2831 | */ | |
9bccf70c A |
2832 | if (!vm_map_lookup_entry(map, local_start, |
2833 | &first_entry)) | |
2834 | panic("vm_map_wire: re-lookup failed"); | |
2835 | entry = first_entry; | |
1c79356b A |
2836 | |
2837 | last_timestamp = map->timestamp; | |
2838 | while ((entry != vm_map_to_entry(map)) && | |
2839 | (entry->vme_start < e)) { | |
2840 | assert(entry->in_transition); | |
2841 | entry->in_transition = FALSE; | |
2842 | if (entry->needs_wakeup) { | |
2843 | entry->needs_wakeup = FALSE; | |
2844 | need_wakeup = TRUE; | |
2845 | } | |
2846 | if (rc != KERN_SUCCESS) {/* from vm_*_wire */ | |
1c79356b A |
2847 | if (user_wire) |
2848 | entry->user_wired_count--; | |
9bccf70c A |
2849 | if ((!user_wire) || |
2850 | (entry->user_wired_count == 0)) | |
2851 | entry->wired_count--; | |
1c79356b A |
2852 | } |
2853 | entry = entry->vme_next; | |
2854 | } | |
2855 | if (rc != KERN_SUCCESS) { /* from vm_*_wire */ | |
2856 | vm_map_unlock(map); | |
2857 | if (need_wakeup) | |
2858 | vm_map_entry_wakeup(map); | |
2859 | /* | |
2860 | * undo everything up to the previous entry. | |
2861 | */ | |
2862 | (void)vm_map_unwire(map, start, s, user_wire); | |
2863 | return rc; | |
2864 | } | |
2865 | continue; | |
2866 | } | |
2867 | ||
2868 | /* | |
2869 | * If this entry is already wired then increment | |
2870 | * the appropriate wire reference count. | |
2871 | */ | |
9bccf70c | 2872 | if (entry->wired_count) { |
1c79356b A |
2873 | /* sanity check: wired_count is a short */ |
2874 | if (entry->wired_count >= MAX_WIRE_COUNT) | |
2875 | panic("vm_map_wire: too many wirings"); | |
2876 | ||
2877 | if (user_wire && | |
2878 | entry->user_wired_count >= MAX_WIRE_COUNT) { | |
2879 | vm_map_unlock(map); | |
2880 | vm_map_unwire(map, start, | |
2881 | entry->vme_start, user_wire); | |
2882 | return(KERN_FAILURE); | |
2883 | } | |
2884 | /* | |
2885 | * entry is already wired down, get our reference | |
2886 | * after clipping to our range. | |
2887 | */ | |
2888 | vm_map_clip_start(map, entry, start); | |
2889 | vm_map_clip_end(map, entry, end); | |
9bccf70c A |
2890 | if (user_wire) |
2891 | entry->user_wired_count++; | |
2892 | if ((!user_wire) || (entry->user_wired_count == 1)) | |
1c79356b A |
2893 | entry->wired_count++; |
2894 | ||
2895 | entry = entry->vme_next; | |
2896 | continue; | |
2897 | } | |
2898 | ||
2899 | /* | |
2900 | * Unwired entry or wire request transmitted via submap | |
2901 | */ | |
2902 | ||
2903 | ||
2904 | /* | |
2905 | * Perform actions of vm_map_lookup that need the write | |
2906 | * lock on the map: create a shadow object for a | |
2907 | * copy-on-write region, or an object for a zero-fill | |
2908 | * region. | |
2909 | */ | |
2910 | size = entry->vme_end - entry->vme_start; | |
2911 | /* | |
2912 | * If wiring a copy-on-write page, we need to copy it now | |
2913 | * even if we're only (currently) requesting read access. | |
2914 | * This is aggressive, but once it's wired we can't move it. | |
2915 | */ | |
2916 | if (entry->needs_copy) { | |
2917 | vm_object_shadow(&entry->object.vm_object, | |
2918 | &entry->offset, size); | |
2919 | entry->needs_copy = FALSE; | |
2920 | } else if (entry->object.vm_object == VM_OBJECT_NULL) { | |
2921 | entry->object.vm_object = vm_object_allocate(size); | |
2922 | entry->offset = (vm_object_offset_t)0; | |
2923 | } | |
2924 | ||
2925 | vm_map_clip_start(map, entry, start); | |
2926 | vm_map_clip_end(map, entry, end); | |
2927 | ||
2928 | s = entry->vme_start; | |
2929 | e = entry->vme_end; | |
2930 | ||
2931 | /* | |
2932 | * Check for holes and protection mismatch. | |
2933 | * Holes: Next entry should be contiguous unless this | |
2934 | * is the end of the region. | |
2935 | * Protection: Access requested must be allowed, unless | |
2936 | * wiring is by protection class | |
2937 | */ | |
2938 | if ((((entry->vme_end < end) && | |
2939 | ((entry->vme_next == vm_map_to_entry(map)) || | |
2940 | (entry->vme_next->vme_start > entry->vme_end))) || | |
2941 | ((entry->protection & access_type) != access_type))) { | |
2942 | /* | |
2943 | * Found a hole or protection problem. | |
2944 | * Unwire the region we wired so far. | |
2945 | */ | |
2946 | if (start != entry->vme_start) { | |
2947 | vm_map_unlock(map); | |
2948 | vm_map_unwire(map, start, s, user_wire); | |
2949 | } else { | |
2950 | vm_map_unlock(map); | |
2951 | } | |
2952 | return((entry->protection&access_type) != access_type? | |
2953 | KERN_PROTECTION_FAILURE: KERN_INVALID_ADDRESS); | |
2954 | } | |
2955 | ||
2956 | assert(entry->wired_count == 0 && entry->user_wired_count == 0); | |
2957 | ||
9bccf70c A |
2958 | if (user_wire) |
2959 | entry->user_wired_count++; | |
2960 | if ((!user_wire) || (entry->user_wired_count == 1)) | |
1c79356b | 2961 | entry->wired_count++; |
1c79356b A |
2962 | |
2963 | entry->in_transition = TRUE; | |
2964 | ||
2965 | /* | |
2966 | * This entry might get split once we unlock the map. | |
2967 | * In vm_fault_wire(), we need the current range as | |
2968 | * defined by this entry. In order for this to work | |
2969 | * along with a simultaneous clip operation, we make a | |
2970 | * temporary copy of this entry and use that for the | |
2971 | * wiring. Note that the underlying objects do not | |
2972 | * change during a clip. | |
2973 | */ | |
2974 | tmp_entry = *entry; | |
2975 | ||
2976 | /* | |
2977 | * The in_transition state guarantees that the entry | |
2978 | * (or entries for this range, if a split occurred) will be | |
2979 | * there when the map lock is acquired for the second time. | |
2980 | */ | |
2981 | vm_map_unlock(map); | |
0b4e3aa0 | 2982 | |
9bccf70c A |
2983 | if (!user_wire && cur_thread != THREAD_NULL) |
2984 | interruptible_state = thread_interrupt_level(THREAD_UNINT); | |
91447636 A |
2985 | else |
2986 | interruptible_state = THREAD_UNINT; | |
9bccf70c | 2987 | |
1c79356b | 2988 | if(map_pmap) |
9bccf70c A |
2989 | rc = vm_fault_wire(map, |
2990 | &tmp_entry, map_pmap, pmap_addr); | |
1c79356b | 2991 | else |
9bccf70c A |
2992 | rc = vm_fault_wire(map, |
2993 | &tmp_entry, map->pmap, | |
2994 | tmp_entry.vme_start); | |
0b4e3aa0 A |
2995 | |
2996 | if (!user_wire && cur_thread != THREAD_NULL) | |
9bccf70c | 2997 | thread_interrupt_level(interruptible_state); |
0b4e3aa0 | 2998 | |
1c79356b A |
2999 | vm_map_lock(map); |
3000 | ||
3001 | if (last_timestamp+1 != map->timestamp) { | |
3002 | /* | |
3003 | * Find the entry again. It could have been clipped | |
3004 | * after we unlocked the map. | |
3005 | */ | |
3006 | if (!vm_map_lookup_entry(map, tmp_entry.vme_start, | |
3007 | &first_entry)) | |
3008 | panic("vm_map_wire: re-lookup failed"); | |
3009 | ||
3010 | entry = first_entry; | |
3011 | } | |
3012 | ||
3013 | last_timestamp = map->timestamp; | |
3014 | ||
3015 | while ((entry != vm_map_to_entry(map)) && | |
3016 | (entry->vme_start < tmp_entry.vme_end)) { | |
3017 | assert(entry->in_transition); | |
3018 | entry->in_transition = FALSE; | |
3019 | if (entry->needs_wakeup) { | |
3020 | entry->needs_wakeup = FALSE; | |
3021 | need_wakeup = TRUE; | |
3022 | } | |
3023 | if (rc != KERN_SUCCESS) { /* from vm_*_wire */ | |
9bccf70c A |
3024 | if (user_wire) |
3025 | entry->user_wired_count--; | |
3026 | if ((!user_wire) || | |
3027 | (entry->user_wired_count == 0)) | |
1c79356b | 3028 | entry->wired_count--; |
1c79356b A |
3029 | } |
3030 | entry = entry->vme_next; | |
3031 | } | |
3032 | ||
3033 | if (rc != KERN_SUCCESS) { /* from vm_*_wire */ | |
3034 | vm_map_unlock(map); | |
3035 | if (need_wakeup) | |
3036 | vm_map_entry_wakeup(map); | |
3037 | /* | |
3038 | * undo everything up to the previous entry. | |
3039 | */ | |
3040 | (void)vm_map_unwire(map, start, s, user_wire); | |
3041 | return rc; | |
3042 | } | |
3043 | } /* end while loop through map entries */ | |
3044 | vm_map_unlock(map); | |
3045 | ||
3046 | /* | |
3047 | * wake up anybody waiting on entries we wired. | |
3048 | */ | |
3049 | if (need_wakeup) | |
3050 | vm_map_entry_wakeup(map); | |
3051 | ||
3052 | return(KERN_SUCCESS); | |
3053 | ||
3054 | } | |
3055 | ||
3056 | kern_return_t | |
3057 | vm_map_wire( | |
3058 | register vm_map_t map, | |
91447636 A |
3059 | register vm_map_offset_t start, |
3060 | register vm_map_offset_t end, | |
1c79356b A |
3061 | register vm_prot_t access_type, |
3062 | boolean_t user_wire) | |
3063 | { | |
3064 | ||
3065 | kern_return_t kret; | |
3066 | ||
3067 | #ifdef ppc | |
3068 | /* | |
3069 | * the calls to mapping_prealloc and mapping_relpre | |
3070 | * (along with the VM_MAP_RANGE_CHECK to ensure a | |
3071 | * reasonable range was passed in) are | |
3072 | * currently necessary because | |
3073 | * we haven't enabled kernel pre-emption | |
3074 | * and/or the pmap_enter cannot purge and re-use | |
3075 | * existing mappings | |
3076 | */ | |
3077 | VM_MAP_RANGE_CHECK(map, start, end); | |
3078 | mapping_prealloc(end - start); | |
3079 | #endif | |
3080 | kret = vm_map_wire_nested(map, start, end, access_type, | |
9bccf70c | 3081 | user_wire, (pmap_t)NULL, 0); |
1c79356b A |
3082 | #ifdef ppc |
3083 | mapping_relpre(); | |
3084 | #endif | |
3085 | return kret; | |
3086 | } | |
3087 | ||
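/*
 * Usage sketch for vm_map_wire()/vm_map_unwire() (illustrative):
 * wiring a user range for read/write access and undoing it.  With
 * user_wire == TRUE the wait on in-transition entries is interruptible,
 * the wiring is counted in user_wired_count, and a failure partway
 * through is rolled back internally via vm_map_unwire().
 */
#if 0
	kern_return_t kr;

	kr = vm_map_wire(map, start, end,
			 VM_PROT_READ | VM_PROT_WRITE, TRUE);
	if (kr == KERN_SUCCESS) {
		/* pages in [start, end) cannot fault now */
		kr = vm_map_unwire(map, start, end, TRUE);
	}
#endif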
3088 | /* | |
3089 | * vm_map_unwire: | |
3090 | * | |
3091 | * Sets the pageability of the specified address range in the target | |
3092 | * map as pageable. Regions specified must have been wired previously. | |
3093 | * | |
3094 | * The map must not be locked, but a reference must remain to the map | |
3095 | * throughout the call. | |
3096 | * | |
3097 | * Kernel will panic on failures. User unwire ignores holes and | |
3098 | * unwired and in-transition entries to avoid losing memory by | |
3099 | * leaving it wired. | |
3100 | */ | |
91447636 | 3101 | static kern_return_t |
1c79356b A |
3102 | vm_map_unwire_nested( |
3103 | register vm_map_t map, | |
91447636 A |
3104 | register vm_map_offset_t start, |
3105 | register vm_map_offset_t end, | |
1c79356b | 3106 | boolean_t user_wire, |
9bccf70c | 3107 | pmap_t map_pmap, |
91447636 | 3108 | vm_map_offset_t pmap_addr) |
1c79356b A |
3109 | { |
3110 | register vm_map_entry_t entry; | |
3111 | struct vm_map_entry *first_entry, tmp_entry; | |
3112 | boolean_t need_wakeup; | |
3113 | boolean_t main_map = FALSE; | |
3114 | unsigned int last_timestamp; | |
3115 | ||
3116 | vm_map_lock(map); | |
3117 | if(map_pmap == NULL) | |
3118 | main_map = TRUE; | |
3119 | last_timestamp = map->timestamp; | |
3120 | ||
3121 | VM_MAP_RANGE_CHECK(map, start, end); | |
3122 | assert(page_aligned(start)); | |
3123 | assert(page_aligned(end)); | |
3124 | ||
3125 | if (vm_map_lookup_entry(map, start, &first_entry)) { | |
3126 | entry = first_entry; | |
3127 | /* vm_map_clip_start will be done later. */ | |
3128 | } | |
3129 | else { | |
3130 | /* Start address is not in map. */ | |
3131 | vm_map_unlock(map); | |
3132 | return(KERN_INVALID_ADDRESS); | |
3133 | } | |
3134 | ||
3135 | need_wakeup = FALSE; | |
3136 | while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { | |
3137 | if (entry->in_transition) { | |
3138 | /* | |
3139 | * 1) | |
3140 | * Another thread is wiring down this entry. Note | |
3141 | * that, were it not for the other thread, we would | |
3142 | * be unwiring an unwired entry. This is not | |
3143 | * permitted. If we wait, we will be unwiring memory | |
3144 | * we did not wire. | |
3145 | * | |
3146 | * 2) | |
3147 | * Another thread is unwiring this entry. We did not | |
3148 | * have a reference to it, because if we did, this | |
3149 | * entry will not be getting unwired now. | |
3150 | */ | |
3151 | if (!user_wire) | |
3152 | panic("vm_map_unwire: in_transition entry"); | |
3153 | ||
3154 | entry = entry->vme_next; | |
3155 | continue; | |
3156 | } | |
3157 | ||
3158 | if(entry->is_sub_map) { | |
91447636 A |
3159 | vm_map_offset_t sub_start; |
3160 | vm_map_offset_t sub_end; | |
3161 | vm_map_offset_t local_end; | |
1c79356b A |
3162 | pmap_t pmap; |
3163 | ||
3164 | ||
3165 | vm_map_clip_start(map, entry, start); | |
3166 | vm_map_clip_end(map, entry, end); | |
3167 | ||
3168 | sub_start = entry->offset; | |
3169 | sub_end = entry->vme_end - entry->vme_start; | |
3170 | sub_end += entry->offset; | |
3171 | local_end = entry->vme_end; | |
3172 | if(map_pmap == NULL) { | |
3173 | if(entry->use_pmap) { | |
3174 | pmap = entry->object.sub_map->pmap; | |
9bccf70c | 3175 | pmap_addr = sub_start; |
1c79356b A |
3176 | } else { |
3177 | pmap = map->pmap; | |
9bccf70c | 3178 | pmap_addr = start; |
1c79356b A |
3179 | } |
3180 | if (entry->wired_count == 0 || | |
3181 | (user_wire && entry->user_wired_count == 0)) { | |
3182 | if (!user_wire) | |
3183 | panic("vm_map_unwire: entry is unwired"); | |
3184 | entry = entry->vme_next; | |
3185 | continue; | |
3186 | } | |
3187 | ||
3188 | /* | |
3189 | * Check for holes | |
3190 | * Holes: Next entry should be contiguous unless | |
3191 | * this is the end of the region. | |
3192 | */ | |
3193 | if (((entry->vme_end < end) && | |
3194 | ((entry->vme_next == vm_map_to_entry(map)) || | |
3195 | (entry->vme_next->vme_start | |
3196 | > entry->vme_end)))) { | |
3197 | if (!user_wire) | |
3198 | panic("vm_map_unwire: non-contiguous region"); | |
3199 | /* | |
3200 | entry = entry->vme_next; | |
3201 | continue; | |
3202 | */ | |
3203 | } | |
3204 | ||
3205 | if (!user_wire || (--entry->user_wired_count == 0)) | |
3206 | entry->wired_count--; | |
3207 | ||
3208 | if (entry->wired_count != 0) { | |
3209 | entry = entry->vme_next; | |
3210 | continue; | |
3211 | } | |
3212 | ||
3213 | entry->in_transition = TRUE; | |
3214 | tmp_entry = *entry;/* see comment in vm_map_wire() */ | |
3215 | ||
3216 | /* | |
3217 | * We can unlock the map now. The in_transition state | |
3218 | * guarantees existance of the entry. | |
3219 | */ | |
3220 | vm_map_unlock(map); | |
3221 | vm_map_unwire_nested(entry->object.sub_map, | |
9bccf70c | 3222 | sub_start, sub_end, user_wire, pmap, pmap_addr); |
1c79356b A |
3223 | vm_map_lock(map); |
3224 | ||
3225 | if (last_timestamp+1 != map->timestamp) { | |
3226 | /* | |
3227 | * Find the entry again. It could have been | |
3228 | * clipped or deleted after we unlocked the map. | |
3229 | */ | |
3230 | if (!vm_map_lookup_entry(map, | |
3231 | tmp_entry.vme_start, | |
3232 | &first_entry)) { | |
3233 | if (!user_wire) | |
3234 | panic("vm_map_unwire: re-lookup failed"); | |
3235 | entry = first_entry->vme_next; | |
3236 | } else | |
3237 | entry = first_entry; | |
3238 | } | |
3239 | last_timestamp = map->timestamp; | |
3240 | ||
3241 | /* | |
3242 | * clear transition bit for all constituent entries | |
3243 | * that were in the original entry (saved in | |
3244 | * tmp_entry). Also check for waiters. | |
3245 | */ | |
3246 | while ((entry != vm_map_to_entry(map)) && | |
3247 | (entry->vme_start < tmp_entry.vme_end)) { | |
3248 | assert(entry->in_transition); | |
3249 | entry->in_transition = FALSE; | |
3250 | if (entry->needs_wakeup) { | |
3251 | entry->needs_wakeup = FALSE; | |
3252 | need_wakeup = TRUE; | |
3253 | } | |
3254 | entry = entry->vme_next; | |
3255 | } | |
3256 | continue; | |
3257 | } else { | |
3258 | vm_map_unlock(map); | |
55e303ae A |
3259 | vm_map_unwire_nested(entry->object.sub_map, |
3260 | sub_start, sub_end, user_wire, map_pmap, | |
3261 | pmap_addr); | |
1c79356b A |
3262 | vm_map_lock(map); |
3263 | ||
3264 | if (last_timestamp+1 != map->timestamp) { | |
3265 | /* | |
3266 | * Find the entry again. It could have been | |
3267 | * clipped or deleted after we unlocked the map. | |
3268 | */ | |
3269 | if (!vm_map_lookup_entry(map, | |
3270 | tmp_entry.vme_start, | |
3271 | &first_entry)) { | |
3272 | if (!user_wire) | |
3273 | panic("vm_map_unwire: re-lookup failed"); | |
3274 | entry = first_entry->vme_next; | |
3275 | } else | |
3276 | entry = first_entry; | |
3277 | } | |
3278 | last_timestamp = map->timestamp; | |
3279 | } | |
3280 | } | |
3281 | ||
3282 | ||
9bccf70c A |
3283 | if ((entry->wired_count == 0) || |
3284 | (user_wire && entry->user_wired_count == 0)) { | |
1c79356b A |
3285 | if (!user_wire) |
3286 | panic("vm_map_unwire: entry is unwired"); | |
3287 | ||
3288 | entry = entry->vme_next; | |
3289 | continue; | |
3290 | } | |
3291 | ||
3292 | assert(entry->wired_count > 0 && | |
3293 | (!user_wire || entry->user_wired_count > 0)); | |
3294 | ||
3295 | vm_map_clip_start(map, entry, start); | |
3296 | vm_map_clip_end(map, entry, end); | |
3297 | ||
3298 | /* | |
3299 | * Check for holes | |
3300 | * Holes: Next entry should be contiguous unless | |
3301 | * this is the end of the region. | |
3302 | */ | |
3303 | if (((entry->vme_end < end) && | |
3304 | ((entry->vme_next == vm_map_to_entry(map)) || | |
3305 | (entry->vme_next->vme_start > entry->vme_end)))) { | |
3306 | ||
3307 | if (!user_wire) | |
3308 | panic("vm_map_unwire: non-contiguous region"); | |
3309 | entry = entry->vme_next; | |
3310 | continue; | |
3311 | } | |
3312 | ||
9bccf70c | 3313 | if (!user_wire || (--entry->user_wired_count == 0)) |
1c79356b A |
3314 | entry->wired_count--; |
3315 | ||
9bccf70c | 3316 | if (entry->wired_count != 0) { |
1c79356b A |
3317 | entry = entry->vme_next; |
3318 | continue; | |
1c79356b A |
3319 | } |
3320 | ||
3321 | entry->in_transition = TRUE; | |
3322 | tmp_entry = *entry; /* see comment in vm_map_wire() */ | |
3323 | ||
3324 | /* | |
3325 | * We can unlock the map now. The in_transition state | |
3326 | * guarantees existance of the entry. | |
3327 | */ | |
3328 | vm_map_unlock(map); | |
3329 | if(map_pmap) { | |
9bccf70c A |
3330 | vm_fault_unwire(map, |
3331 | &tmp_entry, FALSE, map_pmap, pmap_addr); | |
1c79356b | 3332 | } else { |
9bccf70c A |
3333 | vm_fault_unwire(map, |
3334 | &tmp_entry, FALSE, map->pmap, | |
3335 | tmp_entry.vme_start); | |
1c79356b A |
3336 | } |
3337 | vm_map_lock(map); | |
3338 | ||
3339 | if (last_timestamp+1 != map->timestamp) { | |
3340 | /* | |
3341 | * Find the entry again. It could have been clipped | |
3342 | * or deleted after we unlocked the map. | |
3343 | */ | |
3344 | if (!vm_map_lookup_entry(map, tmp_entry.vme_start, | |
3345 | &first_entry)) { | |
3346 | if (!user_wire) | |
3347 | panic("vm_map_unwire: re-lookup failed"); | |
3348 | entry = first_entry->vme_next; | |
3349 | } else | |
3350 | entry = first_entry; | |
3351 | } | |
3352 | last_timestamp = map->timestamp; | |
3353 | ||
3354 | /* | |
3355 | * clear transition bit for all constituent entries that | |
3356 | * were in the original entry (saved in tmp_entry). Also | |
3357 | * check for waiters. | |
3358 | */ | |
3359 | while ((entry != vm_map_to_entry(map)) && | |
3360 | (entry->vme_start < tmp_entry.vme_end)) { | |
3361 | assert(entry->in_transition); | |
3362 | entry->in_transition = FALSE; | |
3363 | if (entry->needs_wakeup) { | |
3364 | entry->needs_wakeup = FALSE; | |
3365 | need_wakeup = TRUE; | |
3366 | } | |
3367 | entry = entry->vme_next; | |
3368 | } | |
3369 | } | |
91447636 A |
3370 | |
3371 | /* | |
3372 | * We might have fragmented the address space when we wired this | |
3373 | * range of addresses. Attempt to re-coalesce these VM map entries | |
3374 | * with their neighbors now that they're no longer wired. | |
3375 | * Under some circumstances, address space fragmentation can | |
3376 | * prevent VM object shadow chain collapsing, which can cause | |
3377 | * swap space leaks. | |
3378 | */ | |
3379 | vm_map_simplify_range(map, start, end); | |
3380 | ||
1c79356b A |
3381 | vm_map_unlock(map); |
3382 | /* | |
3383 | * wake up anybody waiting on entries that we have unwired. | |
3384 | */ | |
3385 | if (need_wakeup) | |
3386 | vm_map_entry_wakeup(map); | |
3387 | return(KERN_SUCCESS); | |
3388 | ||
3389 | } | |

kern_return_t
vm_map_unwire(
	register vm_map_t		map,
	register vm_map_offset_t	start,
	register vm_map_offset_t	end,
	boolean_t			user_wire)
{
	return vm_map_unwire_nested(map, start, end,
		user_wire, (pmap_t)NULL, 0);
}
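
/*
 * Illustrative sketch (hedged, not part of this file's build): undoing
 * the user wiring taken in the vm_map_wire() example earlier, over the
 * same hypothetical "addr"/"size" range:
 *
 *	kr = vm_map_unwire(current_map(),
 *			   vm_map_trunc_page(addr),
 *			   vm_map_round_page(addr + size),
 *			   TRUE);
 */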


/*
 *	vm_map_entry_delete:	[ internal use only ]
 *
 *	Deallocate the given entry from the target map.
 */
static void
vm_map_entry_delete(
	register vm_map_t	map,
	register vm_map_entry_t	entry)
{
	register vm_map_offset_t	s, e;
	register vm_object_t		object;
	register vm_map_t		submap;

	s = entry->vme_start;
	e = entry->vme_end;
	assert(page_aligned(s));
	assert(page_aligned(e));
	assert(entry->wired_count == 0);
	assert(entry->user_wired_count == 0);

	if (entry->is_sub_map) {
		object = NULL;
		submap = entry->object.sub_map;
	} else {
		submap = NULL;
		object = entry->object.vm_object;
	}

	vm_map_entry_unlink(map, entry);
	map->size -= e - s;

	vm_map_entry_dispose(map, entry);

	vm_map_unlock(map);
	/*
	 * Deallocate the object only after removing all
	 * pmap entries pointing to its pages.
	 */
	if (submap)
		vm_map_deallocate(submap);
	else
		vm_object_deallocate(object);

}

void
vm_map_submap_pmap_clean(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end,
	vm_map_t	sub_map,
	vm_map_offset_t	offset)
{
	vm_map_offset_t	submap_start;
	vm_map_offset_t	submap_end;
	vm_map_size_t	remove_size;
	vm_map_entry_t	entry;

	submap_end = offset + (end - start);
	submap_start = offset;
	if (vm_map_lookup_entry(sub_map, offset, &entry)) {

		remove_size = (entry->vme_end - entry->vme_start);
		if (offset > entry->vme_start)
			remove_size -= offset - entry->vme_start;


		if (submap_end < entry->vme_end) {
			remove_size -=
				entry->vme_end - submap_end;
		}
		if (entry->is_sub_map) {
			vm_map_submap_pmap_clean(
				sub_map,
				start,
				start + remove_size,
				entry->object.sub_map,
				entry->offset);
		} else {

			if ((map->mapped) && (map->ref_count)
			    && (entry->object.vm_object != NULL)) {
				vm_object_pmap_protect(
					entry->object.vm_object,
					entry->offset,
					remove_size,
					PMAP_NULL,
					entry->vme_start,
					VM_PROT_NONE);
			} else {
				pmap_remove(map->pmap,
					(addr64_t)start,
					(addr64_t)(start + remove_size));
			}
		}
	}

	entry = entry->vme_next;

	while ((entry != vm_map_to_entry(sub_map))
	       && (entry->vme_start < submap_end)) {
		remove_size = (entry->vme_end - entry->vme_start);
		if (submap_end < entry->vme_end) {
			remove_size -= entry->vme_end - submap_end;
		}
		if (entry->is_sub_map) {
			vm_map_submap_pmap_clean(
				sub_map,
				(start + entry->vme_start) - offset,
				((start + entry->vme_start) - offset) + remove_size,
				entry->object.sub_map,
				entry->offset);
		} else {
			if ((map->mapped) && (map->ref_count)
			    && (entry->object.vm_object != NULL)) {
				vm_object_pmap_protect(
					entry->object.vm_object,
					entry->offset,
					remove_size,
					PMAP_NULL,
					entry->vme_start,
					VM_PROT_NONE);
			} else {
				pmap_remove(map->pmap,
					(addr64_t)((start + entry->vme_start)
						- offset),
					(addr64_t)(((start + entry->vme_start)
						- offset) + remove_size));
			}
		}
		entry = entry->vme_next;
	}
	return;
}

/*
 *	vm_map_delete:	[ internal use only ]
 *
 *	Deallocates the given address range from the target map.
 *	Removes all user wirings.  Unwires one kernel wiring if
 *	VM_MAP_REMOVE_KUNWIRE is set.  Waits for kernel wirings to go
 *	away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set.  Sleeps
 *	interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set.
 *
 *	This routine is called with map locked and leaves map locked.
 */
static kern_return_t
vm_map_delete(
	vm_map_t		map,
	vm_map_offset_t		start,
	vm_map_offset_t		end,
	int			flags,
	vm_map_t		zap_map)
{
	vm_map_entry_t		entry, next;
	struct vm_map_entry	*first_entry, tmp_entry;
	register vm_map_offset_t	s, e;
	register vm_object_t	object;
	boolean_t		need_wakeup;
	unsigned int		last_timestamp = ~0; /* unlikely value */
	int			interruptible;

	interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ?
			THREAD_ABORTSAFE : THREAD_UNINT;

	/*
	 * All our DMA I/O operations in IOKit are currently done by
	 * wiring through the map entries of the task requesting the I/O.
	 * Because of this, we must always wait for kernel wirings
	 * to go away on the entries before deleting them.
	 *
	 * Any caller who wants to actually remove a kernel wiring
	 * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to
	 * properly remove one wiring instead of blasting through
	 * them all.
	 */
	flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE;

	/*
	 * Find the start of the region, and clip it
	 */
	if (vm_map_lookup_entry(map, start, &first_entry)) {
		entry = first_entry;
		vm_map_clip_start(map, entry, start);

		/*
		 * Fix the lookup hint now, rather than each
		 * time through the loop.
		 */
		SAVE_HINT(map, entry->vme_prev);
	} else {
		entry = first_entry->vme_next;
	}

	need_wakeup = FALSE;
	/*
	 * Step through all entries in this region
	 */
	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {

		vm_map_clip_end(map, entry, end);
		if (entry->in_transition) {
			wait_result_t wait_result;

			/*
			 * Another thread is wiring/unwiring this entry.
			 * Let the other thread know we are waiting.
			 */
			s = entry->vme_start;
			entry->needs_wakeup = TRUE;

			/*
			 * wake up anybody waiting on entries that we have
			 * already unwired/deleted.
			 */
			if (need_wakeup) {
				vm_map_entry_wakeup(map);
				need_wakeup = FALSE;
			}

			wait_result = vm_map_entry_wait(map, interruptible);

			if (interruptible &&
			    wait_result == THREAD_INTERRUPTED) {
				/*
				 * We do not clear the needs_wakeup flag,
				 * since we cannot tell if we were the only one.
				 */
				vm_map_unlock(map);
				return KERN_ABORTED;
			}

			/*
			 * The entry could have been clipped or it
			 * may not exist anymore.  Look it up again.
			 */
			if (!vm_map_lookup_entry(map, s, &first_entry)) {
				assert((map != kernel_map) &&
				       (!entry->is_sub_map));
				/*
				 * User: use the next entry
				 */
				entry = first_entry->vme_next;
			} else {
				entry = first_entry;
				SAVE_HINT(map, entry->vme_prev);
			}
			last_timestamp = map->timestamp;
			continue;
		} /* end in_transition */

		if (entry->wired_count) {
			/*
			 * Remove a kernel wiring if requested or if
			 * there are user wirings.
			 */
			if ((flags & VM_MAP_REMOVE_KUNWIRE) ||
			    (entry->user_wired_count > 0))
				entry->wired_count--;

			/* remove all user wire references */
			entry->user_wired_count = 0;

			if (entry->wired_count != 0) {
				assert((map != kernel_map) &&
				       (!entry->is_sub_map));
				/*
				 * Cannot continue.  Typical case is when
				 * a user thread has physical I/O pending
				 * on this page.  Either wait for the
				 * kernel wiring to go away or return an
				 * error.
				 */
				if (flags & VM_MAP_REMOVE_WAIT_FOR_KWIRE) {
					wait_result_t wait_result;

					s = entry->vme_start;
					entry->needs_wakeup = TRUE;
					wait_result = vm_map_entry_wait(map,
								interruptible);

					if (interruptible &&
					    wait_result == THREAD_INTERRUPTED) {
						/*
						 * We do not clear the
						 * needs_wakeup flag, since we
						 * cannot tell if we were the
						 * only one.
						 */
						vm_map_unlock(map);
						return KERN_ABORTED;
					}

					/*
					 * The entry could have been clipped or
					 * it may not exist anymore.  Look it
					 * up again.
					 */
					if (!vm_map_lookup_entry(map, s,
								&first_entry)) {
						assert((map != kernel_map) &&
						       (!entry->is_sub_map));
						/*
						 * User: use the next entry
						 */
						entry = first_entry->vme_next;
					} else {
						entry = first_entry;
						SAVE_HINT(map, entry->vme_prev);
					}
					last_timestamp = map->timestamp;
					continue;
				}
				else {
					return KERN_FAILURE;
				}
			}

			entry->in_transition = TRUE;
			/*
			 * copy current entry.  see comment in vm_map_wire()
			 */
			tmp_entry = *entry;
			s = entry->vme_start;
			e = entry->vme_end;

			/*
			 * We can unlock the map now.  The in_transition
			 * state guarantees existence of the entry.
			 */
			vm_map_unlock(map);
			vm_fault_unwire(map, &tmp_entry,
				tmp_entry.object.vm_object == kernel_object,
				map->pmap, tmp_entry.vme_start);
			vm_map_lock(map);

			if (last_timestamp+1 != map->timestamp) {
				/*
				 * Find the entry again.  It could have
				 * been clipped after we unlocked the map.
				 */
				if (!vm_map_lookup_entry(map, s, &first_entry)){
					assert((map != kernel_map) &&
					       (!entry->is_sub_map));
					first_entry = first_entry->vme_next;
				} else {
					SAVE_HINT(map, entry->vme_prev);
				}
			} else {
				SAVE_HINT(map, entry->vme_prev);
				first_entry = entry;
			}

			last_timestamp = map->timestamp;

			entry = first_entry;
			while ((entry != vm_map_to_entry(map)) &&
			       (entry->vme_start < tmp_entry.vme_end)) {
				assert(entry->in_transition);
				entry->in_transition = FALSE;
				if (entry->needs_wakeup) {
					entry->needs_wakeup = FALSE;
					need_wakeup = TRUE;
				}
				entry = entry->vme_next;
			}
			/*
			 * We have unwired the entry(s).  Go back and
			 * delete them.
			 */
			entry = first_entry;
			continue;
		}

		/* entry is unwired */
		assert(entry->wired_count == 0);
		assert(entry->user_wired_count == 0);

		if ((!entry->is_sub_map &&
		     entry->object.vm_object != kernel_object) ||
		    entry->is_sub_map) {
			if (entry->is_sub_map) {
				if (entry->use_pmap) {
#ifndef i386
					pmap_unnest(map->pmap, (addr64_t)entry->vme_start);
#endif
					if ((map->mapped) && (map->ref_count)) {
						/* clean up parent map/maps */
						vm_map_submap_pmap_clean(
							map, entry->vme_start,
							entry->vme_end,
							entry->object.sub_map,
							entry->offset);
					}
				} else {
					vm_map_submap_pmap_clean(
						map, entry->vme_start, entry->vme_end,
						entry->object.sub_map,
						entry->offset);
				}
			} else {
				object = entry->object.vm_object;
				if ((map->mapped) && (map->ref_count)) {
					vm_object_pmap_protect(
						object, entry->offset,
						entry->vme_end - entry->vme_start,
						PMAP_NULL,
						entry->vme_start,
						VM_PROT_NONE);
				} else {
					pmap_remove(map->pmap,
						entry->vme_start,
						entry->vme_end);
				}
			}
		}

		/*
		 * All pmap mappings for this map entry must have been
		 * cleared by now.
		 */
		assert(vm_map_pmap_is_empty(map,
					entry->vme_start,
					entry->vme_end));

		next = entry->vme_next;
		s = next->vme_start;
		last_timestamp = map->timestamp;

		if ((flags & VM_MAP_REMOVE_SAVE_ENTRIES) &&
		    zap_map != VM_MAP_NULL) {
			/*
			 * The caller wants to save the affected VM map entries
			 * into the "zap_map".  The caller will take care of
			 * these entries.
			 */
			/* unlink the entry from "map" ... */
			vm_map_entry_unlink(map, entry);
			/* ... and add it to the end of the "zap_map" */
			vm_map_entry_link(zap_map,
					  vm_map_last_entry(zap_map),
					  entry);
		} else {
			vm_map_entry_delete(map, entry);
			/* vm_map_entry_delete unlocks the map */
			vm_map_lock(map);
		}

		entry = next;

		if (entry == vm_map_to_entry(map)) {
			break;
		}
		if (last_timestamp+1 != map->timestamp) {
			/*
			 * We are responsible for deleting everything from
			 * the given space.  If someone has interfered, we
			 * pick up where we left off.  Back fills should be
			 * all right for anyone except vm_map_delete, and we
			 * have to assume that the task has been fully
			 * disabled before we get here.
			 */
			if (!vm_map_lookup_entry(map, s, &entry)){
				entry = entry->vme_next;
			} else {
				SAVE_HINT(map, entry->vme_prev);
			}
			/*
			 * Others can not only allocate behind us; entries
			 * can also be coalesced while we don't hold the
			 * map lock.
			 */
			if (entry == vm_map_to_entry(map)) {
				break;
			}
			vm_map_clip_start(map, entry, s);
		}
		last_timestamp = map->timestamp;
	}

	if (map->wait_for_space)
		thread_wakeup((event_t) map);
	/*
	 * wake up anybody waiting on entries that we have already deleted.
	 */
	if (need_wakeup)
		vm_map_entry_wakeup(map);

	return KERN_SUCCESS;
}
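
/*
 * Sketch of the VM_MAP_REMOVE_SAVE_ENTRIES path described above (hedged;
 * the exact setup is the caller's choice).  A caller builds a "zap map"
 * to collect the unlinked entries, so their objects can be torn down
 * later rather than by vm_map_entry_delete() in mid-delete:
 *
 *	vm_map_t zap_map;
 *
 *	zap_map = vm_map_create(PMAP_NULL, start, end,
 *				map->hdr.entries_pageable);
 *	kr = vm_map_delete(map, start, end,
 *			   VM_MAP_REMOVE_SAVE_ENTRIES, zap_map);
 *	...
 *	vm_map_destroy(zap_map);	(after the map lock is dropped)
 */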

/*
 *	vm_map_remove:
 *
 *	Remove the given address range from the target map.
 *	This is the exported form of vm_map_delete.
 */
kern_return_t
vm_map_remove(
	register vm_map_t		map,
	register vm_map_offset_t	start,
	register vm_map_offset_t	end,
	register boolean_t		flags)
{
	register kern_return_t	result;

	vm_map_lock(map);
	VM_MAP_RANGE_CHECK(map, start, end);
	result = vm_map_delete(map, start, end, flags, VM_MAP_NULL);
	vm_map_unlock(map);

	return(result);
}
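
/*
 * Illustrative sketch (hedged): a caller tearing down a wired kernel
 * range passes VM_MAP_REMOVE_KUNWIRE so that exactly one kernel wiring
 * is dropped before the range is deleted; callers with nothing wired
 * pass VM_MAP_NO_FLAGS.  "addr" and "size" are hypothetical:
 *
 *	(void) vm_map_remove(kernel_map,
 *			     vm_map_trunc_page(addr),
 *			     vm_map_round_page(addr + size),
 *			     VM_MAP_REMOVE_KUNWIRE);
 */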


/*
 *	Routine:	vm_map_copy_discard
 *
 *	Description:
 *		Dispose of a map copy object (returned by
 *		vm_map_copyin).
 */
void
vm_map_copy_discard(
	vm_map_copy_t	copy)
{
	TR_DECL("vm_map_copy_discard");

/*	tr3("enter: copy 0x%x type %d", copy, copy->type);*/

	if (copy == VM_MAP_COPY_NULL)
		return;

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		while (vm_map_copy_first_entry(copy) !=
		       vm_map_copy_to_entry(copy)) {
			vm_map_entry_t	entry = vm_map_copy_first_entry(copy);

			vm_map_copy_entry_unlink(copy, entry);
			vm_object_deallocate(entry->object.vm_object);
			vm_map_copy_entry_dispose(copy, entry);
		}
		break;
	case VM_MAP_COPY_OBJECT:
		vm_object_deallocate(copy->cpy_object);
		break;
	case VM_MAP_COPY_KERNEL_BUFFER:

		/*
		 * The vm_map_copy_t and possibly the data buffer were
		 * allocated by a single call to kalloc(), i.e. the
		 * vm_map_copy_t was not allocated out of the zone.
		 */
		kfree(copy, copy->cpy_kalloc_size);
		return;
	}
	zfree(vm_map_copy_zone, copy);
}
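
/*
 * Typical discard pattern (a hedged sketch; "validate()" stands in for
 * whatever inspection the caller performs).  A copy object that is never
 * handed off must be discarded by its owner:
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	if (!validate(copy)) {
 *		vm_map_copy_discard(copy);
 *		return KERN_FAILURE;
 *	}
 */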

/*
 *	Routine:	vm_map_copy_copy
 *
 *	Description:
 *		Move the information in a map copy object to
 *		a new map copy object, leaving the old one
 *		empty.
 *
 *		This is used by kernel routines that need
 *		to look at out-of-line data (in copyin form)
 *		before deciding whether to return SUCCESS.
 *		If the routine returns FAILURE, the original
 *		copy object will be deallocated; therefore,
 *		these routines must make a copy of the copy
 *		object and leave the original empty so that
 *		deallocation will not fail.
 */
vm_map_copy_t
vm_map_copy_copy(
	vm_map_copy_t	copy)
{
	vm_map_copy_t	new_copy;

	if (copy == VM_MAP_COPY_NULL)
		return VM_MAP_COPY_NULL;

	/*
	 * Allocate a new copy object, and copy the information
	 * from the old one into it.
	 */

	new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	*new_copy = *copy;

	if (copy->type == VM_MAP_COPY_ENTRY_LIST) {
		/*
		 * The links in the entry chain must be
		 * changed to point to the new copy object.
		 */
		vm_map_copy_first_entry(copy)->vme_prev
			= vm_map_copy_to_entry(new_copy);
		vm_map_copy_last_entry(copy)->vme_next
			= vm_map_copy_to_entry(new_copy);
	}

	/*
	 * Change the old copy object into one that contains
	 * nothing to be deallocated.
	 */
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = VM_OBJECT_NULL;

	/*
	 * Return the new object.
	 */
	return new_copy;
}
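
/*
 * Sketch of the look-before-returning pattern described above (hedged;
 * "inspect()" is hypothetical).  The routine moves the data into its own
 * copy object first, so a FAILURE return deallocates only the original,
 * now-empty object:
 *
 *	vm_map_copy_t my_copy = vm_map_copy_copy(copy);
 *
 *	if (!inspect(my_copy)) {
 *		vm_map_copy_discard(my_copy);
 *		return KERN_FAILURE;	(caller deallocates empty "copy")
 *	}
 */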

static kern_return_t
vm_map_overwrite_submap_recurse(
	vm_map_t	dst_map,
	vm_map_offset_t	dst_addr,
	vm_map_size_t	dst_size)
{
	vm_map_offset_t	dst_end;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	kern_return_t	result;
	boolean_t	encountered_sub_map = FALSE;



	/*
	 * Verify that the destination is all writeable
	 * initially.  We have to trunc the destination
	 * address and round the copy size or we'll end up
	 * splitting entries in strange ways.
	 */

	dst_end = vm_map_round_page(dst_addr + dst_size);
	vm_map_lock(dst_map);

start_pass_1:
	if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
		vm_map_unlock(dst_map);
		return(KERN_INVALID_ADDRESS);
	}

	vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr));

	for (entry = tmp_entry;;) {
		vm_map_entry_t	next;

		next = entry->vme_next;
		while (entry->is_sub_map) {
			vm_map_offset_t	sub_start;
			vm_map_offset_t	sub_end;
			vm_map_offset_t	local_end;

			if (entry->in_transition) {
				/*
				 * Say that we are waiting, and wait for entry.
				 */
				entry->needs_wakeup = TRUE;
				vm_map_entry_wait(dst_map, THREAD_UNINT);

				goto start_pass_1;
			}

			encountered_sub_map = TRUE;
			sub_start = entry->offset;

			if (entry->vme_end < dst_end)
				sub_end = entry->vme_end;
			else
				sub_end = dst_end;
			sub_end -= entry->vme_start;
			sub_end += entry->offset;
			local_end = entry->vme_end;
			vm_map_unlock(dst_map);

			result = vm_map_overwrite_submap_recurse(
					entry->object.sub_map,
					sub_start,
					sub_end - sub_start);

			if (result != KERN_SUCCESS)
				return result;
			if (dst_end <= entry->vme_end)
				return KERN_SUCCESS;
			vm_map_lock(dst_map);
			if (!vm_map_lookup_entry(dst_map, local_end,
						&tmp_entry)) {
				vm_map_unlock(dst_map);
				return(KERN_INVALID_ADDRESS);
			}
			entry = tmp_entry;
			next = entry->vme_next;
		}

		if (!(entry->protection & VM_PROT_WRITE)) {
			vm_map_unlock(dst_map);
			return(KERN_PROTECTION_FAILURE);
		}

		/*
		 * If the entry is in transition, we must wait
		 * for it to exit that state.  Anything could happen
		 * when we unlock the map, so start over.
		 */
		if (entry->in_transition) {

			/*
			 * Say that we are waiting, and wait for entry.
			 */
			entry->needs_wakeup = TRUE;
			vm_map_entry_wait(dst_map, THREAD_UNINT);

			goto start_pass_1;
		}

		/*
		 * our range is contained completely within this map entry
		 */
		if (dst_end <= entry->vme_end) {
			vm_map_unlock(dst_map);
			return KERN_SUCCESS;
		}
		/*
		 * check that range specified is contiguous region
		 */
		if ((next == vm_map_to_entry(dst_map)) ||
		    (next->vme_start != entry->vme_end)) {
			vm_map_unlock(dst_map);
			return(KERN_INVALID_ADDRESS);
		}

		/*
		 * Check for permanent objects in the destination.
		 */
		if ((entry->object.vm_object != VM_OBJECT_NULL) &&
		    ((!entry->object.vm_object->internal) ||
		     (entry->object.vm_object->true_share))) {
			if (encountered_sub_map) {
				vm_map_unlock(dst_map);
				return(KERN_FAILURE);
			}
		}


		entry = next;
	}/* for */
	vm_map_unlock(dst_map);
	return(KERN_SUCCESS);
}

/*
 *	Routine:	vm_map_copy_overwrite
 *
 *	Description:
 *		Copy the memory described by the map copy
 *		object (copy; returned by vm_map_copyin) onto
 *		the specified destination region (dst_map, dst_addr).
 *		The destination must be writeable.
 *
 *		Unlike vm_map_copyout, this routine actually
 *		writes over previously-mapped memory.  If the
 *		previous mapping was to a permanent (user-supplied)
 *		memory object, it is preserved.
 *
 *		The attributes (protection and inheritance) of the
 *		destination region are preserved.
 *
 *		If successful, consumes the copy object.
 *		Otherwise, the caller is responsible for it.
 *
 *	Implementation notes:
 *		To overwrite aligned temporary virtual memory, it is
 *		sufficient to remove the previous mapping and insert
 *		the new copy.  This replacement is done either on
 *		the whole region (if no permanent virtual memory
 *		objects are embedded in the destination region) or
 *		in individual map entries.
 *
 *		To overwrite permanent virtual memory, it is necessary
 *		to copy each page, as the external memory management
 *		interface currently does not provide any optimizations.
 *
 *		Unaligned memory also has to be copied.  It is possible
 *		to use 'vm_trickery' to copy the aligned data.  This is
 *		not done but not hard to implement.
 *
 *		Once a page of permanent memory has been overwritten,
 *		it is impossible to interrupt this function; otherwise,
 *		the call would be neither atomic nor location-independent.
 *		The kernel-state portion of a user thread must be
 *		interruptible.
 *
 *		It may be expensive to forward all requests that might
 *		overwrite permanent memory (vm_write, vm_copy) to
 *		uninterruptible kernel threads.  This routine may be
 *		called by interruptible threads; however, success is
 *		not guaranteed -- if the request cannot be performed
 *		atomically and interruptibly, an error indication is
 *		returned.
 */

static kern_return_t
vm_map_copy_overwrite_nested(
	vm_map_t		dst_map,
	vm_map_address_t	dst_addr,
	vm_map_copy_t		copy,
	boolean_t		interruptible,
	pmap_t			pmap)
{
	vm_map_offset_t	dst_end;
	vm_map_entry_t	tmp_entry;
	vm_map_entry_t	entry;
	kern_return_t	kr;
	boolean_t	aligned = TRUE;
	boolean_t	contains_permanent_objects = FALSE;
	boolean_t	encountered_sub_map = FALSE;
	vm_map_offset_t	base_addr;
	vm_map_size_t	copy_size;
	vm_map_size_t	total_size;


	/*
	 * Check for null copy object.
	 */

	if (copy == VM_MAP_COPY_NULL)
		return(KERN_SUCCESS);

	/*
	 * Check for special kernel buffer allocated
	 * by new_ipc_kmsg_copyin.
	 */

	if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) {
		return(vm_map_copyout_kernel_buffer(
				dst_map, &dst_addr,
				copy, TRUE));
	}

	/*
	 * Only works for entry lists at the moment.  Will
	 * support page lists later.
	 */

	assert(copy->type == VM_MAP_COPY_ENTRY_LIST);

	if (copy->size == 0) {
		vm_map_copy_discard(copy);
		return(KERN_SUCCESS);
	}

	/*
	 * Verify that the destination is all writeable
	 * initially.  We have to trunc the destination
	 * address and round the copy size or we'll end up
	 * splitting entries in strange ways.
	 */

	if (!page_aligned(copy->size) ||
	    !page_aligned(copy->offset) ||
	    !page_aligned(dst_addr))
	{
		aligned = FALSE;
		dst_end = vm_map_round_page(dst_addr + copy->size);
	} else {
		dst_end = dst_addr + copy->size;
	}

	vm_map_lock(dst_map);

	/* LP64todo - remove this check when vm_map_commpage64()
	 * no longer has to stuff in a map_entry for the commpage
	 * above the map's max_offset.
	 */
	if (dst_addr >= dst_map->max_offset) {
		vm_map_unlock(dst_map);
		return(KERN_INVALID_ADDRESS);
	}

start_pass_1:
	if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
		vm_map_unlock(dst_map);
		return(KERN_INVALID_ADDRESS);
	}
	vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(dst_addr));
	for (entry = tmp_entry;;) {
		vm_map_entry_t	next = entry->vme_next;

		while (entry->is_sub_map) {
			vm_map_offset_t	sub_start;
			vm_map_offset_t	sub_end;
			vm_map_offset_t	local_end;

			if (entry->in_transition) {

				/*
				 * Say that we are waiting, and wait for entry.
				 */
				entry->needs_wakeup = TRUE;
				vm_map_entry_wait(dst_map, THREAD_UNINT);

				goto start_pass_1;
			}

			local_end = entry->vme_end;
			if (!(entry->needs_copy)) {
				/* if needs_copy we are a COW submap */
				/* in such a case we just replace so */
				/* there is no need for the follow-  */
				/* ing check.                        */
				encountered_sub_map = TRUE;
				sub_start = entry->offset;

				if (entry->vme_end < dst_end)
					sub_end = entry->vme_end;
				else
					sub_end = dst_end;
				sub_end -= entry->vme_start;
				sub_end += entry->offset;
				vm_map_unlock(dst_map);

				kr = vm_map_overwrite_submap_recurse(
						entry->object.sub_map,
						sub_start,
						sub_end - sub_start);
				if (kr != KERN_SUCCESS)
					return kr;
				vm_map_lock(dst_map);
			}

			if (dst_end <= entry->vme_end)
				goto start_overwrite;
			if (!vm_map_lookup_entry(dst_map, local_end,
						&entry)) {
				vm_map_unlock(dst_map);
				return(KERN_INVALID_ADDRESS);
			}
			next = entry->vme_next;
		}

		if (!(entry->protection & VM_PROT_WRITE)) {
			vm_map_unlock(dst_map);
			return(KERN_PROTECTION_FAILURE);
		}

		/*
		 * If the entry is in transition, we must wait
		 * for it to exit that state.  Anything could happen
		 * when we unlock the map, so start over.
		 */
		if (entry->in_transition) {

			/*
			 * Say that we are waiting, and wait for entry.
			 */
			entry->needs_wakeup = TRUE;
			vm_map_entry_wait(dst_map, THREAD_UNINT);

			goto start_pass_1;
		}

		/*
		 * our range is contained completely within this map entry
		 */
		if (dst_end <= entry->vme_end)
			break;
		/*
		 * check that range specified is contiguous region
		 */
		if ((next == vm_map_to_entry(dst_map)) ||
		    (next->vme_start != entry->vme_end)) {
			vm_map_unlock(dst_map);
			return(KERN_INVALID_ADDRESS);
		}


		/*
		 * Check for permanent objects in the destination.
		 */
		if ((entry->object.vm_object != VM_OBJECT_NULL) &&
		    ((!entry->object.vm_object->internal) ||
		     (entry->object.vm_object->true_share))) {
			contains_permanent_objects = TRUE;
		}

		entry = next;
	}/* for */

start_overwrite:
	/*
	 * If there are permanent objects in the destination, then
	 * the copy cannot be interrupted.
	 */

	if (interruptible && contains_permanent_objects) {
		vm_map_unlock(dst_map);
		return(KERN_FAILURE);	/* XXX */
	}

	/*
	 *
	 * Make a second pass, overwriting the data.
	 * At the beginning of each loop iteration,
	 * the next entry to be overwritten is "tmp_entry"
	 * (initially, the value returned from the lookup above),
	 * and the starting address expected in that entry
	 * is "base_addr".
	 */

	total_size = copy->size;
	if (encountered_sub_map) {
		copy_size = 0;
		/* re-calculate tmp_entry since we've had the map */
		/* unlocked */
		if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
			vm_map_unlock(dst_map);
			return(KERN_INVALID_ADDRESS);
		}
	} else {
		copy_size = copy->size;
	}

	base_addr = dst_addr;
	while (TRUE) {
		/* deconstruct the copy object and do in parts */
		/* only in sub_map, interruptible case */
		vm_map_entry_t	copy_entry;
		vm_map_entry_t	previous_prev = VM_MAP_ENTRY_NULL;
		vm_map_entry_t	next_copy = VM_MAP_ENTRY_NULL;
		int		nentries;
		int		remaining_entries = 0;
		int		new_offset = 0;

		for (entry = tmp_entry; copy_size == 0;) {
			vm_map_entry_t	next;

			next = entry->vme_next;

			/* tmp_entry and base address are moved along */
			/* each time we encounter a sub-map.  Otherwise */
			/* entry can outpace tmp_entry, and the copy_size */
			/* may reflect the distance between them. */
			/* if the current entry is found to be in transition */
			/* we will start over at the beginning or the last */
			/* encounter of a submap as dictated by base_addr; */
			/* we will zero copy_size accordingly. */
			if (entry->in_transition) {
				/*
				 * Say that we are waiting, and wait for entry.
				 */
				entry->needs_wakeup = TRUE;
				vm_map_entry_wait(dst_map, THREAD_UNINT);

				if (!vm_map_lookup_entry(dst_map, base_addr,
							&tmp_entry)) {
					vm_map_unlock(dst_map);
					return(KERN_INVALID_ADDRESS);
				}
				copy_size = 0;
				entry = tmp_entry;
				continue;
			}
			if (entry->is_sub_map) {
				vm_map_offset_t	sub_start;
				vm_map_offset_t	sub_end;
				vm_map_offset_t	local_end;

				if (entry->needs_copy) {
					/* if this is a COW submap */
					/* just back the range with an */
					/* anonymous entry */
					if (entry->vme_end < dst_end)
						sub_end = entry->vme_end;
					else
						sub_end = dst_end;
					if (entry->vme_start < base_addr)
						sub_start = base_addr;
					else
						sub_start = entry->vme_start;
					vm_map_clip_end(
						dst_map, entry, sub_end);
					vm_map_clip_start(
						dst_map, entry, sub_start);
					entry->is_sub_map = FALSE;
					vm_map_deallocate(
						entry->object.sub_map);
					entry->object.sub_map = NULL;
					entry->is_shared = FALSE;
					entry->needs_copy = FALSE;
					entry->offset = 0;
					entry->protection = VM_PROT_ALL;
					entry->max_protection = VM_PROT_ALL;
					entry->wired_count = 0;
					entry->user_wired_count = 0;
					if (entry->inheritance
							== VM_INHERIT_SHARE)
						entry->inheritance = VM_INHERIT_COPY;
					continue;
				}
				/* first take care of any non-sub_map */
				/* entries to send */
				if (base_addr < entry->vme_start) {
					/* stuff to send */
					copy_size =
						entry->vme_start - base_addr;
					break;
				}
				sub_start = entry->offset;

				if (entry->vme_end < dst_end)
					sub_end = entry->vme_end;
				else
					sub_end = dst_end;
				sub_end -= entry->vme_start;
				sub_end += entry->offset;
				local_end = entry->vme_end;
				vm_map_unlock(dst_map);
				copy_size = sub_end - sub_start;

				/* adjust the copy object */
				if (total_size > copy_size) {
					vm_map_size_t	local_size = 0;
					vm_map_size_t	entry_size;

					nentries = 1;
					new_offset = copy->offset;
					copy_entry = vm_map_copy_first_entry(copy);
					while (copy_entry !=
						vm_map_copy_to_entry(copy)) {
						entry_size = copy_entry->vme_end -
							copy_entry->vme_start;
						if ((local_size < copy_size) &&
						    ((local_size + entry_size)
							>= copy_size)) {
							vm_map_copy_clip_end(copy,
								copy_entry,
								copy_entry->vme_start +
								(copy_size - local_size));
							entry_size = copy_entry->vme_end -
								copy_entry->vme_start;
							local_size += entry_size;
							new_offset += entry_size;
						}
						if (local_size >= copy_size) {
							next_copy = copy_entry->vme_next;
							copy_entry->vme_next =
								vm_map_copy_to_entry(copy);
							previous_prev =
								copy->cpy_hdr.links.prev;
							copy->cpy_hdr.links.prev = copy_entry;
							copy->size = copy_size;
							remaining_entries =
								copy->cpy_hdr.nentries;
							remaining_entries -= nentries;
							copy->cpy_hdr.nentries = nentries;
							break;
						} else {
							local_size += entry_size;
							new_offset += entry_size;
							nentries++;
						}
						copy_entry = copy_entry->vme_next;
					}
				}

				if ((entry->use_pmap) && (pmap == NULL)) {
					kr = vm_map_copy_overwrite_nested(
						entry->object.sub_map,
						sub_start,
						copy,
						interruptible,
						entry->object.sub_map->pmap);
				} else if (pmap != NULL) {
					kr = vm_map_copy_overwrite_nested(
						entry->object.sub_map,
						sub_start,
						copy,
						interruptible, pmap);
				} else {
					kr = vm_map_copy_overwrite_nested(
						entry->object.sub_map,
						sub_start,
						copy,
						interruptible,
						dst_map->pmap);
				}
				if (kr != KERN_SUCCESS) {
					if (next_copy != NULL) {
						copy->cpy_hdr.nentries +=
							remaining_entries;
						copy->cpy_hdr.links.prev->vme_next =
							next_copy;
						copy->cpy_hdr.links.prev
							= previous_prev;
						copy->size = total_size;
					}
					return kr;
				}
				if (dst_end <= local_end) {
					return(KERN_SUCCESS);
				}
				/* otherwise copy no longer exists, it was */
				/* destroyed after successful copy_overwrite */
				copy = (vm_map_copy_t)
					zalloc(vm_map_copy_zone);
				vm_map_copy_first_entry(copy) =
					vm_map_copy_last_entry(copy) =
					vm_map_copy_to_entry(copy);
				copy->type = VM_MAP_COPY_ENTRY_LIST;
				copy->offset = new_offset;

				total_size -= copy_size;
				copy_size = 0;
				/* put back remainder of copy in container */
				if (next_copy != NULL) {
					copy->cpy_hdr.nentries = remaining_entries;
					copy->cpy_hdr.links.next = next_copy;
					copy->cpy_hdr.links.prev = previous_prev;
					copy->size = total_size;
					next_copy->vme_prev =
						vm_map_copy_to_entry(copy);
					next_copy = NULL;
				}
				base_addr = local_end;
				vm_map_lock(dst_map);
				if (!vm_map_lookup_entry(dst_map,
						local_end, &tmp_entry)) {
					vm_map_unlock(dst_map);
					return(KERN_INVALID_ADDRESS);
				}
				entry = tmp_entry;
				continue;
			}
			if (dst_end <= entry->vme_end) {
				copy_size = dst_end - base_addr;
				break;
			}

			if ((next == vm_map_to_entry(dst_map)) ||
			    (next->vme_start != entry->vme_end)) {
				vm_map_unlock(dst_map);
				return(KERN_INVALID_ADDRESS);
			}

			entry = next;
		}/* for */

		next_copy = NULL;
		nentries = 1;

		/* adjust the copy object */
		if (total_size > copy_size) {
			vm_map_size_t	local_size = 0;
			vm_map_size_t	entry_size;

			new_offset = copy->offset;
			copy_entry = vm_map_copy_first_entry(copy);
			while (copy_entry != vm_map_copy_to_entry(copy)) {
				entry_size = copy_entry->vme_end -
					copy_entry->vme_start;
				if ((local_size < copy_size) &&
				    ((local_size + entry_size)
					>= copy_size)) {
					vm_map_copy_clip_end(copy, copy_entry,
						copy_entry->vme_start +
						(copy_size - local_size));
					entry_size = copy_entry->vme_end -
						copy_entry->vme_start;
					local_size += entry_size;
					new_offset += entry_size;
				}
				if (local_size >= copy_size) {
					next_copy = copy_entry->vme_next;
					copy_entry->vme_next =
						vm_map_copy_to_entry(copy);
					previous_prev =
						copy->cpy_hdr.links.prev;
					copy->cpy_hdr.links.prev = copy_entry;
					copy->size = copy_size;
					remaining_entries =
						copy->cpy_hdr.nentries;
					remaining_entries -= nentries;
					copy->cpy_hdr.nentries = nentries;
					break;
				} else {
					local_size += entry_size;
					new_offset += entry_size;
					nentries++;
				}
				copy_entry = copy_entry->vme_next;
			}
		}

		if (aligned) {
			pmap_t	local_pmap;

			if (pmap)
				local_pmap = pmap;
			else
				local_pmap = dst_map->pmap;

			if ((kr = vm_map_copy_overwrite_aligned(
					dst_map, tmp_entry, copy,
					base_addr, local_pmap)) != KERN_SUCCESS) {
				if (next_copy != NULL) {
					copy->cpy_hdr.nentries +=
						remaining_entries;
					copy->cpy_hdr.links.prev->vme_next =
						next_copy;
					copy->cpy_hdr.links.prev =
						previous_prev;
					copy->size += copy_size;
				}
				return kr;
			}
			vm_map_unlock(dst_map);
		} else {
			/*
			 * Performance gain:
			 *
			 * if the copy and dst address are misaligned but the
			 * same offset within the page we can copy_not_aligned
			 * the misaligned parts and copy aligned the rest.  If
			 * they are aligned but len is unaligned we simply need
			 * to copy the end bit unaligned.  We'll need to split
			 * the misaligned bits of the region in this case!
			 */
			/* ALWAYS UNLOCKS THE dst_map MAP */
			if ((kr = vm_map_copy_overwrite_unaligned(dst_map,
					tmp_entry, copy, base_addr)) != KERN_SUCCESS) {
				if (next_copy != NULL) {
					copy->cpy_hdr.nentries +=
						remaining_entries;
					copy->cpy_hdr.links.prev->vme_next =
						next_copy;
					copy->cpy_hdr.links.prev =
						previous_prev;
					copy->size += copy_size;
				}
				return kr;
			}
		}
		total_size -= copy_size;
		if (total_size == 0)
			break;
		base_addr += copy_size;
		copy_size = 0;
		copy->offset = new_offset;
		if (next_copy != NULL) {
			copy->cpy_hdr.nentries = remaining_entries;
			copy->cpy_hdr.links.next = next_copy;
			copy->cpy_hdr.links.prev = previous_prev;
			next_copy->vme_prev = vm_map_copy_to_entry(copy);
			copy->size = total_size;
		}
		vm_map_lock(dst_map);
		while (TRUE) {
			if (!vm_map_lookup_entry(dst_map,
					base_addr, &tmp_entry)) {
				vm_map_unlock(dst_map);
				return(KERN_INVALID_ADDRESS);
			}
			if (tmp_entry->in_transition) {
				entry->needs_wakeup = TRUE;
				vm_map_entry_wait(dst_map, THREAD_UNINT);
			} else {
				break;
			}
		}
		vm_map_clip_start(dst_map, tmp_entry, vm_map_trunc_page(base_addr));

		entry = tmp_entry;
	} /* while */

	/*
	 * Throw away the vm_map_copy object
	 */
	vm_map_copy_discard(copy);

	return(KERN_SUCCESS);
}/* vm_map_copy_overwrite */

kern_return_t
vm_map_copy_overwrite(
	vm_map_t	dst_map,
	vm_map_offset_t	dst_addr,
	vm_map_copy_t	copy,
	boolean_t	interruptible)
{
	return vm_map_copy_overwrite_nested(
		dst_map, dst_addr, copy, interruptible, (pmap_t) NULL);
}
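
/*
 * End-to-end sketch (hedged; maps, addresses and "len" are hypothetical):
 * capture a source range as a copy object, then overwrite an existing,
 * writeable range in the destination map.  The copy object is consumed
 * only on success:
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copy_overwrite(dst_map, dst_addr,
 *					   copy, interruptible);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *	}
 */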


/*
 *	Routine:	vm_map_copy_overwrite_unaligned	[internal use only]
 *
 *	Description:
 *		Physically copy unaligned data
 *
 *	Implementation:
 *		Unaligned parts of pages have to be physically copied.  We use
 *		a modified form of vm_fault_copy (which understands non-aligned
 *		page offsets and sizes) to do the copy.  We attempt to copy as
 *		much memory in one go as possible, however vm_fault_copy copies
 *		within one memory object, so we have to find the smallest of
 *		"amount left", "source object data size" and "target object
 *		data size".  With unaligned data we don't need to split regions,
 *		therefore the source (copy) object should be one map entry; the
 *		target range may be split over multiple map entries, however.
 *		In any event we are pessimistic about these assumptions.
 *
 *	Assumptions:
 *		dst_map is locked on entry and is returned locked on success,
 *		unlocked on error.
 */

4825 | ||
91447636 | 4826 | static kern_return_t |
1c79356b A |
4827 | vm_map_copy_overwrite_unaligned( |
4828 | vm_map_t dst_map, | |
4829 | vm_map_entry_t entry, | |
4830 | vm_map_copy_t copy, | |
91447636 | 4831 | vm_map_offset_t start) |
1c79356b A |
4832 | { |
4833 | vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy); | |
4834 | vm_map_version_t version; | |
4835 | vm_object_t dst_object; | |
4836 | vm_object_offset_t dst_offset; | |
4837 | vm_object_offset_t src_offset; | |
4838 | vm_object_offset_t entry_offset; | |
91447636 A |
4839 | vm_map_offset_t entry_end; |
4840 | vm_map_size_t src_size, | |
1c79356b A |
4841 | dst_size, |
4842 | copy_size, | |
4843 | amount_left; | |
4844 | kern_return_t kr = KERN_SUCCESS; | |
4845 | ||
4846 | vm_map_lock_write_to_read(dst_map); | |
4847 | ||
4848 | src_offset = copy->offset - vm_object_trunc_page(copy->offset); | |
4849 | amount_left = copy->size; |
4850 | /* | |
4851 | * unaligned, so we never clipped this entry; we need the offset | |
4852 | * into the vm_object, not just the data. | |
4853 | */ | |
4854 | while (amount_left > 0) { | |
4855 | ||
4856 | if (entry == vm_map_to_entry(dst_map)) { | |
4857 | vm_map_unlock_read(dst_map); | |
4858 | return KERN_INVALID_ADDRESS; | |
4859 | } | |
4860 | ||
4861 | /* "start" must be within the current map entry */ | |
4862 | assert ((start>=entry->vme_start) && (start<entry->vme_end)); | |
4863 | ||
4864 | dst_offset = start - entry->vme_start; | |
4865 | ||
4866 | dst_size = entry->vme_end - start; | |
4867 | ||
4868 | src_size = copy_entry->vme_end - | |
4869 | (copy_entry->vme_start + src_offset); | |
4870 | ||
4871 | if (dst_size < src_size) { | |
4872 | /* | |
4873 | * we can only copy dst_size bytes before | |
4874 | * we have to get the next destination entry | |
4875 | */ | |
4876 | copy_size = dst_size; | |
4877 | } else { | |
4878 | /* | |
4879 | * we can only copy src_size bytes before | |
4880 | * we have to get the next source copy entry | |
4881 | */ | |
4882 | copy_size = src_size; | |
4883 | } | |
4884 | ||
4885 | if (copy_size > amount_left) { | |
4886 | copy_size = amount_left; | |
4887 | } | |
4888 | /* | |
4889 | * Entry needs copy; create a shadow object for the | |
4890 | * copy-on-write region. | |
4891 | */ | |
4892 | if (entry->needs_copy && | |
4893 | ((entry->protection & VM_PROT_WRITE) != 0)) | |
4894 | { | |
4895 | if (vm_map_lock_read_to_write(dst_map)) { | |
4896 | vm_map_lock_read(dst_map); | |
4897 | goto RetryLookup; | |
4898 | } | |
4899 | vm_object_shadow(&entry->object.vm_object, | |
4900 | &entry->offset, | |
4901 | (vm_map_size_t)(entry->vme_end | |
4902 | - entry->vme_start)); |
4903 | entry->needs_copy = FALSE; | |
4904 | vm_map_lock_write_to_read(dst_map); | |
4905 | } | |
4906 | dst_object = entry->object.vm_object; | |
4907 | /* | |
4908 | * unlike with the virtual (aligned) copy, we're going | |
4909 | * to fault on it; therefore we need a target object. | |
4910 | */ | |
4911 | if (dst_object == VM_OBJECT_NULL) { | |
4912 | if (vm_map_lock_read_to_write(dst_map)) { | |
4913 | vm_map_lock_read(dst_map); | |
4914 | goto RetryLookup; | |
4915 | } | |
4916 | dst_object = vm_object_allocate((vm_map_size_t) | |
4917 | entry->vme_end - entry->vme_start); |
4918 | entry->object.vm_object = dst_object; | |
4919 | entry->offset = 0; | |
4920 | vm_map_lock_write_to_read(dst_map); | |
4921 | } | |
4922 | /* | |
4923 | * Take an object reference and unlock map. The "entry" may | |
4924 | * disappear or change when the map is unlocked. | |
4925 | */ | |
4926 | vm_object_reference(dst_object); | |
4927 | version.main_timestamp = dst_map->timestamp; | |
4928 | entry_offset = entry->offset; | |
4929 | entry_end = entry->vme_end; | |
4930 | vm_map_unlock_read(dst_map); | |
4931 | /* | |
4932 | * Copy as much as possible in one pass | |
4933 | */ | |
4934 | kr = vm_fault_copy( | |
4935 | copy_entry->object.vm_object, | |
4936 | copy_entry->offset + src_offset, | |
4937 | &copy_size, | |
4938 | dst_object, | |
4939 | entry_offset + dst_offset, | |
4940 | dst_map, | |
4941 | &version, | |
4942 | THREAD_UNINT ); | |
4943 | ||
4944 | start += copy_size; | |
4945 | src_offset += copy_size; | |
4946 | amount_left -= copy_size; | |
4947 | /* | |
4948 | * Release the object reference | |
4949 | */ | |
4950 | vm_object_deallocate(dst_object); | |
4951 | /* | |
4952 | * If a hard error occurred, return it now | |
4953 | */ | |
4954 | if (kr != KERN_SUCCESS) | |
4955 | return kr; | |
4956 | ||
4957 | if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end | |
4958 | || amount_left == 0) | |
4959 | { | |
4960 | /* | |
4961 | * all done with this copy entry, dispose. | |
4962 | */ | |
4963 | vm_map_copy_entry_unlink(copy, copy_entry); | |
4964 | vm_object_deallocate(copy_entry->object.vm_object); | |
4965 | vm_map_copy_entry_dispose(copy, copy_entry); | |
4966 | ||
4967 | if ((copy_entry = vm_map_copy_first_entry(copy)) | |
4968 | == vm_map_copy_to_entry(copy) && amount_left) { | |
4969 | /* | |
4971 | * not finished copying but ran out of source | |
4971 | */ | |
4972 | return KERN_INVALID_ADDRESS; | |
4973 | } | |
4974 | src_offset = 0; | |
4975 | } | |
4976 | ||
4977 | if (amount_left == 0) | |
4978 | return KERN_SUCCESS; | |
4979 | ||
4980 | vm_map_lock_read(dst_map); | |
4981 | if (version.main_timestamp == dst_map->timestamp) { | |
4982 | if (start == entry_end) { | |
4983 | /* | |
4984 | * destination region is split. Use the version | |
4985 | * information to avoid a lookup in the normal | |
4986 | * case. | |
4987 | */ | |
4988 | entry = entry->vme_next; | |
4989 | /* | |
4990 | * should be contiguous. Fail if we encounter | |
4991 | * a hole in the destination. | |
4992 | */ | |
4993 | if (start != entry->vme_start) { | |
4994 | vm_map_unlock_read(dst_map); | |
4995 | return KERN_INVALID_ADDRESS ; | |
4996 | } | |
4997 | } | |
4998 | } else { | |
4999 | /* | |
5000 | * Map version check failed. | |
5001 | * we must lookup the entry because somebody | |
5002 | * might have changed the map behind our backs. | |
5003 | */ | |
5004 | RetryLookup: | |
5005 | if (!vm_map_lookup_entry(dst_map, start, &entry)) | |
5006 | { | |
5007 | vm_map_unlock_read(dst_map); | |
5008 | return KERN_INVALID_ADDRESS ; | |
5009 | } | |
5010 | } | |
5011 | }/* while */ | |
5012 | ||
5013 | return KERN_SUCCESS; |
5014 | }/* vm_map_copy_overwrite_unaligned */ | |
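/*
 * Illustrative sketch (not compiled): the per-pass size chosen above
 * is the largest run that fits the current destination entry, the
 * current source copy entry, and the amount still outstanding --
 * i.e. min(dst_size, src_size, amount_left), using the local names
 * from vm_map_copy_overwrite_unaligned().
 */
#if 0
	vm_map_size_t chunk;

	chunk = (dst_size < src_size) ? dst_size : src_size;
	if (chunk > amount_left)
		chunk = amount_left;
	/* chunk is what one vm_fault_copy() pass may move */
#endif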
5015 | ||
5016 | /* | |
5017 | * Routine: vm_map_copy_overwrite_aligned [internal use only] | |
5018 | * |
5019 | * Description: | |
5020 | * Does all the vm_trickery possible for whole pages. | |
5021 | * | |
5022 | * Implementation: | |
5023 | * | |
5024 | * If there are no permanent objects in the destination, | |
5025 | * and the source and destination map entry zones match, | |
5026 | * and the destination map entry is not shared, | |
5027 | * then the map entries can be deleted and replaced | |
5028 | * with those from the copy. The following code is the | |
5029 | * basic idea of what to do, but there are lots of annoying | |
5030 | * little details about getting protection and inheritance | |
5031 | * right. Should add protection, inheritance, and sharing checks | |
5032 | * to the above pass and make sure that no wiring is involved. | |
5033 | */ | |
5034 | ||
5035 | static kern_return_t | |
5036 | vm_map_copy_overwrite_aligned( |
5037 | vm_map_t dst_map, | |
5038 | vm_map_entry_t tmp_entry, | |
5039 | vm_map_copy_t copy, | |
5040 | vm_map_offset_t start, | |
5041 | #if !BAD_OPTIMIZATION | |
5042 | __unused | |
5043 | #endif /* !BAD_OPTIMIZATION */ | |
5044 | pmap_t pmap) |
5045 | { | |
5046 | vm_object_t object; | |
5047 | vm_map_entry_t copy_entry; | |
5048 | vm_map_size_t copy_size; | |
5049 | vm_map_size_t size; | |
5050 | vm_map_entry_t entry; |
5051 | ||
5052 | while ((copy_entry = vm_map_copy_first_entry(copy)) | |
5053 | != vm_map_copy_to_entry(copy)) | |
5054 | { | |
5055 | copy_size = (copy_entry->vme_end - copy_entry->vme_start); | |
5056 | ||
5057 | entry = tmp_entry; | |
5058 | if (entry == vm_map_to_entry(dst_map)) { | |
5059 | vm_map_unlock(dst_map); | |
5060 | return KERN_INVALID_ADDRESS; | |
5061 | } | |
5062 | size = (entry->vme_end - entry->vme_start); | |
5063 | /* | |
5064 | * Make sure that no holes popped up in the | |
5065 | * address map, and that the protection is | |
5066 | * still valid, in case the map was unlocked | |
5067 | * earlier. | |
5068 | */ | |
5069 | ||
5070 | if ((entry->vme_start != start) || ((entry->is_sub_map) | |
5071 | && !entry->needs_copy)) { | |
5072 | vm_map_unlock(dst_map); | |
5073 | return(KERN_INVALID_ADDRESS); | |
5074 | } | |
5075 | assert(entry != vm_map_to_entry(dst_map)); | |
5076 | ||
5077 | /* | |
5078 | * Check protection again | |
5079 | */ | |
5080 | ||
5081 | if ( ! (entry->protection & VM_PROT_WRITE)) { | |
5082 | vm_map_unlock(dst_map); | |
5083 | return(KERN_PROTECTION_FAILURE); | |
5084 | } | |
5085 | ||
5086 | /* | |
5087 | * Adjust to source size first | |
5088 | */ | |
5089 | ||
5090 | if (copy_size < size) { | |
5091 | vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size); | |
5092 | size = copy_size; | |
5093 | } | |
5094 | ||
5095 | /* | |
5096 | * Adjust to destination size | |
5097 | */ | |
5098 | ||
5099 | if (size < copy_size) { | |
5100 | vm_map_copy_clip_end(copy, copy_entry, | |
5101 | copy_entry->vme_start + size); | |
5102 | copy_size = size; | |
5103 | } | |
5104 | ||
5105 | assert((entry->vme_end - entry->vme_start) == size); | |
5106 | assert((tmp_entry->vme_end - tmp_entry->vme_start) == size); | |
5107 | assert((copy_entry->vme_end - copy_entry->vme_start) == size); | |
5108 | ||
5109 | /* | |
5110 | * If the destination contains temporary unshared memory, | |
5111 | * we can perform the copy by throwing it away and | |
5112 | * installing the source data. | |
5113 | */ | |
5114 | ||
5115 | object = entry->object.vm_object; | |
5116 | if ((!entry->is_shared && | |
5117 | ((object == VM_OBJECT_NULL) || | |
5118 | (object->internal && !object->true_share))) || | |
5119 | entry->needs_copy) { | |
5120 | vm_object_t old_object = entry->object.vm_object; | |
5121 | vm_object_offset_t old_offset = entry->offset; | |
5122 | vm_object_offset_t offset; | |
5123 | ||
5124 | /* | |
5125 | * Ensure that the source and destination aren't | |
5126 | * identical | |
5127 | */ | |
5128 | if (old_object == copy_entry->object.vm_object && | |
5129 | old_offset == copy_entry->offset) { | |
5130 | vm_map_copy_entry_unlink(copy, copy_entry); | |
5131 | vm_map_copy_entry_dispose(copy, copy_entry); | |
5132 | ||
5133 | if (old_object != VM_OBJECT_NULL) | |
5134 | vm_object_deallocate(old_object); | |
5135 | ||
5136 | start = tmp_entry->vme_end; | |
5137 | tmp_entry = tmp_entry->vme_next; | |
5138 | continue; | |
5139 | } | |
5140 | ||
5141 | if (old_object != VM_OBJECT_NULL) { | |
5142 | if(entry->is_sub_map) { | |
5143 | if(entry->use_pmap) { | |
5144 | #ifndef i386 | |
5145 | pmap_unnest(dst_map->pmap, | |
5146 | entry->vme_start); | |
5147 | #endif | |
5148 | if(dst_map->mapped) { |
5149 | /* clean up parent */ | |
5150 | /* map/maps */ | |
5151 | vm_map_submap_pmap_clean( | |
5152 | dst_map, entry->vme_start, | |
5153 | entry->vme_end, | |
5154 | entry->object.sub_map, | |
5155 | entry->offset); | |
5156 | } | |
5157 | } else { | |
5158 | vm_map_submap_pmap_clean( | |
5159 | dst_map, entry->vme_start, | |
5160 | entry->vme_end, | |
5161 | entry->object.sub_map, | |
5162 | entry->offset); | |
5163 | } | |
5164 | vm_map_deallocate( | |
5165 | entry->object.sub_map); | |
5166 | } else { |
5167 | if(dst_map->mapped) { | |
5168 | vm_object_pmap_protect( | |
5169 | entry->object.vm_object, | |
5170 | entry->offset, | |
5171 | entry->vme_end | |
5172 | - entry->vme_start, | |
5173 | PMAP_NULL, | |
5174 | entry->vme_start, | |
5175 | VM_PROT_NONE); | |
5176 | } else { | |
5177 | pmap_remove(dst_map->pmap, |
5178 | (addr64_t)(entry->vme_start), | |
5179 | (addr64_t)(entry->vme_end)); | |
5180 | } | |
5181 | vm_object_deallocate(old_object); | |
5182 | } | |
5183 | } |
5184 | ||
5185 | entry->is_sub_map = FALSE; | |
5186 | entry->object = copy_entry->object; | |
5187 | object = entry->object.vm_object; | |
5188 | entry->needs_copy = copy_entry->needs_copy; | |
5189 | entry->wired_count = 0; | |
5190 | entry->user_wired_count = 0; | |
5191 | offset = entry->offset = copy_entry->offset; | |
5192 | ||
5193 | vm_map_copy_entry_unlink(copy, copy_entry); | |
5194 | vm_map_copy_entry_dispose(copy, copy_entry); | |
5195 | #if BAD_OPTIMIZATION | |
5196 | /* | |
5197 | * If we turn this optimization back on, we need to revisit our | |
5198 | * use of pmap mappings: large copies will cause us to run out | |
5199 | * and panic. This optimization only saved on average 2 us per | |
5200 | * page if ALL the pages in the source were currently mapped and | |
5201 | * ALL the pages in the dest were touched; if fewer than 2/3 of | |
5202 | * the pages were touched, this optimization actually cost more | |
5203 | * cycles than it saved. | |
5204 | */ | |
5205 | ||
5206 | /* | |
5207 | * Try to aggressively enter physical mappings | |
5208 | * (but avoid uninstantiated objects) | |
5209 | */ | |
5210 | if (object != VM_OBJECT_NULL) { | |
5211 | vm_map_offset_t va = entry->vme_start; | |
5212 | |
5213 | while (va < entry->vme_end) { | |
5214 | register vm_page_t m; | |
5215 | vm_prot_t prot; | |
5216 | ||
5217 | /* | |
5218 | * Look for the page in the top object | |
5219 | */ | |
5220 | prot = entry->protection; | |
5221 | vm_object_lock(object); | |
5222 | vm_object_paging_begin(object); | |
5223 | ||
5224 | /* |
5225 | * ENCRYPTED SWAP: | |
5226 | * If the page is encrypted, skip it: | |
5227 | * we can't let the user see the encrypted | |
5228 | * contents. The page will get decrypted | |
5229 | * on demand when the user generates a | |
5230 | * soft-fault when trying to access it. | |
5231 | */ | |
5232 | if ((m = vm_page_lookup(object,offset)) != | |
5233 | VM_PAGE_NULL && !m->busy && | |
5234 | !m->fictitious && !m->encrypted && | |
5235 | (!m->unusual || (!m->error && |
5236 | !m->restart && !m->absent && | |
5237 | (prot & m->page_lock) == 0))) { | |
5238 | ||
5239 | m->busy = TRUE; | |
5240 | vm_object_unlock(object); | |
5241 | ||
5242 | /* | |
5243 | * Honor COW obligations | |
5244 | */ | |
5245 | if (entry->needs_copy) | |
5246 | prot &= ~VM_PROT_WRITE; | |
5247 | /* It is our policy to require */ |
5248 | /* explicit sync from anyone */ | |
5249 | /* writing code and then */ | |
5250 | /* a pc to execute it. */ | |
5251 | /* No isync here */ | |
5252 | ||
5253 | PMAP_ENTER(pmap, va, m, prot, | |
5254 | ((unsigned int) |
5255 | (m->object->wimg_bits)) | |
5256 | & VM_WIMG_MASK, | |
5257 | FALSE); | |
5258 | |
5259 | vm_object_lock(object); | |
5260 | vm_page_lock_queues(); | |
5261 | if (!m->active && !m->inactive) | |
5262 | vm_page_activate(m); | |
5263 | vm_page_unlock_queues(); | |
5264 | PAGE_WAKEUP_DONE(m); | |
5265 | } | |
5266 | vm_object_paging_end(object); | |
5267 | vm_object_unlock(object); | |
5268 | ||
5269 | offset += PAGE_SIZE_64; | |
5270 | va += PAGE_SIZE; | |
5271 | } /* end while (va < entry->vme_end) */ | |
5272 | } /* end if (object) */ | |
5273 | #endif | |
5274 | /* | |
5275 | * Set up for the next iteration. The map | |
5276 | * has not been unlocked, so the next | |
5277 | * address should be at the end of this | |
5278 | * entry, and the next map entry should be | |
5279 | * the one following it. | |
5280 | */ | |
5281 | ||
5282 | start = tmp_entry->vme_end; | |
5283 | tmp_entry = tmp_entry->vme_next; | |
5284 | } else { | |
5285 | vm_map_version_t version; | |
5286 | vm_object_t dst_object = entry->object.vm_object; | |
5287 | vm_object_offset_t dst_offset = entry->offset; | |
5288 | kern_return_t r; | |
5289 | ||
5290 | /* | |
5291 | * Take an object reference, and record | |
5292 | * the map version information so that the | |
5293 | * map can be safely unlocked. | |
5294 | */ | |
5295 | ||
5296 | vm_object_reference(dst_object); | |
5297 | ||
5298 | /* account for unlock bumping up timestamp */ | |
5299 | version.main_timestamp = dst_map->timestamp + 1; | |
5300 | |
5301 | vm_map_unlock(dst_map); | |
5302 | ||
5303 | /* | |
5304 | * Copy as much as possible in one pass | |
5305 | */ | |
5306 | ||
5307 | copy_size = size; | |
5308 | r = vm_fault_copy( | |
5309 | copy_entry->object.vm_object, | |
5310 | copy_entry->offset, | |
5311 | &copy_size, | |
5312 | dst_object, | |
5313 | dst_offset, | |
5314 | dst_map, | |
5315 | &version, | |
5316 | THREAD_UNINT ); | |
5317 | ||
5318 | /* | |
5319 | * Release the object reference | |
5320 | */ | |
5321 | ||
5322 | vm_object_deallocate(dst_object); | |
5323 | ||
5324 | /* | |
5325 | * If a hard error occurred, return it now | |
5326 | */ | |
5327 | ||
5328 | if (r != KERN_SUCCESS) | |
5329 | return(r); | |
5330 | ||
5331 | if (copy_size != 0) { | |
5332 | /* | |
5333 | * Dispose of the copied region | |
5334 | */ | |
5335 | ||
5336 | vm_map_copy_clip_end(copy, copy_entry, | |
5337 | copy_entry->vme_start + copy_size); | |
5338 | vm_map_copy_entry_unlink(copy, copy_entry); | |
5339 | vm_object_deallocate(copy_entry->object.vm_object); | |
5340 | vm_map_copy_entry_dispose(copy, copy_entry); | |
5341 | } | |
5342 | ||
5343 | /* | |
5344 | * Pick up in the destination map where we left off. | |
5345 | * | |
5346 | * Use the version information to avoid a lookup | |
5347 | * in the normal case. | |
5348 | */ | |
5349 | ||
5350 | start += copy_size; | |
5351 | vm_map_lock(dst_map); | |
5352 | if (version.main_timestamp == dst_map->timestamp) { | |
5353 | /* We can safely use saved tmp_entry value */ |
5354 | ||
5355 | vm_map_clip_end(dst_map, tmp_entry, start); | |
5356 | tmp_entry = tmp_entry->vme_next; | |
5357 | } else { | |
5358 | /* Must do lookup of tmp_entry */ | |
5359 | ||
5360 | if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) { | |
5361 | vm_map_unlock(dst_map); | |
5362 | return(KERN_INVALID_ADDRESS); | |
5363 | } | |
5364 | vm_map_clip_start(dst_map, tmp_entry, start); | |
5365 | } | |
5366 | } | |
5367 | }/* while */ | |
5368 | ||
5369 | return(KERN_SUCCESS); | |
5370 | }/* vm_map_copy_overwrite_aligned */ | |
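/*
 * Illustrative sketch (not compiled): the mutual clipping performed
 * above guarantees that, before any data moves, the destination
 * entry and the source copy entry describe regions of exactly the
 * same length, so every iteration fully retires at least one entry
 * from one of the two lists.
 */
#if 0
	if (copy_size < size) {			/* dest entry longer */
		vm_map_clip_end(dst_map, entry,
				entry->vme_start + copy_size);
		size = copy_size;
	} else if (size < copy_size) {		/* copy entry longer */
		vm_map_copy_clip_end(copy, copy_entry,
				     copy_entry->vme_start + size);
		copy_size = size;
	}
	assert(size == copy_size);
#endif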
5371 | ||
5372 | /* | |
5373 | * Routine: vm_map_copyin_kernel_buffer [internal use only] | |
5374 | * |
5375 | * Description: | |
5376 | * Copy in data to a kernel buffer from space in the | |
5377 | * source map. The original space may be optionally | |
5378 | * deallocated. |
5379 | * | |
5380 | * If successful, returns a new copy object. | |
5381 | */ | |
5382 | static kern_return_t | |
5383 | vm_map_copyin_kernel_buffer( |
5384 | vm_map_t src_map, | |
5385 | vm_map_offset_t src_addr, | |
5386 | vm_map_size_t len, | |
5387 | boolean_t src_destroy, |
5388 | vm_map_copy_t *copy_result) | |
5389 | { | |
5390 | kern_return_t kr; | |
5391 | vm_map_copy_t copy; | |
5392 | vm_map_size_t kalloc_size = sizeof(struct vm_map_copy) + len; | |
5393 | |
5394 | copy = (vm_map_copy_t) kalloc(kalloc_size); | |
5395 | if (copy == VM_MAP_COPY_NULL) { | |
5396 | return KERN_RESOURCE_SHORTAGE; | |
5397 | } | |
5398 | copy->type = VM_MAP_COPY_KERNEL_BUFFER; | |
5399 | copy->size = len; | |
5400 | copy->offset = 0; | |
5401 | copy->cpy_kdata = (void *) (copy + 1); | |
5402 | copy->cpy_kalloc_size = kalloc_size; |
5403 | ||
5404 | kr = copyinmap(src_map, src_addr, copy->cpy_kdata, len); | |
5405 | if (kr != KERN_SUCCESS) { | |
5406 | kfree(copy, kalloc_size); | |
5407 | return kr; | |
5408 | } |
5409 | if (src_destroy) { | |
5410 | (void) vm_map_remove(src_map, vm_map_trunc_page(src_addr), |
5411 | vm_map_round_page(src_addr + len), | |
5412 | VM_MAP_REMOVE_INTERRUPTIBLE | | |
5413 | VM_MAP_REMOVE_WAIT_FOR_KWIRE | | |
5414 | (src_map == kernel_map) ? | |
5415 | VM_MAP_REMOVE_KUNWIRE : 0); | |
5416 | } |
5417 | *copy_result = copy; | |
5418 | return KERN_SUCCESS; | |
5419 | } | |
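/*
 * Illustrative sketch (not compiled): the kernel-buffer copy is one
 * kalloc'ed block -- the vm_map_copy header immediately followed by
 * the payload -- which is why cpy_kdata can simply be set to
 * (copy + 1):
 *
 *	+--------------------+------------------------+
 *	| struct vm_map_copy |   len bytes of data    |
 *	+--------------------+------------------------+
 *	^ copy               ^ copy->cpy_kdata
 */
#if 0
	assert((vm_offset_t) copy->cpy_kdata ==
	       (vm_offset_t) copy + sizeof(struct vm_map_copy));
#endif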
5420 | ||
5421 | /* | |
5422 | * Routine: vm_map_copyout_kernel_buffer [internal use only] | |
5423 | * |
5424 | * Description: | |
5425 | * Copy out data from a kernel buffer into space in the | |
5426 | * destination map. The space may be optionally dynamically | |
5427 | * allocated. | |
5428 | * | |
5429 | * If successful, consumes the copy object. | |
5430 | * Otherwise, the caller is responsible for it. | |
5431 | */ | |
5432 | static int vm_map_copyout_kernel_buffer_failures = 0; | |
5433 | static kern_return_t | |
5434 | vm_map_copyout_kernel_buffer( | |
5435 | vm_map_t map, |
5436 | vm_map_address_t *addr, /* IN/OUT */ | |
5437 | vm_map_copy_t copy, | |
5438 | boolean_t overwrite) | |
5439 | { |
5440 | kern_return_t kr = KERN_SUCCESS; | |
5441 | thread_t thread = current_thread(); | |
5442 | |
5443 | if (!overwrite) { | |
5444 | ||
5445 | /* | |
5446 | * Allocate space in the target map for the data | |
5447 | */ | |
5448 | *addr = 0; | |
5449 | kr = vm_map_enter(map, | |
5450 | addr, | |
5451 | vm_map_round_page(copy->size), | |
5452 | (vm_map_offset_t) 0, | |
5453 | VM_FLAGS_ANYWHERE, | |
5454 | VM_OBJECT_NULL, |
5455 | (vm_object_offset_t) 0, | |
5456 | FALSE, | |
5457 | VM_PROT_DEFAULT, | |
5458 | VM_PROT_ALL, | |
5459 | VM_INHERIT_DEFAULT); | |
5460 | if (kr != KERN_SUCCESS) | |
5461 | return kr; | |
5462 | } |
5463 | ||
5464 | /* | |
5465 | * Copyout the data from the kernel buffer to the target map. | |
5466 | */ | |
5467 | if (thread->map == map) { | |
5468 | |
5469 | /* | |
5470 | * If the target map is the current map, just do | |
5471 | * the copy. | |
5472 | */ | |
5473 | if (copyout(copy->cpy_kdata, *addr, copy->size)) { | |
5474 | kr = KERN_INVALID_ADDRESS; | |
5475 | } |
5476 | } | |
5477 | else { | |
5478 | vm_map_t oldmap; | |
5479 | ||
5480 | /* | |
5481 | * If the target map is another map, assume the | |
5482 | * target's address space identity for the duration | |
5483 | * of the copy. | |
5484 | */ | |
5485 | vm_map_reference(map); | |
5486 | oldmap = vm_map_switch(map); | |
5487 | ||
5488 | if (copyout(copy->cpy_kdata, *addr, copy->size)) { | |
5489 | vm_map_copyout_kernel_buffer_failures++; | |
5490 | kr = KERN_INVALID_ADDRESS; | |
5491 | } |
5492 | ||
5493 | (void) vm_map_switch(oldmap); | |
5494 | vm_map_deallocate(map); | |
5495 | } | |
5496 | ||
5497 | if (kr != KERN_SUCCESS) { |
5498 | /* the copy failed, clean up */ | |
5499 | if (!overwrite) { | |
5500 | /* | |
5501 | * Deallocate the space we allocated in the target map. | |
5502 | */ | |
5503 | (void) vm_map_remove(map, | |
5504 | vm_map_trunc_page(*addr), | |
5505 | vm_map_round_page(*addr + | |
5506 | vm_map_round_page(copy->size)), | |
5507 | VM_MAP_NO_FLAGS); | |
5508 | *addr = 0; | |
5509 | } | |
5510 | } else { | |
5511 | /* copy was successful, discard the copy structure */ | |
5512 | kfree(copy, copy->cpy_kalloc_size); | |
5513 | } | |
5514 | ||
5515 | return kr; | |
5516 | } |
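/*
 * Illustrative sketch (not compiled) of the identity switch used
 * above: copyout() always targets the current thread's map, so for
 * a foreign map we temporarily adopt its address space and restore
 * the original map afterwards.
 */
#if 0
	vm_map_t oldmap;

	vm_map_reference(map);		/* keep map alive while switched */
	oldmap = vm_map_switch(map);
	/* ... copyout() now operates on "map" ... */
	(void) vm_map_switch(oldmap);
	vm_map_deallocate(map);
#endif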
5517 | ||
5518 | /* | |
5519 | * Macro: vm_map_copy_insert | |
5520 | * | |
5521 | * Description: | |
5522 | * Link a copy chain ("copy") into a map at the | |
5523 | * specified location (after "where"). | |
5524 | * Side effects: | |
5525 | * The copy chain is destroyed. | |
5526 | * Warning: | |
5527 | * The arguments are evaluated multiple times. | |
5528 | */ | |
5529 | #define vm_map_copy_insert(map, where, copy) \ | |
5530 | MACRO_BEGIN \ | |
5531 | vm_map_t VMCI_map; \ | |
5532 | vm_map_entry_t VMCI_where; \ | |
5533 | vm_map_copy_t VMCI_copy; \ | |
5534 | VMCI_map = (map); \ | |
5535 | VMCI_where = (where); \ | |
5536 | VMCI_copy = (copy); \ | |
5537 | ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\ | |
5538 | ->vme_next = (VMCI_where->vme_next); \ | |
5539 | ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \ | |
5540 | ->vme_prev = VMCI_where; \ | |
5541 | VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \ | |
5542 | UPDATE_FIRST_FREE(VMCI_map, VMCI_map->first_free); \ | |
5543 | zfree(vm_map_copy_zone, VMCI_copy); \ | |
5544 | MACRO_END |
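/*
 * Example (illustrative sketch, not compiled): the usual invocation
 * splices a whole copy chain in after an entry found earlier, as
 * vm_map_copyout() does below.  "last" and "copy" are whatever the
 * caller holds; once the macro returns, "copy" must not be used
 * again -- its entries now belong to the map and its header has
 * been freed.
 */
#if 0
	vm_map_copy_insert(dst_map, last, copy);
#endif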
5545 | ||
5546 | /* | |
5547 | * Routine: vm_map_copyout | |
5548 | * | |
5549 | * Description: | |
5550 | * Copy out a copy chain ("copy") into newly-allocated | |
5551 | * space in the destination map. | |
5552 | * | |
5553 | * If successful, consumes the copy object. | |
5554 | * Otherwise, the caller is responsible for it. | |
5555 | */ | |
5556 | kern_return_t | |
5557 | vm_map_copyout( | |
5558 | vm_map_t dst_map, | |
5559 | vm_map_address_t *dst_addr, /* OUT */ | |
5560 | vm_map_copy_t copy) | |
5561 | { | |
5562 | vm_map_size_t size; | |
5563 | vm_map_size_t adjustment; | |
5564 | vm_map_offset_t start; | |
5565 | vm_object_offset_t vm_copy_start; |
5566 | vm_map_entry_t last; | |
5567 | register | |
5568 | vm_map_entry_t entry; | |
5569 | ||
5570 | /* | |
5571 | * Check for null copy object. | |
5572 | */ | |
5573 | ||
5574 | if (copy == VM_MAP_COPY_NULL) { | |
5575 | *dst_addr = 0; | |
5576 | return(KERN_SUCCESS); | |
5577 | } | |
5578 | ||
5579 | /* | |
5580 | * Check for special copy object, created | |
5581 | * by vm_map_copyin_object. | |
5582 | */ | |
5583 | ||
5584 | if (copy->type == VM_MAP_COPY_OBJECT) { | |
5585 | vm_object_t object = copy->cpy_object; | |
5586 | kern_return_t kr; | |
5587 | vm_object_offset_t offset; | |
5588 | ||
5589 | offset = vm_object_trunc_page(copy->offset); | |
5590 | size = vm_map_round_page(copy->size + | |
5591 | (vm_map_size_t)(copy->offset - offset)); | |
5592 | *dst_addr = 0; |
5593 | kr = vm_map_enter(dst_map, dst_addr, size, | |
5594 | (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE, | |
1c79356b A |
5595 | object, offset, FALSE, |
5596 | VM_PROT_DEFAULT, VM_PROT_ALL, | |
5597 | VM_INHERIT_DEFAULT); | |
5598 | if (kr != KERN_SUCCESS) | |
5599 | return(kr); | |
5600 | /* Account for non-pagealigned copy object */ | |
5601 | *dst_addr += (vm_map_offset_t)(copy->offset - offset); | |
5602 | zfree(vm_map_copy_zone, copy); | |
5603 | return(KERN_SUCCESS); |
5604 | } | |
5605 | ||
5606 | /* | |
5607 | * Check for special kernel buffer allocated | |
5608 | * by new_ipc_kmsg_copyin. | |
5609 | */ | |
5610 | ||
5611 | if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { | |
5612 | return(vm_map_copyout_kernel_buffer(dst_map, dst_addr, | |
5613 | copy, FALSE)); | |
5614 | } | |
5615 | ||
5616 | /* |
5617 | * Find space for the data | |
5618 | */ | |
5619 | ||
5620 | vm_copy_start = vm_object_trunc_page(copy->offset); | |
5621 | size = vm_map_round_page((vm_map_size_t)copy->offset + copy->size) | |
5622 | - vm_copy_start; |
5623 | ||
5624 | StartAgain: ; | |
5625 | ||
5626 | vm_map_lock(dst_map); | |
5627 | assert(first_free_is_valid(dst_map)); | |
5628 | start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ? | |
5629 | vm_map_min(dst_map) : last->vme_end; | |
5630 | ||
5631 | while (TRUE) { | |
5632 | vm_map_entry_t next = last->vme_next; | |
5633 | vm_map_offset_t end = start + size; | |
5634 | |
5635 | if ((end > dst_map->max_offset) || (end < start)) { | |
5636 | if (dst_map->wait_for_space) { | |
5637 | if (size <= (dst_map->max_offset - dst_map->min_offset)) { | |
5638 | assert_wait((event_t) dst_map, | |
5639 | THREAD_INTERRUPTIBLE); | |
5640 | vm_map_unlock(dst_map); | |
5641 | thread_block(THREAD_CONTINUE_NULL); | |
5642 | goto StartAgain; |
5643 | } | |
5644 | } | |
5645 | vm_map_unlock(dst_map); | |
5646 | return(KERN_NO_SPACE); | |
5647 | } | |
5648 | ||
5649 | if ((next == vm_map_to_entry(dst_map)) || | |
5650 | (next->vme_start >= end)) | |
5651 | break; | |
5652 | ||
5653 | last = next; | |
5654 | start = last->vme_end; | |
5655 | } | |
5656 | ||
5657 | /* | |
5658 | * Since we're going to just drop the map | |
5659 | * entries from the copy into the destination | |
5660 | * map, they must come from the same pool. | |
5661 | */ | |
5662 | ||
5663 | if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) { | |
5664 | /* | |
5665 | * Mismatches occur when dealing with the default | |
5666 | * pager. | |
5667 | */ | |
5668 | zone_t old_zone; | |
5669 | vm_map_entry_t next, new; | |
5670 | ||
5671 | /* | |
5672 | * Find the zone that the copies were allocated from | |
5673 | */ | |
5674 | old_zone = (copy->cpy_hdr.entries_pageable) | |
5675 | ? vm_map_entry_zone | |
5676 | : vm_map_kentry_zone; | |
5677 | entry = vm_map_copy_first_entry(copy); | |
5678 | ||
5679 | /* | |
5680 | * Reinitialize the copy so that vm_map_copy_entry_link | |
5681 | * will work. | |
5682 | */ | |
5683 | copy->cpy_hdr.nentries = 0; | |
5684 | copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable; | |
5685 | vm_map_copy_first_entry(copy) = | |
5686 | vm_map_copy_last_entry(copy) = | |
5687 | vm_map_copy_to_entry(copy); | |
5688 | ||
5689 | /* | |
5690 | * Copy each entry. | |
5691 | */ | |
5692 | while (entry != vm_map_copy_to_entry(copy)) { | |
5693 | new = vm_map_copy_entry_create(copy); | |
5694 | vm_map_entry_copy_full(new, entry); | |
5695 | new->use_pmap = FALSE; /* clr address space specifics */ | |
5696 | vm_map_copy_entry_link(copy, | |
5697 | vm_map_copy_last_entry(copy), | |
5698 | new); | |
5699 | next = entry->vme_next; | |
5700 | zfree(old_zone, entry); | |
5701 | entry = next; |
5702 | } | |
5703 | } | |
5704 | ||
5705 | /* | |
5706 | * Adjust the addresses in the copy chain, and | |
5707 | * reset the region attributes. | |
5708 | */ | |
5709 | ||
5710 | adjustment = start - vm_copy_start; | |
5711 | for (entry = vm_map_copy_first_entry(copy); | |
5712 | entry != vm_map_copy_to_entry(copy); | |
5713 | entry = entry->vme_next) { | |
5714 | entry->vme_start += adjustment; | |
5715 | entry->vme_end += adjustment; | |
5716 | ||
5717 | entry->inheritance = VM_INHERIT_DEFAULT; | |
5718 | entry->protection = VM_PROT_DEFAULT; | |
5719 | entry->max_protection = VM_PROT_ALL; | |
5720 | entry->behavior = VM_BEHAVIOR_DEFAULT; | |
5721 | ||
5722 | /* | |
5723 | * If the entry is now wired, | |
5724 | * map the pages into the destination map. | |
5725 | */ | |
5726 | if (entry->wired_count != 0) { | |
5727 | register vm_map_offset_t va; | |
5728 | vm_object_offset_t offset; |
5729 | register vm_object_t object; | |
5730 | ||
5731 | object = entry->object.vm_object; | |
5732 | offset = entry->offset; | |
5733 | va = entry->vme_start; | |
5734 | ||
5735 | pmap_pageable(dst_map->pmap, | |
5736 | entry->vme_start, | |
5737 | entry->vme_end, | |
5738 | TRUE); | |
5739 | ||
5740 | while (va < entry->vme_end) { | |
5741 | register vm_page_t m; | |
5742 | ||
5743 | /* | |
5744 | * Look up the page in the object. | |
5745 | * Assert that the page will be found in the | |
5746 | * top object: | |
5747 | * either | |
5748 | * the object was newly created by | |
5749 | * vm_object_copy_slowly, and has | |
5750 | * copies of all of the pages from | |
5751 | * the source object | |
5752 | * or | |
5753 | * the object was moved from the old | |
5754 | * map entry; because the old map | |
5755 | * entry was wired, all of the pages | |
5756 | * were in the top-level object. | |
5757 | * (XXX not true if we wire pages for | |
5758 | * reading) | |
5759 | */ | |
5760 | vm_object_lock(object); | |
5761 | vm_object_paging_begin(object); | |
5762 | ||
5763 | m = vm_page_lookup(object, offset); | |
5764 | if (m == VM_PAGE_NULL || m->wire_count == 0 || | |
5765 | m->absent) | |
5766 | panic("vm_map_copyout: wiring 0x%x", m); | |
5767 | ||
5768 | /* |
5769 | * ENCRYPTED SWAP: | |
5770 | * The page is assumed to be wired here, so it | |
5771 | * shouldn't be encrypted. Otherwise, we | |
5772 | * couldn't enter it in the page table, since | |
5773 | * we don't want the user to see the encrypted | |
5774 | * data. | |
5775 | */ | |
5776 | ASSERT_PAGE_DECRYPTED(m); | |
5777 | ||
5778 | m->busy = TRUE; |
5779 | vm_object_unlock(object); | |
5780 | ||
5781 | PMAP_ENTER(dst_map->pmap, va, m, entry->protection, | |
5782 | ((unsigned int) |
5783 | (m->object->wimg_bits)) | |
5784 | & VM_WIMG_MASK, | |
5785 | TRUE); | |
5786 | |
5787 | vm_object_lock(object); | |
5788 | PAGE_WAKEUP_DONE(m); | |
5789 | /* the page is wired, so we don't have to activate */ | |
5790 | vm_object_paging_end(object); | |
5791 | vm_object_unlock(object); | |
5792 | ||
5793 | offset += PAGE_SIZE_64; | |
5794 | va += PAGE_SIZE; | |
5795 | } | |
5796 | } | |
5797 | else if (size <= vm_map_aggressive_enter_max) { | |
5798 | ||
5799 | register vm_map_offset_t va; | |
5800 | vm_object_offset_t offset; |
5801 | register vm_object_t object; | |
5802 | vm_prot_t prot; | |
5803 | ||
5804 | object = entry->object.vm_object; | |
5805 | if (object != VM_OBJECT_NULL) { | |
5806 | ||
5807 | offset = entry->offset; | |
5808 | va = entry->vme_start; | |
5809 | while (va < entry->vme_end) { | |
5810 | register vm_page_t m; | |
5811 | ||
5812 | /* | |
5813 | * Look up the page in the object. | |
5814 | * Assert that the page will be found | |
5815 | * in the top object if at all... | |
5816 | */ | |
5817 | vm_object_lock(object); | |
5818 | vm_object_paging_begin(object); | |
5819 | ||
5820 | /* |
5821 | * ENCRYPTED SWAP: | |
5822 | * If the page is encrypted, skip it: | |
5823 | * we can't let the user see the | |
5824 | * encrypted contents. The page will | |
5825 | * get decrypted on demand when the | |
5826 | * user generates a soft-fault when | |
5827 | * trying to access it. | |
5828 | */ | |
5829 | if (((m = vm_page_lookup(object, |
5830 | offset)) | |
5831 | != VM_PAGE_NULL) && | |
5832 | !m->busy && !m->fictitious && | |
5833 | !m->encrypted && | |
5834 | !m->absent && !m->error) { |
5835 | m->busy = TRUE; | |
5836 | vm_object_unlock(object); | |
5837 | ||
5838 | /* honor cow obligations */ | |
5839 | prot = entry->protection; | |
5840 | if (entry->needs_copy) | |
5841 | prot &= ~VM_PROT_WRITE; | |
5842 | ||
5843 | PMAP_ENTER(dst_map->pmap, va, | |
5844 | m, prot, | |
5845 | ((unsigned int) | |
5846 | (m->object->wimg_bits)) | |
5847 | & VM_WIMG_MASK, | |
5848 | FALSE); | |
5849 | |
5850 | vm_object_lock(object); | |
5851 | vm_page_lock_queues(); | |
5852 | if (!m->active && !m->inactive) | |
5853 | vm_page_activate(m); | |
5854 | vm_page_unlock_queues(); | |
5855 | PAGE_WAKEUP_DONE(m); | |
5856 | } | |
5857 | vm_object_paging_end(object); | |
5858 | vm_object_unlock(object); | |
5859 | ||
5860 | offset += PAGE_SIZE_64; | |
5861 | va += PAGE_SIZE; | |
5862 | } | |
5863 | } | |
5864 | } | |
5865 | } | |
5866 | ||
5867 | /* | |
5868 | * Correct the page alignment for the result | |
5869 | */ | |
5870 | ||
5871 | *dst_addr = start + (copy->offset - vm_copy_start); | |
5872 | ||
5873 | /* | |
5874 | * Update the hints and the map size | |
5875 | */ | |
5876 | ||
5877 | SAVE_HINT(dst_map, vm_map_copy_last_entry(copy)); | |
5878 | ||
5879 | dst_map->size += size; | |
5880 | ||
5881 | /* | |
5882 | * Link in the copy | |
5883 | */ | |
5884 | ||
5885 | vm_map_copy_insert(dst_map, last, copy); | |
5886 | ||
5887 | vm_map_unlock(dst_map); | |
5888 | ||
5889 | /* | |
5890 | * XXX If wiring_required, call vm_map_pageable | |
5891 | */ | |
5892 | ||
5893 | return(KERN_SUCCESS); | |
5894 | } | |
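/*
 * Example (illustrative sketch, not compiled): the receive side of
 * an out-of-line transfer.  vm_map_copyout() chooses the address;
 * on success it consumes the copy object, on failure the caller
 * must still discard it.  "task_map" and "copy" are hypothetical.
 */
#if 0
	vm_map_address_t	rcv_addr;
	kern_return_t		kr;

	kr = vm_map_copyout(task_map, &rcv_addr, copy);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);
#endif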
5895 | ||
5896 | /* |
5897 | * Routine: vm_map_copyin | |
5898 | * | |
5899 | * Description: | |
5900 | * Copy the specified region (src_addr, len) from the | |
5901 | * source address space (src_map), possibly removing | |
5902 | * the region from the source address space (src_destroy). | |
5903 | * | |
5904 | * Returns: | |
5905 | * A vm_map_copy_t object (copy_result), suitable for | |
5906 | * insertion into another address space (using vm_map_copyout), | |
5907 | * copying over another address space region (using | |
5908 | * vm_map_copy_overwrite). If the copy is unused, it | |
5909 | * should be destroyed (using vm_map_copy_discard). | |
5910 | * | |
5911 | * In/out conditions: | |
5912 | * The source map should not be locked on entry. | |
5913 | */ | |
5914 | ||
5915 | typedef struct submap_map { | |
5916 | vm_map_t parent_map; | |
5917 | vm_map_offset_t base_start; | |
5918 | vm_map_offset_t base_end; | |
5919 | struct submap_map *next; |
5920 | } submap_map_t; | |
5921 | ||
5922 | kern_return_t | |
5923 | vm_map_copyin_common( | |
5924 | vm_map_t src_map, | |
5925 | vm_map_address_t src_addr, | |
5926 | vm_map_size_t len, | |
5927 | boolean_t src_destroy, | |
5928 | __unused boolean_t src_volatile, | |
5929 | vm_map_copy_t *copy_result, /* OUT */ |
5930 | boolean_t use_maxprot) | |
5931 | { | |
5932 | vm_map_entry_t tmp_entry; /* Result of last map lookup -- |
5933 | * in multi-level lookup, this | |
5934 | * entry contains the actual | |
5935 | * vm_object/offset. | |
5936 | */ | |
5937 | register | |
5938 | vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */ | |
5939 | ||
5940 | vm_map_offset_t src_start; /* Start of current entry -- | |
5941 | * where copy is taking place now | |
5942 | */ | |
5943 | vm_map_offset_t src_end; /* End of entire region to be | |
5944 | * copied */ | |
5945 | vm_map_t base_map = src_map; | |
1c79356b A |
5946 | boolean_t map_share=FALSE; |
5947 | submap_map_t *parent_maps = NULL; | |
5948 | ||
5949 | register | |
5950 | vm_map_copy_t copy; /* Resulting copy */ | |
5951 | vm_map_address_t copy_addr; | |
5952 | |
5953 | /* | |
5954 | * Check for copies of zero bytes. | |
5955 | */ | |
5956 | ||
5957 | if (len == 0) { | |
5958 | *copy_result = VM_MAP_COPY_NULL; | |
5959 | return(KERN_SUCCESS); | |
5960 | } | |
5961 | ||
5962 | /* | |
5963 | * Check that the end address doesn't overflow | |
5964 | */ | |
5965 | src_end = src_addr + len; | |
5966 | if (src_end < src_addr) | |
5967 | return KERN_INVALID_ADDRESS; | |
5968 | ||
5969 | /* | |
5970 | * If the copy is sufficiently small, use a kernel buffer instead | |
5971 | * of making a virtual copy. The theory being that the cost of | |
5972 | * setting up VM (and taking C-O-W faults) dominates the copy costs | |
5973 | * for small regions. | |
5974 | */ | |
5975 | if ((len < msg_ool_size_small) && !use_maxprot) | |
5976 | return vm_map_copyin_kernel_buffer(src_map, src_addr, len, | |
5977 | src_destroy, copy_result); | |
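/*
 * Illustrative sketch (not compiled) of the strategy choice above:
 * small transfers are physically copied through a kalloc'ed kernel
 * buffer, while larger ones get the virtual entry-list copy, whose
 * setup cost and copy-on-write faults only pay off over enough
 * pages.
 */
#if 0
	if (len < msg_ool_size_small && !use_maxprot) {
		/* VM_MAP_COPY_KERNEL_BUFFER: physical copy, no COW */
	} else {
		/* VM_MAP_COPY_ENTRY_LIST: virtual, copy-on-write copy */
	}
#endif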
5978 | ||
5979 | /* | |
5980 | * Compute (page aligned) start and end of region | |
5981 | */ | |
5982 | src_start = vm_map_trunc_page(src_addr); | |
5983 | src_end = vm_map_round_page(src_end); | |
5984 | |
5985 | XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", (natural_t)src_map, src_addr, len, src_destroy, 0); | |
5986 | ||
5987 | /* |
5988 | * Allocate a header element for the list. | |
5989 | * | |
5990 | * Use the start and end in the header to | |
5991 | * remember the endpoints prior to rounding. | |
5992 | */ | |
5993 | ||
5994 | copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); | |
5995 | vm_map_copy_first_entry(copy) = | |
5996 | vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy); | |
5997 | copy->type = VM_MAP_COPY_ENTRY_LIST; | |
5998 | copy->cpy_hdr.nentries = 0; | |
5999 | copy->cpy_hdr.entries_pageable = TRUE; | |
6000 | ||
6001 | copy->offset = src_addr; | |
6002 | copy->size = len; | |
6003 | ||
6004 | new_entry = vm_map_copy_entry_create(copy); | |
6005 | ||
6006 | #define RETURN(x) \ | |
6007 | MACRO_BEGIN \ | |
6008 | vm_map_unlock(src_map); \ | |
6009 | if(src_map != base_map) \ | |
6010 | vm_map_deallocate(src_map); \ | |
6011 | if (new_entry != VM_MAP_ENTRY_NULL) \ |
6012 | vm_map_copy_entry_dispose(copy,new_entry); \ | |
6013 | vm_map_copy_discard(copy); \ | |
6014 | { \ | |
6015 | submap_map_t *_ptr; \ | |
6016 | \ | |
6017 | for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \ | |
6018 | parent_maps=parent_maps->next; \ | |
6019 | if (_ptr->parent_map != base_map) \ | |
6020 | vm_map_deallocate(_ptr->parent_map); \ | |
6021 | kfree(_ptr, sizeof(submap_map_t)); \ | |
6022 | } \ |
6023 | } \ | |
6024 | MACRO_RETURN(x); \ | |
6025 | MACRO_END | |
6026 | ||
6027 | /* | |
6028 | * Find the beginning of the region. | |
6029 | */ | |
6030 | ||
6031 | vm_map_lock(src_map); | |
6032 | ||
6033 | if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) | |
6034 | RETURN(KERN_INVALID_ADDRESS); | |
6035 | if(!tmp_entry->is_sub_map) { | |
6036 | vm_map_clip_start(src_map, tmp_entry, src_start); | |
6037 | } | |
6038 | /* set for later submap fix-up */ | |
6039 | copy_addr = src_start; | |
6040 | ||
6041 | /* | |
6042 | * Go through entries until we get to the end. | |
6043 | */ | |
6044 | ||
6045 | while (TRUE) { | |
6046 | register | |
6047 | vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */ | |
6048 | vm_map_size_t src_size; /* Size of source | |
6049 | * map entry (in both |
6050 | * maps) | |
6051 | */ | |
6052 | ||
6053 | register | |
6054 | vm_object_t src_object; /* Object to copy */ | |
6055 | vm_object_offset_t src_offset; | |
6056 | ||
6057 | boolean_t src_needs_copy; /* Should source map | |
6058 | * be made read-only | |
6059 | * for copy-on-write? | |
6060 | */ | |
6061 | ||
6062 | boolean_t new_entry_needs_copy; /* Will new entry be COW? */ | |
6063 | ||
6064 | boolean_t was_wired; /* Was source wired? */ | |
6065 | vm_map_version_t version; /* Version before locks | |
6066 | * dropped to make copy | |
6067 | */ | |
6068 | kern_return_t result; /* Return value from | |
6069 | * copy_strategically. | |
6070 | */ | |
6071 | while(tmp_entry->is_sub_map) { | |
6072 | vm_map_size_t submap_len; | |
6073 | submap_map_t *ptr; |
6074 | ||
6075 | ptr = (submap_map_t *)kalloc(sizeof(submap_map_t)); | |
6076 | ptr->next = parent_maps; | |
6077 | parent_maps = ptr; | |
6078 | ptr->parent_map = src_map; | |
6079 | ptr->base_start = src_start; | |
6080 | ptr->base_end = src_end; | |
6081 | submap_len = tmp_entry->vme_end - src_start; | |
6082 | if(submap_len > (src_end-src_start)) | |
6083 | submap_len = src_end-src_start; | |
6084 | ptr->base_start += submap_len; | |
6085 | ||
6086 | src_start -= tmp_entry->vme_start; | |
6087 | src_start += tmp_entry->offset; | |
6088 | src_end = src_start + submap_len; | |
6089 | src_map = tmp_entry->object.sub_map; | |
6090 | vm_map_lock(src_map); | |
6091 | /* keep an outstanding reference for all maps in */ | |
6092 | /* the parent's tree except the base map */ | |
6093 | vm_map_reference(src_map); | |
6094 | vm_map_unlock(ptr->parent_map); |
6095 | if (!vm_map_lookup_entry( | |
6096 | src_map, src_start, &tmp_entry)) | |
6097 | RETURN(KERN_INVALID_ADDRESS); | |
6098 | map_share = TRUE; | |
6099 | if(!tmp_entry->is_sub_map) | |
6100 | vm_map_clip_start(src_map, tmp_entry, src_start); | |
6101 | src_entry = tmp_entry; | |
6102 | } | |
6103 | if ((tmp_entry->object.vm_object != VM_OBJECT_NULL) && | |
6104 | (tmp_entry->object.vm_object->phys_contiguous)) { | |
6105 | /* This is not supported for now. In the future */ | |
6106 | /* we will need to detect the phys_contig */ | |
6107 | /* condition and then upgrade copy_slowly */ | |
6108 | /* to do a physical copy from the device- */ | |
6109 | /* memory-based object. We can piggy-back */ | |
6110 | /* off of the was_wired boolean to set up */ | |
6111 | /* the proper handling. */ | |
6112 | RETURN(KERN_PROTECTION_FAILURE); | |
6113 | } | |
1c79356b A |
6114 | /* |
6115 | * Create a new address map entry to hold the result. | |
6116 | * Fill in the fields from the appropriate source entries. | |
6117 | * We must unlock the source map to do this if we need | |
6118 | * to allocate a map entry. | |
6119 | */ | |
6120 | if (new_entry == VM_MAP_ENTRY_NULL) { | |
6121 | version.main_timestamp = src_map->timestamp; | |
6122 | vm_map_unlock(src_map); | |
6123 | ||
6124 | new_entry = vm_map_copy_entry_create(copy); | |
6125 | ||
6126 | vm_map_lock(src_map); | |
6127 | if ((version.main_timestamp + 1) != src_map->timestamp) { | |
6128 | if (!vm_map_lookup_entry(src_map, src_start, | |
6129 | &tmp_entry)) { | |
6130 | RETURN(KERN_INVALID_ADDRESS); | |
6131 | } | |
6132 | vm_map_clip_start(src_map, tmp_entry, src_start); | |
6133 | continue; /* restart w/ new tmp_entry */ | |
6134 | } | |
6135 | } | |
6136 | ||
6137 | /* | |
6138 | * Verify that the region can be read. | |
6139 | */ | |
6140 | if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE && | |
6141 | !use_maxprot) || | |
6142 | (src_entry->max_protection & VM_PROT_READ) == 0) | |
6143 | RETURN(KERN_PROTECTION_FAILURE); | |
6144 | ||
6145 | /* | |
6146 | * Clip against the endpoints of the entire region. | |
6147 | */ | |
6148 | ||
6149 | vm_map_clip_end(src_map, src_entry, src_end); | |
6150 | ||
6151 | src_size = src_entry->vme_end - src_start; | |
6152 | src_object = src_entry->object.vm_object; | |
6153 | src_offset = src_entry->offset; | |
6154 | was_wired = (src_entry->wired_count != 0); | |
6155 | ||
6156 | vm_map_entry_copy(new_entry, src_entry); | |
6157 | new_entry->use_pmap = FALSE; /* clr address space specifics */ | |
6158 | ||
6159 | /* | |
6160 | * Attempt non-blocking copy-on-write optimizations. | |
6161 | */ | |
6162 | ||
6163 | if (src_destroy && | |
6164 | (src_object == VM_OBJECT_NULL || | |
6165 | (src_object->internal && !src_object->true_share | |
6166 | && !map_share))) { | |
6167 | /* | |
6168 | * If we are destroying the source, and the object | |
6169 | * is internal, we can move the object reference | |
6170 | * from the source to the copy. The copy is | |
6171 | * copy-on-write only if the source is. | |
6172 | * We make another reference to the object, because | |
6173 | * destroying the source entry will deallocate it. | |
6174 | */ | |
6175 | vm_object_reference(src_object); | |
6176 | ||
6177 | /* | |
6178 | * Copy is always unwired. vm_map_entry_copy | |
6179 | * set its wired count to zero. | |
6180 | */ | |
6181 | ||
6182 | goto CopySuccessful; | |
6183 | } | |
6184 | ||
6185 | ||
6186 | RestartCopy: | |
6187 | XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n", | |
6188 | src_object, new_entry, new_entry->object.vm_object, | |
6189 | was_wired, 0); | |
6190 | if ((src_object == VM_OBJECT_NULL || | |
6191 | (!was_wired && !map_share && !tmp_entry->is_shared)) && | |
6192 | vm_object_copy_quickly( | |
6193 | &new_entry->object.vm_object, |
6194 | src_offset, | |
6195 | src_size, | |
6196 | &src_needs_copy, | |
6197 | &new_entry_needs_copy)) { | |
6198 | ||
6199 | new_entry->needs_copy = new_entry_needs_copy; | |
6200 | ||
6201 | /* | |
6202 | * Handle copy-on-write obligations | |
6203 | */ | |
6204 | ||
6205 | if (src_needs_copy && !tmp_entry->needs_copy) { | |
6206 | vm_object_pmap_protect( | |
6207 | src_object, | |
6208 | src_offset, | |
6209 | src_size, | |
6210 | (src_entry->is_shared ? | |
6211 | PMAP_NULL | |
6212 | : src_map->pmap), | |
6213 | src_entry->vme_start, | |
6214 | src_entry->protection & | |
6215 | ~VM_PROT_WRITE); | |
6216 | tmp_entry->needs_copy = TRUE; | |
6217 | } |
6218 | ||
6219 | /* | |
6220 | * The map has never been unlocked, so it's safe | |
6221 | * to move to the next entry rather than doing | |
6222 | * another lookup. | |
6223 | */ | |
6224 | ||
6225 | goto CopySuccessful; | |
6226 | } | |
6227 | ||
6228 | /* |
6229 | * Take an object reference, so that we may | |
6230 | * release the map lock(s). | |
6231 | */ | |
6232 | ||
6233 | assert(src_object != VM_OBJECT_NULL); | |
6234 | vm_object_reference(src_object); | |
6235 | ||
6236 | /* | |
6237 | * Record the timestamp for later verification. | |
6238 | * Unlock the map. | |
6239 | */ | |
6240 | ||
6241 | version.main_timestamp = src_map->timestamp; | |
6242 | vm_map_unlock(src_map); /* Increments timestamp once! */ | |
6243 | |
6244 | /* | |
6245 | * Perform the copy | |
6246 | */ | |
6247 | ||
6248 | if (was_wired) { | |
6249 | CopySlowly: | |
6250 | vm_object_lock(src_object); |
6251 | result = vm_object_copy_slowly( | |
6252 | src_object, | |
6253 | src_offset, | |
6254 | src_size, | |
6255 | THREAD_UNINT, | |
6256 | &new_entry->object.vm_object); | |
6257 | new_entry->offset = 0; | |
6258 | new_entry->needs_copy = FALSE; | |
6259 | |
6260 | } | |
6261 | else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && | |
6262 | (tmp_entry->is_shared || map_share)) { | |
6263 | vm_object_t new_object; | |
6264 | ||
6265 | vm_object_lock(src_object); | |
6266 | new_object = vm_object_copy_delayed( | |
6267 | src_object, | |
6268 | src_offset, | |
6269 | src_size); | |
6270 | if (new_object == VM_OBJECT_NULL) | |
6271 | goto CopySlowly; | |
6272 | ||
6273 | new_entry->object.vm_object = new_object; | |
6274 | new_entry->needs_copy = TRUE; | |
6275 | result = KERN_SUCCESS; | |
6276 | ||
6277 | } else { |
6278 | result = vm_object_copy_strategically(src_object, | |
6279 | src_offset, | |
6280 | src_size, | |
6281 | &new_entry->object.vm_object, | |
6282 | &new_entry->offset, | |
6283 | &new_entry_needs_copy); | |
6284 | ||
6285 | new_entry->needs_copy = new_entry_needs_copy; | |
6286 | } |
6287 | ||
6288 | if (result != KERN_SUCCESS && | |
6289 | result != KERN_MEMORY_RESTART_COPY) { | |
6290 | vm_map_lock(src_map); | |
6291 | RETURN(result); | |
6292 | } | |
6293 | ||
6294 | /* | |
6295 | * Throw away the extra reference | |
6296 | */ | |
6297 | ||
6298 | vm_object_deallocate(src_object); | |
6299 | ||
6300 | /* | |
6301 | * Verify that the map has not substantially | |
6302 | * changed while the copy was being made. | |
6303 | */ | |
6304 | ||
6305 | vm_map_lock(src_map); | |
6306 | |
6307 | if ((version.main_timestamp + 1) == src_map->timestamp) | |
6308 | goto VerificationSuccessful; | |
6309 | ||
6310 | /* | |
6311 | * Simple version comparison failed. | |
6312 | * | |
6313 | * Retry the lookup and verify that the | |
6314 | * same object/offset are still present. | |
6315 | * | |
6316 | * [Note: a memory manager that colludes with | |
6317 | * the calling task can detect that we have | |
6318 | * cheated. While the map was unlocked, the | |
6319 | * mapping could have been changed and restored.] | |
6320 | */ | |
6321 | ||
6322 | if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) { | |
6323 | RETURN(KERN_INVALID_ADDRESS); | |
6324 | } | |
6325 | ||
6326 | src_entry = tmp_entry; | |
6327 | vm_map_clip_start(src_map, src_entry, src_start); | |
6328 | ||
6329 | if ((((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE) && | |
6330 | !use_maxprot) || | |
6331 | ((src_entry->max_protection & VM_PROT_READ) == 0)) | |
6332 | goto VerificationFailed; |
6333 | ||
6334 | if (src_entry->vme_end < new_entry->vme_end) | |
6335 | src_size = (new_entry->vme_end = src_entry->vme_end) - src_start; | |
6336 | ||
6337 | if ((src_entry->object.vm_object != src_object) || | |
6338 | (src_entry->offset != src_offset) ) { | |
6339 | ||
6340 | /* | |
6341 | * Verification failed. | |
6342 | * | |
6343 | * Start over with this top-level entry. | |
6344 | */ | |
6345 | ||
6346 | VerificationFailed: ; | |
6347 | ||
6348 | vm_object_deallocate(new_entry->object.vm_object); | |
6349 | tmp_entry = src_entry; | |
6350 | continue; | |
6351 | } | |
6352 | ||
6353 | /* | |
6354 | * Verification succeeded. | |
6355 | */ | |
6356 | ||
6357 | VerificationSuccessful: ; | |
6358 | ||
6359 | if (result == KERN_MEMORY_RESTART_COPY) | |
6360 | goto RestartCopy; | |
6361 | ||
6362 | /* | |
6363 | * Copy succeeded. | |
6364 | */ | |
6365 | ||
6366 | CopySuccessful: ; | |
6367 | ||
6368 | /* | |
6369 | * Link in the new copy entry. | |
6370 | */ | |
6371 | ||
6372 | vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy), | |
6373 | new_entry); | |
6374 | ||
6375 | /* | |
6376 | * Determine whether the entire region | |
6377 | * has been copied. | |
6378 | */ | |
6379 | src_start = new_entry->vme_end; | |
6380 | new_entry = VM_MAP_ENTRY_NULL; | |
6381 | while ((src_start >= src_end) && (src_end != 0)) { | |
6382 | if (src_map != base_map) { | |
6383 | submap_map_t *ptr; | |
6384 | ||
6385 | ptr = parent_maps; | |
6386 | assert(ptr != NULL); | |
6387 | parent_maps = parent_maps->next; | |
6388 | vm_map_unlock(src_map); | |
6389 | vm_map_deallocate(src_map); | |
6390 | vm_map_lock(ptr->parent_map); | |
6391 | src_map = ptr->parent_map; |
6392 | src_start = ptr->base_start; | |
6393 | src_end = ptr->base_end; | |
6394 | if ((src_end > src_start) && | |
6395 | !vm_map_lookup_entry( | |
6396 | src_map, src_start, &tmp_entry)) | |
6397 | RETURN(KERN_INVALID_ADDRESS); | |
6398 | kfree(ptr, sizeof(submap_map_t)); | |
6399 | if(parent_maps == NULL) |
6400 | map_share = FALSE; | |
6401 | src_entry = tmp_entry->vme_prev; | |
6402 | } else | |
6403 | break; | |
6404 | } | |
6405 | if ((src_start >= src_end) && (src_end != 0)) | |
6406 | break; | |
6407 | ||
6408 | /* | |
6409 | * Verify that there are no gaps in the region | |
6410 | */ | |
6411 | ||
6412 | tmp_entry = src_entry->vme_next; | |
6413 | if ((tmp_entry->vme_start != src_start) || | |
6414 | (tmp_entry == vm_map_to_entry(src_map))) | |
6415 | RETURN(KERN_INVALID_ADDRESS); | |
6416 | } | |
6417 | ||
6418 | /* | |
6419 | * If the source should be destroyed, do it now, since the | |
6420 | * copy was successful. | |
6421 | */ | |
6422 | if (src_destroy) { | |
6423 | (void) vm_map_delete(src_map, | |
6424 | vm_map_trunc_page(src_addr), | |
6425 | src_end, |
6426 | (src_map == kernel_map) ? | |
6427 | VM_MAP_REMOVE_KUNWIRE : | |
6428 | VM_MAP_NO_FLAGS, | |
6429 | VM_MAP_NULL); | |
6430 | } |
6431 | ||
6432 | vm_map_unlock(src_map); | |
6433 | ||
6434 | /* Fix-up start and end points in copy. This is necessary */ | |
6435 | /* when the various entries in the copy object were picked */ | |
6436 | /* up from different sub-maps */ | |
6437 | ||
6438 | tmp_entry = vm_map_copy_first_entry(copy); | |
6439 | while (tmp_entry != vm_map_copy_to_entry(copy)) { | |
6440 | tmp_entry->vme_end = copy_addr + | |
6441 | (tmp_entry->vme_end - tmp_entry->vme_start); | |
6442 | tmp_entry->vme_start = copy_addr; | |
6443 | copy_addr += tmp_entry->vme_end - tmp_entry->vme_start; | |
6444 | tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next; | |
6445 | } | |
6446 | ||
6447 | *copy_result = copy; | |
6448 | return(KERN_SUCCESS); | |
6449 | ||
6450 | #undef RETURN | |
6451 | } | |
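/*
 * Illustrative sketch (not compiled) of the submap bookkeeping used
 * above: descending into a submap pushes the parent's state onto
 * the parent_maps chain; finishing the submap pops it so the copy
 * resumes in the parent exactly where it left off.
 */
#if 0
	submap_map_t *ptr;

	/* descend */
	ptr = (submap_map_t *) kalloc(sizeof(submap_map_t));
	ptr->parent_map = src_map;
	ptr->base_start = src_start;
	ptr->base_end = src_end;
	ptr->next = parent_maps;
	parent_maps = ptr;

	/* ... copy entries within the submap ... */

	/* ascend */
	ptr = parent_maps;
	parent_maps = ptr->next;
	src_map = ptr->parent_map;
	src_start = ptr->base_start;
	src_end = ptr->base_end;
	kfree(ptr, sizeof(submap_map_t));
#endif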
6452 | ||
6453 | /* | |
6454 | * vm_map_copyin_object: | |
6455 | * | |
6456 | * Create a copy object from an object. | |
6457 | * Our caller donates an object reference. | |
6458 | */ | |
6459 | ||
6460 | kern_return_t | |
6461 | vm_map_copyin_object( | |
6462 | vm_object_t object, | |
6463 | vm_object_offset_t offset, /* offset of region in object */ | |
6464 | vm_object_size_t size, /* size of region in object */ | |
6465 | vm_map_copy_t *copy_result) /* OUT */ | |
6466 | { | |
6467 | vm_map_copy_t copy; /* Resulting copy */ | |
6468 | ||
6469 | /* | |
6470 | * We drop the object into a special copy object | |
6471 | * that contains the object directly. | |
6472 | */ | |
6473 | ||
6474 | copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); | |
6475 | copy->type = VM_MAP_COPY_OBJECT; | |
6476 | copy->cpy_object = object; | |
6477 | copy->offset = offset; |
6478 | copy->size = size; | |
6479 | ||
6480 | *copy_result = copy; | |
6481 | return(KERN_SUCCESS); | |
6482 | } | |
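/*
 * Example (illustrative sketch, not compiled): wrapping an object
 * in a copy and mapping it.  The caller donates its object
 * reference to the copy; a successful vm_map_copyout() then
 * consumes the copy itself.  "object", "size" and "dst_map" are
 * hypothetical.
 */
#if 0
	vm_map_copy_t		copy;
	vm_map_address_t	addr;

	vm_object_reference(object);	/* reference donated to the copy */
	(void) vm_map_copyin_object(object, 0, size, &copy);
	if (vm_map_copyout(dst_map, &addr, copy) != KERN_SUCCESS)
		vm_map_copy_discard(copy);
#endif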
6483 | ||
6484 | static void | |
6485 | vm_map_fork_share( |
6486 | vm_map_t old_map, | |
6487 | vm_map_entry_t old_entry, | |
6488 | vm_map_t new_map) | |
6489 | { | |
6490 | vm_object_t object; | |
6491 | vm_map_entry_t new_entry; | |
6492 | |
6493 | /* | |
6494 | * New sharing code. New map entry | |
6495 | * references original object. Internal | |
6496 | * objects use asynchronous copy algorithm for | |
6497 | * future copies. First make sure we have | |
6498 | * the right object. If we need a shadow, | |
6499 | * or someone else already has one, then | |
6500 | * make a new shadow and share it. | |
6501 | */ | |
6502 | ||
6503 | object = old_entry->object.vm_object; | |
6504 | if (old_entry->is_sub_map) { | |
6505 | assert(old_entry->wired_count == 0); | |
6506 | #ifndef i386 | |
6507 | if(old_entry->use_pmap) { | |
6508 | kern_return_t result; | |
6509 | ||
6510 | result = pmap_nest(new_map->pmap, |
6511 | (old_entry->object.sub_map)->pmap, | |
55e303ae A |
6512 | (addr64_t)old_entry->vme_start, |
6513 | (addr64_t)old_entry->vme_start, | |
6514 | (uint64_t)(old_entry->vme_end - old_entry->vme_start)); | |
1c79356b A |
6515 | if(result) |
6516 | panic("vm_map_fork_share: pmap_nest failed!"); | |
6517 | } | |
6518 | #endif | |
6519 | } else if (object == VM_OBJECT_NULL) { | |
91447636 | 6520 | object = vm_object_allocate((vm_map_size_t)(old_entry->vme_end - |
1c79356b A |
6521 | old_entry->vme_start)); |
6522 | old_entry->offset = 0; | |
6523 | old_entry->object.vm_object = object; | |
6524 | assert(!old_entry->needs_copy); | |
6525 | } else if (object->copy_strategy != | |
6526 | MEMORY_OBJECT_COPY_SYMMETRIC) { | |
6527 | ||
6528 | /* | |
6529 | * We are already using an asymmetric | |
6530 | * copy, and therefore we already have | |
6531 | * the right object. | |
6532 | */ | |
6533 | ||
6534 | assert(! old_entry->needs_copy); | |
6535 | } | |
	else if (old_entry->needs_copy ||	/* case 1 */
		 object->shadowed ||		/* case 2 */
		 (!object->true_share &&	/* case 3 */
		  !old_entry->is_shared &&
		  (object->size >
		   (vm_map_size_t)(old_entry->vme_end -
				   old_entry->vme_start)))) {

		/*
		 *	We need to create a shadow.
		 *	There are three cases here.
		 *	In the first case, we need to
		 *	complete a deferred symmetrical
		 *	copy that we participated in.
		 *	In the second and third cases,
		 *	we need to create the shadow so
		 *	that changes that we make to the
		 *	object do not interfere with
		 *	any symmetrical copies which
		 *	have occurred (case 2) or which
		 *	might occur (case 3).
		 *
		 *	The first case is when we had
		 *	deferred shadow object creation
		 *	via the entry->needs_copy mechanism.
		 *	This mechanism only works when
		 *	only one entry points to the source
		 *	object, and we are about to create
		 *	a second entry pointing to the
		 *	same object.  The problem is that
		 *	there is no way of mapping from
		 *	an object to the entries pointing
		 *	to it.  (Deferred shadow creation
		 *	works with one entry because it
		 *	occurs at fault time, and we walk
		 *	from the entry to the object when
		 *	handling the fault.)
		 *
		 *	The second case is when the object
		 *	to be shared has already been copied
		 *	with a symmetric copy, but we point
		 *	directly to the object without
		 *	needs_copy set in our entry.  (This
		 *	can happen because different ranges
		 *	of an object can be pointed to by
		 *	different entries.  In particular,
		 *	a single entry pointing to an object
		 *	can be split by a call to vm_inherit,
		 *	which, combined with task_create, can
		 *	result in the different entries
		 *	having different needs_copy values.)
		 *	The shadowed flag in the object allows
		 *	us to detect this case.  The problem
		 *	with this case is that if this object
		 *	has or will have shadows, then we
		 *	must not perform an asymmetric copy
		 *	of this object, since such a copy
		 *	allows the object to be changed, which
		 *	will break the previous symmetrical
		 *	copies (which rely upon the object
		 *	not changing).  In a sense, the shadowed
		 *	flag says "don't change this object".
		 *	We fix this by creating a shadow
		 *	object for this object, and sharing
		 *	that.  This works because we are free
		 *	to change the shadow object (and thus
		 *	to use an asymmetric copy strategy);
		 *	this is also semantically correct,
		 *	since this object is temporary, and
		 *	therefore a copy of the object is
		 *	as good as the object itself.  (This
		 *	is not true for permanent objects,
		 *	since the pager needs to see changes,
		 *	which won't happen if the changes
		 *	are made to a copy.)
		 *
		 *	The third case is when the object
		 *	to be shared has parts sticking
		 *	outside of the entry we're working
		 *	with, and thus may in the future
		 *	be subject to a symmetrical copy.
		 *	(This is a preemptive version of
		 *	case 2.)
		 */

		assert(!(object->shadowed && old_entry->is_shared));
		vm_object_shadow(&old_entry->object.vm_object,
				 &old_entry->offset,
				 (vm_map_size_t) (old_entry->vme_end -
						  old_entry->vme_start));

		/*
		 *	If we're making a shadow for other than
		 *	copy on write reasons, then we have
		 *	to remove write permission.
		 */

		if (!old_entry->needs_copy &&
		    (old_entry->protection & VM_PROT_WRITE)) {
			if (old_map->mapped) {
				vm_object_pmap_protect(
					old_entry->object.vm_object,
					old_entry->offset,
					(old_entry->vme_end -
					 old_entry->vme_start),
					PMAP_NULL,
					old_entry->vme_start,
					old_entry->protection & ~VM_PROT_WRITE);
			} else {
				pmap_protect(old_map->pmap,
					     old_entry->vme_start,
					     old_entry->vme_end,
					     old_entry->protection & ~VM_PROT_WRITE);
			}
		}

		old_entry->needs_copy = FALSE;
		object = old_entry->object.vm_object;
	}

	/*
	 *	If object was using a symmetric copy strategy,
	 *	change its copy strategy to the default
	 *	asymmetric copy strategy, which is copy_delay
	 *	in the non-norma case and copy_call in the
	 *	norma case.  Bump the reference count for the
	 *	new entry.
	 */

	if (old_entry->is_sub_map) {
		vm_map_lock(old_entry->object.sub_map);
		vm_map_reference(old_entry->object.sub_map);
		vm_map_unlock(old_entry->object.sub_map);
	} else {
		vm_object_lock(object);
		object->ref_count++;
		vm_object_res_reference(object);
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
		}
		vm_object_unlock(object);
	}

	/*
	 *	Clone the entry, using object ref from above.
	 *	Mark both entries as shared.
	 */

	new_entry = vm_map_entry_create(new_map);
	vm_map_entry_copy(new_entry, old_entry);
	old_entry->is_shared = TRUE;
	new_entry->is_shared = TRUE;

	/*
	 *	Insert the entry into the new map -- we
	 *	know we're inserting at the end of the new
	 *	map.
	 */

	vm_map_entry_link(new_map, vm_map_last_entry(new_map), new_entry);

	/*
	 *	Update the physical map
	 */

	if (old_entry->is_sub_map) {
		/* Bill Angell pmap support goes here */
	} else {
		pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start,
			  old_entry->vme_end - old_entry->vme_start,
			  old_entry->vme_start);
	}
}

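/*
 *	vm_map_fork_copy:
 *
 *	Copy one entry's range from the old map into the new map during
 *	fork.  Uses the maxprot flavor of copyin (see the comment below)
 *	so that even memory that is currently inaccessible, but could
 *	become accessible, is captured.  Returns TRUE if the copy was made
 *	and inserted, FALSE if the range had to be skipped; in both cases
 *	*old_entry_p is advanced to the entry at which the caller should
 *	resume its traversal.
 */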
static boolean_t
vm_map_fork_copy(
	vm_map_t	old_map,
	vm_map_entry_t	*old_entry_p,
	vm_map_t	new_map)
{
	vm_map_entry_t	old_entry = *old_entry_p;
	vm_map_size_t	entry_size = old_entry->vme_end - old_entry->vme_start;
	vm_map_offset_t	start = old_entry->vme_start;
	vm_map_copy_t	copy;
	vm_map_entry_t	last = vm_map_last_entry(new_map);

	vm_map_unlock(old_map);
	/*
	 *	Use maxprot version of copyin because we
	 *	care about whether this memory can ever
	 *	be accessed, not just whether it's accessible
	 *	right now.
	 */
	if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
	    != KERN_SUCCESS) {
		/*
		 *	The map might have changed while it
		 *	was unlocked, check it again.  Skip
		 *	any blank space or permanently
		 *	unreadable region.
		 */
		vm_map_lock(old_map);
		if (!vm_map_lookup_entry(old_map, start, &last) ||
		    (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
			last = last->vme_next;
		}
		*old_entry_p = last;

		/*
		 * XXX	For some error returns, want to
		 * XXX	skip to the next element.  Note
		 *	that INVALID_ADDRESS and
		 *	PROTECTION_FAILURE are handled above.
		 */

		return FALSE;
	}

	/*
	 *	Insert the copy into the new map
	 */

	vm_map_copy_insert(new_map, last, copy);

	/*
	 *	Pick up the traversal at the end of
	 *	the copied region.
	 */

	vm_map_lock(old_map);
	start += entry_size;
	if (! vm_map_lookup_entry(old_map, start, &last)) {
		last = last->vme_next;
	} else {
		vm_map_clip_start(old_map, last, start);
	}
	*old_entry_p = last;

	return TRUE;
}

/*
 *	vm_map_fork:
 *
 *	Create and return a new map based on the old
 *	map, according to the inheritance values on the
 *	regions in that map.
 *
 *	The source map must not be locked.
 */
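/*
 * Summarizing the switch below: a VM_INHERIT_NONE region simply does
 * not appear in the child; a VM_INHERIT_SHARE region is mapped into
 * the child referencing the same object; a VM_INHERIT_COPY region is
 * copied, symmetrically (copy-on-write) via vm_object_copy_quickly()
 * when possible, and through vm_map_fork_copy() otherwise.
 */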
vm_map_t
vm_map_fork(
	vm_map_t	old_map)
{
	pmap_t		new_pmap = pmap_create((vm_map_size_t) 0);
	vm_map_t	new_map;
	vm_map_entry_t	old_entry;
	vm_map_size_t	new_size = 0, entry_size;
	vm_map_entry_t	new_entry;
	boolean_t	src_needs_copy;
	boolean_t	new_entry_needs_copy;

	vm_map_reference_swap(old_map);
	vm_map_lock(old_map);

	new_map = vm_map_create(new_pmap,
				old_map->min_offset,
				old_map->max_offset,
				old_map->hdr.entries_pageable);

	for (
	    old_entry = vm_map_first_entry(old_map);
	    old_entry != vm_map_to_entry(old_map);
	    ) {

		entry_size = old_entry->vme_end - old_entry->vme_start;

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			vm_map_fork_share(old_map, old_entry, new_map);
			new_size += entry_size;
			break;

		case VM_INHERIT_COPY:

			/*
			 *	Inline the copy_quickly case;
			 *	upon failure, fall back on call
			 *	to vm_map_fork_copy.
			 */

			if (old_entry->is_sub_map)
				break;
			if ((old_entry->wired_count != 0) ||
			    ((old_entry->object.vm_object != NULL) &&
			     (old_entry->object.vm_object->true_share))) {
				goto slow_vm_map_fork_copy;
			}

			new_entry = vm_map_entry_create(new_map);
			vm_map_entry_copy(new_entry, old_entry);
			/* clear address space specifics */
			new_entry->use_pmap = FALSE;

			if (! vm_object_copy_quickly(
				      &new_entry->object.vm_object,
				      old_entry->offset,
				      (old_entry->vme_end -
				       old_entry->vme_start),
				      &src_needs_copy,
				      &new_entry_needs_copy)) {
				vm_map_entry_dispose(new_map, new_entry);
				goto slow_vm_map_fork_copy;
			}

			/*
			 *	Handle copy-on-write obligations
			 */

			if (src_needs_copy && !old_entry->needs_copy) {
				vm_object_pmap_protect(
					old_entry->object.vm_object,
					old_entry->offset,
					(old_entry->vme_end -
					 old_entry->vme_start),
					((old_entry->is_shared
					  || old_map->mapped)
					 ? PMAP_NULL :
					 old_map->pmap),
					old_entry->vme_start,
					old_entry->protection & ~VM_PROT_WRITE);

				old_entry->needs_copy = TRUE;
			}
			new_entry->needs_copy = new_entry_needs_copy;

			/*
			 *	Insert the entry at the end
			 *	of the map.
			 */

			vm_map_entry_link(new_map, vm_map_last_entry(new_map),
					  new_entry);
			new_size += entry_size;
			break;

		slow_vm_map_fork_copy:
			if (vm_map_fork_copy(old_map, &old_entry, new_map)) {
				new_size += entry_size;
			}
			continue;
		}
		old_entry = old_entry->vme_next;
	}

	new_map->size = new_size;
	vm_map_unlock(old_map);
	vm_map_deallocate(old_map);

	return(new_map);
}


/*
 *	vm_map_lookup_locked:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Returns the (object, offset, protection) for
 *	this address, whether it is wired down, and whether
 *	this map has the only reference to the data in question.
 *	In order to later verify this lookup, a "version"
 *	is returned.
 *
 *	The map MUST be locked by the caller and WILL be
 *	locked on exit.  In order to guarantee the
 *	existence of the returned object, it is returned
 *	locked.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
kern_return_t
vm_map_lookup_locked(
	vm_map_t		*var_map,	/* IN/OUT */
	vm_map_offset_t		vaddr,
	vm_prot_t		fault_type,
	vm_map_version_t	*out_version,	/* OUT */
	vm_object_t		*object,	/* OUT */
	vm_object_offset_t	*offset,	/* OUT */
	vm_prot_t		*out_prot,	/* OUT */
	boolean_t		*wired,		/* OUT */
	int			*behavior,	/* OUT */
	vm_map_offset_t		*lo_offset,	/* OUT */
	vm_map_offset_t		*hi_offset,	/* OUT */
	vm_map_t		*real_map)
{
	vm_map_entry_t			entry;
	register vm_map_t		map = *var_map;
	vm_map_t			old_map = *var_map;
	vm_map_t			cow_sub_map_parent = VM_MAP_NULL;
	vm_map_offset_t			cow_parent_vaddr = 0;
	vm_map_offset_t			old_start = 0;
	vm_map_offset_t			old_end = 0;
	register vm_prot_t		prot;

	*real_map = map;
	RetryLookup: ;

	/*
	 *	If the map has an interesting hint, try it before calling
	 *	full blown lookup routine.
	 */

	mutex_lock(&map->s_lock);
	entry = map->hint;
	mutex_unlock(&map->s_lock);

	if ((entry == vm_map_to_entry(map)) ||
	    (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
		vm_map_entry_t	tmp_entry;

		/*
		 *	Entry was either not a valid hint, or the vaddr
		 *	was not contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
			if ((cow_sub_map_parent) && (cow_sub_map_parent != map))
				vm_map_unlock(cow_sub_map_parent);
			if ((*real_map != map)
			    && (*real_map != cow_sub_map_parent))
				vm_map_unlock(*real_map);
			return KERN_INVALID_ADDRESS;
		}

		entry = tmp_entry;
	}
	if (map == old_map) {
		old_start = entry->vme_start;
		old_end = entry->vme_end;
	}

	/*
	 *	Handle submaps.  Drop lock on upper map, submap is
	 *	returned locked.
	 */

submap_recurse:
	if (entry->is_sub_map) {
		vm_map_offset_t		local_vaddr;
		vm_map_offset_t		end_delta;
		vm_map_offset_t		start_delta;
		vm_map_entry_t		submap_entry;
		boolean_t		mapped_needs_copy = FALSE;

		local_vaddr = vaddr;

		if ((!entry->needs_copy) && (entry->use_pmap)) {
			/* if real_map equals map we unlock below */
			if ((*real_map != map) &&
			    (*real_map != cow_sub_map_parent))
				vm_map_unlock(*real_map);
			*real_map = entry->object.sub_map;
		}

		if (entry->needs_copy) {
			if (!mapped_needs_copy) {
				if (vm_map_lock_read_to_write(map)) {
					vm_map_lock_read(map);
					if (*real_map == entry->object.sub_map)
						*real_map = map;
					goto RetryLookup;
				}
				vm_map_lock_read(entry->object.sub_map);
				cow_sub_map_parent = map;
				/* reset base to map before cow object */
				/* this is the map which will accept   */
				/* the new cow object */
				old_start = entry->vme_start;
				old_end = entry->vme_end;
				cow_parent_vaddr = vaddr;
				mapped_needs_copy = TRUE;
			} else {
				vm_map_lock_read(entry->object.sub_map);
				if ((cow_sub_map_parent != map) &&
				    (*real_map != map))
					vm_map_unlock(map);
			}
		} else {
			vm_map_lock_read(entry->object.sub_map);
			/* leave map locked if it is a target */
			/* cow sub_map above otherwise, just  */
			/* follow the maps down to the object */
			/* here we unlock knowing we are not  */
			/* revisiting the map.  */
			if ((*real_map != map) && (map != cow_sub_map_parent))
				vm_map_unlock_read(map);
		}

		*var_map = map = entry->object.sub_map;

		/* calculate the offset in the submap for vaddr */
		local_vaddr = (local_vaddr - entry->vme_start) + entry->offset;

	RetrySubMap:
		if (!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) {
			if ((cow_sub_map_parent) && (cow_sub_map_parent != map)) {
				vm_map_unlock(cow_sub_map_parent);
			}
			if ((*real_map != map)
			    && (*real_map != cow_sub_map_parent)) {
				vm_map_unlock(*real_map);
			}
			*real_map = map;
			return KERN_INVALID_ADDRESS;
		}
		/* find the attenuated shadow of the underlying object */
		/* on our target map */

		/* in plain English: the submap object may extend beyond */
		/* the region mapped by the entry, or may only fill a    */
		/* portion of it.  For our purposes, we only care if the */
		/* object doesn't fill.  In that case the area which     */
		/* will ultimately be clipped in the top map only needs  */
		/* to be as big as the portion of the underlying entry   */
		/* which is mapped */
		start_delta = submap_entry->vme_start > entry->offset ?
			submap_entry->vme_start - entry->offset : 0;

		end_delta =
			(entry->offset + start_delta + (old_end - old_start)) <=
			submap_entry->vme_end ?
			0 : (entry->offset +
			     (old_end - old_start))
			- submap_entry->vme_end;

		old_start += start_delta;
		old_end -= end_delta;

		if (submap_entry->is_sub_map) {
			entry = submap_entry;
			vaddr = local_vaddr;
			goto submap_recurse;
		}

		if (((fault_type & VM_PROT_WRITE) && cow_sub_map_parent)) {

			vm_object_t	copy_object;
			vm_map_offset_t	local_start;
			vm_map_offset_t	local_end;
			boolean_t	copied_slowly = FALSE;

			if (vm_map_lock_read_to_write(map)) {
				vm_map_lock_read(map);
				old_start -= start_delta;
				old_end += end_delta;
				goto RetrySubMap;
			}


			if (submap_entry->object.vm_object == VM_OBJECT_NULL) {
				submap_entry->object.vm_object =
					vm_object_allocate(
						(vm_map_size_t)
						(submap_entry->vme_end
						 - submap_entry->vme_start));
				submap_entry->offset = 0;
			}
			local_start = local_vaddr -
				(cow_parent_vaddr - old_start);
			local_end = local_vaddr +
				(old_end - cow_parent_vaddr);
			vm_map_clip_start(map, submap_entry, local_start);
			vm_map_clip_end(map, submap_entry, local_end);

			/* This is the COW case; let's connect  */
			/* an entry in our space to the         */
			/* underlying object in the submap,     */
			/* bypassing the submap. */


			if (submap_entry->wired_count != 0) {
				vm_object_lock(
					submap_entry->object.vm_object);
				vm_object_copy_slowly(
					submap_entry->object.vm_object,
					submap_entry->offset,
					submap_entry->vme_end -
					submap_entry->vme_start,
					FALSE,
					&copy_object);
				copied_slowly = TRUE;
			} else {

				/* set up shadow object */
				copy_object = submap_entry->object.vm_object;
				vm_object_reference(copy_object);
				submap_entry->object.vm_object->shadowed = TRUE;
				submap_entry->needs_copy = TRUE;
				vm_object_pmap_protect(
					submap_entry->object.vm_object,
					submap_entry->offset,
					submap_entry->vme_end -
					submap_entry->vme_start,
					(submap_entry->is_shared
					 || map->mapped) ?
					PMAP_NULL : map->pmap,
					submap_entry->vme_start,
					submap_entry->protection &
					~VM_PROT_WRITE);
			}


			/* This works differently from the  */
			/* normal submap case.  We go back  */
			/* to the parent of the cow map and */
			/* clip out the target portion of   */
			/* the sub_map, substituting the    */
			/* new copy object. */

			vm_map_unlock(map);
			local_start = old_start;
			local_end = old_end;
			map = cow_sub_map_parent;
			*var_map = cow_sub_map_parent;
			vaddr = cow_parent_vaddr;
			cow_sub_map_parent = NULL;

			if (!vm_map_lookup_entry(map,
						 vaddr, &entry)) {
				vm_object_deallocate(
					copy_object);
				vm_map_lock_write_to_read(map);
				return KERN_INVALID_ADDRESS;
			}

			/* clip out the portion of space  */
			/* mapped by the sub map which    */
			/* corresponds to the underlying  */
			/* object */
			vm_map_clip_start(map, entry, local_start);
			vm_map_clip_end(map, entry, local_end);


			/* substitute copy object for */
			/* shared map entry           */
			vm_map_deallocate(entry->object.sub_map);
			entry->is_sub_map = FALSE;
			entry->object.vm_object = copy_object;

			entry->protection |= VM_PROT_WRITE;
			entry->max_protection |= VM_PROT_WRITE;
			if (copied_slowly) {
				entry->offset = 0;
				entry->needs_copy = FALSE;
				entry->is_shared = FALSE;
			} else {
				entry->offset = submap_entry->offset;
				entry->needs_copy = TRUE;
				if (entry->inheritance == VM_INHERIT_SHARE)
					entry->inheritance = VM_INHERIT_COPY;
				if (map != old_map)
					entry->is_shared = TRUE;
			}
			if (entry->inheritance == VM_INHERIT_SHARE)
				entry->inheritance = VM_INHERIT_COPY;

			vm_map_lock_write_to_read(map);
		} else {
			if ((cow_sub_map_parent)
			    && (cow_sub_map_parent != *real_map)
			    && (cow_sub_map_parent != map)) {
				vm_map_unlock(cow_sub_map_parent);
			}
			entry = submap_entry;
			vaddr = local_vaddr;
		}
	}

	/*
	 *	Check whether this task is allowed to have
	 *	this page.
	 */

	prot = entry->protection;
	if ((fault_type & (prot)) != fault_type) {
		if (*real_map != map) {
			vm_map_unlock(*real_map);
		}
		*real_map = map;
		return KERN_PROTECTION_FAILURE;
	}

	/*
	 *	If this page is not pageable, we have to get
	 *	it for all possible accesses.
	 */

	*wired = (entry->wired_count != 0);
	if (*wired)
		prot = fault_type = entry->protection;

	/*
	 *	If the entry was copy-on-write, we either ...
	 */

	if (entry->needs_copy) {
		/*
		 *	If we want to write the page, we may as well
		 *	handle that now since we've got the map locked.
		 *
		 *	If we don't need to write the page, we just
		 *	demote the permissions allowed.
		 */

		if ((fault_type & VM_PROT_WRITE) || *wired) {
			/*
			 *	Make a new object, and place it in the
			 *	object chain.  Note that no new references
			 *	have appeared -- one just moved from the
			 *	map to the new object.
			 */

			if (vm_map_lock_read_to_write(map)) {
				vm_map_lock_read(map);
				goto RetryLookup;
			}
			vm_object_shadow(&entry->object.vm_object,
					 &entry->offset,
					 (vm_map_size_t) (entry->vme_end -
							  entry->vme_start));

			entry->object.vm_object->shadowed = TRUE;
			entry->needs_copy = FALSE;
			vm_map_lock_write_to_read(map);
		}
		else {
			/*
			 *	We're attempting to read a copy-on-write
			 *	page -- don't allow writes.
			 */

			prot &= (~VM_PROT_WRITE);
		}
	}

	/*
	 *	Create an object if necessary.
	 */
	if (entry->object.vm_object == VM_OBJECT_NULL) {

		if (vm_map_lock_read_to_write(map)) {
			vm_map_lock_read(map);
			goto RetryLookup;
		}

		entry->object.vm_object = vm_object_allocate(
			(vm_map_size_t)(entry->vme_end - entry->vme_start));
		entry->offset = 0;
		vm_map_lock_write_to_read(map);
	}

	/*
	 *	Return the object/offset from this entry.  If the entry
	 *	was copy-on-write or empty, it has been fixed up.  Also
	 *	return the protection.
	 */

	*offset = (vaddr - entry->vme_start) + entry->offset;
	*object = entry->object.vm_object;
	*out_prot = prot;
	*behavior = entry->behavior;
	*lo_offset = entry->offset;
	*hi_offset = (entry->vme_end - entry->vme_start) + entry->offset;

	/*
	 *	Lock the object to prevent it from disappearing
	 */

	vm_object_lock(*object);

	/*
	 *	Save the version number
	 */

	out_version->main_timestamp = map->timestamp;

	return KERN_SUCCESS;
}


/*
 *	vm_map_verify:
 *
 *	Verifies that the map in question has not changed
 *	since the given version.  If successful, the map
 *	will not change until vm_map_verify_done() is called.
 */
boolean_t
vm_map_verify(
	register vm_map_t		map,
	register vm_map_version_t	*version)	/* REF */
{
	boolean_t	result;

	vm_map_lock_read(map);
	result = (map->timestamp == version->main_timestamp);

	if (!result)
		vm_map_unlock_read(map);

	return(result);
}

/*
 *	vm_map_verify_done:
 *
 *	Releases locks acquired by a vm_map_verify.
 *
 *	This is now a macro in vm/vm_map.h.  It does a
 *	vm_map_unlock_read on the map.
 */

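/*
 * Typical use of the lookup/verify protocol (an illustrative sketch,
 * not code from this file):
 *
 *	kr = vm_map_lookup_locked(&map, vaddr, fault_type, &version,
 *				  &object, &offset, &prot, &wired,
 *				  &behavior, &lo, &hi, &real_map);
 *	... fault in the page, possibly dropping the map lock ...
 *	if (!vm_map_verify(map, &version)) {
 *		... the map changed underneath us: retry the lookup ...
 *	} else {
 *		... done; vm_map_verify_done(map, &version) unlocks ...
 *	}
 */
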
/*
 *	TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *	Goes away after regular vm_region_recurse function migrates to
 *	64 bits
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */

kern_return_t
vm_map_region_recurse_64(
	vm_map_t			map,
	vm_map_offset_t			*address,	/* IN/OUT */
	vm_map_size_t			*size,		/* OUT */
	natural_t			*nesting_depth,	/* IN/OUT */
	vm_region_submap_info_64_t	submap_info,	/* IN/OUT */
	mach_msg_type_number_t		*count)		/* IN/OUT */
{
	vm_region_extended_info_data_t	extended;
	vm_map_entry_t			tmp_entry;
	vm_map_offset_t			user_address;
	unsigned int			user_max_depth;

	/*
	 * "curr_entry" is the VM map entry preceding or including the
	 * address we're looking for.
	 * "curr_map" is the map or sub-map containing "curr_entry".
	 * "curr_offset" is the cumulative offset of "curr_map" in the
	 * target task's address space.
	 * "curr_depth" is the depth of "curr_map" in the chain of
	 * sub-maps.
	 * "curr_max_offset" is the maximum offset we should take into
	 * account in the current map.  It may be smaller than the current
	 * map's "max_offset" because we might not have mapped it all in
	 * the upper level map.
	 */
	vm_map_entry_t			curr_entry;
	vm_map_offset_t			curr_offset;
	vm_map_t			curr_map;
	unsigned int			curr_depth;
	vm_map_offset_t			curr_max_offset;

	/*
	 * "next_" is the same as "curr_" but for the VM region immediately
	 * after the address we're looking for.  We need to keep track of this
	 * too because we want to return info about that region if the
	 * address we're looking for is not mapped.
	 */
	vm_map_entry_t			next_entry;
	vm_map_offset_t			next_offset;
	vm_map_t			next_map;
	unsigned int			next_depth;
	vm_map_offset_t			next_max_offset;

	if (map == VM_MAP_NULL) {
		/* no address space to work on */
		return KERN_INVALID_ARGUMENT;
	}

	if (*count < VM_REGION_SUBMAP_INFO_COUNT_64) {
		/* "info" structure is not big enough and would overflow */
		return KERN_INVALID_ARGUMENT;
	}

	*count = VM_REGION_SUBMAP_INFO_COUNT_64;

	user_address = *address;
	user_max_depth = *nesting_depth;

	curr_entry = NULL;
	curr_map = map;
	curr_offset = 0;
	curr_depth = 0;
	curr_max_offset = curr_map->max_offset;

	next_entry = NULL;
	next_map = NULL;
	next_offset = 0;
	next_depth = 0;
	next_max_offset = curr_max_offset;

	if (not_in_kdp) {
		vm_map_lock_read(curr_map);
	}

	for (;;) {
		if (vm_map_lookup_entry(curr_map,
					user_address - curr_offset,
					&tmp_entry)) {
			/* tmp_entry contains the address we're looking for */
			curr_entry = tmp_entry;
		} else {
			/*
			 * The address is not mapped.  "tmp_entry" is the
			 * map entry preceding the address.  We want the next
			 * one, if it exists.
			 */
			curr_entry = tmp_entry->vme_next;
			if (curr_entry == vm_map_to_entry(curr_map) ||
			    curr_entry->vme_start >= curr_max_offset) {
				/* no next entry at this level: stop looking */
				if (not_in_kdp) {
					vm_map_unlock_read(curr_map);
				}
				curr_entry = NULL;
				curr_map = NULL;
				curr_offset = 0;
				curr_depth = 0;
				curr_max_offset = 0;
				break;
			}
		}

		/*
		 * Is the next entry at this level closer to the address (or
		 * deeper in the submap chain) than the one we had
		 * so far ?
		 */
		tmp_entry = curr_entry->vme_next;
		if (tmp_entry == vm_map_to_entry(curr_map)) {
			/* no next entry at this level */
		} else if (tmp_entry->vme_start >= curr_max_offset) {
			/*
			 * tmp_entry is beyond the scope of what we mapped of
			 * this submap in the upper level: ignore it.
			 */
		} else if ((next_entry == NULL) ||
			   (tmp_entry->vme_start + curr_offset <=
			    next_entry->vme_start + next_offset)) {
			/*
			 * We didn't have a "next_entry" or this one is
			 * closer to the address we're looking for:
			 * use this "tmp_entry" as the new "next_entry".
			 */
			if (next_entry != NULL) {
				/* unlock the last "next_map" */
				if (next_map != curr_map && not_in_kdp) {
					vm_map_unlock_read(next_map);
				}
			}
			next_entry = tmp_entry;
			next_map = curr_map;
			next_offset = curr_offset;
			next_depth = curr_depth;
			next_max_offset = curr_max_offset;
		}

		if (!curr_entry->is_sub_map ||
		    curr_depth >= user_max_depth) {
			/*
			 * We hit a leaf map or we reached the maximum depth
			 * we could, so stop looking.  Keep the current map
			 * locked.
			 */
			break;
		}

		/*
		 * Get down to the next submap level.
		 */

		/*
		 * Lock the next level and unlock the current level,
		 * unless we need to keep it locked to access the "next_entry"
		 * later.
		 */
		if (not_in_kdp) {
			vm_map_lock_read(curr_entry->object.sub_map);
		}
		if (curr_map == next_map) {
			/* keep "next_map" locked in case we need it */
		} else {
			/* release this map */
			vm_map_unlock_read(curr_map);
		}

		/*
		 * Adjust the offset.  "curr_entry" maps the submap
		 * at relative address "curr_entry->vme_start" in the
		 * curr_map but skips the first "curr_entry->offset"
		 * bytes of the submap.
		 * "curr_offset" always represents the offset of a virtual
		 * address in the curr_map relative to the absolute address
		 * space (i.e. the top-level VM map).
		 */
		curr_offset +=
			(curr_entry->vme_start - curr_entry->offset);
		/* switch to the submap */
		curr_map = curr_entry->object.sub_map;
		curr_depth++;
		/*
		 * "curr_max_offset" allows us to keep track of the
		 * portion of the submap that is actually mapped at this level:
		 * the rest of that submap is irrelevant to us, since it's not
		 * mapped here.
		 * The relevant portion of the map starts at
		 * "curr_entry->offset" up to the size of "curr_entry".
		 */
		curr_max_offset =
			curr_entry->vme_end - curr_entry->vme_start +
			curr_entry->offset;
		curr_entry = NULL;
	}

	if (curr_entry == NULL) {
		/* no VM region contains the address... */
		if (next_entry == NULL) {
			/* ... and no VM region follows it either */
			return KERN_INVALID_ADDRESS;
		}
		/* ... gather info about the next VM region */
		curr_entry = next_entry;
		curr_map = next_map;	/* still locked ... */
		curr_offset = next_offset;
		curr_depth = next_depth;
		curr_max_offset = next_max_offset;
	} else {
		/* we won't need "next_entry" after all */
		if (next_entry != NULL) {
			/* release "next_map" */
			if (next_map != curr_map && not_in_kdp) {
				vm_map_unlock_read(next_map);
			}
		}
	}
	next_entry = NULL;
	next_map = NULL;
	next_offset = 0;
	next_depth = 0;
	next_max_offset = 0;

	*nesting_depth = curr_depth;
	*size = curr_entry->vme_end - curr_entry->vme_start;
	*address = curr_entry->vme_start + curr_offset;

	submap_info->user_tag = curr_entry->alias;
	submap_info->offset = curr_entry->offset;
	submap_info->protection = curr_entry->protection;
	submap_info->inheritance = curr_entry->inheritance;
	submap_info->max_protection = curr_entry->max_protection;
	submap_info->behavior = curr_entry->behavior;
	submap_info->user_wired_count = curr_entry->user_wired_count;
	submap_info->is_submap = curr_entry->is_sub_map;
	submap_info->object_id = (uint32_t) curr_entry->object.vm_object;

	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	if (not_in_kdp) {
		if (!curr_entry->is_sub_map) {
			vm_map_region_walk(curr_map,
					   curr_entry->vme_start,
					   curr_entry,
					   curr_entry->offset,
					   (curr_entry->vme_end -
					    curr_entry->vme_start),
					   &extended);
			submap_info->share_mode = extended.share_mode;
			if (extended.external_pager &&
			    extended.ref_count == 2 &&
			    extended.share_mode == SM_SHARED) {
				submap_info->share_mode = SM_PRIVATE;
			}
			submap_info->ref_count = extended.ref_count;
		} else {
			if (curr_entry->use_pmap) {
				submap_info->share_mode = SM_TRUESHARED;
			} else {
				submap_info->share_mode = SM_PRIVATE;
			}
			submap_info->ref_count =
				curr_entry->object.sub_map->ref_count;
		}
	}

	submap_info->pages_resident = extended.pages_resident;
	submap_info->pages_swapped_out = extended.pages_swapped_out;
	submap_info->pages_shared_now_private =
		extended.pages_shared_now_private;
	submap_info->pages_dirtied = extended.pages_dirtied;
	submap_info->external_pager = extended.external_pager;
	submap_info->shadow_depth = extended.shadow_depth;

	if (not_in_kdp) {
		vm_map_unlock_read(curr_map);
	}

	return KERN_SUCCESS;
}
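
/*
 * Illustrative call (a sketch with hypothetical values): walk a task's
 * regions, descending at most one level of submaps:
 *
 *	vm_map_offset_t addr = 0;
 *	vm_map_size_t size;
 *	natural_t depth = 1;
 *	vm_region_submap_info_data_64_t info;
 *	mach_msg_type_number_t cnt = VM_REGION_SUBMAP_INFO_COUNT_64;
 *
 *	kr = vm_map_region_recurse_64(map, &addr, &size, &depth,
 *				      (vm_region_submap_info_64_t)&info,
 *				      &cnt);
 */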

/*
 *	vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map.  Currently, only one flavor is
 *	supported.
 *
 * XXX	The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 */

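/*
 * Illustrative caller sketch (hypothetical values; user tasks normally
 * reach this through the vm_region()/mach_vm_region() MIG interfaces):
 *
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t cnt = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t name;
 *
 *	kr = vm_map_region(map, &addr, &size, VM_REGION_BASIC_INFO_64,
 *			   (vm_region_info_t)&info, &cnt, &name);
 */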
kern_return_t
vm_map_region(
	vm_map_t		map,
	vm_map_offset_t		*address,	/* IN/OUT */
	vm_map_size_t		*size,		/* OUT */
	vm_region_flavor_t	flavor,		/* IN */
	vm_region_info_t	info,		/* OUT */
	mach_msg_type_number_t	*count,		/* IN/OUT */
	mach_port_t		*object_name)	/* OUT */
{
	vm_map_entry_t		tmp_entry;
	vm_map_entry_t		entry;
	vm_map_offset_t		start;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	switch (flavor) {

	case VM_REGION_BASIC_INFO:
		/* legacy for old 32-bit objects info */
	{
		vm_region_basic_info_t	basic;

		if (*count < VM_REGION_BASIC_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		basic = (vm_region_basic_info_t) info;
		*count = VM_REGION_BASIC_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}

		start = entry->vme_start;

		basic->offset = (uint32_t)entry->offset;
		basic->protection = entry->protection;
		basic->inheritance = entry->inheritance;
		basic->max_protection = entry->max_protection;
		basic->behavior = entry->behavior;
		basic->user_wired_count = entry->user_wired_count;
		basic->reserved = entry->is_sub_map;
		*address = start;
		*size = (entry->vme_end - start);

		if (object_name) *object_name = IP_NULL;
		if (entry->is_sub_map) {
			basic->shared = FALSE;
		} else {
			basic->shared = entry->is_shared;
		}

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}

	case VM_REGION_BASIC_INFO_64:
	{
		vm_region_basic_info_64_t	basic;

		if (*count < VM_REGION_BASIC_INFO_COUNT_64)
			return(KERN_INVALID_ARGUMENT);

		basic = (vm_region_basic_info_64_t) info;
		*count = VM_REGION_BASIC_INFO_COUNT_64;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}

		start = entry->vme_start;

		basic->offset = entry->offset;
		basic->protection = entry->protection;
		basic->inheritance = entry->inheritance;
		basic->max_protection = entry->max_protection;
		basic->behavior = entry->behavior;
		basic->user_wired_count = entry->user_wired_count;
		basic->reserved = entry->is_sub_map;
		*address = start;
		*size = (entry->vme_end - start);

		if (object_name) *object_name = IP_NULL;
		if (entry->is_sub_map) {
			basic->shared = FALSE;
		} else {
			basic->shared = entry->is_shared;
		}

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}
	case VM_REGION_EXTENDED_INFO:
	{
		vm_region_extended_info_t	extended;

		if (*count < VM_REGION_EXTENDED_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		extended = (vm_region_extended_info_t) info;
		*count = VM_REGION_EXTENDED_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}
		start = entry->vme_start;

		extended->protection = entry->protection;
		extended->user_tag = entry->alias;
		extended->pages_resident = 0;
		extended->pages_swapped_out = 0;
		extended->pages_shared_now_private = 0;
		extended->pages_dirtied = 0;
		extended->external_pager = 0;
		extended->shadow_depth = 0;

		vm_map_region_walk(map, start, entry, entry->offset, entry->vme_end - start, extended);

		if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
			extended->share_mode = SM_PRIVATE;

		if (object_name)
			*object_name = IP_NULL;
		*address = start;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}
	case VM_REGION_TOP_INFO:
	{
		vm_region_top_info_t	top;

		if (*count < VM_REGION_TOP_INFO_COUNT)
			return(KERN_INVALID_ARGUMENT);

		top = (vm_region_top_info_t) info;
		*count = VM_REGION_TOP_INFO_COUNT;

		vm_map_lock_read(map);

		start = *address;
		if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
			if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
				vm_map_unlock_read(map);
				return(KERN_INVALID_ADDRESS);
			}
		} else {
			entry = tmp_entry;
		}
		start = entry->vme_start;

		top->private_pages_resident = 0;
		top->shared_pages_resident = 0;

		vm_map_region_top_walk(entry, top);

		if (object_name)
			*object_name = IP_NULL;
		*address = start;
		*size = (entry->vme_end - start);

		vm_map_unlock_read(map);
		return(KERN_SUCCESS);
	}
	default:
		return(KERN_INVALID_ARGUMENT);
	}
}

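/*
 *	vm_map_region_top_walk:
 *
 *	Fill in a vm_region_top_info structure for the given map entry:
 *	classify the entry's share mode and count private vs. shared
 *	resident pages, following the object's shadow chain in the
 *	copy-on-write case.
 */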
static void
vm_map_region_top_walk(
	vm_map_entry_t		entry,
	vm_region_top_info_t	top)
{
	register struct vm_object *obj, *tmp_obj;
	register int		ref_count;

	if (entry->object.vm_object == 0 || entry->is_sub_map) {
		top->share_mode = SM_EMPTY;
		top->ref_count = 0;
		top->obj_id = 0;
		return;
	}
	{
		obj = entry->object.vm_object;

		vm_object_lock(obj);

		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
			ref_count--;

		if (obj->shadow) {
			if (ref_count == 1)
				top->private_pages_resident = obj->resident_page_count;
			else
				top->shared_pages_resident = obj->resident_page_count;
			top->ref_count = ref_count;
			top->share_mode = SM_COW;

			while ((tmp_obj = obj->shadow)) {
				vm_object_lock(tmp_obj);
				vm_object_unlock(obj);
				obj = tmp_obj;

				if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
					ref_count--;

				top->shared_pages_resident += obj->resident_page_count;
				top->ref_count += ref_count - 1;
			}
		} else {
			if (entry->needs_copy) {
				top->share_mode = SM_COW;
				top->shared_pages_resident = obj->resident_page_count;
			} else {
				if (ref_count == 1 ||
				    (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
					top->share_mode = SM_PRIVATE;
					top->private_pages_resident = obj->resident_page_count;
				} else {
					top->share_mode = SM_SHARED;
					top->shared_pages_resident = obj->resident_page_count;
				}
			}
			top->ref_count = ref_count;
		}
		top->obj_id = (int)obj;

		vm_object_unlock(obj);
	}
}

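/*
 *	vm_map_region_walk:
 *
 *	Gather extended region info for [offset, offset + range) of the
 *	entry's object: per-page statistics via
 *	vm_map_region_look_for_page(), the shadow chain depth, and the
 *	share mode, including detection of mappings aliased within the
 *	same map (SM_PRIVATE_ALIASED / SM_SHARED_ALIASED).
 */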
static void
vm_map_region_walk(
	vm_map_t			map,
	vm_map_offset_t			va,
	vm_map_entry_t			entry,
	vm_object_offset_t		offset,
	vm_object_size_t		range,
	vm_region_extended_info_t	extended)
{
	register struct vm_object *obj, *tmp_obj;
	register vm_map_offset_t last_offset;
	register int		i;
	register int		ref_count;
	struct vm_object	*shadow_object;
	int			shadow_depth;

	if ((entry->object.vm_object == 0) ||
	    (entry->is_sub_map) ||
	    (entry->object.vm_object->phys_contiguous)) {
		extended->share_mode = SM_EMPTY;
		extended->ref_count = 0;
		return;
	}
	{
		obj = entry->object.vm_object;

		vm_object_lock(obj);

		if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
			ref_count--;

		for (last_offset = offset + range; offset < last_offset; offset += PAGE_SIZE_64, va += PAGE_SIZE)
			vm_map_region_look_for_page(map, va, obj, offset, ref_count, 0, extended);

		shadow_object = obj->shadow;
		shadow_depth = 0;
		if (shadow_object != VM_OBJECT_NULL) {
			vm_object_lock(shadow_object);
			for (;
			     shadow_object != VM_OBJECT_NULL;
			     shadow_depth++) {
				vm_object_t	next_shadow;

				next_shadow = shadow_object->shadow;
				if (next_shadow) {
					vm_object_lock(next_shadow);
				}
				vm_object_unlock(shadow_object);
				shadow_object = next_shadow;
			}
		}
		extended->shadow_depth = shadow_depth;

		if (extended->shadow_depth || entry->needs_copy)
			extended->share_mode = SM_COW;
		else {
			if (ref_count == 1)
				extended->share_mode = SM_PRIVATE;
			else {
				if (obj->true_share)
					extended->share_mode = SM_TRUESHARED;
				else
					extended->share_mode = SM_SHARED;
			}
		}
		extended->ref_count = ref_count - extended->shadow_depth;

		for (i = 0; i < extended->shadow_depth; i++) {
			if ((tmp_obj = obj->shadow) == 0)
				break;
			vm_object_lock(tmp_obj);
			vm_object_unlock(obj);

			if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
				ref_count--;

			extended->ref_count += ref_count;
			obj = tmp_obj;
		}
		vm_object_unlock(obj);

		if (extended->share_mode == SM_SHARED) {
			register vm_map_entry_t	cur;
			register vm_map_entry_t	last;
			int my_refs;

			obj = entry->object.vm_object;
			last = vm_map_to_entry(map);
			my_refs = 0;

			if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
				ref_count--;
			for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
				my_refs += vm_map_region_count_obj_refs(cur, obj);

			if (my_refs == ref_count)
				extended->share_mode = SM_PRIVATE_ALIASED;
			else if (my_refs > 1)
				extended->share_mode = SM_SHARED_ALIASED;
		}
	}
}

91447636 A |
8037 | /* object is locked on entry and locked on return */ |
8038 | ||
8039 | ||
8040 | static void | |
8041 | vm_map_region_look_for_page( | |
8042 | __unused vm_map_t map, | |
8043 | __unused vm_map_offset_t va, | |
8044 | vm_object_t object, | |
8045 | vm_object_offset_t offset, | |
8046 | int max_refcnt, | |
8047 | int depth, | |
8048 | vm_region_extended_info_t extended) | |
1c79356b | 8049 | { |
91447636 A |
8050 | register vm_page_t p; |
8051 | register vm_object_t shadow; | |
8052 | register int ref_count; | |
8053 | vm_object_t caller_object; | |
8054 | ||
8055 | shadow = object->shadow; | |
8056 | caller_object = object; | |
1c79356b | 8057 | |
91447636 A |
8058 | |
8059 | while (TRUE) { | |
1c79356b | 8060 | |
91447636 A |
8061 | if ( !(object->pager_trusted) && !(object->internal)) |
8062 | extended->external_pager = 1; | |
1c79356b | 8063 | |
91447636 A |
8064 | if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { |
8065 | if (shadow && (max_refcnt == 1)) | |
8066 | extended->pages_shared_now_private++; | |
1c79356b | 8067 | |
91447636 A |
8068 | if (!p->fictitious && |
8069 | (p->dirty || pmap_is_modified(p->phys_page))) | |
8070 | extended->pages_dirtied++; | |
1c79356b | 8071 | |
91447636 A |
8072 | extended->pages_resident++; |
8073 | ||
8074 | if(object != caller_object) | |
8075 | vm_object_unlock(object); | |
8076 | ||
8077 | return; | |
1c79356b | 8078 | } |
91447636 A |
8079 | if (object->existence_map) { |
8080 | if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) { | |
1c79356b | 8081 | |
91447636 | 8082 | extended->pages_swapped_out++; |
1c79356b | 8083 | |
91447636 A |
8084 | if(object != caller_object) |
8085 | vm_object_unlock(object); | |
1c79356b | 8086 | |
91447636 A |
8087 | return; |
8088 | } | |
1c79356b | 8089 | } |
91447636 A |
8090 | if (shadow) { |
8091 | vm_object_lock(shadow); | |
1c79356b | 8092 | |
91447636 A |
8093 | if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress) |
8094 | ref_count--; | |
1c79356b | 8095 | |
91447636 A |
8096 | if (++depth > extended->shadow_depth) |
8097 | extended->shadow_depth = depth; | |
1c79356b | 8098 | |
91447636 A |
8099 | if (ref_count > max_refcnt) |
8100 | max_refcnt = ref_count; | |
8101 | ||
8102 | if(object != caller_object) | |
8103 | vm_object_unlock(object); | |
8104 | ||
8105 | offset = offset + object->shadow_offset; | |
8106 | object = shadow; | |
8107 | shadow = object->shadow; | |
8108 | continue; | |
1c79356b | 8109 | } |
91447636 A |
8110 | if(object != caller_object) |
8111 | vm_object_unlock(object); | |
8112 | break; | |
8113 | } | |
8114 | } | |

static int
vm_map_region_count_obj_refs(
	vm_map_entry_t	entry,
	vm_object_t	object)
{
	register int		ref_count;
	register vm_object_t	chk_obj;
	register vm_object_t	tmp_obj;

	if (entry->object.vm_object == 0)
		return(0);

	if (entry->is_sub_map)
		return(0);
	else {
		ref_count = 0;

		chk_obj = entry->object.vm_object;
		vm_object_lock(chk_obj);

		while (chk_obj) {
			if (chk_obj == object)
				ref_count++;
			tmp_obj = chk_obj->shadow;
			if (tmp_obj)
				vm_object_lock(tmp_obj);
			vm_object_unlock(chk_obj);

			chk_obj = tmp_obj;
		}
	}
	return(ref_count);
}

/*
 *	Routine:	vm_map_simplify
 *
 *	Description:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void
vm_map_simplify_entry(
	vm_map_t	map,
	vm_map_entry_t	this_entry)
{
	vm_map_entry_t	prev_entry;

	counter(c_vm_map_simplify_entry_called++);

	prev_entry = this_entry->vme_prev;

	if ((this_entry != vm_map_to_entry(map)) &&
	    (prev_entry != vm_map_to_entry(map)) &&

	    (prev_entry->vme_end == this_entry->vme_start) &&

	    (prev_entry->is_sub_map == FALSE) &&
	    (this_entry->is_sub_map == FALSE) &&

	    (prev_entry->object.vm_object == this_entry->object.vm_object) &&
	    ((prev_entry->offset + (prev_entry->vme_end -
				    prev_entry->vme_start))
	     == this_entry->offset) &&

	    (prev_entry->inheritance == this_entry->inheritance) &&
	    (prev_entry->protection == this_entry->protection) &&
	    (prev_entry->max_protection == this_entry->max_protection) &&
	    (prev_entry->behavior == this_entry->behavior) &&
	    (prev_entry->alias == this_entry->alias) &&
	    (prev_entry->wired_count == this_entry->wired_count) &&
	    (prev_entry->user_wired_count == this_entry->user_wired_count) &&

	    (prev_entry->needs_copy == this_entry->needs_copy) &&

	    (prev_entry->use_pmap == FALSE) &&
	    (this_entry->use_pmap == FALSE) &&
	    (prev_entry->in_transition == FALSE) &&
	    (this_entry->in_transition == FALSE) &&
	    (prev_entry->needs_wakeup == FALSE) &&
	    (this_entry->needs_wakeup == FALSE) &&
	    (prev_entry->is_shared == FALSE) &&
	    (this_entry->is_shared == FALSE)
	    ) {
		_vm_map_entry_unlink(&map->hdr, prev_entry);
		this_entry->vme_start = prev_entry->vme_start;
		this_entry->offset = prev_entry->offset;
		vm_object_deallocate(prev_entry->object.vm_object);
		vm_map_entry_dispose(map, prev_entry);
		SAVE_HINT(map, this_entry);
		counter(c_vm_map_simplified++);
	}
}

void
vm_map_simplify(
	vm_map_t	map,
	vm_map_offset_t	start)
{
	vm_map_entry_t	this_entry;

	vm_map_lock(map);
	if (vm_map_lookup_entry(map, start, &this_entry)) {
		vm_map_simplify_entry(map, this_entry);
		vm_map_simplify_entry(map, this_entry->vme_next);
	}
	counter(c_vm_map_simplify_called++);
	vm_map_unlock(map);
}
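
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this
 * file): after establishing a new kernel mapping, ask the map to coalesce
 * the new entry with a compatible neighbor.  vm_map_simplify() takes the
 * map lock itself; vm_map_simplify_entry() and vm_map_simplify_range()
 * expect the caller to already hold it for write.
 *
 *	vm_map_offset_t	addr;	// hypothetical: start of a fresh mapping
 *
 *	vm_map_simplify(kernel_map, addr);
 */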

static void
vm_map_simplify_range(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end)
{
	vm_map_entry_t	entry;

	/*
	 * The map should be locked (for "write") by the caller.
	 */

	if (start >= end) {
		/* invalid address range */
		return;
	}

	if (!vm_map_lookup_entry(map, start, &entry)) {
		/* "start" is not mapped and "entry" ends before "start" */
		if (entry == vm_map_to_entry(map)) {
			/* start with first entry in the map */
			entry = vm_map_first_entry(map);
		} else {
			/* start with next entry */
			entry = entry->vme_next;
		}
	}

	while (entry != vm_map_to_entry(map) &&
	       entry->vme_start <= end) {
		/* try and coalesce "entry" with its previous entry */
		vm_map_simplify_entry(map, entry);
		entry = entry->vme_next;
	}
}

/*
 *	Routine:	vm_map_machine_attribute
 *	Purpose:
 *		Provide machine-specific attributes to mappings,
 *		such as cacheability, for machines that provide
 *		them.  NUMA architectures and machines with big/strange
 *		caches will use this.
 *	Note:
 *		Responsibilities for locking and checking are handled here;
 *		everything else is handled in the pmap module.  If any
 *		non-volatile information must be kept, the pmap module
 *		should handle it itself.  [This assumes that attributes do
 *		not need to be inherited, which seems ok to me]
 */
kern_return_t
vm_map_machine_attribute(
	vm_map_t			map,
	vm_map_offset_t			start,
	vm_map_offset_t			end,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	kern_return_t	ret;
	vm_map_size_t	sync_size;
	vm_map_entry_t	entry;

	if (start < vm_map_min(map) || end > vm_map_max(map))
		return KERN_INVALID_ADDRESS;

	/* Figure how much memory we need to flush (in page increments) */
	sync_size = end - start;

	vm_map_lock(map);

	if (attribute != MATTR_CACHE) {
		/* If we don't have to find physical addresses, we */
		/* don't have to do an explicit traversal here.    */
		ret = pmap_attribute(map->pmap, start, end - start,
				     attribute, value);
		vm_map_unlock(map);
		return ret;
	}

	ret = KERN_SUCCESS;			/* Assume it all worked */

	while (sync_size) {
		if (vm_map_lookup_entry(map, start, &entry)) {
			vm_map_size_t	sub_size;
			if ((entry->vme_end - start) > sync_size) {
				sub_size = sync_size;
				sync_size = 0;
			} else {
				sub_size = entry->vme_end - start;
				sync_size -= sub_size;
			}
			if (entry->is_sub_map) {
				vm_map_offset_t	sub_start;
				vm_map_offset_t	sub_end;

				sub_start = (start - entry->vme_start)
					+ entry->offset;
				sub_end = sub_start + sub_size;
				vm_map_machine_attribute(
					entry->object.sub_map,
					sub_start,
					sub_end,
					attribute, value);
			} else {
				if (entry->object.vm_object) {
					vm_page_t		m;
					vm_object_t		object;
					vm_object_t		base_object;
					vm_object_t		last_object;
					vm_object_offset_t	offset;
					vm_object_offset_t	base_offset;
					vm_map_size_t		range;
					range = sub_size;
					offset = (start - entry->vme_start)
						+ entry->offset;
					base_offset = offset;
					object = entry->object.vm_object;
					base_object = object;
					last_object = NULL;

					vm_object_lock(object);

					while (range) {
						m = vm_page_lookup(
							object, offset);

						if (m && !m->fictitious) {
							ret =
								pmap_attribute_cache_sync(
									m->phys_page,
									PAGE_SIZE,
									attribute, value);

						} else if (object->shadow) {
							offset = offset + object->shadow_offset;
							last_object = object;
							object = object->shadow;
							vm_object_lock(last_object->shadow);
							vm_object_unlock(last_object);
							continue;
						}
						range -= PAGE_SIZE;

						if (base_object != object) {
							vm_object_unlock(object);
							vm_object_lock(base_object);
							object = base_object;
						}
						/* Bump to the next page */
						base_offset += PAGE_SIZE;
						offset = base_offset;
					}
					vm_object_unlock(object);
				}
			}
			start += sub_size;
		} else {
			vm_map_unlock(map);
			return KERN_FAILURE;
		}

	}

	vm_map_unlock(map);

	return ret;
}
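
/*
 * Illustrative sketch (an assumed caller, not from this file): flush the
 * data cache for a buffer about to be handed to a device that does not
 * snoop the cache.  MATTR_CACHE and MATTR_VAL_CACHE_FLUSH are defined in
 * <mach/vm_attributes.h>; "buf_start" and "buf_size" are hypothetical.
 *
 *	vm_machine_attribute_val_t	val = MATTR_VAL_CACHE_FLUSH;
 *	kern_return_t			kr;
 *
 *	kr = vm_map_machine_attribute(kernel_map, buf_start,
 *				      buf_start + buf_size,
 *				      MATTR_CACHE, &val);
 */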

/*
 *	vm_map_behavior_set:
 *
 *	Sets the paging reference behavior of the specified address
 *	range in the target map.  Paging reference behavior affects
 *	how pagein operations resulting from faults on the map will be
 *	clustered.
 */
kern_return_t
vm_map_behavior_set(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end,
	vm_behavior_t	new_behavior)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		temp_entry;

	XPR(XPR_VM_MAP,
	    "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d",
	    (integer_t)map, start, end, new_behavior, 0);

	switch (new_behavior) {
	case VM_BEHAVIOR_DEFAULT:
	case VM_BEHAVIOR_RANDOM:
	case VM_BEHAVIOR_SEQUENTIAL:
	case VM_BEHAVIOR_RSEQNTL:
		break;
	case VM_BEHAVIOR_WILLNEED:
	case VM_BEHAVIOR_DONTNEED:
		new_behavior = VM_BEHAVIOR_DEFAULT;
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	vm_map_lock(map);

	/*
	 * The entire address range must be valid for the map.
	 * Note that vm_map_range_check() does a
	 * vm_map_lookup_entry() internally and returns the
	 * entry containing the start of the address range if
	 * the entire range is valid.
	 */
	if (vm_map_range_check(map, start, end, &temp_entry)) {
		entry = temp_entry;
		vm_map_clip_start(map, entry, start);
	}
	else {
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
		vm_map_clip_end(map, entry, end);

		entry->behavior = new_behavior;

		entry = entry->vme_next;
	}

	vm_map_unlock(map);
	return(KERN_SUCCESS);
}
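
/*
 * Illustrative sketch (assumed caller): the madvise()-style hint that a
 * region will be touched sequentially, so fault-time pageins may be
 * clustered ahead of the faulting address.  "start" and "size" are
 * hypothetical, page-aligned values.
 *
 *	kr = vm_map_behavior_set(current_map(), start, start + size,
 *				 VM_BEHAVIOR_SEQUENTIAL);
 */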


#include <mach_kdb.h>
#if	MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>

#define	printf	db_printf

/*
 * Forward declarations for internal functions.
 */
extern void vm_map_links_print(
	struct vm_map_links	*links);

extern void vm_map_header_print(
	struct vm_map_header	*header);

extern void vm_map_entry_print(
	vm_map_entry_t		entry);

extern void vm_follow_entry(
	vm_map_entry_t		entry);

extern void vm_follow_map(
	vm_map_t		map);

/*
 *	vm_map_links_print:	[ debug ]
 */
void
vm_map_links_print(
	struct vm_map_links	*links)
{
	iprintf("prev = %08X  next = %08X  start = %016llX  end = %016llX\n",
		links->prev,
		links->next,
		(unsigned long long)links->start,
		(unsigned long long)links->end);
}

/*
 *	vm_map_header_print:	[ debug ]
 */
void
vm_map_header_print(
	struct vm_map_header	*header)
{
	vm_map_links_print(&header->links);
	iprintf("nentries = %08X, %sentries_pageable\n",
		header->nentries,
		(header->entries_pageable ? "" : "!"));
}

/*
 *	vm_follow_entry:	[ debug ]
 */
void
vm_follow_entry(
	vm_map_entry_t entry)
{
	int shadows;

	iprintf("map entry %08X\n", entry);

	db_indent += 2;

	shadows = vm_follow_object(entry->object.vm_object);
	iprintf("Total objects : %d\n", shadows);

	db_indent -= 2;
}

/*
 *	vm_map_entry_print:	[ debug ]
 */
void
vm_map_entry_print(
	register vm_map_entry_t	entry)
{
	static const char *inheritance_name[4] =
		{ "share", "copy", "none", "?"};
	static const char *behavior_name[4] =
		{ "dflt", "rand", "seqtl", "rseqntl" };

	iprintf("map entry %08X - prev = %08X  next = %08X\n", entry, entry->vme_prev, entry->vme_next);

	db_indent += 2;

	vm_map_links_print(&entry->links);

	iprintf("start = %016llX  end = %016llX - prot=%x/%x/%s\n",
		(unsigned long long)entry->vme_start,
		(unsigned long long)entry->vme_end,
		entry->protection,
		entry->max_protection,
		inheritance_name[(entry->inheritance & 0x3)]);

	iprintf("behavior = %s, wired_count = %d, user_wired_count = %d\n",
		behavior_name[(entry->behavior & 0x3)],
		entry->wired_count,
		entry->user_wired_count);
	iprintf("%sin_transition, %sneeds_wakeup\n",
		(entry->in_transition ? "" : "!"),
		(entry->needs_wakeup ? "" : "!"));

	if (entry->is_sub_map) {
		iprintf("submap = %08X - offset = %016llX\n",
			entry->object.sub_map,
			(unsigned long long)entry->offset);
	} else {
		iprintf("object = %08X  offset = %016llX - ",
			entry->object.vm_object,
			(unsigned long long)entry->offset);
		printf("%sis_shared, %sneeds_copy\n",
		       (entry->is_shared ? "" : "!"),
		       (entry->needs_copy ? "" : "!"));
	}

	db_indent -= 2;
}

/*
 *	vm_follow_map:	[ debug ]
 */
void
vm_follow_map(
	vm_map_t map)
{
	register vm_map_entry_t	entry;

	iprintf("task map %08X\n", map);

	db_indent += 2;

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_follow_entry(entry);
	}

	db_indent -= 2;
}

/*
 *	vm_map_print:	[ debug ]
 */
void
vm_map_print(
	db_addr_t inmap)
{
	register vm_map_entry_t	entry;
	vm_map_t map;
#if TASK_SWAPPER
	char *swstate;
#endif /* TASK_SWAPPER */

	map = (vm_map_t)(long)
		inmap;	/* Make sure we have the right type */

	iprintf("task map %08X\n", map);

	db_indent += 2;

	vm_map_header_print(&map->hdr);

	iprintf("pmap = %08X  size = %08X  ref = %d  hint = %08X  first_free = %08X\n",
		map->pmap,
		map->size,
		map->ref_count,
		map->hint,
		map->first_free);

	iprintf("%swait_for_space, %swiring_required, timestamp = %d\n",
		(map->wait_for_space ? "" : "!"),
		(map->wiring_required ? "" : "!"),
		map->timestamp);

#if	TASK_SWAPPER
	switch (map->sw_state) {
	case MAP_SW_IN:
		swstate = "SW_IN";
		break;
	case MAP_SW_OUT:
		swstate = "SW_OUT";
		break;
	default:
		swstate = "????";
		break;
	}
	iprintf("res = %d, sw_state = %s\n", map->res_count, swstate);
#endif	/* TASK_SWAPPER */

	for (entry = vm_map_first_entry(map);
	     entry && entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_map_entry_print(entry);
	}

	db_indent -= 2;
}

/*
 *	Routine:	vm_map_copy_print
 *	Purpose:
 *		Pretty-print a copy object for ddb.
 */

void
vm_map_copy_print(
	db_addr_t	incopy)
{
	vm_map_copy_t copy;
	vm_map_entry_t entry;

	copy = (vm_map_copy_t)(long)
		incopy;	/* Make sure we have the right type */

	printf("copy object 0x%x\n", copy);

	db_indent += 2;

	iprintf("type=%d", copy->type);
	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		printf("[entry_list]");
		break;

	case VM_MAP_COPY_OBJECT:
		printf("[object]");
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		printf("[kernel_buffer]");
		break;

	default:
		printf("[bad type]");
		break;
	}
	printf(", offset=0x%llx", (unsigned long long)copy->offset);
	printf(", size=0x%x\n", copy->size);

	switch (copy->type) {
	case VM_MAP_COPY_ENTRY_LIST:
		vm_map_header_print(&copy->cpy_hdr);
		for (entry = vm_map_copy_first_entry(copy);
		     entry && entry != vm_map_copy_to_entry(copy);
		     entry = entry->vme_next) {
			vm_map_entry_print(entry);
		}
		break;

	case VM_MAP_COPY_OBJECT:
		iprintf("object=0x%x\n", copy->cpy_object);
		break;

	case VM_MAP_COPY_KERNEL_BUFFER:
		iprintf("kernel buffer=0x%x", copy->cpy_kdata);
		printf(", kalloc_size=0x%x\n", copy->cpy_kalloc_size);
		break;

	}

	db_indent -= 2;
}

/*
 *	db_vm_map_total_size(map)	[ debug ]
 *
 *	return the total virtual size (in bytes) of the map
 */
vm_map_size_t
db_vm_map_total_size(
	db_addr_t	inmap)
{
	vm_map_entry_t	entry;
	vm_map_size_t	total;
	vm_map_t	map;

	map = (vm_map_t)(long)
		inmap;	/* Make sure we have the right type */

	total = 0;
	for (entry = vm_map_first_entry(map);
	     entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		total += entry->vme_end - entry->vme_start;
	}

	return total;
}

#endif	/* MACH_KDB */

/*
 *	Routine:	vm_map_entry_insert
 *
 *	Description:	This routine inserts a new vm_map entry in a locked map.
 */
vm_map_entry_t
vm_map_entry_insert(
	vm_map_t		map,
	vm_map_entry_t		insp_entry,
	vm_map_offset_t		start,
	vm_map_offset_t		end,
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		needs_copy,
	boolean_t		is_shared,
	boolean_t		in_transition,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_behavior_t		behavior,
	vm_inherit_t		inheritance,
	unsigned		wired_count)
{
	vm_map_entry_t	new_entry;

	assert(insp_entry != (vm_map_entry_t)0);

	new_entry = vm_map_entry_create(map);

	new_entry->vme_start = start;
	new_entry->vme_end = end;
	assert(page_aligned(new_entry->vme_start));
	assert(page_aligned(new_entry->vme_end));

	new_entry->object.vm_object = object;
	new_entry->offset = offset;
	new_entry->is_shared = is_shared;
	new_entry->is_sub_map = FALSE;
	new_entry->needs_copy = needs_copy;
	new_entry->in_transition = in_transition;
	new_entry->needs_wakeup = FALSE;
	new_entry->inheritance = inheritance;
	new_entry->protection = cur_protection;
	new_entry->max_protection = max_protection;
	new_entry->behavior = behavior;
	new_entry->wired_count = wired_count;
	new_entry->user_wired_count = 0;
	new_entry->use_pmap = FALSE;

	/*
	 *	Insert the new entry into the list.
	 */

	vm_map_entry_link(map, insp_entry, new_entry);
	map->size += end - start;

	/*
	 *	Update the free space hint and the lookup hint.
	 */

	SAVE_HINT(map, new_entry);
	return new_entry;
}
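
/*
 * Illustrative sketch (hypothetical values): insert a wired, non-shared
 * entry after "where" into a map the caller has already locked for write;
 * the object reference must have been taken beforehand.
 *
 *	new = vm_map_entry_insert(map, where, start, end,
 *				  object, offset,
 *				  FALSE, FALSE, FALSE,	// copy/shared/transition
 *				  VM_PROT_DEFAULT, VM_PROT_ALL,
 *				  VM_BEHAVIOR_DEFAULT,
 *				  VM_INHERIT_DEFAULT, 1);
 */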

/*
 *	Routine:	vm_map_remap_extract
 *
 *	Description:	This routine returns a vm_map entry list from a map.
 */
static kern_return_t
vm_map_remap_extract(
	vm_map_t		map,
	vm_map_offset_t		addr,
	vm_map_size_t		size,
	boolean_t		copy,
	struct vm_map_header	*map_header,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	/* What, no behavior? */
	vm_inherit_t		inheritance,
	boolean_t		pageable)
{
	kern_return_t		result;
	vm_map_size_t		mapped_size;
	vm_map_size_t		tmp_size;
	vm_map_entry_t		src_entry;	/* result of last map lookup */
	vm_map_entry_t		new_entry;
	vm_object_offset_t	offset;
	vm_map_offset_t		map_address;
	vm_map_offset_t		src_start;	/* start of entry to map */
	vm_map_offset_t		src_end;	/* end of region to be mapped */
	vm_object_t		object;
	vm_map_version_t	version;
	boolean_t		src_needs_copy;
	boolean_t		new_entry_needs_copy;

	assert(map != VM_MAP_NULL);
	assert(size != 0 && size == vm_map_round_page(size));
	assert(inheritance == VM_INHERIT_NONE ||
	       inheritance == VM_INHERIT_COPY ||
	       inheritance == VM_INHERIT_SHARE);

	/*
	 *	Compute start and end of region.
	 */
	src_start = vm_map_trunc_page(addr);
	src_end = vm_map_round_page(src_start + size);

	/*
	 *	Initialize map_header.
	 */
	map_header->links.next = (struct vm_map_entry *)&map_header->links;
	map_header->links.prev = (struct vm_map_entry *)&map_header->links;
	map_header->nentries = 0;
	map_header->entries_pageable = pageable;

	*cur_protection = VM_PROT_ALL;
	*max_protection = VM_PROT_ALL;

	map_address = 0;
	mapped_size = 0;
	result = KERN_SUCCESS;

	/*
	 *	The specified source virtual space might correspond to
	 *	multiple map entries, so we need to loop on them.
	 */
	vm_map_lock(map);
	while (mapped_size != size) {
		vm_map_size_t	entry_size;

		/*
		 *	Find the beginning of the region.
		 */
		if (!vm_map_lookup_entry(map, src_start, &src_entry)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if (src_start < src_entry->vme_start ||
		    (mapped_size && src_start != src_entry->vme_start)) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		if (src_entry->is_sub_map) {
			result = KERN_INVALID_ADDRESS;
			break;
		}

		tmp_size = size - mapped_size;
		if (src_end > src_entry->vme_end)
			tmp_size -= (src_end - src_entry->vme_end);

		entry_size = (vm_map_size_t)(src_entry->vme_end -
					     src_entry->vme_start);

		if (src_entry->is_sub_map) {
			vm_map_reference(src_entry->object.sub_map);
			object = VM_OBJECT_NULL;
		} else {
			object = src_entry->object.vm_object;

			if (object == VM_OBJECT_NULL) {
				object = vm_object_allocate(entry_size);
				src_entry->offset = 0;
				src_entry->object.vm_object = object;
			} else if (object->copy_strategy !=
				   MEMORY_OBJECT_COPY_SYMMETRIC) {
				/*
				 *	We are already using an asymmetric
				 *	copy, and therefore we already have
				 *	the right object.
				 */
				assert(!src_entry->needs_copy);
			} else if (src_entry->needs_copy || object->shadowed ||
				   (object->internal && !object->true_share &&
				    !src_entry->is_shared &&
				    object->size > entry_size)) {

				vm_object_shadow(&src_entry->object.vm_object,
						 &src_entry->offset,
						 entry_size);

				if (!src_entry->needs_copy &&
				    (src_entry->protection & VM_PROT_WRITE)) {
					if (map->mapped) {
						vm_object_pmap_protect(
							src_entry->object.vm_object,
							src_entry->offset,
							entry_size,
							PMAP_NULL,
							src_entry->vme_start,
							src_entry->protection &
							~VM_PROT_WRITE);
					} else {
						pmap_protect(vm_map_pmap(map),
							     src_entry->vme_start,
							     src_entry->vme_end,
							     src_entry->protection &
							     ~VM_PROT_WRITE);
					}
				}

				object = src_entry->object.vm_object;
				src_entry->needs_copy = FALSE;
			}


			vm_object_lock(object);
			object->ref_count++;	/* object ref. for new entry */
			VM_OBJ_RES_INCR(object);
			if (object->copy_strategy ==
			    MEMORY_OBJECT_COPY_SYMMETRIC) {
				object->copy_strategy =
					MEMORY_OBJECT_COPY_DELAY;
			}
			vm_object_unlock(object);
		}

		offset = src_entry->offset + (src_start - src_entry->vme_start);

		new_entry = _vm_map_entry_create(map_header);
		vm_map_entry_copy(new_entry, src_entry);
		new_entry->use_pmap = FALSE; /* clr address space specifics */

		new_entry->vme_start = map_address;
		new_entry->vme_end = map_address + tmp_size;
		new_entry->inheritance = inheritance;
		new_entry->offset = offset;

		/*
		 *	The new region has to be copied now if required.
		 */
	RestartCopy:
		if (!copy) {
			src_entry->is_shared = TRUE;
			new_entry->is_shared = TRUE;
			if (!(new_entry->is_sub_map))
				new_entry->needs_copy = FALSE;

		} else if (src_entry->is_sub_map) {
			/* make this a COW sub_map if not already */
			new_entry->needs_copy = TRUE;
			object = VM_OBJECT_NULL;
		} else if (src_entry->wired_count == 0 &&
			   vm_object_copy_quickly(&new_entry->object.vm_object,
						  new_entry->offset,
						  (new_entry->vme_end -
						   new_entry->vme_start),
						  &src_needs_copy,
						  &new_entry_needs_copy)) {

			new_entry->needs_copy = new_entry_needs_copy;
			new_entry->is_shared = FALSE;

			/*
			 *	Handle copy-on-write semantics.
			 */
			if (src_needs_copy && !src_entry->needs_copy) {
				vm_object_pmap_protect(object,
						       offset,
						       entry_size,
						       ((src_entry->is_shared
							 || map->mapped) ?
							PMAP_NULL : map->pmap),
						       src_entry->vme_start,
						       src_entry->protection &
						       ~VM_PROT_WRITE);

				src_entry->needs_copy = TRUE;
			}
			/*
			 *	Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

		} else {
			new_entry->is_shared = FALSE;

			/*
			 *	The map can be safely unlocked since we
			 *	already hold a reference on the object.
			 *
			 *	Record the timestamp of the map for later
			 *	verification, and unlock the map.
			 */
			version.main_timestamp = map->timestamp;
			vm_map_unlock(map);	/* Increments timestamp once! */

			/*
			 *	Perform the copy.
			 */
			if (src_entry->wired_count > 0) {
				vm_object_lock(object);
				result = vm_object_copy_slowly(
					object,
					offset,
					entry_size,
					THREAD_UNINT,
					&new_entry->object.vm_object);

				new_entry->offset = 0;
				new_entry->needs_copy = FALSE;
			} else {
				result = vm_object_copy_strategically(
					object,
					offset,
					entry_size,
					&new_entry->object.vm_object,
					&new_entry->offset,
					&new_entry_needs_copy);

				new_entry->needs_copy = new_entry_needs_copy;
			}

			/*
			 *	Throw away the old object reference of the new entry.
			 */
			vm_object_deallocate(object);

			if (result != KERN_SUCCESS &&
			    result != KERN_MEMORY_RESTART_COPY) {
				_vm_map_entry_dispose(map_header, new_entry);
				break;
			}

			/*
			 *	Verify that the map has not substantially
			 *	changed while the copy was being made.
			 */

			vm_map_lock(map);
			if (version.main_timestamp + 1 != map->timestamp) {
				/*
				 *	Simple version comparison failed.
				 *
				 *	Retry the lookup and verify that the
				 *	same object/offset are still present.
				 */
				vm_object_deallocate(new_entry->
						     object.vm_object);
				_vm_map_entry_dispose(map_header, new_entry);
				if (result == KERN_MEMORY_RESTART_COPY)
					result = KERN_SUCCESS;
				continue;
			}

			if (result == KERN_MEMORY_RESTART_COPY) {
				vm_object_reference(object);
				goto RestartCopy;
			}
		}

		_vm_map_entry_link(map_header,
				   map_header->links.prev, new_entry);

		*cur_protection &= src_entry->protection;
		*max_protection &= src_entry->max_protection;

		map_address += tmp_size;
		mapped_size += tmp_size;
		src_start += tmp_size;

	} /* end while */

	vm_map_unlock(map);
	if (result != KERN_SUCCESS) {
		/*
		 *	Free all allocated elements.
		 */
		for (src_entry = map_header->links.next;
		     src_entry != (struct vm_map_entry *)&map_header->links;
		     src_entry = new_entry) {
			new_entry = src_entry->vme_next;
			_vm_map_entry_unlink(map_header, src_entry);
			vm_object_deallocate(src_entry->object.vm_object);
			_vm_map_entry_dispose(map_header, src_entry);
		}
	}
	return result;
}

/*
 *	Routine:	vm_map_remap
 *
 *		Map a portion of a task's address space.
 *		The mapped region must not overlap more than
 *		one vm memory object.  Protections and
 *		inheritance attributes remain the same
 *		as in the original task and are out parameters.
 *		Source and target tasks can be identical.
 *		Other attributes are the same as for vm_map().
 */
kern_return_t
vm_map_remap(
	vm_map_t		target_map,
	vm_map_address_t	*address,
	vm_map_size_t		size,
	vm_map_offset_t		mask,
	boolean_t		anywhere,
	vm_map_t		src_map,
	vm_map_offset_t		memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	kern_return_t		result;
	vm_map_entry_t		entry;
	vm_map_entry_t		insp_entry;
	vm_map_entry_t		new_entry;
	struct vm_map_header	map_header;

	if (target_map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	switch (inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		if (size != 0 && src_map != VM_MAP_NULL)
			break;
		/*FALL THRU*/
	default:
		return KERN_INVALID_ARGUMENT;
	}

	size = vm_map_round_page(size);

	result = vm_map_remap_extract(src_map, memory_address,
				      size, copy, &map_header,
				      cur_protection,
				      max_protection,
				      inheritance,
				      target_map->hdr.
				      entries_pageable);

	if (result != KERN_SUCCESS) {
		return result;
	}

	/*
	 * Allocate/check a range of free virtual address
	 * space for the target
	 */
	*address = vm_map_trunc_page(*address);
	vm_map_lock(target_map);
	result = vm_map_remap_range_allocate(target_map, address, size,
					     mask, anywhere, &insp_entry);

	for (entry = map_header.links.next;
	     entry != (struct vm_map_entry *)&map_header.links;
	     entry = new_entry) {
		new_entry = entry->vme_next;
		_vm_map_entry_unlink(&map_header, entry);
		if (result == KERN_SUCCESS) {
			entry->vme_start += *address;
			entry->vme_end += *address;
			vm_map_entry_link(target_map, insp_entry, entry);
			insp_entry = entry;
		} else {
			if (!entry->is_sub_map) {
				vm_object_deallocate(entry->object.vm_object);
			} else {
				vm_map_deallocate(entry->object.sub_map);
			}
			_vm_map_entry_dispose(&map_header, entry);
		}
	}

	if (result == KERN_SUCCESS) {
		target_map->size += size;
		SAVE_HINT(target_map, insp_entry);
	}
	vm_map_unlock(target_map);

	if (result == KERN_SUCCESS && target_map->wiring_required)
		result = vm_map_wire(target_map, *address,
				     *address + size, *cur_protection, TRUE);
	return result;
}
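
/*
 * Illustrative sketch (assumed values, not from this file): mirror a
 * page-aligned region of src_map into target_map at a kernel-chosen
 * address, sharing rather than copying the underlying objects; the
 * resulting protections come back in "cur" and "max".
 *
 *	vm_map_address_t	target_addr = 0;
 *	vm_prot_t		cur, max;
 *
 *	kr = vm_map_remap(target_map, &target_addr, size, 0, TRUE,
 *			  src_map, src_addr, FALSE,
 *			  &cur, &max, VM_INHERIT_SHARE);
 */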

/*
 *	Routine:	vm_map_remap_range_allocate
 *
 *	Description:
 *		Allocate a range in the specified virtual address map.
 *		Returns the address and the map entry just before the
 *		allocated range.
 *
 *	Map must be locked.
 */

static kern_return_t
vm_map_remap_range_allocate(
	vm_map_t		map,
	vm_map_address_t	*address,	/* IN/OUT */
	vm_map_size_t		size,
	vm_map_offset_t		mask,
	boolean_t		anywhere,
	vm_map_entry_t		*map_entry)	/* OUT */
{
	register vm_map_entry_t		entry;
	register vm_map_offset_t	start;
	register vm_map_offset_t	end;

StartAgain: ;

	start = *address;

	if (anywhere)
	{
		/*
		 *	Calculate the first possible address.
		 */

		if (start < map->min_offset)
			start = map->min_offset;
		if (start > map->max_offset)
			return(KERN_NO_SPACE);

		/*
		 *	Look for the first possible address;
		 *	if there's already something at this
		 *	address, we have to start after it.
		 */

		assert(first_free_is_valid(map));
		if (start == map->min_offset) {
			if ((entry = map->first_free) != vm_map_to_entry(map))
				start = entry->vme_end;
		} else {
			vm_map_entry_t	tmp_entry;
			if (vm_map_lookup_entry(map, start, &tmp_entry))
				start = tmp_entry->vme_end;
			entry = tmp_entry;
		}

		/*
		 *	In any case, the "entry" always precedes
		 *	the proposed new region throughout the
		 *	loop:
		 */

		while (TRUE) {
			register vm_map_entry_t	next;

			/*
			 *	Find the end of the proposed new region.
			 *	Be sure we didn't go beyond the end, or
			 *	wrap around the address.
			 */

			end = ((start + mask) & ~mask);
			if (end < start)
				return(KERN_NO_SPACE);
			start = end;
			end += size;

			if ((end > map->max_offset) || (end < start)) {
				if (map->wait_for_space) {
					if (size <= (map->max_offset -
						     map->min_offset)) {
						assert_wait((event_t) map, THREAD_INTERRUPTIBLE);
						vm_map_unlock(map);
						thread_block(THREAD_CONTINUE_NULL);
						vm_map_lock(map);
						goto StartAgain;
					}
				}

				return(KERN_NO_SPACE);
			}

			/*
			 *	If there are no more entries, we must win.
			 */

			next = entry->vme_next;
			if (next == vm_map_to_entry(map))
				break;

			/*
			 *	If there is another entry, it must be
			 *	after the end of the potential new region.
			 */

			if (next->vme_start >= end)
				break;

			/*
			 *	Didn't fit -- move to the next entry.
			 */

			entry = next;
			start = entry->vme_end;
		}
		*address = start;
	} else {
		vm_map_entry_t	temp_entry;

		/*
		 *	Verify that:
		 *		the address doesn't itself violate
		 *		the mask requirement.
		 */

		if ((start & mask) != 0)
			return(KERN_NO_SPACE);


		/*
		 *	...	the address is within bounds
		 */

		end = start + size;

		if ((start < map->min_offset) ||
		    (end > map->max_offset) ||
		    (start >= end)) {
			return(KERN_INVALID_ADDRESS);
		}

		/*
		 *	...	the starting address isn't allocated
		 */

		if (vm_map_lookup_entry(map, start, &temp_entry))
			return(KERN_NO_SPACE);

		entry = temp_entry;

		/*
		 *	...	the next region doesn't overlap the
		 *		end point.
		 */

		if ((entry->vme_next != vm_map_to_entry(map)) &&
		    (entry->vme_next->vme_start < end))
			return(KERN_NO_SPACE);
	}
	*map_entry = entry;
	return(KERN_SUCCESS);
}

/*
 *	vm_map_switch:
 *
 *	Set the address map for the current thread to the specified map
 */

vm_map_t
vm_map_switch(
	vm_map_t	map)
{
	int		mycpu;
	thread_t	thread = current_thread();
	vm_map_t	oldmap = thread->map;

	mp_disable_preemption();
	mycpu = cpu_number();

	/*
	 *	Deactivate the current map and activate the requested map
	 */
	PMAP_SWITCH_USER(thread, map, mycpu);

	mp_enable_preemption();
	return(oldmap);
}
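
/*
 * Illustrative sketch: this is the pattern vm_map_write_user() below
 * uses -- temporarily take on another map's identity around a copyout,
 * then restore the original map and drop the extra reference.
 *
 *	vm_map_t oldmap;
 *
 *	vm_map_reference(map);
 *	oldmap = vm_map_switch(map);
 *	(void) copyout(src, dst_addr, size);
 *	vm_map_switch(oldmap);
 *	vm_map_deallocate(map);
 */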


/*
 *	Routine:	vm_map_write_user
 *
 *	Description:
 *		Copy out data from a kernel space into space in the
 *		destination map.  The space must already exist in the
 *		destination map.
 *		NOTE:  This routine should only be called by threads
 *		which can block on a page fault, i.e. kernel-mode user
 *		threads.
 *
 */
kern_return_t
vm_map_write_user(
	vm_map_t		map,
	void			*src_p,
	vm_map_address_t	dst_addr,
	vm_size_t		size)
{
	kern_return_t	kr = KERN_SUCCESS;

	if (current_map() == map) {
		if (copyout(src_p, dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyout(src_p, dst_addr, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
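
/*
 * Illustrative sketch (hypothetical names): push a kernel structure out
 * to a known user address in another task's map.
 *
 *	struct some_info	kinfo;	// hypothetical type
 *
 *	kr = vm_map_write_user(task_map, &kinfo, user_addr,
 *			       sizeof (kinfo));
 */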

/*
 *	Routine:	vm_map_read_user
 *
 *	Description:
 *		Copy in data from a user space source map into the
 *		kernel map.  The space must already exist in the
 *		kernel map.
 *		NOTE:  This routine should only be called by threads
 *		which can block on a page fault, i.e. kernel-mode user
 *		threads.
 *
 */
kern_return_t
vm_map_read_user(
	vm_map_t		map,
	vm_map_address_t	src_addr,
	void			*dst_p,
	vm_size_t		size)
{
	kern_return_t	kr = KERN_SUCCESS;

	if (current_map() == map) {
		if (copyin(src_addr, dst_p, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
	} else {
		vm_map_t	oldmap;

		/* take on the identity of the target map while doing */
		/* the transfer */

		vm_map_reference(map);
		oldmap = vm_map_switch(map);
		if (copyin(src_addr, dst_p, size)) {
			kr = KERN_INVALID_ADDRESS;
		}
		vm_map_switch(oldmap);
		vm_map_deallocate(map);
	}
	return kr;
}
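
/*
 * Illustrative sketch (same hypothetical names as above): the read
 * direction is symmetric.
 *
 *	kr = vm_map_read_user(task_map, user_addr, &kinfo, sizeof (kinfo));
 */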


/*
 *	vm_map_check_protection:
 *
 *	Assert that the target map allows the specified
 *	privilege on the entire address region given.
 *	The entire region must be allocated.
 */
boolean_t
vm_map_check_protection(
	register vm_map_t		map,
	register vm_map_offset_t	start,
	register vm_map_offset_t	end,
	register vm_prot_t		protection)
{
	register vm_map_entry_t	entry;
	vm_map_entry_t		tmp_entry;

	vm_map_lock(map);

	if (start < vm_map_min(map) || end > vm_map_max(map) || start > end)
	{
		vm_map_unlock(map);
		return (FALSE);
	}

	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
		vm_map_unlock(map);
		return(FALSE);
	}

	entry = tmp_entry;

	while (start < end) {
		if (entry == vm_map_to_entry(map)) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/*
		 *	No holes allowed!
		 */

		if (start < entry->vme_start) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/*
		 * Check protection associated with entry.
		 */

		if ((entry->protection & protection) != protection) {
			vm_map_unlock(map);
			return(FALSE);
		}

		/* go to next entry */

		start = entry->vme_end;
		entry = entry->vme_next;
	}
	vm_map_unlock(map);
	return(TRUE);
}
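
/*
 * Illustrative sketch (assumed caller): verify that a user buffer is
 * mapped writable end to end before starting a transfer into it.
 *
 *	if (!vm_map_check_protection(map,
 *				     vm_map_trunc_page(addr),
 *				     vm_map_round_page(addr + len),
 *				     VM_PROT_WRITE))
 *		return KERN_PROTECTION_FAILURE;
 */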

kern_return_t
vm_map_purgable_control(
	vm_map_t		map,
	vm_map_offset_t		address,
	vm_purgable_t		control,
	int			*state)
{
	vm_map_entry_t	entry;
	vm_object_t	object;
	kern_return_t	kr;

	/*
	 * Vet all the input parameters and current type and state of the
	 * underlying object.  Return with an error if anything is amiss.
	 */
	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (control != VM_PURGABLE_SET_STATE &&
	    control != VM_PURGABLE_GET_STATE)
		return(KERN_INVALID_ARGUMENT);

	if (control == VM_PURGABLE_SET_STATE &&
	    (*state < VM_PURGABLE_STATE_MIN ||
	     *state > VM_PURGABLE_STATE_MAX))
		return(KERN_INVALID_ARGUMENT);

	vm_map_lock(map);

	if (!vm_map_lookup_entry(map, address, &entry) || entry->is_sub_map) {

		/*
		 * Must pass a valid non-submap address.
		 */
		vm_map_unlock(map);
		return(KERN_INVALID_ADDRESS);
	}

	if ((entry->protection & VM_PROT_WRITE) == 0) {
		/*
		 * Can't apply purgable controls to something you can't write.
		 */
		vm_map_unlock(map);
		return(KERN_PROTECTION_FAILURE);
	}

	object = entry->object.vm_object;
	if (object == VM_OBJECT_NULL) {
		/*
		 * Object must already be present or it can't be purgable.
		 */
		vm_map_unlock(map);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	if (entry->offset != 0 ||
	    entry->vme_end - entry->vme_start != object->size) {
		/*
		 * Can only apply purgable controls to the whole (existing)
		 * object at once.
		 */
		vm_map_unlock(map);
		vm_object_unlock(object);
		return KERN_INVALID_ARGUMENT;
	}

	vm_map_unlock(map);

	kr = vm_object_purgable_control(object, control, state);

	vm_object_unlock(object);

	return kr;
}
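
/*
 * Illustrative sketch (assumed setup elsewhere): mark an entire purgable
 * region as volatile so its pages may be reclaimed under memory pressure;
 * the state constants come from <mach/vm_purgable.h>.
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *
 *	kr = vm_map_purgable_control(map, region_addr,
 *				     VM_PURGABLE_SET_STATE, &state);
 */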

kern_return_t
vm_map_page_info(
	vm_map_t	target_map,
	vm_map_offset_t	offset,
	int		*disposition,
	int		*ref_count)
{
	vm_map_entry_t	map_entry;
	vm_object_t	object;
	vm_page_t	m;

restart_page_query:
	*disposition = 0;
	*ref_count = 0;
	vm_map_lock(target_map);
	if (!vm_map_lookup_entry(target_map, offset, &map_entry)) {
		vm_map_unlock(target_map);
		return KERN_FAILURE;
	}
	offset -= map_entry->vme_start;	/* adjust to offset within entry */
	offset += map_entry->offset;	/* adjust to target object offset */
	if (map_entry->object.vm_object != VM_OBJECT_NULL) {
		if (!map_entry->is_sub_map) {
			object = map_entry->object.vm_object;
		} else {
			vm_map_unlock(target_map);
			target_map = map_entry->object.sub_map;
			goto restart_page_query;
		}
	} else {
		vm_map_unlock(target_map);
		return KERN_FAILURE;
	}
	vm_object_lock(object);
	vm_map_unlock(target_map);
	while (TRUE) {
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			*disposition |= VM_PAGE_QUERY_PAGE_PRESENT;
			break;
		} else {
			if (object->shadow) {
				offset += object->shadow_offset;
				vm_object_unlock(object);
				object = object->shadow;
				vm_object_lock(object);
				continue;
			}
			vm_object_unlock(object);
			return KERN_FAILURE;
		}
	}

	/*
	 * The ref_count is not strictly accurate: it measures the number
	 * of entities holding a reference on the object; they may not be
	 * mapping the object or may not be mapping the section holding the
	 * target page.  But it is still a ballpark number and, though an
	 * overcount, it picks up the copy-on-write cases.
	 */

	/*
	 * We could also get a picture of page sharing from pmap_attributes,
	 * but this would undercount, as only faulted-in mappings would
	 * show up.
	 */

	*ref_count = object->ref_count;

	if (m->fictitious) {
		*disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS;
		vm_object_unlock(object);
		return KERN_SUCCESS;
	}

	if (m->dirty)
		*disposition |= VM_PAGE_QUERY_PAGE_DIRTY;
	else if (pmap_is_modified(m->phys_page))
		*disposition |= VM_PAGE_QUERY_PAGE_DIRTY;

	if (m->reference)
		*disposition |= VM_PAGE_QUERY_PAGE_REF;
	else if (pmap_is_referenced(m->phys_page))
		*disposition |= VM_PAGE_QUERY_PAGE_REF;

	vm_object_unlock(object);
	return KERN_SUCCESS;

}
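
/*
 * Illustrative sketch (assumed caller): query the disposition of a single
 * page, the way a page-query trap might.
 *
 *	int disp, refs;
 *
 *	kr = vm_map_page_info(map, vm_map_trunc_page(offset),
 *			      &disp, &refs);
 *	if (kr == KERN_SUCCESS && (disp & VM_PAGE_QUERY_PAGE_PRESENT))
 *		...	// page is resident in some object in the chain
 */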
1c79356b A |
9745 | |
9746 | ||
91447636 A |
9747 | /* For a given range, check all map entries. If the entry coresponds to */ |
9748 | /* the old vm_region/map provided on the call, replace it with the */ | |
9749 | /* corresponding range in the new vm_region/map */ | |
9750 | kern_return_t vm_map_region_replace( | |
9751 | vm_map_t target_map, | |
9752 | ipc_port_t old_region, | |
9753 | ipc_port_t new_region, | |
9754 | vm_map_offset_t start, | |
9755 | vm_map_offset_t end) | |
9756 | { | |
9757 | vm_named_entry_t old_object; | |
9758 | vm_named_entry_t new_object; | |
9759 | vm_map_t old_submap; | |
9760 | vm_map_t new_submap; | |
9761 | vm_map_offset_t addr; | |
9762 | vm_map_entry_t entry; | |
9763 | int nested_pmap = 0; | |
1c79356b | 9764 | |
1c79356b | 9765 | |
91447636 A |
9766 | vm_map_lock(target_map); |
9767 | old_object = (vm_named_entry_t)old_region->ip_kobject; | |
9768 | new_object = (vm_named_entry_t)new_region->ip_kobject; | |
9769 | if((!old_object->is_sub_map) || (!new_object->is_sub_map)) { | |
9770 | vm_map_unlock(target_map); | |
9771 | return KERN_INVALID_ARGUMENT; | |
9772 | } | |
9773 | old_submap = (vm_map_t)old_object->backing.map; | |
9774 | new_submap = (vm_map_t)new_object->backing.map; | |
9775 | vm_map_lock(old_submap); | |
9776 | if((old_submap->min_offset != new_submap->min_offset) || | |
9777 | (old_submap->max_offset != new_submap->max_offset)) { | |
9778 | vm_map_unlock(old_submap); | |
9779 | vm_map_unlock(target_map); | |
9780 | return KERN_INVALID_ARGUMENT; | |
9781 | } | |
9782 | if(!vm_map_lookup_entry(target_map, start, &entry)) { | |
9783 | /* if the src is not contained, the entry precedes */ |
9784 | /* our range */ | |
9785 | addr = entry->vme_start; | |
9786 | if(entry == vm_map_to_entry(target_map)) { | |
9787 | vm_map_unlock(old_submap); | |
9788 | vm_map_unlock(target_map); | |
9789 | return KERN_SUCCESS; | |
9790 | } | |
9791 | } | |
9792 | if ((entry->use_pmap) && | |
9793 | (new_submap->pmap == NULL)) { | |
9794 | new_submap->pmap = pmap_create((vm_map_size_t) 0); | |
9795 | if(new_submap->pmap == PMAP_NULL) { | |
9796 | vm_map_unlock(old_submap); | |
9797 | vm_map_unlock(target_map); | |
9798 | return(KERN_NO_SPACE); | |
9799 | } | |
9800 | } | |
9801 | addr = entry->vme_start; | |
9802 | vm_map_reference(old_submap); | |
9803 | while((entry != vm_map_to_entry(target_map)) && | |
9804 | (entry->vme_start < end)) { | |
9805 | if((entry->is_sub_map) && | |
9806 | (entry->object.sub_map == old_submap)) { | |
9807 | if(entry->use_pmap) { | |
9808 | if((start & 0x0fffffff) || | |
9809 | ((end - start) != 0x10000000)) { | |
9810 | vm_map_unlock(old_submap); | |
9811 | vm_map_deallocate(old_submap); | |
9812 | vm_map_unlock(target_map); | |
9813 | return KERN_INVALID_ARGUMENT; | |
9814 | } | |
9815 | nested_pmap = 1; | |
9816 | } | |
9817 | entry->object.sub_map = new_submap; | |
9818 | vm_map_reference(new_submap); | |
9819 | vm_map_deallocate(old_submap); | |
9820 | } | |
9821 | entry = entry->vme_next; | |
9822 | addr = entry->vme_start; | |
9823 | } | |
9824 | if(nested_pmap) { | |
9825 | #ifndef i386 | |
9826 | pmap_unnest(target_map->pmap, (addr64_t)start); | |
9827 | if(target_map->mapped) { | |
9828 | vm_map_submap_pmap_clean(target_map, | |
9829 | start, end, old_submap, 0); | |
9830 | } | |
9831 | pmap_nest(target_map->pmap, new_submap->pmap, | |
9832 | (addr64_t)start, (addr64_t)start, | |
9833 | (uint64_t)(end - start)); | |
9834 | #endif /* i386 */ | |
9835 | } else { | |
9836 | vm_map_submap_pmap_clean(target_map, | |
9837 | start, end, old_submap, 0); | |
1c79356b | 9838 | } |
91447636 A |
9839 | vm_map_unlock(old_submap); |
9840 | vm_map_deallocate(old_submap); | |
9841 | vm_map_unlock(target_map); | |
9842 | return KERN_SUCCESS; | |
9843 | } | |
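/*
 * Editor's note (illustration, not original source): when a replaced
 * entry is nested in the pmap (entry->use_pmap), the routine above
 * requires the range to be exactly one 256MB segment on a 256MB
 * boundary, i.e.
 *
 *	(start & 0x0fffffff) == 0  &&  (end - start) == 0x10000000
 *
 * Any other shape makes the walk fail with KERN_INVALID_ARGUMENT.
 */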
9844 | ||
9845 | /* | |
9846 | * vm_map_msync | |
9847 | * | |
9848 | * Synchronizes the specified memory range with its backing store |
9849 | * image by either flushing or cleaning the contents to the appropriate |
9850 | * memory manager, engaging in a memory object synchronize dialog with |
9851 | * the manager. The client does not return until the manager issues |
9852 | * the m_o_s_completed message. MIG magically converts the user task |
9853 | * parameter to the task's address map. |
9854 | * | |
9855 | * interpretation of sync_flags | |
9856 | * VM_SYNC_INVALIDATE - discard pages, only return precious | |
9857 | * pages to manager. | |
9858 | * | |
9859 | * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS) | |
9860 | * - discard pages, write dirty or precious | |
9861 | * pages back to memory manager. | |
9862 | * | |
9863 | * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS | |
9864 | * - write dirty or precious pages back to | |
9865 | * the memory manager. | |
9866 | * | |
9867 | * VM_SYNC_CONTIGUOUS - does everything normally, but if there | |
9868 | * is a hole in the region, and we would | |
9869 | * have returned KERN_SUCCESS, return | |
9870 | * KERN_INVALID_ADDRESS instead. | |
9871 | * | |
9872 | * NOTE | |
9873 | * The memory object attributes have not yet been implemented; this |
9874 | * function will eventually have to deal with the invalidate attribute. |
9875 | * | |
9876 | * RETURNS | |
9877 | * KERN_INVALID_TASK Bad task parameter | |
9878 | * KERN_INVALID_ARGUMENT both sync and async were specified. | |
9879 | * KERN_SUCCESS The usual. | |
9880 | * KERN_INVALID_ADDRESS There was a hole in the region. | |
9881 | */ | |
9882 | ||
9883 | kern_return_t | |
9884 | vm_map_msync( | |
9885 | vm_map_t map, | |
9886 | vm_map_address_t address, | |
9887 | vm_map_size_t size, | |
9888 | vm_sync_t sync_flags) | |
9889 | { | |
9890 | msync_req_t msr; | |
9891 | msync_req_t new_msr; | |
9892 | queue_chain_t req_q; /* queue of requests for this msync */ | |
9893 | vm_map_entry_t entry; | |
9894 | vm_map_size_t amount_left; | |
9895 | vm_object_offset_t offset; | |
9896 | boolean_t do_sync_req; | |
9897 | boolean_t modifiable; | |
9898 | boolean_t had_hole = FALSE; | |
9899 | ||
9900 | if ((sync_flags & VM_SYNC_ASYNCHRONOUS) && | |
9901 | (sync_flags & VM_SYNC_SYNCHRONOUS)) | |
9902 | return(KERN_INVALID_ARGUMENT); | |
1c79356b A |
9903 | |
9904 | /* | |
91447636 | 9905 | * align address and size on page boundaries |
1c79356b | 9906 | */ |
91447636 A |
9907 | size = vm_map_round_page(address + size) - vm_map_trunc_page(address); |
9908 | address = vm_map_trunc_page(address); | |
1c79356b | 9909 | |
91447636 A |
9910 | if (map == VM_MAP_NULL) |
9911 | return(KERN_INVALID_TASK); | |
1c79356b | 9912 | |
91447636 A |
9913 | if (size == 0) |
9914 | return(KERN_SUCCESS); | |
1c79356b | 9915 | |
91447636 A |
9916 | queue_init(&req_q); |
9917 | amount_left = size; | |
1c79356b | 9918 | |
91447636 A |
9919 | while (amount_left > 0) { |
9920 | vm_object_size_t flush_size; | |
9921 | vm_object_t object; | |
1c79356b | 9922 | |
91447636 A |
9923 | vm_map_lock(map); |
9924 | if (!vm_map_lookup_entry(map, | |
9925 | vm_map_trunc_page(address), &entry)) { | |
9926 | ||
9927 | vm_size_t skip; | |
9928 | ||
9929 | /* | |
9930 | * hole in the address map. | |
9931 | */ | |
9932 | had_hole = TRUE; | |
9933 | ||
9934 | /* | |
9935 | * Check for empty map. | |
9936 | */ | |
9937 | if (entry == vm_map_to_entry(map) && | |
9938 | entry->vme_next == entry) { | |
9939 | vm_map_unlock(map); | |
9940 | break; | |
9941 | } | |
9942 | /* | |
9943 | * Check that we don't wrap and that | |
9944 | * we have at least one real map entry. | |
9945 | */ | |
9946 | if ((map->hdr.nentries == 0) || | |
9947 | (entry->vme_next->vme_start < address)) { | |
9948 | vm_map_unlock(map); | |
9949 | break; | |
9950 | } | |
9951 | /* | |
9952 | * Move up to the next entry if needed | |
9953 | */ | |
9954 | skip = (entry->vme_next->vme_start - address); | |
9955 | if (skip >= amount_left) | |
9956 | amount_left = 0; | |
9957 | else | |
9958 | amount_left -= skip; | |
9959 | address = entry->vme_next->vme_start; | |
9960 | vm_map_unlock(map); | |
9961 | continue; | |
9962 | } | |
1c79356b | 9963 | |
91447636 | 9964 | offset = address - entry->vme_start; |
1c79356b | 9965 | |
91447636 A |
9966 | /* |
9967 | * do we have more to flush than is contained in this | |
9968 | * entry? |
9969 | */ | |
9970 | if (amount_left + entry->vme_start + offset > entry->vme_end) { | |
9971 | flush_size = entry->vme_end - | |
9972 | (entry->vme_start + offset); | |
9973 | } else { | |
9974 | flush_size = amount_left; | |
9975 | } | |
9976 | amount_left -= flush_size; | |
9977 | address += flush_size; | |
1c79356b | 9978 | |
91447636 A |
9979 | if (entry->is_sub_map == TRUE) { |
9980 | vm_map_t local_map; | |
9981 | vm_map_offset_t local_offset; | |
1c79356b | 9982 | |
91447636 A |
9983 | local_map = entry->object.sub_map; |
9984 | local_offset = entry->offset; | |
9985 | vm_map_unlock(map); | |
9986 | if (vm_map_msync( | |
9987 | local_map, | |
9988 | local_offset, | |
9989 | flush_size, | |
9990 | sync_flags) == KERN_INVALID_ADDRESS) { | |
9991 | had_hole = TRUE; | |
9992 | } | |
9993 | continue; | |
9994 | } | |
9995 | object = entry->object.vm_object; | |
1c79356b | 9996 | |
91447636 A |
9997 | /* |
9998 | * We can't sync this object if the object has not been | |
9999 | * created yet | |
10000 | */ | |
10001 | if (object == VM_OBJECT_NULL) { | |
10002 | vm_map_unlock(map); | |
10003 | continue; | |
10004 | } | |
10005 | offset += entry->offset; | |
10006 | modifiable = (entry->protection & VM_PROT_WRITE) | |
10007 | != VM_PROT_NONE; | |
1c79356b | 10008 | |
91447636 | 10009 | vm_object_lock(object); |
1c79356b | 10010 | |
91447636 A |
10011 | if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) { |
10012 | boolean_t kill_pages = 0; | |
10013 | ||
10014 | if (sync_flags & VM_SYNC_KILLPAGES) { | |
10015 | if (object->ref_count == 1 && !entry->needs_copy && !object->shadow) | |
10016 | kill_pages = 1; | |
10017 | else | |
10018 | kill_pages = -1; | |
10019 | } | |
10020 | if (kill_pages != -1) | |
10021 | vm_object_deactivate_pages(object, offset, | |
10022 | (vm_object_size_t)flush_size, kill_pages); | |
10023 | vm_object_unlock(object); | |
10024 | vm_map_unlock(map); | |
10025 | continue; | |
1c79356b | 10026 | } |
91447636 A |
10027 | /* |
10028 | * We can't sync this object if there isn't a pager. | |
10029 | * Don't bother to sync internal objects, since there can't | |
10030 | * be any "permanent" storage for these objects anyway. | |
10031 | */ | |
10032 | if ((object->pager == MEMORY_OBJECT_NULL) || | |
10033 | (object->internal) || (object->private)) { | |
10034 | vm_object_unlock(object); | |
10035 | vm_map_unlock(map); | |
10036 | continue; | |
10037 | } | |
10038 | /* | |
10039 | * keep a reference on the object until syncing is done |
10040 | */ | |
10041 | assert(object->ref_count > 0); | |
10042 | object->ref_count++; | |
10043 | vm_object_res_reference(object); | |
10044 | vm_object_unlock(object); | |
1c79356b | 10045 | |
91447636 | 10046 | vm_map_unlock(map); |
1c79356b | 10047 | |
91447636 A |
10048 | do_sync_req = vm_object_sync(object, |
10049 | offset, | |
10050 | flush_size, | |
10051 | sync_flags & VM_SYNC_INVALIDATE, | |
10052 | (modifiable && | |
10053 | (sync_flags & VM_SYNC_SYNCHRONOUS || | |
10054 | sync_flags & VM_SYNC_ASYNCHRONOUS)), | |
10055 | sync_flags & VM_SYNC_SYNCHRONOUS); | |
10056 | /* | |
10057 | * only send an m_o_s if we returned pages, or if the entry |
10058 | * is writable (i.e. dirty pages may already have been sent back) |
10059 | */ | |
10060 | if (!do_sync_req && !modifiable) { | |
10061 | vm_object_deallocate(object); | |
10062 | continue; | |
1c79356b | 10063 | } |
91447636 | 10064 | msync_req_alloc(new_msr); |
1c79356b | 10065 | |
91447636 A |
10066 | vm_object_lock(object); |
10067 | offset += object->paging_offset; | |
1c79356b | 10068 | |
91447636 A |
10069 | new_msr->offset = offset; |
10070 | new_msr->length = flush_size; | |
10071 | new_msr->object = object; | |
10072 | new_msr->flag = VM_MSYNC_SYNCHRONIZING; | |
10073 | re_iterate: | |
10074 | queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) { | |
10075 | /* | |
10076 | * need to check for an overlapping entry; if found, wait |
10077 | * for the overlapping msr to be done, then reiterate |
10078 | */ | |
10079 | msr_lock(msr); | |
10080 | if (msr->flag == VM_MSYNC_SYNCHRONIZING && | |
10081 | ((offset >= msr->offset && | |
10082 | offset < (msr->offset + msr->length)) || | |
10083 | (msr->offset >= offset && | |
10084 | msr->offset < (offset + flush_size)))) | |
10085 | { | |
10086 | assert_wait((event_t) msr,THREAD_INTERRUPTIBLE); | |
10087 | msr_unlock(msr); | |
10088 | vm_object_unlock(object); | |
10089 | thread_block(THREAD_CONTINUE_NULL); | |
10090 | vm_object_lock(object); | |
10091 | goto re_iterate; | |
10092 | } | |
10093 | msr_unlock(msr); | |
10094 | }/* queue_iterate */ | |
1c79356b | 10095 | |
91447636 A |
10096 | queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q); |
10097 | vm_object_unlock(object); | |
1c79356b | 10098 | |
91447636 A |
10099 | queue_enter(&req_q, new_msr, msync_req_t, req_q); |
10100 | ||
10101 | (void) memory_object_synchronize( | |
10102 | object->pager, | |
10103 | offset, | |
10104 | flush_size, | |
10105 | sync_flags & ~VM_SYNC_CONTIGUOUS); | |
10106 | }/* while */ | |
10107 | ||
10108 | /* | |
10109 | * wait for memory_object_synchronize_completed messages from pager(s) |
10110 | */ | |
10111 | ||
10112 | while (!queue_empty(&req_q)) { | |
10113 | msr = (msync_req_t)queue_first(&req_q); | |
10114 | msr_lock(msr); | |
10115 | while(msr->flag != VM_MSYNC_DONE) { | |
10116 | assert_wait((event_t) msr, THREAD_INTERRUPTIBLE); | |
10117 | msr_unlock(msr); | |
10118 | thread_block(THREAD_CONTINUE_NULL); | |
10119 | msr_lock(msr); | |
10120 | }/* while */ | |
10121 | queue_remove(&req_q, msr, msync_req_t, req_q); | |
10122 | msr_unlock(msr); | |
10123 | vm_object_deallocate(msr->object); | |
10124 | msync_req_free(msr); | |
10125 | }/* while */ |
10126 | ||
10127 | /* for proper msync() behavior */ |
10128 | if (had_hole == TRUE && (sync_flags & VM_SYNC_CONTIGUOUS)) | |
10129 | return(KERN_INVALID_ADDRESS); | |
10130 | ||
10131 | return(KERN_SUCCESS); | |
10132 | }/* vm_msync */ | |
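/*
 * Editor's sketch (not part of the original source): how the sync_flags
 * documented above combine for the common cases.  The helper name and
 * its arguments are assumptions for illustration.
 */
static kern_return_t
example_msync_range(
	vm_map_t		map,
	vm_map_address_t	addr,
	vm_map_size_t		len)
{
	kern_return_t	kr;

	/* write dirty/precious pages and wait for the pager to finish */
	kr = vm_map_msync(map, addr, len,
			  VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
	if (kr == KERN_INVALID_ADDRESS)
		return kr;	/* hole in the range, per VM_SYNC_CONTIGUOUS */

	/* discard resident pages, returning only precious ones to the pager */
	return vm_map_msync(map, addr, len, VM_SYNC_INVALIDATE);
}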
1c79356b A |
10133 | |
10134 | /* Takes existing source and destination sub-maps and clones the contents of */ | |
10135 | /* the source map into the destination map */ |
1c79356b A |
10136 | kern_return_t |
10137 | vm_region_clone( | |
10138 | ipc_port_t src_region, | |
10139 | ipc_port_t dst_region) | |
10140 | { | |
10141 | vm_named_entry_t src_object; | |
10142 | vm_named_entry_t dst_object; | |
10143 | vm_map_t src_map; | |
10144 | vm_map_t dst_map; | |
91447636 A |
10145 | vm_map_offset_t addr; |
10146 | vm_map_offset_t max_off; | |
1c79356b A |
10147 | vm_map_entry_t entry; |
10148 | vm_map_entry_t new_entry; | |
10149 | vm_map_entry_t insert_point; | |
10150 | ||
10151 | src_object = (vm_named_entry_t)src_region->ip_kobject; | |
10152 | dst_object = (vm_named_entry_t)dst_region->ip_kobject; | |
10153 | if((!src_object->is_sub_map) || (!dst_object->is_sub_map)) { | |
10154 | return KERN_INVALID_ARGUMENT; | |
10155 | } | |
10156 | src_map = (vm_map_t)src_object->backing.map; | |
10157 | dst_map = (vm_map_t)dst_object->backing.map; | |
10158 | /* destination map is assumed to be unavailable to any other */ | |
10159 | /* activity, i.e. it is new */ |
10160 | vm_map_lock(src_map); | |
10161 | if((src_map->min_offset != dst_map->min_offset) | |
10162 | || (src_map->max_offset != dst_map->max_offset)) { | |
10163 | vm_map_unlock(src_map); | |
10164 | return KERN_INVALID_ARGUMENT; | |
10165 | } | |
10166 | addr = src_map->min_offset; | |
10167 | vm_map_lookup_entry(dst_map, addr, &entry); | |
10168 | if(entry == vm_map_to_entry(dst_map)) { | |
10169 | entry = entry->vme_next; | |
10170 | } | |
10171 | if(entry == vm_map_to_entry(dst_map)) { | |
10172 | max_off = src_map->max_offset; | |
10173 | } else { | |
10174 | max_off = entry->vme_start; | |
10175 | } | |
10176 | vm_map_lookup_entry(src_map, addr, &entry); | |
10177 | if(entry == vm_map_to_entry(src_map)) { | |
10178 | entry = entry->vme_next; | |
10179 | } | |
10180 | vm_map_lookup_entry(dst_map, addr, &insert_point); | |
10181 | while((entry != vm_map_to_entry(src_map)) && | |
10182 | (entry->vme_end <= max_off)) { | |
10183 | addr = entry->vme_start; | |
10184 | new_entry = vm_map_entry_create(dst_map); | |
10185 | vm_map_entry_copy(new_entry, entry); | |
10186 | vm_map_entry_link(dst_map, insert_point, new_entry); | |
10187 | insert_point = new_entry; | |
10188 | if (entry->object.vm_object != VM_OBJECT_NULL) { | |
10189 | if (new_entry->is_sub_map) { | |
10190 | vm_map_reference(new_entry->object.sub_map); | |
10191 | } else { | |
10192 | vm_object_reference( | |
10193 | new_entry->object.vm_object); | |
10194 | } | |
10195 | } | |
10196 | dst_map->size += new_entry->vme_end - new_entry->vme_start; | |
10197 | entry = entry->vme_next; | |
10198 | } | |
10199 | vm_map_unlock(src_map); | |
10200 | return KERN_SUCCESS; | |
10201 | } | |
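/*
 * Editor's note (illustration, not original source): vm_region_clone()
 * assumes both ports name submap-backed named entries whose maps span
 * identical [min_offset, max_offset) ranges, and that dst_map is still
 * private to the caller; entries are linked in without dst_map's lock.
 */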
10202 | ||
10203 | /* | |
91447636 A |
10204 | * Routine: convert_port_entry_to_map |
10205 | * Purpose: | |
10206 | * Convert from a port specifying an entry or a task | |
10207 | * to a map. Doesn't consume the port ref; produces a map ref, | |
10208 | * which may be null. Unlike convert_port_to_map, the | |
10209 | * port may be backed by a task or by a named entry. |
10210 | * Conditions: | |
10211 | * Nothing locked. | |
1c79356b | 10212 | */ |
1c79356b | 10213 | |
1c79356b | 10214 | |
91447636 A |
10215 | vm_map_t |
10216 | convert_port_entry_to_map( | |
10217 | ipc_port_t port) | |
10218 | { | |
10219 | vm_map_t map; | |
10220 | vm_named_entry_t named_entry; | |
1c79356b | 10221 | |
91447636 A |
10222 | if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { |
10223 | while(TRUE) { | |
10224 | ip_lock(port); | |
10225 | if(ip_active(port) && (ip_kotype(port) | |
10226 | == IKOT_NAMED_ENTRY)) { | |
10227 | named_entry = | |
10228 | (vm_named_entry_t)port->ip_kobject; | |
10229 | if (!(mutex_try(&(named_entry)->Lock))) { | |
10230 | ip_unlock(port); | |
10231 | mutex_pause(); | |
10232 | continue; | |
10233 | } | |
10234 | named_entry->ref_count++; | |
10235 | mutex_unlock(&(named_entry)->Lock); | |
10236 | ip_unlock(port); | |
10237 | if ((named_entry->is_sub_map) && | |
10238 | (named_entry->protection | |
10239 | & VM_PROT_WRITE)) { | |
10240 | map = named_entry->backing.map; | |
10241 | } else { | |
10242 | mach_destroy_memory_entry(port); | |
10243 | return VM_MAP_NULL; | |
10244 | } | |
10245 | vm_map_reference_swap(map); | |
10246 | mach_destroy_memory_entry(port); | |
10247 | break; | |
10248 | } | |
10249 | else | |
10250 | return VM_MAP_NULL; | |
10251 | } | |
1c79356b | 10252 | } |
91447636 A |
10253 | else |
10254 | map = convert_port_to_map(port); | |
1c79356b | 10255 | |
91447636 A |
10256 | return map; |
10257 | } | |
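/*
 * Editor's sketch (not part of the original source): the reference
 * contract described above.  The caller keeps its port right; the map
 * reference produced here must be dropped with vm_map_deallocate().
 * The helper name is an assumption for illustration.
 */
static void
example_with_port_map(ipc_port_t port)
{
	vm_map_t map;

	map = convert_port_entry_to_map(port);
	if (map == VM_MAP_NULL)
		return;	/* dead port, read-only entry, or non-submap entry */

	/* ... operate on map here ... */

	vm_map_deallocate(map);	/* drop the ref the conversion produced */
}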
1c79356b | 10258 | |
91447636 A |
10259 | /* |
10260 | * Routine: convert_port_entry_to_object | |
10261 | * Purpose: | |
10262 | * Convert from a port specifying a named entry to an | |
10263 | * object. Doesn't consume the port ref; produces an object ref, |
10264 | * which may be null. | |
10265 | * Conditions: | |
10266 | * Nothing locked. | |
10267 | */ | |
1c79356b | 10268 | |
1c79356b | 10269 | |
91447636 A |
10270 | vm_object_t |
10271 | convert_port_entry_to_object( | |
10272 | ipc_port_t port) | |
10273 | { | |
10274 | vm_object_t object; | |
10275 | vm_named_entry_t named_entry; | |
1c79356b | 10276 | |
91447636 A |
10277 | if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { |
10278 | while(TRUE) { | |
10279 | ip_lock(port); | |
10280 | if(ip_active(port) && (ip_kotype(port) | |
10281 | == IKOT_NAMED_ENTRY)) { | |
10282 | named_entry = | |
10283 | (vm_named_entry_t)port->ip_kobject; | |
10284 | if (!(mutex_try(&(named_entry)->Lock))) { | |
10285 | ip_unlock(port); | |
10286 | mutex_pause(); | |
10287 | continue; | |
10288 | } | |
10289 | named_entry->ref_count++; | |
10290 | mutex_unlock(&(named_entry)->Lock); | |
10291 | ip_unlock(port); | |
10292 | if ((!named_entry->is_sub_map) && | |
10293 | (!named_entry->is_pager) && | |
10294 | (named_entry->protection | |
10295 | & VM_PROT_WRITE)) { | |
10296 | object = named_entry->backing.object; | |
10297 | } else { | |
10298 | mach_destroy_memory_entry(port); | |
10299 | return (vm_object_t)NULL; | |
10300 | } | |
10301 | vm_object_reference(named_entry->backing.object); | |
10302 | mach_destroy_memory_entry(port); | |
10303 | break; | |
10304 | } | |
10305 | else | |
10306 | return (vm_object_t)NULL; | |
1c79356b | 10307 | } |
91447636 A |
10308 | } else { |
10309 | return (vm_object_t)NULL; | |
1c79356b | 10310 | } |
91447636 A |
10311 | |
10312 | return object; | |
1c79356b | 10313 | } |
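/*
 * Editor's sketch (not part of the original source): as with the map
 * case above, the object reference produced must be dropped with
 * vm_object_deallocate() when the caller is done.  The helper name is
 * an assumption for illustration.
 */
static void
example_with_port_object(ipc_port_t port)
{
	vm_object_t object;

	object = convert_port_entry_to_object(port);
	if (object == VM_OBJECT_NULL)
		return;	/* not a writable, non-pager named entry */

	/* ... operate on object here ... */

	vm_object_deallocate(object);	/* drop the ref produced above */
}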
9bccf70c A |
10314 | |
10315 | /* | |
91447636 A |
10316 | * Export routines to other components for the things we access locally through |
10317 | * macros. | |
9bccf70c | 10318 | */ |
91447636 A |
10319 | #undef current_map |
10320 | vm_map_t | |
10321 | current_map(void) | |
9bccf70c | 10322 | { |
91447636 | 10323 | return (current_map_fast()); |
9bccf70c A |
10324 | } |
10325 | ||
10326 | /* | |
10327 | * vm_map_reference: | |
10328 | * | |
10329 | * Most code internal to the osfmk will go through a | |
10330 | * macro defining this. This is always here for the | |
10331 | * use of other kernel components. | |
10332 | */ | |
10333 | #undef vm_map_reference | |
10334 | void | |
10335 | vm_map_reference( | |
10336 | register vm_map_t map) | |
10337 | { | |
10338 | if (map == VM_MAP_NULL) | |
10339 | return; | |
10340 | ||
10341 | mutex_lock(&map->s_lock); | |
10342 | #if TASK_SWAPPER | |
10343 | assert(map->res_count > 0); | |
10344 | assert(map->ref_count >= map->res_count); | |
10345 | map->res_count++; | |
10346 | #endif | |
10347 | map->ref_count++; | |
10348 | mutex_unlock(&map->s_lock); | |
10349 | } | |
10350 | ||
10351 | /* | |
10352 | * vm_map_deallocate: | |
10353 | * | |
10354 | * Removes a reference from the specified map, | |
10355 | * destroying it if no references remain. | |
10356 | * The map should not be locked. | |
10357 | */ | |
10358 | void | |
10359 | vm_map_deallocate( | |
10360 | register vm_map_t map) | |
10361 | { | |
10362 | unsigned int ref; | |
10363 | ||
10364 | if (map == VM_MAP_NULL) | |
10365 | return; | |
10366 | ||
10367 | mutex_lock(&map->s_lock); | |
10368 | ref = --map->ref_count; | |
10369 | if (ref > 0) { | |
10370 | vm_map_res_deallocate(map); | |
10371 | mutex_unlock(&map->s_lock); | |
10372 | return; | |
10373 | } | |
10374 | assert(map->ref_count == 0); | |
10375 | mutex_unlock(&map->s_lock); | |
10376 | ||
10377 | #if TASK_SWAPPER | |
10378 | /* | |
10379 | * The map residence count isn't decremented here because | |
10380 | * the vm_map_delete below will traverse the entire map, | |
10381 | * deleting entries, and the residence counts on objects | |
10382 | * and sharing maps will go away then. | |
10383 | */ | |
10384 | #endif | |
10385 | ||
10386 | vm_map_destroy(map); | |
10387 | } | |
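/*
 * Editor's sketch (not part of the original source): vm_map_reference()
 * and vm_map_deallocate() above pair one-for-one; the final
 * vm_map_deallocate() destroys the map.  Helper name and call sites
 * are assumptions for illustration.
 */
static void
example_map_ref_pairing(vm_map_t map)
{
	vm_map_reference(map);	/* take an extra ref (and res ref) */

	/* ... hand the map to another component, use it, etc. ... */

	vm_map_deallocate(map);	/* drop it; the map dies on its last ref */
}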
91447636 A |
10388 | |
10389 | #ifdef __PPC__ | |
10390 | ||
10391 | /* LP64todo - this whole mechanism is temporary. It should be redone when | |
10392 | * the pmap layer can handle 64-bit address spaces. Until then, we trump | |
10393 | * up a map entry for the 64-bit commpage above the map's max_offset. | |
10394 | */ | |
10395 | extern vm_map_t com_region_map64; /* the submap for 64-bit commpage */ | |
10396 | SInt32 commpage64s_in_use = 0; | |
10397 | ||
10398 | void | |
10399 | vm_map_commpage64( | |
10400 | vm_map_t map ) | |
10401 | { | |
10402 | vm_map_entry_t entry; | |
10403 | vm_object_t object; | |
10404 | ||
10405 | vm_map_lock(map); | |
10406 | ||
10407 | /* The commpage is necessarily the last entry in the map. | |
10408 | * See if one is already there (not sure if this can happen???) | |
10409 | */ | |
10410 | entry = vm_map_last_entry(map); | |
10411 | if (entry != vm_map_to_entry(map)) { | |
10412 | if (entry->vme_end >= (vm_map_offset_t)_COMM_PAGE_BASE_ADDRESS) { | |
10413 | vm_map_unlock(map); | |
10414 | return; | |
10415 | } | |
10416 | } | |
10417 | ||
10418 | entry = vm_map_first_entry(com_region_map64); /* the 64-bit commpage */ | |
10419 | object = entry->object.vm_object; | |
10420 | vm_object_reference(object); | |
10421 | ||
10422 | /* We bypass vm_map_enter() because we are adding the entry past the | |
10423 | * map's max_offset. | |
10424 | */ | |
10425 | entry = vm_map_entry_insert( | |
10426 | map, | |
10427 | vm_map_last_entry(map), /* insert after last entry */ | |
10428 | _COMM_PAGE_BASE_ADDRESS, | |
10429 | _COMM_PAGE_BASE_ADDRESS+_COMM_PAGE_AREA_USED, | |
10430 | object, | |
10431 | 0, /* offset */ | |
10432 | FALSE, /* needs_copy */ | |
10433 | FALSE, /* is_shared */ | |
10434 | FALSE, /* in_transition */ | |
10435 | VM_PROT_READ, | |
10436 | VM_PROT_READ, | |
10437 | VM_BEHAVIOR_DEFAULT, | |
10438 | VM_INHERIT_NONE, | |
10439 | 1 ); /* wired_count */ | |
10440 | ||
10441 | vm_map_unlock(map); | |
10442 | ||
10443 | OSIncrementAtomic(&commpage64s_in_use); | |
10444 | } | |
10445 | ||
10446 | ||
10447 | /* LP64todo - remove this! */ | |
10448 | ||
10449 | void | |
10450 | vm_map_remove_commpage64( | |
10451 | vm_map_t map ) | |
10452 | { | |
10453 | vm_map_entry_t entry; | |
10454 | int deleted = 0; | |
10455 | ||
10456 | while( 1 ) { | |
10457 | vm_map_lock(map); | |
10458 | ||
10459 | entry = vm_map_last_entry(map); | |
10460 | if ((entry == vm_map_to_entry(map)) || | |
10461 | (entry->vme_start < (vm_map_offset_t)_COMM_PAGE_BASE_ADDRESS)) | |
10462 | break; | |
10463 | ||
10464 | /* clearing the wired count isn't strictly correct */ | |
10465 | entry->wired_count = 0; | |
10466 | vm_map_entry_delete(map,entry); | |
10467 | deleted++; | |
10468 | } | |
10469 | ||
10470 | vm_map_unlock(map); | |
10471 | ||
10472 | if (deleted != 0) | |
10473 | OSDecrementAtomic(&commpage64s_in_use); | |
10474 | } | |
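/*
 * Editor's sketch (assumption, for illustration): the two routines
 * above bracket the life of a 64-bit address space on PPC, and
 * commpage64s_in_use counts how many maps currently carry the
 * trumped-up entry.
 */
static void
example_commpage64_lifetime(vm_map_t map64)
{
	vm_map_commpage64(map64);		/* map the 64-bit commpage */
	/* ... the 64-bit task runs ... */
	vm_map_remove_commpage64(map64);	/* tear it down at map death */
}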
10475 | ||
10476 | #endif /* __PPC__ */ |