/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_map.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory mapping module.
 */
65 | ||
1c79356b A |
66 | #include <task_swapper.h> |
67 | #include <mach_assert.h> | |
fe8ab488 A |
68 | |
69 | #include <vm/vm_options.h> | |
70 | ||
91447636 | 71 | #include <libkern/OSAtomic.h> |
1c79356b A |
72 | |
73 | #include <mach/kern_return.h> | |
74 | #include <mach/port.h> | |
75 | #include <mach/vm_attributes.h> | |
76 | #include <mach/vm_param.h> | |
77 | #include <mach/vm_behavior.h> | |
55e303ae | 78 | #include <mach/vm_statistics.h> |
91447636 | 79 | #include <mach/memory_object.h> |
0c530ab8 | 80 | #include <mach/mach_vm.h> |
91447636 | 81 | #include <machine/cpu_capabilities.h> |
2d21ac55 | 82 | #include <mach/sdt.h> |
91447636 | 83 | |
1c79356b A |
84 | #include <kern/assert.h> |
85 | #include <kern/counters.h> | |
91447636 | 86 | #include <kern/kalloc.h> |
1c79356b | 87 | #include <kern/zalloc.h> |
91447636 A |
88 | |
89 | #include <vm/cpm.h> | |
39236c6e | 90 | #include <vm/vm_compressor_pager.h> |
1c79356b A |
91 | #include <vm/vm_init.h> |
92 | #include <vm/vm_fault.h> | |
93 | #include <vm/vm_map.h> | |
94 | #include <vm/vm_object.h> | |
95 | #include <vm/vm_page.h> | |
b0d623f7 | 96 | #include <vm/vm_pageout.h> |
1c79356b A |
97 | #include <vm/vm_kern.h> |
98 | #include <ipc/ipc_port.h> | |
99 | #include <kern/sched_prim.h> | |
100 | #include <kern/misc_protos.h> | |
1c79356b A |
101 | #include <kern/xpr.h> |
102 | ||
91447636 A |
103 | #include <mach/vm_map_server.h> |
104 | #include <mach/mach_host_server.h> | |
2d21ac55 | 105 | #include <vm/vm_protos.h> |
b0d623f7 | 106 | #include <vm/vm_purgeable_internal.h> |
91447636 | 107 | |
91447636 | 108 | #include <vm/vm_protos.h> |
2d21ac55 | 109 | #include <vm/vm_shared_region.h> |
6d2010ae | 110 | #include <vm/vm_map_store.h> |
91447636 | 111 | |
3e170ce0 | 112 | |
316670eb | 113 | extern u_int32_t random(void); /* from <libkern/libkern.h> */ |
1c79356b A |
114 | /* Internal prototypes |
115 | */ | |

static void vm_map_simplify_range(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end);	/* forward */

static boolean_t	vm_map_range_check(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end,
	vm_map_entry_t	*entry);

static vm_map_entry_t	_vm_map_entry_create(
	struct vm_map_header	*map_header, boolean_t map_locked);

static void		_vm_map_entry_dispose(
	struct vm_map_header	*map_header,
	vm_map_entry_t		entry);

static void		vm_map_pmap_enter(
	vm_map_t		map,
	vm_map_offset_t		addr,
	vm_map_offset_t		end_addr,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_prot_t		protection);

static void		_vm_map_clip_end(
	struct vm_map_header	*map_header,
	vm_map_entry_t		entry,
	vm_map_offset_t		end);

static void		_vm_map_clip_start(
	struct vm_map_header	*map_header,
	vm_map_entry_t		entry,
	vm_map_offset_t		start);

static void		vm_map_entry_delete(
	vm_map_t	map,
	vm_map_entry_t	entry);

static kern_return_t	vm_map_delete(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end,
	int		flags,
	vm_map_t	zap_map);

static kern_return_t	vm_map_copy_overwrite_unaligned(
	vm_map_t	dst_map,
	vm_map_entry_t	entry,
	vm_map_copy_t	copy,
	vm_map_address_t start,
	boolean_t	discard_on_success);

static kern_return_t	vm_map_copy_overwrite_aligned(
	vm_map_t	dst_map,
	vm_map_entry_t	tmp_entry,
	vm_map_copy_t	copy,
	vm_map_offset_t	start,
	pmap_t		pmap);

static kern_return_t	vm_map_copyin_kernel_buffer(
	vm_map_t	src_map,
	vm_map_address_t src_addr,
	vm_map_size_t	len,
	boolean_t	src_destroy,
	vm_map_copy_t	*copy_result);	/* OUT */

static kern_return_t	vm_map_copyout_kernel_buffer(
	vm_map_t	map,
	vm_map_address_t *addr,	/* IN/OUT */
	vm_map_copy_t	copy,
	boolean_t	overwrite,
	boolean_t	consume_on_success);

static void		vm_map_fork_share(
	vm_map_t	old_map,
	vm_map_entry_t	old_entry,
	vm_map_t	new_map);

static boolean_t	vm_map_fork_copy(
	vm_map_t	old_map,
	vm_map_entry_t	*old_entry_p,
	vm_map_t	new_map);

void		vm_map_region_top_walk(
	vm_map_entry_t		entry,
	vm_region_top_info_t	top);

void		vm_map_region_walk(
	vm_map_t		map,
	vm_map_offset_t		va,
	vm_map_entry_t		entry,
	vm_object_offset_t	offset,
	vm_object_size_t	range,
	vm_region_extended_info_t extended,
	boolean_t		look_for_pages,
	mach_msg_type_number_t	count);

static kern_return_t	vm_map_wire_nested(
	vm_map_t		map,
	vm_map_offset_t		start,
	vm_map_offset_t		end,
	vm_prot_t		caller_prot,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_map_offset_t		pmap_addr,
	ppnum_t			*physpage_p);

static kern_return_t	vm_map_unwire_nested(
	vm_map_t		map,
	vm_map_offset_t		start,
	vm_map_offset_t		end,
	boolean_t		user_wire,
	pmap_t			map_pmap,
	vm_map_offset_t		pmap_addr);

static kern_return_t	vm_map_overwrite_submap_recurse(
	vm_map_t		dst_map,
	vm_map_offset_t		dst_addr,
	vm_map_size_t		dst_size);

static kern_return_t	vm_map_copy_overwrite_nested(
	vm_map_t		dst_map,
	vm_map_offset_t		dst_addr,
	vm_map_copy_t		copy,
	boolean_t		interruptible,
	pmap_t			pmap,
	boolean_t		discard_on_success);

static kern_return_t	vm_map_remap_extract(
	vm_map_t		map,
	vm_map_offset_t		addr,
	vm_map_size_t		size,
	boolean_t		copy,
	struct vm_map_header	*map_header,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance,
	boolean_t		pageable);

static kern_return_t	vm_map_remap_range_allocate(
	vm_map_t		map,
	vm_map_address_t	*address,
	vm_map_size_t		size,
	vm_map_offset_t		mask,
	int			flags,
	vm_map_entry_t		*map_entry);

static void		vm_map_region_look_for_page(
	vm_map_t		map,
	vm_map_offset_t		va,
	vm_object_t		object,
	vm_object_offset_t	offset,
	int			max_refcnt,
	int			depth,
	vm_region_extended_info_t extended,
	mach_msg_type_number_t	count);

static int		vm_map_region_count_obj_refs(
	vm_map_entry_t		entry,
	vm_object_t		object);


static kern_return_t	vm_map_willneed(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end);

static kern_return_t	vm_map_reuse_pages(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end);

static kern_return_t	vm_map_reusable_pages(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end);

static kern_return_t	vm_map_can_reuse(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end);

#if MACH_ASSERT
static kern_return_t	vm_map_pageout(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end);
#endif /* MACH_ASSERT */

/*
 *	Macros to copy a vm_map_entry.  We must be careful to correctly
 *	manage the wired page count.  vm_map_entry_copy() creates a new
 *	map entry to the same memory - the wired count in the new entry
 *	must be set to zero.  vm_map_entry_copy_full() creates a new
 *	entry that is identical to the old entry.  This preserves the
 *	wire count; it's used for map splitting and zone changing in
 *	vm_map_copyout.
 */

#define vm_map_entry_copy(NEW,OLD)	\
MACRO_BEGIN				\
boolean_t _vmec_reserved = (NEW)->from_reserved_zone;	\
	*(NEW) = *(OLD);		\
	(NEW)->is_shared = FALSE;	\
	(NEW)->needs_wakeup = FALSE;	\
	(NEW)->in_transition = FALSE;	\
	(NEW)->wired_count = 0;		\
	(NEW)->user_wired_count = 0;	\
	(NEW)->permanent = FALSE;	\
	(NEW)->used_for_jit = FALSE;	\
	(NEW)->from_reserved_zone = _vmec_reserved;	\
	(NEW)->iokit_acct = FALSE;	\
	(NEW)->vme_resilient_codesign = FALSE;	\
	(NEW)->vme_resilient_media = FALSE;	\
MACRO_END

#define vm_map_entry_copy_full(NEW,OLD)			\
MACRO_BEGIN						\
boolean_t _vmecf_reserved = (NEW)->from_reserved_zone;	\
(*(NEW) = *(OLD));					\
(NEW)->from_reserved_zone = _vmecf_reserved;		\
MACRO_END

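/*
 * Illustrative sketch (not part of the build): splitting an entry keeps
 * the clone unwired, while a full copy preserves the wire counts:
 *
 *	vm_map_entry_t new_entry = vm_map_entry_create(map, FALSE);
 *
 *	vm_map_entry_copy(new_entry, old_entry);	// new_entry->wired_count == 0
 *	vm_map_entry_copy_full(new_entry, old_entry);	// wire counts preserved
 */
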
/*
 * Decide if we want to allow processes to execute from their data or stack areas.
 * override_nx() returns true if we do.  Data/stack execution can be enabled independently
 * for 32 and 64 bit processes.  Set the VM_ABI_32 or VM_ABI_64 flags in allow_data_exec
 * or allow_stack_exec to enable data execution for that type of data area for that particular
 * ABI (or both by or'ing the flags together).  These are initialized in the architecture
 * specific pmap files since the default behavior varies according to architecture.  The
 * main reason it varies is because of the need to provide binary compatibility with old
 * applications that were written before these restrictions came into being.  In the old
 * days, an app could execute anything it could read, but this has slowly been tightened
 * up over time.  The default behavior is:
 *
 *	32-bit PPC apps		may execute from both stack and data areas
 *	32-bit Intel apps	may execute from data areas but not stack
 *	64-bit PPC/Intel apps	may not execute from either data or stack
 *
 * An application on any architecture may override these defaults by explicitly
 * adding PROT_EXEC permission to the page in question with the mprotect(2)
 * system call.  This code here just determines what happens when an app tries to
 * execute from a page that lacks execute permission.
 *
 * Note that allow_data_exec or allow_stack_exec may also be modified by sysctl to change the
 * default behavior for both 32 and 64 bit apps on a system-wide basis.  Furthermore,
 * a Mach-O header flag bit (MH_NO_HEAP_EXECUTION) can be used to forcibly disallow
 * execution from data areas for a particular binary even if the arch normally permits it.  As
 * a final wrinkle, a posix_spawn attribute flag can be used to negate this opt-in header bit
 * to support some complicated use cases, notably browsers with out-of-process plugins that
 * are not all NX-safe.
 */
371 | ||
372 | extern int allow_data_exec, allow_stack_exec; | |
373 | ||
374 | int | |
375 | override_nx(vm_map_t map, uint32_t user_tag) /* map unused on arm */ | |
376 | { | |
377 | int current_abi; | |
378 | ||
3e170ce0 A |
379 | if (map->pmap == kernel_pmap) return FALSE; |
380 | ||
2d21ac55 A |
381 | /* |
382 | * Determine if the app is running in 32 or 64 bit mode. | |
383 | */ | |
384 | ||
385 | if (vm_map_is_64bit(map)) | |
386 | current_abi = VM_ABI_64; | |
387 | else | |
388 | current_abi = VM_ABI_32; | |
389 | ||
390 | /* | |
391 | * Determine if we should allow the execution based on whether it's a | |
392 | * stack or data area and the current architecture. | |
393 | */ | |
394 | ||
395 | if (user_tag == VM_MEMORY_STACK) | |
396 | return allow_stack_exec & current_abi; | |
397 | ||
6d2010ae | 398 | return (allow_data_exec & current_abi) && (map->map_disallow_data_exec == FALSE); |
2d21ac55 A |
399 | } |
400 | ||
401 | ||
/*
 *	Virtual memory maps provide for the mapping, protection,
 *	and sharing of virtual memory objects.  In addition,
 *	this module provides for an efficient virtual copy of
 *	memory from one map to another.
 *
 *	Synchronization is required prior to most operations.
 *
 *	Maps consist of an ordered doubly-linked list of simple
 *	entries; a single hint is used to speed up lookups.
 *
 *	Sharing maps have been deleted from this version of Mach.
 *	All shared objects are now mapped directly into the respective
 *	maps.  This requires a change in the copy on write strategy;
 *	the asymmetric (delayed) strategy is used for shared temporary
 *	objects instead of the symmetric (shadow) strategy.  All maps
 *	are now "top level" maps (either task map, kernel map or submap
 *	of the kernel map).
 *
 *	Since portions of maps are specified by start/end addresses,
 *	which may not align with existing map entries, all
 *	routines merely "clip" entries to these start/end values.
 *	[That is, an entry is split into two, bordering at a
 *	start or end value.]  Note that these clippings may not
 *	always be necessary (as the two resulting entries are then
 *	not changed); however, the clipping is done for convenience.
 *	No attempt is currently made to "glue back together" two
 *	abutting entries.
 *
 *	The symmetric (shadow) copy strategy implements virtual copy
 *	by copying VM object references from one map to
 *	another, and then marking both regions as copy-on-write.
 *	It is important to note that only one writeable reference
 *	to a VM object region exists in any map when this strategy
 *	is used -- this means that shadow object creation can be
 *	delayed until a write operation occurs.  The asymmetric (delayed)
 *	strategy allows multiple maps to have writeable references to
 *	the same region of a vm object, and hence cannot delay creating
 *	its copy objects.  See vm_object_copy_quickly() in vm_object.c.
 *	Copying of permanent objects is completely different; see
 *	vm_object_copy_strategically() in vm_object.c.
 */

static zone_t	vm_map_zone;			/* zone for vm_map structures */
static zone_t	vm_map_entry_zone;		/* zone for vm_map_entry structures */
static zone_t	vm_map_entry_reserved_zone;	/* zone with reserve for non-blocking
						 * allocations */
static zone_t	vm_map_copy_zone;		/* zone for vm_map_copy structures */
zone_t		vm_map_holes_zone;		/* zone for vm map holes (vm_map_links) structures */


/*
 *	Placeholder object for submap operations.  This object is dropped
 *	into the range by a call to vm_map_find, and removed when
 *	vm_map_submap creates the submap.
 */

vm_object_t	vm_submap_object;

static void		*map_data;
static vm_size_t	map_data_size;
static void		*kentry_data;
static vm_size_t	kentry_data_size;
static void		*map_holes_data;
static vm_size_t	map_holes_data_size;

#define	NO_COALESCE_LIMIT	((1024 * 128) - 1)

/* Skip acquiring locks if we're in the midst of a kernel core dump */
unsigned int not_in_kdp = 1;

unsigned int vm_map_set_cache_attr_count = 0;

kern_return_t
vm_map_set_cache_attr(
	vm_map_t	map,
	vm_map_offset_t	va)
{
	vm_map_entry_t	map_entry;
	vm_object_t	object;
	kern_return_t	kr = KERN_SUCCESS;

	vm_map_lock_read(map);

	if (!vm_map_lookup_entry(map, va, &map_entry) ||
	    map_entry->is_sub_map) {
		/*
		 * that memory is not properly mapped
		 */
		kr = KERN_INVALID_ARGUMENT;
		goto done;
	}
	object = VME_OBJECT(map_entry);

	if (object == VM_OBJECT_NULL) {
		/*
		 * there should be a VM object here at this point
		 */
		kr = KERN_INVALID_ARGUMENT;
		goto done;
	}
	vm_object_lock(object);
	object->set_cache_attr = TRUE;
	vm_object_unlock(object);

	vm_map_set_cache_attr_count++;
done:
	vm_map_unlock_read(map);

	return kr;
}

#if CONFIG_CODE_DECRYPTION
/*
 * vm_map_apple_protected:
 * This remaps the requested part of the object with an object backed by
 * the decrypting pager.
 * crypt_info contains entry points and session data for the crypt module.
 * The crypt_info block will be copied by vm_map_apple_protected. The data structures
 * referenced in crypt_info must remain valid until crypt_info->crypt_end() is called.
 */
kern_return_t
vm_map_apple_protected(
	vm_map_t		map,
	vm_map_offset_t		start,
	vm_map_offset_t		end,
	vm_object_offset_t	crypto_backing_offset,
	struct pager_crypt_info	*crypt_info)
{
	boolean_t	map_locked;
	kern_return_t	kr;
	vm_map_entry_t	map_entry;
	struct vm_map_entry tmp_entry;
	memory_object_t	unprotected_mem_obj;
	vm_object_t	protected_object;
	vm_map_offset_t	map_addr;
	vm_map_offset_t	start_aligned, end_aligned;
	vm_object_offset_t	crypto_start, crypto_end;
	int		vm_flags;

	map_locked = FALSE;
	unprotected_mem_obj = MEMORY_OBJECT_NULL;

	start_aligned = vm_map_trunc_page(start, PAGE_MASK_64);
	end_aligned = vm_map_round_page(end, PAGE_MASK_64);
	start_aligned = vm_map_trunc_page(start_aligned, VM_MAP_PAGE_MASK(map));
	end_aligned = vm_map_round_page(end_aligned, VM_MAP_PAGE_MASK(map));

	assert(start_aligned == start);
	assert(end_aligned == end);

	map_addr = start_aligned;
	for (map_addr = start_aligned;
	     map_addr < end;
	     map_addr = tmp_entry.vme_end) {
		vm_map_lock(map);
		map_locked = TRUE;

		/* lookup the protected VM object */
		if (!vm_map_lookup_entry(map,
					 map_addr,
					 &map_entry) ||
		    map_entry->is_sub_map ||
		    VME_OBJECT(map_entry) == VM_OBJECT_NULL ||
		    !(map_entry->protection & VM_PROT_EXECUTE)) {
			/* that memory is not properly mapped */
			kr = KERN_INVALID_ARGUMENT;
			goto done;
		}

		/* get the protected object to be decrypted */
		protected_object = VME_OBJECT(map_entry);
		if (protected_object == VM_OBJECT_NULL) {
			/* there should be a VM object here at this point */
			kr = KERN_INVALID_ARGUMENT;
			goto done;
		}
		/* ensure protected object stays alive while map is unlocked */
		vm_object_reference(protected_object);

		/* limit the map entry to the area we want to cover */
		vm_map_clip_start(map, map_entry, start_aligned);
		vm_map_clip_end(map, map_entry, end_aligned);

		tmp_entry = *map_entry;
		map_entry = VM_MAP_ENTRY_NULL; /* not valid after unlocking map */
		vm_map_unlock(map);
		map_locked = FALSE;

		/*
		 * This map entry might be only partially encrypted
		 * (if not fully "page-aligned").
		 */
		crypto_start = 0;
		crypto_end = tmp_entry.vme_end - tmp_entry.vme_start;
		if (tmp_entry.vme_start < start) {
			if (tmp_entry.vme_start != start_aligned) {
				kr = KERN_INVALID_ADDRESS;
			}
			crypto_start += (start - tmp_entry.vme_start);
		}
		if (tmp_entry.vme_end > end) {
			if (tmp_entry.vme_end != end_aligned) {
				kr = KERN_INVALID_ADDRESS;
			}
			crypto_end -= (tmp_entry.vme_end - end);
		}

		/*
		 * This "extra backing offset" is needed to get the decryption
		 * routine to use the right key.  It adjusts for the possibly
		 * relative offset of an interposed "4K" pager...
		 */
		if (crypto_backing_offset == (vm_object_offset_t) -1) {
			crypto_backing_offset = VME_OFFSET(&tmp_entry);
		}

		/*
		 * Lookup (and create if necessary) the protected memory object
		 * matching that VM object.
		 * If successful, this also grabs a reference on the memory object,
		 * to guarantee that it doesn't go away before we get a chance to map
		 * it.
		 */
		unprotected_mem_obj = apple_protect_pager_setup(
			protected_object,
			VME_OFFSET(&tmp_entry),
			crypto_backing_offset,
			crypt_info,
			crypto_start,
			crypto_end);

		/* release extra ref on protected object */
		vm_object_deallocate(protected_object);

		if (unprotected_mem_obj == NULL) {
			kr = KERN_FAILURE;
			goto done;
		}

		vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;

		/* map this memory object in place of the current one */
		map_addr = tmp_entry.vme_start;
		kr = vm_map_enter_mem_object(map,
					     &map_addr,
					     (tmp_entry.vme_end -
					      tmp_entry.vme_start),
					     (mach_vm_offset_t) 0,
					     vm_flags,
					     (ipc_port_t) unprotected_mem_obj,
					     0,
					     TRUE,
					     tmp_entry.protection,
					     tmp_entry.max_protection,
					     tmp_entry.inheritance);
		assert(kr == KERN_SUCCESS);
		assert(map_addr == tmp_entry.vme_start);

#if VM_MAP_DEBUG_APPLE_PROTECT
		printf("APPLE_PROTECT: map %p [0x%llx:0x%llx] pager %p: "
		       "backing:[object:%p,offset:0x%llx,"
		       "crypto_backing_offset:0x%llx,"
		       "crypto_start:0x%llx,crypto_end:0x%llx]\n",
		       map,
		       (uint64_t) map_addr,
		       (uint64_t) (map_addr + (tmp_entry.vme_end -
					       tmp_entry.vme_start)),
		       unprotected_mem_obj,
		       protected_object,
		       VME_OFFSET(&tmp_entry),
		       crypto_backing_offset,
		       crypto_start,
		       crypto_end);
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */

		/*
		 * Release the reference obtained by
		 * apple_protect_pager_setup().
		 * The mapping (if it succeeded) is now holding a reference on
		 * the memory object.
		 */
		memory_object_deallocate(unprotected_mem_obj);
		unprotected_mem_obj = MEMORY_OBJECT_NULL;

		/* continue with next map entry */
		crypto_backing_offset += (tmp_entry.vme_end -
					  tmp_entry.vme_start);
		crypto_backing_offset -= crypto_start;
	}
	kr = KERN_SUCCESS;

done:
	if (map_locked) {
		vm_map_unlock(map);
	}
	return kr;
}
#endif /* CONFIG_CODE_DECRYPTION */

lck_grp_t	vm_map_lck_grp;
lck_grp_attr_t	vm_map_lck_grp_attr;
lck_attr_t	vm_map_lck_attr;
lck_attr_t	vm_map_lck_rw_attr;

/*
 *	vm_map_init:
 *
 *	Initialize the vm_map module.  Must be called before
 *	any other vm_map routines.
 *
 *	Map and entry structures are allocated from zones -- we must
 *	initialize those zones.
 *
 *	There are three zones of interest:
 *
 *	vm_map_zone:		used to allocate maps.
 *	vm_map_entry_zone:	used to allocate map entries.
 *	vm_map_entry_reserved_zone:	fallback zone for kernel map entries
 *
 *	The kernel allocates map entries from a special zone that is initially
 *	"crammed" with memory.  It would be difficult (perhaps impossible) for
 *	the kernel to allocate more memory to an entry zone when it became
 *	empty since the very act of allocating memory implies the creation
 *	of a new entry.
 */
void
vm_map_init(
	void)
{
	vm_size_t entry_zone_alloc_size;
	const char *mez_name = "VM map entries";

	vm_map_zone = zinit((vm_map_size_t) sizeof(struct _vm_map), 40*1024,
			    PAGE_SIZE, "maps");
	zone_change(vm_map_zone, Z_NOENCRYPT, TRUE);
#if defined(__LP64__)
	entry_zone_alloc_size = PAGE_SIZE * 5;
#else
	entry_zone_alloc_size = PAGE_SIZE * 6;
#endif
	vm_map_entry_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
				  1024*1024, entry_zone_alloc_size,
				  mez_name);
	zone_change(vm_map_entry_zone, Z_NOENCRYPT, TRUE);
	zone_change(vm_map_entry_zone, Z_NOCALLOUT, TRUE);
	zone_change(vm_map_entry_zone, Z_GZALLOC_EXEMPT, TRUE);

	vm_map_entry_reserved_zone = zinit((vm_map_size_t) sizeof(struct vm_map_entry),
					   kentry_data_size * 64, kentry_data_size,
					   "Reserved VM map entries");
	zone_change(vm_map_entry_reserved_zone, Z_NOENCRYPT, TRUE);

	vm_map_copy_zone = zinit((vm_map_size_t) sizeof(struct vm_map_copy),
				 16*1024, PAGE_SIZE, "VM map copies");
	zone_change(vm_map_copy_zone, Z_NOENCRYPT, TRUE);

	vm_map_holes_zone = zinit((vm_map_size_t) sizeof(struct vm_map_links),
				  16*1024, PAGE_SIZE, "VM map holes");
	zone_change(vm_map_holes_zone, Z_NOENCRYPT, TRUE);

	/*
	 *	Cram the map and kentry zones with initial data.
	 *	Set reserved_zone non-collectible to aid zone_gc().
	 */
	zone_change(vm_map_zone, Z_COLLECT, FALSE);

	zone_change(vm_map_entry_reserved_zone, Z_COLLECT, FALSE);
	zone_change(vm_map_entry_reserved_zone, Z_EXPAND, FALSE);
	zone_change(vm_map_entry_reserved_zone, Z_FOREIGN, TRUE);
	zone_change(vm_map_entry_reserved_zone, Z_NOCALLOUT, TRUE);
	zone_change(vm_map_entry_reserved_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
	zone_change(vm_map_copy_zone, Z_CALLERACCT, FALSE); /* don't charge caller */
	zone_change(vm_map_entry_reserved_zone, Z_GZALLOC_EXEMPT, TRUE);

	zone_change(vm_map_holes_zone, Z_COLLECT, TRUE);
	zone_change(vm_map_holes_zone, Z_EXPAND, TRUE);
	zone_change(vm_map_holes_zone, Z_FOREIGN, TRUE);
	zone_change(vm_map_holes_zone, Z_NOCALLOUT, TRUE);
	zone_change(vm_map_holes_zone, Z_CALLERACCT, TRUE);
	zone_change(vm_map_holes_zone, Z_GZALLOC_EXEMPT, TRUE);

	/*
	 * Add the stolen memory to zones, adjust zone size and stolen counts.
	 */
	zcram(vm_map_zone, (vm_offset_t)map_data, map_data_size);
	zcram(vm_map_entry_reserved_zone, (vm_offset_t)kentry_data, kentry_data_size);
	zcram(vm_map_holes_zone, (vm_offset_t)map_holes_data, map_holes_data_size);
	VM_PAGE_MOVE_STOLEN(atop_64(map_data_size) + atop_64(kentry_data_size) + atop_64(map_holes_data_size));

	lck_grp_attr_setdefault(&vm_map_lck_grp_attr);
	lck_grp_init(&vm_map_lck_grp, "vm_map", &vm_map_lck_grp_attr);
	lck_attr_setdefault(&vm_map_lck_attr);

	lck_attr_setdefault(&vm_map_lck_rw_attr);
	lck_attr_cleardebug(&vm_map_lck_rw_attr);

#if CONFIG_FREEZE
	default_freezer_init();
#endif /* CONFIG_FREEZE */
}

void
vm_map_steal_memory(
	void)
{
	uint32_t kentry_initial_pages;

	map_data_size = round_page(10 * sizeof(struct _vm_map));
	map_data = pmap_steal_memory(map_data_size);

	/*
	 * kentry_initial_pages corresponds to the number of kernel map entries
	 * required during bootstrap until the asynchronous replenishment
	 * scheme is activated and/or entries are available from the general
	 * map entry pool.
	 */
#if defined(__LP64__)
	kentry_initial_pages = 10;
#else
	kentry_initial_pages = 6;
#endif

#if CONFIG_GZALLOC
	/* If using the guard allocator, reserve more memory for the kernel
	 * reserved map entry pool.
	 */
	if (gzalloc_enabled())
		kentry_initial_pages *= 1024;
#endif

	kentry_data_size = kentry_initial_pages * PAGE_SIZE;
	kentry_data = pmap_steal_memory(kentry_data_size);

	map_holes_data_size = kentry_data_size;
	map_holes_data = pmap_steal_memory(map_holes_data_size);
}

void
vm_kernel_reserved_entry_init(void) {
	zone_prio_refill_configure(vm_map_entry_reserved_zone, (6*PAGE_SIZE)/sizeof(struct vm_map_entry));
	zone_prio_refill_configure(vm_map_holes_zone, (6*PAGE_SIZE)/sizeof(struct vm_map_links));
}

void
vm_map_disable_hole_optimization(vm_map_t map)
{
	vm_map_entry_t	head_entry, hole_entry, next_hole_entry;

	if (map->holelistenabled) {

		head_entry = hole_entry = (vm_map_entry_t) map->holes_list;

		while (hole_entry != NULL) {

			next_hole_entry = hole_entry->vme_next;

			hole_entry->vme_next = NULL;
			hole_entry->vme_prev = NULL;
			zfree(vm_map_holes_zone, hole_entry);

			if (next_hole_entry == head_entry) {
				hole_entry = NULL;
			} else {
				hole_entry = next_hole_entry;
			}
		}

		map->holes_list = NULL;
		map->holelistenabled = FALSE;

		map->first_free = vm_map_first_entry(map);
		SAVE_HINT_HOLE_WRITE(map, NULL);
	}
}

boolean_t
vm_kernel_map_is_kernel(vm_map_t map) {
	return (map->pmap == kernel_pmap);
}

/*
 *	vm_map_create:
 *
 *	Creates and returns a new empty VM map with
 *	the given physical map structure, and having
 *	the given lower and upper address bounds.
 */

boolean_t vm_map_supports_hole_optimization = TRUE;

vm_map_t
vm_map_create(
	pmap_t		pmap,
	vm_map_offset_t	min,
	vm_map_offset_t	max,
	boolean_t	pageable)
{
	static int		color_seed = 0;
	register vm_map_t	result;
	struct vm_map_links	*hole_entry = NULL;

	result = (vm_map_t) zalloc(vm_map_zone);
	if (result == VM_MAP_NULL)
		panic("vm_map_create");

	vm_map_first_entry(result) = vm_map_to_entry(result);
	vm_map_last_entry(result)  = vm_map_to_entry(result);
	result->hdr.nentries = 0;
	result->hdr.entries_pageable = pageable;

	vm_map_store_init( &(result->hdr) );

	result->hdr.page_shift = PAGE_SHIFT;

	result->size = 0;
	result->user_wire_limit = MACH_VM_MAX_ADDRESS;	/* default limit is unlimited */
	result->user_wire_size  = 0;
	result->ref_count = 1;
#if TASK_SWAPPER
	result->res_count = 1;
	result->sw_state = MAP_SW_IN;
#endif /* TASK_SWAPPER */
	result->pmap = pmap;
	result->min_offset = min;
	result->max_offset = max;
	result->wiring_required = FALSE;
	result->no_zero_fill = FALSE;
	result->mapped_in_other_pmaps = FALSE;
	result->wait_for_space = FALSE;
	result->switch_protect = FALSE;
	result->disable_vmentry_reuse = FALSE;
	result->map_disallow_data_exec = FALSE;
	result->highest_entry_end = 0;
	result->first_free = vm_map_to_entry(result);
	result->hint = vm_map_to_entry(result);
	result->color_rr = (color_seed++) & vm_color_mask;
	result->jit_entry_exists = FALSE;

	if (vm_map_supports_hole_optimization && pmap != kernel_pmap) {
		hole_entry = zalloc(vm_map_holes_zone);

		hole_entry->start = min;
		hole_entry->end = (max > (vm_map_offset_t)MACH_VM_MAX_ADDRESS) ? max : (vm_map_offset_t)MACH_VM_MAX_ADDRESS;
		result->holes_list = result->hole_hint = hole_entry;
		hole_entry->prev = hole_entry->next = (vm_map_entry_t) hole_entry;
		result->holelistenabled = TRUE;

	} else {

		result->holelistenabled = FALSE;
	}

#if CONFIG_FREEZE
	result->default_freezer_handle = NULL;
#endif
	vm_map_lock_init(result);
	lck_mtx_init_ext(&result->s_lock, &result->s_lock_ext, &vm_map_lck_grp, &vm_map_lck_attr);

	return(result);
}
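
/*
 * Illustrative sketch (not part of the build; pmap_create()'s signature
 * varies by release, and "ledger"/"is_64bit" are placeholders): callers
 * pair vm_map_create() with a freshly made pmap when building a task's
 * address space:
 *
 *	vm_map_t new_map;
 *
 *	new_map = vm_map_create(pmap_create(ledger, 0, is_64bit),
 *				MACH_VM_MIN_ADDRESS,
 *				MACH_VM_MAX_ADDRESS,
 *				TRUE);	// entries_pageable
 */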
966 | ||
967 | /* | |
968 | * vm_map_entry_create: [ internal use only ] | |
969 | * | |
970 | * Allocates a VM map entry for insertion in the | |
971 | * given map (or map copy). No fields are filled. | |
972 | */ | |
7ddcb079 | 973 | #define vm_map_entry_create(map, map_locked) _vm_map_entry_create(&(map)->hdr, map_locked) |
1c79356b | 974 | |
7ddcb079 A |
975 | #define vm_map_copy_entry_create(copy, map_locked) \ |
976 | _vm_map_entry_create(&(copy)->cpy_hdr, map_locked) | |
977 | unsigned reserved_zalloc_count, nonreserved_zalloc_count; | |
1c79356b | 978 | |
91447636 | 979 | static vm_map_entry_t |
1c79356b | 980 | _vm_map_entry_create( |
7ddcb079 | 981 | struct vm_map_header *map_header, boolean_t __unused map_locked) |
1c79356b | 982 | { |
7ddcb079 A |
983 | zone_t zone; |
984 | vm_map_entry_t entry; | |
1c79356b | 985 | |
7ddcb079 A |
986 | zone = vm_map_entry_zone; |
987 | ||
988 | assert(map_header->entries_pageable ? !map_locked : TRUE); | |
989 | ||
990 | if (map_header->entries_pageable) { | |
991 | entry = (vm_map_entry_t) zalloc(zone); | |
992 | } | |
993 | else { | |
994 | entry = (vm_map_entry_t) zalloc_canblock(zone, FALSE); | |
995 | ||
996 | if (entry == VM_MAP_ENTRY_NULL) { | |
997 | zone = vm_map_entry_reserved_zone; | |
998 | entry = (vm_map_entry_t) zalloc(zone); | |
999 | OSAddAtomic(1, &reserved_zalloc_count); | |
1000 | } else | |
1001 | OSAddAtomic(1, &nonreserved_zalloc_count); | |
1002 | } | |
1c79356b | 1003 | |
1c79356b A |
1004 | if (entry == VM_MAP_ENTRY_NULL) |
1005 | panic("vm_map_entry_create"); | |
7ddcb079 A |
1006 | entry->from_reserved_zone = (zone == vm_map_entry_reserved_zone); |
1007 | ||
6d2010ae | 1008 | vm_map_store_update( (vm_map_t) NULL, entry, VM_MAP_ENTRY_CREATE); |
316670eb | 1009 | #if MAP_ENTRY_CREATION_DEBUG |
39236c6e A |
1010 | entry->vme_creation_maphdr = map_header; |
1011 | fastbacktrace(&entry->vme_creation_bt[0], | |
1012 | (sizeof(entry->vme_creation_bt)/sizeof(uintptr_t))); | |
316670eb | 1013 | #endif |
1c79356b A |
1014 | return(entry); |
1015 | } | |
1016 | ||
/*
 *	vm_map_entry_dispose:	[ internal use only ]
 *
 *	Inverse of vm_map_entry_create.
 *
 *	write map lock held so no need to
 *	do anything special to insure correctness
 *	of the stores
 */
#define	vm_map_entry_dispose(map, entry)	\
	_vm_map_entry_dispose(&(map)->hdr, (entry))

#define	vm_map_copy_entry_dispose(map, entry)	\
	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))

static void
_vm_map_entry_dispose(
	register struct vm_map_header	*map_header,
	register vm_map_entry_t		entry)
{
	register zone_t		zone;

	if (map_header->entries_pageable || !(entry->from_reserved_zone))
		zone = vm_map_entry_zone;
	else
		zone = vm_map_entry_reserved_zone;

	if (!map_header->entries_pageable) {
		if (zone == vm_map_entry_zone)
			OSAddAtomic(-1, &nonreserved_zalloc_count);
		else
			OSAddAtomic(-1, &reserved_zalloc_count);
	}

	zfree(zone, entry);
}

#if MACH_ASSERT
static boolean_t first_free_check = FALSE;
boolean_t
first_free_is_valid(
	vm_map_t	map)
{
	if (!first_free_check)
		return TRUE;

	return( first_free_is_valid_store( map ));
}
#endif /* MACH_ASSERT */


#define vm_map_copy_entry_link(copy, after_where, entry)		\
	_vm_map_store_entry_link(&(copy)->cpy_hdr, after_where, (entry))

#define vm_map_copy_entry_unlink(copy, entry)				\
	_vm_map_store_entry_unlink(&(copy)->cpy_hdr, (entry))

1071 | #define vm_map_copy_entry_unlink(copy, entry) \ | |
6d2010ae | 1072 | _vm_map_store_entry_unlink(&(copy)->cpy_hdr, (entry)) |
1c79356b | 1073 | |
1c79356b | 1074 | #if MACH_ASSERT && TASK_SWAPPER |
1c79356b A |
1075 | /* |
1076 | * vm_map_res_reference: | |
1077 | * | |
1078 | * Adds another valid residence count to the given map. | |
1079 | * | |
1080 | * Map is locked so this function can be called from | |
1081 | * vm_map_swapin. | |
1082 | * | |
1083 | */ | |
1084 | void vm_map_res_reference(register vm_map_t map) | |
1085 | { | |
1086 | /* assert map is locked */ | |
1087 | assert(map->res_count >= 0); | |
1088 | assert(map->ref_count >= map->res_count); | |
1089 | if (map->res_count == 0) { | |
b0d623f7 | 1090 | lck_mtx_unlock(&map->s_lock); |
1c79356b A |
1091 | vm_map_lock(map); |
1092 | vm_map_swapin(map); | |
b0d623f7 | 1093 | lck_mtx_lock(&map->s_lock); |
1c79356b A |
1094 | ++map->res_count; |
1095 | vm_map_unlock(map); | |
1096 | } else | |
1097 | ++map->res_count; | |
1098 | } | |
1099 | ||
1100 | /* | |
1101 | * vm_map_reference_swap: | |
1102 | * | |
1103 | * Adds valid reference and residence counts to the given map. | |
1104 | * | |
1105 | * The map may not be in memory (i.e. zero residence count). | |
1106 | * | |
1107 | */ | |
1108 | void vm_map_reference_swap(register vm_map_t map) | |
1109 | { | |
1110 | assert(map != VM_MAP_NULL); | |
b0d623f7 | 1111 | lck_mtx_lock(&map->s_lock); |
1c79356b A |
1112 | assert(map->res_count >= 0); |
1113 | assert(map->ref_count >= map->res_count); | |
1114 | map->ref_count++; | |
1115 | vm_map_res_reference(map); | |
b0d623f7 | 1116 | lck_mtx_unlock(&map->s_lock); |
1c79356b A |
1117 | } |
1118 | ||
1119 | /* | |
1120 | * vm_map_res_deallocate: | |
1121 | * | |
1122 | * Decrement residence count on a map; possibly causing swapout. | |
1123 | * | |
1124 | * The map must be in memory (i.e. non-zero residence count). | |
1125 | * | |
1126 | * The map is locked, so this function is callable from vm_map_deallocate. | |
1127 | * | |
1128 | */ | |
1129 | void vm_map_res_deallocate(register vm_map_t map) | |
1130 | { | |
1131 | assert(map->res_count > 0); | |
1132 | if (--map->res_count == 0) { | |
b0d623f7 | 1133 | lck_mtx_unlock(&map->s_lock); |
1c79356b A |
1134 | vm_map_lock(map); |
1135 | vm_map_swapout(map); | |
1136 | vm_map_unlock(map); | |
b0d623f7 | 1137 | lck_mtx_lock(&map->s_lock); |
1c79356b A |
1138 | } |
1139 | assert(map->ref_count >= map->res_count); | |
1140 | } | |
1141 | #endif /* MACH_ASSERT && TASK_SWAPPER */ | |
1142 | ||
/*
 *	vm_map_destroy:
 *
 *	Actually destroy a map.
 */
void
vm_map_destroy(
	vm_map_t	map,
	int		flags)
{
	vm_map_lock(map);

	/* final cleanup: no need to unnest shared region */
	flags |= VM_MAP_REMOVE_NO_UNNESTING;

	/* clean up regular map entries */
	(void) vm_map_delete(map, map->min_offset, map->max_offset,
			     flags, VM_MAP_NULL);
	/* clean up leftover special mappings (commpage, etc...) */
	(void) vm_map_delete(map, 0x0, 0xFFFFFFFFFFFFF000ULL,
			     flags, VM_MAP_NULL);

#if CONFIG_FREEZE
	if (map->default_freezer_handle) {
		default_freezer_handle_deallocate(map->default_freezer_handle);
		map->default_freezer_handle = NULL;
	}
#endif
	vm_map_disable_hole_optimization(map);
	vm_map_unlock(map);

	assert(map->hdr.nentries == 0);

	if(map->pmap)
		pmap_destroy(map->pmap);

	zfree(vm_map_zone, map);
}

1181 | ||
1182 | #if TASK_SWAPPER | |
1183 | /* | |
1184 | * vm_map_swapin/vm_map_swapout | |
1185 | * | |
1186 | * Swap a map in and out, either referencing or releasing its resources. | |
1187 | * These functions are internal use only; however, they must be exported | |
1188 | * because they may be called from macros, which are exported. | |
1189 | * | |
1190 | * In the case of swapout, there could be races on the residence count, | |
1191 | * so if the residence count is up, we return, assuming that a | |
1192 | * vm_map_deallocate() call in the near future will bring us back. | |
1193 | * | |
1194 | * Locking: | |
1195 | * -- We use the map write lock for synchronization among races. | |
1196 | * -- The map write lock, and not the simple s_lock, protects the | |
1197 | * swap state of the map. | |
1198 | * -- If a map entry is a share map, then we hold both locks, in | |
1199 | * hierarchical order. | |
1200 | * | |
1201 | * Synchronization Notes: | |
1202 | * 1) If a vm_map_swapin() call happens while swapout in progress, it | |
1203 | * will block on the map lock and proceed when swapout is through. | |
1204 | * 2) A vm_map_reference() call at this time is illegal, and will | |
1205 | * cause a panic. vm_map_reference() is only allowed on resident | |
1206 | * maps, since it refuses to block. | |
1207 | * 3) A vm_map_swapin() call during a swapin will block, and | |
1208 | * proceeed when the first swapin is done, turning into a nop. | |
1209 | * This is the reason the res_count is not incremented until | |
1210 | * after the swapin is complete. | |
1211 | * 4) There is a timing hole after the checks of the res_count, before | |
1212 | * the map lock is taken, during which a swapin may get the lock | |
1213 | * before a swapout about to happen. If this happens, the swapin | |
1214 | * will detect the state and increment the reference count, causing | |
1215 | * the swapout to be a nop, thereby delaying it until a later | |
1216 | * vm_map_deallocate. If the swapout gets the lock first, then | |
1217 | * the swapin will simply block until the swapout is done, and | |
1218 | * then proceed. | |
1219 | * | |
1220 | * Because vm_map_swapin() is potentially an expensive operation, it | |
1221 | * should be used with caution. | |
1222 | * | |
1223 | * Invariants: | |
1224 | * 1) A map with a residence count of zero is either swapped, or | |
1225 | * being swapped. | |
1226 | * 2) A map with a non-zero residence count is either resident, | |
1227 | * or being swapped in. | |
1228 | */ | |
1229 | ||
1230 | int vm_map_swap_enable = 1; | |
1231 | ||
void vm_map_swapin (vm_map_t map)
{
	register vm_map_entry_t entry;

	if (!vm_map_swap_enable)	/* debug */
		return;

	/*
	 * Map is locked
	 * First deal with various races.
	 */
	if (map->sw_state == MAP_SW_IN)
		/*
		 * we raced with swapout and won.  Returning will incr.
		 * the res_count, turning the swapout into a nop.
		 */
		return;

	/*
	 * The residence count must be zero.  If we raced with another
	 * swapin, the state would have been IN; if we raced with a
	 * swapout (after another competing swapin), we must have lost
	 * the race to get here (see above comment), in which case
	 * res_count is still 0.
	 */
	assert(map->res_count == 0);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_OUT);

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_reference upon it.
	 * If the entry is an object, we call vm_object_res_reference
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_reference.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (VME_OBJECT(entry) != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = VME_SUBMAP(entry);
				lck_mtx_lock(&lmap->s_lock);
				vm_map_res_reference(lmap);
				lck_mtx_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = VME_OBJECT(entry);
				vm_object_lock(object);
				/*
				 * This call may iterate through the
				 * shadow chain.
				 */
				vm_object_res_reference(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_OUT);
	map->sw_state = MAP_SW_IN;
}

void vm_map_swapout(vm_map_t map)
{
	register vm_map_entry_t entry;

	/*
	 * Map is locked
	 * First deal with various races.
	 * If we raced with a swapin and lost, the residence count
	 * will have been incremented to 1, and we simply return.
	 */
	lck_mtx_lock(&map->s_lock);
	if (map->res_count != 0) {
		lck_mtx_unlock(&map->s_lock);
		return;
	}
	lck_mtx_unlock(&map->s_lock);

	/*
	 * There are no intermediate states of a map going out or
	 * coming in, since the map is locked during the transition.
	 */
	assert(map->sw_state == MAP_SW_IN);

	if (!vm_map_swap_enable)
		return;

	/*
	 * We now operate upon each map entry.  If the entry is a sub-
	 * or share-map, we call vm_map_res_deallocate upon it.
	 * If the entry is an object, we call vm_object_res_deallocate
	 * (this may iterate through the shadow chain).
	 * Note that we hold the map locked the entire time,
	 * even if we get back here via a recursive call in
	 * vm_map_res_deallocate.
	 */
	entry = vm_map_first_entry(map);

	while (entry != vm_map_to_entry(map)) {
		if (VME_OBJECT(entry) != VM_OBJECT_NULL) {
			if (entry->is_sub_map) {
				vm_map_t lmap = VME_SUBMAP(entry);
				lck_mtx_lock(&lmap->s_lock);
				vm_map_res_deallocate(lmap);
				lck_mtx_unlock(&lmap->s_lock);
			} else {
				vm_object_t object = VME_OBJECT(entry);
				vm_object_lock(object);
				/*
				 * This call may take a long time,
				 * since it could actively push
				 * out pages (if we implement it
				 * that way).
				 */
				vm_object_res_deallocate(object);
				vm_object_unlock(object);
			}
		}
		entry = entry->vme_next;
	}
	assert(map->sw_state == MAP_SW_IN);
	map->sw_state = MAP_SW_OUT;
}

#endif /* TASK_SWAPPER */

/*
 *	vm_map_lookup_entry:	[ internal use only ]
 *
 *	Calls into the vm map store layer to find the map
 *	entry containing (or immediately preceding) the
 *	specified address in the given map; the entry is returned
 *	in the "entry" parameter.  The boolean
 *	result indicates whether the address is
 *	actually contained in the map.
 */
boolean_t
vm_map_lookup_entry(
	register vm_map_t		map,
	register vm_map_offset_t	address,
	vm_map_entry_t			*entry)		/* OUT */
{
	return ( vm_map_store_lookup_entry( map, address, entry ));
}

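/*
 * Illustrative sketch (not part of the build): the lookup idiom used
 * throughout this file:
 *
 *	vm_map_entry_t entry;
 *
 *	vm_map_lock_read(map);
 *	if (vm_map_lookup_entry(map, addr, &entry)) {
 *		// entry->vme_start <= addr < entry->vme_end
 *	} else {
 *		// "entry" immediately precedes addr, or is
 *		// vm_map_to_entry(map) if no entry precedes it
 *	}
 *	vm_map_unlock_read(map);
 */
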
1384 | /* | |
1385 | * Routine: vm_map_find_space | |
1386 | * Purpose: | |
1387 | * Allocate a range in the specified virtual address map, | |
1388 | * returning the entry allocated for that range. | |
1389 | * Used by kmem_alloc, etc. | |
1390 | * | |
1391 | * The map must be NOT be locked. It will be returned locked | |
1392 | * on KERN_SUCCESS, unlocked on failure. | |
1393 | * | |
1394 | * If an entry is allocated, the object/offset fields | |
1395 | * are initialized to zero. | |
1396 | */ | |
1397 | kern_return_t | |
1398 | vm_map_find_space( | |
1399 | register vm_map_t map, | |
91447636 A |
1400 | vm_map_offset_t *address, /* OUT */ |
1401 | vm_map_size_t size, | |
1402 | vm_map_offset_t mask, | |
0c530ab8 | 1403 | int flags, |
1c79356b A |
1404 | vm_map_entry_t *o_entry) /* OUT */ |
1405 | { | |
3e170ce0 | 1406 | vm_map_entry_t entry, new_entry; |
91447636 A |
1407 | register vm_map_offset_t start; |
1408 | register vm_map_offset_t end; | |
3e170ce0 | 1409 | vm_map_entry_t hole_entry; |
91447636 A |
1410 | |
1411 | if (size == 0) { | |
1412 | *address = 0; | |
1413 | return KERN_INVALID_ARGUMENT; | |
1414 | } | |
1c79356b | 1415 | |
2d21ac55 A |
1416 | if (flags & VM_FLAGS_GUARD_AFTER) { |
1417 | /* account for the back guard page in the size */ | |
39236c6e | 1418 | size += VM_MAP_PAGE_SIZE(map); |
2d21ac55 A |
1419 | } |
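/*
 * Editor's note (illustrative): with both guard flags set, the range
 * reserved below is laid out as
 *
 *	[ front guard page | payload | back guard page ]
 *
 * The back guard was just folded into "size"; the front guard is
 * carved out and given back around the fit loop below.
 */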
1420 | ||
7ddcb079 | 1421 | new_entry = vm_map_entry_create(map, FALSE); |
1c79356b A |
1422 | |
1423 | /* | |
1424 | * Look for the first possible address; if there's already | |
1425 | * something at this address, we have to start after it. | |
1426 | */ | |
1427 | ||
1428 | vm_map_lock(map); | |
1429 | ||
6d2010ae A |
1430 | if( map->disable_vmentry_reuse == TRUE) { |
1431 | VM_MAP_HIGHEST_ENTRY(map, entry, start); | |
1432 | } else { | |
3e170ce0 A |
1433 | if (map->holelistenabled) { |
1434 | hole_entry = (vm_map_entry_t)map->holes_list; | |
1435 | ||
1436 | if (hole_entry == NULL) { | |
1437 | /* | |
1438 | * No more space in the map? | |
1439 | */ | |
1440 | vm_map_entry_dispose(map, new_entry); | |
1441 | vm_map_unlock(map); | |
1442 | return(KERN_NO_SPACE); | |
1443 | } | |
1444 | ||
1445 | entry = hole_entry; | |
1446 | start = entry->vme_start; | |
1447 | } else { | |
1448 | assert(first_free_is_valid(map)); | |
1449 | if ((entry = map->first_free) == vm_map_to_entry(map)) | |
1450 | start = map->min_offset; | |
1451 | else | |
1452 | start = entry->vme_end; | |
1453 | } | |
6d2010ae | 1454 | } |
1c79356b A |
1455 | |
1456 | /* | |
1457 | * In any case, the "entry" always precedes | |
1458 | * the proposed new region throughout the loop: | |
1459 | */ | |
1460 | ||
1461 | while (TRUE) { | |
1462 | register vm_map_entry_t next; | |
1463 | ||
1464 | /* | |
1465 | * Find the end of the proposed new region. | |
1466 | * Be sure we didn't go beyond the end, or | |
1467 | * wrap around the address. | |
1468 | */ | |
1469 | ||
2d21ac55 A |
1470 | if (flags & VM_FLAGS_GUARD_BEFORE) { |
1471 | /* reserve space for the front guard page */ | |
39236c6e | 1472 | start += VM_MAP_PAGE_SIZE(map); |
2d21ac55 | 1473 | } |
1c79356b | 1474 | end = ((start + mask) & ~mask); |
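		/*
		 * Editor's note (illustrative): with mask = 0xFFF
		 * (4K alignment - 1) and start = 0x12345, this rounds
		 * up to end = (0x12345 + 0xFFF) & ~0xFFF = 0x13000.
		 */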
2d21ac55 | 1475 | |
1c79356b A |
1476 | if (end < start) { |
1477 | vm_map_entry_dispose(map, new_entry); | |
1478 | vm_map_unlock(map); | |
1479 | return(KERN_NO_SPACE); | |
1480 | } | |
1481 | start = end; | |
1482 | end += size; | |
1483 | ||
1484 | if ((end > map->max_offset) || (end < start)) { | |
1485 | vm_map_entry_dispose(map, new_entry); | |
1486 | vm_map_unlock(map); | |
1487 | return(KERN_NO_SPACE); | |
1488 | } | |
1489 | ||
1c79356b | 1490 | next = entry->vme_next; |
1c79356b | 1491 | |
3e170ce0 A |
1492 | if (map->holelistenabled) { |
1493 | if (entry->vme_end >= end) | |
1494 | break; | |
1495 | } else { | |
1496 | /* | |
1497 | * If there are no more entries, we must win. | |
1498 | * | |
1499 | * OR | |
1500 | * | |
1501 | * If there is another entry, it must be | |
1502 | * after the end of the potential new region. | |
1503 | */ | |
1c79356b | 1504 | |
3e170ce0 A |
1505 | if (next == vm_map_to_entry(map)) |
1506 | break; | |
1507 | ||
1508 | if (next->vme_start >= end) | |
1509 | break; | |
1510 | } | |
1c79356b A |
1511 | |
1512 | /* | |
1513 | * Didn't fit -- move to the next entry. | |
1514 | */ | |
1515 | ||
1516 | entry = next; | |
3e170ce0 A |
1517 | |
1518 | if (map->holelistenabled) { | |
1519 | if (entry == (vm_map_entry_t) map->holes_list) { | |
1520 | /* | |
1521 | * Wrapped around | |
1522 | */ | |
1523 | vm_map_entry_dispose(map, new_entry); | |
1524 | vm_map_unlock(map); | |
1525 | return(KERN_NO_SPACE); | |
1526 | } | |
1527 | start = entry->vme_start; | |
1528 | } else { | |
1529 | start = entry->vme_end; | |
1530 | } | |
1531 | } | |
1532 | ||
1533 | if (map->holelistenabled) { | |
1534 | if (vm_map_lookup_entry(map, entry->vme_start, &entry)) { | |
1535 | panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", entry, (unsigned long long)entry->vme_start); | |
1536 | } | |
1c79356b A |
1537 | } |
1538 | ||
1539 | /* | |
1540 | * At this point, | |
1541 | * "start" and "end" should define the endpoints of the | |
1542 | * available new range, and | |
1543 | * "entry" should refer to the region before the new | |
1544 | * range, and | |
1545 | * | |
1546 | * the map should be locked. | |
1547 | */ | |
1548 | ||
2d21ac55 A |
1549 | if (flags & VM_FLAGS_GUARD_BEFORE) { |
1550 | /* go back for the front guard page */ | |
39236c6e | 1551 | start -= VM_MAP_PAGE_SIZE(map); |
2d21ac55 | 1552 | } |
1c79356b A |
1553 | *address = start; |
1554 | ||
e2d2fc5c | 1555 | assert(start < end); |
1c79356b A |
1556 | new_entry->vme_start = start; |
1557 | new_entry->vme_end = end; | |
1558 | assert(page_aligned(new_entry->vme_start)); | |
1559 | assert(page_aligned(new_entry->vme_end)); | |
39236c6e A |
1560 | assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, |
1561 | VM_MAP_PAGE_MASK(map))); | |
1562 | assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, | |
1563 | VM_MAP_PAGE_MASK(map))); | |
1c79356b A |
1564 | |
1565 | new_entry->is_shared = FALSE; | |
1566 | new_entry->is_sub_map = FALSE; | |
fe8ab488 | 1567 | new_entry->use_pmap = TRUE; |
3e170ce0 A |
1568 | VME_OBJECT_SET(new_entry, VM_OBJECT_NULL); |
1569 | VME_OFFSET_SET(new_entry, (vm_object_offset_t) 0); | |
1c79356b A |
1570 | |
1571 | new_entry->needs_copy = FALSE; | |
1572 | ||
1573 | new_entry->inheritance = VM_INHERIT_DEFAULT; | |
1574 | new_entry->protection = VM_PROT_DEFAULT; | |
1575 | new_entry->max_protection = VM_PROT_ALL; | |
1576 | new_entry->behavior = VM_BEHAVIOR_DEFAULT; | |
1577 | new_entry->wired_count = 0; | |
1578 | new_entry->user_wired_count = 0; | |
1579 | ||
1580 | new_entry->in_transition = FALSE; | |
1581 | new_entry->needs_wakeup = FALSE; | |
2d21ac55 | 1582 | new_entry->no_cache = FALSE; |
b0d623f7 | 1583 | new_entry->permanent = FALSE; |
39236c6e A |
1584 | new_entry->superpage_size = FALSE; |
1585 | if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) { | |
1586 | new_entry->map_aligned = TRUE; | |
1587 | } else { | |
1588 | new_entry->map_aligned = FALSE; | |
1589 | } | |
2d21ac55 | 1590 | |
3e170ce0 | 1591 | new_entry->used_for_jit = FALSE; |
b0d623f7 | 1592 | new_entry->zero_wired_pages = FALSE; |
fe8ab488 | 1593 | new_entry->iokit_acct = FALSE; |
3e170ce0 A |
1594 | new_entry->vme_resilient_codesign = FALSE; |
1595 | new_entry->vme_resilient_media = FALSE; | |
1c79356b | 1596 | |
3e170ce0 A |
1597 | int alias; |
1598 | VM_GET_FLAGS_ALIAS(flags, alias); | |
1599 | VME_ALIAS_SET(new_entry, alias); | |
0c530ab8 | 1600 | |
1c79356b A |
1601 | /* |
1602 | * Insert the new entry into the list | |
1603 | */ | |
1604 | ||
6d2010ae | 1605 | vm_map_store_entry_link(map, entry, new_entry); |
1c79356b A |
1606 | |
1607 | map->size += size; | |
1608 | ||
1609 | /* | |
1610 | * Update the lookup hint | |
1611 | */ | |
0c530ab8 | 1612 | SAVE_HINT_MAP_WRITE(map, new_entry); |
1c79356b A |
1613 | |
1614 | *o_entry = new_entry; | |
1615 | return(KERN_SUCCESS); | |
1616 | } | |
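/*
 * Editor's note -- illustrative usage sketch, not part of the original
 * source; variable names are assumed.  A kmem_alloc-style caller gets
 * the map back locked on KERN_SUCCESS, installs its object in the new
 * entry, and unlocks:
 *
 *	vm_map_offset_t	addr;
 *	vm_map_entry_t	entry;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_find_space(kernel_map, &addr, size, 0, 0, &entry);
 *	if (kr == KERN_SUCCESS) {
 *		VME_OBJECT_SET(entry, object);
 *		VME_OFFSET_SET(entry, 0);
 *		vm_map_unlock(kernel_map);
 *	}
 */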
1617 | ||
1618 | int vm_map_pmap_enter_print = FALSE; | |
1619 | int vm_map_pmap_enter_enable = FALSE; | |
1620 | ||
1621 | /* | |
91447636 | 1622 | * Routine: vm_map_pmap_enter [internal only] |
1c79356b A |
1623 | * |
1624 | * Description: | |
1625 | * Force pages from the specified object to be entered into | |
1626 | * the pmap at the specified address if they are present. | |
1627 | * As soon as a page is not found in the object, the scan ends.
1628 | * | |
1629 | * Returns: | |
1630 | * Nothing. | |
1631 | * | |
1632 | * In/out conditions: | |
1633 | * The source map should not be locked on entry. | |
1634 | */ | |
fe8ab488 | 1635 | __unused static void |
1c79356b A |
1636 | vm_map_pmap_enter( |
1637 | vm_map_t map, | |
91447636 A |
1638 | register vm_map_offset_t addr, |
1639 | register vm_map_offset_t end_addr, | |
1c79356b A |
1640 | register vm_object_t object, |
1641 | vm_object_offset_t offset, | |
1642 | vm_prot_t protection) | |
1643 | { | |
2d21ac55 A |
1644 | int type_of_fault; |
1645 | kern_return_t kr; | |
0b4e3aa0 | 1646 | |
55e303ae A |
1647 | if(map->pmap == 0) |
1648 | return; | |
1649 | ||
1c79356b A |
1650 | while (addr < end_addr) { |
1651 | register vm_page_t m; | |
1652 | ||
fe8ab488 A |
1653 | |
1654 | /* | |
1655 | * TODO: | |
1656 | * From vm_map_enter(), we come into this function without the map | |
1657 | * lock held or the object lock held. | |
1658 | * We haven't taken a reference on the object either. | |
1659 | * We should do a proper lookup on the map to make sure | |
1660 | * that things are sane before we go locking objects that | |
1661 | * could have been deallocated from under us. | |
1662 | */ | |
1663 | ||
1c79356b | 1664 | vm_object_lock(object); |
1c79356b A |
1665 | |
1666 | m = vm_page_lookup(object, offset); | |
91447636 A |
1667 | /* |
1668 | * ENCRYPTED SWAP: | |
1669 | * The user should never see encrypted data, so do not | |
1670 | * enter an encrypted page in the page table. | |
1671 | */ | |
1672 | if (m == VM_PAGE_NULL || m->busy || m->encrypted || | |
2d21ac55 A |
1673 | m->fictitious || |
1674 | (m->unusual && ( m->error || m->restart || m->absent))) { | |
1c79356b A |
1675 | vm_object_unlock(object); |
1676 | return; | |
1677 | } | |
1678 | ||
1c79356b A |
1679 | if (vm_map_pmap_enter_print) { |
1680 | printf("vm_map_pmap_enter:"); | |
2d21ac55 A |
1681 | printf("map: %p, addr: %llx, object: %p, offset: %llx\n", |
1682 | map, (unsigned long long)addr, object, (unsigned long long)offset); | |
1c79356b | 1683 | } |
2d21ac55 | 1684 | type_of_fault = DBG_CACHE_HIT_FAULT; |
6d2010ae | 1685 | kr = vm_fault_enter(m, map->pmap, addr, protection, protection, |
fe8ab488 A |
1686 | VM_PAGE_WIRED(m), FALSE, FALSE, FALSE, |
1687 | 0, /* XXX need user tag / alias? */ | |
1688 | 0, /* alternate accounting? */ | |
1689 | NULL, | |
2d21ac55 | 1690 | &type_of_fault); |
1c79356b | 1691 | |
1c79356b A |
1692 | vm_object_unlock(object); |
1693 | ||
1694 | offset += PAGE_SIZE_64; | |
1695 | addr += PAGE_SIZE; | |
1696 | } | |
1697 | } | |
1698 | ||
91447636 A |
1699 | boolean_t vm_map_pmap_is_empty( |
1700 | vm_map_t map, | |
1701 | vm_map_offset_t start, | |
1702 | vm_map_offset_t end); | |
1703 | boolean_t vm_map_pmap_is_empty( | |
1704 | vm_map_t map, | |
1705 | vm_map_offset_t start, | |
1706 | vm_map_offset_t end) | |
1707 | { | |
2d21ac55 A |
1708 | #ifdef MACHINE_PMAP_IS_EMPTY |
1709 | return pmap_is_empty(map->pmap, start, end); | |
1710 | #else /* MACHINE_PMAP_IS_EMPTY */ | |
91447636 A |
1711 | vm_map_offset_t offset; |
1712 | ppnum_t phys_page; | |
1713 | ||
1714 | if (map->pmap == NULL) { | |
1715 | return TRUE; | |
1716 | } | |
2d21ac55 | 1717 | |
91447636 A |
1718 | for (offset = start; |
1719 | offset < end; | |
1720 | offset += PAGE_SIZE) { | |
1721 | phys_page = pmap_find_phys(map->pmap, offset); | |
1722 | if (phys_page) { | |
1723 | kprintf("vm_map_pmap_is_empty(%p,0x%llx,0x%llx): " | |
1724 | "page %d at 0x%llx\n", | |
2d21ac55 A |
1725 | map, (long long)start, (long long)end, |
1726 | phys_page, (long long)offset); | |
91447636 A |
1727 | return FALSE; |
1728 | } | |
1729 | } | |
1730 | return TRUE; | |
2d21ac55 | 1731 | #endif /* MACHINE_PMAP_IS_EMPTY */ |
91447636 A |
1732 | } |
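/*
 * Editor's note (illustrative): this helper backs the DEBUG-only
 * sanity check in vm_map_enter() below, along the lines of
 *
 *	assert(vm_map_pmap_is_empty(map, *address, *address + size));
 *
 * i.e. a freshly reserved range must carry no stale translations.
 */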
1733 | ||
316670eb A |
1734 | #define MAX_TRIES_TO_GET_RANDOM_ADDRESS 1000 |
1735 | kern_return_t | |
1736 | vm_map_random_address_for_size( | |
1737 | vm_map_t map, | |
1738 | vm_map_offset_t *address, | |
1739 | vm_map_size_t size) | |
1740 | { | |
1741 | kern_return_t kr = KERN_SUCCESS; | |
1742 | int tries = 0; | |
1743 | vm_map_offset_t random_addr = 0; | |
1744 | vm_map_offset_t hole_end; | |
1745 | ||
1746 | vm_map_entry_t next_entry = VM_MAP_ENTRY_NULL; | |
1747 | vm_map_entry_t prev_entry = VM_MAP_ENTRY_NULL; | |
1748 | vm_map_size_t vm_hole_size = 0; | |
1749 | vm_map_size_t addr_space_size; | |
1750 | ||
1751 | addr_space_size = vm_map_max(map) - vm_map_min(map); | |
1752 | ||
1753 | assert(page_aligned(size)); | |
1754 | ||
1755 | while (tries < MAX_TRIES_TO_GET_RANDOM_ADDRESS) { | |
1756 | random_addr = ((vm_map_offset_t)random()) << PAGE_SHIFT; | |
39236c6e A |
1757 | random_addr = vm_map_trunc_page( |
1758 | vm_map_min(map) +(random_addr % addr_space_size), | |
1759 | VM_MAP_PAGE_MASK(map)); | |
316670eb A |
1760 | |
1761 | if (vm_map_lookup_entry(map, random_addr, &prev_entry) == FALSE) { | |
1762 | if (prev_entry == vm_map_to_entry(map)) { | |
1763 | next_entry = vm_map_first_entry(map); | |
1764 | } else { | |
1765 | next_entry = prev_entry->vme_next; | |
1766 | } | |
1767 | if (next_entry == vm_map_to_entry(map)) { | |
1768 | hole_end = vm_map_max(map); | |
1769 | } else { | |
1770 | hole_end = next_entry->vme_start; | |
1771 | } | |
1772 | vm_hole_size = hole_end - random_addr; | |
1773 | if (vm_hole_size >= size) { | |
1774 | *address = random_addr; | |
1775 | break; | |
1776 | } | |
1777 | } | |
1778 | tries++; | |
1779 | } | |
1780 | ||
1781 | if (tries == MAX_TRIES_TO_GET_RANDOM_ADDRESS) { | |
1782 | kr = KERN_NO_SPACE; | |
1783 | } | |
1784 | return kr; | |
1785 | } | |
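/*
 * Editor's note (illustrative): with PAGE_SHIFT == 12, a random()
 * value of 0x1a2b3 yields candidate offset 0x1a2b3000, which is
 * reduced modulo the address-space size, rebased at vm_map_min(map)
 * and truncated to the map's page mask.  The candidate is accepted
 * only if the hole starting there is at least "size" bytes long;
 * otherwise up to MAX_TRIES_TO_GET_RANDOM_ADDRESS probes are made.
 */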
1786 | ||
1c79356b A |
1787 | /* |
1788 | * Routine: vm_map_enter | |
1789 | * | |
1790 | * Description: | |
1791 | * Allocate a range in the specified virtual address map. | |
1792 | * The resulting range will refer to memory defined by | |
1793 | * the given memory object and offset into that object. | |
1794 | * | |
1795 | * Arguments are as defined in the vm_map call. | |
1796 | */ | |
91447636 A |
1797 | int _map_enter_debug = 0; |
1798 | static unsigned int vm_map_enter_restore_successes = 0; | |
1799 | static unsigned int vm_map_enter_restore_failures = 0; | |
1c79356b A |
1800 | kern_return_t |
1801 | vm_map_enter( | |
91447636 | 1802 | vm_map_t map, |
593a1d5f | 1803 | vm_map_offset_t *address, /* IN/OUT */ |
91447636 | 1804 | vm_map_size_t size, |
593a1d5f | 1805 | vm_map_offset_t mask, |
1c79356b A |
1806 | int flags, |
1807 | vm_object_t object, | |
1808 | vm_object_offset_t offset, | |
1809 | boolean_t needs_copy, | |
1810 | vm_prot_t cur_protection, | |
1811 | vm_prot_t max_protection, | |
1812 | vm_inherit_t inheritance) | |
1813 | { | |
91447636 | 1814 | vm_map_entry_t entry, new_entry; |
2d21ac55 | 1815 | vm_map_offset_t start, tmp_start, tmp_offset; |
91447636 | 1816 | vm_map_offset_t end, tmp_end; |
b0d623f7 A |
1817 | vm_map_offset_t tmp2_start, tmp2_end; |
1818 | vm_map_offset_t step; | |
1c79356b | 1819 | kern_return_t result = KERN_SUCCESS; |
91447636 A |
1820 | vm_map_t zap_old_map = VM_MAP_NULL; |
1821 | vm_map_t zap_new_map = VM_MAP_NULL; | |
1822 | boolean_t map_locked = FALSE; | |
1823 | boolean_t pmap_empty = TRUE; | |
1824 | boolean_t new_mapping_established = FALSE; | |
fe8ab488 | 1825 | boolean_t keep_map_locked = ((flags & VM_FLAGS_KEEP_MAP_LOCKED) != 0); |
91447636 A |
1826 | boolean_t anywhere = ((flags & VM_FLAGS_ANYWHERE) != 0); |
1827 | boolean_t purgable = ((flags & VM_FLAGS_PURGABLE) != 0); | |
1828 | boolean_t overwrite = ((flags & VM_FLAGS_OVERWRITE) != 0); | |
2d21ac55 A |
1829 | boolean_t no_cache = ((flags & VM_FLAGS_NO_CACHE) != 0); |
1830 | boolean_t is_submap = ((flags & VM_FLAGS_SUBMAP) != 0); | |
b0d623f7 | 1831 | boolean_t permanent = ((flags & VM_FLAGS_PERMANENT) != 0); |
316670eb | 1832 | boolean_t entry_for_jit = ((flags & VM_FLAGS_MAP_JIT) != 0); |
fe8ab488 | 1833 | boolean_t iokit_acct = ((flags & VM_FLAGS_IOKIT_ACCT) != 0); |
3e170ce0 A |
1834 | boolean_t resilient_codesign = ((flags & VM_FLAGS_RESILIENT_CODESIGN) != 0); |
1835 | boolean_t resilient_media = ((flags & VM_FLAGS_RESILIENT_MEDIA) != 0); | |
b0d623f7 | 1836 | unsigned int superpage_size = ((flags & VM_FLAGS_SUPERPAGE_MASK) >> VM_FLAGS_SUPERPAGE_SHIFT); |
3e170ce0 | 1837 | vm_tag_t alias, user_alias; |
2d21ac55 | 1838 | vm_map_offset_t effective_min_offset, effective_max_offset; |
593a1d5f | 1839 | kern_return_t kr; |
39236c6e | 1840 | boolean_t clear_map_aligned = FALSE; |
3e170ce0 | 1841 | vm_map_entry_t hole_entry; |
593a1d5f | 1842 | |
b0d623f7 A |
1843 | if (superpage_size) { |
1844 | switch (superpage_size) { | |
1845 | /* | |
1846 | * Note that the current implementation only supports | |
1847 | * a single size for superpages, SUPERPAGE_SIZE, per | |
1848 | * architecture. Once more sizes are to be supported,
1849 | * SUPERPAGE_SIZE has to be replaced with a lookup of
1850 | * the size depending on superpage_size.
1851 | */ | |
1852 | #ifdef __x86_64__ | |
6d2010ae A |
1853 | case SUPERPAGE_SIZE_ANY: |
1854 | /* handle it like 2 MB and round up to page size */ | |
1855 | size = (size + 2*1024*1024 - 1) & ~(2*1024*1024 - 1); | |
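			/* falls through: handled like 2 MB from here on */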
b0d623f7 A |
1856 | case SUPERPAGE_SIZE_2MB: |
1857 | break; | |
1858 | #endif | |
1859 | default: | |
1860 | return KERN_INVALID_ARGUMENT; | |
1861 | } | |
1862 | mask = SUPERPAGE_SIZE-1; | |
1863 | if (size & (SUPERPAGE_SIZE-1)) | |
1864 | return KERN_INVALID_ARGUMENT; | |
1865 | inheritance = VM_INHERIT_NONE; /* fork() children won't inherit superpages */ | |
1866 | } | |
1867 | ||
6d2010ae | 1868 | |
1c79356b | 1869 | |
3e170ce0 A |
1870 | if (resilient_codesign || resilient_media) { |
1871 | if ((cur_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) || | |
1872 | (max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE))) { | |
1873 | return KERN_PROTECTION_FAILURE; | |
1874 | } | |
1875 | } | |
1876 | ||
2d21ac55 A |
1877 | if (is_submap) { |
1878 | if (purgable) { | |
1879 | /* submaps cannot be purgeable */
1880 | return KERN_INVALID_ARGUMENT; | |
1881 | } | |
1882 | if (object == VM_OBJECT_NULL) { | |
1883 | /* submaps cannot be created lazily */
1884 | return KERN_INVALID_ARGUMENT; | |
1885 | } | |
1886 | } | |
1887 | if (flags & VM_FLAGS_ALREADY) { | |
1888 | /* | |
1889 | * VM_FLAGS_ALREADY says that it's OK if the same mapping | |
1890 | * is already present. For it to be meaningful, the requested
1891 | * mapping has to be at a fixed address (!VM_FLAGS_ANYWHERE) and
1892 | * we shouldn't try to remove what was mapped there first
1893 | * (!VM_FLAGS_OVERWRITE). | |
1894 | */ | |
1895 | if ((flags & VM_FLAGS_ANYWHERE) || | |
1896 | (flags & VM_FLAGS_OVERWRITE)) { | |
1897 | return KERN_INVALID_ARGUMENT; | |
1898 | } | |
1899 | } | |
1900 | ||
6d2010ae | 1901 | effective_min_offset = map->min_offset; |
b0d623f7 | 1902 | |
2d21ac55 A |
1903 | if (flags & VM_FLAGS_BEYOND_MAX) { |
1904 | /* | |
b0d623f7 | 1905 | * Allow an insertion beyond the map's max offset. |
2d21ac55 A |
1906 | */ |
1907 | if (vm_map_is_64bit(map)) | |
1908 | effective_max_offset = 0xFFFFFFFFFFFFF000ULL; | |
1909 | else | |
1910 | effective_max_offset = 0x00000000FFFFF000ULL; | |
1911 | } else { | |
1912 | effective_max_offset = map->max_offset; | |
1913 | } | |
1914 | ||
1915 | if (size == 0 || | |
1916 | (offset & PAGE_MASK_64) != 0) { | |
91447636 A |
1917 | *address = 0; |
1918 | return KERN_INVALID_ARGUMENT; | |
1919 | } | |
1920 | ||
1c79356b | 1921 | VM_GET_FLAGS_ALIAS(flags, alias); |
3e170ce0 A |
1922 | if (map->pmap == kernel_pmap) { |
1923 | user_alias = VM_KERN_MEMORY_NONE; | |
1924 | } else { | |
1925 | user_alias = alias; | |
1926 | } | |
2d21ac55 | 1927 | |
1c79356b A |
1928 | #define RETURN(value) { result = value; goto BailOut; } |
1929 | ||
1930 | assert(page_aligned(*address)); | |
1931 | assert(page_aligned(size)); | |
91447636 | 1932 | |
39236c6e A |
1933 | if (!VM_MAP_PAGE_ALIGNED(size, VM_MAP_PAGE_MASK(map))) { |
1934 | /* | |
1935 | * In most cases, the caller rounds the size up to the | |
1936 | * map's page size. | |
1937 | * If we get a size that is explicitly not map-aligned here, | |
1938 | * we'll have to respect the caller's wish and mark the | |
1939 | * mapping as "not map-aligned" to avoid tripping the | |
1940 | * map alignment checks later. | |
1941 | */ | |
1942 | clear_map_aligned = TRUE; | |
1943 | } | |
fe8ab488 A |
1944 | if (!anywhere && |
1945 | !VM_MAP_PAGE_ALIGNED(*address, VM_MAP_PAGE_MASK(map))) { | |
1946 | /* | |
1947 | * We've been asked to map at a fixed address and that | |
1948 | * address is not aligned to the map's specific alignment. | |
1949 | * The caller should know what it's doing (i.e. most likely | |
1950 | * mapping some fragmented copy map, transferring memory from | |
1951 | * a VM map with a different alignment), so clear map_aligned | |
1952 | * for this new VM map entry and proceed. | |
1953 | */ | |
1954 | clear_map_aligned = TRUE; | |
1955 | } | |
39236c6e | 1956 | |
91447636 A |
1957 | /* |
1958 | * Only zero-fill objects are allowed to be purgable. | |
1959 | * LP64todo - limit purgable objects to 32-bits for now | |
1960 | */ | |
1961 | if (purgable && | |
1962 | (offset != 0 || | |
1963 | (object != VM_OBJECT_NULL && | |
6d2010ae | 1964 | (object->vo_size != size || |
2d21ac55 | 1965 | object->purgable == VM_PURGABLE_DENY)) |
b0d623f7 | 1966 | || size > ANON_MAX_SIZE)) /* LP64todo: remove when dp capable */ |
91447636 A |
1967 | return KERN_INVALID_ARGUMENT; |
1968 | ||
1969 | if (!anywhere && overwrite) { | |
1970 | /* | |
1971 | * Create a temporary VM map to hold the old mappings in the | |
1972 | * affected area while we create the new one. | |
1973 | * This avoids releasing the VM map lock in | |
1974 | * vm_map_entry_delete() and allows atomicity | |
1975 | * when we want to replace some mappings with a new one. | |
1976 | * It also allows us to restore the old VM mappings if the | |
1977 | * new mapping fails. | |
1978 | */ | |
1979 | zap_old_map = vm_map_create(PMAP_NULL, | |
1980 | *address, | |
1981 | *address + size, | |
b0d623f7 | 1982 | map->hdr.entries_pageable); |
39236c6e | 1983 | vm_map_set_page_shift(zap_old_map, VM_MAP_PAGE_SHIFT(map)); |
3e170ce0 | 1984 | vm_map_disable_hole_optimization(zap_old_map); |
91447636 A |
1985 | } |
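	/*
	 * Editor's note (illustrative): this makes a fixed "overwrite"
	 * mapping transactional -- the old entries are parked in
	 * zap_old_map, re-linked into "map" by the restore path at the
	 * end of this function if the new mapping fails, and simply
	 * destroyed with zap_old_map on success.
	 */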
1986 | ||
2d21ac55 | 1987 | StartAgain: ; |
1c79356b A |
1988 | |
1989 | start = *address; | |
1990 | ||
1991 | if (anywhere) { | |
1992 | vm_map_lock(map); | |
91447636 | 1993 | map_locked = TRUE; |
6d2010ae | 1994 | |
316670eb A |
1995 | if (entry_for_jit) { |
1996 | if (map->jit_entry_exists) { | |
1997 | result = KERN_INVALID_ARGUMENT; | |
1998 | goto BailOut; | |
1999 | } | |
2000 | /* | |
2001 | * Get a random start address. | |
2002 | */ | |
2003 | result = vm_map_random_address_for_size(map, address, size); | |
2004 | if (result != KERN_SUCCESS) { | |
2005 | goto BailOut; | |
2006 | } | |
2007 | start = *address; | |
6d2010ae | 2008 | } |
1c79356b | 2009 | |
316670eb | 2010 | |
1c79356b A |
2011 | /* |
2012 | * Calculate the first possible address. | |
2013 | */ | |
2014 | ||
2d21ac55 A |
2015 | if (start < effective_min_offset) |
2016 | start = effective_min_offset; | |
2017 | if (start > effective_max_offset) | |
1c79356b A |
2018 | RETURN(KERN_NO_SPACE); |
2019 | ||
2020 | /* | |
2021 | * Look for the first possible address; | |
2022 | * if there's already something at this | |
2023 | * address, we have to start after it. | |
2024 | */ | |
2025 | ||
6d2010ae A |
2026 | if( map->disable_vmentry_reuse == TRUE) { |
2027 | VM_MAP_HIGHEST_ENTRY(map, entry, start); | |
1c79356b | 2028 | } else { |
6d2010ae | 2029 | |
3e170ce0 A |
2030 | if (map->holelistenabled) { |
2031 | hole_entry = (vm_map_entry_t)map->holes_list; | |
2032 | ||
2033 | if (hole_entry == NULL) { | |
2034 | /* | |
2035 | * No more space in the map? | |
2036 | */ | |
2037 | result = KERN_NO_SPACE; | |
2038 | goto BailOut; | |
2039 | } else { | |
2040 | ||
2041 | boolean_t found_hole = FALSE; | |
2042 | ||
2043 | do { | |
2044 | if (hole_entry->vme_start >= start) { | |
2045 | start = hole_entry->vme_start; | |
2046 | found_hole = TRUE; | |
2047 | break; | |
2048 | } | |
2049 | ||
2050 | if (hole_entry->vme_end > start) { | |
2051 | found_hole = TRUE; | |
2052 | break; | |
2053 | } | |
2054 | hole_entry = hole_entry->vme_next; | |
2055 | ||
2056 | } while (hole_entry != (vm_map_entry_t) map->holes_list); | |
2057 | ||
2058 | if (found_hole == FALSE) { | |
2059 | result = KERN_NO_SPACE; | |
2060 | goto BailOut; | |
2061 | } | |
2062 | ||
2063 | entry = hole_entry; | |
6d2010ae | 2064 | |
3e170ce0 A |
2065 | if (start == 0) |
2066 | start += PAGE_SIZE_64; | |
2067 | } | |
6d2010ae | 2068 | } else { |
3e170ce0 A |
2069 | assert(first_free_is_valid(map)); |
2070 | ||
2071 | entry = map->first_free; | |
2072 | ||
2073 | if (entry == vm_map_to_entry(map)) { | |
6d2010ae | 2074 | entry = NULL; |
3e170ce0 A |
2075 | } else { |
2076 | if (entry->vme_next == vm_map_to_entry(map)){ | |
2077 | /* | |
2078 | * Hole at the end of the map. | |
2079 | */ | |
2080 | entry = NULL; | |
2081 | } else { | |
2082 | if (start < (entry->vme_next)->vme_start ) { | |
2083 | start = entry->vme_end; | |
2084 | start = vm_map_round_page(start, | |
2085 | VM_MAP_PAGE_MASK(map)); | |
2086 | } else { | |
2087 | /* | |
2088 | * Need to do a lookup. | |
2089 | */ | |
2090 | entry = NULL; | |
2091 | } | |
2092 | } | |
2093 | } | |
2094 | ||
2095 | if (entry == NULL) { | |
2096 | vm_map_entry_t tmp_entry; | |
2097 | if (vm_map_lookup_entry(map, start, &tmp_entry)) { | |
2098 | assert(!entry_for_jit); | |
2099 | start = tmp_entry->vme_end; | |
39236c6e A |
2100 | start = vm_map_round_page(start, |
2101 | VM_MAP_PAGE_MASK(map)); | |
6d2010ae | 2102 | } |
3e170ce0 | 2103 | entry = tmp_entry; |
316670eb | 2104 | } |
6d2010ae | 2105 | } |
1c79356b A |
2106 | } |
2107 | ||
2108 | /* | |
2109 | * In any case, the "entry" always precedes | |
2110 | * the proposed new region throughout the | |
2111 | * loop: | |
2112 | */ | |
2113 | ||
2114 | while (TRUE) { | |
2115 | register vm_map_entry_t next; | |
2116 | ||
2d21ac55 | 2117 | /* |
1c79356b A |
2118 | * Find the end of the proposed new region. |
2119 | * Be sure we didn't go beyond the end, or | |
2120 | * wrap around the address. | |
2121 | */ | |
2122 | ||
2123 | end = ((start + mask) & ~mask); | |
39236c6e A |
2124 | end = vm_map_round_page(end, |
2125 | VM_MAP_PAGE_MASK(map)); | |
1c79356b A |
2126 | if (end < start) |
2127 | RETURN(KERN_NO_SPACE); | |
2128 | start = end; | |
39236c6e A |
2129 | assert(VM_MAP_PAGE_ALIGNED(start, |
2130 | VM_MAP_PAGE_MASK(map))); | |
1c79356b A |
2131 | end += size; |
2132 | ||
2d21ac55 | 2133 | if ((end > effective_max_offset) || (end < start)) { |
1c79356b | 2134 | if (map->wait_for_space) { |
fe8ab488 | 2135 | assert(!keep_map_locked); |
2d21ac55 A |
2136 | if (size <= (effective_max_offset - |
2137 | effective_min_offset)) { | |
1c79356b A |
2138 | assert_wait((event_t)map, |
2139 | THREAD_ABORTSAFE); | |
2140 | vm_map_unlock(map); | |
91447636 A |
2141 | map_locked = FALSE; |
2142 | thread_block(THREAD_CONTINUE_NULL); | |
1c79356b A |
2143 | goto StartAgain; |
2144 | } | |
2145 | } | |
2146 | RETURN(KERN_NO_SPACE); | |
2147 | } | |
2148 | ||
1c79356b | 2149 | next = entry->vme_next; |
1c79356b | 2150 | |
3e170ce0 A |
2151 | if (map->holelistenabled) { |
2152 | if (entry->vme_end >= end) | |
2153 | break; | |
2154 | } else { | |
2155 | /* | |
2156 | * If there are no more entries, we must win. | |
2157 | * | |
2158 | * OR | |
2159 | * | |
2160 | * If there is another entry, it must be | |
2161 | * after the end of the potential new region. | |
2162 | */ | |
1c79356b | 2163 | |
3e170ce0 A |
2164 | if (next == vm_map_to_entry(map)) |
2165 | break; | |
2166 | ||
2167 | if (next->vme_start >= end) | |
2168 | break; | |
2169 | } | |
1c79356b A |
2170 | |
2171 | /* | |
2172 | * Didn't fit -- move to the next entry. | |
2173 | */ | |
2174 | ||
2175 | entry = next; | |
3e170ce0 A |
2176 | |
2177 | if (map->holelistenabled) { | |
2178 | if (entry == (vm_map_entry_t) map->holes_list) { | |
2179 | /* | |
2180 | * Wrapped around | |
2181 | */ | |
2182 | result = KERN_NO_SPACE; | |
2183 | goto BailOut; | |
2184 | } | |
2185 | start = entry->vme_start; | |
2186 | } else { | |
2187 | start = entry->vme_end; | |
2188 | } | |
2189 | ||
39236c6e A |
2190 | start = vm_map_round_page(start, |
2191 | VM_MAP_PAGE_MASK(map)); | |
1c79356b | 2192 | } |
3e170ce0 A |
2193 | |
2194 | if (map->holelistenabled) { | |
2195 | if (vm_map_lookup_entry(map, entry->vme_start, &entry)) { | |
2196 | panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", entry, (unsigned long long)entry->vme_start); | |
2197 | } | |
2198 | } | |
2199 | ||
1c79356b | 2200 | *address = start; |
39236c6e A |
2201 | assert(VM_MAP_PAGE_ALIGNED(*address, |
2202 | VM_MAP_PAGE_MASK(map))); | |
1c79356b | 2203 | } else { |
1c79356b A |
2204 | /* |
2205 | * Verify that: | |
2206 | * the address doesn't itself violate | |
2207 | * the mask requirement. | |
2208 | */ | |
2209 | ||
2210 | vm_map_lock(map); | |
91447636 | 2211 | map_locked = TRUE; |
1c79356b A |
2212 | if ((start & mask) != 0) |
2213 | RETURN(KERN_NO_SPACE); | |
2214 | ||
2215 | /* | |
2216 | * ... the address is within bounds | |
2217 | */ | |
2218 | ||
2219 | end = start + size; | |
2220 | ||
2d21ac55 A |
2221 | if ((start < effective_min_offset) || |
2222 | (end > effective_max_offset) || | |
1c79356b A |
2223 | (start >= end)) { |
2224 | RETURN(KERN_INVALID_ADDRESS); | |
2225 | } | |
2226 | ||
91447636 A |
2227 | if (overwrite && zap_old_map != VM_MAP_NULL) { |
2228 | /* | |
2229 | * Fixed mapping and "overwrite" flag: attempt to | |
2230 | * remove all existing mappings in the specified | |
2231 | * address range, saving them in our "zap_old_map". | |
2232 | */ | |
2233 | (void) vm_map_delete(map, start, end, | |
fe8ab488 A |
2234 | (VM_MAP_REMOVE_SAVE_ENTRIES | |
2235 | VM_MAP_REMOVE_NO_MAP_ALIGN), | |
91447636 A |
2236 | zap_old_map); |
2237 | } | |
2238 | ||
1c79356b A |
2239 | /* |
2240 | * ... the starting address isn't allocated | |
2241 | */ | |
2242 | ||
2d21ac55 A |
2243 | if (vm_map_lookup_entry(map, start, &entry)) { |
2244 | if (! (flags & VM_FLAGS_ALREADY)) { | |
2245 | RETURN(KERN_NO_SPACE); | |
2246 | } | |
2247 | /* | |
2248 | * Check if what's already there is what we want. | |
2249 | */ | |
2250 | tmp_start = start; | |
2251 | tmp_offset = offset; | |
2252 | if (entry->vme_start < start) { | |
2253 | tmp_start -= start - entry->vme_start; | |
2254 | tmp_offset -= start - entry->vme_start; | |
2255 | ||
2256 | } | |
2257 | for (; entry->vme_start < end; | |
2258 | entry = entry->vme_next) { | |
4a3eedf9 A |
2259 | /* |
2260 | * Check if the mapping's attributes | |
2261 | * match the existing map entry. | |
2262 | */ | |
2d21ac55 A |
2263 | if (entry == vm_map_to_entry(map) || |
2264 | entry->vme_start != tmp_start || | |
2265 | entry->is_sub_map != is_submap || | |
3e170ce0 | 2266 | VME_OFFSET(entry) != tmp_offset || |
2d21ac55 A |
2267 | entry->needs_copy != needs_copy || |
2268 | entry->protection != cur_protection || | |
2269 | entry->max_protection != max_protection || | |
2270 | entry->inheritance != inheritance || | |
fe8ab488 | 2271 | entry->iokit_acct != iokit_acct || |
3e170ce0 | 2272 | VME_ALIAS(entry) != alias) { |
2d21ac55 A |
2273 | /* not the same mapping ! */ |
2274 | RETURN(KERN_NO_SPACE); | |
2275 | } | |
4a3eedf9 A |
2276 | /* |
2277 | * Check if the same object is being mapped. | |
2278 | */ | |
2279 | if (is_submap) { | |
3e170ce0 | 2280 | if (VME_SUBMAP(entry) != |
4a3eedf9 A |
2281 | (vm_map_t) object) { |
2282 | /* not the same submap */ | |
2283 | RETURN(KERN_NO_SPACE); | |
2284 | } | |
2285 | } else { | |
3e170ce0 | 2286 | if (VME_OBJECT(entry) != object) { |
4a3eedf9 A |
2287 | /* not the same VM object... */ |
2288 | vm_object_t obj2; | |
2289 | ||
3e170ce0 | 2290 | obj2 = VME_OBJECT(entry); |
4a3eedf9 A |
2291 | if ((obj2 == VM_OBJECT_NULL || |
2292 | obj2->internal) && | |
2293 | (object == VM_OBJECT_NULL || | |
2294 | object->internal)) { | |
2295 | /* | |
2296 | * ... but both are | |
2297 | * anonymous memory, | |
2298 | * so equivalent. | |
2299 | */ | |
2300 | } else { | |
2301 | RETURN(KERN_NO_SPACE); | |
2302 | } | |
2303 | } | |
2304 | } | |
2305 | ||
2d21ac55 A |
2306 | tmp_offset += entry->vme_end - entry->vme_start; |
2307 | tmp_start += entry->vme_end - entry->vme_start; | |
2308 | if (entry->vme_end >= end) { | |
2309 | /* reached the end of our mapping */ | |
2310 | break; | |
2311 | } | |
2312 | } | |
2313 | /* it all matches: let's use what's already there ! */ | |
2314 | RETURN(KERN_MEMORY_PRESENT); | |
2315 | } | |
1c79356b A |
2316 | |
2317 | /* | |
2318 | * ... the next region doesn't overlap the | |
2319 | * end point. | |
2320 | */ | |
2321 | ||
2322 | if ((entry->vme_next != vm_map_to_entry(map)) && | |
2323 | (entry->vme_next->vme_start < end)) | |
2324 | RETURN(KERN_NO_SPACE); | |
2325 | } | |
2326 | ||
2327 | /* | |
2328 | * At this point, | |
2329 | * "start" and "end" should define the endpoints of the | |
2330 | * available new range, and | |
2331 | * "entry" should refer to the region before the new | |
2332 | * range, and | |
2333 | * | |
2334 | * the map should be locked. | |
2335 | */ | |
2336 | ||
2337 | /* | |
2338 | * See whether we can avoid creating a new entry (and object) by | |
2339 | * extending one of our neighbors. [So far, we only attempt to | |
91447636 A |
2340 | * extend from below.] Note that we can never extend/join |
2341 | * purgable objects because they need to remain distinct | |
2342 | * entities in order to implement their "volatile object" | |
2343 | * semantics. | |
1c79356b A |
2344 | */ |
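	/*
	 * Editor's note (illustrative): if the preceding entry covers
	 * [0x1000, 0x3000) with an anonymous object, the new range is
	 * [0x3000, 0x5000) and every attribute compared below matches,
	 * vm_object_coalesce() lets us grow that entry to
	 * [0x1000, 0x5000) instead of creating a new entry and object.
	 */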
2345 | ||
316670eb | 2346 | if (purgable || entry_for_jit) { |
91447636 | 2347 | if (object == VM_OBJECT_NULL) { |
3e170ce0 | 2348 | |
91447636 A |
2349 | object = vm_object_allocate(size); |
2350 | object->copy_strategy = MEMORY_OBJECT_COPY_NONE; | |
fe8ab488 | 2351 | object->true_share = TRUE; |
316670eb | 2352 | if (purgable) { |
fe8ab488 | 2353 | task_t owner; |
316670eb | 2354 | object->purgable = VM_PURGABLE_NONVOLATILE; |
fe8ab488 A |
2355 | if (map->pmap == kernel_pmap) { |
2356 | /* | |
2357 | * Purgeable mappings made in a kernel | |
2358 | * map are "owned" by the kernel itself | |
2359 | * rather than the current user task | |
2360 | * because they're likely to be used by | |
2361 | * more than this user task (see | |
2362 | * execargs_purgeable_allocate(), for | |
2363 | * example). | |
2364 | */ | |
2365 | owner = kernel_task; | |
2366 | } else { | |
2367 | owner = current_task(); | |
2368 | } | |
2369 | assert(object->vo_purgeable_owner == NULL); | |
2370 | assert(object->resident_page_count == 0); | |
2371 | assert(object->wired_page_count == 0); | |
2372 | vm_object_lock(object); | |
2373 | vm_purgeable_nonvolatile_enqueue(object, owner); | |
2374 | vm_object_unlock(object); | |
316670eb | 2375 | } |
91447636 A |
2376 | offset = (vm_object_offset_t)0; |
2377 | } | |
2d21ac55 A |
2378 | } else if ((is_submap == FALSE) && |
2379 | (object == VM_OBJECT_NULL) && | |
2380 | (entry != vm_map_to_entry(map)) && | |
2381 | (entry->vme_end == start) && | |
2382 | (!entry->is_shared) && | |
2383 | (!entry->is_sub_map) && | |
fe8ab488 A |
2384 | (!entry->in_transition) && |
2385 | (!entry->needs_wakeup) && | |
2386 | (entry->behavior == VM_BEHAVIOR_DEFAULT) && | |
2d21ac55 A |
2387 | (entry->protection == cur_protection) && |
2388 | (entry->max_protection == max_protection) && | |
fe8ab488 | 2389 | (entry->inheritance == inheritance) && |
3e170ce0 A |
2390 | ((user_alias == VM_MEMORY_REALLOC) || |
2391 | (VME_ALIAS(entry) == alias)) && | |
2d21ac55 | 2392 | (entry->no_cache == no_cache) && |
fe8ab488 A |
2393 | (entry->permanent == permanent) && |
2394 | (!entry->superpage_size && !superpage_size) && | |
39236c6e A |
2395 | /* |
2396 | * No coalescing if not map-aligned, to avoid propagating | |
2397 | * that condition any further than needed: | |
2398 | */ | |
2399 | (!entry->map_aligned || !clear_map_aligned) && | |
fe8ab488 A |
2400 | (!entry->zero_wired_pages) && |
2401 | (!entry->used_for_jit && !entry_for_jit) && | |
2402 | (entry->iokit_acct == iokit_acct) && | |
3e170ce0 A |
2403 | (!entry->vme_resilient_codesign) && |
2404 | (!entry->vme_resilient_media) && | |
fe8ab488 | 2405 | |
b0d623f7 | 2406 | ((entry->vme_end - entry->vme_start) + size <= |
3e170ce0 | 2407 | (user_alias == VM_MEMORY_REALLOC ? |
b0d623f7 A |
2408 | ANON_CHUNK_SIZE : |
2409 | NO_COALESCE_LIMIT)) && | |
fe8ab488 | 2410 | |
2d21ac55 | 2411 | (entry->wired_count == 0)) { /* implies user_wired_count == 0 */ |
3e170ce0 | 2412 | if (vm_object_coalesce(VME_OBJECT(entry), |
2d21ac55 | 2413 | VM_OBJECT_NULL, |
3e170ce0 | 2414 | VME_OFFSET(entry), |
2d21ac55 A |
2415 | (vm_object_offset_t) 0, |
2416 | (vm_map_size_t)(entry->vme_end - entry->vme_start), | |
2417 | (vm_map_size_t)(end - entry->vme_end))) { | |
1c79356b A |
2418 | |
2419 | /* | |
2420 | * Coalesced the two objects - can extend | |
2421 | * the previous map entry to include the | |
2422 | * new range. | |
2423 | */ | |
2424 | map->size += (end - entry->vme_end); | |
e2d2fc5c | 2425 | assert(entry->vme_start < end); |
39236c6e A |
2426 | assert(VM_MAP_PAGE_ALIGNED(end, |
2427 | VM_MAP_PAGE_MASK(map))); | |
3e170ce0 A |
2428 | if (__improbable(vm_debug_events)) |
2429 | DTRACE_VM5(map_entry_extend, vm_map_t, map, vm_map_entry_t, entry, vm_address_t, entry->vme_start, vm_address_t, entry->vme_end, vm_address_t, end); | |
1c79356b | 2430 | entry->vme_end = end; |
3e170ce0 A |
2431 | if (map->holelistenabled) { |
2432 | vm_map_store_update_first_free(map, entry, TRUE); | |
2433 | } else { | |
2434 | vm_map_store_update_first_free(map, map->first_free, TRUE); | |
2435 | } | |
fe8ab488 | 2436 | new_mapping_established = TRUE; |
1c79356b A |
2437 | RETURN(KERN_SUCCESS); |
2438 | } | |
2439 | } | |
2440 | ||
b0d623f7 A |
2441 | step = superpage_size ? SUPERPAGE_SIZE : (end - start); |
2442 | new_entry = NULL; | |
2443 | ||
2444 | for (tmp2_start = start; tmp2_start<end; tmp2_start += step) { | |
2445 | tmp2_end = tmp2_start + step; | |
2446 | /* | |
2447 | * Create a new entry | |
2448 | * LP64todo - for now, we can only allocate 4GB internal objects | |
2449 | * because the default pager can't page bigger ones. Remove this | |
2450 | * when it can. | |
2451 | * | |
2452 | * XXX FBDP | |
2453 | * The reserved "page zero" in each process's address space can | |
2454 | * be arbitrarily large. Splitting it into separate 4GB objects and | |
2455 | * therefore different VM map entries serves no purpose and just | |
2456 | * slows down operations on the VM map, so let's not split the | |
2457 | * allocation into 4GB chunks if the max protection is NONE. That | |
2458 | * memory should never be accessible, so it will never get to the | |
2459 | * default pager. | |
2460 | */ | |
2461 | tmp_start = tmp2_start; | |
2462 | if (object == VM_OBJECT_NULL && | |
2463 | size > (vm_map_size_t)ANON_CHUNK_SIZE && | |
2464 | max_protection != VM_PROT_NONE && | |
2465 | superpage_size == 0) | |
2466 | tmp_end = tmp_start + (vm_map_size_t)ANON_CHUNK_SIZE; | |
2467 | else | |
2468 | tmp_end = tmp2_end; | |
2469 | do { | |
2470 | new_entry = vm_map_entry_insert(map, entry, tmp_start, tmp_end, | |
2471 | object, offset, needs_copy, | |
2472 | FALSE, FALSE, | |
2473 | cur_protection, max_protection, | |
2474 | VM_BEHAVIOR_DEFAULT, | |
316670eb | 2475 | (entry_for_jit)? VM_INHERIT_NONE: inheritance, |
6d2010ae | 2476 | 0, no_cache, |
39236c6e A |
2477 | permanent, |
2478 | superpage_size, | |
fe8ab488 A |
2479 | clear_map_aligned, |
2480 | is_submap); | |
3e170ce0 A |
2481 | |
2482 | assert((object != kernel_object) || (VM_KERN_MEMORY_NONE != alias)); | |
2483 | VME_ALIAS_SET(new_entry, alias); | |
2484 | ||
316670eb | 2485 | if (entry_for_jit){ |
6d2010ae A |
2486 | if (!(map->jit_entry_exists)){ |
2487 | new_entry->used_for_jit = TRUE; | |
2488 | map->jit_entry_exists = TRUE; | |
2489 | } | |
2490 | } | |
2491 | ||
3e170ce0 A |
2492 | if (resilient_codesign && |
2493 | ! ((cur_protection | max_protection) & | |
2494 | (VM_PROT_WRITE | VM_PROT_EXECUTE))) { | |
2495 | new_entry->vme_resilient_codesign = TRUE; | |
2496 | } | |
2497 | ||
2498 | if (resilient_media && | |
2499 | ! ((cur_protection | max_protection) & | |
2500 | (VM_PROT_WRITE | VM_PROT_EXECUTE))) { | |
2501 | new_entry->vme_resilient_media = TRUE; | |
2502 | } | |
2503 | ||
fe8ab488 A |
2504 | assert(!new_entry->iokit_acct); |
2505 | if (!is_submap && | |
2506 | object != VM_OBJECT_NULL && | |
2507 | object->purgable != VM_PURGABLE_DENY) { | |
2508 | assert(new_entry->use_pmap); | |
2509 | assert(!new_entry->iokit_acct); | |
2510 | /* | |
2511 | * Turn off pmap accounting since | |
2512 | * purgeable objects have their | |
2513 | * own ledgers. | |
2514 | */ | |
2515 | new_entry->use_pmap = FALSE; | |
2516 | } else if (!is_submap && | |
2517 | iokit_acct) { | |
2518 | /* alternate accounting */ | |
2519 | assert(!new_entry->iokit_acct); | |
2520 | assert(new_entry->use_pmap); | |
2521 | new_entry->iokit_acct = TRUE; | |
2522 | new_entry->use_pmap = FALSE; | |
2523 | vm_map_iokit_mapped_region( | |
2524 | map, | |
2525 | (new_entry->vme_end - | |
2526 | new_entry->vme_start)); | |
2527 | } else if (!is_submap) { | |
2528 | assert(!new_entry->iokit_acct); | |
2529 | assert(new_entry->use_pmap); | |
2530 | } | |
2531 | ||
b0d623f7 A |
2532 | if (is_submap) { |
2533 | vm_map_t submap; | |
2534 | boolean_t submap_is_64bit; | |
2535 | boolean_t use_pmap; | |
2536 | ||
fe8ab488 A |
2537 | assert(new_entry->is_sub_map); |
2538 | assert(!new_entry->use_pmap); | |
2539 | assert(!new_entry->iokit_acct); | |
b0d623f7 A |
2540 | submap = (vm_map_t) object; |
2541 | submap_is_64bit = vm_map_is_64bit(submap); | |
3e170ce0 | 2542 | use_pmap = (user_alias == VM_MEMORY_SHARED_PMAP); |
fe8ab488 | 2543 | #ifndef NO_NESTED_PMAP |
b0d623f7 | 2544 | if (use_pmap && submap->pmap == NULL) { |
316670eb | 2545 | ledger_t ledger = map->pmap->ledger; |
b0d623f7 | 2546 | /* we need a sub pmap to nest... */ |
316670eb A |
2547 | submap->pmap = pmap_create(ledger, 0, |
2548 | submap_is_64bit); | |
b0d623f7 A |
2549 | if (submap->pmap == NULL) { |
2550 | /* let's proceed without nesting... */ | |
2551 | } | |
2d21ac55 | 2552 | } |
b0d623f7 A |
2553 | if (use_pmap && submap->pmap != NULL) { |
2554 | kr = pmap_nest(map->pmap, | |
2555 | submap->pmap, | |
2556 | tmp_start, | |
2557 | tmp_start, | |
2558 | tmp_end - tmp_start); | |
2559 | if (kr != KERN_SUCCESS) { | |
2560 | printf("vm_map_enter: " | |
2561 | "pmap_nest(0x%llx,0x%llx) " | |
2562 | "error 0x%x\n", | |
2563 | (long long)tmp_start, | |
2564 | (long long)tmp_end, | |
2565 | kr); | |
2566 | } else { | |
2567 | /* we're now nested ! */ | |
2568 | new_entry->use_pmap = TRUE; | |
2569 | pmap_empty = FALSE; | |
2570 | } | |
2571 | } | |
fe8ab488 | 2572 | #endif /* NO_NESTED_PMAP */ |
2d21ac55 | 2573 | } |
b0d623f7 A |
2574 | entry = new_entry; |
2575 | ||
2576 | if (superpage_size) { | |
2577 | vm_page_t pages, m; | |
2578 | vm_object_t sp_object; | |
2579 | ||
3e170ce0 | 2580 | VME_OFFSET_SET(entry, 0); |
b0d623f7 A |
2581 | |
2582 | /* allocate one superpage */ | |
2583 | kr = cpm_allocate(SUPERPAGE_SIZE, &pages, 0, SUPERPAGE_NBASEPAGES-1, TRUE, 0); | |
2d21ac55 | 2584 | if (kr != KERN_SUCCESS) { |
3e170ce0 A |
2585 | /* deallocate whole range... */ |
2586 | new_mapping_established = TRUE; | |
2587 | /* ... but only up to "tmp_end" */ | |
2588 | size -= end - tmp_end; | |
b0d623f7 A |
2589 | RETURN(kr); |
2590 | } | |
2591 | ||
2592 | /* create one vm_object per superpage */ | |
2593 | sp_object = vm_object_allocate((vm_map_size_t)(entry->vme_end - entry->vme_start)); | |
2594 | sp_object->phys_contiguous = TRUE; | |
6d2010ae | 2595 | sp_object->vo_shadow_offset = (vm_object_offset_t)pages->phys_page*PAGE_SIZE; |
3e170ce0 | 2596 | VME_OBJECT_SET(entry, sp_object); |
fe8ab488 | 2597 | assert(entry->use_pmap); |
b0d623f7 A |
2598 | |
2599 | /* enter the base pages into the object */ | |
2600 | vm_object_lock(sp_object); | |
2601 | for (offset = 0; offset < SUPERPAGE_SIZE; offset += PAGE_SIZE) { | |
2602 | m = pages; | |
2603 | pmap_zero_page(m->phys_page); | |
2604 | pages = NEXT_PAGE(m); | |
2605 | *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL; | |
3e170ce0 | 2606 | vm_page_insert_wired(m, sp_object, offset, VM_KERN_MEMORY_OSFMK); |
2d21ac55 | 2607 | } |
b0d623f7 | 2608 | vm_object_unlock(sp_object); |
2d21ac55 | 2609 | } |
b0d623f7 A |
2610 | } while (tmp_end != tmp2_end && |
2611 | (tmp_start = tmp_end) && | |
2612 | (tmp_end = (tmp2_end - tmp_end > (vm_map_size_t)ANON_CHUNK_SIZE) ? | |
2613 | tmp_end + (vm_map_size_t)ANON_CHUNK_SIZE : tmp2_end)); | |
2614 | } | |
91447636 | 2615 | |
91447636 | 2616 | new_mapping_established = TRUE; |
1c79356b | 2617 | |
fe8ab488 A |
2618 | BailOut: |
2619 | assert(map_locked == TRUE); | |
2d21ac55 | 2620 | |
593a1d5f A |
2621 | if (result == KERN_SUCCESS) { |
2622 | vm_prot_t pager_prot; | |
2623 | memory_object_t pager; | |
91447636 | 2624 | |
fe8ab488 | 2625 | #if DEBUG |
593a1d5f A |
2626 | if (pmap_empty && |
2627 | !(flags & VM_FLAGS_NO_PMAP_CHECK)) { | |
2628 | assert(vm_map_pmap_is_empty(map, | |
2629 | *address, | |
2630 | *address+size)); | |
2631 | } | |
fe8ab488 | 2632 | #endif /* DEBUG */ |
593a1d5f A |
2633 | |
2634 | /* | |
2635 | * For "named" VM objects, let the pager know that the | |
2636 | * memory object is being mapped. Some pagers need to keep | |
2637 | * track of this, to know when they can reclaim the memory | |
2638 | * object, for example. | |
2639 | * VM calls memory_object_map() for each mapping (specifying | |
2640 | * the protection of each mapping) and calls | |
2641 | * memory_object_last_unmap() when all the mappings are gone. | |
2642 | */ | |
2643 | pager_prot = max_protection; | |
2644 | if (needs_copy) { | |
2645 | /* | |
2646 | * Copy-On-Write mapping: won't modify | |
2647 | * the memory object. | |
2648 | */ | |
2649 | pager_prot &= ~VM_PROT_WRITE; | |
2650 | } | |
2651 | if (!is_submap && | |
2652 | object != VM_OBJECT_NULL && | |
2653 | object->named && | |
2654 | object->pager != MEMORY_OBJECT_NULL) { | |
2655 | vm_object_lock(object); | |
2656 | pager = object->pager; | |
2657 | if (object->named && | |
2658 | pager != MEMORY_OBJECT_NULL) { | |
2659 | assert(object->pager_ready); | |
2660 | vm_object_mapping_wait(object, THREAD_UNINT); | |
2661 | vm_object_mapping_begin(object); | |
2662 | vm_object_unlock(object); | |
2663 | ||
2664 | kr = memory_object_map(pager, pager_prot); | |
2665 | assert(kr == KERN_SUCCESS); | |
2666 | ||
2667 | vm_object_lock(object); | |
2668 | vm_object_mapping_end(object); | |
2669 | } | |
2670 | vm_object_unlock(object); | |
2671 | } | |
fe8ab488 A |
2672 | } |
2673 | ||
2674 | assert(map_locked == TRUE); | |
2675 | ||
2676 | if (!keep_map_locked) { | |
2677 | vm_map_unlock(map); | |
2678 | map_locked = FALSE; | |
2679 | } | |
2680 | ||
2681 | /* | |
2682 | * We can't hold the map lock if we enter this block. | |
2683 | */ | |
2684 | ||
2685 | if (result == KERN_SUCCESS) { | |
2686 | ||
2687 | /* Wire down the new entry if the user | |
2688 | * requested all new map entries be wired. | |
2689 | */ | |
2690 | if ((map->wiring_required)||(superpage_size)) { | |
2691 | assert(!keep_map_locked); | |
2692 | pmap_empty = FALSE; /* pmap won't be empty */ | |
2693 | kr = vm_map_wire(map, start, end, | |
3e170ce0 A |
2694 | new_entry->protection | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK), |
2695 | TRUE); | |
fe8ab488 A |
2696 | result = kr; |
2697 | } | |
2698 | ||
2699 | } | |
2700 | ||
2701 | if (result != KERN_SUCCESS) { | |
91447636 A |
2702 | if (new_mapping_established) { |
2703 | /* | |
2704 | * We have to get rid of the new mappings since we | |
2705 | * won't make them available to the user. | |
2706 | * Try to do that atomically, to minimize the risk
2707 | * that someone else creates new mappings in that range.
2708 | */ | |
2709 | zap_new_map = vm_map_create(PMAP_NULL, | |
2710 | *address, | |
2711 | *address + size, | |
b0d623f7 | 2712 | map->hdr.entries_pageable); |
39236c6e A |
2713 | vm_map_set_page_shift(zap_new_map, |
2714 | VM_MAP_PAGE_SHIFT(map)); | |
3e170ce0 A |
2715 | vm_map_disable_hole_optimization(zap_new_map); |
2716 | ||
91447636 A |
2717 | if (!map_locked) { |
2718 | vm_map_lock(map); | |
2719 | map_locked = TRUE; | |
2720 | } | |
2721 | (void) vm_map_delete(map, *address, *address+size, | |
fe8ab488 A |
2722 | (VM_MAP_REMOVE_SAVE_ENTRIES | |
2723 | VM_MAP_REMOVE_NO_MAP_ALIGN), | |
91447636 A |
2724 | zap_new_map); |
2725 | } | |
2726 | if (zap_old_map != VM_MAP_NULL && | |
2727 | zap_old_map->hdr.nentries != 0) { | |
2728 | vm_map_entry_t entry1, entry2; | |
2729 | ||
2730 | /* | |
2731 | * The new mapping failed. Attempt to restore | |
2732 | * the old mappings, saved in the "zap_old_map". | |
2733 | */ | |
2734 | if (!map_locked) { | |
2735 | vm_map_lock(map); | |
2736 | map_locked = TRUE; | |
2737 | } | |
2738 | ||
2739 | /* first check if the coast is still clear */ | |
2740 | start = vm_map_first_entry(zap_old_map)->vme_start; | |
2741 | end = vm_map_last_entry(zap_old_map)->vme_end; | |
2742 | if (vm_map_lookup_entry(map, start, &entry1) || | |
2743 | vm_map_lookup_entry(map, end, &entry2) || | |
2744 | entry1 != entry2) { | |
2745 | /* | |
2746 | * Part of that range has already been | |
2747 | * re-mapped: we can't restore the old | |
2748 | * mappings... | |
2749 | */ | |
2750 | vm_map_enter_restore_failures++; | |
2751 | } else { | |
2752 | /* | |
2753 | * Transfer the saved map entries from | |
2754 | * "zap_old_map" to the original "map", | |
2755 | * inserting them all after "entry1". | |
2756 | */ | |
2757 | for (entry2 = vm_map_first_entry(zap_old_map); | |
2758 | entry2 != vm_map_to_entry(zap_old_map); | |
2759 | entry2 = vm_map_first_entry(zap_old_map)) { | |
2d21ac55 A |
2760 | vm_map_size_t entry_size; |
2761 | ||
2762 | entry_size = (entry2->vme_end - | |
2763 | entry2->vme_start); | |
6d2010ae | 2764 | vm_map_store_entry_unlink(zap_old_map, |
91447636 | 2765 | entry2); |
2d21ac55 | 2766 | zap_old_map->size -= entry_size; |
6d2010ae | 2767 | vm_map_store_entry_link(map, entry1, entry2); |
2d21ac55 | 2768 | map->size += entry_size; |
91447636 A |
2769 | entry1 = entry2; |
2770 | } | |
2771 | if (map->wiring_required) { | |
2772 | /* | |
2773 | * XXX TODO: we should rewire the | |
2774 | * old pages here... | |
2775 | */ | |
2776 | } | |
2777 | vm_map_enter_restore_successes++; | |
2778 | } | |
2779 | } | |
2780 | } | |
2781 | ||
fe8ab488 A |
2782 | /* |
2783 | * The caller is responsible for releasing the lock if it requested to | |
2784 | * keep the map locked. | |
2785 | */ | |
2786 | if (map_locked && !keep_map_locked) { | |
91447636 A |
2787 | vm_map_unlock(map); |
2788 | } | |
2789 | ||
2790 | /* | |
2791 | * Get rid of the "zap_maps" and all the map entries that | |
2792 | * they may still contain. | |
2793 | */ | |
2794 | if (zap_old_map != VM_MAP_NULL) { | |
2d21ac55 | 2795 | vm_map_destroy(zap_old_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP); |
91447636 A |
2796 | zap_old_map = VM_MAP_NULL; |
2797 | } | |
2798 | if (zap_new_map != VM_MAP_NULL) { | |
2d21ac55 | 2799 | vm_map_destroy(zap_new_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP); |
91447636 A |
2800 | zap_new_map = VM_MAP_NULL; |
2801 | } | |
2802 | ||
2803 | return result; | |
1c79356b A |
2804 | |
2805 | #undef RETURN | |
2806 | } | |
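/*
 * Editor's note -- illustrative usage sketch, not part of the original
 * source; variable names are assumed.  Mapping fresh anonymous,
 * purgeable memory at an address of the kernel's choosing:
 *
 *	vm_map_offset_t	addr = 0;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_enter(map, &addr, size, (vm_map_offset_t)0,
 *			  VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE,
 *			  VM_OBJECT_NULL, 0, FALSE,
 *			  VM_PROT_DEFAULT, VM_PROT_ALL,
 *			  VM_INHERIT_DEFAULT);
 *
 * On success, "addr" holds the start of the new mapping.
 */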
2807 | ||
3e170ce0 | 2808 | |
fe8ab488 A |
2809 | /* |
2810 | * Counters for the prefault optimization. | |
2811 | */ | |
2812 | int64_t vm_prefault_nb_pages = 0; | |
2813 | int64_t vm_prefault_nb_bailout = 0; | |
2814 | ||
2815 | static kern_return_t | |
2816 | vm_map_enter_mem_object_helper( | |
2d21ac55 A |
2817 | vm_map_t target_map, |
2818 | vm_map_offset_t *address, | |
2819 | vm_map_size_t initial_size, | |
2820 | vm_map_offset_t mask, | |
2821 | int flags, | |
2822 | ipc_port_t port, | |
2823 | vm_object_offset_t offset, | |
2824 | boolean_t copy, | |
2825 | vm_prot_t cur_protection, | |
2826 | vm_prot_t max_protection, | |
fe8ab488 A |
2827 | vm_inherit_t inheritance, |
2828 | upl_page_list_ptr_t page_list, | |
2829 | unsigned int page_list_count) | |
91447636 | 2830 | { |
2d21ac55 A |
2831 | vm_map_address_t map_addr; |
2832 | vm_map_size_t map_size; | |
2833 | vm_object_t object; | |
2834 | vm_object_size_t size; | |
2835 | kern_return_t result; | |
6d2010ae | 2836 | boolean_t mask_cur_protection, mask_max_protection; |
fe8ab488 | 2837 | boolean_t try_prefault = (page_list_count != 0); |
3e170ce0 | 2838 | vm_map_offset_t offset_in_mapping = 0; |
6d2010ae A |
2839 | |
2840 | mask_cur_protection = cur_protection & VM_PROT_IS_MASK; | |
2841 | mask_max_protection = max_protection & VM_PROT_IS_MASK; | |
2842 | cur_protection &= ~VM_PROT_IS_MASK; | |
2843 | max_protection &= ~VM_PROT_IS_MASK; | |
91447636 A |
2844 | |
2845 | /* | |
2d21ac55 | 2846 | * Check arguments for validity |
91447636 | 2847 | */ |
2d21ac55 A |
2848 | if ((target_map == VM_MAP_NULL) || |
2849 | (cur_protection & ~VM_PROT_ALL) || | |
2850 | (max_protection & ~VM_PROT_ALL) || | |
2851 | (inheritance > VM_INHERIT_LAST_VALID) || | |
fe8ab488 | 2852 | (try_prefault && (copy || !page_list)) || |
3e170ce0 | 2853 | initial_size == 0) { |
2d21ac55 | 2854 | return KERN_INVALID_ARGUMENT; |
3e170ce0 | 2855 | } |
6d2010ae | 2856 | |
3e170ce0 A |
2857 | { |
2858 | map_addr = vm_map_trunc_page(*address, | |
2859 | VM_MAP_PAGE_MASK(target_map)); | |
2860 | map_size = vm_map_round_page(initial_size, | |
2861 | VM_MAP_PAGE_MASK(target_map)); | |
2862 | } | |
39236c6e | 2863 | size = vm_object_round_page(initial_size); |
593a1d5f | 2864 | |
2d21ac55 A |
2865 | /* |
2866 | * Find the vm object (if any) corresponding to this port. | |
2867 | */ | |
2868 | if (!IP_VALID(port)) { | |
2869 | object = VM_OBJECT_NULL; | |
2870 | offset = 0; | |
2871 | copy = FALSE; | |
2872 | } else if (ip_kotype(port) == IKOT_NAMED_ENTRY) { | |
2873 | vm_named_entry_t named_entry; | |
2874 | ||
2875 | named_entry = (vm_named_entry_t) port->ip_kobject; | |
39236c6e | 2876 | |
3e170ce0 A |
2877 | if (flags & (VM_FLAGS_RETURN_DATA_ADDR | |
2878 | VM_FLAGS_RETURN_4K_DATA_ADDR)) { | |
39236c6e A |
2879 | offset += named_entry->data_offset; |
2880 | } | |
2881 | ||
2d21ac55 A |
2882 | /* a few checks to make sure user is obeying rules */ |
2883 | if (size == 0) { | |
2884 | if (offset >= named_entry->size) | |
2885 | return KERN_INVALID_RIGHT; | |
2886 | size = named_entry->size - offset; | |
2887 | } | |
6d2010ae A |
2888 | if (mask_max_protection) { |
2889 | max_protection &= named_entry->protection; | |
2890 | } | |
2891 | if (mask_cur_protection) { | |
2892 | cur_protection &= named_entry->protection; | |
2893 | } | |
2d21ac55 A |
2894 | if ((named_entry->protection & max_protection) != |
2895 | max_protection) | |
2896 | return KERN_INVALID_RIGHT; | |
2897 | if ((named_entry->protection & cur_protection) != | |
2898 | cur_protection) | |
2899 | return KERN_INVALID_RIGHT; | |
22ba694c A |
2900 | if (offset + size < offset) { |
2901 | /* overflow */ | |
2902 | return KERN_INVALID_ARGUMENT; | |
2903 | } | |
3e170ce0 | 2904 | if (named_entry->size < (offset + initial_size)) { |
2d21ac55 | 2905 | return KERN_INVALID_ARGUMENT; |
3e170ce0 | 2906 | } |
2d21ac55 | 2907 | |
39236c6e A |
2908 | if (named_entry->is_copy) { |
2909 | /* for a vm_map_copy, we can only map it whole */ | |
2910 | if ((size != named_entry->size) && | |
2911 | (vm_map_round_page(size, | |
2912 | VM_MAP_PAGE_MASK(target_map)) == | |
2913 | named_entry->size)) { | |
2914 | /* XXX FBDP use the rounded size... */ | |
2915 | size = vm_map_round_page( | |
2916 | size, | |
2917 | VM_MAP_PAGE_MASK(target_map)); | |
2918 | } | |
2919 | ||
fe8ab488 A |
2920 | if (!(flags & VM_FLAGS_ANYWHERE) && |
2921 | (offset != 0 || | |
2922 | size != named_entry->size)) { | |
2923 | /* | |
2924 | * XXX for a mapping at a "fixed" address, | |
2925 | * we can't trim after mapping the whole | |
2926 | * memory entry, so reject a request for a | |
2927 | * partial mapping. | |
2928 | */ | |
39236c6e A |
2929 | return KERN_INVALID_ARGUMENT; |
2930 | } | |
2931 | } | |
2932 | ||
2d21ac55 A |
2933 | /* the caller's parameter "offset" is defined to be the */
2934 | /* offset from the beginning of the named entry into its object */
2935 | offset = offset + named_entry->offset; | |
2936 | ||
39236c6e A |
2937 | if (! VM_MAP_PAGE_ALIGNED(size, |
2938 | VM_MAP_PAGE_MASK(target_map))) { | |
2939 | /* | |
2940 | * Let's not map more than requested; | |
2941 | * vm_map_enter() will handle this "not map-aligned" | |
2942 | * case. | |
2943 | */ | |
2944 | map_size = size; | |
2945 | } | |
2946 | ||
2d21ac55 A |
2947 | named_entry_lock(named_entry); |
2948 | if (named_entry->is_sub_map) { | |
2949 | vm_map_t submap; | |
2950 | ||
3e170ce0 A |
2951 | if (flags & (VM_FLAGS_RETURN_DATA_ADDR | |
2952 | VM_FLAGS_RETURN_4K_DATA_ADDR)) { | |
39236c6e A |
2953 | panic("VM_FLAGS_RETURN_DATA_ADDR not expected for submap."); |
2954 | } | |
2955 | ||
2d21ac55 A |
2956 | submap = named_entry->backing.map; |
2957 | vm_map_lock(submap); | |
2958 | vm_map_reference(submap); | |
2959 | vm_map_unlock(submap); | |
2960 | named_entry_unlock(named_entry); | |
2961 | ||
2962 | result = vm_map_enter(target_map, | |
2963 | &map_addr, | |
2964 | map_size, | |
2965 | mask, | |
2966 | flags | VM_FLAGS_SUBMAP, | |
2967 | (vm_object_t) submap, | |
2968 | offset, | |
2969 | copy, | |
2970 | cur_protection, | |
2971 | max_protection, | |
2972 | inheritance); | |
2973 | if (result != KERN_SUCCESS) { | |
2974 | vm_map_deallocate(submap); | |
2975 | } else { | |
2976 | /* | |
2977 | * No need to lock "submap" just to check its | |
2978 | * "mapped" flag: that flag is never reset | |
2979 | * once it's been set and if we race, we'll | |
2980 | * just end up setting it twice, which is OK. | |
2981 | */ | |
316670eb A |
2982 | if (submap->mapped_in_other_pmaps == FALSE && |
2983 | vm_map_pmap(submap) != PMAP_NULL && | |
2984 | vm_map_pmap(submap) != | |
2985 | vm_map_pmap(target_map)) { | |
2d21ac55 | 2986 | /* |
316670eb A |
2987 | * This submap is being mapped in a map |
2988 | * that uses a different pmap. | |
2989 | * Set its "mapped_in_other_pmaps" flag | |
2990 | * to indicate that we now need to | |
2991 | * remove mappings from all pmaps rather | |
2992 | * than just the submap's pmap. | |
2d21ac55 A |
2993 | */ |
2994 | vm_map_lock(submap); | |
316670eb | 2995 | submap->mapped_in_other_pmaps = TRUE; |
2d21ac55 A |
2996 | vm_map_unlock(submap); |
2997 | } | |
2998 | *address = map_addr; | |
2999 | } | |
3000 | return result; | |
3001 | ||
3002 | } else if (named_entry->is_pager) { | |
3003 | unsigned int access; | |
3004 | vm_prot_t protections; | |
3005 | unsigned int wimg_mode; | |
2d21ac55 A |
3006 | |
3007 | protections = named_entry->protection & VM_PROT_ALL; | |
3008 | access = GET_MAP_MEM(named_entry->protection); | |
3009 | ||
3e170ce0 A |
3010 | if (flags & (VM_FLAGS_RETURN_DATA_ADDR| |
3011 | VM_FLAGS_RETURN_4K_DATA_ADDR)) { | |
39236c6e A |
3012 | panic("VM_FLAGS_RETURN_DATA_ADDR not expected for submap."); |
3013 | } | |
3014 | ||
2d21ac55 A |
3015 | object = vm_object_enter(named_entry->backing.pager, |
3016 | named_entry->size, | |
3017 | named_entry->internal, | |
3018 | FALSE, | |
3019 | FALSE); | |
3020 | if (object == VM_OBJECT_NULL) { | |
3021 | named_entry_unlock(named_entry); | |
3022 | return KERN_INVALID_OBJECT; | |
3023 | } | |
3024 | ||
3025 | /* JMM - drop reference on pager here */ | |
3026 | ||
3027 | /* create an extra ref for the named entry */ | |
3028 | vm_object_lock(object); | |
3029 | vm_object_reference_locked(object); | |
3030 | named_entry->backing.object = object; | |
3031 | named_entry->is_pager = FALSE; | |
3032 | named_entry_unlock(named_entry); | |
3033 | ||
3034 | wimg_mode = object->wimg_bits; | |
6d2010ae | 3035 | |
2d21ac55 A |
3036 | if (access == MAP_MEM_IO) { |
3037 | wimg_mode = VM_WIMG_IO; | |
3038 | } else if (access == MAP_MEM_COPYBACK) { | |
3039 | wimg_mode = VM_WIMG_USE_DEFAULT; | |
316670eb A |
3040 | } else if (access == MAP_MEM_INNERWBACK) { |
3041 | wimg_mode = VM_WIMG_INNERWBACK; | |
2d21ac55 A |
3042 | } else if (access == MAP_MEM_WTHRU) { |
3043 | wimg_mode = VM_WIMG_WTHRU; | |
3044 | } else if (access == MAP_MEM_WCOMB) { | |
3045 | wimg_mode = VM_WIMG_WCOMB; | |
3046 | } | |
2d21ac55 A |
3047 | |
3048 | /* wait for object (if any) to be ready */ | |
3049 | if (!named_entry->internal) { | |
3050 | while (!object->pager_ready) { | |
3051 | vm_object_wait( | |
3052 | object, | |
3053 | VM_OBJECT_EVENT_PAGER_READY, | |
3054 | THREAD_UNINT); | |
3055 | vm_object_lock(object); | |
3056 | } | |
3057 | } | |
3058 | ||
6d2010ae A |
3059 | if (object->wimg_bits != wimg_mode) |
3060 | vm_object_change_wimg_mode(object, wimg_mode); | |
2d21ac55 | 3061 | |
fe8ab488 A |
3062 | #if VM_OBJECT_TRACKING_OP_TRUESHARE |
3063 | if (!object->true_share && | |
3064 | vm_object_tracking_inited) { | |
3065 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; | |
3066 | int num = 0; | |
3067 | ||
3068 | num = OSBacktrace(bt, | |
3069 | VM_OBJECT_TRACKING_BTDEPTH); | |
3070 | btlog_add_entry(vm_object_tracking_btlog, | |
3071 | object, | |
3072 | VM_OBJECT_TRACKING_OP_TRUESHARE, | |
3073 | bt, | |
3074 | num); | |
3075 | } | |
3076 | #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ | |
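/*
 * (When VM object tracking is configured in, the block above records the
 * kernel backtrace of whoever first marks this object "true_share", which
 * helps attribute unexpected sharing when debugging.)
 */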
3077 | ||
2d21ac55 | 3078 | object->true_share = TRUE; |
6d2010ae | 3079 | |
2d21ac55 A |
3080 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) |
3081 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
3082 | vm_object_unlock(object); | |
39236c6e A |
3083 | |
3084 | } else if (named_entry->is_copy) { | |
3085 | kern_return_t kr; | |
3086 | vm_map_copy_t copy_map; | |
3087 | vm_map_entry_t copy_entry; | |
3088 | vm_map_offset_t copy_addr; | |
3089 | ||
3090 | if (flags & ~(VM_FLAGS_FIXED | | |
3091 | VM_FLAGS_ANYWHERE | | |
3092 | VM_FLAGS_OVERWRITE | | |
3e170ce0 | 3093 | VM_FLAGS_RETURN_4K_DATA_ADDR | |
39236c6e A |
3094 | VM_FLAGS_RETURN_DATA_ADDR)) { |
3095 | named_entry_unlock(named_entry); | |
3096 | return KERN_INVALID_ARGUMENT; | |
3097 | } | |
3098 | ||
3e170ce0 A |
3099 | if (flags & (VM_FLAGS_RETURN_DATA_ADDR | |
3100 | VM_FLAGS_RETURN_4K_DATA_ADDR)) { | |
39236c6e | 3101 | offset_in_mapping = offset - vm_object_trunc_page(offset); |
3e170ce0 A |
3102 | if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) |
3103 | offset_in_mapping &= ~((signed)(0xFFF)); | |
39236c6e A |
3104 | offset = vm_object_trunc_page(offset); |
3105 | map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset; | |
3106 | } | |
3107 | ||
3108 | copy_map = named_entry->backing.copy; | |
3109 | assert(copy_map->type == VM_MAP_COPY_ENTRY_LIST); | |
3110 | if (copy_map->type != VM_MAP_COPY_ENTRY_LIST) { | |
3111 | /* unsupported type; should not happen */ | |
3112 | printf("vm_map_enter_mem_object: " | |
3113 | "memory_entry->backing.copy " | |
3114 | "unsupported type 0x%x\n", | |
3115 | copy_map->type); | |
3116 | named_entry_unlock(named_entry); | |
3117 | return KERN_INVALID_ARGUMENT; | |
3118 | } | |
3119 | ||
3120 | /* reserve a contiguous range */ | |
3121 | kr = vm_map_enter(target_map, | |
3122 | &map_addr, | |
fe8ab488 A |
3123 | /* map whole mem entry, trim later: */ |
3124 | named_entry->size, | |
39236c6e A |
3125 | mask, |
3126 | flags & (VM_FLAGS_ANYWHERE | | |
3127 | VM_FLAGS_OVERWRITE | | |
3e170ce0 | 3128 | VM_FLAGS_RETURN_4K_DATA_ADDR | |
39236c6e A |
3129 | VM_FLAGS_RETURN_DATA_ADDR), |
3130 | VM_OBJECT_NULL, | |
3131 | 0, | |
3132 | FALSE, /* copy */ | |
3133 | cur_protection, | |
3134 | max_protection, | |
3135 | inheritance); | |
3136 | if (kr != KERN_SUCCESS) { | |
3137 | named_entry_unlock(named_entry); | |
3138 | return kr; | |
3139 | } | |
3140 | ||
3141 | copy_addr = map_addr; | |
3142 | ||
3143 | for (copy_entry = vm_map_copy_first_entry(copy_map); | |
3144 | copy_entry != vm_map_copy_to_entry(copy_map); | |
3145 | copy_entry = copy_entry->vme_next) { | |
3146 | int remap_flags = 0; | |
3147 | vm_map_t copy_submap; | |
3148 | vm_object_t copy_object; | |
3149 | vm_map_size_t copy_size; | |
3150 | vm_object_offset_t copy_offset; | |
3151 | ||
3e170ce0 | 3152 | copy_offset = VME_OFFSET(copy_entry); |
39236c6e A |
3153 | copy_size = (copy_entry->vme_end - |
3154 | copy_entry->vme_start); | |
3155 | ||
3156 | /* sanity check */ | |
fe8ab488 A |
3157 | if ((copy_addr + copy_size) > |
3158 | (map_addr + | |
3159 | named_entry->size /* XXX full size */ )) { | |
39236c6e A |
3160 | /* over-mapping too much !? */ |
3161 | kr = KERN_INVALID_ARGUMENT; | |
3162 | /* abort */ | |
3163 | break; | |
3164 | } | |
3165 | ||
3166 | /* take a reference on the object */ | |
3167 | if (copy_entry->is_sub_map) { | |
3168 | remap_flags |= VM_FLAGS_SUBMAP; | |
3e170ce0 | 3169 | copy_submap = VME_SUBMAP(copy_entry); |
39236c6e A |
3170 | vm_map_lock(copy_submap); |
3171 | vm_map_reference(copy_submap); | |
3172 | vm_map_unlock(copy_submap); | |
3173 | copy_object = (vm_object_t) copy_submap; | |
3174 | } else { | |
3e170ce0 | 3175 | copy_object = VME_OBJECT(copy_entry); |
39236c6e A |
3176 | vm_object_reference(copy_object); |
3177 | } | |
3178 | ||
3179 | /* over-map the object into destination */ | |
3180 | remap_flags |= flags; | |
3181 | remap_flags |= VM_FLAGS_FIXED; | |
3182 | remap_flags |= VM_FLAGS_OVERWRITE; | |
3183 | remap_flags &= ~VM_FLAGS_ANYWHERE; | |
3184 | kr = vm_map_enter(target_map, | |
3185 | ©_addr, | |
3186 | copy_size, | |
3187 | (vm_map_offset_t) 0, | |
3188 | remap_flags, | |
3189 | copy_object, | |
3190 | copy_offset, | |
3191 | copy, | |
3192 | cur_protection, | |
3193 | max_protection, | |
3194 | inheritance); | |
3195 | if (kr != KERN_SUCCESS) { | |
3196 | if (copy_entry->is_sub_map) { | |
3197 | vm_map_deallocate(copy_submap); | |
3198 | } else { | |
3199 | vm_object_deallocate(copy_object); | |
3200 | } | |
3201 | /* abort */ | |
3202 | break; | |
3203 | } | |
3204 | ||
3205 | /* next mapping */ | |
3206 | copy_addr += copy_size; | |
3207 | } | |
3208 | ||
3209 | if (kr == KERN_SUCCESS) { | |
3e170ce0 A |
3210 | if (flags & (VM_FLAGS_RETURN_DATA_ADDR | |
3211 | VM_FLAGS_RETURN_4K_DATA_ADDR)) { | |
39236c6e A |
3212 | *address = map_addr + offset_in_mapping; |
3213 | } else { | |
3214 | *address = map_addr; | |
3215 | } | |
fe8ab488 A |
3216 | |
3217 | if (offset) { | |
3218 | /* | |
3219 | * Trim in front, from 0 to "offset". | |
3220 | */ | |
3221 | vm_map_remove(target_map, | |
3222 | map_addr, | |
3223 | map_addr + offset, | |
3224 | 0); | |
3225 | *address += offset; | |
3226 | } | |
3227 | if (offset + map_size < named_entry->size) { | |
3228 | /* | |
3229 | * Trim in back, from | |
3230 | * "offset + map_size" to | |
3231 | * "named_entry->size". | |
3232 | */ | |
3233 | vm_map_remove(target_map, | |
3234 | (map_addr + | |
3235 | offset + map_size), | |
3236 | (map_addr + | |
3237 | named_entry->size), | |
3238 | 0); | |
3239 | } | |
39236c6e A |
3240 | } |
3241 | named_entry_unlock(named_entry); | |
3242 | ||
3243 | if (kr != KERN_SUCCESS) { | |
3244 | if (! (flags & VM_FLAGS_OVERWRITE)) { | |
3245 | /* deallocate the contiguous range */ | |
3246 | (void) vm_deallocate(target_map, | |
3247 | map_addr, | |
3248 | map_size); | |
3249 | } | |
3250 | } | |
3251 | ||
3252 | return kr; | |
3253 | ||
2d21ac55 A |
3254 | } else { |
3255 | /* This is the case where we are going to map */ | |
3256 | /* an already mapped object. If the object is */ | |
3257 | /* not ready, it must be internal: an external */ | |
3258 | /* object cannot be mapped until it is ready, */ | |
3259 | /* so we can avoid the ready check */ | |
3260 | /* in this case. */ | |
3e170ce0 A |
3261 | if (flags & (VM_FLAGS_RETURN_DATA_ADDR | |
3262 | VM_FLAGS_RETURN_4K_DATA_ADDR)) { | |
39236c6e | 3263 | offset_in_mapping = offset - vm_object_trunc_page(offset); |
3e170ce0 A |
3264 | if (flags & VM_FLAGS_RETURN_4K_DATA_ADDR) |
3265 | offset_in_mapping &= ~((signed)(0xFFF)); | |
39236c6e A |
3266 | offset = vm_object_trunc_page(offset); |
3267 | map_size = vm_object_round_page(offset + offset_in_mapping + initial_size) - offset; | |
3268 | } | |
3269 | ||
2d21ac55 A |
3270 | object = named_entry->backing.object; |
3271 | assert(object != VM_OBJECT_NULL); | |
3272 | named_entry_unlock(named_entry); | |
3273 | vm_object_reference(object); | |
3274 | } | |
3275 | } else if (ip_kotype(port) == IKOT_MEMORY_OBJECT) { | |
3276 | /* | |
3277 | * JMM - This is temporary until we unify named entries | |
3278 | * and raw memory objects. | |
3279 | * | |
3280 | * Detected fake ip_kotype for a memory object. In | |
3281 | * this case, the port isn't really a port at all, but | |
3282 | * instead is just a raw memory object. | |
3283 | */ | |
3e170ce0 A |
3284 | if (flags & (VM_FLAGS_RETURN_DATA_ADDR | |
3285 | VM_FLAGS_RETURN_4K_DATA_ADDR)) { | |
39236c6e A |
3286 | panic("VM_FLAGS_RETURN_DATA_ADDR not expected for raw memory object."); |
3287 | } | |
3288 | ||
2d21ac55 A |
3289 | object = vm_object_enter((memory_object_t)port, |
3290 | size, FALSE, FALSE, FALSE); | |
3291 | if (object == VM_OBJECT_NULL) | |
3292 | return KERN_INVALID_OBJECT; | |
3293 | ||
3294 | /* wait for object (if any) to be ready */ | |
3295 | if (object != VM_OBJECT_NULL) { | |
3296 | if (object == kernel_object) { | |
3297 | printf("Warning: Attempt to map kernel object" | |
3298 | " by a non-private kernel entity\n"); | |
3299 | return KERN_INVALID_OBJECT; | |
3300 | } | |
b0d623f7 | 3301 | if (!object->pager_ready) { |
2d21ac55 | 3302 | vm_object_lock(object); |
b0d623f7 A |
3303 | |
3304 | while (!object->pager_ready) { | |
3305 | vm_object_wait(object, | |
3306 | VM_OBJECT_EVENT_PAGER_READY, | |
3307 | THREAD_UNINT); | |
3308 | vm_object_lock(object); | |
3309 | } | |
3310 | vm_object_unlock(object); | |
2d21ac55 | 3311 | } |
2d21ac55 A |
3312 | } |
3313 | } else { | |
3314 | return KERN_INVALID_OBJECT; | |
3315 | } | |
3316 | ||
593a1d5f A |
3317 | if (object != VM_OBJECT_NULL && |
3318 | object->named && | |
3319 | object->pager != MEMORY_OBJECT_NULL && | |
3320 | object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { | |
3321 | memory_object_t pager; | |
3322 | vm_prot_t pager_prot; | |
3323 | kern_return_t kr; | |
3324 | ||
3325 | /* | |
3326 | * For "named" VM objects, let the pager know that the | |
3327 | * memory object is being mapped. Some pagers need to keep | |
3328 | * track of this, to know when they can reclaim the memory | |
3329 | * object, for example. | |
3330 | * VM calls memory_object_map() for each mapping (specifying | |
3331 | * the protection of each mapping) and calls | |
3332 | * memory_object_last_unmap() when all the mappings are gone. | |
3333 | */ | |
3334 | pager_prot = max_protection; | |
3335 | if (copy) { | |
3336 | /* | |
3337 | * Copy-On-Write mapping: won't modify the | |
3338 | * memory object. | |
3339 | */ | |
3340 | pager_prot &= ~VM_PROT_WRITE; | |
3341 | } | |
3342 | vm_object_lock(object); | |
3343 | pager = object->pager; | |
3344 | if (object->named && | |
3345 | pager != MEMORY_OBJECT_NULL && | |
3346 | object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { | |
3347 | assert(object->pager_ready); | |
3348 | vm_object_mapping_wait(object, THREAD_UNINT); | |
3349 | vm_object_mapping_begin(object); | |
3350 | vm_object_unlock(object); | |
3351 | ||
3352 | kr = memory_object_map(pager, pager_prot); | |
3353 | assert(kr == KERN_SUCCESS); | |
3354 | ||
3355 | vm_object_lock(object); | |
3356 | vm_object_mapping_end(object); | |
3357 | } | |
3358 | vm_object_unlock(object); | |
3359 | } | |
3360 | ||
2d21ac55 A |
3361 | /* |
3362 | * Perform the copy if requested | |
3363 | */ | |
3364 | ||
3365 | if (copy) { | |
3366 | vm_object_t new_object; | |
3367 | vm_object_offset_t new_offset; | |
3368 | ||
3e170ce0 A |
3369 | result = vm_object_copy_strategically(object, offset, |
3370 | map_size, | |
2d21ac55 A |
3371 | &new_object, &new_offset, |
3372 | ©); | |
3373 | ||
3374 | ||
3375 | if (result == KERN_MEMORY_RESTART_COPY) { | |
3376 | boolean_t success; | |
3377 | boolean_t src_needs_copy; | |
3378 | ||
3379 | /* | |
3380 | * XXX | |
3381 | * We currently ignore src_needs_copy. | |
3382 | * This really is the issue of how to make | |
3383 | * MEMORY_OBJECT_COPY_SYMMETRIC safe for | |
3384 | * non-kernel users to use. Solution forthcoming. | |
3385 | * In the meantime, since we don't allow non-kernel | |
3386 | * memory managers to specify symmetric copy, | |
3387 | * we won't run into problems here. | |
3388 | */ | |
3389 | new_object = object; | |
3390 | new_offset = offset; | |
3391 | success = vm_object_copy_quickly(&new_object, | |
3e170ce0 A |
3392 | new_offset, |
3393 | map_size, | |
2d21ac55 A |
3394 | &src_needs_copy, |
3395 | ©); | |
3396 | assert(success); | |
3397 | result = KERN_SUCCESS; | |
3398 | } | |
3399 | /* | |
3400 | * Throw away the reference to the | |
3401 | * original object, as it won't be mapped. | |
3402 | */ | |
3403 | ||
3404 | vm_object_deallocate(object); | |
3405 | ||
3e170ce0 | 3406 | if (result != KERN_SUCCESS) { |
2d21ac55 | 3407 | return result; |
3e170ce0 | 3408 | } |
2d21ac55 A |
3409 | |
3410 | object = new_object; | |
3411 | offset = new_offset; | |
3412 | } | |
3413 | ||
fe8ab488 A |
3414 | /* |
3415 | * If users want to try to prefault pages, the mapping and prefault | |
3416 | * needs to be atomic. | |
3417 | */ | |
3418 | if (try_prefault) | |
3419 | flags |= VM_FLAGS_KEEP_MAP_LOCKED; | |
3e170ce0 A |
3420 | |
3421 | { | |
3422 | result = vm_map_enter(target_map, | |
3423 | &map_addr, map_size, | |
3424 | (vm_map_offset_t)mask, | |
3425 | flags, | |
3426 | object, offset, | |
3427 | copy, | |
3428 | cur_protection, max_protection, | |
3429 | inheritance); | |
3430 | } | |
2d21ac55 A |
3431 | if (result != KERN_SUCCESS) |
3432 | vm_object_deallocate(object); | |
39236c6e | 3433 | |
fe8ab488 A |
3434 | /* |
3435 | * Try to prefault, and do not forget to release the vm map lock. | |
3436 | */ | |
3437 | if (result == KERN_SUCCESS && try_prefault) { | |
3438 | mach_vm_address_t va = map_addr; | |
3439 | kern_return_t kr = KERN_SUCCESS; | |
3440 | unsigned int i = 0; | |
3441 | ||
3442 | for (i = 0; i < page_list_count; ++i) { | |
3443 | if (UPL_VALID_PAGE(page_list, i)) { | |
3444 | /* | |
3445 | * If this function call fails, we should stop | |
3446 | * trying to optimize; other calls are likely | |
3447 | * going to fail too. | |
3448 | * | |
3449 | * We are not going to report an error for such a | |
3450 | * failure, though. That's an optimization, not | |
3451 | * something critical. | |
3452 | */ | |
3453 | kr = pmap_enter_options(target_map->pmap, | |
3454 | va, UPL_PHYS_PAGE(page_list, i), | |
3455 | cur_protection, VM_PROT_NONE, | |
3456 | 0, TRUE, PMAP_OPTIONS_NOWAIT, NULL); | |
3457 | if (kr != KERN_SUCCESS) { | |
3458 | OSIncrementAtomic64(&vm_prefault_nb_bailout); | |
3e170ce0 | 3459 | break; |
fe8ab488 A |
3460 | } |
3461 | OSIncrementAtomic64(&vm_prefault_nb_pages); | |
3462 | } | |
3463 | ||
3464 | /* Next virtual address */ | |
3465 | va += PAGE_SIZE; | |
3466 | } | |
fe8ab488 A |
3467 | vm_map_unlock(target_map); |
3468 | } | |
3469 | ||
3e170ce0 A |
3470 | if (flags & (VM_FLAGS_RETURN_DATA_ADDR | |
3471 | VM_FLAGS_RETURN_4K_DATA_ADDR)) { | |
39236c6e A |
3472 | *address = map_addr + offset_in_mapping; |
3473 | } else { | |
3474 | *address = map_addr; | |
3475 | } | |
2d21ac55 A |
3476 | return result; |
3477 | } | |
3478 | ||
fe8ab488 A |
3479 | kern_return_t |
3480 | vm_map_enter_mem_object( | |
3481 | vm_map_t target_map, | |
3482 | vm_map_offset_t *address, | |
3483 | vm_map_size_t initial_size, | |
3484 | vm_map_offset_t mask, | |
3485 | int flags, | |
3486 | ipc_port_t port, | |
3487 | vm_object_offset_t offset, | |
3488 | boolean_t copy, | |
3489 | vm_prot_t cur_protection, | |
3490 | vm_prot_t max_protection, | |
3491 | vm_inherit_t inheritance) | |
3492 | { | |
3493 | return vm_map_enter_mem_object_helper(target_map, address, initial_size, mask, flags, | |
3494 | port, offset, copy, cur_protection, max_protection, | |
3495 | inheritance, NULL, 0); | |
3496 | } | |
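/*
 * Illustrative user-space sketch (not part of this file): the named-entry
 * path handled above is what backs a handle from
 * mach_make_memory_entry_64() when it is passed to mach_vm_map().  The
 * sizes and protections below are arbitrary example values.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
map_named_entry_example(void)
{
	mach_vm_address_t	src = 0, dst = 0;
	memory_object_size_t	entry_size = 4096;
	mach_port_t		entry = MACH_PORT_NULL;
	kern_return_t		kr;

	/* back the future named entry with a fresh anonymous page */
	kr = mach_vm_allocate(mach_task_self(), &src, entry_size,
	    VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* create a named entry (an IKOT_NAMED_ENTRY port) for that page */
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
	    (memory_object_offset_t)src, VM_PROT_READ | VM_PROT_WRITE,
	    &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* map it a second time; in-kernel this reaches the code above */
	return mach_vm_map(mach_task_self(), &dst, entry_size, 0,
	    VM_FLAGS_ANYWHERE, entry, 0, FALSE,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
}
#endif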
b0d623f7 | 3497 | |
fe8ab488 A |
3498 | kern_return_t |
3499 | vm_map_enter_mem_object_prefault( | |
3500 | vm_map_t target_map, | |
3501 | vm_map_offset_t *address, | |
3502 | vm_map_size_t initial_size, | |
3503 | vm_map_offset_t mask, | |
3504 | int flags, | |
3505 | ipc_port_t port, | |
3506 | vm_object_offset_t offset, | |
3507 | vm_prot_t cur_protection, | |
3508 | vm_prot_t max_protection, | |
3509 | upl_page_list_ptr_t page_list, | |
3510 | unsigned int page_list_count) | |
3511 | { | |
3512 | return vm_map_enter_mem_object_helper(target_map, address, initial_size, mask, flags, | |
3513 | port, offset, FALSE, cur_protection, max_protection, | |
3514 | VM_INHERIT_DEFAULT, page_list, page_list_count); | |
3515 | } | |
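/*
 * Note: unlike the wrapper above, the prefault variant forces copy=FALSE
 * and VM_INHERIT_DEFAULT, and hands the caller's UPL page list to the
 * helper so valid pages can be entered into the pmap while the map lock
 * is still held (VM_FLAGS_KEEP_MAP_LOCKED).  A prefault failure is
 * deliberately not reported to the caller: the mapping itself has
 * already succeeded and the prefault is only an optimization.
 */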
b0d623f7 A |
3516 | |
3517 | ||
3518 | kern_return_t | |
3519 | vm_map_enter_mem_object_control( | |
3520 | vm_map_t target_map, | |
3521 | vm_map_offset_t *address, | |
3522 | vm_map_size_t initial_size, | |
3523 | vm_map_offset_t mask, | |
3524 | int flags, | |
3525 | memory_object_control_t control, | |
3526 | vm_object_offset_t offset, | |
3527 | boolean_t copy, | |
3528 | vm_prot_t cur_protection, | |
3529 | vm_prot_t max_protection, | |
3530 | vm_inherit_t inheritance) | |
3531 | { | |
3532 | vm_map_address_t map_addr; | |
3533 | vm_map_size_t map_size; | |
3534 | vm_object_t object; | |
3535 | vm_object_size_t size; | |
3536 | kern_return_t result; | |
3537 | memory_object_t pager; | |
3538 | vm_prot_t pager_prot; | |
3539 | kern_return_t kr; | |
3540 | ||
3541 | /* | |
3542 | * Check arguments for validity | |
3543 | */ | |
3544 | if ((target_map == VM_MAP_NULL) || | |
3545 | (cur_protection & ~VM_PROT_ALL) || | |
3546 | (max_protection & ~VM_PROT_ALL) || | |
3547 | (inheritance > VM_INHERIT_LAST_VALID) || | |
3e170ce0 | 3548 | initial_size == 0) { |
b0d623f7 | 3549 | return KERN_INVALID_ARGUMENT; |
3e170ce0 | 3550 | } |
b0d623f7 | 3551 | |
3e170ce0 A |
3552 | { |
3553 | map_addr = vm_map_trunc_page(*address, | |
3554 | VM_MAP_PAGE_MASK(target_map)); | |
3555 | map_size = vm_map_round_page(initial_size, | |
3556 | VM_MAP_PAGE_MASK(target_map)); | |
3557 | } | |
3558 | size = vm_object_round_page(initial_size); | |
b0d623f7 A |
3559 | |
3560 | object = memory_object_control_to_vm_object(control); | |
3561 | ||
3562 | if (object == VM_OBJECT_NULL) | |
3563 | return KERN_INVALID_OBJECT; | |
3564 | ||
3565 | if (object == kernel_object) { | |
3566 | printf("Warning: Attempt to map kernel object" | |
3567 | " by a non-private kernel entity\n"); | |
3568 | return KERN_INVALID_OBJECT; | |
3569 | } | |
3570 | ||
3571 | vm_object_lock(object); | |
3572 | object->ref_count++; | |
3573 | vm_object_res_reference(object); | |
3574 | ||
3575 | /* | |
3576 | * For "named" VM objects, let the pager know that the | |
3577 | * memory object is being mapped. Some pagers need to keep | |
3578 | * track of this, to know when they can reclaim the memory | |
3579 | * object, for example. | |
3580 | * VM calls memory_object_map() for each mapping (specifying | |
3581 | * the protection of each mapping) and calls | |
3582 | * memory_object_last_unmap() when all the mappings are gone. | |
3583 | */ | |
3584 | pager_prot = max_protection; | |
3585 | if (copy) { | |
3586 | pager_prot &= ~VM_PROT_WRITE; | |
3587 | } | |
3588 | pager = object->pager; | |
3589 | if (object->named && | |
3590 | pager != MEMORY_OBJECT_NULL && | |
3591 | object->copy_strategy != MEMORY_OBJECT_COPY_NONE) { | |
3592 | assert(object->pager_ready); | |
3593 | vm_object_mapping_wait(object, THREAD_UNINT); | |
3594 | vm_object_mapping_begin(object); | |
3595 | vm_object_unlock(object); | |
3596 | ||
3597 | kr = memory_object_map(pager, pager_prot); | |
3598 | assert(kr == KERN_SUCCESS); | |
3599 | ||
3600 | vm_object_lock(object); | |
3601 | vm_object_mapping_end(object); | |
3602 | } | |
3603 | vm_object_unlock(object); | |
3604 | ||
3605 | /* | |
3606 | * Perform the copy if requested | |
3607 | */ | |
3608 | ||
3609 | if (copy) { | |
3610 | vm_object_t new_object; | |
3611 | vm_object_offset_t new_offset; | |
3612 | ||
3613 | result = vm_object_copy_strategically(object, offset, size, | |
3614 | &new_object, &new_offset, | |
3615 | ©); | |
3616 | ||
3617 | ||
3618 | if (result == KERN_MEMORY_RESTART_COPY) { | |
3619 | boolean_t success; | |
3620 | boolean_t src_needs_copy; | |
3621 | ||
3622 | /* | |
3623 | * XXX | |
3624 | * We currently ignore src_needs_copy. | |
3625 | * This really is the issue of how to make | |
3626 | * MEMORY_OBJECT_COPY_SYMMETRIC safe for | |
3627 | * non-kernel users to use. Solution forthcoming. | |
3628 | * In the meantime, since we don't allow non-kernel | |
3629 | * memory managers to specify symmetric copy, | |
3630 | * we won't run into problems here. | |
3631 | */ | |
3632 | new_object = object; | |
3633 | new_offset = offset; | |
3634 | success = vm_object_copy_quickly(&new_object, | |
3635 | new_offset, size, | |
3636 | &src_needs_copy, | |
3637 | ©); | |
3638 | assert(success); | |
3639 | result = KERN_SUCCESS; | |
3640 | } | |
3641 | /* | |
3642 | * Throw away the reference to the | |
3643 | * original object, as it won't be mapped. | |
3644 | */ | |
3645 | ||
3646 | vm_object_deallocate(object); | |
3647 | ||
3e170ce0 | 3648 | if (result != KERN_SUCCESS) { |
b0d623f7 | 3649 | return result; |
3e170ce0 | 3650 | } |
b0d623f7 A |
3651 | |
3652 | object = new_object; | |
3653 | offset = new_offset; | |
3654 | } | |
3655 | ||
3e170ce0 A |
3656 | { |
3657 | result = vm_map_enter(target_map, | |
3658 | &map_addr, map_size, | |
3659 | (vm_map_offset_t)mask, | |
3660 | flags, | |
3661 | object, offset, | |
3662 | copy, | |
3663 | cur_protection, max_protection, | |
3664 | inheritance); | |
3665 | } | |
b0d623f7 A |
3666 | if (result != KERN_SUCCESS) |
3667 | vm_object_deallocate(object); | |
3668 | *address = map_addr; | |
3669 | ||
3670 | return result; | |
3671 | } | |
3672 | ||
3673 | ||
2d21ac55 A |
3674 | #if VM_CPM |
3675 | ||
3676 | #ifdef MACH_ASSERT | |
3677 | extern pmap_paddr_t avail_start, avail_end; | |
3678 | #endif | |
3679 | ||
3680 | /* | |
3681 | * Allocate memory in the specified map, with the caveat that | |
3682 | * the memory is physically contiguous. This call may fail | |
3683 | * if the system can't find sufficient contiguous memory. | |
3684 | * This call may cause or lead to heart-stopping amounts of | |
3685 | * paging activity. | |
3686 | * | |
3687 | * Memory obtained from this call should be freed in the | |
3688 | * normal way, viz., via vm_deallocate. | |
3689 | */ | |
3690 | kern_return_t | |
3691 | vm_map_enter_cpm( | |
3692 | vm_map_t map, | |
3693 | vm_map_offset_t *addr, | |
3694 | vm_map_size_t size, | |
3695 | int flags) | |
3696 | { | |
3697 | vm_object_t cpm_obj; | |
3698 | pmap_t pmap; | |
3699 | vm_page_t m, pages; | |
3700 | kern_return_t kr; | |
3701 | vm_map_offset_t va, start, end, offset; | |
3702 | #if MACH_ASSERT | |
316670eb | 3703 | vm_map_offset_t prev_addr = 0; |
2d21ac55 A |
3704 | #endif /* MACH_ASSERT */ |
3705 | ||
3706 | boolean_t anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); | |
3e170ce0 A |
3707 | vm_tag_t tag; |
3708 | ||
3709 | VM_GET_FLAGS_ALIAS(flags, tag); | |
2d21ac55 | 3710 | |
2d21ac55 A |
3711 | if (size == 0) { |
3712 | *addr = 0; | |
3713 | return KERN_SUCCESS; | |
3714 | } | |
3715 | if (anywhere) | |
3716 | *addr = vm_map_min(map); | |
3717 | else | |
39236c6e A |
3718 | *addr = vm_map_trunc_page(*addr, |
3719 | VM_MAP_PAGE_MASK(map)); | |
3720 | size = vm_map_round_page(size, | |
3721 | VM_MAP_PAGE_MASK(map)); | |
2d21ac55 A |
3722 | |
3723 | /* | |
3724 | * LP64todo - cpm_allocate should probably allow | |
3725 | * allocations of >4GB, but not with the current | |
3726 | * algorithm, so just cast down the size for now. | |
3727 | */ | |
3728 | if (size > VM_MAX_ADDRESS) | |
3729 | return KERN_RESOURCE_SHORTAGE; | |
3730 | if ((kr = cpm_allocate(CAST_DOWN(vm_size_t, size), | |
b0d623f7 | 3731 | &pages, 0, 0, TRUE, flags)) != KERN_SUCCESS) |
2d21ac55 A |
3732 | return kr; |
3733 | ||
3734 | cpm_obj = vm_object_allocate((vm_object_size_t)size); | |
3735 | assert(cpm_obj != VM_OBJECT_NULL); | |
3736 | assert(cpm_obj->internal); | |
316670eb | 3737 | assert(cpm_obj->vo_size == (vm_object_size_t)size); |
2d21ac55 A |
3738 | assert(cpm_obj->can_persist == FALSE); |
3739 | assert(cpm_obj->pager_created == FALSE); | |
3740 | assert(cpm_obj->pageout == FALSE); | |
3741 | assert(cpm_obj->shadow == VM_OBJECT_NULL); | |
91447636 A |
3742 | |
3743 | /* | |
3744 | * Insert pages into object. | |
3745 | */ | |
3746 | ||
3747 | vm_object_lock(cpm_obj); | |
3748 | for (offset = 0; offset < size; offset += PAGE_SIZE) { | |
3749 | m = pages; | |
3750 | pages = NEXT_PAGE(m); | |
0c530ab8 | 3751 | *(NEXT_PAGE_PTR(m)) = VM_PAGE_NULL; |
91447636 A |
3752 | |
3753 | assert(!m->gobbled); | |
3754 | assert(!m->wanted); | |
3755 | assert(!m->pageout); | |
3756 | assert(!m->tabled); | |
b0d623f7 | 3757 | assert(VM_PAGE_WIRED(m)); |
91447636 A |
3758 | /* |
3759 | * ENCRYPTED SWAP: | |
3760 | * "m" is not supposed to be pageable, so it | |
3761 | * should not be encrypted. It wouldn't be safe | |
3762 | * to enter it in a new VM object while encrypted. | |
3763 | */ | |
3764 | ASSERT_PAGE_DECRYPTED(m); | |
3765 | assert(m->busy); | |
0c530ab8 | 3766 | assert(m->phys_page>=(avail_start>>PAGE_SHIFT) && m->phys_page<=(avail_end>>PAGE_SHIFT)); |
91447636 A |
3767 | |
3768 | m->busy = FALSE; | |
3769 | vm_page_insert(m, cpm_obj, offset); | |
3770 | } | |
3771 | assert(cpm_obj->resident_page_count == size / PAGE_SIZE); | |
3772 | vm_object_unlock(cpm_obj); | |
3773 | ||
3774 | /* | |
3775 | * Hang onto a reference on the object in case a | |
3776 | * multi-threaded application for some reason decides | |
3777 | * to deallocate the portion of the address space into | |
3778 | * which we will insert this object. | |
3779 | * | |
3780 | * Unfortunately, we must insert the object now before | |
3781 | * we can talk to the pmap module about which addresses | |
3782 | * must be wired down. Hence, the race with a multi- | |
3783 | * threaded app. | |
3784 | */ | |
3785 | vm_object_reference(cpm_obj); | |
3786 | ||
3787 | /* | |
3788 | * Insert object into map. | |
3789 | */ | |
3790 | ||
3791 | kr = vm_map_enter( | |
2d21ac55 A |
3792 | map, |
3793 | addr, | |
3794 | size, | |
3795 | (vm_map_offset_t)0, | |
3796 | flags, | |
3797 | cpm_obj, | |
3798 | (vm_object_offset_t)0, | |
3799 | FALSE, | |
3800 | VM_PROT_ALL, | |
3801 | VM_PROT_ALL, | |
3802 | VM_INHERIT_DEFAULT); | |
91447636 A |
3803 | |
3804 | if (kr != KERN_SUCCESS) { | |
3805 | /* | |
3806 | * A CPM object doesn't have can_persist set, | |
3807 | * so all we have to do is deallocate it to | |
3808 | * free up these pages. | |
3809 | */ | |
3810 | assert(cpm_obj->pager_created == FALSE); | |
3811 | assert(cpm_obj->can_persist == FALSE); | |
3812 | assert(cpm_obj->pageout == FALSE); | |
3813 | assert(cpm_obj->shadow == VM_OBJECT_NULL); | |
3814 | vm_object_deallocate(cpm_obj); /* kill acquired ref */ | |
3815 | vm_object_deallocate(cpm_obj); /* kill creation ref */ | |
3816 | } | |
3817 | ||
3818 | /* | |
3819 | * Inform the physical mapping system that the | |
3820 | * range of addresses may not fault, so that | |
3821 | * page tables and such can be locked down as well. | |
3822 | */ | |
3823 | start = *addr; | |
3824 | end = start + size; | |
3825 | pmap = vm_map_pmap(map); | |
3826 | pmap_pageable(pmap, start, end, FALSE); | |
3827 | ||
3828 | /* | |
3829 | * Enter each page into the pmap, to avoid faults. | |
3830 | * Note that this loop could be coded more efficiently, | |
3831 | * if the need arose, rather than looking up each page | |
3832 | * again. | |
3833 | */ | |
3834 | for (offset = 0, va = start; offset < size; | |
3835 | va += PAGE_SIZE, offset += PAGE_SIZE) { | |
2d21ac55 A |
3836 | int type_of_fault; |
3837 | ||
91447636 A |
3838 | vm_object_lock(cpm_obj); |
3839 | m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); | |
91447636 | 3840 | assert(m != VM_PAGE_NULL); |
2d21ac55 A |
3841 | |
3842 | vm_page_zero_fill(m); | |
3843 | ||
3844 | type_of_fault = DBG_ZERO_FILL_FAULT; | |
3845 | ||
6d2010ae | 3846 | vm_fault_enter(m, pmap, va, VM_PROT_ALL, VM_PROT_WRITE, |
fe8ab488 | 3847 | VM_PAGE_WIRED(m), FALSE, FALSE, FALSE, 0, NULL, |
2d21ac55 A |
3848 | &type_of_fault); |
3849 | ||
3850 | vm_object_unlock(cpm_obj); | |
91447636 A |
3851 | } |
3852 | ||
3853 | #if MACH_ASSERT | |
3854 | /* | |
3855 | * Verify ordering in address space. | |
3856 | */ | |
3857 | for (offset = 0; offset < size; offset += PAGE_SIZE) { | |
3858 | vm_object_lock(cpm_obj); | |
3859 | m = vm_page_lookup(cpm_obj, (vm_object_offset_t)offset); | |
3860 | vm_object_unlock(cpm_obj); | |
3861 | if (m == VM_PAGE_NULL) | |
316670eb A |
3862 | panic("vm_allocate_cpm: obj %p off 0x%llx no page", |
3863 | cpm_obj, (uint64_t)offset); | |
91447636 A |
3864 | assert(m->tabled); |
3865 | assert(!m->busy); | |
3866 | assert(!m->wanted); | |
3867 | assert(!m->fictitious); | |
3868 | assert(!m->private); | |
3869 | assert(!m->absent); | |
3870 | assert(!m->error); | |
3871 | assert(!m->cleaning); | |
316670eb | 3872 | assert(!m->laundry); |
91447636 A |
3873 | assert(!m->precious); |
3874 | assert(!m->clustered); | |
3875 | if (offset != 0) { | |
3876 | if (m->phys_page != prev_addr + 1) { | |
316670eb A |
3877 | printf("start 0x%llx end 0x%llx va 0x%llx\n", |
3878 | (uint64_t)start, (uint64_t)end, (uint64_t)va); | |
3879 | printf("obj %p off 0x%llx\n", cpm_obj, (uint64_t)offset); | |
3880 | printf("m %p prev_address 0x%llx\n", m, (uint64_t)prev_addr); | |
91447636 A |
3881 | panic("vm_allocate_cpm: pages not contig!"); |
3882 | } | |
3883 | } | |
3884 | prev_addr = m->phys_page; | |
3885 | } | |
3886 | #endif /* MACH_ASSERT */ | |
3887 | ||
3888 | vm_object_deallocate(cpm_obj); /* kill extra ref */ | |
3889 | ||
3890 | return kr; | |
3891 | } | |
3892 | ||
3893 | ||
3894 | #else /* VM_CPM */ | |
3895 | ||
3896 | /* | |
3897 | * Interface is defined in all cases, but unless the kernel | |
3898 | * is built explicitly for this option, the interface does | |
3899 | * nothing. | |
3900 | */ | |
3901 | ||
3902 | kern_return_t | |
3903 | vm_map_enter_cpm( | |
3904 | __unused vm_map_t map, | |
3905 | __unused vm_map_offset_t *addr, | |
3906 | __unused vm_map_size_t size, | |
3907 | __unused int flags) | |
3908 | { | |
3909 | return KERN_FAILURE; | |
3910 | } | |
3911 | #endif /* VM_CPM */ | |
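/*
 * Hedged usage sketch (illustrative only, not part of this file): on a
 * kernel built with VM_CPM, a client could obtain and release a
 * physically contiguous region as below; "kernel_map" as the target map
 * and the 4-page size are example choices, not requirements.
 */
#if 0
static void
cpm_example(void)
{
	vm_map_offset_t	addr = 0;
	vm_map_size_t	size = 4 * PAGE_SIZE;
	kern_return_t	kr;

	kr = vm_map_enter_cpm(kernel_map, &addr, size, VM_FLAGS_ANYWHERE);
	if (kr == KERN_SUCCESS) {
		/* ... use the physically contiguous pages at addr ... */
		(void) vm_deallocate(kernel_map, addr, size);
	}
}
#endif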
3912 | ||
b0d623f7 A |
3913 | /* Not used without nested pmaps */ |
3914 | #ifndef NO_NESTED_PMAP | |
2d21ac55 A |
3915 | /* |
3916 | * Clip and unnest a portion of a nested submap mapping. | |
3917 | */ | |
b0d623f7 A |
3918 | |
3919 | ||
2d21ac55 A |
3920 | static void |
3921 | vm_map_clip_unnest( | |
3922 | vm_map_t map, | |
3923 | vm_map_entry_t entry, | |
3924 | vm_map_offset_t start_unnest, | |
3925 | vm_map_offset_t end_unnest) | |
3926 | { | |
b0d623f7 A |
3927 | vm_map_offset_t old_start_unnest = start_unnest; |
3928 | vm_map_offset_t old_end_unnest = end_unnest; | |
3929 | ||
2d21ac55 | 3930 | assert(entry->is_sub_map); |
3e170ce0 | 3931 | assert(VME_SUBMAP(entry) != NULL); |
fe8ab488 | 3932 | assert(entry->use_pmap); |
2d21ac55 | 3933 | |
b0d623f7 A |
3934 | /* |
3935 | * Query the platform for the optimal unnest range. | |
3936 | * DRK: There's some duplication of effort here, since | |
3937 | * callers may have adjusted the range to some extent. This | |
3938 | * routine was introduced to support 1GiB subtree nesting | |
3939 | * for x86 platforms, which can also nest on 2MiB boundaries | |
3940 | * depending on size/alignment. | |
3941 | */ | |
3942 | if (pmap_adjust_unnest_parameters(map->pmap, &start_unnest, &end_unnest)) { | |
3943 | log_unnest_badness(map, old_start_unnest, old_end_unnest); | |
3944 | } | |
3945 | ||
2d21ac55 A |
3946 | if (entry->vme_start > start_unnest || |
3947 | entry->vme_end < end_unnest) { | |
3948 | panic("vm_map_clip_unnest(0x%llx,0x%llx): " | |
3949 | "bad nested entry: start=0x%llx end=0x%llx\n", | |
3950 | (long long)start_unnest, (long long)end_unnest, | |
3951 | (long long)entry->vme_start, (long long)entry->vme_end); | |
3952 | } | |
b0d623f7 | 3953 | |
2d21ac55 A |
3954 | if (start_unnest > entry->vme_start) { |
3955 | _vm_map_clip_start(&map->hdr, | |
3956 | entry, | |
3957 | start_unnest); | |
3e170ce0 A |
3958 | if (map->holelistenabled) { |
3959 | vm_map_store_update_first_free(map, NULL, FALSE); | |
3960 | } else { | |
3961 | vm_map_store_update_first_free(map, map->first_free, FALSE); | |
3962 | } | |
2d21ac55 A |
3963 | } |
3964 | if (entry->vme_end > end_unnest) { | |
3965 | _vm_map_clip_end(&map->hdr, | |
3966 | entry, | |
3967 | end_unnest); | |
3e170ce0 A |
3968 | if (map->holelistenabled) { |
3969 | vm_map_store_update_first_free(map, NULL, FALSE); | |
3970 | } else { | |
3971 | vm_map_store_update_first_free(map, map->first_free, FALSE); | |
3972 | } | |
2d21ac55 A |
3973 | } |
3974 | ||
3975 | pmap_unnest(map->pmap, | |
3976 | entry->vme_start, | |
3977 | entry->vme_end - entry->vme_start); | |
316670eb | 3978 | if ((map->mapped_in_other_pmaps) && (map->ref_count)) { |
2d21ac55 A |
3979 | /* clean up parent map/maps */ |
3980 | vm_map_submap_pmap_clean( | |
3981 | map, entry->vme_start, | |
3982 | entry->vme_end, | |
3e170ce0 A |
3983 | VME_SUBMAP(entry), |
3984 | VME_OFFSET(entry)); | |
2d21ac55 A |
3985 | } |
3986 | entry->use_pmap = FALSE; | |
3e170ce0 A |
3987 | if ((map->pmap != kernel_pmap) && |
3988 | (VME_ALIAS(entry) == VM_MEMORY_SHARED_PMAP)) { | |
3989 | VME_ALIAS_SET(entry, VM_MEMORY_UNSHARED_PMAP); | |
316670eb | 3990 | } |
2d21ac55 | 3991 | } |
b0d623f7 | 3992 | #endif /* NO_NESTED_PMAP */ |
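/*
 * Worked example (illustrative): with pmap_nesting_size_min == 2MB, a
 * clip at address 0x40100000 in vm_map_clip_start() below first rounds
 * the unnest request down to the aligned range [0x40000000, 0x40200000);
 * pmap_adjust_unnest_parameters() may widen that range further before
 * the entry is clipped and pmap_unnest() runs.
 */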
2d21ac55 | 3993 | |
1c79356b A |
3994 | /* |
3995 | * vm_map_clip_start: [ internal use only ] | |
3996 | * | |
3997 | * Asserts that the given entry begins at or after | |
3998 | * the specified address; if necessary, | |
3999 | * it splits the entry into two. | |
4000 | */ | |
e2d2fc5c | 4001 | void |
2d21ac55 A |
4002 | vm_map_clip_start( |
4003 | vm_map_t map, | |
4004 | vm_map_entry_t entry, | |
4005 | vm_map_offset_t startaddr) | |
4006 | { | |
0c530ab8 | 4007 | #ifndef NO_NESTED_PMAP |
fe8ab488 A |
4008 | if (entry->is_sub_map && |
4009 | entry->use_pmap && | |
2d21ac55 A |
4010 | startaddr >= entry->vme_start) { |
4011 | vm_map_offset_t start_unnest, end_unnest; | |
4012 | ||
4013 | /* | |
4014 | * Make sure "startaddr" is no longer in a nested range | |
4015 | * before we clip. Unnest only the minimum range the platform | |
4016 | * can handle. | |
b0d623f7 A |
4017 | * vm_map_clip_unnest may perform additional adjustments to |
4018 | * the unnest range. | |
2d21ac55 A |
4019 | */ |
4020 | start_unnest = startaddr & ~(pmap_nesting_size_min - 1); | |
4021 | end_unnest = start_unnest + pmap_nesting_size_min; | |
4022 | vm_map_clip_unnest(map, entry, start_unnest, end_unnest); | |
4023 | } | |
4024 | #endif /* NO_NESTED_PMAP */ | |
4025 | if (startaddr > entry->vme_start) { | |
3e170ce0 | 4026 | if (VME_OBJECT(entry) && |
2d21ac55 | 4027 | !entry->is_sub_map && |
3e170ce0 | 4028 | VME_OBJECT(entry)->phys_contiguous) { |
2d21ac55 A |
4029 | pmap_remove(map->pmap, |
4030 | (addr64_t)(entry->vme_start), | |
4031 | (addr64_t)(entry->vme_end)); | |
4032 | } | |
4033 | _vm_map_clip_start(&map->hdr, entry, startaddr); | |
3e170ce0 A |
4034 | if (map->holelistenabled) { |
4035 | vm_map_store_update_first_free(map, NULL, FALSE); | |
4036 | } else { | |
4037 | vm_map_store_update_first_free(map, map->first_free, FALSE); | |
4038 | } | |
2d21ac55 A |
4039 | } |
4040 | } | |
4041 | ||
1c79356b A |
4042 | |
4043 | #define vm_map_copy_clip_start(copy, entry, startaddr) \ | |
4044 | MACRO_BEGIN \ | |
4045 | if ((startaddr) > (entry)->vme_start) \ | |
4046 | _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \ | |
4047 | MACRO_END | |
4048 | ||
4049 | /* | |
4050 | * This routine is called only when it is known that | |
4051 | * the entry must be split. | |
4052 | */ | |
91447636 | 4053 | static void |
1c79356b A |
4054 | _vm_map_clip_start( |
4055 | register struct vm_map_header *map_header, | |
4056 | register vm_map_entry_t entry, | |
3e170ce0 | 4057 | register vm_map_offset_t start) |
1c79356b A |
4058 | { |
4059 | register vm_map_entry_t new_entry; | |
4060 | ||
4061 | /* | |
4062 | * Split off the front portion -- | |
4063 | * note that we must insert the new | |
4064 | * entry BEFORE this one, so that | |
4065 | * this entry has the specified starting | |
4066 | * address. | |
4067 | */ | |
4068 | ||
fe8ab488 A |
4069 | if (entry->map_aligned) { |
4070 | assert(VM_MAP_PAGE_ALIGNED(start, | |
4071 | VM_MAP_HDR_PAGE_MASK(map_header))); | |
4072 | } | |
4073 | ||
7ddcb079 | 4074 | new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); |
1c79356b A |
4075 | vm_map_entry_copy_full(new_entry, entry); |
4076 | ||
4077 | new_entry->vme_end = start; | |
e2d2fc5c | 4078 | assert(new_entry->vme_start < new_entry->vme_end); |
3e170ce0 | 4079 | VME_OFFSET_SET(entry, VME_OFFSET(entry) + (start - entry->vme_start)); |
e2d2fc5c | 4080 | assert(start < entry->vme_end); |
1c79356b A |
4081 | entry->vme_start = start; |
4082 | ||
6d2010ae | 4083 | _vm_map_store_entry_link(map_header, entry->vme_prev, new_entry); |
1c79356b A |
4084 | |
4085 | if (entry->is_sub_map) | |
3e170ce0 | 4086 | vm_map_reference(VME_SUBMAP(new_entry)); |
1c79356b | 4087 | else |
3e170ce0 | 4088 | vm_object_reference(VME_OBJECT(new_entry)); |
1c79356b A |
4089 | } |
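/*
 * Worked example (illustrative): splitting an entry that covers
 * [0x1000, 0x5000) with object offset 0 at start == 0x3000 leaves the
 * new (inserted) entry covering [0x1000, 0x3000) at offset 0, while the
 * original entry becomes [0x3000, 0x5000) at offset 0x2000, and an
 * extra reference is taken on the submap or object they now share.
 */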
4090 | ||
4091 | ||
4092 | /* | |
4093 | * vm_map_clip_end: [ internal use only ] | |
4094 | * | |
4095 | * Asserts that the given entry ends at or before | |
4096 | * the specified address; if necessary, | |
4097 | * it splits the entry into two. | |
4098 | */ | |
e2d2fc5c | 4099 | void |
2d21ac55 A |
4100 | vm_map_clip_end( |
4101 | vm_map_t map, | |
4102 | vm_map_entry_t entry, | |
4103 | vm_map_offset_t endaddr) | |
4104 | { | |
4105 | if (endaddr > entry->vme_end) { | |
4106 | /* | |
4107 | * Within the scope of this clipping, limit "endaddr" to | |
4108 | * the end of this map entry... | |
4109 | */ | |
4110 | endaddr = entry->vme_end; | |
4111 | } | |
4112 | #ifndef NO_NESTED_PMAP | |
fe8ab488 | 4113 | if (entry->is_sub_map && entry->use_pmap) { |
2d21ac55 A |
4114 | vm_map_offset_t start_unnest, end_unnest; |
4115 | ||
4116 | /* | |
4117 | * Make sure the range between the start of this entry and | |
4118 | * the new "endaddr" is no longer nested before we clip. | |
4119 | * Unnest only the minimum range the platform can handle. | |
b0d623f7 A |
4120 | * vm_map_clip_unnest may perform additional adjustments to |
4121 | * the unnest range. | |
2d21ac55 A |
4122 | */ |
4123 | start_unnest = entry->vme_start; | |
4124 | end_unnest = | |
4125 | (endaddr + pmap_nesting_size_min - 1) & | |
4126 | ~(pmap_nesting_size_min - 1); | |
4127 | vm_map_clip_unnest(map, entry, start_unnest, end_unnest); | |
4128 | } | |
4129 | #endif /* NO_NESTED_PMAP */ | |
4130 | if (endaddr < entry->vme_end) { | |
3e170ce0 | 4131 | if (VME_OBJECT(entry) && |
2d21ac55 | 4132 | !entry->is_sub_map && |
3e170ce0 | 4133 | VME_OBJECT(entry)->phys_contiguous) { |
2d21ac55 A |
4134 | pmap_remove(map->pmap, |
4135 | (addr64_t)(entry->vme_start), | |
4136 | (addr64_t)(entry->vme_end)); | |
4137 | } | |
4138 | _vm_map_clip_end(&map->hdr, entry, endaddr); | |
3e170ce0 A |
4139 | if (map->holelistenabled) { |
4140 | vm_map_store_update_first_free(map, NULL, FALSE); | |
4141 | } else { | |
4142 | vm_map_store_update_first_free(map, map->first_free, FALSE); | |
4143 | } | |
2d21ac55 A |
4144 | } |
4145 | } | |
0c530ab8 | 4146 | |
1c79356b A |
4147 | |
4148 | #define vm_map_copy_clip_end(copy, entry, endaddr) \ | |
4149 | MACRO_BEGIN \ | |
4150 | if ((endaddr) < (entry)->vme_end) \ | |
4151 | _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \ | |
4152 | MACRO_END | |
4153 | ||
4154 | /* | |
4155 | * This routine is called only when it is known that | |
4156 | * the entry must be split. | |
4157 | */ | |
91447636 | 4158 | static void |
1c79356b A |
4159 | _vm_map_clip_end( |
4160 | register struct vm_map_header *map_header, | |
4161 | register vm_map_entry_t entry, | |
2d21ac55 | 4162 | register vm_map_offset_t end) |
1c79356b A |
4163 | { |
4164 | register vm_map_entry_t new_entry; | |
4165 | ||
4166 | /* | |
4167 | * Create a new entry and insert it | |
4168 | * AFTER the specified entry | |
4169 | */ | |
4170 | ||
fe8ab488 A |
4171 | if (entry->map_aligned) { |
4172 | assert(VM_MAP_PAGE_ALIGNED(end, | |
4173 | VM_MAP_HDR_PAGE_MASK(map_header))); | |
4174 | } | |
4175 | ||
7ddcb079 | 4176 | new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); |
1c79356b A |
4177 | vm_map_entry_copy_full(new_entry, entry); |
4178 | ||
e2d2fc5c | 4179 | assert(entry->vme_start < end); |
1c79356b | 4180 | new_entry->vme_start = entry->vme_end = end; |
3e170ce0 A |
4181 | VME_OFFSET_SET(new_entry, |
4182 | VME_OFFSET(new_entry) + (end - entry->vme_start)); | |
e2d2fc5c | 4183 | assert(new_entry->vme_start < new_entry->vme_end); |
1c79356b | 4184 | |
6d2010ae | 4185 | _vm_map_store_entry_link(map_header, entry, new_entry); |
1c79356b A |
4186 | |
4187 | if (entry->is_sub_map) | |
3e170ce0 | 4188 | vm_map_reference(VME_SUBMAP(new_entry)); |
1c79356b | 4189 | else |
3e170ce0 | 4190 | vm_object_reference(VME_OBJECT(new_entry)); |
1c79356b A |
4191 | } |
4192 | ||
4193 | ||
4194 | /* | |
4195 | * VM_MAP_RANGE_CHECK: [ internal use only ] | |
4196 | * | |
4197 | * Asserts that the starting and ending region | |
4198 | * addresses fall within the valid range of the map. | |
4199 | */ | |
2d21ac55 A |
4200 | #define VM_MAP_RANGE_CHECK(map, start, end) \ |
4201 | MACRO_BEGIN \ | |
4202 | if (start < vm_map_min(map)) \ | |
4203 | start = vm_map_min(map); \ | |
4204 | if (end > vm_map_max(map)) \ | |
4205 | end = vm_map_max(map); \ | |
4206 | if (start > end) \ | |
4207 | start = end; \ | |
4208 | MACRO_END | |
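/*
 * Worked example (illustrative): with vm_map_min(map) == 0x1000 and
 * vm_map_max(map) == 0x9000, VM_MAP_RANGE_CHECK clamps a request of
 * (start 0x500, end 0xA000) to (0x1000, 0x9000), and an inverted
 * request such as (start 0x8000, end 0x2000) collapses to the empty
 * range (0x2000, 0x2000) rather than failing.
 */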
1c79356b A |
4209 | |
4210 | /* | |
4211 | * vm_map_range_check: [ internal use only ] | |
4212 | * | |
4213 | * Check that the region defined by the specified start and | |
4214 | * end addresses is wholly contained within a single map | |
4215 | * entry or set of adjacent map entries of the specified map, | |
4216 | * i.e. the specified region contains no unmapped space. | |
4217 | * If any or all of the region is unmapped, FALSE is returned. | |
4218 | * Otherwise, TRUE is returned and if the output argument 'entry' | |
4219 | * is not NULL it points to the map entry containing the start | |
4220 | * of the region. | |
4221 | * | |
4222 | * The map is locked for reading on entry and is left locked. | |
4223 | */ | |
91447636 | 4224 | static boolean_t |
1c79356b A |
4225 | vm_map_range_check( |
4226 | register vm_map_t map, | |
91447636 A |
4227 | register vm_map_offset_t start, |
4228 | register vm_map_offset_t end, | |
1c79356b A |
4229 | vm_map_entry_t *entry) |
4230 | { | |
4231 | vm_map_entry_t cur; | |
91447636 | 4232 | register vm_map_offset_t prev; |
1c79356b A |
4233 | |
4234 | /* | |
4235 | * Basic sanity checks first | |
4236 | */ | |
4237 | if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) | |
4238 | return (FALSE); | |
4239 | ||
4240 | /* | |
4241 | * Check first if the region starts within a valid | |
4242 | * mapping for the map. | |
4243 | */ | |
4244 | if (!vm_map_lookup_entry(map, start, &cur)) | |
4245 | return (FALSE); | |
4246 | ||
4247 | /* | |
4248 | * Optimize for the case that the region is contained | |
4249 | * in a single map entry. | |
4250 | */ | |
4251 | if (entry != (vm_map_entry_t *) NULL) | |
4252 | *entry = cur; | |
4253 | if (end <= cur->vme_end) | |
4254 | return (TRUE); | |
4255 | ||
4256 | /* | |
4257 | * If the region is not wholly contained within a | |
4258 | * single entry, walk the entries looking for holes. | |
4259 | */ | |
4260 | prev = cur->vme_end; | |
4261 | cur = cur->vme_next; | |
4262 | while ((cur != vm_map_to_entry(map)) && (prev == cur->vme_start)) { | |
4263 | if (end <= cur->vme_end) | |
4264 | return (TRUE); | |
4265 | prev = cur->vme_end; | |
4266 | cur = cur->vme_next; | |
4267 | } | |
4268 | return (FALSE); | |
4269 | } | |
4270 | ||
4271 | /* | |
4272 | * vm_map_submap: [ kernel use only ] | |
4273 | * | |
4274 | * Mark the given range as handled by a subordinate map. | |
4275 | * | |
4276 | * This range must have been created with vm_map_find using | |
4277 | * the vm_submap_object, and no other operations may have been | |
4278 | * performed on this range prior to calling vm_map_submap. | |
4279 | * | |
4280 | * Only a limited number of operations can be performed | |
4281 | * within this range after calling vm_map_submap: | |
4282 | * vm_fault | |
4283 | * [Don't try vm_map_copyin!] | |
4284 | * | |
4285 | * To remove a submapping, one must first remove the | |
4286 | * range from the superior map, and then destroy the | |
4287 | * submap (if desired). [Better yet, don't try it.] | |
4288 | */ | |
4289 | kern_return_t | |
4290 | vm_map_submap( | |
fe8ab488 | 4291 | vm_map_t map, |
91447636 A |
4292 | vm_map_offset_t start, |
4293 | vm_map_offset_t end, | |
fe8ab488 | 4294 | vm_map_t submap, |
91447636 | 4295 | vm_map_offset_t offset, |
0c530ab8 | 4296 | #ifdef NO_NESTED_PMAP |
91447636 | 4297 | __unused |
0c530ab8 | 4298 | #endif /* NO_NESTED_PMAP */ |
fe8ab488 | 4299 | boolean_t use_pmap) |
1c79356b A |
4300 | { |
4301 | vm_map_entry_t entry; | |
4302 | register kern_return_t result = KERN_INVALID_ARGUMENT; | |
4303 | register vm_object_t object; | |
4304 | ||
4305 | vm_map_lock(map); | |
4306 | ||
2d21ac55 | 4307 | if (! vm_map_lookup_entry(map, start, &entry)) { |
1c79356b | 4308 | entry = entry->vme_next; |
2d21ac55 | 4309 | } |
1c79356b | 4310 | |
2d21ac55 A |
4311 | if (entry == vm_map_to_entry(map) || |
4312 | entry->is_sub_map) { | |
1c79356b A |
4313 | vm_map_unlock(map); |
4314 | return KERN_INVALID_ARGUMENT; | |
4315 | } | |
4316 | ||
2d21ac55 | 4317 | vm_map_clip_start(map, entry, start); |
1c79356b A |
4318 | vm_map_clip_end(map, entry, end); |
4319 | ||
4320 | if ((entry->vme_start == start) && (entry->vme_end == end) && | |
4321 | (!entry->is_sub_map) && | |
3e170ce0 | 4322 | ((object = VME_OBJECT(entry)) == vm_submap_object) && |
1c79356b A |
4323 | (object->resident_page_count == 0) && |
4324 | (object->copy == VM_OBJECT_NULL) && | |
4325 | (object->shadow == VM_OBJECT_NULL) && | |
4326 | (!object->pager_created)) { | |
3e170ce0 A |
4327 | VME_OFFSET_SET(entry, (vm_object_offset_t)offset); |
4328 | VME_OBJECT_SET(entry, VM_OBJECT_NULL); | |
2d21ac55 A |
4329 | vm_object_deallocate(object); |
4330 | entry->is_sub_map = TRUE; | |
fe8ab488 | 4331 | entry->use_pmap = FALSE; |
3e170ce0 | 4332 | VME_SUBMAP_SET(entry, submap); |
2d21ac55 | 4333 | vm_map_reference(submap); |
316670eb A |
4334 | if (submap->mapped_in_other_pmaps == FALSE && |
4335 | vm_map_pmap(submap) != PMAP_NULL && | |
4336 | vm_map_pmap(submap) != vm_map_pmap(map)) { | |
4337 | /* | |
4338 | * This submap is being mapped in a map | |
4339 | * that uses a different pmap. | |
4340 | * Set its "mapped_in_other_pmaps" flag | |
4341 | * to indicate that we now need to | |
4342 | * remove mappings from all pmaps rather | |
4343 | * than just the submap's pmap. | |
4344 | */ | |
4345 | submap->mapped_in_other_pmaps = TRUE; | |
4346 | } | |
2d21ac55 | 4347 | |
0c530ab8 | 4348 | #ifndef NO_NESTED_PMAP |
2d21ac55 A |
4349 | if (use_pmap) { |
4350 | /* nest if platform code will allow */ | |
4351 | if(submap->pmap == NULL) { | |
316670eb A |
4352 | ledger_t ledger = map->pmap->ledger; |
4353 | submap->pmap = pmap_create(ledger, | |
4354 | (vm_map_size_t) 0, FALSE); | |
2d21ac55 A |
4355 | if(submap->pmap == PMAP_NULL) { |
4356 | vm_map_unlock(map); | |
4357 | return(KERN_NO_SPACE); | |
55e303ae | 4358 | } |
55e303ae | 4359 | } |
2d21ac55 | 4360 | result = pmap_nest(map->pmap, |
3e170ce0 | 4361 | (VME_SUBMAP(entry))->pmap, |
2d21ac55 A |
4362 | (addr64_t)start, |
4363 | (addr64_t)start, | |
4364 | (uint64_t)(end - start)); | |
4365 | if(result) | |
4366 | panic("vm_map_submap: pmap_nest failed, rc = %08X\n", result); | |
4367 | entry->use_pmap = TRUE; | |
4368 | } | |
0c530ab8 | 4369 | #else /* NO_NESTED_PMAP */ |
2d21ac55 | 4370 | pmap_remove(map->pmap, (addr64_t)start, (addr64_t)end); |
0c530ab8 | 4371 | #endif /* NO_NESTED_PMAP */ |
2d21ac55 | 4372 | result = KERN_SUCCESS; |
1c79356b A |
4373 | } |
4374 | vm_map_unlock(map); | |
4375 | ||
4376 | return(result); | |
4377 | } | |
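/*
 * Hedged usage sketch (illustrative only, not part of this file),
 * following the protocol documented above: reserve the range with the
 * global vm_submap_object, then overlay the subordinate map.
 * "parent_map" and "submap" (created elsewhere, e.g. with
 * vm_map_create()), the addresses, and the reference handling are
 * example assumptions; error-path cleanup is elided.
 */
#if 0
static kern_return_t
submap_example(vm_map_t parent_map, vm_map_t submap)
{
	vm_map_offset_t	start = 0x10000000;
	vm_map_offset_t	end   = 0x10400000;
	kern_return_t	kr;

	/* reserve [start, end) backed by vm_submap_object */
	vm_object_reference(vm_submap_object);
	kr = vm_map_enter(parent_map, &start, end - start, 0,
	    VM_FLAGS_FIXED, vm_submap_object, 0, FALSE,
	    VM_PROT_ALL, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		return kr;

	/* overlay the subordinate map; use_pmap requests pmap nesting */
	return vm_map_submap(parent_map, start, end, submap,
	    0 /* offset */, TRUE /* use_pmap */);
}
#endif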
4378 | ||
4379 | /* | |
4380 | * vm_map_protect: | |
4381 | * | |
4382 | * Sets the protection of the specified address | |
4383 | * region in the target map. If "set_max" is | |
4384 | * specified, the maximum protection is to be set; | |
4385 | * otherwise, only the current protection is affected. | |
4386 | */ | |
4387 | kern_return_t | |
4388 | vm_map_protect( | |
4389 | register vm_map_t map, | |
91447636 A |
4390 | register vm_map_offset_t start, |
4391 | register vm_map_offset_t end, | |
1c79356b A |
4392 | register vm_prot_t new_prot, |
4393 | register boolean_t set_max) | |
4394 | { | |
4395 | register vm_map_entry_t current; | |
2d21ac55 | 4396 | register vm_map_offset_t prev; |
1c79356b A |
4397 | vm_map_entry_t entry; |
4398 | vm_prot_t new_max; | |
1c79356b A |
4399 | |
4400 | XPR(XPR_VM_MAP, | |
2d21ac55 | 4401 | "vm_map_protect, 0x%X start 0x%X end 0x%X, new 0x%X %d", |
b0d623f7 | 4402 | map, start, end, new_prot, set_max); |
1c79356b A |
4403 | |
4404 | vm_map_lock(map); | |
4405 | ||
91447636 A |
4406 | /* LP64todo - remove this check when vm_map_commpage64() |
4407 | * no longer has to stuff in a map_entry for the commpage | |
4408 | * above the map's max_offset. | |
4409 | */ | |
4410 | if (start >= map->max_offset) { | |
4411 | vm_map_unlock(map); | |
4412 | return(KERN_INVALID_ADDRESS); | |
4413 | } | |
4414 | ||
b0d623f7 A |
4415 | while(1) { |
4416 | /* | |
4417 | * Lookup the entry. If it doesn't start in a valid | |
4418 | * entry, return an error. | |
4419 | */ | |
4420 | if (! vm_map_lookup_entry(map, start, &entry)) { | |
4421 | vm_map_unlock(map); | |
4422 | return(KERN_INVALID_ADDRESS); | |
4423 | } | |
4424 | ||
4425 | if (entry->superpage_size && (start & (SUPERPAGE_SIZE-1))) { /* extend request to whole entry */ | |
4426 | start = SUPERPAGE_ROUND_DOWN(start); | |
4427 | continue; | |
4428 | } | |
4429 | break; | |
4430 | } | |
4431 | if (entry->superpage_size) | |
4432 | end = SUPERPAGE_ROUND_UP(end); | |
1c79356b A |
4433 | |
4434 | /* | |
4435 | * Make a first pass to check for protection and address | |
4436 | * violations. | |
4437 | */ | |
4438 | ||
4439 | current = entry; | |
4440 | prev = current->vme_start; | |
4441 | while ((current != vm_map_to_entry(map)) && | |
4442 | (current->vme_start < end)) { | |
4443 | ||
4444 | /* | |
4445 | * If there is a hole, return an error. | |
4446 | */ | |
4447 | if (current->vme_start != prev) { | |
4448 | vm_map_unlock(map); | |
4449 | return(KERN_INVALID_ADDRESS); | |
4450 | } | |
4451 | ||
4452 | new_max = current->max_protection; | |
4453 | if(new_prot & VM_PROT_COPY) { | |
4454 | new_max |= VM_PROT_WRITE; | |
4455 | if ((new_prot & (new_max | VM_PROT_COPY)) != new_prot) { | |
4456 | vm_map_unlock(map); | |
4457 | return(KERN_PROTECTION_FAILURE); | |
4458 | } | |
4459 | } else { | |
4460 | if ((new_prot & new_max) != new_prot) { | |
4461 | vm_map_unlock(map); | |
4462 | return(KERN_PROTECTION_FAILURE); | |
4463 | } | |
4464 | } | |
4465 | ||
593a1d5f | 4466 | |
1c79356b A |
4467 | prev = current->vme_end; |
4468 | current = current->vme_next; | |
4469 | } | |
4470 | if (end > prev) { | |
4471 | vm_map_unlock(map); | |
4472 | return(KERN_INVALID_ADDRESS); | |
4473 | } | |
4474 | ||
4475 | /* | |
4476 | * Go back and fix up protections. | |
4477 | * Clip to start here if the range starts within | |
4478 | * the entry. | |
4479 | */ | |
4480 | ||
4481 | current = entry; | |
2d21ac55 A |
4482 | if (current != vm_map_to_entry(map)) { |
4483 | /* clip and unnest if necessary */ | |
4484 | vm_map_clip_start(map, current, start); | |
1c79356b | 4485 | } |
2d21ac55 | 4486 | |
1c79356b A |
4487 | while ((current != vm_map_to_entry(map)) && |
4488 | (current->vme_start < end)) { | |
4489 | ||
4490 | vm_prot_t old_prot; | |
4491 | ||
4492 | vm_map_clip_end(map, current, end); | |
4493 | ||
fe8ab488 A |
4494 | if (current->is_sub_map) { |
4495 | /* clipping did unnest if needed */ | |
4496 | assert(!current->use_pmap); | |
4497 | } | |
2d21ac55 | 4498 | |
1c79356b A |
4499 | old_prot = current->protection; |
4500 | ||
4501 | if(new_prot & VM_PROT_COPY) { | |
4502 | /* caller is asking specifically to copy the */ | |
4503 | /* mapped data; this implies that max protection */ | |
4504 | /* will include write. Caller must be prepared */ | |
4505 | /* for loss of shared memory communication in the */ | |
4506 | /* target area after taking this step */ | |
6d2010ae | 4507 | |
3e170ce0 A |
4508 | if (current->is_sub_map == FALSE && |
4509 | VME_OBJECT(current) == VM_OBJECT_NULL) { | |
4510 | VME_OBJECT_SET(current, | |
4511 | vm_object_allocate( | |
4512 | (vm_map_size_t) | |
4513 | (current->vme_end - | |
4514 | current->vme_start))); | |
4515 | VME_OFFSET_SET(current, 0); | |
fe8ab488 | 4516 | assert(current->use_pmap); |
6d2010ae | 4517 | } |
3e170ce0 | 4518 | assert(current->wired_count == 0); |
1c79356b A |
4519 | current->needs_copy = TRUE; |
4520 | current->max_protection |= VM_PROT_WRITE; | |
4521 | } | |
4522 | ||
4523 | if (set_max) | |
4524 | current->protection = | |
4525 | (current->max_protection = | |
2d21ac55 A |
4526 | new_prot & ~VM_PROT_COPY) & |
4527 | old_prot; | |
1c79356b A |
4528 | else |
4529 | current->protection = new_prot & ~VM_PROT_COPY; | |
4530 | ||
4531 | /* | |
4532 | * Update physical map if necessary. | |
4533 | * If the request is to turn off write protection, | |
4534 | * we won't do it for real (in pmap). This is because | |
4535 | * it would cause copy-on-write to fail. We've already | |
4536 | * set the new protection in the map, so if a | |
4537 | * write-protect fault occurs, it will be fixed up | |
4538 | * properly, COW or not. | |
4539 | */ | |
1c79356b | 4540 | if (current->protection != old_prot) { |
1c79356b A |
4541 | /* Look one level down: we support nested pmaps */ |
4542 | /* from mapped submaps which are direct entries */ | |
4543 | /* in our map */ | |
0c530ab8 | 4544 | |
2d21ac55 | 4545 | vm_prot_t prot; |
0c530ab8 | 4546 | |
2d21ac55 A |
4547 | prot = current->protection & ~VM_PROT_WRITE; |
4548 | ||
3e170ce0 | 4549 | if (override_nx(map, VME_ALIAS(current)) && prot) |
0c530ab8 | 4550 | prot |= VM_PROT_EXECUTE; |
2d21ac55 | 4551 | |
0c530ab8 | 4552 | if (current->is_sub_map && current->use_pmap) { |
3e170ce0 | 4553 | pmap_protect(VME_SUBMAP(current)->pmap, |
2d21ac55 A |
4554 | current->vme_start, |
4555 | current->vme_end, | |
4556 | prot); | |
1c79356b | 4557 | } else { |
2d21ac55 A |
4558 | pmap_protect(map->pmap, |
4559 | current->vme_start, | |
4560 | current->vme_end, | |
4561 | prot); | |
1c79356b | 4562 | } |
1c79356b A |
4563 | } |
4564 | current = current->vme_next; | |
4565 | } | |
4566 | ||
5353443c | 4567 | current = entry; |
91447636 A |
4568 | while ((current != vm_map_to_entry(map)) && |
4569 | (current->vme_start <= end)) { | |
5353443c A |
4570 | vm_map_simplify_entry(map, current); |
4571 | current = current->vme_next; | |
4572 | } | |
4573 | ||
1c79356b A |
4574 | vm_map_unlock(map); |
4575 | return(KERN_SUCCESS); | |
4576 | } | |
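/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The VM_PROT_COPY handling above lets a caller combine a protection
 * change with a forced copy-on-write break of the range.  A minimal
 * usage sketch, assuming the vm_map_protect() signature implied by the
 * body above ("example_map" and "addr" are hypothetical):
 */
#if 0	/* illustrative only */
static void
example_make_range_private_writable(vm_map_t example_map,
				    vm_map_offset_t addr,
				    vm_map_size_t len)
{
	kern_return_t kr;

	/* Ask for write access plus a private copy of the pages. */
	kr = vm_map_protect(example_map, addr, addr + len,
			    VM_PROT_READ | VM_PROT_WRITE | VM_PROT_COPY,
			    FALSE);	/* set_max FALSE: change current prot */
	if (kr == KERN_PROTECTION_FAILURE) {
		/* new_prot exceeded max_protection for some entry */
	}
}
#endif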
4577 | ||
4578 | /* | |
4579 | * vm_map_inherit: | |
4580 | * | |
4581 | * Sets the inheritance of the specified address | |
4582 | * range in the target map. Inheritance | |
4583 | * affects how the map will be shared with | |
4584 | * child maps at the time of vm_map_fork. | |
4585 | */ | |
4586 | kern_return_t | |
4587 | vm_map_inherit( | |
4588 | register vm_map_t map, | |
91447636 A |
4589 | register vm_map_offset_t start, |
4590 | register vm_map_offset_t end, | |
1c79356b A |
4591 | register vm_inherit_t new_inheritance) |
4592 | { | |
4593 | register vm_map_entry_t entry; | |
4594 | vm_map_entry_t temp_entry; | |
4595 | ||
4596 | vm_map_lock(map); | |
4597 | ||
4598 | VM_MAP_RANGE_CHECK(map, start, end); | |
4599 | ||
4600 | if (vm_map_lookup_entry(map, start, &temp_entry)) { | |
4601 | entry = temp_entry; | |
1c79356b A |
4602 | } |
4603 | else { | |
4604 | temp_entry = temp_entry->vme_next; | |
4605 | entry = temp_entry; | |
4606 | } | |
4607 | ||
4608 | /* first check entire range for submaps which can't support the */ | |
4609 | /* given inheritance. */ | |
4610 | while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { | |
4611 | if(entry->is_sub_map) { | |
91447636 A |
4612 | if(new_inheritance == VM_INHERIT_COPY) { |
4613 | vm_map_unlock(map); | |
1c79356b | 4614 | return(KERN_INVALID_ARGUMENT); |
91447636 | 4615 | } |
1c79356b A |
4616 | } |
4617 | ||
4618 | entry = entry->vme_next; | |
4619 | } | |
4620 | ||
4621 | entry = temp_entry; | |
2d21ac55 A |
4622 | if (entry != vm_map_to_entry(map)) { |
4623 | /* clip and unnest if necessary */ | |
4624 | vm_map_clip_start(map, entry, start); | |
4625 | } | |
1c79356b A |
4626 | |
4627 | while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { | |
4628 | vm_map_clip_end(map, entry, end); | |
fe8ab488 A |
4629 | if (entry->is_sub_map) { |
4630 | /* clip did unnest if needed */ | |
4631 | assert(!entry->use_pmap); | |
4632 | } | |
1c79356b A |
4633 | |
4634 | entry->inheritance = new_inheritance; | |
4635 | ||
4636 | entry = entry->vme_next; | |
4637 | } | |
4638 | ||
4639 | vm_map_unlock(map); | |
4640 | return(KERN_SUCCESS); | |
4641 | } | |
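/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * As the scan above enforces, VM_INHERIT_COPY is rejected if the range
 * contains a submap entry, so callers apply it to ordinary mappings.
 * Hypothetical example ("example_map" and "addr" are placeholders):
 */
#if 0	/* illustrative only */
static kern_return_t
example_mark_copy_on_fork(vm_map_t example_map,
			  vm_map_offset_t addr,
			  vm_map_size_t len)
{
	/* A child created by vm_map_fork() gets a COW copy of this range. */
	return vm_map_inherit(example_map, addr, addr + len,
			      VM_INHERIT_COPY);
}
#endif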
4642 | ||
2d21ac55 A |
4643 | /* |
4644 | * Update the accounting for the amount of wired memory in this map. If the user has | |
4645 | * exceeded the defined limits, then we fail. Wiring on behalf of the kernel never fails. | |
4646 | */ | |
4647 | ||
4648 | static kern_return_t | |
4649 | add_wire_counts( | |
4650 | vm_map_t map, | |
4651 | vm_map_entry_t entry, | |
4652 | boolean_t user_wire) | |
4653 | { | |
4654 | vm_map_size_t size; | |
4655 | ||
4656 | if (user_wire) { | |
6d2010ae | 4657 | unsigned int total_wire_count = vm_page_wire_count + vm_lopage_free_count; |
2d21ac55 A |
4658 | |
4659 | /* | |
4660 | * We're wiring memory at the request of the user. Check if this is the first time the user is wiring | |
4661 | * this map entry. | |
4662 | */ | |
4663 | ||
4664 | if (entry->user_wired_count == 0) { | |
4665 | size = entry->vme_end - entry->vme_start; | |
4666 | ||
4667 | /* | |
4668 | * Since this is the first time the user is wiring this map entry, check to see if we're | |
4669 | * exceeding the user wire limits. There is a per-map limit, which is the smaller of | |
4670 | * the process's rlimit and the global vm_user_wire_limit that caps it. There is also | |
4671 | * a system-wide limit on the amount of memory all users can wire. If the user is over either | |
4672 | * limit, then we fail. | |
4673 | */ | |
4674 | ||
4675 | if(size + map->user_wire_size > MIN(map->user_wire_limit, vm_user_wire_limit) || | |
6d2010ae A |
4676 | size + ptoa_64(total_wire_count) > vm_global_user_wire_limit || |
4677 | size + ptoa_64(total_wire_count) > max_mem - vm_global_no_user_wire_amount) | |
2d21ac55 A |
4678 | return KERN_RESOURCE_SHORTAGE; |
4679 | ||
4680 | /* | |
4681 | * The first time the user wires an entry, we also increment the wired_count and add this to | |
4682 | * the total that has been wired in the map. | |
4683 | */ | |
4684 | ||
4685 | if (entry->wired_count >= MAX_WIRE_COUNT) | |
4686 | return KERN_FAILURE; | |
4687 | ||
4688 | entry->wired_count++; | |
4689 | map->user_wire_size += size; | |
4690 | } | |
4691 | ||
4692 | if (entry->user_wired_count >= MAX_WIRE_COUNT) | |
4693 | return KERN_FAILURE; | |
4694 | ||
4695 | entry->user_wired_count++; | |
4696 | ||
4697 | } else { | |
4698 | ||
4699 | /* | |
4700 | * The kernel's wiring the memory. Just bump the count and continue. | |
4701 | */ | |
4702 | ||
4703 | if (entry->wired_count >= MAX_WIRE_COUNT) | |
4704 | panic("vm_map_wire: too many wirings"); | |
4705 | ||
4706 | entry->wired_count++; | |
4707 | } | |
4708 | ||
4709 | return KERN_SUCCESS; | |
4710 | } | |
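/*
 * Editor's note -- worked example of the per-map check above, with
 * hypothetical numbers: if map->user_wire_size is 48MB,
 * map->user_wire_limit is 64MB and vm_user_wire_limit is 1GB, then a
 * request to wire another 32MB fails with KERN_RESOURCE_SHORTAGE,
 * because 48MB + 32MB > MIN(64MB, 1GB), regardless of how much head
 * room the global and system-wide limits still have.
 */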
4711 | ||
4712 | /* | |
4713 | * Update the memory wiring accounting now that the given map entry is being unwired. | |
4714 | */ | |
4715 | ||
4716 | static void | |
4717 | subtract_wire_counts( | |
4718 | vm_map_t map, | |
4719 | vm_map_entry_t entry, | |
4720 | boolean_t user_wire) | |
4721 | { | |
4722 | ||
4723 | if (user_wire) { | |
4724 | ||
4725 | /* | |
4726 | * We're unwiring memory at the request of the user. See if we're removing the last user wire reference. | |
4727 | */ | |
4728 | ||
4729 | if (entry->user_wired_count == 1) { | |
4730 | ||
4731 | /* | |
4732 | * We're removing the last user wire reference. Decrement the wired_count and the total | |
4733 | * user wired memory for this map. | |
4734 | */ | |
4735 | ||
4736 | assert(entry->wired_count >= 1); | |
4737 | entry->wired_count--; | |
4738 | map->user_wire_size -= entry->vme_end - entry->vme_start; | |
4739 | } | |
4740 | ||
4741 | assert(entry->user_wired_count >= 1); | |
4742 | entry->user_wired_count--; | |
4743 | ||
4744 | } else { | |
4745 | ||
4746 | /* | |
4747 | * The kernel is unwiring the memory. Just update the count. | |
4748 | */ | |
4749 | ||
4750 | assert(entry->wired_count >= 1); | |
4751 | entry->wired_count--; | |
4752 | } | |
4753 | } | |
4754 | ||
1c79356b A |
4755 | /* |
4756 | * vm_map_wire: | |
4757 | * | |
4758 | * Sets the pageability of the specified address range in the | |
4759 | * target map as wired. Regions specified as not pageable require | |
4760 | * locked-down physical memory and physical page maps. The | |
4761 | * access_type variable indicates types of accesses that must not | |
4762 | * generate page faults. This is checked against protection of | |
4763 | * memory being locked-down. | |
4764 | * | |
4765 | * The map must not be locked, but a reference must remain to the | |
4766 | * map throughout the call. | |
4767 | */ | |
91447636 | 4768 | static kern_return_t |
1c79356b A |
4769 | vm_map_wire_nested( |
4770 | register vm_map_t map, | |
91447636 A |
4771 | register vm_map_offset_t start, |
4772 | register vm_map_offset_t end, | |
3e170ce0 | 4773 | register vm_prot_t caller_prot, |
1c79356b | 4774 | boolean_t user_wire, |
9bccf70c | 4775 | pmap_t map_pmap, |
fe8ab488 A |
4776 | vm_map_offset_t pmap_addr, |
4777 | ppnum_t *physpage_p) | |
1c79356b A |
4778 | { |
4779 | register vm_map_entry_t entry; | |
3e170ce0 | 4780 | register vm_prot_t access_type; |
1c79356b | 4781 | struct vm_map_entry *first_entry, tmp_entry; |
91447636 A |
4782 | vm_map_t real_map; |
4783 | register vm_map_offset_t s,e; | |
1c79356b A |
4784 | kern_return_t rc; |
4785 | boolean_t need_wakeup; | |
4786 | boolean_t main_map = FALSE; | |
9bccf70c | 4787 | wait_interrupt_t interruptible_state; |
0b4e3aa0 | 4788 | thread_t cur_thread; |
1c79356b | 4789 | unsigned int last_timestamp; |
91447636 | 4790 | vm_map_size_t size; |
fe8ab488 A |
4791 | boolean_t wire_and_extract; |
4792 | ||
3e170ce0 A |
4793 | access_type = (caller_prot & VM_PROT_ALL); |
4794 | ||
fe8ab488 A |
4795 | wire_and_extract = FALSE; |
4796 | if (physpage_p != NULL) { | |
4797 | /* | |
4798 | * The caller wants the physical page number of the | |
4799 | * wired page. We return only one physical page number | |
4800 | * so this works for only one page at a time. | |
4801 | */ | |
4802 | if ((end - start) != PAGE_SIZE) { | |
4803 | return KERN_INVALID_ARGUMENT; | |
4804 | } | |
4805 | wire_and_extract = TRUE; | |
4806 | *physpage_p = 0; | |
4807 | } | |
1c79356b A |
4808 | |
4809 | vm_map_lock(map); | |
4810 | if(map_pmap == NULL) | |
4811 | main_map = TRUE; | |
4812 | last_timestamp = map->timestamp; | |
4813 | ||
4814 | VM_MAP_RANGE_CHECK(map, start, end); | |
4815 | assert(page_aligned(start)); | |
4816 | assert(page_aligned(end)); | |
39236c6e A |
4817 | assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); |
4818 | assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))); | |
0b4e3aa0 A |
4819 | if (start == end) { |
4820 | /* We wired what the caller asked for, zero pages */ | |
4821 | vm_map_unlock(map); | |
4822 | return KERN_SUCCESS; | |
4823 | } | |
1c79356b | 4824 | |
2d21ac55 A |
4825 | need_wakeup = FALSE; |
4826 | cur_thread = current_thread(); | |
4827 | ||
4828 | s = start; | |
4829 | rc = KERN_SUCCESS; | |
4830 | ||
4831 | if (vm_map_lookup_entry(map, s, &first_entry)) { | |
1c79356b | 4832 | entry = first_entry; |
2d21ac55 A |
4833 | /* |
4834 | * vm_map_clip_start will be done later. | |
4835 | * We don't want to unnest any nested submaps here! | |
4836 | */ | |
1c79356b A |
4837 | } else { |
4838 | /* Start address is not in map */ | |
2d21ac55 A |
4839 | rc = KERN_INVALID_ADDRESS; |
4840 | goto done; | |
1c79356b A |
4841 | } |
4842 | ||
2d21ac55 A |
4843 | while ((entry != vm_map_to_entry(map)) && (s < end)) { |
4844 | /* | |
4845 | * At this point, we have wired from "start" to "s". | |
4846 | * We still need to wire from "s" to "end". | |
4847 | * | |
4848 | * "entry" hasn't been clipped, so it could start before "s" | |
4849 | * and/or end after "end". | |
4850 | */ | |
4851 | ||
4852 | /* "e" is how far we want to wire in this entry */ | |
4853 | e = entry->vme_end; | |
4854 | if (e > end) | |
4855 | e = end; | |
4856 | ||
1c79356b A |
4857 | /* |
4858 | * If another thread is wiring/unwiring this entry then | |
4859 | * block after informing other thread to wake us up. | |
4860 | */ | |
4861 | if (entry->in_transition) { | |
9bccf70c A |
4862 | wait_result_t wait_result; |
4863 | ||
1c79356b A |
4864 | /* |
4865 | * We have not clipped the entry. Make sure that | |
4866 | * the start address is in range so that the lookup | |
4867 | * below will succeed. | |
2d21ac55 A |
4868 | * "s" is the current starting point: we've already |
4869 | * wired from "start" to "s" and we still have | |
4870 | * to wire from "s" to "end". | |
1c79356b | 4871 | */ |
1c79356b A |
4872 | |
4873 | entry->needs_wakeup = TRUE; | |
4874 | ||
4875 | /* | |
4876 | * wake up anybody waiting on entries that we have | |
4877 | * already wired. | |
4878 | */ | |
4879 | if (need_wakeup) { | |
4880 | vm_map_entry_wakeup(map); | |
4881 | need_wakeup = FALSE; | |
4882 | } | |
4883 | /* | |
4884 | * User wiring is interruptible | |
4885 | */ | |
9bccf70c | 4886 | wait_result = vm_map_entry_wait(map, |
2d21ac55 A |
4887 | (user_wire) ? THREAD_ABORTSAFE : |
4888 | THREAD_UNINT); | |
9bccf70c | 4889 | if (user_wire && wait_result == THREAD_INTERRUPTED) { |
1c79356b A |
4890 | /* |
4891 | * undo the wirings we have done so far | |
4892 | * We do not clear the needs_wakeup flag, | |
4893 | * because we cannot tell if we were the | |
4894 | * only one waiting. | |
4895 | */ | |
2d21ac55 A |
4896 | rc = KERN_FAILURE; |
4897 | goto done; | |
1c79356b A |
4898 | } |
4899 | ||
1c79356b A |
4900 | /* |
4901 | * Cannot avoid a lookup here. Reset the timestamp. | |
4902 | */ | |
4903 | last_timestamp = map->timestamp; | |
4904 | ||
4905 | /* | |
4906 | * The entry could have been clipped, look it up again. | |
4907 | * Worst that can happen is that it may not exist anymore. | |
4908 | */ | |
4909 | if (!vm_map_lookup_entry(map, s, &first_entry)) { | |
1c79356b A |
4910 | /* |
4911 | * User: undo everything upto the previous | |
4912 | * entry. let vm_map_unwire worry about | |
4913 | * checking the validity of the range. | |
4914 | */ | |
2d21ac55 A |
4915 | rc = KERN_FAILURE; |
4916 | goto done; | |
1c79356b A |
4917 | } |
4918 | entry = first_entry; | |
4919 | continue; | |
4920 | } | |
2d21ac55 A |
4921 | |
4922 | if (entry->is_sub_map) { | |
91447636 A |
4923 | vm_map_offset_t sub_start; |
4924 | vm_map_offset_t sub_end; | |
4925 | vm_map_offset_t local_start; | |
4926 | vm_map_offset_t local_end; | |
1c79356b | 4927 | pmap_t pmap; |
2d21ac55 | 4928 | |
fe8ab488 A |
4929 | if (wire_and_extract) { |
4930 | /* | |
4931 | * Wiring would result in copy-on-write | |
4932 | * which would not be compatible with | |
4933 | * the sharing we have with the original | |
4934 | * provider of this memory. | |
4935 | */ | |
4936 | rc = KERN_INVALID_ARGUMENT; | |
4937 | goto done; | |
4938 | } | |
4939 | ||
2d21ac55 | 4940 | vm_map_clip_start(map, entry, s); |
1c79356b A |
4941 | vm_map_clip_end(map, entry, end); |
4942 | ||
3e170ce0 | 4943 | sub_start = VME_OFFSET(entry); |
2d21ac55 | 4944 | sub_end = entry->vme_end; |
3e170ce0 | 4945 | sub_end += VME_OFFSET(entry) - entry->vme_start; |
2d21ac55 | 4946 | |
1c79356b A |
4947 | local_end = entry->vme_end; |
4948 | if(map_pmap == NULL) { | |
2d21ac55 A |
4949 | vm_object_t object; |
4950 | vm_object_offset_t offset; | |
4951 | vm_prot_t prot; | |
4952 | boolean_t wired; | |
4953 | vm_map_entry_t local_entry; | |
4954 | vm_map_version_t version; | |
4955 | vm_map_t lookup_map; | |
4956 | ||
1c79356b | 4957 | if(entry->use_pmap) { |
3e170ce0 | 4958 | pmap = VME_SUBMAP(entry)->pmap; |
9bccf70c A |
4959 | /* ppc implementation requires that */ |
4960 | /* submap's pmap address ranges line */ | |
4961 | /* up with parent map */ | |
4962 | #ifdef notdef | |
4963 | pmap_addr = sub_start; | |
4964 | #endif | |
2d21ac55 | 4965 | pmap_addr = s; |
1c79356b A |
4966 | } else { |
4967 | pmap = map->pmap; | |
2d21ac55 | 4968 | pmap_addr = s; |
1c79356b | 4969 | } |
2d21ac55 | 4970 | |
1c79356b | 4971 | if (entry->wired_count) { |
2d21ac55 A |
4972 | if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) |
4973 | goto done; | |
4974 | ||
4975 | /* | |
4976 | * The map was not unlocked: | |
4977 | * no need to goto re-lookup. | |
4978 | * Just go directly to next entry. | |
4979 | */ | |
1c79356b | 4980 | entry = entry->vme_next; |
2d21ac55 | 4981 | s = entry->vme_start; |
1c79356b A |
4982 | continue; |
4983 | ||
2d21ac55 | 4984 | } |
9bccf70c | 4985 | |
2d21ac55 A |
4986 | /* call vm_map_lookup_locked to */ |
4987 | /* cause any needs copy to be */ | |
4988 | /* evaluated */ | |
4989 | local_start = entry->vme_start; | |
4990 | lookup_map = map; | |
4991 | vm_map_lock_write_to_read(map); | |
4992 | if(vm_map_lookup_locked( | |
4993 | &lookup_map, local_start, | |
4994 | access_type, | |
4995 | OBJECT_LOCK_EXCLUSIVE, | |
4996 | &version, &object, | |
4997 | &offset, &prot, &wired, | |
4998 | NULL, | |
4999 | &real_map)) { | |
1c79356b | 5000 | |
2d21ac55 A |
5001 | vm_map_unlock_read(lookup_map); |
5002 | vm_map_unwire(map, start, | |
5003 | s, user_wire); | |
5004 | return(KERN_FAILURE); | |
5005 | } | |
316670eb | 5006 | vm_object_unlock(object); |
2d21ac55 A |
5007 | if(real_map != lookup_map) |
5008 | vm_map_unlock(real_map); | |
5009 | vm_map_unlock_read(lookup_map); | |
5010 | vm_map_lock(map); | |
1c79356b | 5011 | |
2d21ac55 A |
5012 | /* we unlocked, so must re-lookup */ |
5013 | if (!vm_map_lookup_entry(map, | |
5014 | local_start, | |
5015 | &local_entry)) { | |
5016 | rc = KERN_FAILURE; | |
5017 | goto done; | |
5018 | } | |
5019 | ||
5020 | /* | |
5021 | * entry could have been "simplified", | |
5022 | * so re-clip | |
5023 | */ | |
5024 | entry = local_entry; | |
5025 | assert(s == local_start); | |
5026 | vm_map_clip_start(map, entry, s); | |
5027 | vm_map_clip_end(map, entry, end); | |
5028 | /* re-compute "e" */ | |
5029 | e = entry->vme_end; | |
5030 | if (e > end) | |
5031 | e = end; | |
5032 | ||
5033 | /* did we have a change of type? */ | |
5034 | if (!entry->is_sub_map) { | |
5035 | last_timestamp = map->timestamp; | |
5036 | continue; | |
1c79356b A |
5037 | } |
5038 | } else { | |
9bccf70c | 5039 | local_start = entry->vme_start; |
2d21ac55 A |
5040 | pmap = map_pmap; |
5041 | } | |
5042 | ||
5043 | if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) | |
5044 | goto done; | |
5045 | ||
5046 | entry->in_transition = TRUE; | |
5047 | ||
5048 | vm_map_unlock(map); | |
3e170ce0 | 5049 | rc = vm_map_wire_nested(VME_SUBMAP(entry), |
1c79356b | 5050 | sub_start, sub_end, |
3e170ce0 | 5051 | caller_prot, |
fe8ab488 A |
5052 | user_wire, pmap, pmap_addr, |
5053 | NULL); | |
2d21ac55 | 5054 | vm_map_lock(map); |
9bccf70c | 5055 | |
1c79356b A |
5056 | /* |
5057 | * Find the entry again. It could have been clipped | |
5058 | * after we unlocked the map. | |
5059 | */ | |
9bccf70c A |
5060 | if (!vm_map_lookup_entry(map, local_start, |
5061 | &first_entry)) | |
5062 | panic("vm_map_wire: re-lookup failed"); | |
5063 | entry = first_entry; | |
1c79356b | 5064 | |
2d21ac55 A |
5065 | assert(local_start == s); |
5066 | /* re-compute "e" */ | |
5067 | e = entry->vme_end; | |
5068 | if (e > end) | |
5069 | e = end; | |
5070 | ||
1c79356b A |
5071 | last_timestamp = map->timestamp; |
5072 | while ((entry != vm_map_to_entry(map)) && | |
2d21ac55 | 5073 | (entry->vme_start < e)) { |
1c79356b A |
5074 | assert(entry->in_transition); |
5075 | entry->in_transition = FALSE; | |
5076 | if (entry->needs_wakeup) { | |
5077 | entry->needs_wakeup = FALSE; | |
5078 | need_wakeup = TRUE; | |
5079 | } | |
5080 | if (rc != KERN_SUCCESS) {/* from vm_*_wire */ | |
2d21ac55 | 5081 | subtract_wire_counts(map, entry, user_wire); |
1c79356b A |
5082 | } |
5083 | entry = entry->vme_next; | |
5084 | } | |
5085 | if (rc != KERN_SUCCESS) { /* from vm_*_wire */ | |
2d21ac55 | 5086 | goto done; |
1c79356b | 5087 | } |
2d21ac55 A |
5088 | |
5089 | /* no need to relookup again */ | |
5090 | s = entry->vme_start; | |
1c79356b A |
5091 | continue; |
5092 | } | |
5093 | ||
5094 | /* | |
5095 | * If this entry is already wired then increment | |
5096 | * the appropriate wire reference count. | |
5097 | */ | |
9bccf70c | 5098 | if (entry->wired_count) { |
fe8ab488 A |
5099 | |
5100 | if ((entry->protection & access_type) != access_type) { | |
5101 | /* found a protection problem */ | |
5102 | ||
5103 | /* | |
5104 | * XXX FBDP | |
5105 | * We should always return an error | |
5106 | * in this case but since we didn't | |
5107 | * enforce it before, let's do | |
5108 | * it only for the new "wire_and_extract" | |
5109 | * code path for now... | |
5110 | */ | |
5111 | if (wire_and_extract) { | |
5112 | rc = KERN_PROTECTION_FAILURE; | |
5113 | goto done; | |
5114 | } | |
5115 | } | |
5116 | ||
1c79356b A |
5117 | /* |
5118 | * entry is already wired down, get our reference | |
5119 | * after clipping to our range. | |
5120 | */ | |
2d21ac55 | 5121 | vm_map_clip_start(map, entry, s); |
1c79356b | 5122 | vm_map_clip_end(map, entry, end); |
1c79356b | 5123 | |
2d21ac55 A |
5124 | if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) |
5125 | goto done; | |
5126 | ||
fe8ab488 A |
5127 | if (wire_and_extract) { |
5128 | vm_object_t object; | |
5129 | vm_object_offset_t offset; | |
5130 | vm_page_t m; | |
5131 | ||
5132 | /* | |
5133 | * We don't have to "wire" the page again | |
5134 | * but we still have to "extract" its | |
5135 | * physical page number, after some sanity | |
5136 | * checks. | |
5137 | */ | |
5138 | assert((entry->vme_end - entry->vme_start) | |
5139 | == PAGE_SIZE); | |
5140 | assert(!entry->needs_copy); | |
5141 | assert(!entry->is_sub_map); | |
3e170ce0 | 5142 | assert(VME_OBJECT(entry)); |
fe8ab488 A |
5143 | if (((entry->vme_end - entry->vme_start) |
5144 | != PAGE_SIZE) || | |
5145 | entry->needs_copy || | |
5146 | entry->is_sub_map || | |
3e170ce0 | 5147 | VME_OBJECT(entry) == VM_OBJECT_NULL) { |
fe8ab488 A |
5148 | rc = KERN_INVALID_ARGUMENT; |
5149 | goto done; | |
5150 | } | |
5151 | ||
3e170ce0 A |
5152 | object = VME_OBJECT(entry); |
5153 | offset = VME_OFFSET(entry); | |
fe8ab488 A |
5154 | /* need exclusive lock to update m->dirty */ |
5155 | if (entry->protection & VM_PROT_WRITE) { | |
5156 | vm_object_lock(object); | |
5157 | } else { | |
5158 | vm_object_lock_shared(object); | |
5159 | } | |
5160 | m = vm_page_lookup(object, offset); | |
5161 | assert(m != VM_PAGE_NULL); | |
5162 | assert(m->wire_count); | |
5163 | if (m != VM_PAGE_NULL && m->wire_count) { | |
5164 | *physpage_p = m->phys_page; | |
5165 | if (entry->protection & VM_PROT_WRITE) { | |
5166 | vm_object_lock_assert_exclusive( | |
5167 | m->object); | |
5168 | m->dirty = TRUE; | |
5169 | } | |
5170 | } else { | |
5171 | /* not already wired !? */ | |
5172 | *physpage_p = 0; | |
5173 | } | |
5174 | vm_object_unlock(object); | |
5175 | } | |
5176 | ||
2d21ac55 | 5177 | /* map was not unlocked: no need to relookup */ |
1c79356b | 5178 | entry = entry->vme_next; |
2d21ac55 | 5179 | s = entry->vme_start; |
1c79356b A |
5180 | continue; |
5181 | } | |
5182 | ||
5183 | /* | |
5184 | * Unwired entry or wire request transmitted via submap | |
5185 | */ | |
5186 | ||
5187 | ||
5188 | /* | |
5189 | * Perform actions of vm_map_lookup that need the write | |
5190 | * lock on the map: create a shadow object for a | |
5191 | * copy-on-write region, or an object for a zero-fill | |
5192 | * region. | |
5193 | */ | |
5194 | size = entry->vme_end - entry->vme_start; | |
5195 | /* | |
5196 | * If wiring a copy-on-write page, we need to copy it now | |
5197 | * even if we're only (currently) requesting read access. | |
5198 | * This is aggressive, but once it's wired we can't move it. | |
5199 | */ | |
5200 | if (entry->needs_copy) { | |
fe8ab488 A |
5201 | if (wire_and_extract) { |
5202 | /* | |
5203 | * We're supposed to share with the original | |
5204 | * provider so should not be "needs_copy" | |
5205 | */ | |
5206 | rc = KERN_INVALID_ARGUMENT; | |
5207 | goto done; | |
5208 | } | |
3e170ce0 A |
5209 | |
5210 | VME_OBJECT_SHADOW(entry, size); | |
1c79356b | 5211 | entry->needs_copy = FALSE; |
3e170ce0 | 5212 | } else if (VME_OBJECT(entry) == VM_OBJECT_NULL) { |
fe8ab488 A |
5213 | if (wire_and_extract) { |
5214 | /* | |
5215 | * We're supposed to share with the original | |
5216 | * provider so should already have an object. | |
5217 | */ | |
5218 | rc = KERN_INVALID_ARGUMENT; | |
5219 | goto done; | |
5220 | } | |
3e170ce0 A |
5221 | VME_OBJECT_SET(entry, vm_object_allocate(size)); |
5222 | VME_OFFSET_SET(entry, (vm_object_offset_t)0); | |
fe8ab488 | 5223 | assert(entry->use_pmap); |
1c79356b A |
5224 | } |
5225 | ||
2d21ac55 | 5226 | vm_map_clip_start(map, entry, s); |
1c79356b A |
5227 | vm_map_clip_end(map, entry, end); |
5228 | ||
2d21ac55 | 5229 | /* re-compute "e" */ |
1c79356b | 5230 | e = entry->vme_end; |
2d21ac55 A |
5231 | if (e > end) |
5232 | e = end; | |
1c79356b A |
5233 | |
5234 | /* | |
5235 | * Check for holes and protection mismatch. | |
5236 | * Holes: Next entry should be contiguous unless this | |
5237 | * is the end of the region. | |
5238 | * Protection: Access requested must be allowed, unless | |
5239 | * wiring is by protection class | |
5240 | */ | |
2d21ac55 A |
5241 | if ((entry->vme_end < end) && |
5242 | ((entry->vme_next == vm_map_to_entry(map)) || | |
5243 | (entry->vme_next->vme_start > entry->vme_end))) { | |
5244 | /* found a hole */ | |
5245 | rc = KERN_INVALID_ADDRESS; | |
5246 | goto done; | |
5247 | } | |
5248 | if ((entry->protection & access_type) != access_type) { | |
5249 | /* found a protection problem */ | |
5250 | rc = KERN_PROTECTION_FAILURE; | |
5251 | goto done; | |
1c79356b A |
5252 | } |
5253 | ||
5254 | assert(entry->wired_count == 0 && entry->user_wired_count == 0); | |
5255 | ||
2d21ac55 A |
5256 | if ((rc = add_wire_counts(map, entry, user_wire)) != KERN_SUCCESS) |
5257 | goto done; | |
1c79356b A |
5258 | |
5259 | entry->in_transition = TRUE; | |
5260 | ||
5261 | /* | |
5262 | * This entry might get split once we unlock the map. | |
5263 | * In vm_fault_wire(), we need the current range as | |
5264 | * defined by this entry. In order for this to work | |
5265 | * along with a simultaneous clip operation, we make a | |
5266 | * temporary copy of this entry and use that for the | |
5267 | * wiring. Note that the underlying objects do not | |
5268 | * change during a clip. | |
5269 | */ | |
5270 | tmp_entry = *entry; | |
5271 | ||
5272 | /* | |
5273 | * The in_transition state guarantees that the entry | |
5274 | * (or entries for this range, if a split occurred) will be | |
5275 | * there when the map lock is acquired for the second time. | |
5276 | */ | |
5277 | vm_map_unlock(map); | |
0b4e3aa0 | 5278 | |
9bccf70c A |
5279 | if (!user_wire && cur_thread != THREAD_NULL) |
5280 | interruptible_state = thread_interrupt_level(THREAD_UNINT); | |
91447636 A |
5281 | else |
5282 | interruptible_state = THREAD_UNINT; | |
9bccf70c | 5283 | |
1c79356b | 5284 | if(map_pmap) |
9bccf70c | 5285 | rc = vm_fault_wire(map, |
3e170ce0 | 5286 | &tmp_entry, caller_prot, map_pmap, pmap_addr, |
fe8ab488 | 5287 | physpage_p); |
1c79356b | 5288 | else |
9bccf70c | 5289 | rc = vm_fault_wire(map, |
3e170ce0 | 5290 | &tmp_entry, caller_prot, map->pmap, |
fe8ab488 A |
5291 | tmp_entry.vme_start, |
5292 | physpage_p); | |
0b4e3aa0 A |
5293 | |
5294 | if (!user_wire && cur_thread != THREAD_NULL) | |
9bccf70c | 5295 | thread_interrupt_level(interruptible_state); |
0b4e3aa0 | 5296 | |
1c79356b A |
5297 | vm_map_lock(map); |
5298 | ||
5299 | if (last_timestamp+1 != map->timestamp) { | |
5300 | /* | |
5301 | * Find the entry again. It could have been clipped | |
5302 | * after we unlocked the map. | |
5303 | */ | |
5304 | if (!vm_map_lookup_entry(map, tmp_entry.vme_start, | |
2d21ac55 | 5305 | &first_entry)) |
1c79356b A |
5306 | panic("vm_map_wire: re-lookup failed"); |
5307 | ||
5308 | entry = first_entry; | |
5309 | } | |
5310 | ||
5311 | last_timestamp = map->timestamp; | |
5312 | ||
5313 | while ((entry != vm_map_to_entry(map)) && | |
5314 | (entry->vme_start < tmp_entry.vme_end)) { | |
5315 | assert(entry->in_transition); | |
5316 | entry->in_transition = FALSE; | |
5317 | if (entry->needs_wakeup) { | |
5318 | entry->needs_wakeup = FALSE; | |
5319 | need_wakeup = TRUE; | |
5320 | } | |
5321 | if (rc != KERN_SUCCESS) { /* from vm_*_wire */ | |
2d21ac55 | 5322 | subtract_wire_counts(map, entry, user_wire); |
1c79356b A |
5323 | } |
5324 | entry = entry->vme_next; | |
5325 | } | |
5326 | ||
5327 | if (rc != KERN_SUCCESS) { /* from vm_*_wire */ | |
2d21ac55 | 5328 | goto done; |
1c79356b | 5329 | } |
2d21ac55 A |
5330 | |
5331 | s = entry->vme_start; | |
1c79356b | 5332 | } /* end while loop through map entries */ |
2d21ac55 A |
5333 | |
5334 | done: | |
5335 | if (rc == KERN_SUCCESS) { | |
5336 | /* repair any damage we may have made to the VM map */ | |
5337 | vm_map_simplify_range(map, start, end); | |
5338 | } | |
5339 | ||
1c79356b A |
5340 | vm_map_unlock(map); |
5341 | ||
5342 | /* | |
5343 | * wake up anybody waiting on entries we wired. | |
5344 | */ | |
5345 | if (need_wakeup) | |
5346 | vm_map_entry_wakeup(map); | |
5347 | ||
2d21ac55 A |
5348 | if (rc != KERN_SUCCESS) { |
5349 | /* undo what has been wired so far */ | |
5350 | vm_map_unwire(map, start, s, user_wire); | |
fe8ab488 A |
5351 | if (physpage_p) { |
5352 | *physpage_p = 0; | |
5353 | } | |
2d21ac55 A |
5354 | } |
5355 | ||
5356 | return rc; | |
1c79356b A |
5357 | |
5358 | } | |
5359 | ||
5360 | kern_return_t | |
3e170ce0 | 5361 | vm_map_wire_external( |
1c79356b | 5362 | register vm_map_t map, |
91447636 A |
5363 | register vm_map_offset_t start, |
5364 | register vm_map_offset_t end, | |
3e170ce0 | 5365 | register vm_prot_t caller_prot, |
1c79356b A |
5366 | boolean_t user_wire) |
5367 | { | |
3e170ce0 A |
5368 | kern_return_t kret; |
5369 | ||
5370 | caller_prot &= ~VM_PROT_MEMORY_TAG_MASK; | |
5371 | caller_prot |= VM_PROT_MEMORY_TAG_MAKE(vm_tag_bt()); | |
5372 | kret = vm_map_wire_nested(map, start, end, caller_prot, | |
5373 | user_wire, (pmap_t)NULL, 0, NULL); | |
5374 | return kret; | |
5375 | } | |
1c79356b | 5376 | |
3e170ce0 A |
5377 | kern_return_t |
5378 | vm_map_wire( | |
5379 | register vm_map_t map, | |
5380 | register vm_map_offset_t start, | |
5381 | register vm_map_offset_t end, | |
5382 | register vm_prot_t caller_prot, | |
5383 | boolean_t user_wire) | |
5384 | { | |
1c79356b A |
5385 | kern_return_t kret; |
5386 | ||
3e170ce0 | 5387 | kret = vm_map_wire_nested(map, start, end, caller_prot, |
fe8ab488 A |
5388 | user_wire, (pmap_t)NULL, 0, NULL); |
5389 | return kret; | |
5390 | } | |
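/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * A typical pairing of vm_map_wire() and vm_map_unwire() as defined in
 * this file; user_wire FALSE takes a kernel wiring, which the matching
 * unwire must balance ("example_map" and "addr" are hypothetical):
 */
#if 0	/* illustrative only */
static kern_return_t
example_wire_for_io(vm_map_t example_map,
		    vm_map_offset_t addr,
		    vm_map_size_t len)
{
	kern_return_t kr;

	kr = vm_map_wire(example_map, addr, addr + len,
			 VM_PROT_READ | VM_PROT_WRITE,	/* must not fault */
			 FALSE);			/* kernel wiring */
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... perform I/O against the wired range ... */

	vm_map_unwire(example_map, addr, addr + len, FALSE);
	return KERN_SUCCESS;
}
#endif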
5391 | ||
5392 | kern_return_t | |
3e170ce0 | 5393 | vm_map_wire_and_extract_external( |
fe8ab488 A |
5394 | vm_map_t map, |
5395 | vm_map_offset_t start, | |
3e170ce0 | 5396 | vm_prot_t caller_prot, |
fe8ab488 A |
5397 | boolean_t user_wire, |
5398 | ppnum_t *physpage_p) | |
5399 | { | |
3e170ce0 A |
5400 | kern_return_t kret; |
5401 | ||
5402 | caller_prot &= ~VM_PROT_MEMORY_TAG_MASK; | |
5403 | caller_prot |= VM_PROT_MEMORY_TAG_MAKE(vm_tag_bt()); | |
5404 | kret = vm_map_wire_nested(map, | |
5405 | start, | |
5406 | start+VM_MAP_PAGE_SIZE(map), | |
5407 | caller_prot, | |
5408 | user_wire, | |
5409 | (pmap_t)NULL, | |
5410 | 0, | |
5411 | physpage_p); | |
5412 | if (kret != KERN_SUCCESS && | |
5413 | physpage_p != NULL) { | |
5414 | *physpage_p = 0; | |
5415 | } | |
5416 | return kret; | |
5417 | } | |
fe8ab488 | 5418 | |
3e170ce0 A |
5419 | kern_return_t |
5420 | vm_map_wire_and_extract( | |
5421 | vm_map_t map, | |
5422 | vm_map_offset_t start, | |
5423 | vm_prot_t caller_prot, | |
5424 | boolean_t user_wire, | |
5425 | ppnum_t *physpage_p) | |
5426 | { | |
fe8ab488 A |
5427 | kern_return_t kret; |
5428 | ||
5429 | kret = vm_map_wire_nested(map, | |
5430 | start, | |
5431 | start+VM_MAP_PAGE_SIZE(map), | |
3e170ce0 | 5432 | caller_prot, |
fe8ab488 A |
5433 | user_wire, |
5434 | (pmap_t)NULL, | |
5435 | 0, | |
5436 | physpage_p); | |
5437 | if (kret != KERN_SUCCESS && | |
5438 | physpage_p != NULL) { | |
5439 | *physpage_p = 0; | |
5440 | } | |
1c79356b A |
5441 | return kret; |
5442 | } | |
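/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * vm_map_wire_and_extract() wires exactly one VM-map page (the callee
 * rejects any other size) and reports its physical page number.
 * Hypothetical example:
 */
#if 0	/* illustrative only */
static kern_return_t
example_pin_one_page(vm_map_t example_map,
		     vm_map_offset_t addr,
		     ppnum_t *ppnum_out)
{
	/* On failure, *ppnum_out is reset to 0 by the callee. */
	return vm_map_wire_and_extract(example_map, addr,
				       VM_PROT_READ | VM_PROT_WRITE,
				       TRUE,	/* user wiring */
				       ppnum_out);
}
#endif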
5443 | ||
5444 | /* | |
5445 | * vm_map_unwire: | |
5446 | * | |
5447 | * Sets the pageability of the specified address range in the target map | |
5448 | * as pageable. Regions specified must have been wired previously. | |
5449 | * | |
5450 | * The map must not be locked, but a reference must remain to the map | |
5451 | * throughout the call. | |
5452 | * | |
5453 | * Kernel will panic on failures. User unwire ignores holes and | |
5454 | * unwired and in-transition entries to avoid losing memory by leaving | |
5455 | * it wired. | |
5456 | */ | |
91447636 | 5457 | static kern_return_t |
1c79356b A |
5458 | vm_map_unwire_nested( |
5459 | register vm_map_t map, | |
91447636 A |
5460 | register vm_map_offset_t start, |
5461 | register vm_map_offset_t end, | |
1c79356b | 5462 | boolean_t user_wire, |
9bccf70c | 5463 | pmap_t map_pmap, |
91447636 | 5464 | vm_map_offset_t pmap_addr) |
1c79356b A |
5465 | { |
5466 | register vm_map_entry_t entry; | |
5467 | struct vm_map_entry *first_entry, tmp_entry; | |
5468 | boolean_t need_wakeup; | |
5469 | boolean_t main_map = FALSE; | |
5470 | unsigned int last_timestamp; | |
5471 | ||
5472 | vm_map_lock(map); | |
5473 | if(map_pmap == NULL) | |
5474 | main_map = TRUE; | |
5475 | last_timestamp = map->timestamp; | |
5476 | ||
5477 | VM_MAP_RANGE_CHECK(map, start, end); | |
5478 | assert(page_aligned(start)); | |
5479 | assert(page_aligned(end)); | |
39236c6e A |
5480 | assert(VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map))); |
5481 | assert(VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map))); | |
1c79356b | 5482 | |
2d21ac55 A |
5483 | if (start == end) { |
5484 | /* We unwired what the caller asked for: zero pages */ | |
5485 | vm_map_unlock(map); | |
5486 | return KERN_SUCCESS; | |
5487 | } | |
5488 | ||
1c79356b A |
5489 | if (vm_map_lookup_entry(map, start, &first_entry)) { |
5490 | entry = first_entry; | |
2d21ac55 A |
5491 | /* |
5492 | * vm_map_clip_start will be done later. | |
5493 | * We don't want to unnest any nested submaps here! | |
5494 | */ | |
1c79356b A |
5495 | } |
5496 | else { | |
2d21ac55 A |
5497 | if (!user_wire) { |
5498 | panic("vm_map_unwire: start not found"); | |
5499 | } | |
1c79356b A |
5500 | /* Start address is not in map. */ |
5501 | vm_map_unlock(map); | |
5502 | return(KERN_INVALID_ADDRESS); | |
5503 | } | |
5504 | ||
b0d623f7 A |
5505 | if (entry->superpage_size) { |
5506 | /* superpages are always wired */ | |
5507 | vm_map_unlock(map); | |
5508 | return KERN_INVALID_ADDRESS; | |
5509 | } | |
5510 | ||
1c79356b A |
5511 | need_wakeup = FALSE; |
5512 | while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { | |
5513 | if (entry->in_transition) { | |
5514 | /* | |
5515 | * 1) | |
5516 | * Another thread is wiring down this entry. Note | |
5517 | * that if it were not for the other thread, we would | |
5518 | * be unwiring an unwired entry. This is not | |
5519 | * permitted. If we wait, we will be unwiring memory | |
5520 | * we did not wire. | |
5521 | * | |
5522 | * 2) | |
5523 | * Another thread is unwiring this entry. We did not | |
5524 | * have a reference to it, because if we did, this | |
5525 | * entry would not be getting unwired now. | |
5526 | */ | |
2d21ac55 A |
5527 | if (!user_wire) { |
5528 | /* | |
5529 | * XXX FBDP | |
5530 | * This could happen: there could be some | |
5531 | * overlapping vslock/vsunlock operations | |
5532 | * going on. | |
5533 | * We should probably just wait and retry, | |
5534 | * but then we have to be careful that this | |
5535 | * entry could get "simplified" after | |
5536 | * "in_transition" gets unset and before | |
5537 | * we re-lookup the entry, so we would | |
5538 | * have to re-clip the entry to avoid | |
5539 | * re-unwiring what we have already unwired... | |
5540 | * See vm_map_wire_nested(). | |
5541 | * | |
5542 | * Or we could just ignore "in_transition" | |
5543 | * here and proceed to decrement the wired | |
5544 | * count(s) on this entry. That should be fine | |
5545 | * as long as "wired_count" doesn't drop all | |
5546 | * the way to 0 (and we should panic if THAT | |
5547 | * happens). | |
5548 | */ | |
1c79356b | 5549 | panic("vm_map_unwire: in_transition entry"); |
2d21ac55 | 5550 | } |
1c79356b A |
5551 | |
5552 | entry = entry->vme_next; | |
5553 | continue; | |
5554 | } | |
5555 | ||
2d21ac55 | 5556 | if (entry->is_sub_map) { |
91447636 A |
5557 | vm_map_offset_t sub_start; |
5558 | vm_map_offset_t sub_end; | |
5559 | vm_map_offset_t local_end; | |
1c79356b | 5560 | pmap_t pmap; |
2d21ac55 | 5561 | |
1c79356b A |
5562 | vm_map_clip_start(map, entry, start); |
5563 | vm_map_clip_end(map, entry, end); | |
5564 | ||
3e170ce0 | 5565 | sub_start = VME_OFFSET(entry); |
1c79356b | 5566 | sub_end = entry->vme_end - entry->vme_start; |
3e170ce0 | 5567 | sub_end += VME_OFFSET(entry); |
1c79356b A |
5568 | local_end = entry->vme_end; |
5569 | if(map_pmap == NULL) { | |
2d21ac55 | 5570 | if(entry->use_pmap) { |
3e170ce0 | 5571 | pmap = VME_SUBMAP(entry)->pmap; |
9bccf70c | 5572 | pmap_addr = sub_start; |
2d21ac55 | 5573 | } else { |
1c79356b | 5574 | pmap = map->pmap; |
9bccf70c | 5575 | pmap_addr = start; |
2d21ac55 A |
5576 | } |
5577 | if (entry->wired_count == 0 || | |
5578 | (user_wire && entry->user_wired_count == 0)) { | |
5579 | if (!user_wire) | |
5580 | panic("vm_map_unwire: entry is unwired"); | |
5581 | entry = entry->vme_next; | |
5582 | continue; | |
5583 | } | |
5584 | ||
5585 | /* | |
5586 | * Check for holes | |
5587 | * Holes: Next entry should be contiguous unless | |
5588 | * this is the end of the region. | |
5589 | */ | |
5590 | if (((entry->vme_end < end) && | |
5591 | ((entry->vme_next == vm_map_to_entry(map)) || | |
5592 | (entry->vme_next->vme_start | |
5593 | > entry->vme_end)))) { | |
5594 | if (!user_wire) | |
5595 | panic("vm_map_unwire: non-contiguous region"); | |
1c79356b | 5596 | /* |
2d21ac55 A |
5597 | entry = entry->vme_next; |
5598 | continue; | |
1c79356b | 5599 | */ |
2d21ac55 | 5600 | } |
1c79356b | 5601 | |
2d21ac55 | 5602 | subtract_wire_counts(map, entry, user_wire); |
1c79356b | 5603 | |
2d21ac55 A |
5604 | if (entry->wired_count != 0) { |
5605 | entry = entry->vme_next; | |
5606 | continue; | |
5607 | } | |
1c79356b | 5608 | |
2d21ac55 A |
5609 | entry->in_transition = TRUE; |
5610 | tmp_entry = *entry;/* see comment in vm_map_wire() */ | |
5611 | ||
5612 | /* | |
5613 | * We can unlock the map now. The in_transition state | |
5614 | * guarantees existence of the entry. | |
5615 | */ | |
5616 | vm_map_unlock(map); | |
3e170ce0 | 5617 | vm_map_unwire_nested(VME_SUBMAP(entry), |
2d21ac55 A |
5618 | sub_start, sub_end, user_wire, pmap, pmap_addr); |
5619 | vm_map_lock(map); | |
1c79356b | 5620 | |
2d21ac55 A |
5621 | if (last_timestamp+1 != map->timestamp) { |
5622 | /* | |
5623 | * Find the entry again. It could have been | |
5624 | * clipped or deleted after we unlocked the map. | |
5625 | */ | |
5626 | if (!vm_map_lookup_entry(map, | |
5627 | tmp_entry.vme_start, | |
5628 | &first_entry)) { | |
5629 | if (!user_wire) | |
5630 | panic("vm_map_unwire: re-lookup failed"); | |
5631 | entry = first_entry->vme_next; | |
5632 | } else | |
5633 | entry = first_entry; | |
5634 | } | |
5635 | last_timestamp = map->timestamp; | |
1c79356b | 5636 | |
1c79356b | 5637 | /* |
2d21ac55 A |
5638 | * clear transition bit for all constituent entries |
5639 | * that were in the original entry (saved in | |
5640 | * tmp_entry). Also check for waiters. | |
5641 | */ | |
5642 | while ((entry != vm_map_to_entry(map)) && | |
5643 | (entry->vme_start < tmp_entry.vme_end)) { | |
5644 | assert(entry->in_transition); | |
5645 | entry->in_transition = FALSE; | |
5646 | if (entry->needs_wakeup) { | |
5647 | entry->needs_wakeup = FALSE; | |
5648 | need_wakeup = TRUE; | |
5649 | } | |
5650 | entry = entry->vme_next; | |
1c79356b | 5651 | } |
2d21ac55 | 5652 | continue; |
1c79356b | 5653 | } else { |
2d21ac55 | 5654 | vm_map_unlock(map); |
3e170ce0 | 5655 | vm_map_unwire_nested(VME_SUBMAP(entry), |
2d21ac55 A |
5656 | sub_start, sub_end, user_wire, map_pmap, |
5657 | pmap_addr); | |
5658 | vm_map_lock(map); | |
1c79356b | 5659 | |
2d21ac55 A |
5660 | if (last_timestamp+1 != map->timestamp) { |
5661 | /* | |
5662 | * Find the entry again. It could have been | |
5663 | * clipped or deleted after we unlocked the map. | |
5664 | */ | |
5665 | if (!vm_map_lookup_entry(map, | |
5666 | tmp_entry.vme_start, | |
5667 | &first_entry)) { | |
5668 | if (!user_wire) | |
5669 | panic("vm_map_unwire: re-lookup failed"); | |
5670 | entry = first_entry->vme_next; | |
5671 | } else | |
5672 | entry = first_entry; | |
5673 | } | |
5674 | last_timestamp = map->timestamp; | |
1c79356b A |
5675 | } |
5676 | } | |
5677 | ||
5678 | ||
9bccf70c | 5679 | if ((entry->wired_count == 0) || |
2d21ac55 | 5680 | (user_wire && entry->user_wired_count == 0)) { |
1c79356b A |
5681 | if (!user_wire) |
5682 | panic("vm_map_unwire: entry is unwired"); | |
5683 | ||
5684 | entry = entry->vme_next; | |
5685 | continue; | |
5686 | } | |
2d21ac55 | 5687 | |
1c79356b | 5688 | assert(entry->wired_count > 0 && |
2d21ac55 | 5689 | (!user_wire || entry->user_wired_count > 0)); |
1c79356b A |
5690 | |
5691 | vm_map_clip_start(map, entry, start); | |
5692 | vm_map_clip_end(map, entry, end); | |
5693 | ||
5694 | /* | |
5695 | * Check for holes | |
5696 | * Holes: Next entry should be contiguous unless | |
5697 | * this is the end of the region. | |
5698 | */ | |
5699 | if (((entry->vme_end < end) && | |
2d21ac55 A |
5700 | ((entry->vme_next == vm_map_to_entry(map)) || |
5701 | (entry->vme_next->vme_start > entry->vme_end)))) { | |
1c79356b A |
5702 | |
5703 | if (!user_wire) | |
5704 | panic("vm_map_unwire: non-contiguous region"); | |
5705 | entry = entry->vme_next; | |
5706 | continue; | |
5707 | } | |
5708 | ||
2d21ac55 | 5709 | subtract_wire_counts(map, entry, user_wire); |
1c79356b | 5710 | |
9bccf70c | 5711 | if (entry->wired_count != 0) { |
1c79356b A |
5712 | entry = entry->vme_next; |
5713 | continue; | |
1c79356b A |
5714 | } |
5715 | ||
b0d623f7 A |
5716 | if(entry->zero_wired_pages) { |
5717 | entry->zero_wired_pages = FALSE; | |
5718 | } | |
5719 | ||
1c79356b A |
5720 | entry->in_transition = TRUE; |
5721 | tmp_entry = *entry; /* see comment in vm_map_wire() */ | |
5722 | ||
5723 | /* | |
5724 | * We can unlock the map now. The in_transition state | |
5725 | * guarantees existence of the entry. | |
5726 | */ | |
5727 | vm_map_unlock(map); | |
5728 | if(map_pmap) { | |
9bccf70c | 5729 | vm_fault_unwire(map, |
2d21ac55 | 5730 | &tmp_entry, FALSE, map_pmap, pmap_addr); |
1c79356b | 5731 | } else { |
9bccf70c | 5732 | vm_fault_unwire(map, |
2d21ac55 A |
5733 | &tmp_entry, FALSE, map->pmap, |
5734 | tmp_entry.vme_start); | |
1c79356b A |
5735 | } |
5736 | vm_map_lock(map); | |
5737 | ||
5738 | if (last_timestamp+1 != map->timestamp) { | |
5739 | /* | |
5740 | * Find the entry again. It could have been clipped | |
5741 | * or deleted after we unlocked the map. | |
5742 | */ | |
5743 | if (!vm_map_lookup_entry(map, tmp_entry.vme_start, | |
2d21ac55 | 5744 | &first_entry)) { |
1c79356b | 5745 | if (!user_wire) |
2d21ac55 | 5746 | panic("vm_map_unwire: re-lookup failed"); |
1c79356b A |
5747 | entry = first_entry->vme_next; |
5748 | } else | |
5749 | entry = first_entry; | |
5750 | } | |
5751 | last_timestamp = map->timestamp; | |
5752 | ||
5753 | /* | |
5754 | * clear transition bit for all constituent entries that | |
5755 | * were in the original entry (saved in tmp_entry). Also | |
5756 | * check for waiters. | |
5757 | */ | |
5758 | while ((entry != vm_map_to_entry(map)) && | |
5759 | (entry->vme_start < tmp_entry.vme_end)) { | |
5760 | assert(entry->in_transition); | |
5761 | entry->in_transition = FALSE; | |
5762 | if (entry->needs_wakeup) { | |
5763 | entry->needs_wakeup = FALSE; | |
5764 | need_wakeup = TRUE; | |
5765 | } | |
5766 | entry = entry->vme_next; | |
5767 | } | |
5768 | } | |
91447636 A |
5769 | |
5770 | /* | |
5771 | * We might have fragmented the address space when we wired this | |
5772 | * range of addresses. Attempt to re-coalesce these VM map entries | |
5773 | * with their neighbors now that they're no longer wired. | |
5774 | * Under some circumstances, address space fragmentation can | |
5775 | * prevent VM object shadow chain collapsing, which can cause | |
5776 | * swap space leaks. | |
5777 | */ | |
5778 | vm_map_simplify_range(map, start, end); | |
5779 | ||
1c79356b A |
5780 | vm_map_unlock(map); |
5781 | /* | |
5782 | * wake up anybody waiting on entries that we have unwired. | |
5783 | */ | |
5784 | if (need_wakeup) | |
5785 | vm_map_entry_wakeup(map); | |
5786 | return(KERN_SUCCESS); | |
5787 | ||
5788 | } | |
5789 | ||
5790 | kern_return_t | |
5791 | vm_map_unwire( | |
5792 | register vm_map_t map, | |
91447636 A |
5793 | register vm_map_offset_t start, |
5794 | register vm_map_offset_t end, | |
1c79356b A |
5795 | boolean_t user_wire) |
5796 | { | |
9bccf70c | 5797 | return vm_map_unwire_nested(map, start, end, |
2d21ac55 | 5798 | user_wire, (pmap_t)NULL, 0); |
1c79356b A |
5799 | } |
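/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * With user_wire TRUE, vm_map_unwire() tolerates holes and already
 * unwired entries (the panics above fire only when !user_wire), so a
 * user-level unlock path can pass an imprecise range ("example_map" is
 * hypothetical):
 */
#if 0	/* illustrative only */
static void
example_user_unlock(vm_map_t example_map,
		    vm_map_offset_t addr,
		    vm_map_size_t len)
{
	/* Kernel callers must instead unwire exactly what they wired. */
	(void) vm_map_unwire(example_map, addr, addr + len, TRUE);
}
#endif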
5800 | ||
5801 | ||
5802 | /* | |
5803 | * vm_map_entry_delete: [ internal use only ] | |
5804 | * | |
5805 | * Deallocate the given entry from the target map. | |
5806 | */ | |
91447636 | 5807 | static void |
1c79356b A |
5808 | vm_map_entry_delete( |
5809 | register vm_map_t map, | |
5810 | register vm_map_entry_t entry) | |
5811 | { | |
91447636 | 5812 | register vm_map_offset_t s, e; |
1c79356b A |
5813 | register vm_object_t object; |
5814 | register vm_map_t submap; | |
1c79356b A |
5815 | |
5816 | s = entry->vme_start; | |
5817 | e = entry->vme_end; | |
5818 | assert(page_aligned(s)); | |
5819 | assert(page_aligned(e)); | |
39236c6e A |
5820 | if (entry->map_aligned == TRUE) { |
5821 | assert(VM_MAP_PAGE_ALIGNED(s, VM_MAP_PAGE_MASK(map))); | |
5822 | assert(VM_MAP_PAGE_ALIGNED(e, VM_MAP_PAGE_MASK(map))); | |
5823 | } | |
1c79356b A |
5824 | assert(entry->wired_count == 0); |
5825 | assert(entry->user_wired_count == 0); | |
b0d623f7 | 5826 | assert(!entry->permanent); |
1c79356b A |
5827 | |
5828 | if (entry->is_sub_map) { | |
5829 | object = NULL; | |
3e170ce0 | 5830 | submap = VME_SUBMAP(entry); |
1c79356b A |
5831 | } else { |
5832 | submap = NULL; | |
3e170ce0 | 5833 | object = VME_OBJECT(entry); |
1c79356b A |
5834 | } |
5835 | ||
6d2010ae | 5836 | vm_map_store_entry_unlink(map, entry); |
1c79356b A |
5837 | map->size -= e - s; |
5838 | ||
5839 | vm_map_entry_dispose(map, entry); | |
5840 | ||
5841 | vm_map_unlock(map); | |
5842 | /* | |
5843 | * Deallocate the object only after removing all | |
5844 | * pmap entries pointing to its pages. | |
5845 | */ | |
5846 | if (submap) | |
5847 | vm_map_deallocate(submap); | |
5848 | else | |
2d21ac55 | 5849 | vm_object_deallocate(object); |
1c79356b A |
5850 | |
5851 | } | |
5852 | ||
5853 | void | |
5854 | vm_map_submap_pmap_clean( | |
5855 | vm_map_t map, | |
91447636 A |
5856 | vm_map_offset_t start, |
5857 | vm_map_offset_t end, | |
1c79356b | 5858 | vm_map_t sub_map, |
91447636 | 5859 | vm_map_offset_t offset) |
1c79356b | 5860 | { |
91447636 A |
5861 | vm_map_offset_t submap_start; |
5862 | vm_map_offset_t submap_end; | |
5863 | vm_map_size_t remove_size; | |
1c79356b A |
5864 | vm_map_entry_t entry; |
5865 | ||
5866 | submap_end = offset + (end - start); | |
5867 | submap_start = offset; | |
b7266188 A |
5868 | |
5869 | vm_map_lock_read(sub_map); | |
1c79356b | 5870 | if(vm_map_lookup_entry(sub_map, offset, &entry)) { |
2d21ac55 | 5871 | |
1c79356b A |
5872 | remove_size = (entry->vme_end - entry->vme_start); |
5873 | if(offset > entry->vme_start) | |
5874 | remove_size -= offset - entry->vme_start; | |
2d21ac55 | 5875 | |
1c79356b A |
5876 | |
5877 | if(submap_end < entry->vme_end) { | |
5878 | remove_size -= | |
5879 | entry->vme_end - submap_end; | |
5880 | } | |
5881 | if(entry->is_sub_map) { | |
5882 | vm_map_submap_pmap_clean( | |
5883 | sub_map, | |
5884 | start, | |
5885 | start + remove_size, | |
3e170ce0 A |
5886 | VME_SUBMAP(entry), |
5887 | VME_OFFSET(entry)); | |
1c79356b | 5888 | } else { |
9bccf70c | 5889 | |
316670eb | 5890 | if((map->mapped_in_other_pmaps) && (map->ref_count) |
3e170ce0 A |
5891 | && (VME_OBJECT(entry) != NULL)) { |
5892 | vm_object_pmap_protect_options( | |
5893 | VME_OBJECT(entry), | |
5894 | (VME_OFFSET(entry) + | |
5895 | offset - | |
5896 | entry->vme_start), | |
9bccf70c A |
5897 | remove_size, |
5898 | PMAP_NULL, | |
5899 | entry->vme_start, | |
3e170ce0 A |
5900 | VM_PROT_NONE, |
5901 | PMAP_OPTIONS_REMOVE); | |
9bccf70c A |
5902 | } else { |
5903 | pmap_remove(map->pmap, | |
2d21ac55 A |
5904 | (addr64_t)start, |
5905 | (addr64_t)(start + remove_size)); | |
9bccf70c | 5906 | } |
1c79356b A |
5907 | } |
5908 | } | |
5909 | ||
5910 | entry = entry->vme_next; | |
2d21ac55 | 5911 | |
1c79356b | 5912 | while((entry != vm_map_to_entry(sub_map)) |
2d21ac55 | 5913 | && (entry->vme_start < submap_end)) { |
1c79356b A |
5914 | remove_size = (entry->vme_end - entry->vme_start); |
5915 | if(submap_end < entry->vme_end) { | |
5916 | remove_size -= entry->vme_end - submap_end; | |
5917 | } | |
5918 | if(entry->is_sub_map) { | |
5919 | vm_map_submap_pmap_clean( | |
5920 | sub_map, | |
5921 | (start + entry->vme_start) - offset, | |
5922 | ((start + entry->vme_start) - offset) + remove_size, | |
3e170ce0 A |
5923 | VME_SUBMAP(entry), |
5924 | VME_OFFSET(entry)); | |
1c79356b | 5925 | } else { |
316670eb | 5926 | if((map->mapped_in_other_pmaps) && (map->ref_count) |
3e170ce0 A |
5927 | && (VME_OBJECT(entry) != NULL)) { |
5928 | vm_object_pmap_protect_options( | |
5929 | VME_OBJECT(entry), | |
5930 | VME_OFFSET(entry), | |
9bccf70c A |
5931 | remove_size, |
5932 | PMAP_NULL, | |
5933 | entry->vme_start, | |
3e170ce0 A |
5934 | VM_PROT_NONE, |
5935 | PMAP_OPTIONS_REMOVE); | |
9bccf70c A |
5936 | } else { |
5937 | pmap_remove(map->pmap, | |
2d21ac55 A |
5938 | (addr64_t)((start + entry->vme_start) |
5939 | - offset), | |
5940 | (addr64_t)(((start + entry->vme_start) | |
5941 | - offset) + remove_size)); | |
9bccf70c | 5942 | } |
1c79356b A |
5943 | } |
5944 | entry = entry->vme_next; | |
b7266188 A |
5945 | } |
5946 | vm_map_unlock_read(sub_map); | |
1c79356b A |
5947 | return; |
5948 | } | |
5949 | ||
5950 | /* | |
5951 | * vm_map_delete: [ internal use only ] | |
5952 | * | |
5953 | * Deallocates the given address range from the target map. | |
5954 | * Removes all user wirings. Unwires one kernel wiring if | |
5955 | * VM_MAP_REMOVE_KUNWIRE is set. Waits for kernel wirings to go | |
5956 | * away if VM_MAP_REMOVE_WAIT_FOR_KWIRE is set. Sleeps | |
5957 | * interruptibly if VM_MAP_REMOVE_INTERRUPTIBLE is set. | |
5958 | * | |
5959 | * This routine is called with map locked and leaves map locked. | |
5960 | */ | |
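/*
 * Editor's note -- hypothetical flag illustration, not original source:
 * a kernel caller that holds its own wiring on the range would pass
 * VM_MAP_REMOVE_KUNWIRE so that exactly one kernel wiring is removed
 * instead of being waited on, e.g.
 *	vm_map_delete(map, s, e, VM_MAP_REMOVE_KUNWIRE, VM_MAP_NULL);
 */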
91447636 | 5961 | static kern_return_t |
1c79356b | 5962 | vm_map_delete( |
91447636 A |
5963 | vm_map_t map, |
5964 | vm_map_offset_t start, | |
5965 | vm_map_offset_t end, | |
5966 | int flags, | |
5967 | vm_map_t zap_map) | |
1c79356b A |
5968 | { |
5969 | vm_map_entry_t entry, next; | |
5970 | struct vm_map_entry *first_entry, tmp_entry; | |
2d21ac55 | 5971 | register vm_map_offset_t s; |
1c79356b A |
5972 | register vm_object_t object; |
5973 | boolean_t need_wakeup; | |
5974 | unsigned int last_timestamp = ~0; /* unlikely value */ | |
5975 | int interruptible; | |
1c79356b A |
5976 | |
5977 | interruptible = (flags & VM_MAP_REMOVE_INTERRUPTIBLE) ? | |
2d21ac55 | 5978 | THREAD_ABORTSAFE : THREAD_UNINT; |
1c79356b A |
5979 | |
5980 | /* | |
5981 | * All our DMA I/O operations in IOKit are currently done by | |
5982 | * wiring through the map entries of the task requesting the I/O. | |
5983 | * Because of this, we must always wait for kernel wirings | |
5984 | * to go away on the entries before deleting them. | |
5985 | * | |
5986 | * Any caller who wants to actually remove a kernel wiring | |
5987 | * should explicitly set the VM_MAP_REMOVE_KUNWIRE flag to | |
5988 | * properly remove one wiring instead of blasting through | |
5989 | * them all. | |
5990 | */ | |
5991 | flags |= VM_MAP_REMOVE_WAIT_FOR_KWIRE; | |
5992 | ||
b0d623f7 A |
5993 | while(1) { |
5994 | /* | |
5995 | * Find the start of the region, and clip it | |
5996 | */ | |
5997 | if (vm_map_lookup_entry(map, start, &first_entry)) { | |
5998 | entry = first_entry; | |
fe8ab488 A |
5999 | if (map == kalloc_map && |
6000 | (entry->vme_start != start || | |
6001 | entry->vme_end != end)) { | |
6002 | panic("vm_map_delete(%p,0x%llx,0x%llx): " | |
6003 | "mismatched entry %p [0x%llx:0x%llx]\n", | |
6004 | map, | |
6005 | (uint64_t)start, | |
6006 | (uint64_t)end, | |
6007 | entry, | |
6008 | (uint64_t)entry->vme_start, | |
6009 | (uint64_t)entry->vme_end); | |
6010 | } | |
b0d623f7 A |
6011 | if (entry->superpage_size && (start & ~SUPERPAGE_MASK)) { /* extend request to whole entry */ | |
6012 | start = SUPERPAGE_ROUND_DOWN(start); | |
6013 | continue; | |
6014 | } | |
6015 | if (start == entry->vme_start) { | |
6016 | /* | |
6017 | * No need to clip. We don't want to cause | |
6018 | * any unnecessary unnesting in this case... | |
6019 | */ | |
6020 | } else { | |
fe8ab488 A |
6021 | if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) && |
6022 | entry->map_aligned && | |
6023 | !VM_MAP_PAGE_ALIGNED( | |
6024 | start, | |
6025 | VM_MAP_PAGE_MASK(map))) { | |
6026 | /* | |
6027 | * The entry will no longer be | |
6028 | * map-aligned after clipping | |
6029 | * and the caller said it's OK. | |
6030 | */ | |
6031 | entry->map_aligned = FALSE; | |
6032 | } | |
6033 | if (map == kalloc_map) { | |
6034 | panic("vm_map_delete(%p,0x%llx,0x%llx):" | |
6035 | " clipping %p at 0x%llx\n", | |
6036 | map, | |
6037 | (uint64_t)start, | |
6038 | (uint64_t)end, | |
6039 | entry, | |
6040 | (uint64_t)start); | |
6041 | } | |
b0d623f7 A |
6042 | vm_map_clip_start(map, entry, start); |
6043 | } | |
6044 | ||
2d21ac55 | 6045 | /* |
b0d623f7 A |
6046 | * Fix the lookup hint now, rather than each |
6047 | * time through the loop. | |
2d21ac55 | 6048 | */ |
b0d623f7 | 6049 | SAVE_HINT_MAP_WRITE(map, entry->vme_prev); |
2d21ac55 | 6050 | } else { |
fe8ab488 A |
6051 | if (map->pmap == kernel_pmap && |
6052 | map->ref_count != 0) { | |
6053 | panic("vm_map_delete(%p,0x%llx,0x%llx): " | |
6054 | "no map entry at 0x%llx\n", | |
6055 | map, | |
6056 | (uint64_t)start, | |
6057 | (uint64_t)end, | |
6058 | (uint64_t)start); | |
6059 | } | |
b0d623f7 | 6060 | entry = first_entry->vme_next; |
2d21ac55 | 6061 | } |
b0d623f7 | 6062 | break; |
1c79356b | 6063 | } |
b0d623f7 A |
6064 | if (entry->superpage_size) |
6065 | end = SUPERPAGE_ROUND_UP(end); | |
1c79356b A |
6066 | |
6067 | need_wakeup = FALSE; | |
6068 | /* | |
6069 | * Step through all entries in this region | |
6070 | */ | |
2d21ac55 A |
6071 | s = entry->vme_start; |
6072 | while ((entry != vm_map_to_entry(map)) && (s < end)) { | |
6073 | /* | |
6074 | * At this point, we have deleted all the memory entries | |
6075 | * between "start" and "s". We still need to delete | |
6076 | * all memory entries between "s" and "end". | |
6077 | * While we were blocked and the map was unlocked, some | |
6078 | * new memory entries could have been re-allocated between | |
6079 | * "start" and "s" and we don't want to mess with those. | |
6080 | * Some of those entries could even have been re-assembled | |
6081 | * with an entry after "s" (in vm_map_simplify_entry()), so | |
6082 | * we may have to vm_map_clip_start() again. | |
6083 | */ | |
1c79356b | 6084 | |
2d21ac55 A |
6085 | if (entry->vme_start >= s) { |
6086 | /* | |
6087 | * This entry starts on or after "s" | |
6088 | * so no need to clip its start. | |
6089 | */ | |
6090 | } else { | |
6091 | /* | |
6092 | * This entry has been re-assembled by a | |
6093 | * vm_map_simplify_entry(). We need to | |
6094 | * re-clip its start. | |
6095 | */ | |
fe8ab488 A |
6096 | if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) && |
6097 | entry->map_aligned && | |
6098 | !VM_MAP_PAGE_ALIGNED(s, | |
6099 | VM_MAP_PAGE_MASK(map))) { | |
6100 | /* | |
6101 | * The entry will no longer be map-aligned | |
6102 | * after clipping and the caller said it's OK. | |
6103 | */ | |
6104 | entry->map_aligned = FALSE; | |
6105 | } | |
6106 | if (map == kalloc_map) { | |
6107 | panic("vm_map_delete(%p,0x%llx,0x%llx): " | |
6108 | "clipping %p at 0x%llx\n", | |
6109 | map, | |
6110 | (uint64_t)start, | |
6111 | (uint64_t)end, | |
6112 | entry, | |
6113 | (uint64_t)s); | |
6114 | } | |
2d21ac55 A |
6115 | vm_map_clip_start(map, entry, s); |
6116 | } | |
6117 | if (entry->vme_end <= end) { | |
6118 | /* | |
6119 | * This entry is going away completely, so no need | |
6120 | * to clip and possibly cause an unnecessary unnesting. | |
6121 | */ | |
6122 | } else { | |
fe8ab488 A |
6123 | if ((flags & VM_MAP_REMOVE_NO_MAP_ALIGN) && |
6124 | entry->map_aligned && | |
6125 | !VM_MAP_PAGE_ALIGNED(end, | |
6126 | VM_MAP_PAGE_MASK(map))) { | |
6127 | /* | |
6128 | * The entry will no longer be map-aligned | |
6129 | * after clipping and the caller said it's OK. | |
6130 | */ | |
6131 | entry->map_aligned = FALSE; | |
6132 | } | |
6133 | if (map == kalloc_map) { | |
6134 | panic("vm_map_delete(%p,0x%llx,0x%llx): " | |
6135 | "clipping %p at 0x%llx\n", | |
6136 | map, | |
6137 | (uint64_t)start, | |
6138 | (uint64_t)end, | |
6139 | entry, | |
6140 | (uint64_t)end); | |
6141 | } | |
2d21ac55 A |
6142 | vm_map_clip_end(map, entry, end); |
6143 | } | |
b0d623f7 A |
6144 | |
6145 | if (entry->permanent) { | |
6146 | panic("attempt to remove permanent VM map entry " | |
6147 | "%p [0x%llx:0x%llx]\n", | |
6148 | entry, (uint64_t) s, (uint64_t) end); | |
6149 | } | |
6150 | ||
6151 | ||
1c79356b | 6152 | if (entry->in_transition) { |
9bccf70c A |
6153 | wait_result_t wait_result; |
6154 | ||
1c79356b A |
6155 | /* |
6156 | * Another thread is wiring/unwiring this entry. | |
6157 | * Let the other thread know we are waiting. | |
6158 | */ | |
2d21ac55 | 6159 | assert(s == entry->vme_start); |
1c79356b A |
6160 | entry->needs_wakeup = TRUE; |
6161 | ||
6162 | /* | |
6163 | * wake up anybody waiting on entries that we have | |
6164 | * already unwired/deleted. | |
6165 | */ | |
6166 | if (need_wakeup) { | |
6167 | vm_map_entry_wakeup(map); | |
6168 | need_wakeup = FALSE; | |
6169 | } | |
6170 | ||
9bccf70c | 6171 | wait_result = vm_map_entry_wait(map, interruptible); |
1c79356b A |
6172 | |
6173 | if (interruptible && | |
9bccf70c | 6174 | wait_result == THREAD_INTERRUPTED) { |
1c79356b A |
6175 | /* |
6176 | * We do not clear the needs_wakeup flag, | |
6177 | * since we cannot tell if we were the only one. | |
6178 | */ | |
6179 | return KERN_ABORTED; | |
9bccf70c | 6180 | } |
1c79356b A |
6181 | |
6182 | /* | |
6183 | * The entry could have been clipped or it | |
6184 | * may not exist anymore. Look it up again. | |
6185 | */ | |
6186 | if (!vm_map_lookup_entry(map, s, &first_entry)) { | |
1c79356b A |
6187 | /* |
6188 | * User: use the next entry | |
6189 | */ | |
6190 | entry = first_entry->vme_next; | |
2d21ac55 | 6191 | s = entry->vme_start; |
1c79356b A |
6192 | } else { |
6193 | entry = first_entry; | |
0c530ab8 | 6194 | SAVE_HINT_MAP_WRITE(map, entry->vme_prev); |
1c79356b | 6195 | } |
9bccf70c | 6196 | last_timestamp = map->timestamp; |
1c79356b A |
6197 | continue; |
6198 | } /* end in_transition */ | |
6199 | ||
6200 | if (entry->wired_count) { | |
2d21ac55 A |
6201 | boolean_t user_wire; |
6202 | ||
6203 | user_wire = entry->user_wired_count > 0; | |
6204 | ||
1c79356b | 6205 | /* |
b0d623f7 | 6206 | * Remove a kernel wiring if requested |
1c79356b | 6207 | */ |
b0d623f7 | 6208 | if (flags & VM_MAP_REMOVE_KUNWIRE) { |
1c79356b | 6209 | entry->wired_count--; |
b0d623f7 A |
6210 | } |
6211 | ||
6212 | /* | |
6213 | * Remove all user wirings for proper accounting | |
6214 | */ | |
6215 | if (entry->user_wired_count > 0) { | |
6216 | while (entry->user_wired_count) | |
6217 | subtract_wire_counts(map, entry, user_wire); | |
6218 | } | |
1c79356b A |
6219 | |
6220 | if (entry->wired_count != 0) { | |
2d21ac55 | 6221 | assert(map != kernel_map); |
1c79356b A |
6222 | /* |
6223 | * Cannot continue. Typical case is when | |
6224 | * a user thread has physical I/O pending
6225 | * on this page. Either wait for the
6226 | * kernel wiring to go away or return an | |
6227 | * error. | |
6228 | */ | |
6229 | if (flags & VM_MAP_REMOVE_WAIT_FOR_KWIRE) { | |
9bccf70c | 6230 | wait_result_t wait_result; |
1c79356b | 6231 | |
2d21ac55 | 6232 | assert(s == entry->vme_start); |
1c79356b | 6233 | entry->needs_wakeup = TRUE; |
9bccf70c | 6234 | wait_result = vm_map_entry_wait(map, |
2d21ac55 | 6235 | interruptible); |
1c79356b A |
6236 | |
6237 | if (interruptible && | |
2d21ac55 | 6238 | wait_result == THREAD_INTERRUPTED) { |
1c79356b | 6239 | /* |
2d21ac55 | 6240 | * We do not clear the |
1c79356b A |
6241 | * needs_wakeup flag, since we |
6242 | * cannot tell if we were the | |
6243 | * only one. | |
2d21ac55 | 6244 | */ |
1c79356b | 6245 | return KERN_ABORTED; |
9bccf70c | 6246 | } |
1c79356b A |
6247 | |
6248 | /* | |
2d21ac55 | 6249 | * The entry could have been clipped or |
1c79356b A |
6250 | * it may not exist anymore. Look it |
6251 | * up again. | |
2d21ac55 | 6252 | */ |
1c79356b | 6253 | if (!vm_map_lookup_entry(map, s, |
2d21ac55 A |
6254 | &first_entry)) { |
6255 | assert(map != kernel_map); | |
1c79356b | 6256 | /* |
2d21ac55 A |
6257 | * User: use the next entry |
6258 | */ | |
1c79356b | 6259 | entry = first_entry->vme_next; |
2d21ac55 | 6260 | s = entry->vme_start; |
1c79356b A |
6261 | } else { |
6262 | entry = first_entry; | |
0c530ab8 | 6263 | SAVE_HINT_MAP_WRITE(map, entry->vme_prev); |
1c79356b | 6264 | } |
9bccf70c | 6265 | last_timestamp = map->timestamp; |
1c79356b A |
6266 | continue; |
6267 | } | |
6268 | else { | |
6269 | return KERN_FAILURE; | |
6270 | } | |
6271 | } | |
6272 | ||
6273 | entry->in_transition = TRUE; | |
6274 | /* | |
6275 | * copy current entry. see comment in vm_map_wire() | |
6276 | */ | |
6277 | tmp_entry = *entry; | |
2d21ac55 | 6278 | assert(s == entry->vme_start); |
1c79356b A |
6279 | |
6280 | /* | |
6281 | * We can unlock the map now. The in_transition | |
6282 | * state guarantees existence of the entry.
6283 | */ | |
6284 | vm_map_unlock(map); | |
2d21ac55 A |
6285 | |
6286 | if (tmp_entry.is_sub_map) { | |
6287 | vm_map_t sub_map; | |
6288 | vm_map_offset_t sub_start, sub_end; | |
6289 | pmap_t pmap; | |
6290 | vm_map_offset_t pmap_addr; | |
6291 | ||
6292 | ||
3e170ce0 A |
6293 | sub_map = VME_SUBMAP(&tmp_entry); |
6294 | sub_start = VME_OFFSET(&tmp_entry); | |
2d21ac55 A |
6295 | sub_end = sub_start + (tmp_entry.vme_end - |
6296 | tmp_entry.vme_start); | |
6297 | if (tmp_entry.use_pmap) { | |
6298 | pmap = sub_map->pmap; | |
6299 | pmap_addr = tmp_entry.vme_start; | |
6300 | } else { | |
6301 | pmap = map->pmap; | |
6302 | pmap_addr = tmp_entry.vme_start; | |
6303 | } | |
6304 | (void) vm_map_unwire_nested(sub_map, | |
6305 | sub_start, sub_end, | |
6306 | user_wire, | |
6307 | pmap, pmap_addr); | |
6308 | } else { | |
6309 | ||
3e170ce0 | 6310 | if (VME_OBJECT(&tmp_entry) == kernel_object) { |
39236c6e A |
6311 | pmap_protect_options( |
6312 | map->pmap, | |
6313 | tmp_entry.vme_start, | |
6314 | tmp_entry.vme_end, | |
6315 | VM_PROT_NONE, | |
6316 | PMAP_OPTIONS_REMOVE, | |
6317 | NULL); | |
6318 | } | |
2d21ac55 | 6319 | vm_fault_unwire(map, &tmp_entry, |
3e170ce0 | 6320 | VME_OBJECT(&tmp_entry) == kernel_object, |
2d21ac55 A |
6321 | map->pmap, tmp_entry.vme_start); |
6322 | } | |
6323 | ||
1c79356b A |
6324 | vm_map_lock(map); |
6325 | ||
6326 | if (last_timestamp+1 != map->timestamp) { | |
6327 | /* | |
6328 | * Find the entry again. It could have | |
6329 | * been clipped after we unlocked the map. | |
6330 | */ | |
6331 | if (!vm_map_lookup_entry(map, s, &first_entry)){ | |
6332 | assert((map != kernel_map) && | |
2d21ac55 | 6333 | (!entry->is_sub_map)); |
1c79356b | 6334 | first_entry = first_entry->vme_next; |
2d21ac55 | 6335 | s = first_entry->vme_start; |
1c79356b | 6336 | } else { |
0c530ab8 | 6337 | SAVE_HINT_MAP_WRITE(map, entry->vme_prev); |
1c79356b A |
6338 | } |
6339 | } else { | |
0c530ab8 | 6340 | SAVE_HINT_MAP_WRITE(map, entry->vme_prev); |
1c79356b A |
6341 | first_entry = entry; |
6342 | } | |
6343 | ||
6344 | last_timestamp = map->timestamp; | |
6345 | ||
6346 | entry = first_entry; | |
6347 | while ((entry != vm_map_to_entry(map)) && | |
6348 | (entry->vme_start < tmp_entry.vme_end)) { | |
6349 | assert(entry->in_transition); | |
6350 | entry->in_transition = FALSE; | |
6351 | if (entry->needs_wakeup) { | |
6352 | entry->needs_wakeup = FALSE; | |
6353 | need_wakeup = TRUE; | |
6354 | } | |
6355 | entry = entry->vme_next; | |
6356 | } | |
6357 | /* | |
6358 | * We have unwired the entry(s). Go back and | |
6359 | * delete them. | |
6360 | */ | |
6361 | entry = first_entry; | |
6362 | continue; | |
6363 | } | |
6364 | ||
6365 | /* entry is unwired */ | |
6366 | assert(entry->wired_count == 0); | |
6367 | assert(entry->user_wired_count == 0); | |
6368 | ||
2d21ac55 A |
6369 | assert(s == entry->vme_start); |
6370 | ||
6371 | if (flags & VM_MAP_REMOVE_NO_PMAP_CLEANUP) { | |
6372 | /* | |
6373 | * XXX with the VM_MAP_REMOVE_SAVE_ENTRIES flag to | |
6374 | * vm_map_delete(), some map entries might have been | |
6375 | * transferred to a "zap_map", which doesn't have a | |
6376 | * pmap. The original pmap has already been flushed | |
6377 | * in the vm_map_delete() call targeting the original | |
6378 | * map, but when we get to destroying the "zap_map", | |
6379 | * we don't have any pmap to flush, so let's just skip | |
6380 | * all this. (An illustrative sketch of this "zap map" pattern follows this function.)
6381 | */ | |
6382 | } else if (entry->is_sub_map) { | |
6383 | if (entry->use_pmap) { | |
0c530ab8 | 6384 | #ifndef NO_NESTED_PMAP |
3e170ce0 A |
6385 | int pmap_flags; |
6386 | ||
6387 | if (flags & VM_MAP_REMOVE_NO_UNNESTING) { | |
6388 | /* | |
6389 | * This is the final cleanup of the | |
6390 | * address space being terminated. | |
6391 | * No new mappings are expected and | |
6392 | * we don't really need to unnest the | |
6393 | * shared region (and lose the "global" | |
6394 | * pmap mappings, if applicable). | |
6395 | * | |
6396 | * Tell the pmap layer that we're | |
6397 | * "clean" wrt nesting. | |
6398 | */ | |
6399 | pmap_flags = PMAP_UNNEST_CLEAN; | |
6400 | } else { | |
6401 | /* | |
6402 | * We're unmapping part of the nested | |
6403 | * shared region, so we can't keep the | |
6404 | * nested pmap. | |
6405 | */ | |
6406 | pmap_flags = 0; | |
6407 | } | |
6408 | pmap_unnest_options( | |
6409 | map->pmap, | |
6410 | (addr64_t)entry->vme_start, | |
6411 | entry->vme_end - entry->vme_start, | |
6412 | pmap_flags); | |
0c530ab8 | 6413 | #endif /* NO_NESTED_PMAP */ |
316670eb | 6414 | if ((map->mapped_in_other_pmaps) && (map->ref_count)) { |
9bccf70c A |
6415 | /* clean up parent map/maps */ |
6416 | vm_map_submap_pmap_clean( | |
6417 | map, entry->vme_start, | |
6418 | entry->vme_end, | |
3e170ce0 A |
6419 | VME_SUBMAP(entry), |
6420 | VME_OFFSET(entry)); | |
9bccf70c | 6421 | } |
2d21ac55 | 6422 | } else { |
1c79356b A |
6423 | vm_map_submap_pmap_clean( |
6424 | map, entry->vme_start, entry->vme_end, | |
3e170ce0 A |
6425 | VME_SUBMAP(entry), |
6426 | VME_OFFSET(entry)); | |
2d21ac55 | 6427 | } |
3e170ce0 A |
6428 | } else if (VME_OBJECT(entry) != kernel_object && |
6429 | VME_OBJECT(entry) != compressor_object) { | |
6430 | object = VME_OBJECT(entry); | |
39236c6e A |
6431 | if ((map->mapped_in_other_pmaps) && (map->ref_count)) { |
6432 | vm_object_pmap_protect_options( | |
3e170ce0 | 6433 | object, VME_OFFSET(entry), |
55e303ae A |
6434 | entry->vme_end - entry->vme_start, |
6435 | PMAP_NULL, | |
6436 | entry->vme_start, | |
39236c6e A |
6437 | VM_PROT_NONE, |
6438 | PMAP_OPTIONS_REMOVE); | |
3e170ce0 | 6439 | } else if ((VME_OBJECT(entry) != VM_OBJECT_NULL) || |
39236c6e A |
6440 | (map->pmap == kernel_pmap)) { |
6441 | /* Remove translations associated
6442 | * with this range unless the entry
6443 | * does not have an object; but do it
6444 | * anyway for the kernel map or a
6445 | * descendant, since the platform
6446 | * could potentially create "backdoor"
6447 | * mappings invisible to the VM. It is
6448 | * expected that objectless, non-kernel
6449 | * ranges do not have such VM-invisible
6450 | * translations.
6451 | */ | |
6452 | pmap_remove_options(map->pmap, | |
6453 | (addr64_t)entry->vme_start, | |
6454 | (addr64_t)entry->vme_end, | |
6455 | PMAP_OPTIONS_REMOVE); | |
1c79356b A |
6456 | } |
6457 | } | |
6458 | ||
fe8ab488 A |
6459 | if (entry->iokit_acct) { |
6460 | /* alternate accounting */ | |
6461 | vm_map_iokit_unmapped_region(map, | |
6462 | (entry->vme_end - | |
6463 | entry->vme_start)); | |
6464 | entry->iokit_acct = FALSE; | |
6465 | } | |
6466 | ||
91447636 A |
6467 | /* |
6468 | * All pmap mappings for this map entry must have been | |
6469 | * cleared by now. | |
6470 | */ | |
fe8ab488 | 6471 | #if DEBUG |
91447636 A |
6472 | assert(vm_map_pmap_is_empty(map, |
6473 | entry->vme_start, | |
6474 | entry->vme_end)); | |
fe8ab488 | 6475 | #endif /* DEBUG */ |
91447636 | 6476 | |
1c79356b | 6477 | next = entry->vme_next; |
fe8ab488 A |
6478 | |
6479 | if (map->pmap == kernel_pmap && | |
6480 | map->ref_count != 0 && | |
6481 | entry->vme_end < end && | |
6482 | (next == vm_map_to_entry(map) || | |
6483 | next->vme_start != entry->vme_end)) { | |
6484 | panic("vm_map_delete(%p,0x%llx,0x%llx): " | |
6485 | "hole after %p at 0x%llx\n", | |
6486 | map, | |
6487 | (uint64_t)start, | |
6488 | (uint64_t)end, | |
6489 | entry, | |
6490 | (uint64_t)entry->vme_end); | |
6491 | } | |
6492 | ||
1c79356b A |
6493 | s = next->vme_start; |
6494 | last_timestamp = map->timestamp; | |
91447636 A |
6495 | |
6496 | if ((flags & VM_MAP_REMOVE_SAVE_ENTRIES) && | |
6497 | zap_map != VM_MAP_NULL) { | |
2d21ac55 | 6498 | vm_map_size_t entry_size; |
91447636 A |
6499 | /* |
6500 | * The caller wants to save the affected VM map entries | |
6501 | * into the "zap_map". The caller will take care of | |
6502 | * these entries. | |
6503 | */ | |
6504 | /* unlink the entry from "map" ... */ | |
6d2010ae | 6505 | vm_map_store_entry_unlink(map, entry); |
91447636 | 6506 | /* ... and add it to the end of the "zap_map" */ |
6d2010ae | 6507 | vm_map_store_entry_link(zap_map, |
91447636 A |
6508 | vm_map_last_entry(zap_map), |
6509 | entry); | |
2d21ac55 A |
6510 | entry_size = entry->vme_end - entry->vme_start; |
6511 | map->size -= entry_size; | |
6512 | zap_map->size += entry_size; | |
6513 | /* we didn't unlock the map, so no timestamp increase */ | |
6514 | last_timestamp--; | |
91447636 A |
6515 | } else { |
6516 | vm_map_entry_delete(map, entry); | |
6517 | /* vm_map_entry_delete unlocks the map */ | |
6518 | vm_map_lock(map); | |
6519 | } | |
6520 | ||
1c79356b A |
6521 | entry = next; |
6522 | ||
6523 | if(entry == vm_map_to_entry(map)) { | |
6524 | break; | |
6525 | } | |
6526 | if (last_timestamp+1 != map->timestamp) { | |
6527 | /* | |
6528 | * We are responsible for deleting everything
6529 | * from the given space; if someone has interfered,
6530 | * we pick up where we left off. Back fills should
6531 | * be all right for anyone except map_delete, and
6532 | * we have to assume that the task has been fully
6533 | * disabled before we get here.
6534 | */ | |
6535 | if (!vm_map_lookup_entry(map, s, &entry)){ | |
6536 | entry = entry->vme_next; | |
2d21ac55 | 6537 | s = entry->vme_start; |
1c79356b | 6538 | } else { |
2d21ac55 | 6539 | SAVE_HINT_MAP_WRITE(map, entry->vme_prev); |
1c79356b A |
6540 | } |
6541 | /* | |
6542 | * Others can not only allocate behind us, we can
6543 | * also see coalescing while we don't have the map lock.
6544 | */ | |
6545 | if(entry == vm_map_to_entry(map)) { | |
6546 | break; | |
6547 | } | |
1c79356b A |
6548 | } |
6549 | last_timestamp = map->timestamp; | |
6550 | } | |
6551 | ||
6552 | if (map->wait_for_space) | |
6553 | thread_wakeup((event_t) map); | |
6554 | /* | |
6555 | * wake up anybody waiting on entries that we have already deleted. | |
6556 | */ | |
6557 | if (need_wakeup) | |
6558 | vm_map_entry_wakeup(map); | |
6559 | ||
6560 | return KERN_SUCCESS; | |
6561 | } | |
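/*
 * Illustrative sketch (not part of the original file, not compiled):
 * the "zap map" pattern referenced in the VM_MAP_REMOVE_SAVE_ENTRIES
 * comments above.  A caller parks the doomed entries in a pmap-less
 * temporary map so their teardown can happen after the target map is
 * unlocked.  The helper name and vm_map_create() parameters are
 * assumptions for the example.
 */
#if 0
static void
example_zap_range(
	vm_map_t	map,
	vm_map_offset_t	start,
	vm_map_offset_t	end)
{
	vm_map_t	zap_map;

	/* a map with no pmap of its own, just to hold the entries */
	zap_map = vm_map_create(PMAP_NULL, start, end,
				map->hdr.entries_pageable);

	vm_map_lock(map);
	(void) vm_map_delete(map, start, end,
			     VM_MAP_REMOVE_SAVE_ENTRIES, zap_map);
	vm_map_unlock(map);

	/*
	 * The original pmap was flushed by vm_map_delete() above;
	 * the zap map has none, so skip the pmap cleanup here.
	 */
	vm_map_destroy(zap_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP);
}
#endif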
6562 | ||
6563 | /* | |
6564 | * vm_map_remove: | |
6565 | * | |
6566 | * Remove the given address range from the target map. | |
6567 | * This is the exported form of vm_map_delete. | |
6568 | */ | |
6569 | kern_return_t | |
6570 | vm_map_remove( | |
6571 | register vm_map_t map, | |
91447636 A |
6572 | register vm_map_offset_t start, |
6573 | register vm_map_offset_t end, | |
1c79356b A |
6574 | register boolean_t flags) |
6575 | { | |
6576 | register kern_return_t result; | |
9bccf70c | 6577 | |
1c79356b A |
6578 | vm_map_lock(map); |
6579 | VM_MAP_RANGE_CHECK(map, start, end); | |
39236c6e A |
6580 | /* |
6581 | * For the zone_map, the kernel controls the allocation/freeing of memory. | |
6582 | * Any free to the zone_map should be within the bounds of the map and | |
6583 | * should free up memory. If the VM_MAP_RANGE_CHECK() silently converts a | |
6584 | * free to the zone_map into a no-op, there is a problem and we should | |
6585 | * panic. | |
6586 | */ | |
6587 | if ((map == zone_map) && (start == end)) | |
6588 | panic("Nothing being freed to the zone_map. start = end = %p\n", (void *)start); | |
91447636 | 6589 | result = vm_map_delete(map, start, end, flags, VM_MAP_NULL); |
1c79356b | 6590 | vm_map_unlock(map); |
91447636 | 6591 | |
1c79356b A |
6592 | return(result); |
6593 | } | |
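/*
 * Usage sketch (illustrative only): a kernel caller typically rounds
 * the range to the map's page size and, for wired kernel memory, asks
 * vm_map_remove() to drop the kernel wiring in the same pass.  The
 * helper name and its arguments are hypothetical.
 */
#if 0
static void
example_remove_wired_kernel_range(
	vm_map_offset_t	addr,
	vm_map_size_t	size)
{
	kern_return_t kr;

	kr = vm_map_remove(kernel_map,
			   vm_map_trunc_page(addr,
					     VM_MAP_PAGE_MASK(kernel_map)),
			   vm_map_round_page(addr + size,
					     VM_MAP_PAGE_MASK(kernel_map)),
			   VM_MAP_REMOVE_KUNWIRE);
	assert(kr == KERN_SUCCESS);
}
#endif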
6594 | ||
6595 | ||
1c79356b A |
6596 | /* |
6597 | * Routine: vm_map_copy_discard | |
6598 | * | |
6599 | * Description: | |
6600 | * Dispose of a map copy object (returned by | |
6601 | * vm_map_copyin). | |
6602 | */ | |
6603 | void | |
6604 | vm_map_copy_discard( | |
6605 | vm_map_copy_t copy) | |
6606 | { | |
1c79356b A |
6607 | if (copy == VM_MAP_COPY_NULL) |
6608 | return; | |
6609 | ||
6610 | switch (copy->type) { | |
6611 | case VM_MAP_COPY_ENTRY_LIST: | |
6612 | while (vm_map_copy_first_entry(copy) != | |
2d21ac55 | 6613 | vm_map_copy_to_entry(copy)) { |
1c79356b A |
6614 | vm_map_entry_t entry = vm_map_copy_first_entry(copy); |
6615 | ||
6616 | vm_map_copy_entry_unlink(copy, entry); | |
39236c6e | 6617 | if (entry->is_sub_map) { |
3e170ce0 | 6618 | vm_map_deallocate(VME_SUBMAP(entry)); |
39236c6e | 6619 | } else { |
3e170ce0 | 6620 | vm_object_deallocate(VME_OBJECT(entry)); |
39236c6e | 6621 | } |
1c79356b A |
6622 | vm_map_copy_entry_dispose(copy, entry); |
6623 | } | |
6624 | break; | |
6625 | case VM_MAP_COPY_OBJECT: | |
6626 | vm_object_deallocate(copy->cpy_object); | |
6627 | break; | |
1c79356b A |
6628 | case VM_MAP_COPY_KERNEL_BUFFER: |
6629 | ||
6630 | /* | |
6631 | * The vm_map_copy_t and possibly the data buffer were | |
6632 | * allocated by a single call to kalloc(), i.e. the | |
6633 | * vm_map_copy_t was not allocated out of the zone. (A layout sketch follows this function.)
6634 | */ | |
3e170ce0 A |
6635 | if (copy->size > msg_ool_size_small || copy->offset) |
6636 | panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld", | |
6637 | (long long)copy->size, (long long)copy->offset); | |
6638 | kfree(copy, copy->size + cpy_kdata_hdr_sz); | |
1c79356b A |
6639 | return; |
6640 | } | |
91447636 | 6641 | zfree(vm_map_copy_zone, copy); |
1c79356b A |
6642 | } |
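/*
 * Layout sketch (added for illustration) of the
 * VM_MAP_COPY_KERNEL_BUFFER case freed above: one kalloc() block
 * holds the copy header immediately followed by the inline data,
 * which is why a single kfree() of (copy->size + cpy_kdata_hdr_sz)
 * releases both.
 *
 *	+----------------------+------------------------+
 *	| vm_map_copy_t header | inline data            |
 *	| (cpy_kdata_hdr_sz)   | (copy->size bytes)     |
 *	+----------------------+------------------------+
 */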
6643 | ||
6644 | /* | |
6645 | * Routine: vm_map_copy_copy | |
6646 | * | |
6647 | * Description: | |
6648 | * Move the information in a map copy object to | |
6649 | * a new map copy object, leaving the old one | |
6650 | * empty. | |
6651 | * | |
6652 | * This is used by kernel routines that need | |
6653 | * to look at out-of-line data (in copyin form) | |
6654 | * before deciding whether to return SUCCESS. | |
6655 | * If the routine returns FAILURE, the original | |
6656 | * copy object will be deallocated; therefore, | |
6657 | * these routines must make a copy of the copy | |
6658 | * object and leave the original empty so that | |
6659 | * deallocation will not fail. | |
6660 | */ | |
6661 | vm_map_copy_t | |
6662 | vm_map_copy_copy( | |
6663 | vm_map_copy_t copy) | |
6664 | { | |
6665 | vm_map_copy_t new_copy; | |
6666 | ||
6667 | if (copy == VM_MAP_COPY_NULL) | |
6668 | return VM_MAP_COPY_NULL; | |
6669 | ||
6670 | /* | |
6671 | * Allocate a new copy object, and copy the information | |
6672 | * from the old one into it. | |
6673 | */ | |
6674 | ||
6675 | new_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); | |
04b8595b | 6676 | new_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE; |
1c79356b A |
6677 | *new_copy = *copy; |
6678 | ||
6679 | if (copy->type == VM_MAP_COPY_ENTRY_LIST) { | |
6680 | /* | |
6681 | * The links in the entry chain must be | |
6682 | * changed to point to the new copy object. | |
6683 | */ | |
6684 | vm_map_copy_first_entry(copy)->vme_prev | |
6685 | = vm_map_copy_to_entry(new_copy); | |
6686 | vm_map_copy_last_entry(copy)->vme_next | |
6687 | = vm_map_copy_to_entry(new_copy); | |
6688 | } | |
6689 | ||
6690 | /* | |
6691 | * Change the old copy object into one that contains | |
6692 | * nothing to be deallocated. | |
6693 | */ | |
6694 | copy->type = VM_MAP_COPY_OBJECT; | |
6695 | copy->cpy_object = VM_OBJECT_NULL; | |
6696 | ||
6697 | /* | |
6698 | * Return the new object. | |
6699 | */ | |
6700 | return new_copy; | |
6701 | } | |
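/*
 * Pattern sketch (illustrative, not compiled): a kernel routine that
 * must inspect out-of-line data before committing takes a private
 * copy of the copy object, as described above.  On failure the
 * original -- emptied by vm_map_copy_copy() -- can still be discarded
 * safely by the generic error path.  "examine_ool_data()" is a
 * hypothetical helper, not a real API.
 */
#if 0
static kern_return_t
example_inspect_then_consume(
	vm_map_copy_t	copy)
{
	vm_map_copy_t	private_copy;
	kern_return_t	kr;

	private_copy = vm_map_copy_copy(copy);
	kr = examine_ool_data(private_copy);
	if (kr != KERN_SUCCESS) {
		/* dispose of the moved contents ourselves... */
		vm_map_copy_discard(private_copy);
		/* ...the caller may still discard the emptied "copy" */
		return kr;
	}
	/* success: go on to consume "private_copy" */
	return KERN_SUCCESS;
}
#endif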
6702 | ||
91447636 | 6703 | static kern_return_t |
1c79356b A |
6704 | vm_map_overwrite_submap_recurse( |
6705 | vm_map_t dst_map, | |
91447636 A |
6706 | vm_map_offset_t dst_addr, |
6707 | vm_map_size_t dst_size) | |
1c79356b | 6708 | { |
91447636 | 6709 | vm_map_offset_t dst_end; |
1c79356b A |
6710 | vm_map_entry_t tmp_entry; |
6711 | vm_map_entry_t entry; | |
6712 | kern_return_t result; | |
6713 | boolean_t encountered_sub_map = FALSE; | |
6714 | ||
6715 | ||
6716 | ||
6717 | /* | |
6718 | * Verify that the destination is all writeable | |
6719 | * initially. We have to trunc the destination | |
6720 | * address and round the copy size or we'll end up | |
6721 | * splitting entries in strange ways. | |
6722 | */ | |
6723 | ||
39236c6e A |
6724 | dst_end = vm_map_round_page(dst_addr + dst_size, |
6725 | VM_MAP_PAGE_MASK(dst_map)); | |
9bccf70c | 6726 | vm_map_lock(dst_map); |
1c79356b A |
6727 | |
6728 | start_pass_1: | |
1c79356b A |
6729 | if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { |
6730 | vm_map_unlock(dst_map); | |
6731 | return(KERN_INVALID_ADDRESS); | |
6732 | } | |
6733 | ||
39236c6e A |
6734 | vm_map_clip_start(dst_map, |
6735 | tmp_entry, | |
6736 | vm_map_trunc_page(dst_addr, | |
6737 | VM_MAP_PAGE_MASK(dst_map))); | |
fe8ab488 A |
6738 | if (tmp_entry->is_sub_map) { |
6739 | /* clipping did unnest if needed */ | |
6740 | assert(!tmp_entry->use_pmap); | |
6741 | } | |
1c79356b A |
6742 | |
6743 | for (entry = tmp_entry;;) { | |
6744 | vm_map_entry_t next; | |
6745 | ||
6746 | next = entry->vme_next; | |
6747 | while(entry->is_sub_map) { | |
91447636 A |
6748 | vm_map_offset_t sub_start; |
6749 | vm_map_offset_t sub_end; | |
6750 | vm_map_offset_t local_end; | |
1c79356b A |
6751 | |
6752 | if (entry->in_transition) { | |
2d21ac55 A |
6753 | /* |
6754 | * Say that we are waiting, and wait for entry. | |
6755 | */ | |
1c79356b A |
6756 | entry->needs_wakeup = TRUE; |
6757 | vm_map_entry_wait(dst_map, THREAD_UNINT); | |
6758 | ||
6759 | goto start_pass_1; | |
6760 | } | |
6761 | ||
6762 | encountered_sub_map = TRUE; | |
3e170ce0 | 6763 | sub_start = VME_OFFSET(entry); |
1c79356b A |
6764 | |
6765 | if(entry->vme_end < dst_end) | |
6766 | sub_end = entry->vme_end; | |
6767 | else | |
6768 | sub_end = dst_end; | |
6769 | sub_end -= entry->vme_start; | |
3e170ce0 | 6770 | sub_end += VME_OFFSET(entry); |
1c79356b A |
6771 | local_end = entry->vme_end; |
6772 | vm_map_unlock(dst_map); | |
6773 | ||
6774 | result = vm_map_overwrite_submap_recurse( | |
3e170ce0 | 6775 | VME_SUBMAP(entry), |
2d21ac55 A |
6776 | sub_start, |
6777 | sub_end - sub_start); | |
1c79356b A |
6778 | |
6779 | if(result != KERN_SUCCESS) | |
6780 | return result; | |
6781 | if (dst_end <= entry->vme_end) | |
6782 | return KERN_SUCCESS; | |
6783 | vm_map_lock(dst_map); | |
6784 | if(!vm_map_lookup_entry(dst_map, local_end, | |
6785 | &tmp_entry)) { | |
6786 | vm_map_unlock(dst_map); | |
6787 | return(KERN_INVALID_ADDRESS); | |
6788 | } | |
6789 | entry = tmp_entry; | |
6790 | next = entry->vme_next; | |
6791 | } | |
6792 | ||
6793 | if ( ! (entry->protection & VM_PROT_WRITE)) { | |
6794 | vm_map_unlock(dst_map); | |
6795 | return(KERN_PROTECTION_FAILURE); | |
6796 | } | |
6797 | ||
6798 | /* | |
6799 | * If the entry is in transition, we must wait | |
6800 | * for it to exit that state. Anything could happen | |
6801 | * when we unlock the map, so start over. | |
6802 | */ | |
6803 | if (entry->in_transition) { | |
6804 | ||
6805 | /* | |
6806 | * Say that we are waiting, and wait for entry. | |
6807 | */ | |
6808 | entry->needs_wakeup = TRUE; | |
6809 | vm_map_entry_wait(dst_map, THREAD_UNINT); | |
6810 | ||
6811 | goto start_pass_1; | |
6812 | } | |
6813 | ||
6814 | /* | |
6815 | * our range is contained completely within this map entry | |
6816 | */ | |
6817 | if (dst_end <= entry->vme_end) { | |
6818 | vm_map_unlock(dst_map); | |
6819 | return KERN_SUCCESS; | |
6820 | } | |
6821 | /* | |
6822 | * check that the specified range is a contiguous region
6823 | */ | |
6824 | if ((next == vm_map_to_entry(dst_map)) || | |
6825 | (next->vme_start != entry->vme_end)) { | |
6826 | vm_map_unlock(dst_map); | |
6827 | return(KERN_INVALID_ADDRESS); | |
6828 | } | |
6829 | ||
6830 | /* | |
6831 | * Check for permanent objects in the destination. | |
6832 | */ | |
3e170ce0 A |
6833 | if ((VME_OBJECT(entry) != VM_OBJECT_NULL) && |
6834 | ((!VME_OBJECT(entry)->internal) || | |
6835 | (VME_OBJECT(entry)->true_share))) { | |
1c79356b A |
6836 | if(encountered_sub_map) { |
6837 | vm_map_unlock(dst_map); | |
6838 | return(KERN_FAILURE); | |
6839 | } | |
6840 | } | |
6841 | ||
6842 | ||
6843 | entry = next; | |
6844 | }/* for */ | |
6845 | vm_map_unlock(dst_map); | |
6846 | return(KERN_SUCCESS); | |
6847 | } | |
6848 | ||
6849 | /* | |
6850 | * Routine: vm_map_copy_overwrite | |
6851 | * | |
6852 | * Description: | |
6853 | * Copy the memory described by the map copy | |
6854 | * object (copy; returned by vm_map_copyin) onto | |
6855 | * the specified destination region (dst_map, dst_addr). | |
6856 | * The destination must be writeable. | |
6857 | * | |
6858 | * Unlike vm_map_copyout, this routine actually | |
6859 | * writes over previously-mapped memory. If the | |
6860 | * previous mapping was to a permanent (user-supplied) | |
6861 | * memory object, it is preserved. | |
6862 | * | |
6863 | * The attributes (protection and inheritance) of the | |
6864 | * destination region are preserved. | |
6865 | * | |
6866 | * If successful, consumes the copy object. | |
6867 | * Otherwise, the caller is responsible for it. | |
6868 | * | |
6869 | * Implementation notes: | |
6870 | * To overwrite aligned temporary virtual memory, it is | |
6871 | * sufficient to remove the previous mapping and insert | |
6872 | * the new copy. This replacement is done either on | |
6873 | * the whole region (if no permanent virtual memory | |
6874 | * objects are embedded in the destination region) or | |
6875 | * in individual map entries. | |
6876 | * | |
6877 | * To overwrite permanent virtual memory , it is necessary | |
6878 | * to copy each page, as the external memory management | |
6879 | * interface currently does not provide any optimizations. | |
6880 | * | |
6881 | * Unaligned memory also has to be copied. It is possible | |
6882 | * to use 'vm_trickery' to copy the aligned data. This is | |
6883 | * not done but not hard to implement. | |
6884 | * | |
6885 | * Once a page of permanent memory has been overwritten, | |
6886 | * it is impossible to interrupt this function; otherwise, | |
6887 | * the call would be neither atomic nor location-independent. | |
6888 | * The kernel-state portion of a user thread must be | |
6889 | * interruptible. | |
6890 | * | |
6891 | * It may be expensive to forward all requests that might | |
6892 | * overwrite permanent memory (vm_write, vm_copy) to | |
6893 | * uninterruptible kernel threads. This routine may be | |
6894 | * called by interruptible threads; however, success is | |
6895 | * not guaranteed -- if the request cannot be performed | |
6896 | * atomically and interruptibly, an error indication is | |
6897 | * returned. | |
6898 | */ | |
6899 | ||
91447636 | 6900 | static kern_return_t |
1c79356b | 6901 | vm_map_copy_overwrite_nested( |
91447636 A |
6902 | vm_map_t dst_map, |
6903 | vm_map_address_t dst_addr, | |
6904 | vm_map_copy_t copy, | |
6905 | boolean_t interruptible, | |
6d2010ae A |
6906 | pmap_t pmap, |
6907 | boolean_t discard_on_success) | |
1c79356b | 6908 | { |
91447636 A |
6909 | vm_map_offset_t dst_end; |
6910 | vm_map_entry_t tmp_entry; | |
6911 | vm_map_entry_t entry; | |
6912 | kern_return_t kr; | |
6913 | boolean_t aligned = TRUE; | |
6914 | boolean_t contains_permanent_objects = FALSE; | |
6915 | boolean_t encountered_sub_map = FALSE; | |
6916 | vm_map_offset_t base_addr; | |
6917 | vm_map_size_t copy_size; | |
6918 | vm_map_size_t total_size; | |
1c79356b A |
6919 | |
6920 | ||
6921 | /* | |
6922 | * Check for null copy object. | |
6923 | */ | |
6924 | ||
6925 | if (copy == VM_MAP_COPY_NULL) | |
6926 | return(KERN_SUCCESS); | |
6927 | ||
6928 | /* | |
6929 | * Check for special kernel buffer allocated | |
6930 | * by new_ipc_kmsg_copyin. | |
6931 | */ | |
6932 | ||
6933 | if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { | |
0b4e3aa0 | 6934 | return(vm_map_copyout_kernel_buffer( |
2d21ac55 | 6935 | dst_map, &dst_addr, |
39236c6e | 6936 | copy, TRUE, discard_on_success)); |
1c79356b A |
6937 | } |
6938 | ||
6939 | /* | |
6940 | * Only works for entry lists at the moment. Will | |
6941 | * support page lists later. | |
6942 | */ | |
6943 | ||
6944 | assert(copy->type == VM_MAP_COPY_ENTRY_LIST); | |
6945 | ||
6946 | if (copy->size == 0) { | |
6d2010ae A |
6947 | if (discard_on_success) |
6948 | vm_map_copy_discard(copy); | |
1c79356b A |
6949 | return(KERN_SUCCESS); |
6950 | } | |
6951 | ||
6952 | /* | |
6953 | * Verify that the destination is all writeable | |
6954 | * initially. We have to trunc the destination | |
6955 | * address and round the copy size or we'll end up | |
6956 | * splitting entries in strange ways. | |
6957 | */ | |
6958 | ||
39236c6e A |
6959 | if (!VM_MAP_PAGE_ALIGNED(copy->size, |
6960 | VM_MAP_PAGE_MASK(dst_map)) || | |
6961 | !VM_MAP_PAGE_ALIGNED(copy->offset, | |
6962 | VM_MAP_PAGE_MASK(dst_map)) || | |
6963 | !VM_MAP_PAGE_ALIGNED(dst_addr, | |
fe8ab488 | 6964 | VM_MAP_PAGE_MASK(dst_map))) |
1c79356b A |
6965 | { |
6966 | aligned = FALSE; | |
39236c6e A |
6967 | dst_end = vm_map_round_page(dst_addr + copy->size, |
6968 | VM_MAP_PAGE_MASK(dst_map)); | |
1c79356b A |
6969 | } else { |
6970 | dst_end = dst_addr + copy->size; | |
6971 | } | |
6972 | ||
1c79356b | 6973 | vm_map_lock(dst_map); |
9bccf70c | 6974 | |
91447636 A |
6975 | /* LP64todo - remove this check when vm_map_commpage64() |
6976 | * no longer has to stuff in a map_entry for the commpage | |
6977 | * above the map's max_offset. | |
6978 | */ | |
6979 | if (dst_addr >= dst_map->max_offset) { | |
6980 | vm_map_unlock(dst_map); | |
6981 | return(KERN_INVALID_ADDRESS); | |
6982 | } | |
6983 | ||
9bccf70c | 6984 | start_pass_1: |
1c79356b A |
6985 | if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { |
6986 | vm_map_unlock(dst_map); | |
6987 | return(KERN_INVALID_ADDRESS); | |
6988 | } | |
39236c6e A |
6989 | vm_map_clip_start(dst_map, |
6990 | tmp_entry, | |
6991 | vm_map_trunc_page(dst_addr, | |
6992 | VM_MAP_PAGE_MASK(dst_map))); | |
1c79356b A |
6993 | for (entry = tmp_entry;;) { |
6994 | vm_map_entry_t next = entry->vme_next; | |
6995 | ||
6996 | while(entry->is_sub_map) { | |
91447636 A |
6997 | vm_map_offset_t sub_start; |
6998 | vm_map_offset_t sub_end; | |
6999 | vm_map_offset_t local_end; | |
1c79356b A |
7000 | |
7001 | if (entry->in_transition) { | |
7002 | ||
2d21ac55 A |
7003 | /* |
7004 | * Say that we are waiting, and wait for entry. | |
7005 | */ | |
1c79356b A |
7006 | entry->needs_wakeup = TRUE; |
7007 | vm_map_entry_wait(dst_map, THREAD_UNINT); | |
7008 | ||
7009 | goto start_pass_1; | |
7010 | } | |
7011 | ||
7012 | local_end = entry->vme_end; | |
7013 | if (!(entry->needs_copy)) { | |
7014 | /* if needs_copy we are a COW submap; */
7015 | /* in such a case we just replace, so */
7016 | /* there is no need for the */
7017 | /* following check. */
7018 | encountered_sub_map = TRUE; | |
3e170ce0 | 7019 | sub_start = VME_OFFSET(entry); |
1c79356b A |
7020 | |
7021 | if(entry->vme_end < dst_end) | |
7022 | sub_end = entry->vme_end; | |
7023 | else | |
7024 | sub_end = dst_end; | |
7025 | sub_end -= entry->vme_start; | |
3e170ce0 | 7026 | sub_end += VME_OFFSET(entry); |
1c79356b A |
7027 | vm_map_unlock(dst_map); |
7028 | ||
7029 | kr = vm_map_overwrite_submap_recurse( | |
3e170ce0 | 7030 | VME_SUBMAP(entry), |
1c79356b A |
7031 | sub_start, |
7032 | sub_end - sub_start); | |
7033 | if(kr != KERN_SUCCESS) | |
7034 | return kr; | |
7035 | vm_map_lock(dst_map); | |
7036 | } | |
7037 | ||
7038 | if (dst_end <= entry->vme_end) | |
7039 | goto start_overwrite; | |
7040 | if(!vm_map_lookup_entry(dst_map, local_end, | |
7041 | &entry)) { | |
7042 | vm_map_unlock(dst_map); | |
7043 | return(KERN_INVALID_ADDRESS); | |
7044 | } | |
7045 | next = entry->vme_next; | |
7046 | } | |
7047 | ||
7048 | if ( ! (entry->protection & VM_PROT_WRITE)) { | |
7049 | vm_map_unlock(dst_map); | |
7050 | return(KERN_PROTECTION_FAILURE); | |
7051 | } | |
7052 | ||
7053 | /* | |
7054 | * If the entry is in transition, we must wait | |
7055 | * for it to exit that state. Anything could happen | |
7056 | * when we unlock the map, so start over. | |
7057 | */ | |
7058 | if (entry->in_transition) { | |
7059 | ||
7060 | /* | |
7061 | * Say that we are waiting, and wait for entry. | |
7062 | */ | |
7063 | entry->needs_wakeup = TRUE; | |
7064 | vm_map_entry_wait(dst_map, THREAD_UNINT); | |
7065 | ||
7066 | goto start_pass_1; | |
7067 | } | |
7068 | ||
7069 | /* | |
7070 | * our range is contained completely within this map entry | |
7071 | */ | |
7072 | if (dst_end <= entry->vme_end) | |
7073 | break; | |
7074 | /* | |
7075 | * check that the specified range is a contiguous region
7076 | */ | |
7077 | if ((next == vm_map_to_entry(dst_map)) || | |
7078 | (next->vme_start != entry->vme_end)) { | |
7079 | vm_map_unlock(dst_map); | |
7080 | return(KERN_INVALID_ADDRESS); | |
7081 | } | |
7082 | ||
7083 | ||
7084 | /* | |
7085 | * Check for permanent objects in the destination. | |
7086 | */ | |
3e170ce0 A |
7087 | if ((VME_OBJECT(entry) != VM_OBJECT_NULL) && |
7088 | ((!VME_OBJECT(entry)->internal) || | |
7089 | (VME_OBJECT(entry)->true_share))) { | |
1c79356b A |
7090 | contains_permanent_objects = TRUE; |
7091 | } | |
7092 | ||
7093 | entry = next; | |
7094 | }/* for */ | |
7095 | ||
7096 | start_overwrite: | |
7097 | /* | |
7098 | * If there are permanent objects in the destination, then | |
7099 | * the copy cannot be interrupted. | |
7100 | */ | |
7101 | ||
7102 | if (interruptible && contains_permanent_objects) { | |
7103 | vm_map_unlock(dst_map); | |
7104 | return(KERN_FAILURE); /* XXX */ | |
7105 | } | |
7106 | ||
7107 | /* | |
7108 | * | |
7109 | * Make a second pass, overwriting the data | |
7110 | * At the beginning of each loop iteration, | |
7111 | * the next entry to be overwritten is "tmp_entry" | |
7112 | * (initially, the value returned from the lookup above), | |
7113 | * and the starting address expected in that entry | |
7114 | * is "start". | |
7115 | */ | |
7116 | ||
7117 | total_size = copy->size; | |
7118 | if(encountered_sub_map) { | |
7119 | copy_size = 0; | |
7120 | /* re-calculate tmp_entry since we've had the map */ | |
7121 | /* unlocked */ | |
7122 | if (!vm_map_lookup_entry( dst_map, dst_addr, &tmp_entry)) { | |
7123 | vm_map_unlock(dst_map); | |
7124 | return(KERN_INVALID_ADDRESS); | |
7125 | } | |
7126 | } else { | |
7127 | copy_size = copy->size; | |
7128 | } | |
7129 | ||
7130 | base_addr = dst_addr; | |
7131 | while(TRUE) { | |
7132 | /* deconstruct the copy object and do it in parts, */
7133 | /* only in the sub_map, interruptible case */
7134 | vm_map_entry_t copy_entry; | |
91447636 A |
7135 | vm_map_entry_t previous_prev = VM_MAP_ENTRY_NULL; |
7136 | vm_map_entry_t next_copy = VM_MAP_ENTRY_NULL; | |
1c79356b | 7137 | int nentries; |
91447636 | 7138 | int remaining_entries = 0; |
b0d623f7 | 7139 | vm_map_offset_t new_offset = 0; |
1c79356b A |
7140 | |
7141 | for (entry = tmp_entry; copy_size == 0;) { | |
7142 | vm_map_entry_t next; | |
7143 | ||
7144 | next = entry->vme_next; | |
7145 | ||
7146 | /* tmp_entry and base address are moved along */ | |
7147 | /* each time we encounter a sub-map. Otherwise */ | |
7148 | /* entry can outpace tmp_entry, and the copy_size */
7149 | /* may reflect the distance between them. */
7150 | /* If the current entry is found to be in transition, */
7151 | /* we will start over at the beginning or at the last */
7152 | /* encounter of a submap, as dictated by base_addr, */
7153 | /* and we will zero copy_size accordingly. */
7154 | if (entry->in_transition) { | |
7155 | /* | |
7156 | * Say that we are waiting, and wait for entry. | |
7157 | */ | |
7158 | entry->needs_wakeup = TRUE; | |
7159 | vm_map_entry_wait(dst_map, THREAD_UNINT); | |
7160 | ||
1c79356b | 7161 | if(!vm_map_lookup_entry(dst_map, base_addr, |
2d21ac55 | 7162 | &tmp_entry)) { |
1c79356b A |
7163 | vm_map_unlock(dst_map); |
7164 | return(KERN_INVALID_ADDRESS); | |
7165 | } | |
7166 | copy_size = 0; | |
7167 | entry = tmp_entry; | |
7168 | continue; | |
7169 | } | |
7170 | if(entry->is_sub_map) { | |
91447636 A |
7171 | vm_map_offset_t sub_start; |
7172 | vm_map_offset_t sub_end; | |
7173 | vm_map_offset_t local_end; | |
1c79356b A |
7174 | |
7175 | if (entry->needs_copy) { | |
7176 | /* if this is a COW submap */ | |
7177 | /* just back the range with an */
7178 | /* anonymous entry */
7179 | if(entry->vme_end < dst_end) | |
7180 | sub_end = entry->vme_end; | |
7181 | else | |
7182 | sub_end = dst_end; | |
7183 | if(entry->vme_start < base_addr) | |
7184 | sub_start = base_addr; | |
7185 | else | |
7186 | sub_start = entry->vme_start; | |
7187 | vm_map_clip_end( | |
7188 | dst_map, entry, sub_end); | |
7189 | vm_map_clip_start( | |
7190 | dst_map, entry, sub_start); | |
2d21ac55 | 7191 | assert(!entry->use_pmap); |
1c79356b A |
7192 | entry->is_sub_map = FALSE; |
7193 | vm_map_deallocate( | |
3e170ce0 A |
7194 | VME_SUBMAP(entry)); |
7195 | VME_SUBMAP_SET(entry, NULL); | |
1c79356b A |
7196 | entry->is_shared = FALSE; |
7197 | entry->needs_copy = FALSE; | |
3e170ce0 | 7198 | VME_OFFSET_SET(entry, 0); |
2d21ac55 A |
7199 | /* |
7200 | * XXX FBDP | |
7201 | * We should propagate the protections | |
7202 | * of the submap entry here instead | |
7203 | * of forcing them to VM_PROT_ALL... | |
7204 | * Or better yet, we should inherit | |
7205 | * the protection of the copy_entry. | |
7206 | */ | |
1c79356b A |
7207 | entry->protection = VM_PROT_ALL; |
7208 | entry->max_protection = VM_PROT_ALL; | |
7209 | entry->wired_count = 0; | |
7210 | entry->user_wired_count = 0; | |
7211 | if(entry->inheritance | |
2d21ac55 A |
7212 | == VM_INHERIT_SHARE) |
7213 | entry->inheritance = VM_INHERIT_COPY; | |
1c79356b A |
7214 | continue; |
7215 | } | |
7216 | /* first take care of any non-sub_map */ | |
7217 | /* entries to send */ | |
7218 | if(base_addr < entry->vme_start) { | |
7219 | /* stuff to send */ | |
7220 | copy_size = | |
7221 | entry->vme_start - base_addr; | |
7222 | break; | |
7223 | } | |
3e170ce0 | 7224 | sub_start = VME_OFFSET(entry); |
1c79356b A |
7225 | |
7226 | if(entry->vme_end < dst_end) | |
7227 | sub_end = entry->vme_end; | |
7228 | else | |
7229 | sub_end = dst_end; | |
7230 | sub_end -= entry->vme_start; | |
3e170ce0 | 7231 | sub_end += VME_OFFSET(entry); |
1c79356b A |
7232 | local_end = entry->vme_end; |
7233 | vm_map_unlock(dst_map); | |
7234 | copy_size = sub_end - sub_start; | |
7235 | ||
7236 | /* adjust the copy object */ | |
7237 | if (total_size > copy_size) { | |
91447636 A |
7238 | vm_map_size_t local_size = 0; |
7239 | vm_map_size_t entry_size; | |
1c79356b | 7240 | |
2d21ac55 A |
7241 | nentries = 1; |
7242 | new_offset = copy->offset; | |
7243 | copy_entry = vm_map_copy_first_entry(copy); | |
7244 | while(copy_entry != | |
7245 | vm_map_copy_to_entry(copy)){ | |
7246 | entry_size = copy_entry->vme_end - | |
7247 | copy_entry->vme_start; | |
7248 | if((local_size < copy_size) && | |
7249 | ((local_size + entry_size) | |
7250 | >= copy_size)) { | |
7251 | vm_map_copy_clip_end(copy, | |
7252 | copy_entry, | |
7253 | copy_entry->vme_start + | |
7254 | (copy_size - local_size)); | |
7255 | entry_size = copy_entry->vme_end - | |
7256 | copy_entry->vme_start; | |
7257 | local_size += entry_size; | |
7258 | new_offset += entry_size; | |
7259 | } | |
7260 | if(local_size >= copy_size) { | |
7261 | next_copy = copy_entry->vme_next; | |
7262 | copy_entry->vme_next = | |
7263 | vm_map_copy_to_entry(copy); | |
7264 | previous_prev = | |
7265 | copy->cpy_hdr.links.prev; | |
7266 | copy->cpy_hdr.links.prev = copy_entry; | |
7267 | copy->size = copy_size; | |
7268 | remaining_entries = | |
7269 | copy->cpy_hdr.nentries; | |
7270 | remaining_entries -= nentries; | |
7271 | copy->cpy_hdr.nentries = nentries; | |
7272 | break; | |
7273 | } else { | |
7274 | local_size += entry_size; | |
7275 | new_offset += entry_size; | |
7276 | nentries++; | |
7277 | } | |
7278 | copy_entry = copy_entry->vme_next; | |
7279 | } | |
1c79356b A |
7280 | } |
7281 | ||
7282 | if((entry->use_pmap) && (pmap == NULL)) { | |
7283 | kr = vm_map_copy_overwrite_nested( | |
3e170ce0 | 7284 | VME_SUBMAP(entry), |
1c79356b A |
7285 | sub_start, |
7286 | copy, | |
7287 | interruptible, | |
3e170ce0 | 7288 | VME_SUBMAP(entry)->pmap, |
6d2010ae | 7289 | TRUE); |
1c79356b A |
7290 | } else if (pmap != NULL) { |
7291 | kr = vm_map_copy_overwrite_nested( | |
3e170ce0 | 7292 | VME_SUBMAP(entry), |
1c79356b A |
7293 | sub_start, |
7294 | copy, | |
6d2010ae A |
7295 | interruptible, pmap, |
7296 | TRUE); | |
1c79356b A |
7297 | } else { |
7298 | kr = vm_map_copy_overwrite_nested( | |
3e170ce0 | 7299 | VME_SUBMAP(entry), |
1c79356b A |
7300 | sub_start, |
7301 | copy, | |
7302 | interruptible, | |
6d2010ae A |
7303 | dst_map->pmap, |
7304 | TRUE); | |
1c79356b A |
7305 | } |
7306 | if(kr != KERN_SUCCESS) { | |
7307 | if(next_copy != NULL) { | |
2d21ac55 A |
7308 | copy->cpy_hdr.nentries += |
7309 | remaining_entries; | |
7310 | copy->cpy_hdr.links.prev->vme_next = | |
7311 | next_copy; | |
7312 | copy->cpy_hdr.links.prev | |
7313 | = previous_prev; | |
7314 | copy->size = total_size; | |
1c79356b A |
7315 | } |
7316 | return kr; | |
7317 | } | |
7318 | if (dst_end <= local_end) { | |
7319 | return(KERN_SUCCESS); | |
7320 | } | |
7321 | /* otherwise the copy no longer exists; it was */
7322 | /* destroyed after a successful copy_overwrite */
7323 | copy = (vm_map_copy_t) | |
2d21ac55 | 7324 | zalloc(vm_map_copy_zone); |
04b8595b | 7325 | copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE; |
1c79356b | 7326 | vm_map_copy_first_entry(copy) = |
2d21ac55 A |
7327 | vm_map_copy_last_entry(copy) = |
7328 | vm_map_copy_to_entry(copy); | |
1c79356b A |
7329 | copy->type = VM_MAP_COPY_ENTRY_LIST; |
7330 | copy->offset = new_offset; | |
7331 | ||
e2d2fc5c A |
7332 | /* |
7333 | * XXX FBDP | |
7334 | * this does not seem to deal with | |
7335 | * the VM map store (R&B tree) | |
7336 | */ | |
7337 | ||
1c79356b A |
7338 | total_size -= copy_size; |
7339 | copy_size = 0; | |
7340 | /* put back remainder of copy in container */ | |
7341 | if(next_copy != NULL) { | |
2d21ac55 A |
7342 | copy->cpy_hdr.nentries = remaining_entries; |
7343 | copy->cpy_hdr.links.next = next_copy; | |
7344 | copy->cpy_hdr.links.prev = previous_prev; | |
7345 | copy->size = total_size; | |
7346 | next_copy->vme_prev = | |
7347 | vm_map_copy_to_entry(copy); | |
7348 | next_copy = NULL; | |
1c79356b A |
7349 | } |
7350 | base_addr = local_end; | |
7351 | vm_map_lock(dst_map); | |
7352 | if(!vm_map_lookup_entry(dst_map, | |
2d21ac55 | 7353 | local_end, &tmp_entry)) { |
1c79356b A |
7354 | vm_map_unlock(dst_map); |
7355 | return(KERN_INVALID_ADDRESS); | |
7356 | } | |
7357 | entry = tmp_entry; | |
7358 | continue; | |
7359 | } | |
7360 | if (dst_end <= entry->vme_end) { | |
7361 | copy_size = dst_end - base_addr; | |
7362 | break; | |
7363 | } | |
7364 | ||
7365 | if ((next == vm_map_to_entry(dst_map)) || | |
2d21ac55 | 7366 | (next->vme_start != entry->vme_end)) { |
1c79356b A |
7367 | vm_map_unlock(dst_map); |
7368 | return(KERN_INVALID_ADDRESS); | |
7369 | } | |
7370 | ||
7371 | entry = next; | |
7372 | }/* for */ | |
7373 | ||
7374 | next_copy = NULL; | |
7375 | nentries = 1; | |
7376 | ||
7377 | /* adjust the copy object */ | |
7378 | if (total_size > copy_size) { | |
91447636 A |
7379 | vm_map_size_t local_size = 0; |
7380 | vm_map_size_t entry_size; | |
1c79356b A |
7381 | |
7382 | new_offset = copy->offset; | |
7383 | copy_entry = vm_map_copy_first_entry(copy); | |
7384 | while(copy_entry != vm_map_copy_to_entry(copy)) { | |
7385 | entry_size = copy_entry->vme_end - | |
2d21ac55 | 7386 | copy_entry->vme_start; |
1c79356b | 7387 | if((local_size < copy_size) && |
2d21ac55 A |
7388 | ((local_size + entry_size) |
7389 | >= copy_size)) { | |
1c79356b | 7390 | vm_map_copy_clip_end(copy, copy_entry, |
2d21ac55 A |
7391 | copy_entry->vme_start + |
7392 | (copy_size - local_size)); | |
1c79356b | 7393 | entry_size = copy_entry->vme_end - |
2d21ac55 | 7394 | copy_entry->vme_start; |
1c79356b A |
7395 | local_size += entry_size; |
7396 | new_offset += entry_size; | |
7397 | } | |
7398 | if(local_size >= copy_size) { | |
7399 | next_copy = copy_entry->vme_next; | |
7400 | copy_entry->vme_next = | |
7401 | vm_map_copy_to_entry(copy); | |
7402 | previous_prev = | |
7403 | copy->cpy_hdr.links.prev; | |
7404 | copy->cpy_hdr.links.prev = copy_entry; | |
7405 | copy->size = copy_size; | |
7406 | remaining_entries = | |
7407 | copy->cpy_hdr.nentries; | |
7408 | remaining_entries -= nentries; | |
7409 | copy->cpy_hdr.nentries = nentries; | |
7410 | break; | |
7411 | } else { | |
7412 | local_size += entry_size; | |
7413 | new_offset += entry_size; | |
7414 | nentries++; | |
7415 | } | |
7416 | copy_entry = copy_entry->vme_next; | |
7417 | } | |
7418 | } | |
7419 | ||
7420 | if (aligned) { | |
7421 | pmap_t local_pmap; | |
7422 | ||
7423 | if(pmap) | |
7424 | local_pmap = pmap; | |
7425 | else | |
7426 | local_pmap = dst_map->pmap; | |
7427 | ||
7428 | if ((kr = vm_map_copy_overwrite_aligned( | |
2d21ac55 A |
7429 | dst_map, tmp_entry, copy, |
7430 | base_addr, local_pmap)) != KERN_SUCCESS) { | |
1c79356b A |
7431 | if(next_copy != NULL) { |
7432 | copy->cpy_hdr.nentries += | |
2d21ac55 | 7433 | remaining_entries; |
1c79356b | 7434 | copy->cpy_hdr.links.prev->vme_next = |
2d21ac55 | 7435 | next_copy; |
1c79356b | 7436 | copy->cpy_hdr.links.prev = |
2d21ac55 | 7437 | previous_prev; |
1c79356b A |
7438 | copy->size += copy_size; |
7439 | } | |
7440 | return kr; | |
7441 | } | |
7442 | vm_map_unlock(dst_map); | |
7443 | } else { | |
2d21ac55 A |
7444 | /* |
7445 | * Performance gain: | |
7446 | * | |
7447 | * if the copy and dst address are misaligned but at the same
7448 | * offset within the page, we can copy_not_aligned the
7449 | * misaligned parts and copy aligned the rest. If they are
7450 | * aligned but len is unaligned, we simply need to copy
7451 | * the end bit unaligned. We'll need to split the misaligned
7452 | * bits of the region in this case!
7453 | */ | |
7454 | /* ALWAYS UNLOCKS THE dst_map MAP */ | |
39236c6e A |
7455 | kr = vm_map_copy_overwrite_unaligned( |
7456 | dst_map, | |
7457 | tmp_entry, | |
7458 | copy, | |
7459 | base_addr, | |
7460 | discard_on_success); | |
7461 | if (kr != KERN_SUCCESS) { | |
1c79356b A |
7462 | if(next_copy != NULL) { |
7463 | copy->cpy_hdr.nentries += | |
2d21ac55 | 7464 | remaining_entries; |
1c79356b | 7465 | copy->cpy_hdr.links.prev->vme_next = |
2d21ac55 | 7466 | next_copy; |
1c79356b A |
7467 | copy->cpy_hdr.links.prev = |
7468 | previous_prev; | |
7469 | copy->size += copy_size; | |
7470 | } | |
7471 | return kr; | |
7472 | } | |
7473 | } | |
7474 | total_size -= copy_size; | |
7475 | if(total_size == 0) | |
7476 | break; | |
7477 | base_addr += copy_size; | |
7478 | copy_size = 0; | |
7479 | copy->offset = new_offset; | |
7480 | if(next_copy != NULL) { | |
7481 | copy->cpy_hdr.nentries = remaining_entries; | |
7482 | copy->cpy_hdr.links.next = next_copy; | |
7483 | copy->cpy_hdr.links.prev = previous_prev; | |
7484 | next_copy->vme_prev = vm_map_copy_to_entry(copy); | |
7485 | copy->size = total_size; | |
7486 | } | |
7487 | vm_map_lock(dst_map); | |
7488 | while(TRUE) { | |
7489 | if (!vm_map_lookup_entry(dst_map, | |
2d21ac55 | 7490 | base_addr, &tmp_entry)) { |
1c79356b A |
7491 | vm_map_unlock(dst_map); |
7492 | return(KERN_INVALID_ADDRESS); | |
7493 | } | |
7494 | if (tmp_entry->in_transition) { | |
7495 | entry->needs_wakeup = TRUE; | |
7496 | vm_map_entry_wait(dst_map, THREAD_UNINT); | |
7497 | } else { | |
7498 | break; | |
7499 | } | |
7500 | } | |
39236c6e A |
7501 | vm_map_clip_start(dst_map, |
7502 | tmp_entry, | |
7503 | vm_map_trunc_page(base_addr, | |
7504 | VM_MAP_PAGE_MASK(dst_map))); | |
1c79356b A |
7505 | |
7506 | entry = tmp_entry; | |
7507 | } /* while */ | |
7508 | ||
7509 | /* | |
7510 | * Throw away the vm_map_copy object | |
7511 | */ | |
6d2010ae A |
7512 | if (discard_on_success) |
7513 | vm_map_copy_discard(copy); | |
1c79356b A |
7514 | |
7515 | return(KERN_SUCCESS); | |
7516 | }/* vm_map_copy_overwrite */ | |
7517 | ||
7518 | kern_return_t | |
7519 | vm_map_copy_overwrite( | |
7520 | vm_map_t dst_map, | |
91447636 | 7521 | vm_map_offset_t dst_addr, |
1c79356b A |
7522 | vm_map_copy_t copy, |
7523 | boolean_t interruptible) | |
7524 | { | |
6d2010ae A |
7525 | vm_map_size_t head_size, tail_size; |
7526 | vm_map_copy_t head_copy, tail_copy; | |
7527 | vm_map_offset_t head_addr, tail_addr; | |
7528 | vm_map_entry_t entry; | |
7529 | kern_return_t kr; | |
7530 | ||
7531 | head_size = 0; | |
7532 | tail_size = 0; | |
7533 | head_copy = NULL; | |
7534 | tail_copy = NULL; | |
7535 | head_addr = 0; | |
7536 | tail_addr = 0; | |
7537 | ||
7538 | if (interruptible || | |
7539 | copy == VM_MAP_COPY_NULL || | |
7540 | copy->type != VM_MAP_COPY_ENTRY_LIST) { | |
7541 | /* | |
7542 | * We can't split the "copy" map if we're interruptible | |
7543 | * or if we don't have a "copy" map... | |
7544 | */ | |
7545 | blunt_copy: | |
7546 | return vm_map_copy_overwrite_nested(dst_map, | |
7547 | dst_addr, | |
7548 | copy, | |
7549 | interruptible, | |
7550 | (pmap_t) NULL, | |
7551 | TRUE); | |
7552 | } | |
7553 | ||
7554 | if (copy->size < 3 * PAGE_SIZE) { | |
7555 | /* | |
7556 | * Too small to bother with optimizing... | |
7557 | */ | |
7558 | goto blunt_copy; | |
7559 | } | |
7560 | ||
39236c6e A |
7561 | if ((dst_addr & VM_MAP_PAGE_MASK(dst_map)) != |
7562 | (copy->offset & VM_MAP_PAGE_MASK(dst_map))) { | |
6d2010ae A |
7563 | /* |
7564 | * Incompatible mis-alignment of source and destination... | |
7565 | */ | |
7566 | goto blunt_copy; | |
7567 | } | |
7568 | ||
7569 | /* | |
7570 | * Proper alignment or identical mis-alignment at the beginning. | |
7571 | * Let's try and do a small unaligned copy first (if needed) | |
7572 | * and then an aligned copy for the rest (a worked example follows this function).
7573 | */ | |
7574 | if (!page_aligned(dst_addr)) { | |
7575 | head_addr = dst_addr; | |
39236c6e A |
7576 | head_size = (VM_MAP_PAGE_SIZE(dst_map) - |
7577 | (copy->offset & VM_MAP_PAGE_MASK(dst_map))); | |
6d2010ae A |
7578 | } |
7579 | if (!page_aligned(copy->offset + copy->size)) { | |
7580 | /* | |
7581 | * Mis-alignment at the end. | |
7582 | * Do an aligned copy up to the last page and | |
7583 | * then an unaligned copy for the remaining bytes. | |
7584 | */ | |
39236c6e A |
7585 | tail_size = ((copy->offset + copy->size) & |
7586 | VM_MAP_PAGE_MASK(dst_map)); | |
6d2010ae A |
7587 | tail_addr = dst_addr + copy->size - tail_size; |
7588 | } | |
7589 | ||
7590 | if (head_size + tail_size == copy->size) { | |
7591 | /* | |
7592 | * It's all unaligned, no optimization possible... | |
7593 | */ | |
7594 | goto blunt_copy; | |
7595 | } | |
7596 | ||
7597 | /* | |
7598 | * Can't optimize if there are any submaps in the | |
7599 | * destination due to the way we free the "copy" map | |
7600 | * progressively in vm_map_copy_overwrite_nested() | |
7601 | * in that case. | |
7602 | */ | |
7603 | vm_map_lock_read(dst_map); | |
7604 | if (! vm_map_lookup_entry(dst_map, dst_addr, &entry)) { | |
7605 | vm_map_unlock_read(dst_map); | |
7606 | goto blunt_copy; | |
7607 | } | |
7608 | for (; | |
7609 | (entry != vm_map_copy_to_entry(copy) && | |
7610 | entry->vme_start < dst_addr + copy->size); | |
7611 | entry = entry->vme_next) { | |
7612 | if (entry->is_sub_map) { | |
7613 | vm_map_unlock_read(dst_map); | |
7614 | goto blunt_copy; | |
7615 | } | |
7616 | } | |
7617 | vm_map_unlock_read(dst_map); | |
7618 | ||
7619 | if (head_size) { | |
7620 | /* | |
7621 | * Unaligned copy of the first "head_size" bytes, to reach | |
7622 | * a page boundary. | |
7623 | */ | |
7624 | ||
7625 | /* | |
7626 | * Extract "head_copy" out of "copy". | |
7627 | */ | |
7628 | head_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); | |
04b8595b | 7629 | head_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE; |
6d2010ae A |
7630 | vm_map_copy_first_entry(head_copy) = |
7631 | vm_map_copy_to_entry(head_copy); | |
7632 | vm_map_copy_last_entry(head_copy) = | |
7633 | vm_map_copy_to_entry(head_copy); | |
7634 | head_copy->type = VM_MAP_COPY_ENTRY_LIST; | |
7635 | head_copy->cpy_hdr.nentries = 0; | |
7636 | head_copy->cpy_hdr.entries_pageable = | |
7637 | copy->cpy_hdr.entries_pageable; | |
7638 | vm_map_store_init(&head_copy->cpy_hdr); | |
7639 | ||
7640 | head_copy->offset = copy->offset; | |
7641 | head_copy->size = head_size; | |
7642 | ||
7643 | copy->offset += head_size; | |
7644 | copy->size -= head_size; | |
7645 | ||
7646 | entry = vm_map_copy_first_entry(copy); | |
7647 | vm_map_copy_clip_end(copy, entry, copy->offset); | |
7648 | vm_map_copy_entry_unlink(copy, entry); | |
7649 | vm_map_copy_entry_link(head_copy, | |
7650 | vm_map_copy_to_entry(head_copy), | |
7651 | entry); | |
7652 | ||
7653 | /* | |
7654 | * Do the unaligned copy. | |
7655 | */ | |
7656 | kr = vm_map_copy_overwrite_nested(dst_map, | |
7657 | head_addr, | |
7658 | head_copy, | |
7659 | interruptible, | |
7660 | (pmap_t) NULL, | |
7661 | FALSE); | |
7662 | if (kr != KERN_SUCCESS) | |
7663 | goto done; | |
7664 | } | |
7665 | ||
7666 | if (tail_size) { | |
7667 | /* | |
7668 | * Extract "tail_copy" out of "copy". | |
7669 | */ | |
7670 | tail_copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); | |
04b8595b | 7671 | tail_copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE; |
6d2010ae A |
7672 | vm_map_copy_first_entry(tail_copy) = |
7673 | vm_map_copy_to_entry(tail_copy); | |
7674 | vm_map_copy_last_entry(tail_copy) = | |
7675 | vm_map_copy_to_entry(tail_copy); | |
7676 | tail_copy->type = VM_MAP_COPY_ENTRY_LIST; | |
7677 | tail_copy->cpy_hdr.nentries = 0; | |
7678 | tail_copy->cpy_hdr.entries_pageable = | |
7679 | copy->cpy_hdr.entries_pageable; | |
7680 | vm_map_store_init(&tail_copy->cpy_hdr); | |
7681 | ||
7682 | tail_copy->offset = copy->offset + copy->size - tail_size; | |
7683 | tail_copy->size = tail_size; | |
7684 | ||
7685 | copy->size -= tail_size; | |
7686 | ||
7687 | entry = vm_map_copy_last_entry(copy); | |
7688 | vm_map_copy_clip_start(copy, entry, tail_copy->offset); | |
7689 | entry = vm_map_copy_last_entry(copy); | |
7690 | vm_map_copy_entry_unlink(copy, entry); | |
7691 | vm_map_copy_entry_link(tail_copy, | |
7692 | vm_map_copy_last_entry(tail_copy), | |
7693 | entry); | |
7694 | } | |
7695 | ||
7696 | /* | |
7697 | * Copy most (or possibly all) of the data. | |
7698 | */ | |
7699 | kr = vm_map_copy_overwrite_nested(dst_map, | |
7700 | dst_addr + head_size, | |
7701 | copy, | |
7702 | interruptible, | |
7703 | (pmap_t) NULL, | |
7704 | FALSE); | |
7705 | if (kr != KERN_SUCCESS) { | |
7706 | goto done; | |
7707 | } | |
7708 | ||
7709 | if (tail_size) { | |
7710 | kr = vm_map_copy_overwrite_nested(dst_map, | |
7711 | tail_addr, | |
7712 | tail_copy, | |
7713 | interruptible, | |
7714 | (pmap_t) NULL, | |
7715 | FALSE); | |
7716 | } | |
7717 | ||
7718 | done: | |
7719 | assert(copy->type == VM_MAP_COPY_ENTRY_LIST); | |
7720 | if (kr == KERN_SUCCESS) { | |
7721 | /* | |
7722 | * Discard all the copy maps. | |
7723 | */ | |
7724 | if (head_copy) { | |
7725 | vm_map_copy_discard(head_copy); | |
7726 | head_copy = NULL; | |
7727 | } | |
7728 | vm_map_copy_discard(copy); | |
7729 | if (tail_copy) { | |
7730 | vm_map_copy_discard(tail_copy); | |
7731 | tail_copy = NULL; | |
7732 | } | |
7733 | } else { | |
7734 | /* | |
7735 | * Re-assemble the original copy map. | |
7736 | */ | |
7737 | if (head_copy) { | |
7738 | entry = vm_map_copy_first_entry(head_copy); | |
7739 | vm_map_copy_entry_unlink(head_copy, entry); | |
7740 | vm_map_copy_entry_link(copy, | |
7741 | vm_map_copy_to_entry(copy), | |
7742 | entry); | |
7743 | copy->offset -= head_size; | |
7744 | copy->size += head_size; | |
7745 | vm_map_copy_discard(head_copy); | |
7746 | head_copy = NULL; | |
7747 | } | |
7748 | if (tail_copy) { | |
7749 | entry = vm_map_copy_last_entry(tail_copy); | |
7750 | vm_map_copy_entry_unlink(tail_copy, entry); | |
7751 | vm_map_copy_entry_link(copy, | |
7752 | vm_map_copy_last_entry(copy), | |
7753 | entry); | |
7754 | copy->size += tail_size; | |
7755 | vm_map_copy_discard(tail_copy); | |
7756 | tail_copy = NULL; | |
7757 | } | |
7758 | } | |
7759 | return kr; | |
1c79356b A |
7760 | } |
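/*
 * Worked example (hypothetical numbers, assuming a 4K map page size)
 * of the head/middle/tail split performed above.  For
 * copy->offset = 0x1800, copy->size = 0x3000, dst_addr = 0x10800,
 * source and destination share the same 0x800 mis-alignment, so:
 *
 *	head_size = 0x1000 - 0x800 = 0x800
 *		-> unaligned copy of [0x10800, 0x11000)
 *	middle
 *		-> aligned copy of [0x11000, 0x13000)
 *	tail_size = (0x1800 + 0x3000) & 0xFFF = 0x800
 *		-> unaligned copy of [0x13000, 0x13800)
 *
 * head_size + tail_size (0x1000) != copy->size (0x3000), so the
 * split is taken rather than the "blunt_copy" path.
 */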
7761 | ||
7762 | ||
7763 | /* | |
91447636 | 7764 | * Routine: vm_map_copy_overwrite_unaligned [internal use only] |
1c79356b A |
7765 | * |
7766 | * Description:
7767 | * Physically copy unaligned data | |
7768 | * | |
7769 | * Implementation: | |
7770 | Unaligned parts of pages have to be physically copied. We use | |
7771 | a modified form of vm_fault_copy (which understands non-aligned | |
7772 | page offsets and sizes) to do the copy. We attempt to copy as | |
7773 | much memory in one go as possible; however, vm_fault_copy copies | |
7774 | within a single memory object, so each pass is bounded by the smallest | |
7775 | of "amount left", "source object data size" and "target object data | |
7776 | size". With unaligned data we don't need to split regions, so the | |
7777 | source (copy) object should be a single map entry; the target range, | |
7778 | however, may be split over multiple map entries. In any event we are | |
7779 | pessimistic about these assumptions. | |
7780 | * | |
7781 | * Assumptions: | |
7782 | dst_map is locked on entry and is returned locked on success, | |
7783 | * unlocked on error. | |
7784 | */ | |
7785 | ||
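/*
 * [Editor's note] A minimal sketch of the per-pass sizing rule described
 * above, expressed with the local names of the routine below (illustrative
 * only, not new logic):
 *
 *	copy_size = (dst_size < src_size) ? dst_size : src_size;
 *	if (copy_size > amount_left)
 *		copy_size = amount_left;
 *
 * i.e. each vm_fault_copy() pass is bounded by the bytes left in the
 * destination entry, the bytes left in the source copy entry, and the
 * total bytes remaining.
 */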
91447636 | 7786 | static kern_return_t |
1c79356b A |
7787 | vm_map_copy_overwrite_unaligned( |
7788 | vm_map_t dst_map, | |
7789 | vm_map_entry_t entry, | |
7790 | vm_map_copy_t copy, | |
39236c6e A |
7791 | vm_map_offset_t start, |
7792 | boolean_t discard_on_success) | |
1c79356b | 7793 | { |
39236c6e A |
7794 | vm_map_entry_t copy_entry; |
7795 | vm_map_entry_t copy_entry_next; | |
1c79356b A |
7796 | vm_map_version_t version; |
7797 | vm_object_t dst_object; | |
7798 | vm_object_offset_t dst_offset; | |
7799 | vm_object_offset_t src_offset; | |
7800 | vm_object_offset_t entry_offset; | |
91447636 A |
7801 | vm_map_offset_t entry_end; |
7802 | vm_map_size_t src_size, | |
1c79356b A |
7803 | dst_size, |
7804 | copy_size, | |
7805 | amount_left; | |
7806 | kern_return_t kr = KERN_SUCCESS; | |
7807 | ||
39236c6e A |
7808 | |
7809 | copy_entry = vm_map_copy_first_entry(copy); | |
7810 | ||
1c79356b A |
7811 | vm_map_lock_write_to_read(dst_map); |
7812 | ||
91447636 | 7813 | src_offset = copy->offset - vm_object_trunc_page(copy->offset); |
1c79356b A |
7814 | amount_left = copy->size; |
7815 | /* | |
7816 | * unaligned so we never clipped this entry, we need the offset into | |
7817 | * the vm_object not just the data. | |
7818 | */ | |
7819 | while (amount_left > 0) { | |
7820 | ||
7821 | if (entry == vm_map_to_entry(dst_map)) { | |
7822 | vm_map_unlock_read(dst_map); | |
7823 | return KERN_INVALID_ADDRESS; | |
7824 | } | |
7825 | ||
7826 | /* "start" must be within the current map entry */ | |
7827 | assert ((start>=entry->vme_start) && (start<entry->vme_end)); | |
7828 | ||
7829 | dst_offset = start - entry->vme_start; | |
7830 | ||
7831 | dst_size = entry->vme_end - start; | |
7832 | ||
7833 | src_size = copy_entry->vme_end - | |
7834 | (copy_entry->vme_start + src_offset); | |
7835 | ||
7836 | if (dst_size < src_size) { | |
7837 | /* | |
7838 | * we can only copy dst_size bytes before | |
7839 | * we have to get the next destination entry | |
7840 | */ | |
7841 | copy_size = dst_size; | |
7842 | } else { | |
7843 | /* | |
7844 | * we can only copy src_size bytes before | |
7845 | * we have to get the next source copy entry | |
7846 | */ | |
7847 | copy_size = src_size; | |
7848 | } | |
7849 | ||
7850 | if (copy_size > amount_left) { | |
7851 | copy_size = amount_left; | |
7852 | } | |
7853 | /* | |
7854 | * Entry needs copy: create a shadow object for the | |
7855 | * copy-on-write region. | |
7856 | */ | |
7857 | if (entry->needs_copy && | |
2d21ac55 | 7858 | ((entry->protection & VM_PROT_WRITE) != 0)) |
1c79356b A |
7859 | { |
7860 | if (vm_map_lock_read_to_write(dst_map)) { | |
7861 | vm_map_lock_read(dst_map); | |
7862 | goto RetryLookup; | |
7863 | } | |
3e170ce0 A |
7864 | VME_OBJECT_SHADOW(entry, |
7865 | (vm_map_size_t)(entry->vme_end | |
7866 | - entry->vme_start)); | |
1c79356b A |
7867 | entry->needs_copy = FALSE; |
7868 | vm_map_lock_write_to_read(dst_map); | |
7869 | } | |
3e170ce0 | 7870 | dst_object = VME_OBJECT(entry); |
1c79356b A |
7871 | /* |
7872 | * unlike the virtual (aligned) copy, we're going | |
7873 | * to fault on it; therefore we need a target object. | |
7874 | */ | |
7875 | if (dst_object == VM_OBJECT_NULL) { | |
7876 | if (vm_map_lock_read_to_write(dst_map)) { | |
7877 | vm_map_lock_read(dst_map); | |
7878 | goto RetryLookup; | |
7879 | } | |
91447636 | 7880 | dst_object = vm_object_allocate((vm_map_size_t) |
2d21ac55 | 7881 | entry->vme_end - entry->vme_start); |
3e170ce0 A |
7882 | VME_OBJECT(entry) = dst_object; |
7883 | VME_OFFSET_SET(entry, 0); | |
fe8ab488 | 7884 | assert(entry->use_pmap); |
1c79356b A |
7885 | vm_map_lock_write_to_read(dst_map); |
7886 | } | |
7887 | /* | |
7888 | * Take an object reference and unlock map. The "entry" may | |
7889 | * disappear or change when the map is unlocked. | |
7890 | */ | |
7891 | vm_object_reference(dst_object); | |
7892 | version.main_timestamp = dst_map->timestamp; | |
3e170ce0 | 7893 | entry_offset = VME_OFFSET(entry); |
1c79356b A |
7894 | entry_end = entry->vme_end; |
7895 | vm_map_unlock_read(dst_map); | |
7896 | /* | |
7897 | * Copy as much as possible in one pass | |
7898 | */ | |
7899 | kr = vm_fault_copy( | |
3e170ce0 A |
7900 | VME_OBJECT(copy_entry), |
7901 | VME_OFFSET(copy_entry) + src_offset, | |
1c79356b A |
7902 | &copy_size, | |
7903 | dst_object, | |
7904 | entry_offset + dst_offset, | |
7905 | dst_map, | |
7906 | &version, | |
7907 | THREAD_UNINT ); | |
7908 | ||
7909 | start += copy_size; | |
7910 | src_offset += copy_size; | |
7911 | amount_left -= copy_size; | |
7912 | /* | |
7913 | * Release the object reference | |
7914 | */ | |
7915 | vm_object_deallocate(dst_object); | |
7916 | /* | |
7917 | * If a hard error occurred, return it now | |
7918 | */ | |
7919 | if (kr != KERN_SUCCESS) | |
7920 | return kr; | |
7921 | ||
7922 | if ((copy_entry->vme_start + src_offset) == copy_entry->vme_end | |
2d21ac55 | 7923 | || amount_left == 0) |
1c79356b A |
7924 | { |
7925 | /* | |
7926 | * all done with this copy entry, dispose. | |
7927 | */ | |
39236c6e A |
7928 | copy_entry_next = copy_entry->vme_next; |
7929 | ||
7930 | if (discard_on_success) { | |
7931 | vm_map_copy_entry_unlink(copy, copy_entry); | |
7932 | assert(!copy_entry->is_sub_map); | |
3e170ce0 | 7933 | vm_object_deallocate(VME_OBJECT(copy_entry)); |
39236c6e A |
7934 | vm_map_copy_entry_dispose(copy, copy_entry); |
7935 | } | |
1c79356b | 7936 | |
39236c6e A |
7937 | if (copy_entry_next == vm_map_copy_to_entry(copy) && |
7938 | amount_left) { | |
1c79356b A |
7939 | /* |
7940 | * not finished copying but run out of source | |
7941 | */ | |
7942 | return KERN_INVALID_ADDRESS; | |
7943 | } | |
39236c6e A |
7944 | |
7945 | copy_entry = copy_entry_next; | |
7946 | ||
1c79356b A |
7947 | src_offset = 0; |
7948 | } | |
7949 | ||
7950 | if (amount_left == 0) | |
7951 | return KERN_SUCCESS; | |
7952 | ||
7953 | vm_map_lock_read(dst_map); | |
7954 | if (version.main_timestamp == dst_map->timestamp) { | |
7955 | if (start == entry_end) { | |
7956 | /* | |
7957 | * destination region is split. Use the version | |
7958 | * information to avoid a lookup in the normal | |
7959 | * case. | |
7960 | */ | |
7961 | entry = entry->vme_next; | |
7962 | /* | |
7963 | * should be contiguous. Fail if we encounter | |
7964 | * a hole in the destination. | |
7965 | */ | |
7966 | if (start != entry->vme_start) { | |
7967 | vm_map_unlock_read(dst_map); | |
7968 | return KERN_INVALID_ADDRESS; | |
7969 | } | |
7970 | } | |
7971 | } else { | |
7972 | /* | |
7973 | * Map version check failed. | |
7974 | * we must lookup the entry because somebody | |
7975 | * might have changed the map behind our backs. | |
7976 | */ | |
2d21ac55 | 7977 | RetryLookup: |
1c79356b A |
7978 | if (!vm_map_lookup_entry(dst_map, start, &entry)) |
7979 | { | |
7980 | vm_map_unlock_read(dst_map); | |
7981 | return KERN_INVALID_ADDRESS; | |
7982 | } | |
7983 | } | |
7984 | }/* while */ | |
7985 | ||
1c79356b A |
7986 | return KERN_SUCCESS; |
7987 | }/* vm_map_copy_overwrite_unaligned */ | |
7988 | ||
7989 | /* | |
91447636 | 7990 | * Routine: vm_map_copy_overwrite_aligned [internal use only] |
1c79356b A |
7991 | * |
7992 | * Description: | |
7993 | * Does all the vm_trickery possible for whole pages. | |
7994 | * | |
7995 | * Implementation: | |
7996 | * | |
7997 | * If there are no permanent objects in the destination, | |
7998 | * and the source and destination map entry zones match, | |
7999 | * and the destination map entry is not shared, | |
8000 | * then the map entries can be deleted and replaced | |
8001 | * with those from the copy. The following code is the | |
8002 | * basic idea of what to do, but there are lots of annoying | |
8003 | * little details about getting protection and inheritance | |
8004 | * right. Should add protection, inheritance, and sharing checks | |
8005 | * to the above pass and make sure that no wiring is involved. | |
8006 | */ | |
8007 | ||
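/*
 * [Editor's note] The "temporary unshared memory" test that gates the
 * entry-replacement fast path, condensed from the loop below (an excerpt
 * of the existing logic, not new behavior):
 *
 *	object = VME_OBJECT(entry);
 *	if ((!entry->is_shared &&
 *	     (object == VM_OBJECT_NULL ||
 *	      (object->internal && !object->true_share))) ||
 *	    entry->needs_copy) {
 *		... throw away the destination's object and install the
 *		    source copy entry's object in its place ...
 *	}
 */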
e2d2fc5c A |
8008 | int vm_map_copy_overwrite_aligned_src_not_internal = 0; |
8009 | int vm_map_copy_overwrite_aligned_src_not_symmetric = 0; | |
8010 | int vm_map_copy_overwrite_aligned_src_large = 0; | |
8011 | ||
91447636 | 8012 | static kern_return_t |
1c79356b A |
8013 | vm_map_copy_overwrite_aligned( |
8014 | vm_map_t dst_map, | |
8015 | vm_map_entry_t tmp_entry, | |
8016 | vm_map_copy_t copy, | |
91447636 | 8017 | vm_map_offset_t start, |
2d21ac55 | 8018 | __unused pmap_t pmap) |
1c79356b A |
8019 | { |
8020 | vm_object_t object; | |
8021 | vm_map_entry_t copy_entry; | |
91447636 A |
8022 | vm_map_size_t copy_size; |
8023 | vm_map_size_t size; | |
1c79356b A |
8024 | vm_map_entry_t entry; |
8025 | ||
8026 | while ((copy_entry = vm_map_copy_first_entry(copy)) | |
2d21ac55 | 8027 | != vm_map_copy_to_entry(copy)) |
1c79356b A |
8028 | { |
8029 | copy_size = (copy_entry->vme_end - copy_entry->vme_start); | |
8030 | ||
8031 | entry = tmp_entry; | |
fe8ab488 A |
8032 | if (entry->is_sub_map) { |
8033 | /* unnested when clipped earlier */ | |
8034 | assert(!entry->use_pmap); | |
8035 | } | |
1c79356b A |
8036 | if (entry == vm_map_to_entry(dst_map)) { |
8037 | vm_map_unlock(dst_map); | |
8038 | return KERN_INVALID_ADDRESS; | |
8039 | } | |
8040 | size = (entry->vme_end - entry->vme_start); | |
8041 | /* | |
8042 | * Make sure that no holes popped up in the | |
8043 | * address map, and that the protection is | |
8044 | * still valid, in case the map was unlocked | |
8045 | * earlier. | |
8046 | */ | |
8047 | ||
8048 | if ((entry->vme_start != start) || ((entry->is_sub_map) | |
2d21ac55 | 8049 | && !entry->needs_copy)) { |
1c79356b A |
8050 | vm_map_unlock(dst_map); |
8051 | return(KERN_INVALID_ADDRESS); | |
8052 | } | |
8053 | assert(entry != vm_map_to_entry(dst_map)); | |
8054 | ||
8055 | /* | |
8056 | * Check protection again | |
8057 | */ | |
8058 | ||
8059 | if ( ! (entry->protection & VM_PROT_WRITE)) { | |
8060 | vm_map_unlock(dst_map); | |
8061 | return(KERN_PROTECTION_FAILURE); | |
8062 | } | |
8063 | ||
8064 | /* | |
8065 | * Adjust to source size first | |
8066 | */ | |
8067 | ||
8068 | if (copy_size < size) { | |
fe8ab488 A |
8069 | if (entry->map_aligned && |
8070 | !VM_MAP_PAGE_ALIGNED(entry->vme_start + copy_size, | |
8071 | VM_MAP_PAGE_MASK(dst_map))) { | |
8072 | /* no longer map-aligned */ | |
8073 | entry->map_aligned = FALSE; | |
8074 | } | |
1c79356b A |
8075 | vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size); |
8076 | size = copy_size; | |
8077 | } | |
8078 | ||
8079 | /* | |
8080 | * Adjust to destination size | |
8081 | */ | |
8082 | ||
8083 | if (size < copy_size) { | |
8084 | vm_map_copy_clip_end(copy, copy_entry, | |
2d21ac55 | 8085 | copy_entry->vme_start + size); |
1c79356b A |
8086 | copy_size = size; |
8087 | } | |
8088 | ||
8089 | assert((entry->vme_end - entry->vme_start) == size); | |
8090 | assert((tmp_entry->vme_end - tmp_entry->vme_start) == size); | |
8091 | assert((copy_entry->vme_end - copy_entry->vme_start) == size); | |
8092 | ||
8093 | /* | |
8094 | * If the destination contains temporary unshared memory, | |
8095 | * we can perform the copy by throwing it away and | |
8096 | * installing the source data. | |
8097 | */ | |
8098 | ||
3e170ce0 | 8099 | object = VME_OBJECT(entry); |
1c79356b | 8100 | if ((!entry->is_shared && |
2d21ac55 A |
8101 | ((object == VM_OBJECT_NULL) || |
8102 | (object->internal && !object->true_share))) || | |
1c79356b | 8103 | entry->needs_copy) { |
3e170ce0 A |
8104 | vm_object_t old_object = VME_OBJECT(entry); |
8105 | vm_object_offset_t old_offset = VME_OFFSET(entry); | |
1c79356b A |
8106 | vm_object_offset_t offset; |
8107 | ||
8108 | /* | |
8109 | * Ensure that the source and destination aren't | |
8110 | * identical | |
8111 | */ | |
3e170ce0 A |
8112 | if (old_object == VME_OBJECT(copy_entry) && |
8113 | old_offset == VME_OFFSET(copy_entry)) { | |
1c79356b A |
8114 | vm_map_copy_entry_unlink(copy, copy_entry); |
8115 | vm_map_copy_entry_dispose(copy, copy_entry); | |
8116 | ||
8117 | if (old_object != VM_OBJECT_NULL) | |
8118 | vm_object_deallocate(old_object); | |
8119 | ||
8120 | start = tmp_entry->vme_end; | |
8121 | tmp_entry = tmp_entry->vme_next; | |
8122 | continue; | |
8123 | } | |
8124 | ||
e2d2fc5c A |
8125 | #define __TRADEOFF1_OBJ_SIZE (64 * 1024 * 1024) /* 64 MB */ |
8126 | #define __TRADEOFF1_COPY_SIZE (128 * 1024) /* 128 KB */ | |
3e170ce0 A |
8127 | if (VME_OBJECT(copy_entry) != VM_OBJECT_NULL && |
8128 | VME_OBJECT(copy_entry)->vo_size >= __TRADEOFF1_OBJ_SIZE && | |
e2d2fc5c A |
8129 | copy_size <= __TRADEOFF1_COPY_SIZE) { |
8130 | /* | |
8131 | * Virtual vs. Physical copy tradeoff #1. | |
8132 | * | |
8133 | * Copying only a few pages out of a large | |
8134 | * object: do a physical copy instead of | |
8135 | * a virtual copy, to avoid possibly keeping | |
8136 | * the entire large object alive because of | |
8137 | * those few copy-on-write pages. | |
8138 | */ | |
8139 | vm_map_copy_overwrite_aligned_src_large++; | |
8140 | goto slow_copy; | |
8141 | } | |
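			/*
			 * [Editor's note] Concretely: with the thresholds
			 * above, copying at most 128 KB out of an object of
			 * 64 MB or more (a source at least 512 times larger
			 * than the copy) takes the physical-copy path.
			 */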
e2d2fc5c | 8142 | |
3e170ce0 A |
8143 | if ((dst_map->pmap != kernel_pmap) && |
8144 | (VME_ALIAS(entry) >= VM_MEMORY_MALLOC) && | |
8145 | (VME_ALIAS(entry) <= VM_MEMORY_MALLOC_LARGE_REUSED)) { | |
ebb1b9f4 A |
8146 | vm_object_t new_object, new_shadow; |
8147 | ||
8148 | /* | |
8149 | * We're about to map something over a mapping | |
8150 | * established by malloc()... | |
8151 | */ | |
3e170ce0 | 8152 | new_object = VME_OBJECT(copy_entry); |
ebb1b9f4 A |
8153 | if (new_object != VM_OBJECT_NULL) { |
8154 | vm_object_lock_shared(new_object); | |
8155 | } | |
8156 | while (new_object != VM_OBJECT_NULL && | |
e2d2fc5c A |
8157 | !new_object->true_share && |
8158 | new_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && | |
ebb1b9f4 A |
8159 | new_object->internal) { |
8160 | new_shadow = new_object->shadow; | |
8161 | if (new_shadow == VM_OBJECT_NULL) { | |
8162 | break; | |
8163 | } | |
8164 | vm_object_lock_shared(new_shadow); | |
8165 | vm_object_unlock(new_object); | |
8166 | new_object = new_shadow; | |
8167 | } | |
8168 | if (new_object != VM_OBJECT_NULL) { | |
8169 | if (!new_object->internal) { | |
8170 | /* | |
8171 | * The new mapping is backed | |
8172 | * by an external object. We | |
8173 | * don't want malloc'ed memory | |
8174 | * to be replaced with such a | |
8175 | * non-anonymous mapping, so | |
8176 | * let's go off the optimized | |
8177 | * path... | |
8178 | */ | |
e2d2fc5c | 8179 | vm_map_copy_overwrite_aligned_src_not_internal++; |
ebb1b9f4 A |
8180 | vm_object_unlock(new_object); |
8181 | goto slow_copy; | |
8182 | } | |
e2d2fc5c A |
8183 | if (new_object->true_share || |
8184 | new_object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) { | |
8185 | /* | |
8186 | * Same if there's a "true_share" | |
8187 | * object in the shadow chain, or | |
8188 | * an object with a non-default | |
8189 | * (SYMMETRIC) copy strategy. | |
8190 | */ | |
8191 | vm_map_copy_overwrite_aligned_src_not_symmetric++; | |
8192 | vm_object_unlock(new_object); | |
8193 | goto slow_copy; | |
8194 | } | |
ebb1b9f4 A |
8195 | vm_object_unlock(new_object); |
8196 | } | |
8197 | /* | |
8198 | * The new mapping is still backed by | |
8199 | * anonymous (internal) memory, so it's | |
8200 | * OK to substitute it for the original | |
8201 | * malloc() mapping. | |
8202 | */ | |
8203 | } | |
8204 | ||
1c79356b A |
8205 | if (old_object != VM_OBJECT_NULL) { |
8206 | if(entry->is_sub_map) { | |
9bccf70c | 8207 | if(entry->use_pmap) { |
0c530ab8 | 8208 | #ifndef NO_NESTED_PMAP |
9bccf70c | 8209 | pmap_unnest(dst_map->pmap, |
2d21ac55 A |
8210 | (addr64_t)entry->vme_start, |
8211 | entry->vme_end - entry->vme_start); | |
0c530ab8 | 8212 | #endif /* NO_NESTED_PMAP */ |
316670eb | 8213 | if(dst_map->mapped_in_other_pmaps) { |
9bccf70c A |
8214 | /* clean up parent */ |
8215 | /* map/maps */ | |
2d21ac55 A |
8216 | vm_map_submap_pmap_clean( |
8217 | dst_map, entry->vme_start, | |
8218 | entry->vme_end, | |
3e170ce0 A |
8219 | VME_SUBMAP(entry), |
8220 | VME_OFFSET(entry)); | |
9bccf70c A |
8221 | } |
8222 | } else { | |
8223 | vm_map_submap_pmap_clean( | |
8224 | dst_map, entry->vme_start, | |
8225 | entry->vme_end, | |
3e170ce0 A |
8226 | VME_SUBMAP(entry), |
8227 | VME_OFFSET(entry)); | |
9bccf70c | 8228 | } |
3e170ce0 | 8229 | vm_map_deallocate(VME_SUBMAP(entry)); |
9bccf70c | 8230 | } else { |
316670eb | 8231 | if(dst_map->mapped_in_other_pmaps) { |
39236c6e | 8232 | vm_object_pmap_protect_options( |
3e170ce0 A |
8233 | VME_OBJECT(entry), |
8234 | VME_OFFSET(entry), | |
9bccf70c | 8235 | entry->vme_end |
2d21ac55 | 8236 | - entry->vme_start, |
9bccf70c A |
8237 | PMAP_NULL, |
8238 | entry->vme_start, | |
39236c6e A |
8239 | VM_PROT_NONE, |
8240 | PMAP_OPTIONS_REMOVE); | |
9bccf70c | 8241 | } else { |
39236c6e A |
8242 | pmap_remove_options( |
8243 | dst_map->pmap, | |
8244 | (addr64_t)(entry->vme_start), | |
8245 | (addr64_t)(entry->vme_end), | |
8246 | PMAP_OPTIONS_REMOVE); | |
9bccf70c | 8247 | } |
1c79356b | 8248 | vm_object_deallocate(old_object); |
9bccf70c | 8249 | } |
1c79356b A |
8250 | } |
8251 | ||
8252 | entry->is_sub_map = FALSE; | |
3e170ce0 A |
8253 | VME_OBJECT_SET(entry, VME_OBJECT(copy_entry)); |
8254 | object = VME_OBJECT(entry); | |
1c79356b A |
8255 | entry->needs_copy = copy_entry->needs_copy; |
8256 | entry->wired_count = 0; | |
8257 | entry->user_wired_count = 0; | |
3e170ce0 A |
8258 | offset = VME_OFFSET(copy_entry); |
8259 | VME_OFFSET_SET(entry, offset); | |
1c79356b A |
8260 | |
8261 | vm_map_copy_entry_unlink(copy, copy_entry); | |
8262 | vm_map_copy_entry_dispose(copy, copy_entry); | |
2d21ac55 | 8263 | |
1c79356b | 8264 | /* |
2d21ac55 | 8265 | * we could try to push pages into the pmap at this point, BUT |
1c79356b A |
8266 | * this optimization only saved on average 2 us per page if ALL | |
8267 | * the pages in the source were currently mapped | |
8268 | * and ALL the pages in the dest were touched; if fewer than 2/3 | |
8269 | * of the pages were touched, this optimization actually cost more cycles; | |
2d21ac55 | 8270 | * it also puts a lot of pressure on the pmap layer w.r.t. mapping structures |
1c79356b A |
8271 | */ |
8272 | ||
1c79356b A |
8273 | /* |
8274 | * Set up for the next iteration. The map | |
8275 | * has not been unlocked, so the next | |
8276 | * address should be at the end of this | |
8277 | * entry, and the next map entry should be | |
8278 | * the one following it. | |
8279 | */ | |
8280 | ||
8281 | start = tmp_entry->vme_end; | |
8282 | tmp_entry = tmp_entry->vme_next; | |
8283 | } else { | |
8284 | vm_map_version_t version; | |
ebb1b9f4 A |
8285 | vm_object_t dst_object; |
8286 | vm_object_offset_t dst_offset; | |
1c79356b A |
8287 | kern_return_t r; |
8288 | ||
ebb1b9f4 | 8289 | slow_copy: |
e2d2fc5c | 8290 | if (entry->needs_copy) { |
3e170ce0 A |
8291 | VME_OBJECT_SHADOW(entry, |
8292 | (entry->vme_end - | |
8293 | entry->vme_start)); | |
e2d2fc5c A |
8294 | entry->needs_copy = FALSE; |
8295 | } | |
8296 | ||
3e170ce0 A |
8297 | dst_object = VME_OBJECT(entry); |
8298 | dst_offset = VME_OFFSET(entry); | |
ebb1b9f4 | 8299 | |
1c79356b A |
8300 | /* |
8301 | * Take an object reference, and record | |
8302 | * the map version information so that the | |
8303 | * map can be safely unlocked. | |
8304 | */ | |
8305 | ||
ebb1b9f4 A |
8306 | if (dst_object == VM_OBJECT_NULL) { |
8307 | /* | |
8308 | * We would usually have just taken the | |
8309 | * optimized path above if the destination | |
8310 | * object has not been allocated yet. But we | |
8311 | * now disable that optimization if the copy | |
8312 | * entry's object is not backed by anonymous | |
8313 | * memory to avoid replacing malloc'ed | |
8314 | * (i.e. re-usable) anonymous memory with a | |
8315 | * not-so-anonymous mapping. | |
8316 | * So we have to handle this case here and | |
8317 | * allocate a new VM object for this map entry. | |
8318 | */ | |
8319 | dst_object = vm_object_allocate( | |
8320 | entry->vme_end - entry->vme_start); | |
8321 | dst_offset = 0; | |
3e170ce0 A |
8322 | VME_OBJECT_SET(entry, dst_object); |
8323 | VME_OFFSET_SET(entry, dst_offset); | |
fe8ab488 | 8324 | assert(entry->use_pmap); |
ebb1b9f4 A |
8325 | |
8326 | } | |
8327 | ||
1c79356b A |
8328 | vm_object_reference(dst_object); |
8329 | ||
9bccf70c A |
8330 | /* account for unlock bumping up timestamp */ |
8331 | version.main_timestamp = dst_map->timestamp + 1; | |
1c79356b A |
8332 | |
8333 | vm_map_unlock(dst_map); | |
8334 | ||
8335 | /* | |
8336 | * Copy as much as possible in one pass | |
8337 | */ | |
8338 | ||
8339 | copy_size = size; | |
8340 | r = vm_fault_copy( | |
3e170ce0 A |
8341 | VME_OBJECT(copy_entry), |
8342 | VME_OFFSET(copy_entry), | |
2d21ac55 A |
8343 | &copy_size, | |
8344 | dst_object, | |
8345 | dst_offset, | |
8346 | dst_map, | |
8347 | &version, | |
8348 | THREAD_UNINT ); | |
1c79356b A |
8349 | |
8350 | /* | |
8351 | * Release the object reference | |
8352 | */ | |
8353 | ||
8354 | vm_object_deallocate(dst_object); | |
8355 | ||
8356 | /* | |
8357 | * If a hard error occurred, return it now | |
8358 | */ | |
8359 | ||
8360 | if (r != KERN_SUCCESS) | |
8361 | return(r); | |
8362 | ||
8363 | if (copy_size != 0) { | |
8364 | /* | |
8365 | * Dispose of the copied region | |
8366 | */ | |
8367 | ||
8368 | vm_map_copy_clip_end(copy, copy_entry, | |
2d21ac55 | 8369 | copy_entry->vme_start + copy_size); |
1c79356b | 8370 | vm_map_copy_entry_unlink(copy, copy_entry); |
3e170ce0 | 8371 | vm_object_deallocate(VME_OBJECT(copy_entry)); |
1c79356b A |
8372 | vm_map_copy_entry_dispose(copy, copy_entry); |
8373 | } | |
8374 | ||
8375 | /* | |
8376 | * Pick up in the destination map where we left off. | |
8377 | * | |
8378 | * Use the version information to avoid a lookup | |
8379 | * in the normal case. | |
8380 | */ | |
8381 | ||
8382 | start += copy_size; | |
8383 | vm_map_lock(dst_map); | |
e2d2fc5c A |
8384 | if (version.main_timestamp == dst_map->timestamp && |
8385 | copy_size != 0) { | |
1c79356b A |
8386 | /* We can safely use saved tmp_entry value */ |
8387 | ||
fe8ab488 A |
8388 | if (tmp_entry->map_aligned && |
8389 | !VM_MAP_PAGE_ALIGNED( | |
8390 | start, | |
8391 | VM_MAP_PAGE_MASK(dst_map))) { | |
8392 | /* no longer map-aligned */ | |
8393 | tmp_entry->map_aligned = FALSE; | |
8394 | } | |
1c79356b A |
8395 | vm_map_clip_end(dst_map, tmp_entry, start); |
8396 | tmp_entry = tmp_entry->vme_next; | |
8397 | } else { | |
8398 | /* Must do lookup of tmp_entry */ | |
8399 | ||
8400 | if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) { | |
8401 | vm_map_unlock(dst_map); | |
8402 | return(KERN_INVALID_ADDRESS); | |
8403 | } | |
fe8ab488 A |
8404 | if (tmp_entry->map_aligned && |
8405 | !VM_MAP_PAGE_ALIGNED( | |
8406 | start, | |
8407 | VM_MAP_PAGE_MASK(dst_map))) { | |
8408 | /* no longer map-aligned */ | |
8409 | tmp_entry->map_aligned = FALSE; | |
8410 | } | |
1c79356b A |
8411 | vm_map_clip_start(dst_map, tmp_entry, start); |
8412 | } | |
8413 | } | |
8414 | }/* while */ | |
8415 | ||
8416 | return(KERN_SUCCESS); | |
8417 | }/* vm_map_copy_overwrite_aligned */ | |
8418 | ||
8419 | /* | |
91447636 | 8420 | * Routine: vm_map_copyin_kernel_buffer [internal use only] |
1c79356b A |
8421 | * |
8422 | * Description: | |
8423 | * Copy in data to a kernel buffer from space in the | |
91447636 | 8424 | * source map. The original space may be optionally |
1c79356b A |
8425 | * deallocated. |
8426 | * | |
8427 | * If successful, returns a new copy object. | |
8428 | */ | |
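/*
 * [Editor's note] Layout of the kernel-buffer copy built below, for
 * reference (illustrative): the vm_map_copy_t header and the copied bytes
 * share one kalloc() allocation, which is why "len" is capped at
 * msg_ool_size_small:
 *
 *	+---------------------+--------------------------+
 *	| vm_map_copy header  | inline data (cpy_kdata)  |
 *	+---------------------+--------------------------+
 *	 <- cpy_kdata_hdr_sz -> <---------- len --------->
 */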
91447636 | 8429 | static kern_return_t |
1c79356b A |
8430 | vm_map_copyin_kernel_buffer( |
8431 | vm_map_t src_map, | |
91447636 A |
8432 | vm_map_offset_t src_addr, |
8433 | vm_map_size_t len, | |
1c79356b A |
8434 | boolean_t src_destroy, |
8435 | vm_map_copy_t *copy_result) | |
8436 | { | |
91447636 | 8437 | kern_return_t kr; |
1c79356b | 8438 | vm_map_copy_t copy; |
b0d623f7 A |
8439 | vm_size_t kalloc_size; |
8440 | ||
3e170ce0 A |
8441 | if (len > msg_ool_size_small) |
8442 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 8443 | |
3e170ce0 A |
8444 | kalloc_size = (vm_size_t)(cpy_kdata_hdr_sz + len); |
8445 | ||
8446 | copy = (vm_map_copy_t)kalloc(kalloc_size); | |
8447 | if (copy == VM_MAP_COPY_NULL) | |
1c79356b | 8448 | return KERN_RESOURCE_SHORTAGE; |
1c79356b A |
8449 | copy->type = VM_MAP_COPY_KERNEL_BUFFER; |
8450 | copy->size = len; | |
8451 | copy->offset = 0; | |
1c79356b | 8452 | |
3e170ce0 | 8453 | kr = copyinmap(src_map, src_addr, copy->cpy_kdata, (vm_size_t)len); |
91447636 A |
8454 | if (kr != KERN_SUCCESS) { |
8455 | kfree(copy, kalloc_size); | |
8456 | return kr; | |
1c79356b A |
8457 | } |
8458 | if (src_destroy) { | |
39236c6e A |
8459 | (void) vm_map_remove( |
8460 | src_map, | |
8461 | vm_map_trunc_page(src_addr, | |
8462 | VM_MAP_PAGE_MASK(src_map)), | |
8463 | vm_map_round_page(src_addr + len, | |
8464 | VM_MAP_PAGE_MASK(src_map)), | |
8465 | (VM_MAP_REMOVE_INTERRUPTIBLE | | |
8466 | VM_MAP_REMOVE_WAIT_FOR_KWIRE | | |
8467 | (src_map == kernel_map) ? VM_MAP_REMOVE_KUNWIRE : 0)); | |
1c79356b A |
8468 | } |
8469 | *copy_result = copy; | |
8470 | return KERN_SUCCESS; | |
8471 | } | |
8472 | ||
8473 | /* | |
91447636 | 8474 | * Routine: vm_map_copyout_kernel_buffer [internal use only] |
1c79356b A |
8475 | * |
8476 | * Description: | |
8477 | * Copy out data from a kernel buffer into space in the | |
8478 | * destination map. The space may be optionally dynamically | |
8479 | * allocated. | |
8480 | * | |
8481 | * If successful, consumes the copy object. | |
8482 | * Otherwise, the caller is responsible for it. | |
8483 | */ | |
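/*
 * [Editor's note] The routine below takes one of two copyout paths; a
 * condensed sketch of its logic (error handling elided):
 *
 *	if (current_thread()->map == map) {
 *		copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size);
 *	} else {
 *		vm_map_reference(map);
 *		oldmap = vm_map_switch(map);	// adopt the target's address space
 *		copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size);
 *		(void) vm_map_switch(oldmap);
 *		vm_map_deallocate(map);
 *	}
 */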
91447636 A |
8484 | static int vm_map_copyout_kernel_buffer_failures = 0; |
8485 | static kern_return_t | |
1c79356b | 8486 | vm_map_copyout_kernel_buffer( |
91447636 A |
8487 | vm_map_t map, |
8488 | vm_map_address_t *addr, /* IN/OUT */ | |
8489 | vm_map_copy_t copy, | |
39236c6e A |
8490 | boolean_t overwrite, |
8491 | boolean_t consume_on_success) | |
1c79356b A |
8492 | { |
8493 | kern_return_t kr = KERN_SUCCESS; | |
91447636 | 8494 | thread_t thread = current_thread(); |
1c79356b | 8495 | |
3e170ce0 A |
8496 | /* |
8497 | * check for corrupted vm_map_copy structure | |
8498 | */ | |
8499 | if (copy->size > msg_ool_size_small || copy->offset) | |
8500 | panic("Invalid vm_map_copy_t sz:%lld, ofst:%lld", | |
8501 | (long long)copy->size, (long long)copy->offset); | |
8502 | ||
1c79356b A |
8503 | if (!overwrite) { |
8504 | ||
8505 | /* | |
8506 | * Allocate space in the target map for the data | |
8507 | */ | |
8508 | *addr = 0; | |
8509 | kr = vm_map_enter(map, | |
8510 | addr, | |
39236c6e A |
8511 | vm_map_round_page(copy->size, |
8512 | VM_MAP_PAGE_MASK(map)), | |
91447636 A |
8513 | (vm_map_offset_t) 0, |
8514 | VM_FLAGS_ANYWHERE, | |
1c79356b A |
8515 | VM_OBJECT_NULL, |
8516 | (vm_object_offset_t) 0, | |
8517 | FALSE, | |
8518 | VM_PROT_DEFAULT, | |
8519 | VM_PROT_ALL, | |
8520 | VM_INHERIT_DEFAULT); | |
8521 | if (kr != KERN_SUCCESS) | |
91447636 | 8522 | return kr; |
1c79356b A |
8523 | } |
8524 | ||
8525 | /* | |
8526 | * Copyout the data from the kernel buffer to the target map. | |
8527 | */ | |
91447636 | 8528 | if (thread->map == map) { |
1c79356b A |
8529 | |
8530 | /* | |
8531 | * If the target map is the current map, just do | |
8532 | * the copy. | |
8533 | */ | |
b0d623f7 A |
8534 | assert((vm_size_t) copy->size == copy->size); |
8535 | if (copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size)) { | |
91447636 | 8536 | kr = KERN_INVALID_ADDRESS; |
1c79356b A |
8537 | } |
8538 | } | |
8539 | else { | |
8540 | vm_map_t oldmap; | |
8541 | ||
8542 | /* | |
8543 | * If the target map is another map, assume the | |
8544 | * target's address space identity for the duration | |
8545 | * of the copy. | |
8546 | */ | |
8547 | vm_map_reference(map); | |
8548 | oldmap = vm_map_switch(map); | |
8549 | ||
b0d623f7 A |
8550 | assert((vm_size_t) copy->size == copy->size); |
8551 | if (copyout(copy->cpy_kdata, *addr, (vm_size_t) copy->size)) { | |
91447636 A |
8552 | vm_map_copyout_kernel_buffer_failures++; |
8553 | kr = KERN_INVALID_ADDRESS; | |
1c79356b A |
8554 | } |
8555 | ||
8556 | (void) vm_map_switch(oldmap); | |
8557 | vm_map_deallocate(map); | |
8558 | } | |
8559 | ||
91447636 A |
8560 | if (kr != KERN_SUCCESS) { |
8561 | /* the copy failed, clean up */ | |
8562 | if (!overwrite) { | |
8563 | /* | |
8564 | * Deallocate the space we allocated in the target map. | |
8565 | */ | |
39236c6e A |
8566 | (void) vm_map_remove( |
8567 | map, | |
8568 | vm_map_trunc_page(*addr, | |
8569 | VM_MAP_PAGE_MASK(map)), | |
8570 | vm_map_round_page((*addr + | |
8571 | vm_map_round_page(copy->size, | |
8572 | VM_MAP_PAGE_MASK(map))), | |
8573 | VM_MAP_PAGE_MASK(map)), | |
8574 | VM_MAP_NO_FLAGS); | |
91447636 A |
8575 | *addr = 0; |
8576 | } | |
8577 | } else { | |
8578 | /* copy was successful, discard the copy structure */ | |
39236c6e | 8579 | if (consume_on_success) { |
3e170ce0 | 8580 | kfree(copy, copy->size + cpy_kdata_hdr_sz); |
39236c6e | 8581 | } |
91447636 | 8582 | } |
1c79356b | 8583 | |
91447636 | 8584 | return kr; |
1c79356b A |
8585 | } |
8586 | ||
8587 | /* | |
8588 | * Macro: vm_map_copy_insert | |
8589 | * | |
8590 | * Description: | |
8591 | * Link a copy chain ("copy") into a map at the | |
8592 | * specified location (after "where"). | |
8593 | * Side effects: | |
8594 | * The copy chain is destroyed. | |
8595 | * Warning: | |
8596 | * The arguments are evaluated multiple times. | |
8597 | */ | |
8598 | #define vm_map_copy_insert(map, where, copy) \ | |
8599 | MACRO_BEGIN \ | |
6d2010ae A |
8600 | vm_map_store_copy_insert(map, where, copy); \ |
8601 | zfree(vm_map_copy_zone, copy); \ | |
1c79356b A |
8602 | MACRO_END |
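/*
 * [Editor's note] Since the macro expands its arguments more than once,
 * callers should pass simple lvalues; e.g. prefer
 *
 *	entry = <lookup the insertion point>;
 *	vm_map_copy_insert(map, entry, copy);
 *
 * over passing an expression with side effects as "where" (hypothetical
 * usage, for illustration only).
 */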
8603 | ||
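/*
 * [Editor's note] vm_map_copy_remap carries no header comment in the
 * source; in brief (a hedged summary of the code below): clone each entry
 * of "copy" into "map" after "where", shifting vme_start/vme_end by
 * "adjustment" and applying the given protections and inheritance,
 * without consuming "copy" itself.
 */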
39236c6e A |
8604 | void |
8605 | vm_map_copy_remap( | |
8606 | vm_map_t map, | |
8607 | vm_map_entry_t where, | |
8608 | vm_map_copy_t copy, | |
8609 | vm_map_offset_t adjustment, | |
8610 | vm_prot_t cur_prot, | |
8611 | vm_prot_t max_prot, | |
8612 | vm_inherit_t inheritance) | |
8613 | { | |
8614 | vm_map_entry_t copy_entry, new_entry; | |
8615 | ||
8616 | for (copy_entry = vm_map_copy_first_entry(copy); | |
8617 | copy_entry != vm_map_copy_to_entry(copy); | |
8618 | copy_entry = copy_entry->vme_next) { | |
8619 | /* get a new VM map entry for the map */ | |
8620 | new_entry = vm_map_entry_create(map, | |
8621 | !map->hdr.entries_pageable); | |
8622 | /* copy the "copy entry" to the new entry */ | |
8623 | vm_map_entry_copy(new_entry, copy_entry); | |
8624 | /* adjust "start" and "end" */ | |
8625 | new_entry->vme_start += adjustment; | |
8626 | new_entry->vme_end += adjustment; | |
8627 | /* clear some attributes */ | |
8628 | new_entry->inheritance = inheritance; | |
8629 | new_entry->protection = cur_prot; | |
8630 | new_entry->max_protection = max_prot; | |
8631 | new_entry->behavior = VM_BEHAVIOR_DEFAULT; | |
8632 | /* take an extra reference on the entry's "object" */ | |
8633 | if (new_entry->is_sub_map) { | |
fe8ab488 | 8634 | assert(!new_entry->use_pmap); /* not nested */ |
3e170ce0 A |
8635 | vm_map_lock(VME_SUBMAP(new_entry)); |
8636 | vm_map_reference(VME_SUBMAP(new_entry)); | |
8637 | vm_map_unlock(VME_SUBMAP(new_entry)); | |
39236c6e | 8638 | } else { |
3e170ce0 | 8639 | vm_object_reference(VME_OBJECT(new_entry)); |
39236c6e A |
8640 | } |
8641 | /* insert the new entry in the map */ | |
8642 | vm_map_store_entry_link(map, where, new_entry); | |
8643 | /* continue inserting the "copy entries" after the new entry */ | |
8644 | where = new_entry; | |
8645 | } | |
8646 | } | |
8647 | ||
2dced7af A |
8648 | |
8649 | boolean_t | |
8650 | vm_map_copy_validate_size( | |
8651 | vm_map_t dst_map, | |
8652 | vm_map_copy_t copy, | |
8653 | vm_map_size_t size) | |
8654 | { | |
8655 | if (copy == VM_MAP_COPY_NULL) | |
8656 | return FALSE; | |
8657 | switch (copy->type) { | |
8658 | case VM_MAP_COPY_OBJECT: | |
8659 | case VM_MAP_COPY_KERNEL_BUFFER: | |
8660 | if (size == copy->size) | |
8661 | return TRUE; | |
8662 | break; | |
8663 | case VM_MAP_COPY_ENTRY_LIST: | |
8664 | /* | |
8665 | * potential page-size rounding prevents us from exactly | |
8666 | * validating this flavor of vm_map_copy, but we can at least | |
8667 | * assert that it's within a range. | |
8668 | */ | |
8669 | if (copy->size >= size && | |
8670 | copy->size <= vm_map_round_page(size, | |
8671 | VM_MAP_PAGE_MASK(dst_map))) | |
8672 | return TRUE; | |
8673 | break; | |
8674 | default: | |
8675 | break; | |
8676 | } | |
8677 | return FALSE; | |
8678 | } | |
8679 | ||
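/*
 * [Editor's note] A hypothetical caller pattern (illustrative only):
 * validate a received copy's size against the expected length before
 * overwriting, since entry-list copies may be page-rounded:
 *
 *	if (!vm_map_copy_validate_size(dst_map, copy, len))
 *		return KERN_INVALID_ARGUMENT;
 *	kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible);
 */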
8680 | ||
1c79356b A |
8681 | /* |
8682 | * Routine: vm_map_copyout | |
8683 | * | |
8684 | * Description: | |
8685 | * Copy out a copy chain ("copy") into newly-allocated | |
8686 | * space in the destination map. | |
8687 | * | |
8688 | * If successful, consumes the copy object. | |
8689 | * Otherwise, the caller is responsible for it. | |
8690 | */ | |
39236c6e | 8691 | |
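/*
 * [Editor's note] A minimal, hypothetical round trip between two maps
 * using the public entry points (error paths abbreviated):
 *
 *	vm_map_copy_t copy;
 *	vm_map_address_t dst_addr = 0;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *	if (kr != KERN_SUCCESS)
 *		vm_map_copy_discard(copy);	/* still caller-owned on failure */
 */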
1c79356b A |
8692 | kern_return_t |
8693 | vm_map_copyout( | |
91447636 A |
8694 | vm_map_t dst_map, |
8695 | vm_map_address_t *dst_addr, /* OUT */ | |
8696 | vm_map_copy_t copy) | |
39236c6e A |
8697 | { |
8698 | return vm_map_copyout_internal(dst_map, dst_addr, copy, | |
8699 | TRUE, /* consume_on_success */ | |
8700 | VM_PROT_DEFAULT, | |
8701 | VM_PROT_ALL, | |
8702 | VM_INHERIT_DEFAULT); | |
8703 | } | |
8704 | ||
8705 | kern_return_t | |
8706 | vm_map_copyout_internal( | |
8707 | vm_map_t dst_map, | |
8708 | vm_map_address_t *dst_addr, /* OUT */ | |
8709 | vm_map_copy_t copy, | |
8710 | boolean_t consume_on_success, | |
8711 | vm_prot_t cur_protection, | |
8712 | vm_prot_t max_protection, | |
8713 | vm_inherit_t inheritance) | |
1c79356b | 8714 | { |
91447636 A |
8715 | vm_map_size_t size; |
8716 | vm_map_size_t adjustment; | |
8717 | vm_map_offset_t start; | |
1c79356b A |
8718 | vm_object_offset_t vm_copy_start; |
8719 | vm_map_entry_t last; | |
1c79356b | 8720 | vm_map_entry_t entry; |
3e170ce0 | 8721 | vm_map_entry_t hole_entry; |
1c79356b A |
8722 | |
8723 | /* | |
8724 | * Check for null copy object. | |
8725 | */ | |
8726 | ||
8727 | if (copy == VM_MAP_COPY_NULL) { | |
8728 | *dst_addr = 0; | |
8729 | return(KERN_SUCCESS); | |
8730 | } | |
8731 | ||
8732 | /* | |
8733 | * Check for special copy object, created | |
8734 | * by vm_map_copyin_object. | |
8735 | */ | |
8736 | ||
8737 | if (copy->type == VM_MAP_COPY_OBJECT) { | |
8738 | vm_object_t object = copy->cpy_object; | |
8739 | kern_return_t kr; | |
8740 | vm_object_offset_t offset; | |
8741 | ||
91447636 | 8742 | offset = vm_object_trunc_page(copy->offset); |
39236c6e A |
8743 | size = vm_map_round_page((copy->size + |
8744 | (vm_map_size_t)(copy->offset - | |
8745 | offset)), | |
8746 | VM_MAP_PAGE_MASK(dst_map)); | |
1c79356b A |
8747 | *dst_addr = 0; |
8748 | kr = vm_map_enter(dst_map, dst_addr, size, | |
91447636 | 8749 | (vm_map_offset_t) 0, VM_FLAGS_ANYWHERE, |
1c79356b A |
8750 | object, offset, FALSE, |
8751 | VM_PROT_DEFAULT, VM_PROT_ALL, | |
8752 | VM_INHERIT_DEFAULT); | |
8753 | if (kr != KERN_SUCCESS) | |
8754 | return(kr); | |
8755 | /* Account for non-pagealigned copy object */ | |
91447636 | 8756 | *dst_addr += (vm_map_offset_t)(copy->offset - offset); |
39236c6e A |
8757 | if (consume_on_success) |
8758 | zfree(vm_map_copy_zone, copy); | |
1c79356b A |
8759 | return(KERN_SUCCESS); |
8760 | } | |
8761 | ||
8762 | /* | |
8763 | * Check for special kernel buffer allocated | |
8764 | * by new_ipc_kmsg_copyin. | |
8765 | */ | |
8766 | ||
8767 | if (copy->type == VM_MAP_COPY_KERNEL_BUFFER) { | |
39236c6e A |
8768 | return vm_map_copyout_kernel_buffer(dst_map, dst_addr, |
8769 | copy, FALSE, | |
8770 | consume_on_success); | |
1c79356b A |
8771 | } |
8772 | ||
39236c6e | 8773 | |
1c79356b A |
8774 | /* |
8775 | * Find space for the data | |
8776 | */ | |
8777 | ||
39236c6e A |
8778 | vm_copy_start = vm_map_trunc_page((vm_map_size_t)copy->offset, |
8779 | VM_MAP_COPY_PAGE_MASK(copy)); | |
8780 | size = vm_map_round_page((vm_map_size_t)copy->offset + copy->size, | |
8781 | VM_MAP_COPY_PAGE_MASK(copy)) | |
2d21ac55 | 8782 | - vm_copy_start; |
1c79356b | 8783 | |
39236c6e | 8784 | |
2d21ac55 | 8785 | StartAgain: ; |
1c79356b A |
8786 | |
8787 | vm_map_lock(dst_map); | |
6d2010ae A |
8788 | if( dst_map->disable_vmentry_reuse == TRUE) { |
8789 | VM_MAP_HIGHEST_ENTRY(dst_map, entry, start); | |
8790 | last = entry; | |
8791 | } else { | |
3e170ce0 A |
8792 | if (dst_map->holelistenabled) { |
8793 | hole_entry = (vm_map_entry_t)dst_map->holes_list; | |
8794 | ||
8795 | if (hole_entry == NULL) { | |
8796 | /* | |
8797 | * No more space in the map? | |
8798 | */ | |
8799 | vm_map_unlock(dst_map); | |
8800 | return(KERN_NO_SPACE); | |
8801 | } | |
8802 | ||
8803 | last = hole_entry; | |
8804 | start = last->vme_start; | |
8805 | } else { | |
8806 | assert(first_free_is_valid(dst_map)); | |
8807 | start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)) ? | |
8808 | vm_map_min(dst_map) : last->vme_end; | |
8809 | } | |
39236c6e A |
8810 | start = vm_map_round_page(start, |
8811 | VM_MAP_PAGE_MASK(dst_map)); | |
6d2010ae | 8812 | } |
1c79356b A |
8813 | |
8814 | while (TRUE) { | |
8815 | vm_map_entry_t next = last->vme_next; | |
91447636 | 8816 | vm_map_offset_t end = start + size; |
1c79356b A |
8817 | |
8818 | if ((end > dst_map->max_offset) || (end < start)) { | |
8819 | if (dst_map->wait_for_space) { | |
8820 | if (size <= (dst_map->max_offset - dst_map->min_offset)) { | |
8821 | assert_wait((event_t) dst_map, | |
8822 | THREAD_INTERRUPTIBLE); | |
8823 | vm_map_unlock(dst_map); | |
91447636 | 8824 | thread_block(THREAD_CONTINUE_NULL); |
1c79356b A |
8825 | goto StartAgain; |
8826 | } | |
8827 | } | |
8828 | vm_map_unlock(dst_map); | |
8829 | return(KERN_NO_SPACE); | |
8830 | } | |
8831 | ||
3e170ce0 A |
8832 | if (dst_map->holelistenabled) { |
8833 | if (last->vme_end >= end) | |
8834 | break; | |
8835 | } else { | |
8836 | /* | |
8837 | * If there are no more entries, we must win. | |
8838 | * | |
8839 | * OR | |
8840 | * | |
8841 | * If there is another entry, it must be | |
8842 | * after the end of the potential new region. | |
8843 | */ | |
8844 | ||
8845 | if (next == vm_map_to_entry(dst_map)) | |
8846 | break; | |
8847 | ||
8848 | if (next->vme_start >= end) | |
8849 | break; | |
8850 | } | |
1c79356b A |
8851 | |
8852 | last = next; | |
3e170ce0 A |
8853 | |
8854 | if (dst_map->holelistenabled) { | |
8855 | if (last == (vm_map_entry_t) dst_map->holes_list) { | |
8856 | /* | |
8857 | * Wrapped around | |
8858 | */ | |
8859 | vm_map_unlock(dst_map); | |
8860 | return(KERN_NO_SPACE); | |
8861 | } | |
8862 | start = last->vme_start; | |
8863 | } else { | |
8864 | start = last->vme_end; | |
8865 | } | |
39236c6e A |
8866 | start = vm_map_round_page(start, |
8867 | VM_MAP_PAGE_MASK(dst_map)); | |
8868 | } | |
8869 | ||
3e170ce0 A |
8870 | if (dst_map->holelistenabled) { |
8871 | if (vm_map_lookup_entry(dst_map, last->vme_start, &last)) { | |
8872 | panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", last, (unsigned long long)last->vme_start); | |
8873 | } | |
8874 | } | |
8875 | ||
8876 | ||
39236c6e A |
8877 | adjustment = start - vm_copy_start; |
8878 | if (! consume_on_success) { | |
8879 | /* | |
8880 | * We're not allowed to consume "copy", so we'll have to | |
8881 | * copy its map entries into the destination map below. | |
8882 | * No need to re-allocate map entries from the correct | |
8883 | * (pageable or not) zone, since we'll get new map entries | |
8884 | * during the transfer. | |
8885 | * We'll also adjust the map entries's "start" and "end" | |
8886 | * during the transfer, to keep "copy"'s entries consistent | |
8887 | * with its "offset". | |
8888 | */ | |
8889 | goto after_adjustments; | |
1c79356b A |
8890 | } |
8891 | ||
8892 | /* | |
8893 | * Since we're going to just drop the map | |
8894 | * entries from the copy into the destination | |
8895 | * map, they must come from the same pool. | |
8896 | */ | |
8897 | ||
8898 | if (copy->cpy_hdr.entries_pageable != dst_map->hdr.entries_pageable) { | |
2d21ac55 A |
8899 | /* |
8900 | * Mismatches occur when dealing with the default | |
8901 | * pager. | |
8902 | */ | |
8903 | zone_t old_zone; | |
8904 | vm_map_entry_t next, new; | |
8905 | ||
8906 | /* | |
8907 | * Find the zone that the copies were allocated from | |
8908 | */ | |
7ddcb079 | 8909 | |
2d21ac55 A |
8910 | entry = vm_map_copy_first_entry(copy); |
8911 | ||
8912 | /* | |
8913 | * Reinitialize the copy so that vm_map_copy_entry_link | |
8914 | * will work. | |
8915 | */ | |
6d2010ae | 8916 | vm_map_store_copy_reset(copy, entry); |
2d21ac55 | 8917 | copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable; |
2d21ac55 A |
8918 | |
8919 | /* | |
8920 | * Copy each entry. | |
8921 | */ | |
8922 | while (entry != vm_map_copy_to_entry(copy)) { | |
7ddcb079 | 8923 | new = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable); |
2d21ac55 | 8924 | vm_map_entry_copy_full(new, entry); |
fe8ab488 A |
8925 | assert(!new->iokit_acct); |
8926 | if (new->is_sub_map) { | |
8927 | /* clr address space specifics */ | |
8928 | new->use_pmap = FALSE; | |
8929 | } | |
2d21ac55 A |
8930 | vm_map_copy_entry_link(copy, |
8931 | vm_map_copy_last_entry(copy), | |
8932 | new); | |
8933 | next = entry->vme_next; | |
7ddcb079 | 8934 | old_zone = entry->from_reserved_zone ? vm_map_entry_reserved_zone : vm_map_entry_zone; |
2d21ac55 A |
8935 | zfree(old_zone, entry); |
8936 | entry = next; | |
8937 | } | |
1c79356b A |
8938 | } |
8939 | ||
8940 | /* | |
8941 | * Adjust the addresses in the copy chain, and | |
8942 | * reset the region attributes. | |
8943 | */ | |
8944 | ||
1c79356b A |
8945 | for (entry = vm_map_copy_first_entry(copy); |
8946 | entry != vm_map_copy_to_entry(copy); | |
8947 | entry = entry->vme_next) { | |
39236c6e A |
8948 | if (VM_MAP_PAGE_SHIFT(dst_map) == PAGE_SHIFT) { |
8949 | /* | |
8950 | * We're injecting this copy entry into a map that | |
8951 | * has the standard page alignment, so clear | |
8952 | * "map_aligned" (which might have been inherited | |
8953 | * from the original map entry). | |
8954 | */ | |
8955 | entry->map_aligned = FALSE; | |
8956 | } | |
8957 | ||
1c79356b A |
8958 | entry->vme_start += adjustment; |
8959 | entry->vme_end += adjustment; | |
8960 | ||
39236c6e A |
8961 | if (entry->map_aligned) { |
8962 | assert(VM_MAP_PAGE_ALIGNED(entry->vme_start, | |
8963 | VM_MAP_PAGE_MASK(dst_map))); | |
8964 | assert(VM_MAP_PAGE_ALIGNED(entry->vme_end, | |
8965 | VM_MAP_PAGE_MASK(dst_map))); | |
8966 | } | |
8967 | ||
1c79356b A |
8968 | entry->inheritance = VM_INHERIT_DEFAULT; |
8969 | entry->protection = VM_PROT_DEFAULT; | |
8970 | entry->max_protection = VM_PROT_ALL; | |
8971 | entry->behavior = VM_BEHAVIOR_DEFAULT; | |
8972 | ||
8973 | /* | |
8974 | * If the entry is now wired, | |
8975 | * map the pages into the destination map. | |
8976 | */ | |
8977 | if (entry->wired_count != 0) { | |
2d21ac55 A |
8978 | register vm_map_offset_t va; |
8979 | vm_object_offset_t offset; | |
8980 | register vm_object_t object; | |
8981 | vm_prot_t prot; | |
8982 | int type_of_fault; | |
1c79356b | 8983 | |
3e170ce0 A |
8984 | object = VME_OBJECT(entry); |
8985 | offset = VME_OFFSET(entry); | |
2d21ac55 | 8986 | va = entry->vme_start; |
1c79356b | 8987 | |
2d21ac55 A |
8988 | pmap_pageable(dst_map->pmap, |
8989 | entry->vme_start, | |
8990 | entry->vme_end, | |
8991 | TRUE); | |
1c79356b | 8992 | |
2d21ac55 A |
8993 | while (va < entry->vme_end) { |
8994 | register vm_page_t m; | |
1c79356b | 8995 | |
2d21ac55 A |
8996 | /* |
8997 | * Look up the page in the object. | |
8998 | * Assert that the page will be found in the | |
8999 | * top object: | |
9000 | * either | |
9001 | * the object was newly created by | |
9002 | * vm_object_copy_slowly, and has | |
9003 | * copies of all of the pages from | |
9004 | * the source object | |
9005 | * or | |
9006 | * the object was moved from the old | |
9007 | * map entry; because the old map | |
9008 | * entry was wired, all of the pages | |
9009 | * were in the top-level object. | |
9010 | * (XXX not true if we wire pages for | |
9011 | * reading) | |
9012 | */ | |
9013 | vm_object_lock(object); | |
91447636 | 9014 | |
2d21ac55 | 9015 | m = vm_page_lookup(object, offset); |
b0d623f7 | 9016 | if (m == VM_PAGE_NULL || !VM_PAGE_WIRED(m) || |
2d21ac55 A |
9017 | m->absent) |
9018 | panic("vm_map_copyout: wiring %p", m); | |
1c79356b | 9019 | |
2d21ac55 A |
9020 | /* |
9021 | * ENCRYPTED SWAP: | |
9022 | * The page is assumed to be wired here, so it | |
9023 | * shouldn't be encrypted. Otherwise, we | |
9024 | * couldn't enter it in the page table, since | |
9025 | * we don't want the user to see the encrypted | |
9026 | * data. | |
9027 | */ | |
9028 | ASSERT_PAGE_DECRYPTED(m); | |
1c79356b | 9029 | |
2d21ac55 | 9030 | prot = entry->protection; |
1c79356b | 9031 | |
3e170ce0 A |
9032 | if (override_nx(dst_map, VME_ALIAS(entry)) && |
9033 | prot) | |
2d21ac55 | 9034 | prot |= VM_PROT_EXECUTE; |
1c79356b | 9035 | |
2d21ac55 | 9036 | type_of_fault = DBG_CACHE_HIT_FAULT; |
1c79356b | 9037 | |
6d2010ae | 9038 | vm_fault_enter(m, dst_map->pmap, va, prot, prot, |
fe8ab488 | 9039 | VM_PAGE_WIRED(m), FALSE, FALSE, |
3e170ce0 | 9040 | FALSE, VME_ALIAS(entry), |
fe8ab488 A |
9041 | ((entry->iokit_acct || |
9042 | (!entry->is_sub_map && | |
9043 | !entry->use_pmap)) | |
9044 | ? PMAP_OPTIONS_ALT_ACCT | |
9045 | : 0), | |
9046 | NULL, &type_of_fault); | |
1c79356b | 9047 | |
2d21ac55 | 9048 | vm_object_unlock(object); |
1c79356b | 9049 | |
2d21ac55 A |
9050 | offset += PAGE_SIZE_64; |
9051 | va += PAGE_SIZE; | |
1c79356b A |
9052 | } |
9053 | } | |
9054 | } | |
9055 | ||
39236c6e A |
9056 | after_adjustments: |
9057 | ||
1c79356b A |
9058 | /* |
9059 | * Correct the page alignment for the result | |
9060 | */ | |
9061 | ||
9062 | *dst_addr = start + (copy->offset - vm_copy_start); | |
9063 | ||
9064 | /* | |
9065 | * Update the hints and the map size | |
9066 | */ | |
9067 | ||
39236c6e A |
9068 | if (consume_on_success) { |
9069 | SAVE_HINT_MAP_WRITE(dst_map, vm_map_copy_last_entry(copy)); | |
9070 | } else { | |
9071 | SAVE_HINT_MAP_WRITE(dst_map, last); | |
9072 | } | |
1c79356b A |
9073 | |
9074 | dst_map->size += size; | |
9075 | ||
9076 | /* | |
9077 | * Link in the copy | |
9078 | */ | |
9079 | ||
39236c6e A |
9080 | if (consume_on_success) { |
9081 | vm_map_copy_insert(dst_map, last, copy); | |
9082 | } else { | |
9083 | vm_map_copy_remap(dst_map, last, copy, adjustment, | |
9084 | cur_protection, max_protection, | |
9085 | inheritance); | |
9086 | } | |
1c79356b A |
9087 | |
9088 | vm_map_unlock(dst_map); | |
9089 | ||
9090 | /* | |
9091 | * XXX If wiring_required, call vm_map_pageable | |
9092 | */ | |
9093 | ||
9094 | return(KERN_SUCCESS); | |
9095 | } | |
9096 | ||
1c79356b A |
9097 | /* |
9098 | * Routine: vm_map_copyin | |
9099 | * | |
9100 | * Description: | |
2d21ac55 A |
9101 | * see vm_map_copyin_common. Exported via Unsupported.exports. |
9102 | * | |
9103 | */ | |
9104 | ||
9105 | #undef vm_map_copyin | |
9106 | ||
9107 | kern_return_t | |
9108 | vm_map_copyin( | |
9109 | vm_map_t src_map, | |
9110 | vm_map_address_t src_addr, | |
9111 | vm_map_size_t len, | |
9112 | boolean_t src_destroy, | |
9113 | vm_map_copy_t *copy_result) /* OUT */ | |
9114 | { | |
9115 | return(vm_map_copyin_common(src_map, src_addr, len, src_destroy, | |
9116 | FALSE, copy_result, FALSE)); | |
9117 | } | |
9118 | ||
9119 | /* | |
9120 | * Routine: vm_map_copyin_common | |
9121 | * | |
9122 | * Description: | |
1c79356b A |
9123 | * Copy the specified region (src_addr, len) from the |
9124 | * source address space (src_map), possibly removing | |
9125 | * the region from the source address space (src_destroy). | |
9126 | * | |
9127 | * Returns: | |
9128 | * A vm_map_copy_t object (copy_result), suitable for | |
9129 | * insertion into another address space (using vm_map_copyout), | |
9130 | * copying over another address space region (using | |
9131 | * vm_map_copy_overwrite). If the copy is unused, it | |
9132 | * should be destroyed (using vm_map_copy_discard). | |
9133 | * | |
9134 | * In/out conditions: | |
9135 | * The source map should not be locked on entry. | |
9136 | */ | |
9137 | ||
9138 | typedef struct submap_map { | |
9139 | vm_map_t parent_map; | |
91447636 A |
9140 | vm_map_offset_t base_start; |
9141 | vm_map_offset_t base_end; | |
2d21ac55 | 9142 | vm_map_size_t base_len; |
1c79356b A |
9143 | struct submap_map *next; |
9144 | } submap_map_t; | |
9145 | ||
9146 | kern_return_t | |
9147 | vm_map_copyin_common( | |
9148 | vm_map_t src_map, | |
91447636 A |
9149 | vm_map_address_t src_addr, |
9150 | vm_map_size_t len, | |
1c79356b | 9151 | boolean_t src_destroy, |
91447636 | 9152 | __unused boolean_t src_volatile, |
1c79356b A |
9153 | vm_map_copy_t *copy_result, /* OUT */ |
9154 | boolean_t use_maxprot) | |
9155 | { | |
1c79356b A |
9156 | vm_map_entry_t tmp_entry; /* Result of last map lookup -- |
9157 | * in multi-level lookup, this | |
9158 | * entry contains the actual | |
9159 | * vm_object/offset. | |
9160 | */ | |
9161 | register | |
9162 | vm_map_entry_t new_entry = VM_MAP_ENTRY_NULL; /* Map entry for copy */ | |
9163 | ||
91447636 | 9164 | vm_map_offset_t src_start; /* Start of current entry -- |
1c79356b A |
9165 | * where copy is taking place now |
9166 | */ | |
91447636 | 9167 | vm_map_offset_t src_end; /* End of entire region to be |
1c79356b | 9168 | * copied */ |
2d21ac55 | 9169 | vm_map_offset_t src_base; |
91447636 | 9170 | vm_map_t base_map = src_map; |
1c79356b A |
9171 | boolean_t map_share=FALSE; |
9172 | submap_map_t *parent_maps = NULL; | |
9173 | ||
9174 | register | |
9175 | vm_map_copy_t copy; /* Resulting copy */ | |
fe8ab488 A |
9176 | vm_map_address_t copy_addr; |
9177 | vm_map_size_t copy_size; | |
1c79356b A |
9178 | |
9179 | /* | |
9180 | * Check for copies of zero bytes. | |
9181 | */ | |
9182 | ||
9183 | if (len == 0) { | |
9184 | *copy_result = VM_MAP_COPY_NULL; | |
9185 | return(KERN_SUCCESS); | |
9186 | } | |
9187 | ||
4a249263 A |
9188 | /* |
9189 | * Check that the end address doesn't overflow | |
9190 | */ | |
9191 | src_end = src_addr + len; | |
9192 | if (src_end < src_addr) | |
9193 | return KERN_INVALID_ADDRESS; | |
9194 | ||
1c79356b A |
9195 | /* |
9196 | * If the copy is sufficiently small, use a kernel buffer instead | |
9197 | * of making a virtual copy. The theory being that the cost of | |
9198 | * setting up VM (and taking C-O-W faults) dominates the copy costs | |
9199 | * for small regions. | |
9200 | */ | |
9201 | if ((len < msg_ool_size_small) && !use_maxprot) | |
2d21ac55 A |
9202 | return vm_map_copyin_kernel_buffer(src_map, src_addr, len, |
9203 | src_destroy, copy_result); | |
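	/*
	 * [Editor's note] This guard is consistent with the cap enforced
	 * inside vm_map_copyin_kernel_buffer() itself, which rejects
	 * len > msg_ool_size_small with KERN_INVALID_ARGUMENT.
	 */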
1c79356b A |
9204 | |
9205 | /* | |
4a249263 | 9206 | * Compute (page aligned) start and end of region |
1c79356b | 9207 | */ |
39236c6e A |
9208 | src_start = vm_map_trunc_page(src_addr, |
9209 | VM_MAP_PAGE_MASK(src_map)); | |
9210 | src_end = vm_map_round_page(src_end, | |
9211 | VM_MAP_PAGE_MASK(src_map)); | |
1c79356b | 9212 | |
b0d623f7 | 9213 | XPR(XPR_VM_MAP, "vm_map_copyin_common map 0x%x addr 0x%x len 0x%x dest %d\n", src_map, src_addr, len, src_destroy, 0); |
1c79356b | 9214 | |
1c79356b A |
9215 | /* |
9216 | * Allocate a header element for the list. | |
9217 | * | |
9218 | * Use the start and end in the header to | |
9219 | * remember the endpoints prior to rounding. | |
9220 | */ | |
9221 | ||
9222 | copy = (vm_map_copy_t) zalloc(vm_map_copy_zone); | |
04b8595b | 9223 | copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE; |
1c79356b | 9224 | vm_map_copy_first_entry(copy) = |
2d21ac55 | 9225 | vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy); |
1c79356b A |
9226 | copy->type = VM_MAP_COPY_ENTRY_LIST; |
9227 | copy->cpy_hdr.nentries = 0; | |
9228 | copy->cpy_hdr.entries_pageable = TRUE; | |
39236c6e A |
9229 | #if 00 |
9230 | copy->cpy_hdr.page_shift = src_map->hdr.page_shift; | |
9231 | #else | |
9232 | /* | |
9233 | * The copy entries can be broken down for a variety of reasons, | |
9234 | * so we can't guarantee that they will remain map-aligned... | |
9235 | * Will need to adjust the first copy_entry's "vme_start" and | |
9236 | * the last copy_entry's "vme_end" to be rounded to PAGE_MASK | |
9237 | * rather than the original map's alignment. | |
9238 | */ | |
9239 | copy->cpy_hdr.page_shift = PAGE_SHIFT; | |
9240 | #endif | |
1c79356b | 9241 | |
6d2010ae A |
9242 | vm_map_store_init( &(copy->cpy_hdr) ); |
9243 | ||
1c79356b A |
9244 | copy->offset = src_addr; |
9245 | copy->size = len; | |
9246 | ||
7ddcb079 | 9247 | new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable); |
1c79356b A |
9248 | |
9249 | #define RETURN(x) \ | |
9250 | MACRO_BEGIN \ | |
9251 | vm_map_unlock(src_map); \ | |
9bccf70c A |
9252 | if(src_map != base_map) \ |
9253 | vm_map_deallocate(src_map); \ | |
1c79356b A |
9254 | if (new_entry != VM_MAP_ENTRY_NULL) \ |
9255 | vm_map_copy_entry_dispose(copy,new_entry); \ | |
9256 | vm_map_copy_discard(copy); \ | |
9257 | { \ | |
91447636 | 9258 | submap_map_t *_ptr; \ |
1c79356b | 9259 | \ |
91447636 | 9260 | for(_ptr = parent_maps; _ptr != NULL; _ptr = parent_maps) { \ |
1c79356b | 9261 | parent_maps=parent_maps->next; \ |
91447636 A |
9262 | if (_ptr->parent_map != base_map) \ |
9263 | vm_map_deallocate(_ptr->parent_map); \ | |
9264 | kfree(_ptr, sizeof(submap_map_t)); \ | |
1c79356b A |
9265 | } \ |
9266 | } \ | |
9267 | MACRO_RETURN(x); \ | |
9268 | MACRO_END | |
9269 | ||
9270 | /* | |
9271 | * Find the beginning of the region. | |
9272 | */ | |
9273 | ||
9274 | vm_map_lock(src_map); | |
9275 | ||
fe8ab488 A |
9276 | /* |
9277 | * Lookup the original "src_addr" rather than the truncated | |
9278 | * "src_start", in case "src_start" falls in a non-map-aligned | |
9279 | * map entry *before* the map entry that contains "src_addr"... | |
9280 | */ | |
9281 | if (!vm_map_lookup_entry(src_map, src_addr, &tmp_entry)) | |
1c79356b A |
9282 | RETURN(KERN_INVALID_ADDRESS); |
9283 | if(!tmp_entry->is_sub_map) { | |
fe8ab488 A |
9284 | /* |
9285 | * ... but clip to the map-rounded "src_start" rather than | |
9286 | * "src_addr" to preserve map-alignment. We'll adjust the | |
9287 | * first copy entry at the end, if needed. | |
9288 | */ | |
1c79356b A |
9289 | vm_map_clip_start(src_map, tmp_entry, src_start); |
9290 | } | |
fe8ab488 A |
9291 | if (src_start < tmp_entry->vme_start) { |
9292 | /* | |
9293 | * Move "src_start" up to the start of the | |
9294 | * first map entry to copy. | |
9295 | */ | |
9296 | src_start = tmp_entry->vme_start; | |
9297 | } | |
1c79356b A |
9298 | /* set for later submap fix-up */ |
9299 | copy_addr = src_start; | |
9300 | ||
9301 | /* | |
9302 | * Go through entries until we get to the end. | |
9303 | */ | |
9304 | ||
	while (TRUE) {
		register
		vm_map_entry_t	src_entry = tmp_entry;	/* Top-level entry */
		vm_map_size_t	src_size;		/* Size of source
							 * map entry (in both
							 * maps)
							 */

		register
		vm_object_t	src_object;		/* Object to copy */
		vm_object_offset_t src_offset;

		boolean_t	src_needs_copy;		/* Should source map
							 * be made read-only
							 * for copy-on-write?
							 */

		boolean_t	new_entry_needs_copy;	/* Will new entry be COW? */

		boolean_t	was_wired;		/* Was source wired? */
		vm_map_version_t version;		/* Version before locks
							 * dropped to make copy
							 */
		kern_return_t	result;			/* Return value from
							 * copy_strategically.
							 */
		while(tmp_entry->is_sub_map) {
			vm_map_size_t submap_len;
			submap_map_t *ptr;

			ptr = (submap_map_t *)kalloc(sizeof(submap_map_t));
			ptr->next = parent_maps;
			parent_maps = ptr;
			ptr->parent_map = src_map;
			ptr->base_start = src_start;
			ptr->base_end = src_end;
			submap_len = tmp_entry->vme_end - src_start;
			if(submap_len > (src_end-src_start))
				submap_len = src_end-src_start;
			ptr->base_len = submap_len;

			src_start -= tmp_entry->vme_start;
			src_start += VME_OFFSET(tmp_entry);
			src_end = src_start + submap_len;
			src_map = VME_SUBMAP(tmp_entry);
			vm_map_lock(src_map);
			/* keep an outstanding reference for all maps in */
			/* the parent tree except the base map */
			vm_map_reference(src_map);
			vm_map_unlock(ptr->parent_map);
			if (!vm_map_lookup_entry(
				    src_map, src_start, &tmp_entry))
				RETURN(KERN_INVALID_ADDRESS);
			map_share = TRUE;
			if(!tmp_entry->is_sub_map)
				vm_map_clip_start(src_map, tmp_entry, src_start);
			src_entry = tmp_entry;
		}
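		/*
		 * Each iteration of the loop above pushes one level of the
		 * submap hierarchy onto "parent_maps" (a simple LIFO list),
		 * recording the parent map and the base-map range it covers,
		 * so that the unwind loop near the bottom of the main loop
		 * can pop back out and resume copying in each parent.
		 */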
		/* we are now in the lowest level submap... */

		if ((VME_OBJECT(tmp_entry) != VM_OBJECT_NULL) &&
		    (VME_OBJECT(tmp_entry)->phys_contiguous)) {
			/* This is not supported for now.  In the future, */
			/* we will need to detect the phys_contig */
			/* condition and then upgrade copy_slowly */
			/* to do a physical copy from the device- */
			/* memory-based object.  We can piggy-back */
			/* off of the "was_wired" boolean to set up */
			/* the proper handling. */
			RETURN(KERN_PROTECTION_FAILURE);
		}
		/*
		 *	Create a new address map entry to hold the result.
		 *	Fill in the fields from the appropriate source entries.
		 *	We must unlock the source map to do this if we need
		 *	to allocate a map entry.
		 */
		if (new_entry == VM_MAP_ENTRY_NULL) {
			version.main_timestamp = src_map->timestamp;
			vm_map_unlock(src_map);

			new_entry = vm_map_copy_entry_create(copy, !copy->cpy_hdr.entries_pageable);

			vm_map_lock(src_map);
			if ((version.main_timestamp + 1) != src_map->timestamp) {
				if (!vm_map_lookup_entry(src_map, src_start,
							 &tmp_entry)) {
					RETURN(KERN_INVALID_ADDRESS);
				}
				if (!tmp_entry->is_sub_map)
					vm_map_clip_start(src_map, tmp_entry, src_start);
				continue; /* restart w/ new tmp_entry */
			}
		}
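		/*
		 * The timestamp check above relies on vm_map_unlock()
		 * incrementing the map's timestamp exactly once: if the
		 * timestamp on re-lock is anything other than the saved
		 * value + 1, someone else modified the map while it was
		 * unlocked and the lookup must be redone.
		 */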

		/*
		 *	Verify that the region can be read.
		 */
		if (((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE &&
		     !use_maxprot) ||
		    (src_entry->max_protection & VM_PROT_READ) == 0)
			RETURN(KERN_PROTECTION_FAILURE);

		/*
		 *	Clip against the endpoints of the entire region.
		 */

		vm_map_clip_end(src_map, src_entry, src_end);

		src_size = src_entry->vme_end - src_start;
		src_object = VME_OBJECT(src_entry);
		src_offset = VME_OFFSET(src_entry);
		was_wired = (src_entry->wired_count != 0);

		vm_map_entry_copy(new_entry, src_entry);
		if (new_entry->is_sub_map) {
			/* clear address space specifics */
			new_entry->use_pmap = FALSE;
		}

		/*
		 *	Attempt non-blocking copy-on-write optimizations.
		 */

		if (src_destroy &&
		    (src_object == VM_OBJECT_NULL ||
		     (src_object->internal && !src_object->true_share
		      && !map_share))) {
			/*
			 * If we are destroying the source, and the object
			 * is internal, we can move the object reference
			 * from the source to the copy.  The copy is
			 * copy-on-write only if the source is.
			 * We make another reference to the object, because
			 * destroying the source entry will deallocate it.
			 */
			vm_object_reference(src_object);

			/*
			 * Copy is always unwired.  vm_map_copy_entry
			 * sets its wired count to zero.
			 */

			goto CopySuccessful;
		}

	RestartCopy:
		XPR(XPR_VM_MAP, "vm_map_copyin_common src_obj 0x%x ent 0x%x obj 0x%x was_wired %d\n",
		    src_object, new_entry, VME_OBJECT(new_entry),
		    was_wired, 0);
		if ((src_object == VM_OBJECT_NULL ||
		     (!was_wired && !map_share && !tmp_entry->is_shared)) &&
		    vm_object_copy_quickly(
			    &VME_OBJECT(new_entry),
			    src_offset,
			    src_size,
			    &src_needs_copy,
			    &new_entry_needs_copy)) {

			new_entry->needs_copy = new_entry_needs_copy;

			/*
			 *	Handle copy-on-write obligations
			 */

			if (src_needs_copy && !tmp_entry->needs_copy) {
				vm_prot_t prot;

				prot = src_entry->protection & ~VM_PROT_WRITE;

				if (override_nx(src_map, VME_ALIAS(src_entry))
				    && prot)
					prot |= VM_PROT_EXECUTE;

				vm_object_pmap_protect(
					src_object,
					src_offset,
					src_size,
					(src_entry->is_shared ?
					 PMAP_NULL
					 : src_map->pmap),
					src_entry->vme_start,
					prot);

				assert(tmp_entry->wired_count == 0);
				tmp_entry->needs_copy = TRUE;
			}

			/*
			 *	The map has never been unlocked, so it's safe
			 *	to move to the next entry rather than doing
			 *	another lookup.
			 */

			goto CopySuccessful;
		}

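		/*
		 * The quick (symmetric, copy-on-write) path above did not
		 * apply, so fall through to one of three heavier copy
		 * strategies: a physical copy for wired entries
		 * (vm_object_copy_slowly), a delayed copy for shared
		 * symmetric objects (vm_object_copy_delayed), or whatever
		 * the object's own copy strategy dictates
		 * (vm_object_copy_strategically).
		 */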
		/*
		 *	Take an object reference, so that we may
		 *	release the map lock(s).
		 */

		assert(src_object != VM_OBJECT_NULL);
		vm_object_reference(src_object);

		/*
		 *	Record the timestamp for later verification.
		 *	Unlock the map.
		 */

		version.main_timestamp = src_map->timestamp;
		vm_map_unlock(src_map);	/* Increments timestamp once! */

		/*
		 *	Perform the copy
		 */

		if (was_wired) {
		CopySlowly:
			vm_object_lock(src_object);
			result = vm_object_copy_slowly(
				src_object,
				src_offset,
				src_size,
				THREAD_UNINT,
				&VME_OBJECT(new_entry));
			VME_OFFSET_SET(new_entry, 0);
			new_entry->needs_copy = FALSE;

		}
		else if (src_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
			 (tmp_entry->is_shared || map_share)) {
			vm_object_t new_object;

			vm_object_lock_shared(src_object);
			new_object = vm_object_copy_delayed(
				src_object,
				src_offset,
				src_size,
				TRUE);
			if (new_object == VM_OBJECT_NULL)
				goto CopySlowly;

			VME_OBJECT_SET(new_entry, new_object);
			assert(new_entry->wired_count == 0);
			new_entry->needs_copy = TRUE;
			assert(!new_entry->iokit_acct);
			assert(new_object->purgable == VM_PURGABLE_DENY);
			new_entry->use_pmap = TRUE;
			result = KERN_SUCCESS;

		} else {
			vm_object_offset_t new_offset;
			new_offset = VME_OFFSET(new_entry);
			result = vm_object_copy_strategically(src_object,
							      src_offset,
							      src_size,
							      &VME_OBJECT(new_entry),
							      &new_offset,
							      &new_entry_needs_copy);
			if (new_offset != VME_OFFSET(new_entry)) {
				VME_OFFSET_SET(new_entry, new_offset);
			}

			new_entry->needs_copy = new_entry_needs_copy;
		}
9571 | } |
9572 | ||
9573 | if (result != KERN_SUCCESS && | |
9574 | result != KERN_MEMORY_RESTART_COPY) { | |
9575 | vm_map_lock(src_map); | |
9576 | RETURN(result); | |
9577 | } | |
9578 | ||
9579 | /* | |
9580 | * Throw away the extra reference | |
9581 | */ | |
9582 | ||
9583 | vm_object_deallocate(src_object); | |
9584 | ||
9585 | /* | |
9586 | * Verify that the map has not substantially | |
9587 | * changed while the copy was being made. | |
9588 | */ | |
9589 | ||
9bccf70c | 9590 | vm_map_lock(src_map); |
1c79356b A |
9591 | |
9592 | if ((version.main_timestamp + 1) == src_map->timestamp) | |
9593 | goto VerificationSuccessful; | |
9594 | ||
9595 | /* | |
9596 | * Simple version comparison failed. | |
9597 | * | |
9598 | * Retry the lookup and verify that the | |
9599 | * same object/offset are still present. | |
9600 | * | |
9601 | * [Note: a memory manager that colludes with | |
9602 | * the calling task can detect that we have | |
9603 | * cheated. While the map was unlocked, the | |
9604 | * mapping could have been changed and restored.] | |
9605 | */ | |
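		/*
		 * In other words, the timestamp check is only a fast path:
		 * what actually matters is that the entry still covers
		 * "src_start" with the same object and offset, which is
		 * what the re-lookup below verifies.
		 */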

		if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
			if (result != KERN_MEMORY_RESTART_COPY) {
				vm_object_deallocate(VME_OBJECT(new_entry));
				VME_OBJECT_SET(new_entry, VM_OBJECT_NULL);
				assert(!new_entry->iokit_acct);
				new_entry->use_pmap = TRUE;
			}
			RETURN(KERN_INVALID_ADDRESS);
		}

		src_entry = tmp_entry;
		vm_map_clip_start(src_map, src_entry, src_start);

		if ((((src_entry->protection & VM_PROT_READ) == VM_PROT_NONE) &&
		     !use_maxprot) ||
		    ((src_entry->max_protection & VM_PROT_READ) == 0))
			goto VerificationFailed;

		if (src_entry->vme_end < new_entry->vme_end) {
			assert(VM_MAP_PAGE_ALIGNED(src_entry->vme_end,
						   VM_MAP_COPY_PAGE_MASK(copy)));
			new_entry->vme_end = src_entry->vme_end;
			src_size = new_entry->vme_end - src_start;
		}

		if ((VME_OBJECT(src_entry) != src_object) ||
		    (VME_OFFSET(src_entry) != src_offset) ) {

			/*
			 *	Verification failed.
			 *
			 *	Start over with this top-level entry.
			 */

		VerificationFailed: ;

			vm_object_deallocate(VME_OBJECT(new_entry));
			tmp_entry = src_entry;
			continue;
		}

		/*
		 *	Verification succeeded.
		 */

	VerificationSuccessful: ;

		if (result == KERN_MEMORY_RESTART_COPY)
			goto RestartCopy;

		/*
		 *	Copy succeeded.
		 */

	CopySuccessful: ;

		/*
		 *	Link in the new copy entry.
		 */

		vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),
				       new_entry);

		/*
		 *	Determine whether the entire region
		 *	has been copied.
		 */
		src_base = src_start;
		src_start = new_entry->vme_end;
		new_entry = VM_MAP_ENTRY_NULL;
		while ((src_start >= src_end) && (src_end != 0)) {
			submap_map_t	*ptr;

			if (src_map == base_map) {
				/* back to the top */
				break;
			}

			ptr = parent_maps;
			assert(ptr != NULL);
			parent_maps = parent_maps->next;

			/* fix up the damage we did in that submap */
			vm_map_simplify_range(src_map,
					      src_base,
					      src_end);

			vm_map_unlock(src_map);
			vm_map_deallocate(src_map);
			vm_map_lock(ptr->parent_map);
			src_map = ptr->parent_map;
			src_base = ptr->base_start;
			src_start = ptr->base_start + ptr->base_len;
			src_end = ptr->base_end;
			if (!vm_map_lookup_entry(src_map,
						 src_start,
						 &tmp_entry) &&
			    (src_end > src_start)) {
				RETURN(KERN_INVALID_ADDRESS);
			}
			kfree(ptr, sizeof(submap_map_t));
			if (parent_maps == NULL)
				map_share = FALSE;
			src_entry = tmp_entry->vme_prev;
		}
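		/*
		 * The loop above is the unwind counterpart of the submap
		 * descent at the top of the main loop: once a submap's
		 * portion of the range is fully copied, pop one level of
		 * "parent_maps", drop the reference taken on the way down,
		 * and continue copying in the parent where we left off.
		 */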

		if ((VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT) &&
		    (src_start >= src_addr + len) &&
		    (src_addr + len != 0)) {
			/*
			 * Stop copying now, even though we haven't reached
			 * "src_end".  We'll adjust the end of the last copy
			 * entry at the end, if needed.
			 *
			 * If src_map's alignment is different from the
			 * system's page-alignment, there could be
			 * extra non-map-aligned map entries between
			 * the original (non-rounded) "src_addr + len"
			 * and the rounded "src_end".
			 * We do not want to copy those map entries since
			 * they're not part of the copied range.
			 */
			break;
		}

		if ((src_start >= src_end) && (src_end != 0))
			break;

		/*
		 *	Verify that there are no gaps in the region
		 */

		tmp_entry = src_entry->vme_next;
		if ((tmp_entry->vme_start != src_start) ||
		    (tmp_entry == vm_map_to_entry(src_map))) {
			RETURN(KERN_INVALID_ADDRESS);
		}
	}

	/*
	 * If the source should be destroyed, do it now, since the
	 * copy was successful.
	 */
	if (src_destroy) {
		(void) vm_map_delete(
			src_map,
			vm_map_trunc_page(src_addr,
					  VM_MAP_PAGE_MASK(src_map)),
			src_end,
			((src_map == kernel_map) ?
			 VM_MAP_REMOVE_KUNWIRE :
			 VM_MAP_NO_FLAGS),
			VM_MAP_NULL);
	} else {
		/* fix up the damage we did in the base map */
		vm_map_simplify_range(
			src_map,
			vm_map_trunc_page(src_addr,
					  VM_MAP_PAGE_MASK(src_map)),
			vm_map_round_page(src_end,
					  VM_MAP_PAGE_MASK(src_map)));
	}

	vm_map_unlock(src_map);

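	/*
	 * If the source map uses larger pages than the kernel (e.g. a
	 * 16K-page map on a 4K-page kernel), the clipping above was done
	 * at the map's page size, so the first and last copy entries may
	 * need their boundaries pulled back to map-page alignment and
	 * then re-trimmed to the requested range.  That is what the
	 * block below does.
	 */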
	if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT) {
		vm_map_offset_t original_start, original_offset, original_end;

		assert(VM_MAP_COPY_PAGE_MASK(copy) == PAGE_MASK);

		/* adjust alignment of first copy_entry's "vme_start" */
		tmp_entry = vm_map_copy_first_entry(copy);
		if (tmp_entry != vm_map_copy_to_entry(copy)) {
			vm_map_offset_t adjustment;

			original_start = tmp_entry->vme_start;
			original_offset = VME_OFFSET(tmp_entry);

			/* map-align the start of the first copy entry... */
			adjustment = (tmp_entry->vme_start -
				      vm_map_trunc_page(
					      tmp_entry->vme_start,
					      VM_MAP_PAGE_MASK(src_map)));
			tmp_entry->vme_start -= adjustment;
			VME_OFFSET_SET(tmp_entry,
				       VME_OFFSET(tmp_entry) - adjustment);
			copy_addr -= adjustment;
			assert(tmp_entry->vme_start < tmp_entry->vme_end);
			/* ... adjust for mis-aligned start of copy range */
			adjustment =
				(vm_map_trunc_page(copy->offset,
						   PAGE_MASK) -
				 vm_map_trunc_page(copy->offset,
						   VM_MAP_PAGE_MASK(src_map)));
			if (adjustment) {
				assert(page_aligned(adjustment));
				assert(adjustment < VM_MAP_PAGE_SIZE(src_map));
				tmp_entry->vme_start += adjustment;
				VME_OFFSET_SET(tmp_entry,
					       (VME_OFFSET(tmp_entry) +
						adjustment));
				copy_addr += adjustment;
				assert(tmp_entry->vme_start < tmp_entry->vme_end);
			}

			/*
			 * Assert that the adjustments haven't exposed
			 * more than was originally copied...
			 */
			assert(tmp_entry->vme_start >= original_start);
			assert(VME_OFFSET(tmp_entry) >= original_offset);
			/*
			 * ... and that it did not adjust outside of
			 * a single 16K page.
			 */
			assert(vm_map_trunc_page(tmp_entry->vme_start,
						 VM_MAP_PAGE_MASK(src_map)) ==
			       vm_map_trunc_page(original_start,
						 VM_MAP_PAGE_MASK(src_map)));
		}

		/* adjust alignment of last copy_entry's "vme_end" */
		tmp_entry = vm_map_copy_last_entry(copy);
		if (tmp_entry != vm_map_copy_to_entry(copy)) {
			vm_map_offset_t adjustment;

			original_end = tmp_entry->vme_end;

			/* map-align the end of the last copy entry... */
			tmp_entry->vme_end =
				vm_map_round_page(tmp_entry->vme_end,
						  VM_MAP_PAGE_MASK(src_map));
			/* ... adjust for mis-aligned end of copy range */
			adjustment =
				(vm_map_round_page((copy->offset +
						    copy->size),
						   VM_MAP_PAGE_MASK(src_map)) -
				 vm_map_round_page((copy->offset +
						    copy->size),
						   PAGE_MASK));
			if (adjustment) {
				assert(page_aligned(adjustment));
				assert(adjustment < VM_MAP_PAGE_SIZE(src_map));
				tmp_entry->vme_end -= adjustment;
				assert(tmp_entry->vme_start < tmp_entry->vme_end);
			}

			/*
			 * Assert that the adjustments haven't exposed
			 * more than was originally copied...
			 */
			assert(tmp_entry->vme_end <= original_end);
			/*
			 * ... and that it did not adjust outside of
			 * a single 16K page.
			 */
			assert(vm_map_round_page(tmp_entry->vme_end,
						 VM_MAP_PAGE_MASK(src_map)) ==
			       vm_map_round_page(original_end,
						 VM_MAP_PAGE_MASK(src_map)));
		}
	}

	/* Fix-up start and end points in copy.  This is necessary */
	/* when the various entries in the copy object were picked */
	/* up from different sub-maps */

	tmp_entry = vm_map_copy_first_entry(copy);
	copy_size = 0; /* compute actual size */
	while (tmp_entry != vm_map_copy_to_entry(copy)) {
		assert(VM_MAP_PAGE_ALIGNED(
			       copy_addr + (tmp_entry->vme_end -
					    tmp_entry->vme_start),
			       VM_MAP_COPY_PAGE_MASK(copy)));
		assert(VM_MAP_PAGE_ALIGNED(
			       copy_addr,
			       VM_MAP_COPY_PAGE_MASK(copy)));

		/*
		 * The copy_entries will be injected directly into the
		 * destination map and might not be "map aligned" there...
		 */
		tmp_entry->map_aligned = FALSE;

		tmp_entry->vme_end = copy_addr +
			(tmp_entry->vme_end - tmp_entry->vme_start);
		tmp_entry->vme_start = copy_addr;
		assert(tmp_entry->vme_start < tmp_entry->vme_end);
		copy_addr += tmp_entry->vme_end - tmp_entry->vme_start;
		copy_size += tmp_entry->vme_end - tmp_entry->vme_start;
		tmp_entry = (struct vm_map_entry *)tmp_entry->vme_next;
	}

	if (VM_MAP_PAGE_SHIFT(src_map) != PAGE_SHIFT &&
	    copy_size < copy->size) {
		/*
		 * The actual size of the VM map copy is smaller than what
		 * was requested by the caller.  This must be because some
		 * PAGE_SIZE-sized pages are missing at the end of the last
		 * VM_MAP_PAGE_SIZE(src_map)-sized chunk of the range.
		 * The caller might not have been aware of those missing
		 * pages and might not want to be aware of it, which is
		 * fine as long as they don't try to access (and crash on)
		 * those missing pages.
		 * Let's adjust the size of the "copy", to avoid failing
		 * in vm_map_copyout() or vm_map_copy_overwrite().
		 */
		assert(vm_map_round_page(copy_size,
					 VM_MAP_PAGE_MASK(src_map)) ==
		       vm_map_round_page(copy->size,
					 VM_MAP_PAGE_MASK(src_map)));
		copy->size = copy_size;
	}

	*copy_result = copy;
	return(KERN_SUCCESS);

#undef	RETURN
}
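
/*
 * Illustrative sketch (not part of this file's API surface): a typical
 * caller pairs a copyin with a copyout, e.g.:
 *
 *	vm_map_copy_t	copy;
 *	kern_return_t	kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr == KERN_SUCCESS) {
 *		kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *	}
 *
 * The "src_map"/"dst_map"/"dst_addr" names here are placeholders; the
 * point is that on a failed copyout the caller still owns the copy
 * object and must discard it.
 */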

kern_return_t
vm_map_copy_extract(
	vm_map_t		src_map,
	vm_map_address_t	src_addr,
	vm_map_size_t		len,
	vm_map_copy_t		*copy_result,	/* OUT */
	vm_prot_t		*cur_prot,	/* OUT */
	vm_prot_t		*max_prot)
{
	vm_map_offset_t	src_start, src_end;
	vm_map_copy_t	copy;
	kern_return_t	kr;

	/*
	 *	Check for copies of zero bytes.
	 */

	if (len == 0) {
		*copy_result = VM_MAP_COPY_NULL;
		return(KERN_SUCCESS);
	}

	/*
	 *	Check that the end address doesn't overflow
	 */
	src_end = src_addr + len;
	if (src_end < src_addr)
		return KERN_INVALID_ADDRESS;

	/*
	 *	Compute (page aligned) start and end of region
	 */
	src_start = vm_map_trunc_page(src_addr, PAGE_MASK);
	src_end = vm_map_round_page(src_end, PAGE_MASK);

	/*
	 *	Allocate a header element for the list.
	 *
	 *	Use the start and end in the header to
	 *	remember the endpoints prior to rounding.
	 */

	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
	vm_map_copy_first_entry(copy) =
		vm_map_copy_last_entry(copy) = vm_map_copy_to_entry(copy);
	copy->type = VM_MAP_COPY_ENTRY_LIST;
	copy->cpy_hdr.nentries = 0;
	copy->cpy_hdr.entries_pageable = TRUE;

	vm_map_store_init(&copy->cpy_hdr);

	copy->offset = 0;
	copy->size = len;

	kr = vm_map_remap_extract(src_map,
				  src_addr,
				  len,
				  FALSE, /* copy */
				  &copy->cpy_hdr,
				  cur_prot,
				  max_prot,
				  VM_INHERIT_SHARE,
				  TRUE); /* pageable */
	if (kr != KERN_SUCCESS) {
		vm_map_copy_discard(copy);
		return kr;
	}

	*copy_result = copy;
	return KERN_SUCCESS;
}
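
/*
 * Unlike vm_map_copyin_common() above, vm_map_copy_extract() does not
 * set up copy-on-write: it extracts the source mappings via
 * vm_map_remap_extract() with VM_INHERIT_SHARE, so the resulting copy
 * shares the underlying memory with the source range, and the effective
 * protections are reported back through "cur_prot"/"max_prot".
 */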

/*
 *	vm_map_copyin_object:
 *
 *	Create a copy object from an object.
 *	Our caller donates an object reference.
 */

kern_return_t
vm_map_copyin_object(
	vm_object_t		object,
	vm_object_offset_t	offset,	/* offset of region in object */
	vm_object_size_t	size,	/* size of region in object */
	vm_map_copy_t	*copy_result)	/* OUT */
{
	vm_map_copy_t	copy;		/* Resulting copy */

	/*
	 *	We drop the object into a special copy object
	 *	that contains the object directly.
	 */

	copy = (vm_map_copy_t) zalloc(vm_map_copy_zone);
	copy->c_u.hdr.rb_head_store.rbh_root = (void*)(int)SKIP_RB_TREE;
	copy->type = VM_MAP_COPY_OBJECT;
	copy->cpy_object = object;
	copy->offset = offset;
	copy->size = size;

	*copy_result = copy;
	return(KERN_SUCCESS);
}
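
/*
 * Illustrative sketch (hypothetical caller, not from this file): since
 * the caller donates its object reference, a failed hand-off must not
 * be double-released:
 *
 *	vm_object_t	obj = ...;	// holds one reference
 *	vm_map_copy_t	copy;
 *
 *	(void) vm_map_copyin_object(obj, 0, obj_size, &copy);
 *	// "obj" now belongs to "copy": release it by discarding the
 *	// copy (vm_map_copy_discard) or by consuming the copy.
 */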

static void
vm_map_fork_share(
	vm_map_t	old_map,
	vm_map_entry_t	old_entry,
	vm_map_t	new_map)
{
	vm_object_t 	object;
	vm_map_entry_t 	new_entry;

	/*
	 *	New sharing code.  New map entry
	 *	references original object.  Internal
	 *	objects use asynchronous copy algorithm for
	 *	future copies.  First make sure we have
	 *	the right object.  If we need a shadow,
	 *	or someone else already has one, then
	 *	make a new shadow and share it.
	 */

	object = VME_OBJECT(old_entry);
	if (old_entry->is_sub_map) {
		assert(old_entry->wired_count == 0);
#ifndef NO_NESTED_PMAP
		if(old_entry->use_pmap) {
			kern_return_t	result;

			result = pmap_nest(new_map->pmap,
					   (VME_SUBMAP(old_entry))->pmap,
					   (addr64_t)old_entry->vme_start,
					   (addr64_t)old_entry->vme_start,
					   (uint64_t)(old_entry->vme_end - old_entry->vme_start));
			if(result)
				panic("vm_map_fork_share: pmap_nest failed!");
		}
#endif	/* NO_NESTED_PMAP */
	} else if (object == VM_OBJECT_NULL) {
		object = vm_object_allocate((vm_map_size_t)(old_entry->vme_end -
							    old_entry->vme_start));
		VME_OFFSET_SET(old_entry, 0);
		VME_OBJECT_SET(old_entry, object);
		old_entry->use_pmap = TRUE;
		assert(!old_entry->needs_copy);
	} else if (object->copy_strategy !=
		   MEMORY_OBJECT_COPY_SYMMETRIC) {

		/*
		 *	We are already using an asymmetric
		 *	copy, and therefore we already have
		 *	the right object.
		 */

		assert(! old_entry->needs_copy);
	}
	else if (old_entry->needs_copy ||	/* case 1 */
		 object->shadowed ||		/* case 2 */
		 (!object->true_share && 	/* case 3 */
		  !old_entry->is_shared &&
		  (object->vo_size >
		   (vm_map_size_t)(old_entry->vme_end -
				   old_entry->vme_start)))) {

		/*
		 *	We need to create a shadow.
		 *	There are three cases here.
		 *	In the first case, we need to
		 *	complete a deferred symmetrical
		 *	copy that we participated in.
		 *	In the second and third cases,
		 *	we need to create the shadow so
		 *	that changes that we make to the
		 *	object do not interfere with
		 *	any symmetrical copies which
		 *	have occurred (case 2) or which
		 *	might occur (case 3).
		 *
		 *	The first case is when we had
		 *	deferred shadow object creation
		 *	via the entry->needs_copy mechanism.
		 *	This mechanism only works when
		 *	only one entry points to the source
		 *	object, and we are about to create
		 *	a second entry pointing to the
		 *	same object.  The problem is that
		 *	there is no way of mapping from
		 *	an object to the entries pointing
		 *	to it.  (Deferred shadow creation
		 *	works with one entry because it
		 *	occurs at fault time, and we walk
		 *	from the entry to the object when
		 *	handling the fault.)
		 *
		 *	The second case is when the object
		 *	to be shared has already been copied
		 *	with a symmetric copy, but we point
		 *	directly to the object without
		 *	needs_copy set in our entry.  (This
		 *	can happen because different ranges
		 *	of an object can be pointed to by
		 *	different entries.  In particular,
		 *	a single entry pointing to an object
		 *	can be split by a call to vm_inherit,
		 *	which, combined with task_create, can
		 *	result in the different entries
		 *	having different needs_copy values.)
		 *	The shadowed flag in the object allows
		 *	us to detect this case.  The problem
		 *	with this case is that if this object
		 *	has or will have shadows, then we
		 *	must not perform an asymmetric copy
		 *	of this object, since such a copy
		 *	allows the object to be changed, which
		 *	will break the previous symmetrical
		 *	copies (which rely upon the object
		 *	not changing).  In a sense, the shadowed
		 *	flag says "don't change this object".
		 *	We fix this by creating a shadow
		 *	object for this object, and sharing
		 *	that.  This works because we are free
		 *	to change the shadow object (and thus
		 *	to use an asymmetric copy strategy);
		 *	this is also semantically correct,
		 *	since this object is temporary, and
		 *	therefore a copy of the object is
		 *	as good as the object itself.  (This
		 *	is not true for permanent objects,
		 *	since the pager needs to see changes,
		 *	which won't happen if the changes
		 *	are made to a copy.)
		 *
		 *	The third case is when the object
		 *	to be shared has parts sticking
		 *	outside of the entry we're working
		 *	with, and thus may in the future
		 *	be subject to a symmetrical copy.
		 *	(This is a preemptive version of
		 *	case 2.)
		 */
		VME_OBJECT_SHADOW(old_entry,
				  (vm_map_size_t) (old_entry->vme_end -
						   old_entry->vme_start));

		/*
		 *	If we're making a shadow for other than
		 *	copy on write reasons, then we have
		 *	to remove write permission.
		 */

		if (!old_entry->needs_copy &&
		    (old_entry->protection & VM_PROT_WRITE)) {
			vm_prot_t prot;

			prot = old_entry->protection & ~VM_PROT_WRITE;

			if (override_nx(old_map, VME_ALIAS(old_entry)) && prot)
				prot |= VM_PROT_EXECUTE;

			if (old_map->mapped_in_other_pmaps) {
				vm_object_pmap_protect(
					VME_OBJECT(old_entry),
					VME_OFFSET(old_entry),
					(old_entry->vme_end -
					 old_entry->vme_start),
					PMAP_NULL,
					old_entry->vme_start,
					prot);
			} else {
				pmap_protect(old_map->pmap,
					     old_entry->vme_start,
					     old_entry->vme_end,
					     prot);
			}
		}

		old_entry->needs_copy = FALSE;
		object = VME_OBJECT(old_entry);
	}


	/*
	 *	If object was using a symmetric copy strategy,
	 *	change its copy strategy to the default
	 *	asymmetric copy strategy, which is copy_delay
	 *	in the non-norma case and copy_call in the
	 *	norma case.  Bump the reference count for the
	 *	new entry.
	 */

	if(old_entry->is_sub_map) {
		vm_map_lock(VME_SUBMAP(old_entry));
		vm_map_reference(VME_SUBMAP(old_entry));
		vm_map_unlock(VME_SUBMAP(old_entry));
	} else {
		vm_object_lock(object);
		vm_object_reference_locked(object);
		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
		}
		vm_object_unlock(object);
	}

	/*
	 *	Clone the entry, using object ref from above.
	 *	Mark both entries as shared.
	 */

	new_entry = vm_map_entry_create(new_map, FALSE); /* Never the kernel
							  * map or descendants */
	vm_map_entry_copy(new_entry, old_entry);
	old_entry->is_shared = TRUE;
	new_entry->is_shared = TRUE;

	/*
	 *	Insert the entry into the new map -- we
	 *	know we're inserting at the end of the new
	 *	map.
	 */

	vm_map_store_entry_link(new_map, vm_map_last_entry(new_map), new_entry);

	/*
	 *	Update the physical map
	 */

	if (old_entry->is_sub_map) {
		/* Bill Angell pmap support goes here */
	} else {
		pmap_copy(new_map->pmap, old_map->pmap, new_entry->vme_start,
			  old_entry->vme_end - old_entry->vme_start,
			  old_entry->vme_start);
	}
}
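
/*
 * vm_map_fork_share() is the VM_INHERIT_SHARE half of vm_map_fork():
 * parent and child end up with two entries pointing at the same object
 * (or nested submap), both marked is_shared, with the object switched
 * to an asymmetric (delayed) copy strategy so that later symmetric
 * copies do not observe changes made through either mapping.
 */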

static boolean_t
vm_map_fork_copy(
	vm_map_t	old_map,
	vm_map_entry_t	*old_entry_p,
	vm_map_t	new_map)
{
	vm_map_entry_t old_entry = *old_entry_p;
	vm_map_size_t entry_size = old_entry->vme_end - old_entry->vme_start;
	vm_map_offset_t start = old_entry->vme_start;
	vm_map_copy_t copy;
	vm_map_entry_t last = vm_map_last_entry(new_map);

	vm_map_unlock(old_map);
	/*
	 *	Use maxprot version of copyin because we
	 *	care about whether this memory can ever
	 *	be accessed, not just whether it's accessible
	 *	right now.
	 */
	if (vm_map_copyin_maxprot(old_map, start, entry_size, FALSE, &copy)
	    != KERN_SUCCESS) {
		/*
		 *	The map might have changed while it
		 *	was unlocked, check it again.  Skip
		 *	any blank space or permanently
		 *	unreadable region.
		 */
		vm_map_lock(old_map);
		if (!vm_map_lookup_entry(old_map, start, &last) ||
		    (last->max_protection & VM_PROT_READ) == VM_PROT_NONE) {
			last = last->vme_next;
		}
		*old_entry_p = last;

		/*
		 * XXX	For some error returns, want to
		 * XXX	skip to the next element.  Note
		 *	that INVALID_ADDRESS and
		 *	PROTECTION_FAILURE are handled above.
		 */

		return FALSE;
	}

	/*
	 *	Insert the copy into the new map
	 */

	vm_map_copy_insert(new_map, last, copy);

	/*
	 *	Pick up the traversal at the end of
	 *	the copied region.
	 */

	vm_map_lock(old_map);
	start += entry_size;
	if (! vm_map_lookup_entry(old_map, start, &last)) {
		last = last->vme_next;
	} else {
		if (last->vme_start == start) {
			/*
			 * No need to clip here and we don't
			 * want to cause any unnecessary
			 * unnesting...
			 */
		} else {
			vm_map_clip_start(old_map, last, start);
		}
	}
	*old_entry_p = last;

	return TRUE;
}

/*
 *	vm_map_fork:
 *
 *	Create and return a new map based on the old
 *	map, according to the inheritance values on the
 *	regions in that map.
 *
 *	The source map must not be locked.
 */
vm_map_t
vm_map_fork(
	ledger_t	ledger,
	vm_map_t	old_map)
{
	pmap_t		new_pmap;
	vm_map_t	new_map;
	vm_map_entry_t	old_entry;
	vm_map_size_t	new_size = 0, entry_size;
	vm_map_entry_t	new_entry;
	boolean_t	src_needs_copy;
	boolean_t	new_entry_needs_copy;
	boolean_t	pmap_is64bit;

	pmap_is64bit =
#if defined(__i386__) || defined(__x86_64__)
		old_map->pmap->pm_task_map != TASK_MAP_32BIT;
#else
#error Unknown architecture.
#endif

	new_pmap = pmap_create(ledger, (vm_map_size_t) 0, pmap_is64bit);

	vm_map_reference_swap(old_map);
	vm_map_lock(old_map);

	new_map = vm_map_create(new_pmap,
				old_map->min_offset,
				old_map->max_offset,
				old_map->hdr.entries_pageable);
	/* inherit the parent map's page size */
	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(old_map));
	for (
		old_entry = vm_map_first_entry(old_map);
		old_entry != vm_map_to_entry(old_map);
		) {

		entry_size = old_entry->vme_end - old_entry->vme_start;

		switch (old_entry->inheritance) {
		case VM_INHERIT_NONE:
			break;

		case VM_INHERIT_SHARE:
			vm_map_fork_share(old_map, old_entry, new_map);
			new_size += entry_size;
			break;

		case VM_INHERIT_COPY:

			/*
			 *	Inline the copy_quickly case;
			 *	upon failure, fall back on call
			 *	to vm_map_fork_copy.
			 */

			if(old_entry->is_sub_map)
				break;
			if ((old_entry->wired_count != 0) ||
			    ((VME_OBJECT(old_entry) != NULL) &&
			     (VME_OBJECT(old_entry)->true_share))) {
				goto slow_vm_map_fork_copy;
			}

			new_entry = vm_map_entry_create(new_map, FALSE); /* never the kernel map or descendants */
			vm_map_entry_copy(new_entry, old_entry);
			if (new_entry->is_sub_map) {
				/* clear address space specifics */
				new_entry->use_pmap = FALSE;
			}

			if (! vm_object_copy_quickly(
				    &VME_OBJECT(new_entry),
				    VME_OFFSET(old_entry),
				    (old_entry->vme_end -
				     old_entry->vme_start),
				    &src_needs_copy,
				    &new_entry_needs_copy)) {
				vm_map_entry_dispose(new_map, new_entry);
				goto slow_vm_map_fork_copy;
			}

			/*
			 *	Handle copy-on-write obligations
			 */

			if (src_needs_copy && !old_entry->needs_copy) {
				vm_prot_t prot;

				prot = old_entry->protection & ~VM_PROT_WRITE;

				if (override_nx(old_map, VME_ALIAS(old_entry))
				    && prot)
					prot |= VM_PROT_EXECUTE;

				vm_object_pmap_protect(
					VME_OBJECT(old_entry),
					VME_OFFSET(old_entry),
					(old_entry->vme_end -
					 old_entry->vme_start),
					((old_entry->is_shared
					  || old_map->mapped_in_other_pmaps)
					 ? PMAP_NULL :
					 old_map->pmap),
					old_entry->vme_start,
					prot);

				assert(old_entry->wired_count == 0);
				old_entry->needs_copy = TRUE;
			}
			new_entry->needs_copy = new_entry_needs_copy;

			/*
			 *	Insert the entry at the end
			 *	of the map.
			 */

			vm_map_store_entry_link(new_map, vm_map_last_entry(new_map),
						new_entry);
			new_size += entry_size;
			break;

		slow_vm_map_fork_copy:
			if (vm_map_fork_copy(old_map, &old_entry, new_map)) {
				new_size += entry_size;
			}
			continue;
		}
		old_entry = old_entry->vme_next;
	}


	new_map->size = new_size;
	vm_map_unlock(old_map);
	vm_map_deallocate(old_map);

	return(new_map);
}
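
/*
 * Illustrative sketch (hypothetical, simplified from the actual task
 * duplication path): the fork machinery uses vm_map_fork() roughly as
 *
 *	new_task->map = vm_map_fork(new_task->ledger, parent_task->map);
 *
 * with VM_INHERIT_COPY regions becoming copy-on-write in both maps,
 * VM_INHERIT_SHARE regions handled by vm_map_fork_share() above, and
 * VM_INHERIT_NONE regions simply left out of the child.
 */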

/*
 * vm_map_exec:
 *
 * 	Set up the "new_map" with the proper execution environment according
 *	to the type of executable (platform, 64bit, chroot environment).
 *	Map the comm page and shared region, etc...
 */
kern_return_t
vm_map_exec(
	vm_map_t	new_map,
	task_t		task,
	void		*fsroot,
	cpu_type_t	cpu)
{
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): ->\n",
		 (void *)VM_KERNEL_ADDRPERM(current_task()),
		 (void *)VM_KERNEL_ADDRPERM(new_map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot),
		 cpu));
	(void) vm_commpage_enter(new_map, task);
	(void) vm_shared_region_enter(new_map, task, fsroot, cpu);
	SHARED_REGION_TRACE_DEBUG(
		("shared_region: task %p: vm_map_exec(%p,%p,%p,0x%x): <-\n",
		 (void *)VM_KERNEL_ADDRPERM(current_task()),
		 (void *)VM_KERNEL_ADDRPERM(new_map),
		 (void *)VM_KERNEL_ADDRPERM(task),
		 (void *)VM_KERNEL_ADDRPERM(fsroot),
		 cpu));
	return KERN_SUCCESS;
}

/*
 *	vm_map_lookup_locked:
 *
 *	Finds the VM object, offset, and
 *	protection for a given virtual address in the
 *	specified map, assuming a page fault of the
 *	type specified.
 *
 *	Returns the (object, offset, protection) for
 *	this address, whether it is wired down, and whether
 *	this map has the only reference to the data in question.
 *	In order to later verify this lookup, a "version"
 *	is returned.
 *
 *	The map MUST be locked by the caller and WILL be
 *	locked on exit.  In order to guarantee the
 *	existence of the returned object, it is returned
 *	locked.
 *
 *	If a lookup is requested with "write protection"
 *	specified, the map may be changed to perform virtual
 *	copying operations, although the data referenced will
 *	remain the same.
 */
kern_return_t
vm_map_lookup_locked(
	vm_map_t		*var_map,	/* IN/OUT */
	vm_map_offset_t		vaddr,
	vm_prot_t		fault_type,
	int			object_lock_type,
	vm_map_version_t	*out_version,	/* OUT */
	vm_object_t		*object,	/* OUT */
	vm_object_offset_t	*offset,	/* OUT */
	vm_prot_t		*out_prot,	/* OUT */
	boolean_t		*wired,		/* OUT */
	vm_object_fault_info_t	fault_info,	/* OUT */
	vm_map_t		*real_map)
{
	vm_map_entry_t			entry;
	register vm_map_t		map = *var_map;
	vm_map_t			old_map = *var_map;
	vm_map_t			cow_sub_map_parent = VM_MAP_NULL;
	vm_map_offset_t			cow_parent_vaddr = 0;
	vm_map_offset_t			old_start = 0;
	vm_map_offset_t			old_end = 0;
	register vm_prot_t		prot;
	boolean_t			mask_protections;
	boolean_t			force_copy;
	vm_prot_t			original_fault_type;

	/*
	 * VM_PROT_IS_MASK means that the caller wants us to use "fault_type"
	 * as a mask against the mapping's actual protections, not as an
	 * absolute value.
	 */
	mask_protections = (fault_type & VM_PROT_IS_MASK) ? TRUE : FALSE;
	force_copy = (fault_type & VM_PROT_COPY) ? TRUE : FALSE;
	fault_type &= VM_PROT_ALL;
	original_fault_type = fault_type;

	*real_map = map;

RetryLookup:
	fault_type = original_fault_type;

	/*
	 *	If the map has an interesting hint, try it before calling
	 *	full blown lookup routine.
	 */
	entry = map->hint;

	if ((entry == vm_map_to_entry(map)) ||
	    (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
		vm_map_entry_t	tmp_entry;

		/*
		 *	Entry was either not a valid hint, or the vaddr
		 *	was not contained in the entry, so do a full lookup.
		 */
		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) {
			if((cow_sub_map_parent) && (cow_sub_map_parent != map))
				vm_map_unlock(cow_sub_map_parent);
			if((*real_map != map)
			   && (*real_map != cow_sub_map_parent))
				vm_map_unlock(*real_map);
			return KERN_INVALID_ADDRESS;
		}

		entry = tmp_entry;
	}
	if(map == old_map) {
		old_start = entry->vme_start;
		old_end = entry->vme_end;
	}

	/*
	 *	Handle submaps.  Drop lock on upper map, submap is
	 *	returned locked.
	 */

submap_recurse:
	if (entry->is_sub_map) {
		vm_map_offset_t		local_vaddr;
		vm_map_offset_t		end_delta;
		vm_map_offset_t		start_delta;
		vm_map_entry_t		submap_entry;
		boolean_t		mapped_needs_copy=FALSE;

		local_vaddr = vaddr;

		if ((entry->use_pmap && !(fault_type & VM_PROT_WRITE))) {
			/* if real_map equals map we unlock below */
			if ((*real_map != map) &&
			    (*real_map != cow_sub_map_parent))
				vm_map_unlock(*real_map);
			*real_map = VME_SUBMAP(entry);
		}

		if(entry->needs_copy && (fault_type & VM_PROT_WRITE)) {
			if (!mapped_needs_copy) {
				if (vm_map_lock_read_to_write(map)) {
					vm_map_lock_read(map);
					*real_map = map;
					goto RetryLookup;
				}
				vm_map_lock_read(VME_SUBMAP(entry));
				*var_map = VME_SUBMAP(entry);
				cow_sub_map_parent = map;
				/* reset base to map before cow object */
				/* this is the map which will accept */
				/* the new cow object */
				old_start = entry->vme_start;
				old_end = entry->vme_end;
				cow_parent_vaddr = vaddr;
				mapped_needs_copy = TRUE;
			} else {
				vm_map_lock_read(VME_SUBMAP(entry));
				*var_map = VME_SUBMAP(entry);
				if((cow_sub_map_parent != map) &&
				   (*real_map != map))
					vm_map_unlock(map);
			}
		} else {
			vm_map_lock_read(VME_SUBMAP(entry));
			*var_map = VME_SUBMAP(entry);
			/* leave map locked if it is a target */
			/* cow sub_map above otherwise, just  */
			/* follow the maps down to the object */
			/* here we unlock knowing we are not  */
			/* revisiting the map.                */
			if((*real_map != map) && (map != cow_sub_map_parent))
				vm_map_unlock_read(map);
		}

		map = *var_map;

		/* calculate the offset in the submap for vaddr */
		local_vaddr = (local_vaddr - entry->vme_start) + VME_OFFSET(entry);

	RetrySubMap:
		if(!vm_map_lookup_entry(map, local_vaddr, &submap_entry)) {
			if((cow_sub_map_parent) && (cow_sub_map_parent != map)){
				vm_map_unlock(cow_sub_map_parent);
			}
			if((*real_map != map)
			   && (*real_map != cow_sub_map_parent)) {
				vm_map_unlock(*real_map);
			}
			*real_map = map;
			return KERN_INVALID_ADDRESS;
		}

		/* find the attenuated shadow of the underlying object */
		/* on our target map */

		/* in English: the submap object may extend beyond the */
		/* region mapped by the entry or, may only fill a portion */
		/* of it.  For our purposes, we only care if the object */
		/* doesn't fill.  In this case the area which will */
		/* ultimately be clipped in the top map will only need */
		/* to be as big as the portion of the underlying entry */
		/* which is mapped */
		start_delta = submap_entry->vme_start > VME_OFFSET(entry) ?
			submap_entry->vme_start - VME_OFFSET(entry) : 0;

		end_delta =
			(VME_OFFSET(entry) + start_delta + (old_end - old_start)) <=
			submap_entry->vme_end ?
			0 : (VME_OFFSET(entry) +
			     (old_end - old_start))
			- submap_entry->vme_end;

		old_start += start_delta;
		old_end -= end_delta;

		if(submap_entry->is_sub_map) {
			entry = submap_entry;
			vaddr = local_vaddr;
			goto submap_recurse;
		}

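		/*
		 * Note that nested submaps are handled by tail recursion:
		 * if the entry found in this submap is itself a submap,
		 * we jump back to "submap_recurse" with the translated
		 * address, descending one level per iteration while the
		 * locking rules above keep exactly the right maps locked.
		 */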
		if(((fault_type & VM_PROT_WRITE) && cow_sub_map_parent)) {

			vm_object_t	sub_object, copy_object;
			vm_object_offset_t copy_offset;
			vm_map_offset_t	local_start;
			vm_map_offset_t	local_end;
			boolean_t	copied_slowly = FALSE;

			if (vm_map_lock_read_to_write(map)) {
				vm_map_lock_read(map);
				old_start -= start_delta;
				old_end += end_delta;
				goto RetrySubMap;
			}


			sub_object = VME_OBJECT(submap_entry);
			if (sub_object == VM_OBJECT_NULL) {
				sub_object =
					vm_object_allocate(
						(vm_map_size_t)
						(submap_entry->vme_end -
						 submap_entry->vme_start));
				VME_OBJECT_SET(submap_entry, sub_object);
				VME_OFFSET_SET(submap_entry, 0);
			}
			local_start =  local_vaddr -
				(cow_parent_vaddr - old_start);
			local_end = local_vaddr +
				(old_end - cow_parent_vaddr);
			vm_map_clip_start(map, submap_entry, local_start);
			vm_map_clip_end(map, submap_entry, local_end);
			if (submap_entry->is_sub_map) {
				/* unnesting was done when clipping */
				assert(!submap_entry->use_pmap);
			}

			/* This is the COW case; let's connect */
			/* an entry in our space to the underlying */
			/* object in the submap, bypassing the */
			/* submap. */


			if(submap_entry->wired_count != 0 ||
			   (sub_object->copy_strategy ==
			    MEMORY_OBJECT_COPY_NONE)) {
				vm_object_lock(sub_object);
				vm_object_copy_slowly(sub_object,
						      VME_OFFSET(submap_entry),
						      (submap_entry->vme_end -
						       submap_entry->vme_start),
						      FALSE,
						      &copy_object);
				copied_slowly = TRUE;
			} else {

				/* set up shadow object */
				copy_object = sub_object;
				vm_object_reference(copy_object);
				sub_object->shadowed = TRUE;
				assert(submap_entry->wired_count == 0);
				submap_entry->needs_copy = TRUE;

				prot = submap_entry->protection & ~VM_PROT_WRITE;

				if (override_nx(old_map,
						VME_ALIAS(submap_entry))
				    && prot)
					prot |= VM_PROT_EXECUTE;

				vm_object_pmap_protect(
					sub_object,
					VME_OFFSET(submap_entry),
					submap_entry->vme_end -
					submap_entry->vme_start,
					(submap_entry->is_shared
					 || map->mapped_in_other_pmaps) ?
					PMAP_NULL : map->pmap,
					submap_entry->vme_start,
					prot);
			}

			/*
			 * Adjust the fault offset to the submap entry.
			 */
			copy_offset = (local_vaddr -
				       submap_entry->vme_start +
				       VME_OFFSET(submap_entry));

			/* This works differently from the */
			/* normal submap case.  We go back */
			/* to the parent of the cow map and */
			/* clip out the target portion of */
			/* the sub_map, substituting the */
			/* new copy object. */

			vm_map_unlock(map);
			local_start = old_start;
			local_end = old_end;
			map = cow_sub_map_parent;
			*var_map = cow_sub_map_parent;
			vaddr = cow_parent_vaddr;
			cow_sub_map_parent = NULL;

			if(!vm_map_lookup_entry(map,
						vaddr, &entry)) {
				vm_object_deallocate(
					copy_object);
				vm_map_lock_write_to_read(map);
				return KERN_INVALID_ADDRESS;
			}

			/* clip out the portion of space */
			/* mapped by the sub map which */
			/* corresponds to the underlying */
			/* object */

			/*
			 * Clip (and unnest) the smallest nested chunk
			 * possible around the faulting address...
			 */
			local_start = vaddr & ~(pmap_nesting_size_min - 1);
			local_end = local_start + pmap_nesting_size_min;
			/*
			 * ... but don't go beyond the "old_start" to "old_end"
			 * range, to avoid spanning over another VM region
			 * with a possibly different VM object and/or offset.
			 */
			if (local_start < old_start) {
				local_start = old_start;
			}
			if (local_end > old_end) {
				local_end = old_end;
			}
			/*
			 * Adjust copy_offset to the start of the range.
			 */
			copy_offset -= (vaddr - local_start);

			vm_map_clip_start(map, entry, local_start);
			vm_map_clip_end(map, entry, local_end);
			if (entry->is_sub_map) {
				/* unnesting was done when clipping */
				assert(!entry->use_pmap);
			}

			/* substitute copy object for */
			/* shared map entry           */
			vm_map_deallocate(VME_SUBMAP(entry));
			assert(!entry->iokit_acct);
			entry->is_sub_map = FALSE;
			entry->use_pmap = TRUE;
			VME_OBJECT_SET(entry, copy_object);

			/* propagate the submap entry's protections */
			entry->protection |= submap_entry->protection;
			entry->max_protection |= submap_entry->max_protection;

			if(copied_slowly) {
				VME_OFFSET_SET(entry, local_start - old_start);
				entry->needs_copy = FALSE;
				entry->is_shared = FALSE;
			} else {
				VME_OFFSET_SET(entry, copy_offset);
				assert(entry->wired_count == 0);
				entry->needs_copy = TRUE;
				if(entry->inheritance == VM_INHERIT_SHARE)
					entry->inheritance = VM_INHERIT_COPY;
				if (map != old_map)
					entry->is_shared = TRUE;
			}
			if(entry->inheritance == VM_INHERIT_SHARE)
				entry->inheritance = VM_INHERIT_COPY;

			vm_map_lock_write_to_read(map);
		} else {
			if((cow_sub_map_parent)
			   && (cow_sub_map_parent != *real_map)
			   && (cow_sub_map_parent != map)) {
				vm_map_unlock(cow_sub_map_parent);
			}
			entry = submap_entry;
			vaddr = local_vaddr;
		}
	}

/*
 * Check whether this task is allowed to have
 * this page.
 */

prot = entry->protection;

if (override_nx(old_map, VME_ALIAS(entry)) && prot) {
    /*
     * HACK -- if not a stack, then allow execution
     */
    prot |= VM_PROT_EXECUTE;
}

if (mask_protections) {
    fault_type &= prot;
    if (fault_type == VM_PROT_NONE) {
        goto protection_failure;
    }
}
if ((fault_type & (prot)) != fault_type) {
protection_failure:
    if (*real_map != map) {
        vm_map_unlock(*real_map);
    }
    *real_map = map;

    if ((fault_type & VM_PROT_EXECUTE) && prot)
        log_stack_execution_failure((addr64_t)vaddr, prot);

    DTRACE_VM2(prot_fault, int, 1, (uint64_t *), NULL);
    return KERN_PROTECTION_FAILURE;
}

/*
 * If this page is not pageable, we have to get
 * it for all possible accesses.
 */

*wired = (entry->wired_count != 0);
if (*wired)
    fault_type = prot;

/*
 * If the entry was copy-on-write, we either ...
 */

if (entry->needs_copy) {
    /*
     * If we want to write the page, we may as well
     * handle that now since we've got the map locked.
     *
     * If we don't need to write the page, we just
     * demote the permissions allowed.
     */

    if ((fault_type & VM_PROT_WRITE) || *wired || force_copy) {
        /*
         * Make a new object, and place it in the
         * object chain.  Note that no new references
         * have appeared -- one just moved from the
         * map to the new object.
         */

        if (vm_map_lock_read_to_write(map)) {
            vm_map_lock_read(map);
            goto RetryLookup;
        }
        VME_OBJECT_SHADOW(entry,
                          (vm_map_size_t) (entry->vme_end -
                                           entry->vme_start));

        VME_OBJECT(entry)->shadowed = TRUE;
        entry->needs_copy = FALSE;
        vm_map_lock_write_to_read(map);
    }
    else {
        /*
         * We're attempting to read a copy-on-write
         * page -- don't allow writes.
         */

        prot &= (~VM_PROT_WRITE);
    }
}

/*
 * Create an object if necessary.
 */
if (VME_OBJECT(entry) == VM_OBJECT_NULL) {

    if (vm_map_lock_read_to_write(map)) {
        vm_map_lock_read(map);
        goto RetryLookup;
    }

    VME_OBJECT_SET(entry,
                   vm_object_allocate(
                       (vm_map_size_t)(entry->vme_end -
                                       entry->vme_start)));
    VME_OFFSET_SET(entry, 0);
    vm_map_lock_write_to_read(map);
}

/*
 * Return the object/offset from this entry.  If the entry
 * was copy-on-write or empty, it has been fixed up.  Also
 * return the protection.
 */

*offset = (vaddr - entry->vme_start) + VME_OFFSET(entry);
*object = VME_OBJECT(entry);
*out_prot = prot;

if (fault_info) {
    fault_info->interruptible = THREAD_UNINT; /* for now... */
    /* ... the caller will change "interruptible" if needed */
    fault_info->cluster_size = 0;
    fault_info->user_tag = VME_ALIAS(entry);
    fault_info->pmap_options = 0;
    if (entry->iokit_acct ||
        (!entry->is_sub_map && !entry->use_pmap)) {
        fault_info->pmap_options |= PMAP_OPTIONS_ALT_ACCT;
    }
    fault_info->behavior = entry->behavior;
    fault_info->lo_offset = VME_OFFSET(entry);
    fault_info->hi_offset =
        (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
    fault_info->no_cache = entry->no_cache;
    fault_info->stealth = FALSE;
    fault_info->io_sync = FALSE;
    if (entry->used_for_jit ||
        entry->vme_resilient_codesign) {
        fault_info->cs_bypass = TRUE;
    } else {
        fault_info->cs_bypass = FALSE;
    }
    fault_info->mark_zf_absent = FALSE;
    fault_info->batch_pmap_op = FALSE;
}

/*
 * Lock the object to prevent it from disappearing
 */
if (object_lock_type == OBJECT_LOCK_EXCLUSIVE)
    vm_object_lock(*object);
else
    vm_object_lock_shared(*object);

/*
 * Save the version number
 */

out_version->main_timestamp = map->timestamp;

return KERN_SUCCESS;
}


/*
 *	vm_map_verify:
 *
 *	Verifies that the map in question has not changed
 *	since the given version.  If successful, the map
 *	will not change until vm_map_verify_done() is called.
 */
boolean_t
vm_map_verify(
    register vm_map_t           map,
    register vm_map_version_t   *version)  /* REF */
{
    boolean_t   result;

    vm_map_lock_read(map);
    result = (map->timestamp == version->main_timestamp);

    if (!result)
        vm_map_unlock_read(map);

    return(result);
}

/*
 *	vm_map_verify_done:
 *
 *	Releases locks acquired by a vm_map_verify.
 *
 *	This is now a macro in vm/vm_map.h.  It does a
 *	vm_map_unlock_read on the map.
 */

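/*
 * Illustrative sketch (editorial, not part of the original source): how a
 * caller is expected to pair vm_map_verify() with vm_map_verify_done().
 * The local names and the elided lookup step are hypothetical.
 *
 *	vm_map_version_t version;
 *
 *	// ... a lookup fills in "version" while the map is read-locked,
 *	// then the lock is dropped to do blocking work ...
 *
 *	if (vm_map_verify(map, &version)) {
 *		// Map unchanged and read-locked again: the cached
 *		// lookup results are still valid.
 *		// ... use the object/offset from the earlier lookup ...
 *		vm_map_verify_done(map, &version); // vm_map_unlock_read()
 *	} else {
 *		// Map changed: the lookup must be redone.
 *	}
 */
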
/*
 *	TEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARYTEMPORARY
 *	Goes away after regular vm_region_recurse function migrates to
 *	64 bits
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */

kern_return_t
vm_map_region_recurse_64(
    vm_map_t                    map,
    vm_map_offset_t             *address,       /* IN/OUT */
    vm_map_size_t               *size,          /* OUT */
    natural_t                   *nesting_depth, /* IN/OUT */
    vm_region_submap_info_64_t  submap_info,    /* IN/OUT */
    mach_msg_type_number_t      *count)         /* IN/OUT */
{
    mach_msg_type_number_t      original_count;
    vm_region_extended_info_data_t extended;
    vm_map_entry_t              tmp_entry;
    vm_map_offset_t             user_address;
    unsigned int                user_max_depth;

    /*
     * "curr_entry" is the VM map entry preceding or including the
     * address we're looking for.
     * "curr_map" is the map or sub-map containing "curr_entry".
     * "curr_address" is the equivalent of the top map's "user_address"
     * in the current map.
     * "curr_offset" is the cumulative offset of "curr_map" in the
     * target task's address space.
     * "curr_depth" is the depth of "curr_map" in the chain of
     * sub-maps.
     *
     * "curr_max_below" and "curr_max_above" limit the range (around
     * "curr_address") we should take into account in the current (sub)map.
     * They limit the range to what's visible through the map entries
     * we've traversed from the top map to the current map.
     */
    vm_map_entry_t              curr_entry;
    vm_map_address_t            curr_address;
    vm_map_offset_t             curr_offset;
    vm_map_t                    curr_map;
    unsigned int                curr_depth;
    vm_map_offset_t             curr_max_below, curr_max_above;
    vm_map_offset_t             curr_skip;

    /*
     * "next_" is the same as "curr_" but for the VM region immediately
     * after the address we're looking for.  We need to keep track of this
     * too because we want to return info about that region if the
     * address we're looking for is not mapped.
     */
    vm_map_entry_t              next_entry;
    vm_map_offset_t             next_offset;
    vm_map_offset_t             next_address;
    vm_map_t                    next_map;
    unsigned int                next_depth;
    vm_map_offset_t             next_max_below, next_max_above;
    vm_map_offset_t             next_skip;

    boolean_t                   look_for_pages;
    vm_region_submap_short_info_64_t short_info;

    if (map == VM_MAP_NULL) {
        /* no address space to work on */
        return KERN_INVALID_ARGUMENT;
    }


    if (*count < VM_REGION_SUBMAP_SHORT_INFO_COUNT_64) {
        /*
         * "info" structure is not big enough and
         * would overflow
         */
        return KERN_INVALID_ARGUMENT;
    }

    original_count = *count;

    if (original_count < VM_REGION_SUBMAP_INFO_V0_COUNT_64) {
        *count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
        look_for_pages = FALSE;
        short_info = (vm_region_submap_short_info_64_t) submap_info;
        submap_info = NULL;
    } else {
        look_for_pages = TRUE;
        *count = VM_REGION_SUBMAP_INFO_V0_COUNT_64;
        short_info = NULL;

        if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) {
            *count = VM_REGION_SUBMAP_INFO_V1_COUNT_64;
        }
    }

    user_address = *address;
    user_max_depth = *nesting_depth;

    if (not_in_kdp) {
        vm_map_lock_read(map);
    }

recurse_again:
    curr_entry = NULL;
    curr_map = map;
    curr_address = user_address;
    curr_offset = 0;
    curr_skip = 0;
    curr_depth = 0;
    curr_max_above = ((vm_map_offset_t) -1) - curr_address;
    curr_max_below = curr_address;

    next_entry = NULL;
    next_map = NULL;
    next_address = 0;
    next_offset = 0;
    next_skip = 0;
    next_depth = 0;
    next_max_above = (vm_map_offset_t) -1;
    next_max_below = (vm_map_offset_t) -1;

    for (;;) {
        if (vm_map_lookup_entry(curr_map,
                                curr_address,
                                &tmp_entry)) {
            /* tmp_entry contains the address we're looking for */
            curr_entry = tmp_entry;
        } else {
            vm_map_offset_t skip;
            /*
             * The address is not mapped.  "tmp_entry" is the
             * map entry preceding the address.  We want the next
             * one, if it exists.
             */
            curr_entry = tmp_entry->vme_next;

            if (curr_entry == vm_map_to_entry(curr_map) ||
                (curr_entry->vme_start >=
                 curr_address + curr_max_above)) {
                /* no next entry at this level: stop looking */
                if (not_in_kdp) {
                    vm_map_unlock_read(curr_map);
                }
                curr_entry = NULL;
                curr_map = NULL;
                curr_skip = 0;
                curr_offset = 0;
                curr_depth = 0;
                curr_max_above = 0;
                curr_max_below = 0;
                break;
            }

            /* adjust current address and offset */
            skip = curr_entry->vme_start - curr_address;
            curr_address = curr_entry->vme_start;
            curr_skip += skip;
            curr_offset += skip;
            curr_max_above -= skip;
            curr_max_below = 0;
        }

        /*
         * Is the next entry at this level closer to the address (or
         * deeper in the submap chain) than the one we had
         * so far?
         */
        tmp_entry = curr_entry->vme_next;
        if (tmp_entry == vm_map_to_entry(curr_map)) {
            /* no next entry at this level */
        } else if (tmp_entry->vme_start >=
                   curr_address + curr_max_above) {
            /*
             * tmp_entry is beyond the scope of what we mapped of
             * this submap in the upper level: ignore it.
             */
        } else if ((next_entry == NULL) ||
                   (tmp_entry->vme_start + curr_offset <=
                    next_entry->vme_start + next_offset)) {
            /*
             * We didn't have a "next_entry" or this one is
             * closer to the address we're looking for:
             * use this "tmp_entry" as the new "next_entry".
             */
            if (next_entry != NULL) {
                /* unlock the last "next_map" */
                if (next_map != curr_map && not_in_kdp) {
                    vm_map_unlock_read(next_map);
                }
            }
            next_entry = tmp_entry;
            next_map = curr_map;
            next_depth = curr_depth;
            next_address = next_entry->vme_start;
            next_skip = curr_skip;
            next_skip += (next_address - curr_address);
            next_offset = curr_offset;
            next_offset += (next_address - curr_address);
            next_max_above = MIN(next_max_above, curr_max_above);
            next_max_above = MIN(next_max_above,
                                 next_entry->vme_end - next_address);
            next_max_below = MIN(next_max_below, curr_max_below);
            next_max_below = MIN(next_max_below,
                                 next_address - next_entry->vme_start);
        }

        /*
         * "curr_max_{above,below}" allow us to keep track of the
         * portion of the submap that is actually mapped at this level:
         * the rest of that submap is irrelevant to us, since it's not
         * mapped here.
         * The relevant portion of the map starts at
         * "VME_OFFSET(curr_entry)" up to the size of "curr_entry".
         */
        curr_max_above = MIN(curr_max_above,
                             curr_entry->vme_end - curr_address);
        curr_max_below = MIN(curr_max_below,
                             curr_address - curr_entry->vme_start);

        if (!curr_entry->is_sub_map ||
            curr_depth >= user_max_depth) {
            /*
             * We hit a leaf map or we reached the maximum depth
             * we could, so stop looking.  Keep the current map
             * locked.
             */
            break;
        }

        /*
         * Get down to the next submap level.
         */

        /*
         * Lock the next level and unlock the current level,
         * unless we need to keep it locked to access the "next_entry"
         * later.
         */
        if (not_in_kdp) {
            vm_map_lock_read(VME_SUBMAP(curr_entry));
        }
        if (curr_map == next_map) {
            /* keep "next_map" locked in case we need it */
        } else {
            /* release this map */
            if (not_in_kdp)
                vm_map_unlock_read(curr_map);
        }

        /*
         * Adjust the offset.  "curr_entry" maps the submap
         * at relative address "curr_entry->vme_start" in the
         * curr_map but skips the first "VME_OFFSET(curr_entry)"
         * bytes of the submap.
         * "curr_offset" always represents the offset of a virtual
         * address in the curr_map relative to the absolute address
         * space (i.e. the top-level VM map).
         */
        curr_offset +=
            (VME_OFFSET(curr_entry) - curr_entry->vme_start);
        curr_address = user_address + curr_offset;
        /* switch to the submap */
        curr_map = VME_SUBMAP(curr_entry);
        curr_depth++;
        curr_entry = NULL;
    }

    if (curr_entry == NULL) {
        /* no VM region contains the address... */
        if (next_entry == NULL) {
            /* ... and no VM region follows it either */
            return KERN_INVALID_ADDRESS;
        }
        /* ... gather info about the next VM region */
        curr_entry = next_entry;
        curr_map = next_map;    /* still locked ... */
        curr_address = next_address;
        curr_skip = next_skip;
        curr_offset = next_offset;
        curr_depth = next_depth;
        curr_max_above = next_max_above;
        curr_max_below = next_max_below;
    } else {
        /* we won't need "next_entry" after all */
        if (next_entry != NULL) {
            /* release "next_map" */
            if (next_map != curr_map && not_in_kdp) {
                vm_map_unlock_read(next_map);
            }
        }
    }
    next_entry = NULL;
    next_map = NULL;
    next_offset = 0;
    next_skip = 0;
    next_depth = 0;
    next_max_below = -1;
    next_max_above = -1;

    if (curr_entry->is_sub_map &&
        curr_depth < user_max_depth) {
        /*
         * We're not as deep as we could be: we must have
         * gone back up after not finding anything mapped
         * below the original top-level map entry's range.
         * Let's move "curr_address" forward and recurse again.
         */
        user_address = curr_address;
        goto recurse_again;
    }

    *nesting_depth = curr_depth;
    *size = curr_max_above + curr_max_below;
    *address = user_address + curr_skip - curr_max_below;

    // LP64todo: all the current tools are 32bit, obviously never worked for 64b
    // so probably should be a real 32b ID vs. ptr.
    // Current users just check for equality
#define INFO_MAKE_OBJECT_ID(p) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM(p))

    if (look_for_pages) {
        submap_info->user_tag = VME_ALIAS(curr_entry);
        submap_info->offset = VME_OFFSET(curr_entry);
        submap_info->protection = curr_entry->protection;
        submap_info->inheritance = curr_entry->inheritance;
        submap_info->max_protection = curr_entry->max_protection;
        submap_info->behavior = curr_entry->behavior;
        submap_info->user_wired_count = curr_entry->user_wired_count;
        submap_info->is_submap = curr_entry->is_sub_map;
        submap_info->object_id = INFO_MAKE_OBJECT_ID(VME_OBJECT(curr_entry));
    } else {
        short_info->user_tag = VME_ALIAS(curr_entry);
        short_info->offset = VME_OFFSET(curr_entry);
        short_info->protection = curr_entry->protection;
        short_info->inheritance = curr_entry->inheritance;
        short_info->max_protection = curr_entry->max_protection;
        short_info->behavior = curr_entry->behavior;
        short_info->user_wired_count = curr_entry->user_wired_count;
        short_info->is_submap = curr_entry->is_sub_map;
        short_info->object_id = INFO_MAKE_OBJECT_ID(VME_OBJECT(curr_entry));
    }

    extended.pages_resident = 0;
    extended.pages_swapped_out = 0;
    extended.pages_shared_now_private = 0;
    extended.pages_dirtied = 0;
    extended.pages_reusable = 0;
    extended.external_pager = 0;
    extended.shadow_depth = 0;
    extended.share_mode = SM_EMPTY;
    extended.ref_count = 0;

    if (not_in_kdp) {
        if (!curr_entry->is_sub_map) {
            vm_map_offset_t range_start, range_end;
            range_start = MAX((curr_address - curr_max_below),
                              curr_entry->vme_start);
            range_end = MIN((curr_address + curr_max_above),
                            curr_entry->vme_end);
            vm_map_region_walk(curr_map,
                               range_start,
                               curr_entry,
                               (VME_OFFSET(curr_entry) +
                                (range_start -
                                 curr_entry->vme_start)),
                               range_end - range_start,
                               &extended,
                               look_for_pages, VM_REGION_EXTENDED_INFO_COUNT);
            if (extended.external_pager &&
                extended.ref_count == 2 &&
                extended.share_mode == SM_SHARED) {
                extended.share_mode = SM_PRIVATE;
            }
        } else {
            if (curr_entry->use_pmap) {
                extended.share_mode = SM_TRUESHARED;
            } else {
                extended.share_mode = SM_PRIVATE;
            }
            extended.ref_count = VME_SUBMAP(curr_entry)->ref_count;
        }
    }

    if (look_for_pages) {
        submap_info->pages_resident = extended.pages_resident;
        submap_info->pages_swapped_out = extended.pages_swapped_out;
        submap_info->pages_shared_now_private =
            extended.pages_shared_now_private;
        submap_info->pages_dirtied = extended.pages_dirtied;
        submap_info->external_pager = extended.external_pager;
        submap_info->shadow_depth = extended.shadow_depth;
        submap_info->share_mode = extended.share_mode;
        submap_info->ref_count = extended.ref_count;

        if (original_count >= VM_REGION_SUBMAP_INFO_V1_COUNT_64) {
            submap_info->pages_reusable = extended.pages_reusable;
        }
    } else {
        short_info->external_pager = extended.external_pager;
        short_info->shadow_depth = extended.shadow_depth;
        short_info->share_mode = extended.share_mode;
        short_info->ref_count = extended.ref_count;
    }

    if (not_in_kdp) {
        vm_map_unlock_read(curr_map);
    }

    return KERN_SUCCESS;
}

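/*
 * Illustrative user-level sketch (an assumption, not part of this file):
 * vm_map_region_recurse_64() is the kernel side of the
 * mach_vm_region_recurse() call.  A typical caller walks a task's regions
 * like this; "task" and the locals are hypothetical.
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t size;
 *	natural_t depth = 0;
 *	vm_region_submap_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
 *
 *	kern_return_t kr = mach_vm_region_recurse(task, &addr, &size,
 *						  &depth,
 *						  (vm_region_recurse_info_t)&info,
 *						  &count);
 *	// On success, [addr, addr + size) describes the region at or after
 *	// the requested address, and "depth" reflects how far the walk
 *	// descended into nested submaps.
 */
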
/*
 *	vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map.  Several flavors of information
 *	are supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 */

kern_return_t
vm_map_region(
    vm_map_t                map,
    vm_map_offset_t         *address,       /* IN/OUT */
    vm_map_size_t           *size,          /* OUT */
    vm_region_flavor_t      flavor,         /* IN */
    vm_region_info_t        info,           /* OUT */
    mach_msg_type_number_t  *count,         /* IN/OUT */
    mach_port_t             *object_name)   /* OUT */
{
    vm_map_entry_t      tmp_entry;
    vm_map_entry_t      entry;
    vm_map_offset_t     start;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    switch (flavor) {

    case VM_REGION_BASIC_INFO:
        /* legacy for old 32-bit objects info */
    {
        vm_region_basic_info_t  basic;

        if (*count < VM_REGION_BASIC_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        basic = (vm_region_basic_info_t) info;
        *count = VM_REGION_BASIC_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }

        start = entry->vme_start;

        basic->offset = (uint32_t)VME_OFFSET(entry);
        basic->protection = entry->protection;
        basic->inheritance = entry->inheritance;
        basic->max_protection = entry->max_protection;
        basic->behavior = entry->behavior;
        basic->user_wired_count = entry->user_wired_count;
        basic->reserved = entry->is_sub_map;
        *address = start;
        *size = (entry->vme_end - start);

        if (object_name) *object_name = IP_NULL;
        if (entry->is_sub_map) {
            basic->shared = FALSE;
        } else {
            basic->shared = entry->is_shared;
        }

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }

    case VM_REGION_BASIC_INFO_64:
    {
        vm_region_basic_info_64_t   basic;

        if (*count < VM_REGION_BASIC_INFO_COUNT_64)
            return(KERN_INVALID_ARGUMENT);

        basic = (vm_region_basic_info_64_t) info;
        *count = VM_REGION_BASIC_INFO_COUNT_64;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }

        start = entry->vme_start;

        basic->offset = VME_OFFSET(entry);
        basic->protection = entry->protection;
        basic->inheritance = entry->inheritance;
        basic->max_protection = entry->max_protection;
        basic->behavior = entry->behavior;
        basic->user_wired_count = entry->user_wired_count;
        basic->reserved = entry->is_sub_map;
        *address = start;
        *size = (entry->vme_end - start);

        if (object_name) *object_name = IP_NULL;
        if (entry->is_sub_map) {
            basic->shared = FALSE;
        } else {
            basic->shared = entry->is_shared;
        }

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    case VM_REGION_EXTENDED_INFO:
        if (*count < VM_REGION_EXTENDED_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);
        /*fallthru*/
    case VM_REGION_EXTENDED_INFO__legacy:
        if (*count < VM_REGION_EXTENDED_INFO_COUNT__legacy)
            return KERN_INVALID_ARGUMENT;

    {
        vm_region_extended_info_t   extended;
        mach_msg_type_number_t      original_count;

        extended = (vm_region_extended_info_t) info;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }
        start = entry->vme_start;

        extended->protection = entry->protection;
        extended->user_tag = VME_ALIAS(entry);
        extended->pages_resident = 0;
        extended->pages_swapped_out = 0;
        extended->pages_shared_now_private = 0;
        extended->pages_dirtied = 0;
        extended->external_pager = 0;
        extended->shadow_depth = 0;

        original_count = *count;
        if (flavor == VM_REGION_EXTENDED_INFO__legacy) {
            *count = VM_REGION_EXTENDED_INFO_COUNT__legacy;
        } else {
            extended->pages_reusable = 0;
            *count = VM_REGION_EXTENDED_INFO_COUNT;
        }

        vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, extended, TRUE, *count);

        if (extended->external_pager && extended->ref_count == 2 && extended->share_mode == SM_SHARED)
            extended->share_mode = SM_PRIVATE;

        if (object_name)
            *object_name = IP_NULL;
        *address = start;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    case VM_REGION_TOP_INFO:
    {
        vm_region_top_info_t    top;

        if (*count < VM_REGION_TOP_INFO_COUNT)
            return(KERN_INVALID_ARGUMENT);

        top = (vm_region_top_info_t) info;
        *count = VM_REGION_TOP_INFO_COUNT;

        vm_map_lock_read(map);

        start = *address;
        if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
            if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                vm_map_unlock_read(map);
                return(KERN_INVALID_ADDRESS);
            }
        } else {
            entry = tmp_entry;
        }
        start = entry->vme_start;

        top->private_pages_resident = 0;
        top->shared_pages_resident = 0;

        vm_map_region_top_walk(entry, top);

        if (object_name)
            *object_name = IP_NULL;
        *address = start;
        *size = (entry->vme_end - start);

        vm_map_unlock_read(map);
        return(KERN_SUCCESS);
    }
    default:
        return(KERN_INVALID_ARGUMENT);
    }
}

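/*
 * Illustrative user-level sketch (an assumption, not part of this file):
 * the flavors handled above are typically reached through mach_vm_region().
 * "task" is a hypothetical task port.
 *
 *	mach_vm_address_t address = 0;
 *	mach_vm_size_t size;
 *	vm_region_basic_info_data_64_t basic;
 *	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t object_name;
 *
 *	kern_return_t kr = mach_vm_region(task, &address, &size,
 *					  VM_REGION_BASIC_INFO_64,
 *					  (vm_region_info_t)&basic,
 *					  &count, &object_name);
 *	// address/size are rounded out to the containing map entry, and
 *	// basic.protection / basic.max_protection mirror the entry fields
 *	// filled in by the VM_REGION_BASIC_INFO_64 case above.
 */
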
#define OBJ_RESIDENT_COUNT(obj, entry_size)                 \
    MIN((entry_size),                                       \
        ((obj)->all_reusable ?                              \
         (obj)->wired_page_count :                          \
         (obj)->resident_page_count - (obj)->reusable_page_count))
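
/*
 * Editorial note on the macro above: for an object marked "all_reusable",
 * only its wired pages count as resident; otherwise resident pages minus
 * reusable pages count, clamped to the size of the mapping.  For example
 * (hypothetical numbers): resident_page_count = 10, reusable_page_count = 4,
 * entry_size = 8  =>  OBJ_RESIDENT_COUNT yields MIN(8, 10 - 4) = 6.
 */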

void
vm_map_region_top_walk(
    vm_map_entry_t          entry,
    vm_region_top_info_t    top)
{

    if (VME_OBJECT(entry) == 0 || entry->is_sub_map) {
        top->share_mode = SM_EMPTY;
        top->ref_count = 0;
        top->obj_id = 0;
        return;
    }

    {
        struct vm_object *obj, *tmp_obj;
        int         ref_count;
        uint32_t    entry_size;

        entry_size = (uint32_t) ((entry->vme_end - entry->vme_start) / PAGE_SIZE_64);

        obj = VME_OBJECT(entry);

        vm_object_lock(obj);

        if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
            ref_count--;

        assert(obj->reusable_page_count <= obj->resident_page_count);
        if (obj->shadow) {
            if (ref_count == 1)
                top->private_pages_resident =
                    OBJ_RESIDENT_COUNT(obj, entry_size);
            else
                top->shared_pages_resident =
                    OBJ_RESIDENT_COUNT(obj, entry_size);
            top->ref_count = ref_count;
            top->share_mode = SM_COW;

            while ((tmp_obj = obj->shadow)) {
                vm_object_lock(tmp_obj);
                vm_object_unlock(obj);
                obj = tmp_obj;

                if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
                    ref_count--;

                assert(obj->reusable_page_count <= obj->resident_page_count);
                top->shared_pages_resident +=
                    OBJ_RESIDENT_COUNT(obj, entry_size);
                top->ref_count += ref_count - 1;
            }
        } else {
            if (entry->superpage_size) {
                top->share_mode = SM_LARGE_PAGE;
                top->shared_pages_resident = 0;
                top->private_pages_resident = entry_size;
            } else if (entry->needs_copy) {
                top->share_mode = SM_COW;
                top->shared_pages_resident =
                    OBJ_RESIDENT_COUNT(obj, entry_size);
            } else {
                if (ref_count == 1 ||
                    (ref_count == 2 && !(obj->pager_trusted) && !(obj->internal))) {
                    top->share_mode = SM_PRIVATE;
                    top->private_pages_resident =
                        OBJ_RESIDENT_COUNT(obj,
                                           entry_size);
                } else {
                    top->share_mode = SM_SHARED;
                    top->shared_pages_resident =
                        OBJ_RESIDENT_COUNT(obj,
                                           entry_size);
                }
            }
            top->ref_count = ref_count;
        }
        /* XXX K64: obj_id will be truncated */
        top->obj_id = (unsigned int) (uintptr_t)VM_KERNEL_ADDRPERM(obj);

        vm_object_unlock(obj);
    }
}

void
vm_map_region_walk(
    vm_map_t                    map,
    vm_map_offset_t             va,
    vm_map_entry_t              entry,
    vm_object_offset_t          offset,
    vm_object_size_t            range,
    vm_region_extended_info_t   extended,
    boolean_t                   look_for_pages,
    mach_msg_type_number_t      count)
{
    register struct vm_object *obj, *tmp_obj;
    register vm_map_offset_t  last_offset;
    register int              i;
    register int              ref_count;
    struct vm_object          *shadow_object;
    int                       shadow_depth;

    if ((VME_OBJECT(entry) == 0) ||
        (entry->is_sub_map) ||
        (VME_OBJECT(entry)->phys_contiguous &&
         !entry->superpage_size)) {
        extended->share_mode = SM_EMPTY;
        extended->ref_count = 0;
        return;
    }

    if (entry->superpage_size) {
        extended->shadow_depth = 0;
        extended->share_mode = SM_LARGE_PAGE;
        extended->ref_count = 1;
        extended->external_pager = 0;
        extended->pages_resident = (unsigned int)(range >> PAGE_SHIFT);
        extended->shadow_depth = 0;
        return;
    }

    {
        obj = VME_OBJECT(entry);

        vm_object_lock(obj);

        if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
            ref_count--;

        if (look_for_pages) {
            for (last_offset = offset + range;
                 offset < last_offset;
                 offset += PAGE_SIZE_64, va += PAGE_SIZE) {
                vm_map_region_look_for_page(map, va, obj,
                                            offset, ref_count,
                                            0, extended, count);
            }
        } else {
            shadow_object = obj->shadow;
            shadow_depth = 0;

            if ( !(obj->pager_trusted) && !(obj->internal))
                extended->external_pager = 1;

            if (shadow_object != VM_OBJECT_NULL) {
                vm_object_lock(shadow_object);
                for (;
                     shadow_object != VM_OBJECT_NULL;
                     shadow_depth++) {
                    vm_object_t next_shadow;

                    if ( !(shadow_object->pager_trusted) &&
                         !(shadow_object->internal))
                        extended->external_pager = 1;

                    next_shadow = shadow_object->shadow;
                    if (next_shadow) {
                        vm_object_lock(next_shadow);
                    }
                    vm_object_unlock(shadow_object);
                    shadow_object = next_shadow;
                }
            }
            extended->shadow_depth = shadow_depth;
        }

        if (extended->shadow_depth || entry->needs_copy)
            extended->share_mode = SM_COW;
        else {
            if (ref_count == 1)
                extended->share_mode = SM_PRIVATE;
            else {
                if (obj->true_share)
                    extended->share_mode = SM_TRUESHARED;
                else
                    extended->share_mode = SM_SHARED;
            }
        }
        extended->ref_count = ref_count - extended->shadow_depth;

        for (i = 0; i < extended->shadow_depth; i++) {
            if ((tmp_obj = obj->shadow) == 0)
                break;
            vm_object_lock(tmp_obj);
            vm_object_unlock(obj);

            if ((ref_count = tmp_obj->ref_count) > 1 && tmp_obj->paging_in_progress)
                ref_count--;

            extended->ref_count += ref_count;
            obj = tmp_obj;
        }
        vm_object_unlock(obj);

        if (extended->share_mode == SM_SHARED) {
            register vm_map_entry_t cur;
            register vm_map_entry_t last;
            int my_refs;

            obj = VME_OBJECT(entry);
            last = vm_map_to_entry(map);
            my_refs = 0;

            if ((ref_count = obj->ref_count) > 1 && obj->paging_in_progress)
                ref_count--;
            for (cur = vm_map_first_entry(map); cur != last; cur = cur->vme_next)
                my_refs += vm_map_region_count_obj_refs(cur, obj);

            if (my_refs == ref_count)
                extended->share_mode = SM_PRIVATE_ALIASED;
            else if (my_refs > 1)
                extended->share_mode = SM_SHARED_ALIASED;
        }
    }
}

/* object is locked on entry and locked on return */


static void
vm_map_region_look_for_page(
    __unused vm_map_t           map,
    __unused vm_map_offset_t    va,
    vm_object_t                 object,
    vm_object_offset_t          offset,
    int                         max_refcnt,
    int                         depth,
    vm_region_extended_info_t   extended,
    mach_msg_type_number_t      count)
{
    register vm_page_t      p;
    register vm_object_t    shadow;
    register int            ref_count;
    vm_object_t             caller_object;
    kern_return_t           kr;
    shadow = object->shadow;
    caller_object = object;


    while (TRUE) {

        if ( !(object->pager_trusted) && !(object->internal))
            extended->external_pager = 1;

        if ((p = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
            if (shadow && (max_refcnt == 1))
                extended->pages_shared_now_private++;

            if (!p->fictitious &&
                (p->dirty || pmap_is_modified(p->phys_page)))
                extended->pages_dirtied++;
            else if (count >= VM_REGION_EXTENDED_INFO_COUNT) {
                if (p->reusable || p->object->all_reusable) {
                    extended->pages_reusable++;
                }
            }

            extended->pages_resident++;

            if (object != caller_object)
                vm_object_unlock(object);

            return;
        }
#if MACH_PAGEMAP
        if (object->existence_map) {
            if (vm_external_state_get(object->existence_map, offset) == VM_EXTERNAL_STATE_EXISTS) {

                extended->pages_swapped_out++;

                if (object != caller_object)
                    vm_object_unlock(object);

                return;
            }
        } else
#endif /* MACH_PAGEMAP */
        if (object->internal &&
            object->alive &&
            !object->terminating &&
            object->pager_ready) {

            if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
                if (VM_COMPRESSOR_PAGER_STATE_GET(object,
                                                  offset)
                    == VM_EXTERNAL_STATE_EXISTS) {
                    /* the pager has that page */
                    extended->pages_swapped_out++;
                    if (object != caller_object)
                        vm_object_unlock(object);
                    return;
                }
            } else {
                memory_object_t pager;

                vm_object_paging_begin(object);
                pager = object->pager;
                vm_object_unlock(object);

                kr = memory_object_data_request(
                    pager,
                    offset + object->paging_offset,
                    0, /* just poke the pager */
                    VM_PROT_READ,
                    NULL);

                vm_object_lock(object);
                vm_object_paging_end(object);

                if (kr == KERN_SUCCESS) {
                    /* the pager has that page */
                    extended->pages_swapped_out++;
                    if (object != caller_object)
                        vm_object_unlock(object);
                    return;
                }
            }
        }

        if (shadow) {
            vm_object_lock(shadow);

            if ((ref_count = shadow->ref_count) > 1 && shadow->paging_in_progress)
                ref_count--;

            if (++depth > extended->shadow_depth)
                extended->shadow_depth = depth;

            if (ref_count > max_refcnt)
                max_refcnt = ref_count;

            if (object != caller_object)
                vm_object_unlock(object);

            offset = offset + object->vo_shadow_offset;
            object = shadow;
            shadow = object->shadow;
            continue;
        }
        if (object != caller_object)
            vm_object_unlock(object);
        break;
    }
}

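/*
 * Editorial sketch of the traversal above: a copy-on-write mapping is a
 * chain of objects, each shadowing the next, and a page lives in the
 * topmost object that has it.
 *
 *	entry's object --shadow--> parent --shadow--> ... --shadow--> backing
 *	(offset)                   (offset + vo_shadow_offset)        ...
 *
 * vm_map_region_look_for_page() walks this chain, adjusting "offset" by
 * each object's vo_shadow_offset, and classifies the first hit as
 * resident, dirtied, reusable, or (by poking the pager) swapped out.
 */
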
static int
vm_map_region_count_obj_refs(
    vm_map_entry_t  entry,
    vm_object_t     object)
{
    register int            ref_count;
    register vm_object_t    chk_obj;
    register vm_object_t    tmp_obj;

    if (VME_OBJECT(entry) == 0)
        return(0);

    if (entry->is_sub_map)
        return(0);
    else {
        ref_count = 0;

        chk_obj = VME_OBJECT(entry);
        vm_object_lock(chk_obj);

        while (chk_obj) {
            if (chk_obj == object)
                ref_count++;
            tmp_obj = chk_obj->shadow;
            if (tmp_obj)
                vm_object_lock(tmp_obj);
            vm_object_unlock(chk_obj);

            chk_obj = tmp_obj;
        }
    }
    return(ref_count);
}


/*
 *	Routine:	vm_map_simplify
 *
 *	Description:
 *		Attempt to simplify the map representation in
 *		the vicinity of the given starting address.
 *	Note:
 *		This routine is intended primarily to keep the
 *		kernel maps more compact -- they generally don't
 *		benefit from the "expand a map entry" technology
 *		at allocation time because the adjacent entry
 *		is often wired down.
 */
void
vm_map_simplify_entry(
    vm_map_t        map,
    vm_map_entry_t  this_entry)
{
    vm_map_entry_t  prev_entry;

    counter(c_vm_map_simplify_entry_called++);

    prev_entry = this_entry->vme_prev;

    if ((this_entry != vm_map_to_entry(map)) &&
        (prev_entry != vm_map_to_entry(map)) &&

        (prev_entry->vme_end == this_entry->vme_start) &&

        (prev_entry->is_sub_map == this_entry->is_sub_map) &&
        (VME_OBJECT(prev_entry) == VME_OBJECT(this_entry)) &&
        ((VME_OFFSET(prev_entry) + (prev_entry->vme_end -
                                    prev_entry->vme_start))
         == VME_OFFSET(this_entry)) &&

        (prev_entry->behavior == this_entry->behavior) &&
        (prev_entry->needs_copy == this_entry->needs_copy) &&
        (prev_entry->protection == this_entry->protection) &&
        (prev_entry->max_protection == this_entry->max_protection) &&
        (prev_entry->inheritance == this_entry->inheritance) &&
        (prev_entry->use_pmap == this_entry->use_pmap) &&
        (VME_ALIAS(prev_entry) == VME_ALIAS(this_entry)) &&
        (prev_entry->no_cache == this_entry->no_cache) &&
        (prev_entry->permanent == this_entry->permanent) &&
        (prev_entry->map_aligned == this_entry->map_aligned) &&
        (prev_entry->zero_wired_pages == this_entry->zero_wired_pages) &&
        (prev_entry->used_for_jit == this_entry->used_for_jit) &&
        /* from_reserved_zone: OK if that field doesn't match */
        (prev_entry->iokit_acct == this_entry->iokit_acct) &&
        (prev_entry->vme_resilient_codesign ==
         this_entry->vme_resilient_codesign) &&
        (prev_entry->vme_resilient_media ==
         this_entry->vme_resilient_media) &&

        (prev_entry->wired_count == this_entry->wired_count) &&
        (prev_entry->user_wired_count == this_entry->user_wired_count) &&

        (prev_entry->in_transition == FALSE) &&
        (this_entry->in_transition == FALSE) &&
        (prev_entry->needs_wakeup == FALSE) &&
        (this_entry->needs_wakeup == FALSE) &&
        (prev_entry->is_shared == FALSE) &&
        (this_entry->is_shared == FALSE) &&
        (prev_entry->superpage_size == FALSE) &&
        (this_entry->superpage_size == FALSE)
        ) {
        vm_map_store_entry_unlink(map, prev_entry);
        assert(prev_entry->vme_start < this_entry->vme_end);
        if (prev_entry->map_aligned)
            assert(VM_MAP_PAGE_ALIGNED(prev_entry->vme_start,
                                       VM_MAP_PAGE_MASK(map)));
        this_entry->vme_start = prev_entry->vme_start;
        VME_OFFSET_SET(this_entry, VME_OFFSET(prev_entry));

        if (map->holelistenabled) {
            vm_map_store_update_first_free(map, this_entry, TRUE);
        }

        if (prev_entry->is_sub_map) {
            vm_map_deallocate(VME_SUBMAP(prev_entry));
        } else {
            vm_object_deallocate(VME_OBJECT(prev_entry));
        }
        vm_map_entry_dispose(map, prev_entry);
        SAVE_HINT_MAP_WRITE(map, this_entry);
        counter(c_vm_map_simplified++);
    }
}

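/*
 * Editorial sketch of what a successful coalesce does (hypothetical
 * addresses/offsets): given two adjacent, fully compatible entries
 *
 *	prev_entry: [0x1000, 0x3000)  object O, offset 0
 *	this_entry: [0x3000, 0x5000)  object O, offset 0x2000
 *
 * vm_map_simplify_entry() unlinks prev_entry and extends this_entry to
 * [0x1000, 0x5000) with offset 0, dropping one reference on O, so the map
 * describes the same mapping with one entry instead of two.
 */
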
void
vm_map_simplify(
    vm_map_t        map,
    vm_map_offset_t start)
{
    vm_map_entry_t  this_entry;

    vm_map_lock(map);
    if (vm_map_lookup_entry(map, start, &this_entry)) {
        vm_map_simplify_entry(map, this_entry);
        vm_map_simplify_entry(map, this_entry->vme_next);
    }
    counter(c_vm_map_simplify_called++);
    vm_map_unlock(map);
}

static void
vm_map_simplify_range(
    vm_map_t        map,
    vm_map_offset_t start,
    vm_map_offset_t end)
{
    vm_map_entry_t  entry;

    /*
     * The map should be locked (for "write") by the caller.
     */

    if (start >= end) {
        /* invalid address range */
        return;
    }

    start = vm_map_trunc_page(start,
                              VM_MAP_PAGE_MASK(map));
    end = vm_map_round_page(end,
                            VM_MAP_PAGE_MASK(map));

    if (!vm_map_lookup_entry(map, start, &entry)) {
        /* "start" is not mapped and "entry" ends before "start" */
        if (entry == vm_map_to_entry(map)) {
            /* start with first entry in the map */
            entry = vm_map_first_entry(map);
        } else {
            /* start with next entry */
            entry = entry->vme_next;
        }
    }

    while (entry != vm_map_to_entry(map) &&
           entry->vme_start <= end) {
        /* try and coalesce "entry" with its previous entry */
        vm_map_simplify_entry(map, entry);
        entry = entry->vme_next;
    }
}

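/*
 * Illustrative user-level sketch (an assumption, not part of this file):
 * the routine below backs the vm_machine_attribute() Mach call, e.g. to
 * flush the caches for a range after generating code.  "addr" and "size"
 * are hypothetical.
 *
 *	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;
 *
 *	kern_return_t kr = vm_machine_attribute(mach_task_self(),
 *						addr, size,
 *						MATTR_CACHE, &value);
 */
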
91447636 A |
12265 | /* |
12266 | * Routine: vm_map_machine_attribute | |
12267 | * Purpose: | |
12268 | * Provide machine-specific attributes to mappings, | |
12269 | * such as cachability etc. for machines that provide | |
12270 | * them. NUMA architectures and machines with big/strange | |
12271 | * caches will use this. | |
12272 | * Note: | |
12273 | * Responsibilities for locking and checking are handled here, | |
12274 | * everything else in the pmap module. If any non-volatile | |
12275 | * information must be kept, the pmap module should handle | |
12276 | * it itself. [This assumes that attributes do not | |
12277 | * need to be inherited, which seems ok to me] | |
12278 | */ | |
12279 | kern_return_t | |
12280 | vm_map_machine_attribute( | |
12281 | vm_map_t map, | |
12282 | vm_map_offset_t start, | |
12283 | vm_map_offset_t end, | |
12284 | vm_machine_attribute_t attribute, | |
12285 | vm_machine_attribute_val_t* value) /* IN/OUT */ | |
12286 | { | |
12287 | kern_return_t ret; | |
12288 | vm_map_size_t sync_size; | |
12289 | vm_map_entry_t entry; | |
12290 | ||
12291 | if (start < vm_map_min(map) || end > vm_map_max(map)) | |
12292 | return KERN_INVALID_ADDRESS; | |
1c79356b | 12293 | |
91447636 A |
12294 | /* Figure out how much memory we need to flush (in page increments) */ |
12295 | sync_size = end - start; | |
1c79356b | 12296 | |
91447636 A |
12297 | vm_map_lock(map); |
12298 | ||
12299 | if (attribute != MATTR_CACHE) { | |
12300 | /* If we don't have to find physical addresses, we */ | |
12301 | /* don't have to do an explicit traversal here. */ | |
12302 | ret = pmap_attribute(map->pmap, start, end-start, | |
12303 | attribute, value); | |
12304 | vm_map_unlock(map); | |
12305 | return ret; | |
12306 | } | |
1c79356b | 12307 | |
91447636 | 12308 | ret = KERN_SUCCESS; /* Assume it all worked */ |
1c79356b | 12309 | |
91447636 A |
12310 | while(sync_size) { |
12311 | if (vm_map_lookup_entry(map, start, &entry)) { | |
12312 | vm_map_size_t sub_size; | |
12313 | if((entry->vme_end - start) > sync_size) { | |
12314 | sub_size = sync_size; | |
12315 | sync_size = 0; | |
12316 | } else { | |
12317 | sub_size = entry->vme_end - start; | |
2d21ac55 | 12318 | sync_size -= sub_size; |
91447636 A |
12319 | } |
12320 | if(entry->is_sub_map) { | |
12321 | vm_map_offset_t sub_start; | |
12322 | vm_map_offset_t sub_end; | |
1c79356b | 12323 | |
91447636 | 12324 | sub_start = (start - entry->vme_start) |
3e170ce0 | 12325 | + VME_OFFSET(entry); |
91447636 A |
12326 | sub_end = sub_start + sub_size; |
12327 | vm_map_machine_attribute( | |
3e170ce0 | 12328 | VME_SUBMAP(entry), |
91447636 A |
12329 | sub_start, |
12330 | sub_end, | |
12331 | attribute, value); | |
12332 | } else { | |
3e170ce0 | 12333 | if (VME_OBJECT(entry)) { |
91447636 A |
12334 | vm_page_t m; |
12335 | vm_object_t object; | |
12336 | vm_object_t base_object; | |
12337 | vm_object_t last_object; | |
12338 | vm_object_offset_t offset; | |
12339 | vm_object_offset_t base_offset; | |
12340 | vm_map_size_t range; | |
12341 | range = sub_size; | |
12342 | offset = (start - entry->vme_start) | |
3e170ce0 | 12343 | + VME_OFFSET(entry); |
91447636 | 12344 | base_offset = offset; |
3e170ce0 | 12345 | object = VME_OBJECT(entry); |
91447636 A |
12346 | base_object = object; |
12347 | last_object = NULL; | |
1c79356b | 12348 | |
91447636 | 12349 | vm_object_lock(object); |
1c79356b | 12350 | |
91447636 A |
12351 | while (range) { |
12352 | m = vm_page_lookup( | |
12353 | object, offset); | |
1c79356b | 12354 | |
91447636 A |
12355 | if (m && !m->fictitious) { |
12356 | ret = | |
2d21ac55 A |
12357 | pmap_attribute_cache_sync( |
12358 | m->phys_page, | |
12359 | PAGE_SIZE, | |
12360 | attribute, value); | |
91447636 A |
12361 | |
12362 | } else if (object->shadow) { | |
6d2010ae | 12363 | offset = offset + object->vo_shadow_offset; |
91447636 A |
12364 | last_object = object; |
12365 | object = object->shadow; | |
12366 | vm_object_lock(last_object->shadow); | |
12367 | vm_object_unlock(last_object); | |
12368 | continue; | |
12369 | } | |
12370 | range -= PAGE_SIZE; | |
1c79356b | 12371 | |
91447636 A |
12372 | if (base_object != object) { |
12373 | vm_object_unlock(object); | |
12374 | vm_object_lock(base_object); | |
12375 | object = base_object; | |
12376 | } | |
12377 | /* Bump to the next page */ | |
12378 | base_offset += PAGE_SIZE; | |
12379 | offset = base_offset; | |
12380 | } | |
12381 | vm_object_unlock(object); | |
12382 | } | |
12383 | } | |
12384 | start += sub_size; | |
12385 | } else { | |
12386 | vm_map_unlock(map); | |
12387 | return KERN_FAILURE; | |
12388 | } | |
12389 | ||
1c79356b | 12390 | } |
e5568f75 | 12391 | |
91447636 | 12392 | vm_map_unlock(map); |
e5568f75 | 12393 | |
91447636 A |
12394 | return ret; |
12395 | } | |
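/*
 * A minimal user-space sketch (not from this file) of driving this
 * routine through the Mach interface; MATTR_CACHE takes the explicit
 * per-page traversal above, and the exact flush semantics are
 * machine-dependent, so the value shown is an assumed example.
 */
#if 0	/* example only */
#include <mach/mach.h>

static kern_return_t
flush_cache_range(vm_address_t addr, vm_size_t size)
{
	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

	/* MATTR_CACHE requests the cache-sync path implemented above */
	return vm_machine_attribute(mach_task_self(), addr, size,
				    MATTR_CACHE, &value);
}
#endif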
e5568f75 | 12396 | |
91447636 A |
12397 | /* |
12398 | * vm_map_behavior_set: | |
12399 | * | |
12400 | * Sets the paging reference behavior of the specified address | |
12401 | * range in the target map. Paging reference behavior affects | |
12402 | * how pagein operations resulting from faults on the map will be | |
12403 | * clustered. | |
12404 | */ | |
12405 | kern_return_t | |
12406 | vm_map_behavior_set( | |
12407 | vm_map_t map, | |
12408 | vm_map_offset_t start, | |
12409 | vm_map_offset_t end, | |
12410 | vm_behavior_t new_behavior) | |
12411 | { | |
12412 | register vm_map_entry_t entry; | |
12413 | vm_map_entry_t temp_entry; | |
e5568f75 | 12414 | |
91447636 | 12415 | XPR(XPR_VM_MAP, |
2d21ac55 | 12416 | "vm_map_behavior_set, 0x%X start 0x%X end 0x%X behavior %d", |
b0d623f7 | 12417 | map, start, end, new_behavior, 0); |
e5568f75 | 12418 | |
6d2010ae A |
12419 | if (start > end || |
12420 | start < vm_map_min(map) || | |
12421 | end > vm_map_max(map)) { | |
12422 | return KERN_NO_SPACE; | |
12423 | } | |
12424 | ||
91447636 | 12425 | switch (new_behavior) { |
b0d623f7 A |
12426 | |
12427 | /* | |
12428 | * This first block of behaviors all set a persistent state on the specified | |
12429 | * memory range. All we have to do here is to record the desired behavior | |
12430 | * in the vm_map_entry_t's. | |
12431 | */ | |
12432 | ||
91447636 A |
12433 | case VM_BEHAVIOR_DEFAULT: |
12434 | case VM_BEHAVIOR_RANDOM: | |
12435 | case VM_BEHAVIOR_SEQUENTIAL: | |
12436 | case VM_BEHAVIOR_RSEQNTL: | |
b0d623f7 A |
12437 | case VM_BEHAVIOR_ZERO_WIRED_PAGES: |
12438 | vm_map_lock(map); | |
12439 | ||
12440 | /* | |
12441 | * The entire address range must be valid for the map. | |
12442 | * Note that vm_map_range_check() does a | |
12443 | * vm_map_lookup_entry() internally and returns the | |
12444 | * entry containing the start of the address range if | |
12445 | * the entire range is valid. | |
12446 | */ | |
12447 | if (vm_map_range_check(map, start, end, &temp_entry)) { | |
12448 | entry = temp_entry; | |
12449 | vm_map_clip_start(map, entry, start); | |
12450 | } | |
12451 | else { | |
12452 | vm_map_unlock(map); | |
12453 | return(KERN_INVALID_ADDRESS); | |
12454 | } | |
12455 | ||
12456 | while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) { | |
12457 | vm_map_clip_end(map, entry, end); | |
fe8ab488 A |
12458 | if (entry->is_sub_map) { |
12459 | assert(!entry->use_pmap); | |
12460 | } | |
b0d623f7 A |
12461 | |
12462 | if( new_behavior == VM_BEHAVIOR_ZERO_WIRED_PAGES ) { | |
12463 | entry->zero_wired_pages = TRUE; | |
12464 | } else { | |
12465 | entry->behavior = new_behavior; | |
12466 | } | |
12467 | entry = entry->vme_next; | |
12468 | } | |
12469 | ||
12470 | vm_map_unlock(map); | |
91447636 | 12471 | break; |
b0d623f7 A |
12472 | |
12473 | /* | |
12474 | * The rest of these are different from the above in that they cause | |
12475 | * an immediate action to take place as opposed to setting a behavior that | |
12476 | * affects future actions. | |
12477 | */ | |
12478 | ||
91447636 | 12479 | case VM_BEHAVIOR_WILLNEED: |
b0d623f7 A |
12480 | return vm_map_willneed(map, start, end); |
12481 | ||
91447636 | 12482 | case VM_BEHAVIOR_DONTNEED: |
b0d623f7 A |
12483 | return vm_map_msync(map, start, end - start, VM_SYNC_DEACTIVATE | VM_SYNC_CONTIGUOUS); |
12484 | ||
12485 | case VM_BEHAVIOR_FREE: | |
12486 | return vm_map_msync(map, start, end - start, VM_SYNC_KILLPAGES | VM_SYNC_CONTIGUOUS); | |
12487 | ||
12488 | case VM_BEHAVIOR_REUSABLE: | |
12489 | return vm_map_reusable_pages(map, start, end); | |
12490 | ||
12491 | case VM_BEHAVIOR_REUSE: | |
12492 | return vm_map_reuse_pages(map, start, end); | |
12493 | ||
12494 | case VM_BEHAVIOR_CAN_REUSE: | |
12495 | return vm_map_can_reuse(map, start, end); | |
12496 | ||
3e170ce0 A |
12497 | #if MACH_ASSERT |
12498 | case VM_BEHAVIOR_PAGEOUT: | |
12499 | return vm_map_pageout(map, start, end); | |
12500 | #endif /* MACH_ASSERT */ | |
12501 | ||
1c79356b | 12502 | default: |
91447636 | 12503 | return(KERN_INVALID_ARGUMENT); |
1c79356b | 12504 | } |
1c79356b | 12505 | |
b0d623f7 A |
12506 | return(KERN_SUCCESS); |
12507 | } | |
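/*
 * Sketch (not from this file) of how user space typically reaches this
 * routine: the BSD layer translates madvise() advice values into
 * VM_BEHAVIOR_* values. The mapping shown in the comments is assumed;
 * see the BSD layer (kern_mman.c) for the authoritative translation.
 */
#if 0	/* example only */
#include <sys/mman.h>

static void
advise_region(void *addr, size_t len)
{
	madvise(addr, len, MADV_SEQUENTIAL);	/* -> VM_BEHAVIOR_SEQUENTIAL */
	madvise(addr, len, MADV_WILLNEED);	/* -> VM_BEHAVIOR_WILLNEED   */
	madvise(addr, len, MADV_FREE);		/* -> VM_BEHAVIOR_FREE       */
}
#endif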
12508 | ||
12509 | ||
12510 | /* | |
12511 | * Internals for madvise(MADV_WILLNEED) system call. | |
12512 | * | |
12513 | * The present implementation is to do a read-ahead if the mapping corresponds | |
12514 | * to a mapped regular file. If it's an anonymous mapping, then we do nothing | |
12515 | * and basically ignore the "advice" (which we are always free to do). | |
12516 | */ | |
12517 | ||
12518 | ||
12519 | static kern_return_t | |
12520 | vm_map_willneed( | |
12521 | vm_map_t map, | |
12522 | vm_map_offset_t start, | |
12523 | vm_map_offset_t end | |
12524 | ) | |
12525 | { | |
12526 | vm_map_entry_t entry; | |
12527 | vm_object_t object; | |
12528 | memory_object_t pager; | |
12529 | struct vm_object_fault_info fault_info; | |
12530 | kern_return_t kr; | |
12531 | vm_object_size_t len; | |
12532 | vm_object_offset_t offset; | |
1c79356b | 12533 | |
91447636 | 12534 | /* |
b0d623f7 A |
12535 | * Fill in static values in fault_info. Several fields get ignored by the code |
12536 | * we call, but we'll fill them in anyway since uninitialized fields |
12537 | * could break future backwards compatibility. |
91447636 | 12538 | */ |
b0d623f7 A |
12539 | |
12540 | fault_info.interruptible = THREAD_UNINT; /* ignored value */ | |
12541 | fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL; | |
12542 | fault_info.no_cache = FALSE; /* ignored value */ | |
12543 | fault_info.stealth = TRUE; | |
6d2010ae A |
12544 | fault_info.io_sync = FALSE; |
12545 | fault_info.cs_bypass = FALSE; | |
0b4c1975 | 12546 | fault_info.mark_zf_absent = FALSE; |
316670eb | 12547 | fault_info.batch_pmap_op = FALSE; |
b0d623f7 A |
12548 | |
12549 | /* | |
12550 | * The MADV_WILLNEED operation doesn't require any changes to the | |
12551 | * vm_map_entry_t's, so the read lock is sufficient. | |
12552 | */ | |
12553 | ||
12554 | vm_map_lock_read(map); | |
12555 | ||
12556 | /* | |
12557 | * The madvise semantics require that the address range be fully | |
12558 | * allocated with no holes. Otherwise, we're required to return | |
12559 | * an error. | |
12560 | */ | |
12561 | ||
6d2010ae A |
12562 | if (! vm_map_range_check(map, start, end, &entry)) { |
12563 | vm_map_unlock_read(map); | |
12564 | return KERN_INVALID_ADDRESS; | |
12565 | } | |
b0d623f7 | 12566 | |
6d2010ae A |
12567 | /* |
12568 | * Examine each vm_map_entry_t in the range. | |
12569 | */ | |
12570 | for (; entry != vm_map_to_entry(map) && start < end; ) { | |
12571 | ||
b0d623f7 | 12572 | /* |
6d2010ae A |
12573 | * The first time through, the start address could be anywhere |
12574 | * within the vm_map_entry we found. So adjust the offset to | |
12575 | * correspond. After that, the offset will always be zero to | |
12576 | * correspond to the beginning of the current vm_map_entry. | |
b0d623f7 | 12577 | */ |
3e170ce0 | 12578 | offset = (start - entry->vme_start) + VME_OFFSET(entry); |
b0d623f7 | 12579 | |
6d2010ae A |
12580 | /* |
12581 | * Set the length so we don't go beyond the end of the | |
12582 | * map_entry or beyond the end of the range we were given. | |
12583 | * This range could also span multiple map entries, all of which |
12584 | * map different files, so make sure we only do the right amount | |
12585 | * of I/O for each object. Note that it's possible for there | |
12586 | * to be multiple map entries all referring to the same object | |
12587 | * but with different page permissions, but it's not worth | |
12588 | * trying to optimize that case. | |
12589 | */ | |
12590 | len = MIN(entry->vme_end - start, end - start); | |
b0d623f7 | 12591 | |
6d2010ae A |
12592 | if ((vm_size_t) len != len) { |
12593 | /* 32-bit overflow */ | |
12594 | len = (vm_size_t) (0 - PAGE_SIZE); | |
12595 | } | |
12596 | fault_info.cluster_size = (vm_size_t) len; | |
12597 | fault_info.lo_offset = offset; | |
12598 | fault_info.hi_offset = offset + len; | |
3e170ce0 | 12599 | fault_info.user_tag = VME_ALIAS(entry); |
fe8ab488 A |
12600 | fault_info.pmap_options = 0; |
12601 | if (entry->iokit_acct || | |
12602 | (!entry->is_sub_map && !entry->use_pmap)) { | |
12603 | fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT; | |
12604 | } | |
b0d623f7 | 12605 | |
6d2010ae A |
12606 | /* |
12607 | * If there's no read permission to this mapping, then just | |
12608 | * skip it. | |
12609 | */ | |
12610 | if ((entry->protection & VM_PROT_READ) == 0) { | |
12611 | entry = entry->vme_next; | |
12612 | start = entry->vme_start; | |
12613 | continue; | |
12614 | } | |
b0d623f7 | 12615 | |
6d2010ae A |
12616 | /* |
12617 | * Find the file object backing this map entry. If there is | |
12618 | * none, then we simply ignore the "will need" advice for this | |
12619 | * entry and go on to the next one. | |
12620 | */ | |
12621 | if ((object = find_vnode_object(entry)) == VM_OBJECT_NULL) { | |
12622 | entry = entry->vme_next; | |
12623 | start = entry->vme_start; | |
12624 | continue; | |
12625 | } | |
b0d623f7 | 12626 | |
6d2010ae A |
12627 | /* |
12628 | * The data_request() could take a long time, so let's | |
12629 | * release the map lock to avoid blocking other threads. | |
12630 | */ | |
12631 | vm_map_unlock_read(map); | |
b0d623f7 | 12632 | |
6d2010ae A |
12633 | vm_object_paging_begin(object); |
12634 | pager = object->pager; | |
12635 | vm_object_unlock(object); | |
b0d623f7 | 12636 | |
6d2010ae A |
12637 | /* |
12638 | * Get the data from the object asynchronously. | |
12639 | * | |
12640 | * Note that memory_object_data_request() places limits on the | |
12641 | * amount of I/O it will do. Regardless of the len we | |
fe8ab488 | 12642 | * specified, it won't do more than MAX_UPL_TRANSFER_BYTES and it |
6d2010ae A |
12643 | * silently truncates the len to that size. This isn't |
12644 | * necessarily bad since madvise shouldn't really be used to | |
12645 | * page in unlimited amounts of data. Other Unix variants | |
12646 | * limit the willneed case as well. If this turns out to be an | |
12647 | * issue for developers, then we can always adjust the policy | |
12648 | * here and still be backwards compatible since this is all | |
12649 | * just "advice". | |
12650 | */ | |
12651 | kr = memory_object_data_request( | |
12652 | pager, | |
12653 | offset + object->paging_offset, | |
12654 | 0, /* ignored */ | |
12655 | VM_PROT_READ, | |
12656 | (memory_object_fault_info_t)&fault_info); | |
b0d623f7 | 12657 | |
6d2010ae A |
12658 | vm_object_lock(object); |
12659 | vm_object_paging_end(object); | |
12660 | vm_object_unlock(object); | |
b0d623f7 | 12661 | |
6d2010ae A |
12662 | /* |
12663 | * If we couldn't do the I/O for some reason, just give up on | |
12664 | * the madvise. We still return success to the user since | |
12665 | * madvise isn't supposed to fail when the advice can't be | |
12666 | * taken. | |
12667 | */ | |
12668 | if (kr != KERN_SUCCESS) { | |
12669 | return KERN_SUCCESS; | |
12670 | } | |
b0d623f7 | 12671 | |
6d2010ae A |
12672 | start += len; |
12673 | if (start >= end) { | |
12674 | /* done */ | |
12675 | return KERN_SUCCESS; | |
12676 | } | |
b0d623f7 | 12677 | |
6d2010ae A |
12678 | /* look up next entry */ |
12679 | vm_map_lock_read(map); | |
12680 | if (! vm_map_lookup_entry(map, start, &entry)) { | |
b0d623f7 | 12681 | /* |
6d2010ae | 12682 | * There's a new hole in the address range. |
b0d623f7 | 12683 | */ |
6d2010ae A |
12684 | vm_map_unlock_read(map); |
12685 | return KERN_INVALID_ADDRESS; | |
b0d623f7 | 12686 | } |
6d2010ae | 12687 | } |
b0d623f7 A |
12688 | |
12689 | vm_map_unlock_read(map); | |
6d2010ae | 12690 | return KERN_SUCCESS; |
b0d623f7 A |
12691 | } |
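/*
 * A minimal user-space sketch of the request that lands here, assuming
 * a regular file mapping; for anonymous memory the advice is silently
 * ignored, as noted above.
 */
#if 0	/* example only */
#include <fcntl.h>
#include <sys/mman.h>

static const void *
prefetch_file(const char *path, size_t len)
{
	int fd = open(path, O_RDONLY);
	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);

	if (p != MAP_FAILED)
		madvise(p, len, MADV_WILLNEED);	/* -> vm_map_willneed() */
	return p;
}
#endif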
12692 | ||
12693 | static boolean_t | |
12694 | vm_map_entry_is_reusable( | |
12695 | vm_map_entry_t entry) | |
12696 | { | |
3e170ce0 A |
12697 | /* Only user map entries */ |
12698 | ||
b0d623f7 A |
12699 | vm_object_t object; |
12700 | ||
2dced7af A |
12701 | if (entry->is_sub_map) { |
12702 | return FALSE; | |
12703 | } | |
12704 | ||
3e170ce0 | 12705 | switch (VME_ALIAS(entry)) { |
39236c6e A |
12706 | case VM_MEMORY_MALLOC: |
12707 | case VM_MEMORY_MALLOC_SMALL: | |
12708 | case VM_MEMORY_MALLOC_LARGE: | |
12709 | case VM_MEMORY_REALLOC: | |
12710 | case VM_MEMORY_MALLOC_TINY: | |
12711 | case VM_MEMORY_MALLOC_LARGE_REUSABLE: | |
12712 | case VM_MEMORY_MALLOC_LARGE_REUSED: | |
12713 | /* | |
12714 | * This is a malloc() memory region: check if it's still | |
12715 | * in its original state and can be re-used for more | |
12716 | * malloc() allocations. | |
12717 | */ | |
12718 | break; | |
12719 | default: | |
12720 | /* | |
12721 | * Not a malloc() memory region: let the caller decide if | |
12722 | * it's re-usable. | |
12723 | */ | |
12724 | return TRUE; | |
12725 | } | |
12726 | ||
b0d623f7 A |
12727 | if (entry->is_shared || |
12728 | entry->is_sub_map || | |
12729 | entry->in_transition || | |
12730 | entry->protection != VM_PROT_DEFAULT || | |
12731 | entry->max_protection != VM_PROT_ALL || | |
12732 | entry->inheritance != VM_INHERIT_DEFAULT || | |
12733 | entry->no_cache || | |
12734 | entry->permanent || | |
39236c6e | 12735 | entry->superpage_size != FALSE || |
b0d623f7 A |
12736 | entry->zero_wired_pages || |
12737 | entry->wired_count != 0 || | |
12738 | entry->user_wired_count != 0) { | |
12739 | return FALSE; | |
91447636 | 12740 | } |
b0d623f7 | 12741 | |
3e170ce0 | 12742 | object = VME_OBJECT(entry); |
b0d623f7 A |
12743 | if (object == VM_OBJECT_NULL) { |
12744 | return TRUE; | |
12745 | } | |
316670eb A |
12746 | if ( |
12747 | #if 0 | |
12748 | /* | |
12749 | * Let's proceed even if the VM object is potentially | |
12750 | * shared. | |
12751 | * We check for this later when processing the actual | |
12752 | * VM pages, so the contents will be safe if shared. | |
12753 | * | |
12754 | * But we can still mark this memory region as "reusable" to | |
12755 | * acknowledge that the caller did let us know that the memory | |
12756 | * could be re-used and should not be penalized for holding | |
12757 | * on to it. This allows its "resident size" to not include | |
12758 | * the reusable range. | |
12759 | */ | |
12760 | object->ref_count == 1 && | |
12761 | #endif | |
b0d623f7 A |
12762 | object->wired_page_count == 0 && |
12763 | object->copy == VM_OBJECT_NULL && | |
12764 | object->shadow == VM_OBJECT_NULL && | |
12765 | object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC && | |
12766 | object->internal && | |
12767 | !object->true_share && | |
6d2010ae | 12768 | object->wimg_bits == VM_WIMG_USE_DEFAULT && |
b0d623f7 A |
12769 | !object->code_signed) { |
12770 | return TRUE; | |
1c79356b | 12771 | } |
b0d623f7 A |
12772 | return FALSE; |
12773 | ||
12774 | ||
12775 | } | |
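/*
 * The alias tested in the switch above is the allocation tag that user
 * space supplies when creating the region. A hedged sketch of how a
 * malloc-style region would acquire the VM_MEMORY_MALLOC tag (passing
 * the tag in the fd argument is the Darwin convention for anonymous
 * mappings):
 */
#if 0	/* example only */
#include <mach/vm_statistics.h>
#include <sys/mman.h>

static void *
malloc_tagged_region(size_t size)
{
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE,
		    VM_MAKE_TAG(VM_MEMORY_MALLOC), 0);
}
#endif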
1c79356b | 12776 | |
b0d623f7 A |
12777 | static kern_return_t |
12778 | vm_map_reuse_pages( | |
12779 | vm_map_t map, | |
12780 | vm_map_offset_t start, | |
12781 | vm_map_offset_t end) | |
12782 | { | |
12783 | vm_map_entry_t entry; | |
12784 | vm_object_t object; | |
12785 | vm_object_offset_t start_offset, end_offset; | |
12786 | ||
12787 | /* | |
12788 | * The MADV_REUSE operation doesn't require any changes to the | |
12789 | * vm_map_entry_t's, so the read lock is sufficient. | |
12790 | */ | |
0b4e3aa0 | 12791 | |
b0d623f7 | 12792 | vm_map_lock_read(map); |
3e170ce0 | 12793 | assert(map->pmap != kernel_pmap); /* protect alias access */ |
1c79356b | 12794 | |
b0d623f7 A |
12795 | /* |
12796 | * The madvise semantics require that the address range be fully | |
12797 | * allocated with no holes. Otherwise, we're required to return | |
12798 | * an error. | |
12799 | */ | |
12800 | ||
12801 | if (!vm_map_range_check(map, start, end, &entry)) { | |
12802 | vm_map_unlock_read(map); | |
12803 | vm_page_stats_reusable.reuse_pages_failure++; | |
12804 | return KERN_INVALID_ADDRESS; | |
1c79356b | 12805 | } |
91447636 | 12806 | |
b0d623f7 A |
12807 | /* |
12808 | * Examine each vm_map_entry_t in the range. | |
12809 | */ | |
12810 | for (; entry != vm_map_to_entry(map) && entry->vme_start < end; | |
12811 | entry = entry->vme_next) { | |
12812 | /* | |
12813 | * Sanity check on the VM map entry. | |
12814 | */ | |
12815 | if (! vm_map_entry_is_reusable(entry)) { | |
12816 | vm_map_unlock_read(map); | |
12817 | vm_page_stats_reusable.reuse_pages_failure++; | |
12818 | return KERN_INVALID_ADDRESS; | |
12819 | } | |
12820 | ||
12821 | /* | |
12822 | * The first time through, the start address could be anywhere | |
12823 | * within the vm_map_entry we found. So adjust the offset to | |
12824 | * correspond. | |
12825 | */ | |
12826 | if (entry->vme_start < start) { | |
12827 | start_offset = start - entry->vme_start; | |
12828 | } else { | |
12829 | start_offset = 0; | |
12830 | } | |
12831 | end_offset = MIN(end, entry->vme_end) - entry->vme_start; | |
3e170ce0 A |
12832 | start_offset += VME_OFFSET(entry); |
12833 | end_offset += VME_OFFSET(entry); | |
b0d623f7 | 12834 | |
2dced7af | 12835 | assert(!entry->is_sub_map); |
3e170ce0 | 12836 | object = VME_OBJECT(entry); |
b0d623f7 A |
12837 | if (object != VM_OBJECT_NULL) { |
12838 | vm_object_lock(object); | |
12839 | vm_object_reuse_pages(object, start_offset, end_offset, | |
12840 | TRUE); | |
12841 | vm_object_unlock(object); | |
12842 | } | |
12843 | ||
3e170ce0 | 12844 | if (VME_ALIAS(entry) == VM_MEMORY_MALLOC_LARGE_REUSABLE) { |
b0d623f7 A |
12845 | /* |
12846 | * XXX | |
12847 | * We do not hold the VM map exclusively here. | |
12848 | * The "alias" field is not that critical, so it's | |
12849 | * safe to update it here, as long as it is the only | |
12850 | * one that can be modified while holding the VM map | |
12851 | * "shared". | |
12852 | */ | |
3e170ce0 | 12853 | VME_ALIAS_SET(entry, VM_MEMORY_MALLOC_LARGE_REUSED); |
b0d623f7 A |
12854 | } |
12855 | } | |
12856 | ||
12857 | vm_map_unlock_read(map); | |
12858 | vm_page_stats_reusable.reuse_pages_success++; | |
12859 | return KERN_SUCCESS; | |
1c79356b A |
12860 | } |
12861 | ||
1c79356b | 12862 | |
b0d623f7 A |
12863 | static kern_return_t |
12864 | vm_map_reusable_pages( | |
12865 | vm_map_t map, | |
12866 | vm_map_offset_t start, | |
12867 | vm_map_offset_t end) | |
12868 | { | |
12869 | vm_map_entry_t entry; | |
12870 | vm_object_t object; | |
12871 | vm_object_offset_t start_offset, end_offset; | |
3e170ce0 | 12872 | vm_map_offset_t pmap_offset; |
b0d623f7 A |
12873 | |
12874 | /* | |
12875 | * The MADV_REUSABLE operation doesn't require any changes to the | |
12876 | * vm_map_entry_t's, so the read lock is sufficient. | |
12877 | */ | |
12878 | ||
12879 | vm_map_lock_read(map); | |
3e170ce0 | 12880 | assert(map->pmap != kernel_pmap); /* protect alias access */ |
b0d623f7 A |
12881 | |
12882 | /* | |
12883 | * The madvise semantics require that the address range be fully | |
12884 | * allocated with no holes. Otherwise, we're required to return | |
12885 | * an error. | |
12886 | */ | |
12887 | ||
12888 | if (!vm_map_range_check(map, start, end, &entry)) { | |
12889 | vm_map_unlock_read(map); | |
12890 | vm_page_stats_reusable.reusable_pages_failure++; | |
12891 | return KERN_INVALID_ADDRESS; | |
12892 | } | |
12893 | ||
12894 | /* | |
12895 | * Examine each vm_map_entry_t in the range. | |
12896 | */ | |
12897 | for (; entry != vm_map_to_entry(map) && entry->vme_start < end; | |
12898 | entry = entry->vme_next) { | |
12899 | int kill_pages = 0; | |
12900 | ||
12901 | /* | |
12902 | * Sanity check on the VM map entry. | |
12903 | */ | |
12904 | if (! vm_map_entry_is_reusable(entry)) { | |
12905 | vm_map_unlock_read(map); | |
12906 | vm_page_stats_reusable.reusable_pages_failure++; | |
12907 | return KERN_INVALID_ADDRESS; | |
12908 | } | |
12909 | ||
12910 | /* | |
12911 | * The first time through, the start address could be anywhere | |
12912 | * within the vm_map_entry we found. So adjust the offset to | |
12913 | * correspond. | |
12914 | */ | |
12915 | if (entry->vme_start < start) { | |
12916 | start_offset = start - entry->vme_start; | |
3e170ce0 | 12917 | pmap_offset = start; |
b0d623f7 A |
12918 | } else { |
12919 | start_offset = 0; | |
3e170ce0 | 12920 | pmap_offset = entry->vme_start; |
b0d623f7 A |
12921 | } |
12922 | end_offset = MIN(end, entry->vme_end) - entry->vme_start; | |
3e170ce0 A |
12923 | start_offset += VME_OFFSET(entry); |
12924 | end_offset += VME_OFFSET(entry); | |
b0d623f7 | 12925 | |
2dced7af | 12926 | assert(!entry->is_sub_map); |
3e170ce0 | 12927 | object = VME_OBJECT(entry); |
b0d623f7 A |
12928 | if (object == VM_OBJECT_NULL) |
12929 | continue; | |
12930 | ||
12931 | ||
12932 | vm_object_lock(object); | |
fe8ab488 A |
12933 | if (object->ref_count == 1 && |
12934 | !object->shadow && | |
12935 | /* | |
12936 | * "iokit_acct" entries are billed for their virtual size | |
12937 | * (rather than for their resident pages only), so they | |
12938 | * wouldn't benefit from making pages reusable, and it | |
12939 | * would be hard to keep track of pages that are both | |
12940 | * "iokit_acct" and "reusable" in the pmap stats and ledgers. | |
12941 | */ | |
12942 | !(entry->iokit_acct || | |
12943 | (!entry->is_sub_map && !entry->use_pmap))) | |
b0d623f7 A |
12944 | kill_pages = 1; |
12945 | else | |
12946 | kill_pages = -1; | |
12947 | if (kill_pages != -1) { | |
12948 | vm_object_deactivate_pages(object, | |
12949 | start_offset, | |
12950 | end_offset - start_offset, | |
12951 | kill_pages, | |
3e170ce0 A |
12952 | TRUE /*reusable_pages*/, |
12953 | map->pmap, | |
12954 | pmap_offset); | |
b0d623f7 A |
12955 | } else { |
12956 | vm_page_stats_reusable.reusable_pages_shared++; | |
12957 | } | |
12958 | vm_object_unlock(object); | |
12959 | ||
3e170ce0 A |
12960 | if (VME_ALIAS(entry) == VM_MEMORY_MALLOC_LARGE || |
12961 | VME_ALIAS(entry) == VM_MEMORY_MALLOC_LARGE_REUSED) { | |
b0d623f7 A |
12962 | /* |
12963 | * XXX | |
12964 | * We do not hold the VM map exclusively here. | |
12965 | * The "alias" field is not that critical, so it's | |
12966 | * safe to update it here, as long as it is the only | |
12967 | * one that can be modified while holding the VM map | |
12968 | * "shared". | |
12969 | */ | |
3e170ce0 | 12970 | VME_ALIAS_SET(entry, VM_MEMORY_MALLOC_LARGE_REUSABLE); |
b0d623f7 A |
12971 | } |
12972 | } | |
12973 | ||
12974 | vm_map_unlock_read(map); | |
12975 | vm_page_stats_reusable.reusable_pages_success++; | |
12976 | return KERN_SUCCESS; | |
12977 | } | |
12978 | ||
12979 | ||
12980 | static kern_return_t | |
12981 | vm_map_can_reuse( | |
12982 | vm_map_t map, | |
12983 | vm_map_offset_t start, | |
12984 | vm_map_offset_t end) | |
12985 | { | |
12986 | vm_map_entry_t entry; | |
12987 | ||
12988 | /* | |
12989 | * The MADV_REUSABLE operation doesn't require any changes to the | |
12990 | * vm_map_entry_t's, so the read lock is sufficient. | |
12991 | */ | |
12992 | ||
12993 | vm_map_lock_read(map); | |
3e170ce0 | 12994 | assert(map->pmap != kernel_pmap); /* protect alias access */ |
b0d623f7 A |
12995 | |
12996 | /* | |
12997 | * The madvise semantics require that the address range be fully | |
12998 | * allocated with no holes. Otherwise, we're required to return | |
12999 | * an error. | |
13000 | */ | |
13001 | ||
13002 | if (!vm_map_range_check(map, start, end, &entry)) { | |
13003 | vm_map_unlock_read(map); | |
13004 | vm_page_stats_reusable.can_reuse_failure++; | |
13005 | return KERN_INVALID_ADDRESS; | |
13006 | } | |
13007 | ||
13008 | /* | |
13009 | * Examine each vm_map_entry_t in the range. | |
13010 | */ | |
13011 | for (; entry != vm_map_to_entry(map) && entry->vme_start < end; | |
13012 | entry = entry->vme_next) { | |
13013 | /* | |
13014 | * Sanity check on the VM map entry. | |
13015 | */ | |
13016 | if (! vm_map_entry_is_reusable(entry)) { | |
13017 | vm_map_unlock_read(map); | |
13018 | vm_page_stats_reusable.can_reuse_failure++; | |
13019 | return KERN_INVALID_ADDRESS; | |
13020 | } | |
13021 | } | |
13022 | ||
13023 | vm_map_unlock_read(map); | |
13024 | vm_page_stats_reusable.can_reuse_success++; | |
13025 | return KERN_SUCCESS; | |
13026 | } | |
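/*
 * The three routines above back a cooperative protocol with user
 * space; a minimal sketch, assuming the Darwin-specific madvise()
 * advice values (MADV_CAN_REUSE, MADV_FREE_REUSABLE, MADV_FREE_REUSE):
 */
#if 0	/* example only */
#include <sys/mman.h>

static void
recycle_region(void *p, size_t len)
{
	madvise(p, len, MADV_CAN_REUSE);	/* vm_map_can_reuse(): eligibility   */
	madvise(p, len, MADV_FREE_REUSABLE);	/* vm_map_reusable_pages(): donate   */
	/* ... later, before touching the memory again ... */
	madvise(p, len, MADV_FREE_REUSE);	/* vm_map_reuse_pages(): reclaim     */
}
#endif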
13027 | ||
13028 | ||
3e170ce0 A |
13029 | #if MACH_ASSERT |
13030 | static kern_return_t | |
13031 | vm_map_pageout( | |
13032 | vm_map_t map, | |
13033 | vm_map_offset_t start, | |
13034 | vm_map_offset_t end) | |
13035 | { | |
13036 | vm_map_entry_t entry; | |
13037 | ||
13038 | /* | |
13039 | * The MADV_PAGEOUT operation doesn't require any changes to the | |
13040 | * vm_map_entry_t's, so the read lock is sufficient. | |
13041 | */ | |
13042 | ||
13043 | vm_map_lock_read(map); | |
13044 | ||
13045 | /* | |
13046 | * The madvise semantics require that the address range be fully | |
13047 | * allocated with no holes. Otherwise, we're required to return | |
13048 | * an error. | |
13049 | */ | |
13050 | ||
13051 | if (!vm_map_range_check(map, start, end, &entry)) { | |
13052 | vm_map_unlock_read(map); | |
13053 | return KERN_INVALID_ADDRESS; | |
13054 | } | |
13055 | ||
13056 | /* | |
13057 | * Examine each vm_map_entry_t in the range. | |
13058 | */ | |
13059 | for (; entry != vm_map_to_entry(map) && entry->vme_start < end; | |
13060 | entry = entry->vme_next) { | |
13061 | vm_object_t object; | |
13062 | ||
13063 | /* | |
13064 | * Sanity check on the VM map entry. | |
13065 | */ | |
13066 | if (entry->is_sub_map) { | |
13067 | vm_map_t submap; | |
13068 | vm_map_offset_t submap_start; | |
13069 | vm_map_offset_t submap_end; | |
13070 | vm_map_entry_t submap_entry; | |
13071 | ||
13072 | submap = VME_SUBMAP(entry); | |
13073 | submap_start = VME_OFFSET(entry); | |
13074 | submap_end = submap_start + (entry->vme_end - | |
13075 | entry->vme_start); | |
13076 | ||
13077 | vm_map_lock_read(submap); | |
13078 | ||
13079 | if (! vm_map_range_check(submap, | |
13080 | submap_start, | |
13081 | submap_end, | |
13082 | &submap_entry)) { | |
13083 | vm_map_unlock_read(submap); | |
13084 | vm_map_unlock_read(map); | |
13085 | return KERN_INVALID_ADDRESS; | |
13086 | } | |
13087 | ||
13088 | object = VME_OBJECT(submap_entry); | |
13089 | if (submap_entry->is_sub_map || | |
13090 | object == VM_OBJECT_NULL || | |
13091 | !object->internal) { | |
13092 | vm_map_unlock_read(submap); | |
13093 | continue; | |
13094 | } | |
13095 | ||
13096 | vm_object_pageout(object); | |
13097 | ||
13098 | vm_map_unlock_read(submap); | |
13099 | submap = VM_MAP_NULL; | |
13100 | submap_entry = VM_MAP_ENTRY_NULL; | |
13101 | continue; | |
13102 | } | |
13103 | ||
13104 | object = VME_OBJECT(entry); | |
13105 | if (entry->is_sub_map || | |
13106 | object == VM_OBJECT_NULL || | |
13107 | !object->internal) { | |
13108 | continue; | |
13109 | } | |
13110 | ||
13111 | vm_object_pageout(object); | |
13112 | } | |
13113 | ||
13114 | vm_map_unlock_read(map); | |
13115 | return KERN_SUCCESS; | |
13116 | } | |
13117 | #endif /* MACH_ASSERT */ | |
13118 | ||
13119 | ||
1c79356b | 13120 | /* |
91447636 A |
13121 | * Routine: vm_map_entry_insert |
13122 | * | |
13123 | * Description: This routine inserts a new vm_map_entry in a locked map. |
1c79356b | 13124 | */ |
91447636 A |
13125 | vm_map_entry_t |
13126 | vm_map_entry_insert( | |
13127 | vm_map_t map, | |
13128 | vm_map_entry_t insp_entry, | |
13129 | vm_map_offset_t start, | |
13130 | vm_map_offset_t end, | |
13131 | vm_object_t object, | |
13132 | vm_object_offset_t offset, | |
13133 | boolean_t needs_copy, | |
13134 | boolean_t is_shared, | |
13135 | boolean_t in_transition, | |
13136 | vm_prot_t cur_protection, | |
13137 | vm_prot_t max_protection, | |
13138 | vm_behavior_t behavior, | |
13139 | vm_inherit_t inheritance, | |
2d21ac55 | 13140 | unsigned wired_count, |
b0d623f7 A |
13141 | boolean_t no_cache, |
13142 | boolean_t permanent, | |
39236c6e | 13143 | unsigned int superpage_size, |
fe8ab488 A |
13144 | boolean_t clear_map_aligned, |
13145 | boolean_t is_submap) | |
1c79356b | 13146 | { |
91447636 | 13147 | vm_map_entry_t new_entry; |
1c79356b | 13148 | |
91447636 | 13149 | assert(insp_entry != (vm_map_entry_t)0); |
1c79356b | 13150 | |
7ddcb079 | 13151 | new_entry = vm_map_entry_create(map, !map->hdr.entries_pageable); |
1c79356b | 13152 | |
39236c6e A |
13153 | if (VM_MAP_PAGE_SHIFT(map) != PAGE_SHIFT) { |
13154 | new_entry->map_aligned = TRUE; | |
13155 | } else { | |
13156 | new_entry->map_aligned = FALSE; | |
13157 | } | |
13158 | if (clear_map_aligned && | |
fe8ab488 A |
13159 | (! VM_MAP_PAGE_ALIGNED(start, VM_MAP_PAGE_MASK(map)) || |
13160 | ! VM_MAP_PAGE_ALIGNED(end, VM_MAP_PAGE_MASK(map)))) { | |
39236c6e A |
13161 | new_entry->map_aligned = FALSE; |
13162 | } | |
13163 | ||
91447636 A |
13164 | new_entry->vme_start = start; |
13165 | new_entry->vme_end = end; | |
13166 | assert(page_aligned(new_entry->vme_start)); | |
13167 | assert(page_aligned(new_entry->vme_end)); | |
39236c6e | 13168 | if (new_entry->map_aligned) { |
fe8ab488 A |
13169 | assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_start, |
13170 | VM_MAP_PAGE_MASK(map))); | |
39236c6e A |
13171 | assert(VM_MAP_PAGE_ALIGNED(new_entry->vme_end, |
13172 | VM_MAP_PAGE_MASK(map))); | |
13173 | } | |
e2d2fc5c | 13174 | assert(new_entry->vme_start < new_entry->vme_end); |
1c79356b | 13175 | |
3e170ce0 A |
13176 | VME_OBJECT_SET(new_entry, object); |
13177 | VME_OFFSET_SET(new_entry, offset); | |
91447636 | 13178 | new_entry->is_shared = is_shared; |
fe8ab488 | 13179 | new_entry->is_sub_map = is_submap; |
91447636 A |
13180 | new_entry->needs_copy = needs_copy; |
13181 | new_entry->in_transition = in_transition; | |
13182 | new_entry->needs_wakeup = FALSE; | |
13183 | new_entry->inheritance = inheritance; | |
13184 | new_entry->protection = cur_protection; | |
13185 | new_entry->max_protection = max_protection; | |
13186 | new_entry->behavior = behavior; | |
13187 | new_entry->wired_count = wired_count; | |
13188 | new_entry->user_wired_count = 0; | |
fe8ab488 A |
13189 | if (is_submap) { |
13190 | /* | |
13191 | * submap: "use_pmap" means "nested". | |
13192 | * default: false. | |
13193 | */ | |
13194 | new_entry->use_pmap = FALSE; | |
13195 | } else { | |
13196 | /* | |
13197 | * object: "use_pmap" means "use pmap accounting" for footprint. | |
13198 | * default: true. | |
13199 | */ | |
13200 | new_entry->use_pmap = TRUE; | |
13201 | } | |
3e170ce0 | 13202 | VME_ALIAS_SET(new_entry, 0); |
b0d623f7 | 13203 | new_entry->zero_wired_pages = FALSE; |
2d21ac55 | 13204 | new_entry->no_cache = no_cache; |
b0d623f7 | 13205 | new_entry->permanent = permanent; |
39236c6e A |
13206 | if (superpage_size) |
13207 | new_entry->superpage_size = TRUE; | |
13208 | else | |
13209 | new_entry->superpage_size = FALSE; | |
6d2010ae | 13210 | new_entry->used_for_jit = FALSE; |
fe8ab488 | 13211 | new_entry->iokit_acct = FALSE; |
3e170ce0 A |
13212 | new_entry->vme_resilient_codesign = FALSE; |
13213 | new_entry->vme_resilient_media = FALSE; | |
1c79356b | 13214 | |
91447636 A |
13215 | /* |
13216 | * Insert the new entry into the list. | |
13217 | */ | |
1c79356b | 13218 | |
6d2010ae | 13219 | vm_map_store_entry_link(map, insp_entry, new_entry); |
91447636 A |
13220 | map->size += end - start; |
13221 | ||
13222 | /* | |
13223 | * Update the free space hint and the lookup hint. | |
13224 | */ | |
13225 | ||
0c530ab8 | 13226 | SAVE_HINT_MAP_WRITE(map, new_entry); |
91447636 | 13227 | return new_entry; |
1c79356b A |
13228 | } |
13229 | ||
13230 | /* | |
91447636 A |
13231 | * Routine: vm_map_remap_extract |
13232 | * | |
13233 | * Description: This routine returns a vm_map_entry list from a map. |
1c79356b | 13234 | */ |
91447636 A |
13235 | static kern_return_t |
13236 | vm_map_remap_extract( | |
13237 | vm_map_t map, | |
13238 | vm_map_offset_t addr, | |
13239 | vm_map_size_t size, | |
13240 | boolean_t copy, | |
13241 | struct vm_map_header *map_header, | |
13242 | vm_prot_t *cur_protection, | |
13243 | vm_prot_t *max_protection, | |
13244 | /* What, no behavior? */ | |
13245 | vm_inherit_t inheritance, | |
13246 | boolean_t pageable) | |
1c79356b | 13247 | { |
91447636 A |
13248 | kern_return_t result; |
13249 | vm_map_size_t mapped_size; | |
13250 | vm_map_size_t tmp_size; | |
13251 | vm_map_entry_t src_entry; /* result of last map lookup */ | |
13252 | vm_map_entry_t new_entry; | |
13253 | vm_object_offset_t offset; | |
13254 | vm_map_offset_t map_address; | |
13255 | vm_map_offset_t src_start; /* start of entry to map */ | |
13256 | vm_map_offset_t src_end; /* end of region to be mapped */ | |
13257 | vm_object_t object; | |
13258 | vm_map_version_t version; | |
13259 | boolean_t src_needs_copy; | |
13260 | boolean_t new_entry_needs_copy; | |
1c79356b | 13261 | |
91447636 | 13262 | assert(map != VM_MAP_NULL); |
39236c6e A |
13263 | assert(size != 0); |
13264 | assert(size == vm_map_round_page(size, PAGE_MASK)); | |
91447636 A |
13265 | assert(inheritance == VM_INHERIT_NONE || |
13266 | inheritance == VM_INHERIT_COPY || | |
13267 | inheritance == VM_INHERIT_SHARE); | |
1c79356b | 13268 | |
91447636 A |
13269 | /* |
13270 | * Compute start and end of region. | |
13271 | */ | |
39236c6e A |
13272 | src_start = vm_map_trunc_page(addr, PAGE_MASK); |
13273 | src_end = vm_map_round_page(src_start + size, PAGE_MASK); | |
13274 | ||
1c79356b | 13275 | |
91447636 A |
13276 | /* |
13277 | * Initialize map_header. | |
13278 | */ | |
13279 | map_header->links.next = (struct vm_map_entry *)&map_header->links; | |
13280 | map_header->links.prev = (struct vm_map_entry *)&map_header->links; | |
13281 | map_header->nentries = 0; | |
13282 | map_header->entries_pageable = pageable; | |
39236c6e | 13283 | map_header->page_shift = PAGE_SHIFT; |
1c79356b | 13284 | |
6d2010ae A |
13285 | vm_map_store_init( map_header ); |
13286 | ||
91447636 A |
13287 | *cur_protection = VM_PROT_ALL; |
13288 | *max_protection = VM_PROT_ALL; | |
1c79356b | 13289 | |
91447636 A |
13290 | map_address = 0; |
13291 | mapped_size = 0; | |
13292 | result = KERN_SUCCESS; | |
1c79356b | 13293 | |
91447636 A |
13294 | /* |
13295 | * The specified source virtual space might correspond to | |
13296 | * multiple map entries, so we need to loop over them. |
13297 | */ | |
13298 | vm_map_lock(map); | |
13299 | while (mapped_size != size) { | |
13300 | vm_map_size_t entry_size; | |
1c79356b | 13301 | |
91447636 A |
13302 | /* |
13303 | * Find the beginning of the region. | |
13304 | */ | |
13305 | if (! vm_map_lookup_entry(map, src_start, &src_entry)) { | |
13306 | result = KERN_INVALID_ADDRESS; | |
13307 | break; | |
13308 | } | |
1c79356b | 13309 | |
91447636 A |
13310 | if (src_start < src_entry->vme_start || |
13311 | (mapped_size && src_start != src_entry->vme_start)) { | |
13312 | result = KERN_INVALID_ADDRESS; | |
13313 | break; | |
13314 | } | |
1c79356b | 13315 | |
91447636 A |
13316 | tmp_size = size - mapped_size; |
13317 | if (src_end > src_entry->vme_end) | |
13318 | tmp_size -= (src_end - src_entry->vme_end); | |
1c79356b | 13319 | |
91447636 | 13320 | entry_size = (vm_map_size_t)(src_entry->vme_end - |
2d21ac55 | 13321 | src_entry->vme_start); |
1c79356b | 13322 | |
91447636 | 13323 | if(src_entry->is_sub_map) { |
3e170ce0 | 13324 | vm_map_reference(VME_SUBMAP(src_entry)); |
91447636 A |
13325 | object = VM_OBJECT_NULL; |
13326 | } else { | |
3e170ce0 | 13327 | object = VME_OBJECT(src_entry); |
fe8ab488 A |
13328 | if (src_entry->iokit_acct) { |
13329 | /* | |
13330 | * This entry uses "IOKit accounting". | |
13331 | */ | |
13332 | } else if (object != VM_OBJECT_NULL && | |
13333 | object->purgable != VM_PURGABLE_DENY) { | |
13334 | /* | |
13335 | * Purgeable objects have their own accounting: | |
13336 | * no pmap accounting for them. | |
13337 | */ | |
13338 | assert(!src_entry->use_pmap); | |
13339 | } else { | |
13340 | /* | |
13341 | * Not IOKit or purgeable: | |
13342 | * must be accounted by pmap stats. | |
13343 | */ | |
13344 | assert(src_entry->use_pmap); | |
13345 | } | |
55e303ae | 13346 | |
91447636 A |
13347 | if (object == VM_OBJECT_NULL) { |
13348 | object = vm_object_allocate(entry_size); | |
3e170ce0 A |
13349 | VME_OFFSET_SET(src_entry, 0); |
13350 | VME_OBJECT_SET(src_entry, object); | |
91447636 A |
13351 | } else if (object->copy_strategy != |
13352 | MEMORY_OBJECT_COPY_SYMMETRIC) { | |
13353 | /* | |
13354 | * We are already using an asymmetric | |
13355 | * copy, and therefore we already have | |
13356 | * the right object. | |
13357 | */ | |
13358 | assert(!src_entry->needs_copy); | |
13359 | } else if (src_entry->needs_copy || object->shadowed || | |
13360 | (object->internal && !object->true_share && | |
2d21ac55 | 13361 | !src_entry->is_shared && |
6d2010ae | 13362 | object->vo_size > entry_size)) { |
1c79356b | 13363 | |
3e170ce0 | 13364 | VME_OBJECT_SHADOW(src_entry, entry_size); |
1c79356b | 13365 | |
91447636 A |
13366 | if (!src_entry->needs_copy && |
13367 | (src_entry->protection & VM_PROT_WRITE)) { | |
0c530ab8 A |
13368 | vm_prot_t prot; |
13369 | ||
13370 | prot = src_entry->protection & ~VM_PROT_WRITE; | |
2d21ac55 | 13371 | |
3e170ce0 A |
13372 | if (override_nx(map, |
13373 | VME_ALIAS(src_entry)) | |
13374 | && prot) | |
0c530ab8 | 13375 | prot |= VM_PROT_EXECUTE; |
2d21ac55 | 13376 | |
316670eb | 13377 | if(map->mapped_in_other_pmaps) { |
2d21ac55 | 13378 | vm_object_pmap_protect( |
3e170ce0 A |
13379 | VME_OBJECT(src_entry), |
13380 | VME_OFFSET(src_entry), | |
2d21ac55 A |
13381 | entry_size, |
13382 | PMAP_NULL, | |
0c530ab8 | 13383 | src_entry->vme_start, |
0c530ab8 | 13384 | prot); |
2d21ac55 A |
13385 | } else { |
13386 | pmap_protect(vm_map_pmap(map), | |
13387 | src_entry->vme_start, | |
13388 | src_entry->vme_end, | |
13389 | prot); | |
91447636 A |
13390 | } |
13391 | } | |
1c79356b | 13392 | |
3e170ce0 | 13393 | object = VME_OBJECT(src_entry); |
91447636 A |
13394 | src_entry->needs_copy = FALSE; |
13395 | } | |
1c79356b | 13396 | |
1c79356b | 13397 | |
91447636 | 13398 | vm_object_lock(object); |
2d21ac55 | 13399 | vm_object_reference_locked(object); /* object ref. for new entry */ |
91447636 | 13400 | if (object->copy_strategy == |
2d21ac55 | 13401 | MEMORY_OBJECT_COPY_SYMMETRIC) { |
91447636 A |
13402 | object->copy_strategy = |
13403 | MEMORY_OBJECT_COPY_DELAY; | |
13404 | } | |
13405 | vm_object_unlock(object); | |
13406 | } | |
1c79356b | 13407 | |
3e170ce0 A |
13408 | offset = (VME_OFFSET(src_entry) + |
13409 | (src_start - src_entry->vme_start)); | |
1c79356b | 13410 | |
7ddcb079 | 13411 | new_entry = _vm_map_entry_create(map_header, !map_header->entries_pageable); |
91447636 | 13412 | vm_map_entry_copy(new_entry, src_entry); |
fe8ab488 A |
13413 | if (new_entry->is_sub_map) { |
13414 | /* clr address space specifics */ | |
13415 | new_entry->use_pmap = FALSE; | |
13416 | } | |
1c79356b | 13417 | |
39236c6e A |
13418 | new_entry->map_aligned = FALSE; |
13419 | ||
91447636 A |
13420 | new_entry->vme_start = map_address; |
13421 | new_entry->vme_end = map_address + tmp_size; | |
e2d2fc5c | 13422 | assert(new_entry->vme_start < new_entry->vme_end); |
91447636 | 13423 | new_entry->inheritance = inheritance; |
3e170ce0 | 13424 | VME_OFFSET_SET(new_entry, offset); |
1c79356b | 13425 | |
91447636 A |
13426 | /* |
13427 | * The new region has to be copied now if required. | |
13428 | */ | |
13429 | RestartCopy: | |
13430 | if (!copy) { | |
316670eb A |
13431 | /* |
13432 | * Cannot allow an entry describing a JIT | |
13433 | * region to be shared across address spaces. | |
13434 | */ | |
13435 | if (src_entry->used_for_jit == TRUE) { | |
13436 | result = KERN_INVALID_ARGUMENT; | |
13437 | break; | |
13438 | } | |
91447636 A |
13439 | src_entry->is_shared = TRUE; |
13440 | new_entry->is_shared = TRUE; | |
13441 | if (!(new_entry->is_sub_map)) | |
13442 | new_entry->needs_copy = FALSE; | |
1c79356b | 13443 | |
91447636 A |
13444 | } else if (src_entry->is_sub_map) { |
13445 | /* make this a COW sub_map if not already */ | |
3e170ce0 | 13446 | assert(new_entry->wired_count == 0); |
91447636 A |
13447 | new_entry->needs_copy = TRUE; |
13448 | object = VM_OBJECT_NULL; | |
13449 | } else if (src_entry->wired_count == 0 && | |
3e170ce0 A |
13450 | vm_object_copy_quickly(&VME_OBJECT(new_entry), |
13451 | VME_OFFSET(new_entry), | |
2d21ac55 A |
13452 | (new_entry->vme_end - |
13453 | new_entry->vme_start), | |
13454 | &src_needs_copy, | |
13455 | &new_entry_needs_copy)) { | |
55e303ae | 13456 | |
91447636 A |
13457 | new_entry->needs_copy = new_entry_needs_copy; |
13458 | new_entry->is_shared = FALSE; | |
1c79356b | 13459 | |
91447636 A |
13460 | /* |
13461 | * Handle copy_on_write semantics. | |
13462 | */ | |
13463 | if (src_needs_copy && !src_entry->needs_copy) { | |
0c530ab8 A |
13464 | vm_prot_t prot; |
13465 | ||
13466 | prot = src_entry->protection & ~VM_PROT_WRITE; | |
2d21ac55 | 13467 | |
3e170ce0 A |
13468 | if (override_nx(map, |
13469 | VME_ALIAS(src_entry)) | |
13470 | && prot) | |
0c530ab8 | 13471 | prot |= VM_PROT_EXECUTE; |
2d21ac55 | 13472 | |
91447636 A |
13473 | vm_object_pmap_protect(object, |
13474 | offset, | |
13475 | entry_size, | |
13476 | ((src_entry->is_shared | |
316670eb | 13477 | || map->mapped_in_other_pmaps) ? |
91447636 A |
13478 | PMAP_NULL : map->pmap), |
13479 | src_entry->vme_start, | |
0c530ab8 | 13480 | prot); |
1c79356b | 13481 | |
3e170ce0 | 13482 | assert(src_entry->wired_count == 0); |
91447636 A |
13483 | src_entry->needs_copy = TRUE; |
13484 | } | |
13485 | /* | |
13486 | * Throw away the old object reference of the new entry. | |
13487 | */ | |
13488 | vm_object_deallocate(object); | |
1c79356b | 13489 | |
91447636 A |
13490 | } else { |
13491 | new_entry->is_shared = FALSE; | |
1c79356b | 13492 | |
91447636 A |
13493 | /* |
13494 | * The map can be safely unlocked since we | |
13495 | * already hold a reference on the object. | |
13496 | * | |
13497 | * Record the timestamp of the map for later | |
13498 | * verification, and unlock the map. | |
13499 | */ | |
13500 | version.main_timestamp = map->timestamp; | |
13501 | vm_map_unlock(map); /* Increments timestamp once! */ | |
55e303ae | 13502 | |
91447636 A |
13503 | /* |
13504 | * Perform the copy. | |
13505 | */ | |
13506 | if (src_entry->wired_count > 0) { | |
13507 | vm_object_lock(object); | |
13508 | result = vm_object_copy_slowly( | |
2d21ac55 A |
13509 | object, |
13510 | offset, | |
13511 | entry_size, | |
13512 | THREAD_UNINT, | |
3e170ce0 | 13513 | &VME_OBJECT(new_entry)); |
1c79356b | 13514 | |
3e170ce0 | 13515 | VME_OFFSET_SET(new_entry, 0); |
91447636 A |
13516 | new_entry->needs_copy = FALSE; |
13517 | } else { | |
3e170ce0 A |
13518 | vm_object_offset_t new_offset; |
13519 | ||
13520 | new_offset = VME_OFFSET(new_entry); | |
91447636 | 13521 | result = vm_object_copy_strategically( |
2d21ac55 A |
13522 | object, |
13523 | offset, | |
13524 | entry_size, | |
3e170ce0 A |
13525 | &VME_OBJECT(new_entry), |
13526 | &new_offset, | |
2d21ac55 | 13527 | &new_entry_needs_copy); |
3e170ce0 A |
13528 | if (new_offset != VME_OFFSET(new_entry)) { |
13529 | VME_OFFSET_SET(new_entry, new_offset); | |
13530 | } | |
1c79356b | 13531 | |
91447636 A |
13532 | new_entry->needs_copy = new_entry_needs_copy; |
13533 | } | |
1c79356b | 13534 | |
91447636 A |
13535 | /* |
13536 | * Throw away the old object reference of the new entry. | |
13537 | */ | |
13538 | vm_object_deallocate(object); | |
1c79356b | 13539 | |
91447636 A |
13540 | if (result != KERN_SUCCESS && |
13541 | result != KERN_MEMORY_RESTART_COPY) { | |
13542 | _vm_map_entry_dispose(map_header, new_entry); | |
13543 | break; | |
13544 | } | |
1c79356b | 13545 | |
91447636 A |
13546 | /* |
13547 | * Verify that the map has not substantially | |
13548 | * changed while the copy was being made. | |
13549 | */ | |
1c79356b | 13550 | |
91447636 A |
13551 | vm_map_lock(map); |
13552 | if (version.main_timestamp + 1 != map->timestamp) { | |
13553 | /* | |
13554 | * Simple version comparison failed. | |
13555 | * | |
13556 | * Retry the lookup and verify that the | |
13557 | * same object/offset are still present. | |
13558 | */ | |
3e170ce0 | 13559 | vm_object_deallocate(VME_OBJECT(new_entry)); |
91447636 A |
13560 | _vm_map_entry_dispose(map_header, new_entry); |
13561 | if (result == KERN_MEMORY_RESTART_COPY) | |
13562 | result = KERN_SUCCESS; | |
13563 | continue; | |
13564 | } | |
1c79356b | 13565 | |
91447636 A |
13566 | if (result == KERN_MEMORY_RESTART_COPY) { |
13567 | vm_object_reference(object); | |
13568 | goto RestartCopy; | |
13569 | } | |
13570 | } | |
1c79356b | 13571 | |
6d2010ae | 13572 | _vm_map_store_entry_link(map_header, |
91447636 | 13573 | map_header->links.prev, new_entry); |
1c79356b | 13574 | |
6d2010ae A |
13575 | /* Protections for submap mappings are irrelevant here */ |
13576 | if( !src_entry->is_sub_map ) { | |
13577 | *cur_protection &= src_entry->protection; | |
13578 | *max_protection &= src_entry->max_protection; | |
13579 | } | |
91447636 A |
13580 | map_address += tmp_size; |
13581 | mapped_size += tmp_size; | |
13582 | src_start += tmp_size; | |
1c79356b | 13583 | |
91447636 | 13584 | } /* end while */ |
1c79356b | 13585 | |
91447636 A |
13586 | vm_map_unlock(map); |
13587 | if (result != KERN_SUCCESS) { | |
13588 | /* | |
13589 | * Free all allocated elements. | |
13590 | */ | |
13591 | for (src_entry = map_header->links.next; | |
13592 | src_entry != (struct vm_map_entry *)&map_header->links; | |
13593 | src_entry = new_entry) { | |
13594 | new_entry = src_entry->vme_next; | |
6d2010ae | 13595 | _vm_map_store_entry_unlink(map_header, src_entry); |
39236c6e | 13596 | if (src_entry->is_sub_map) { |
3e170ce0 | 13597 | vm_map_deallocate(VME_SUBMAP(src_entry)); |
39236c6e | 13598 | } else { |
3e170ce0 | 13599 | vm_object_deallocate(VME_OBJECT(src_entry)); |
39236c6e | 13600 | } |
91447636 A |
13601 | _vm_map_entry_dispose(map_header, src_entry); |
13602 | } | |
13603 | } | |
13604 | return result; | |
1c79356b A |
13605 | } |
13606 | ||
13607 | /* | |
91447636 | 13608 | * Routine: vm_remap |
1c79356b | 13609 | * |
91447636 A |
13610 | * Map a portion of a task's address space. |
13611 | * The mapped region must not overlap more than |
13612 | * one VM memory object. Protections and |
13613 | * inheritance attributes remain the same |
13614 | * as in the original task and are out parameters. |
13615 | * The source and target tasks can be identical. |
13616 | * Other attributes are the same as for vm_map(). |
1c79356b A |
13617 | */ |
13618 | kern_return_t | |
91447636 A |
13619 | vm_map_remap( |
13620 | vm_map_t target_map, | |
13621 | vm_map_address_t *address, | |
13622 | vm_map_size_t size, | |
13623 | vm_map_offset_t mask, | |
060df5ea | 13624 | int flags, |
91447636 A |
13625 | vm_map_t src_map, |
13626 | vm_map_offset_t memory_address, | |
1c79356b | 13627 | boolean_t copy, |
1c79356b A |
13628 | vm_prot_t *cur_protection, |
13629 | vm_prot_t *max_protection, | |
91447636 | 13630 | vm_inherit_t inheritance) |
1c79356b A |
13631 | { |
13632 | kern_return_t result; | |
91447636 | 13633 | vm_map_entry_t entry; |
0c530ab8 | 13634 | vm_map_entry_t insp_entry = VM_MAP_ENTRY_NULL; |
1c79356b | 13635 | vm_map_entry_t new_entry; |
91447636 | 13636 | struct vm_map_header map_header; |
39236c6e | 13637 | vm_map_offset_t offset_in_mapping; |
1c79356b | 13638 | |
91447636 A |
13639 | if (target_map == VM_MAP_NULL) |
13640 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 13641 | |
91447636 | 13642 | switch (inheritance) { |
2d21ac55 A |
13643 | case VM_INHERIT_NONE: |
13644 | case VM_INHERIT_COPY: | |
13645 | case VM_INHERIT_SHARE: | |
91447636 A |
13646 | if (size != 0 && src_map != VM_MAP_NULL) |
13647 | break; | |
13648 | /*FALL THRU*/ | |
2d21ac55 | 13649 | default: |
91447636 A |
13650 | return KERN_INVALID_ARGUMENT; |
13651 | } | |
1c79356b | 13652 | |
39236c6e A |
13653 | /* |
13654 | * If the user is requesting that we return the address of the | |
13655 | * first byte of the data (rather than the base of the page), | |
13656 | * then we use different rounding semantics: specifically, | |
13657 | * we assume that (memory_address, size) describes a region | |
13658 | * all of whose pages we must cover, rather than a base to be truncated | |
13659 | * down and a size to be added to that base. So we figure out | |
13660 | * the highest page that the requested region includes and make | |
13661 | * sure that the size will cover it. | |
13662 | * | |
13663 | * The key example we're worried about is of the form: |
13664 | * | |
13665 | * memory_address = 0x1ff0, size = 0x20 | |
13666 | * | |
13667 | * With the old semantics, we round down the memory_address to 0x1000 | |
13668 | * and round up the size to 0x1000, resulting in our covering *only* | |
13669 | * page 0x1000. With the new semantics, we'd realize that the region covers | |
13670 | * 0x1ff0-0x2010, and compute a size of 0x2000. Thus, we cover both page | |
13671 | * 0x1000 and page 0x2000 in the region we remap. | |
13672 | */ | |
13673 | if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) { | |
13674 | offset_in_mapping = memory_address - vm_map_trunc_page(memory_address, PAGE_MASK); | |
13675 | size = vm_map_round_page(memory_address + size - vm_map_trunc_page(memory_address, PAGE_MASK), PAGE_MASK); | |
13676 | } else { | |
13677 | size = vm_map_round_page(size, PAGE_MASK); | |
13678 | } | |
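/*
 * Worked instance of the example from the comment above, assuming
 * 4K pages (PAGE_MASK == 0xfff):
 *
 *	memory_address = 0x1ff0, size = 0x20
 *	trunc_page(0x1ff0)                  = 0x1000
 *	offset_in_mapping = 0x1ff0 - 0x1000 = 0xff0
 *	size = round_page(0x1ff0 + 0x20 - 0x1000)
 *	     = round_page(0x1010)           = 0x2000
 *
 * so the remapped region covers both page 0x1000 and page 0x2000, and
 * *address is later bumped by 0xff0 to point at the first byte of data.
 */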
1c79356b | 13679 | |
91447636 | 13680 | result = vm_map_remap_extract(src_map, memory_address, |
2d21ac55 A |
13681 | size, copy, &map_header, |
13682 | cur_protection, | |
13683 | max_protection, | |
13684 | inheritance, | |
39236c6e | 13685 | target_map->hdr.entries_pageable); |
1c79356b | 13686 | |
91447636 A |
13687 | if (result != KERN_SUCCESS) { |
13688 | return result; | |
13689 | } | |
1c79356b | 13690 | |
91447636 A |
13691 | /* |
13692 | * Allocate/check a range of free virtual address | |
13693 | * space for the target | |
1c79356b | 13694 | */ |
39236c6e A |
13695 | *address = vm_map_trunc_page(*address, |
13696 | VM_MAP_PAGE_MASK(target_map)); | |
91447636 A |
13697 | vm_map_lock(target_map); |
13698 | result = vm_map_remap_range_allocate(target_map, address, size, | |
060df5ea | 13699 | mask, flags, &insp_entry); |
1c79356b | 13700 | |
91447636 A |
13701 | for (entry = map_header.links.next; |
13702 | entry != (struct vm_map_entry *)&map_header.links; | |
13703 | entry = new_entry) { | |
13704 | new_entry = entry->vme_next; | |
6d2010ae | 13705 | _vm_map_store_entry_unlink(&map_header, entry); |
91447636 | 13706 | if (result == KERN_SUCCESS) { |
3e170ce0 A |
13707 | if (flags & VM_FLAGS_RESILIENT_CODESIGN) { |
13708 | /* no codesigning -> read-only access */ | |
13709 | assert(!entry->used_for_jit); | |
13710 | entry->max_protection = VM_PROT_READ; | |
13711 | entry->protection = VM_PROT_READ; | |
13712 | entry->vme_resilient_codesign = TRUE; | |
13713 | } | |
91447636 A |
13714 | entry->vme_start += *address; |
13715 | entry->vme_end += *address; | |
39236c6e | 13716 | assert(!entry->map_aligned); |
6d2010ae | 13717 | vm_map_store_entry_link(target_map, insp_entry, entry); |
91447636 A |
13718 | insp_entry = entry; |
13719 | } else { | |
13720 | if (!entry->is_sub_map) { | |
3e170ce0 | 13721 | vm_object_deallocate(VME_OBJECT(entry)); |
91447636 | 13722 | } else { |
3e170ce0 | 13723 | vm_map_deallocate(VME_SUBMAP(entry)); |
2d21ac55 | 13724 | } |
91447636 | 13725 | _vm_map_entry_dispose(&map_header, entry); |
1c79356b | 13726 | } |
91447636 | 13727 | } |
1c79356b | 13728 | |
3e170ce0 A |
13729 | if (flags & VM_FLAGS_RESILIENT_CODESIGN) { |
13730 | *cur_protection = VM_PROT_READ; | |
13731 | *max_protection = VM_PROT_READ; | |
13732 | } | |
13733 | ||
6d2010ae A |
13734 | if( target_map->disable_vmentry_reuse == TRUE) { |
13735 | if( target_map->highest_entry_end < insp_entry->vme_end ){ | |
13736 | target_map->highest_entry_end = insp_entry->vme_end; | |
13737 | } | |
13738 | } | |
13739 | ||
91447636 A |
13740 | if (result == KERN_SUCCESS) { |
13741 | target_map->size += size; | |
0c530ab8 | 13742 | SAVE_HINT_MAP_WRITE(target_map, insp_entry); |
91447636 A |
13743 | } |
13744 | vm_map_unlock(target_map); | |
1c79356b | 13745 | |
91447636 A |
13746 | if (result == KERN_SUCCESS && target_map->wiring_required) |
13747 | result = vm_map_wire(target_map, *address, | |
3e170ce0 A |
13748 | *address + size, *cur_protection | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK), |
13749 | TRUE); | |
39236c6e A |
13750 | |
13751 | /* | |
13752 | * If requested, return the address of the data pointed to by the | |
13753 | * request, rather than the base of the resulting page. | |
13754 | */ | |
13755 | if ((flags & VM_FLAGS_RETURN_DATA_ADDR) != 0) { | |
13756 | *address += offset_in_mapping; | |
13757 | } | |
13758 | ||
91447636 A |
13759 | return result; |
13760 | } | |
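/*
 * A minimal user-space sketch of this operation via the Mach interface
 * (mach_vm_remap() is the user-visible wrapper; the arguments shown are
 * assumed, illustrative values):
 */
#if 0	/* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
mirror_range(mach_vm_address_t src, mach_vm_size_t size,
	     mach_vm_address_t *dst)
{
	vm_prot_t cur_prot, max_prot;	/* out parameters */

	*dst = 0;
	return mach_vm_remap(mach_task_self(), dst, size, 0,
			     VM_FLAGS_ANYWHERE,
			     mach_task_self(), src,
			     FALSE,		/* share, don't copy */
			     &cur_prot, &max_prot,
			     VM_INHERIT_SHARE);
}
#endif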
1c79356b | 13761 | |
91447636 A |
13762 | /* |
13763 | * Routine: vm_map_remap_range_allocate | |
13764 | * | |
13765 | * Description: | |
13766 | * Allocate a range in the specified virtual address map. | |
13767 | * Returns the address and the map entry just before the allocated | |
13768 | * range. | |
13769 | * | |
13770 | * Map must be locked. | |
13771 | */ | |
1c79356b | 13772 | |
91447636 A |
13773 | static kern_return_t |
13774 | vm_map_remap_range_allocate( | |
13775 | vm_map_t map, | |
13776 | vm_map_address_t *address, /* IN/OUT */ | |
13777 | vm_map_size_t size, | |
13778 | vm_map_offset_t mask, | |
060df5ea | 13779 | int flags, |
91447636 A |
13780 | vm_map_entry_t *map_entry) /* OUT */ |
13781 | { | |
060df5ea A |
13782 | vm_map_entry_t entry; |
13783 | vm_map_offset_t start; | |
13784 | vm_map_offset_t end; | |
13785 | kern_return_t kr; | |
3e170ce0 | 13786 | vm_map_entry_t hole_entry; |
1c79356b | 13787 | |
2d21ac55 | 13788 | StartAgain: ; |
1c79356b | 13789 | |
2d21ac55 | 13790 | start = *address; |
1c79356b | 13791 | |
060df5ea | 13792 | if (flags & VM_FLAGS_ANYWHERE) |
2d21ac55 A |
13793 | { |
13794 | /* | |
13795 | * Calculate the first possible address. | |
13796 | */ | |
1c79356b | 13797 | |
2d21ac55 A |
13798 | if (start < map->min_offset) |
13799 | start = map->min_offset; | |
13800 | if (start > map->max_offset) | |
13801 | return(KERN_NO_SPACE); | |
91447636 | 13802 | |
2d21ac55 A |
13803 | /* |
13804 | * Look for the first possible address; | |
13805 | * if there's already something at this | |
13806 | * address, we have to start after it. | |
13807 | */ | |
1c79356b | 13808 | |
6d2010ae A |
13809 | if( map->disable_vmentry_reuse == TRUE) { |
13810 | VM_MAP_HIGHEST_ENTRY(map, entry, start); | |
2d21ac55 | 13811 | } else { |
3e170ce0 A |
13812 | |
13813 | if (map->holelistenabled) { | |
13814 | hole_entry = (vm_map_entry_t)map->holes_list; | |
13815 | ||
13816 | if (hole_entry == NULL) { | |
13817 | /* | |
13818 | * No more space in the map? | |
13819 | */ | |
13820 | return(KERN_NO_SPACE); | |
13821 | } else { | |
13822 | ||
13823 | boolean_t found_hole = FALSE; | |
13824 | ||
13825 | do { | |
13826 | if (hole_entry->vme_start >= start) { | |
13827 | start = hole_entry->vme_start; | |
13828 | found_hole = TRUE; | |
13829 | break; | |
13830 | } | |
13831 | ||
13832 | if (hole_entry->vme_end > start) { | |
13833 | found_hole = TRUE; | |
13834 | break; | |
13835 | } | |
13836 | hole_entry = hole_entry->vme_next; | |
13837 | ||
13838 | } while (hole_entry != (vm_map_entry_t) map->holes_list); | |
13839 | ||
13840 | if (found_hole == FALSE) { | |
13841 | return (KERN_NO_SPACE); | |
13842 | } | |
13843 | ||
13844 | entry = hole_entry; | |
13845 | } | |
6d2010ae | 13846 | } else { |
3e170ce0 A |
13847 | assert(first_free_is_valid(map)); |
13848 | if (start == map->min_offset) { | |
13849 | if ((entry = map->first_free) != vm_map_to_entry(map)) | |
13850 | start = entry->vme_end; | |
13851 | } else { | |
13852 | vm_map_entry_t tmp_entry; | |
13853 | if (vm_map_lookup_entry(map, start, &tmp_entry)) | |
13854 | start = tmp_entry->vme_end; | |
13855 | entry = tmp_entry; | |
13856 | } | |
6d2010ae | 13857 | } |
39236c6e A |
13858 | start = vm_map_round_page(start, |
13859 | VM_MAP_PAGE_MASK(map)); | |
2d21ac55 | 13860 | } |
91447636 | 13861 | |
2d21ac55 A |
13862 | /* |
13863 | * In any case, the "entry" always precedes | |
13864 | * the proposed new region throughout the | |
13865 | * loop: | |
13866 | */ | |
1c79356b | 13867 | |
2d21ac55 A |
13868 | while (TRUE) { |
13869 | register vm_map_entry_t next; | |
13870 | ||
13871 | /* | |
13872 | * Find the end of the proposed new region. | |
13873 | * Be sure we didn't go beyond the end, or | |
13874 | * wrap around the address. | |
13875 | */ | |
13876 | ||
13877 | end = ((start + mask) & ~mask); | |
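/*
 * Worked example: with mask == 0x3FFF (16KB alignment), a start of
 * 0x5000 gives end = (0x5000 + 0x3FFF) & ~0x3FFF == 0x8000, i.e.
 * "start" rounded up to the next multiple of (mask + 1).
 */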
39236c6e A |
13878 | end = vm_map_round_page(end, |
13879 | VM_MAP_PAGE_MASK(map)); | |
2d21ac55 A |
13880 | if (end < start) |
13881 | return(KERN_NO_SPACE); | |
13882 | start = end; | |
13883 | end += size; | |
13884 | ||
13885 | if ((end > map->max_offset) || (end < start)) { | |
13886 | if (map->wait_for_space) { | |
13887 | if (size <= (map->max_offset - | |
13888 | map->min_offset)) { | |
13889 | assert_wait((event_t) map, THREAD_INTERRUPTIBLE); | |
13890 | vm_map_unlock(map); | |
13891 | thread_block(THREAD_CONTINUE_NULL); | |
13892 | vm_map_lock(map); | |
13893 | goto StartAgain; | |
13894 | } | |
13895 | } | |
91447636 | 13896 | |
2d21ac55 A |
13897 | return(KERN_NO_SPACE); |
13898 | } | |
1c79356b | 13899 | |
2d21ac55 | 13900 | next = entry->vme_next; |
1c79356b | 13901 | |
3e170ce0 A |
13902 | if (map->holelistenabled) { |
13903 | if (entry->vme_end >= end) | |
13904 | break; | |
13905 | } else { | |
13906 | /* | |
13907 | * If there are no more entries, we must win. | |
13908 | * | |
13909 | * OR | |
13910 | * | |
13911 | * If there is another entry, it must be | |
13912 | * after the end of the potential new region. | |
13913 | */ | |
1c79356b | 13914 | |
3e170ce0 A |
13915 | if (next == vm_map_to_entry(map)) |
13916 | break; | |
13917 | ||
13918 | if (next->vme_start >= end) | |
13919 | break; | |
13920 | } | |
1c79356b | 13921 | |
2d21ac55 A |
13922 | /* |
13923 | * Didn't fit -- move to the next entry. | |
13924 | */ | |
1c79356b | 13925 | |
2d21ac55 | 13926 | entry = next; |
3e170ce0 A |
13927 | |
13928 | if (map->holelistenabled) { | |
13929 | if (entry == (vm_map_entry_t) map->holes_list) { | |
13930 | /* | |
13931 | * Wrapped around | |
13932 | */ | |
13933 | return(KERN_NO_SPACE); | |
13934 | } | |
13935 | start = entry->vme_start; | |
13936 | } else { | |
13937 | start = entry->vme_end; | |
13938 | } | |
13939 | } | |
13940 | ||
13941 | if (map->holelistenabled) { | |
13942 | ||
13943 | if (vm_map_lookup_entry(map, entry->vme_start, &entry)) { | |
13944 | panic("Found an existing entry (%p) instead of potential hole at address: 0x%llx.\n", entry, (unsigned long long)entry->vme_start); | |
13945 | } | |
2d21ac55 | 13946 | } |
3e170ce0 | 13947 | |
2d21ac55 | 13948 | *address = start; |
3e170ce0 | 13949 | |
2d21ac55 A |
13950 | } else { |
13951 | vm_map_entry_t temp_entry; | |
91447636 | 13952 | |
2d21ac55 A |
13953 | /* |
13954 | * Verify that: | |
13955 | * the address doesn't itself violate | |
13956 | * the mask requirement. | |
13957 | */ | |
1c79356b | 13958 | |
2d21ac55 A |
13959 | if ((start & mask) != 0) |
13960 | return(KERN_NO_SPACE); | |
1c79356b | 13961 | |
1c79356b | 13962 | |
2d21ac55 A |
13963 | /* |
13964 | * ... the address is within bounds | |
13965 | */ | |
1c79356b | 13966 | |
2d21ac55 | 13967 | end = start + size; |
1c79356b | 13968 | |
2d21ac55 A |
13969 | if ((start < map->min_offset) || |
13970 | (end > map->max_offset) || | |
13971 | (start >= end)) { | |
13972 | return(KERN_INVALID_ADDRESS); | |
13973 | } | |
1c79356b | 13974 | |
060df5ea A |
13975 | /* |
13976 | * If we're asked to overwrite whatever was mapped in that | |
13977 | * range, first deallocate that range. | |
13978 | */ | |
13979 | if (flags & VM_FLAGS_OVERWRITE) { | |
13980 | vm_map_t zap_map; | |
13981 | ||
13982 | /* | |
13983 | * We use a "zap_map" to avoid having to unlock | |
13984 | * the "map" in vm_map_delete(), which would compromise | |
13985 | * the atomicity of the "deallocate" and then "remap" | |
13986 | * combination. | |
13987 | */ | |
13988 | zap_map = vm_map_create(PMAP_NULL, | |
13989 | start, | |
316670eb | 13990 | end, |
060df5ea A |
13991 | map->hdr.entries_pageable); |
13992 | if (zap_map == VM_MAP_NULL) { | |
13993 | return KERN_RESOURCE_SHORTAGE; | |
13994 | } | |
39236c6e | 13995 | vm_map_set_page_shift(zap_map, VM_MAP_PAGE_SHIFT(map)); |
3e170ce0 | 13996 | vm_map_disable_hole_optimization(zap_map); |
060df5ea A |
13997 | |
13998 | kr = vm_map_delete(map, start, end, | |
fe8ab488 A |
13999 | (VM_MAP_REMOVE_SAVE_ENTRIES | |
14000 | VM_MAP_REMOVE_NO_MAP_ALIGN), | |
060df5ea A |
14001 | zap_map); |
14002 | if (kr == KERN_SUCCESS) { | |
14003 | vm_map_destroy(zap_map, | |
14004 | VM_MAP_REMOVE_NO_PMAP_CLEANUP); | |
14005 | zap_map = VM_MAP_NULL; | |
14006 | } | |
14007 | } | |
14008 | ||
2d21ac55 A |
14009 | /* |
14010 | * ... the starting address isn't allocated | |
14011 | */ | |
91447636 | 14012 | |
2d21ac55 A |
14013 | if (vm_map_lookup_entry(map, start, &temp_entry)) |
14014 | return(KERN_NO_SPACE); | |
91447636 | 14015 | |
2d21ac55 | 14016 | entry = temp_entry; |
91447636 | 14017 | |
2d21ac55 A |
14018 | /* |
14019 | * ... the next region doesn't overlap the | |
14020 | * end point. | |
14021 | */ | |
1c79356b | 14022 | |
2d21ac55 A |
14023 | if ((entry->vme_next != vm_map_to_entry(map)) && |
14024 | (entry->vme_next->vme_start < end)) | |
14025 | return(KERN_NO_SPACE); | |
14026 | } | |
14027 | *map_entry = entry; | |
14028 | return(KERN_SUCCESS); | |
91447636 | 14029 | } |
1c79356b | 14030 | |
91447636 A |
14031 | /* |
14032 | * vm_map_switch: | |
14033 | * | |
14034 | * Set the address map for the current thread to the specified map | |
14035 | */ | |
1c79356b | 14036 | |
91447636 A |
14037 | vm_map_t |
14038 | vm_map_switch( | |
14039 | vm_map_t map) | |
14040 | { | |
14041 | int mycpu; | |
14042 | thread_t thread = current_thread(); | |
14043 | vm_map_t oldmap = thread->map; | |
1c79356b | 14044 | |
91447636 A |
14045 | mp_disable_preemption(); |
14046 | mycpu = cpu_number(); | |
1c79356b | 14047 | |
91447636 A |
14048 | /* |
14049 | * Deactivate the current map and activate the requested map | |
14050 | */ | |
14051 | PMAP_SWITCH_USER(thread, map, mycpu); | |
1c79356b | 14052 | |
91447636 A |
14053 | mp_enable_preemption(); |
14054 | return(oldmap); | |
14055 | } | |
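/*
 * Typical call pattern (sketch; this is exactly what vm_map_write_user()
 * below does): take a reference on the target map, switch to it around
 * the copy, then switch back and drop the reference.
 *
 *	vm_map_reference(map);
 *	oldmap = vm_map_switch(map);
 *	... copyin()/copyout() against "map" ...
 *	vm_map_switch(oldmap);
 *	vm_map_deallocate(map);
 */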
1c79356b | 14056 | |
1c79356b | 14057 | |
91447636 A |
14058 | /* |
14059 | * Routine: vm_map_write_user | |
14060 | * | |
14061 | * Description: | |
14062 | * Copy out data from kernel space into a range in the | |
14063 | * destination map. The range must already exist in the | |
14064 | * destination map. | |
14065 | * NOTE: This routine should only be called by threads | |
14066 | * which can block on a page fault, i.e. kernel-mode user | |
14067 | * threads. | |
14068 | * | |
14069 | */ | |
14070 | kern_return_t | |
14071 | vm_map_write_user( | |
14072 | vm_map_t map, | |
14073 | void *src_p, | |
14074 | vm_map_address_t dst_addr, | |
14075 | vm_size_t size) | |
14076 | { | |
14077 | kern_return_t kr = KERN_SUCCESS; | |
1c79356b | 14078 | |
91447636 A |
14079 | if(current_map() == map) { |
14080 | if (copyout(src_p, dst_addr, size)) { | |
14081 | kr = KERN_INVALID_ADDRESS; | |
14082 | } | |
14083 | } else { | |
14084 | vm_map_t oldmap; | |
1c79356b | 14085 | |
91447636 A |
14086 | /* take on the identity of the target map while doing */ |
14087 | /* the transfer */ | |
1c79356b | 14088 | |
91447636 A |
14089 | vm_map_reference(map); |
14090 | oldmap = vm_map_switch(map); | |
14091 | if (copyout(src_p, dst_addr, size)) { | |
14092 | kr = KERN_INVALID_ADDRESS; | |
1c79356b | 14093 | } |
91447636 A |
14094 | vm_map_switch(oldmap); |
14095 | vm_map_deallocate(map); | |
1c79356b | 14096 | } |
91447636 | 14097 | return kr; |
1c79356b A |
14098 | } |
14099 | ||
14100 | /* | |
91447636 A |
14101 | * Routine: vm_map_read_user |
14102 | * | |
14103 | * Description: | |
14104 | * Copy in data from a user-space source map into the | |
14105 | * kernel map. The space must already exist in the | |
14106 | * kernel map. | |
14107 | * NOTE: This routine should only be called by threads | |
14108 | * which can block on a page fault, i.e. kernel-mode user | |
14109 | * threads. | |
1c79356b | 14110 | * |
1c79356b A |
14111 | */ |
14112 | kern_return_t | |
91447636 A |
14113 | vm_map_read_user( |
14114 | vm_map_t map, | |
14115 | vm_map_address_t src_addr, | |
14116 | void *dst_p, | |
14117 | vm_size_t size) | |
1c79356b | 14118 | { |
91447636 | 14119 | kern_return_t kr = KERN_SUCCESS; |
1c79356b | 14120 | |
91447636 A |
14121 | if(current_map() == map) { |
14122 | if (copyin(src_addr, dst_p, size)) { | |
14123 | kr = KERN_INVALID_ADDRESS; | |
14124 | } | |
14125 | } else { | |
14126 | vm_map_t oldmap; | |
1c79356b | 14127 | |
91447636 A |
14128 | /* take on the identity of the target map while doing */ |
14129 | /* the transfer */ | |
14130 | ||
14131 | vm_map_reference(map); | |
14132 | oldmap = vm_map_switch(map); | |
14133 | if (copyin(src_addr, dst_p, size)) { | |
14134 | kr = KERN_INVALID_ADDRESS; | |
14135 | } | |
14136 | vm_map_switch(oldmap); | |
14137 | vm_map_deallocate(map); | |
1c79356b | 14138 | } |
91447636 A |
14139 | return kr; |
14140 | } | |
14141 | ||
1c79356b | 14142 | |
91447636 A |
14143 | /* |
14144 | * vm_map_check_protection: | |
14145 | * | |
14146 | * Assert that the target map allows the specified | |
14147 | * privilege on the entire address region given. | |
14148 | * The entire region must be allocated. | |
14149 | */ | |
2d21ac55 A |
14150 | boolean_t |
14151 | vm_map_check_protection(vm_map_t map, vm_map_offset_t start, | |
14152 | vm_map_offset_t end, vm_prot_t protection) | |
91447636 | 14153 | { |
2d21ac55 A |
14154 | vm_map_entry_t entry; |
14155 | vm_map_entry_t tmp_entry; | |
1c79356b | 14156 | |
91447636 | 14157 | vm_map_lock(map); |
1c79356b | 14158 | |
2d21ac55 | 14159 | if (start < vm_map_min(map) || end > vm_map_max(map) || start > end) |
91447636 | 14160 | { |
2d21ac55 A |
14161 | vm_map_unlock(map); |
14162 | return (FALSE); | |
1c79356b A |
14163 | } |
14164 | ||
91447636 A |
14165 | if (!vm_map_lookup_entry(map, start, &tmp_entry)) { |
14166 | vm_map_unlock(map); | |
14167 | return(FALSE); | |
14168 | } | |
1c79356b | 14169 | |
91447636 A |
14170 | entry = tmp_entry; |
14171 | ||
14172 | while (start < end) { | |
14173 | if (entry == vm_map_to_entry(map)) { | |
14174 | vm_map_unlock(map); | |
14175 | return(FALSE); | |
1c79356b | 14176 | } |
1c79356b | 14177 | |
91447636 A |
14178 | /* |
14179 | * No holes allowed! | |
14180 | */ | |
1c79356b | 14181 | |
91447636 A |
14182 | if (start < entry->vme_start) { |
14183 | vm_map_unlock(map); | |
14184 | return(FALSE); | |
14185 | } | |
14186 | ||
14187 | /* | |
14188 | * Check protection associated with entry. | |
14189 | */ | |
14190 | ||
14191 | if ((entry->protection & protection) != protection) { | |
14192 | vm_map_unlock(map); | |
14193 | return(FALSE); | |
14194 | } | |
14195 | ||
14196 | /* go to next entry */ | |
14197 | ||
14198 | start = entry->vme_end; | |
14199 | entry = entry->vme_next; | |
14200 | } | |
14201 | vm_map_unlock(map); | |
14202 | return(TRUE); | |
1c79356b A |
14203 | } |
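/*
 * Usage sketch (hypothetical caller): verify that an entire buffer is
 * readable and writable before operating on it.
 *
 *	if (!vm_map_check_protection(map, start, start + size,
 *				     VM_PROT_READ | VM_PROT_WRITE))
 *		return KERN_PROTECTION_FAILURE;
 */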
14204 | ||
1c79356b | 14205 | kern_return_t |
91447636 A |
14206 | vm_map_purgable_control( |
14207 | vm_map_t map, | |
14208 | vm_map_offset_t address, | |
14209 | vm_purgable_t control, | |
14210 | int *state) | |
1c79356b | 14211 | { |
91447636 A |
14212 | vm_map_entry_t entry; |
14213 | vm_object_t object; | |
14214 | kern_return_t kr; | |
fe8ab488 | 14215 | boolean_t was_nonvolatile; |
1c79356b | 14216 | |
1c79356b | 14217 | /* |
91447636 A |
14218 | * Vet all the input parameters and current type and state of the |
14219 | * underlying object. Return with an error if anything is amiss. | |
1c79356b | 14220 | */ |
91447636 A |
14221 | if (map == VM_MAP_NULL) |
14222 | return(KERN_INVALID_ARGUMENT); | |
1c79356b | 14223 | |
91447636 | 14224 | if (control != VM_PURGABLE_SET_STATE && |
b0d623f7 A |
14225 | control != VM_PURGABLE_GET_STATE && |
14226 | control != VM_PURGABLE_PURGE_ALL) | |
91447636 | 14227 | return(KERN_INVALID_ARGUMENT); |
1c79356b | 14228 | |
b0d623f7 A |
14229 | if (control == VM_PURGABLE_PURGE_ALL) { |
14230 | vm_purgeable_object_purge_all(); | |
14231 | return KERN_SUCCESS; | |
14232 | } | |
14233 | ||
91447636 | 14234 | if (control == VM_PURGABLE_SET_STATE && |
b0d623f7 | 14235 | (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) || |
2d21ac55 | 14236 | ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) |
91447636 A |
14237 | return(KERN_INVALID_ARGUMENT); |
14238 | ||
b0d623f7 | 14239 | vm_map_lock_read(map); |
91447636 A |
14240 | |
14241 | if (!vm_map_lookup_entry(map, address, &entry) || entry->is_sub_map) { | |
14242 | ||
14243 | /* | |
14244 | * Must pass a valid non-submap address. | |
14245 | */ | |
b0d623f7 | 14246 | vm_map_unlock_read(map); |
91447636 A |
14247 | return(KERN_INVALID_ADDRESS); |
14248 | } | |
14249 | ||
14250 | if ((entry->protection & VM_PROT_WRITE) == 0) { | |
14251 | /* | |
14252 | * Can't apply purgable controls to something you can't write. | |
14253 | */ | |
b0d623f7 | 14254 | vm_map_unlock_read(map); |
91447636 A |
14255 | return(KERN_PROTECTION_FAILURE); |
14256 | } | |
14257 | ||
3e170ce0 | 14258 | object = VME_OBJECT(entry); |
fe8ab488 A |
14259 | if (object == VM_OBJECT_NULL || |
14260 | object->purgable == VM_PURGABLE_DENY) { | |
91447636 | 14261 | /* |
fe8ab488 | 14262 | * Object must already be present and be purgeable. |
91447636 | 14263 | */ |
b0d623f7 | 14264 | vm_map_unlock_read(map); |
91447636 A |
14265 | return KERN_INVALID_ARGUMENT; |
14266 | } | |
14267 | ||
14268 | vm_object_lock(object); | |
14269 | ||
39236c6e | 14270 | #if 00 |
3e170ce0 | 14271 | if (VME_OFFSET(entry) != 0 || |
6d2010ae | 14272 | entry->vme_end - entry->vme_start != object->vo_size) { |
91447636 A |
14273 | /* |
14274 | * Can only apply purgable controls to the whole (existing) | |
14275 | * object at once. | |
14276 | */ | |
b0d623f7 | 14277 | vm_map_unlock_read(map); |
91447636 A |
14278 | vm_object_unlock(object); |
14279 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 14280 | } |
39236c6e | 14281 | #endif |
fe8ab488 A |
14282 | |
14283 | assert(!entry->is_sub_map); | |
14284 | assert(!entry->use_pmap); /* purgeable has its own accounting */ | |
14285 | ||
b0d623f7 | 14286 | vm_map_unlock_read(map); |
1c79356b | 14287 | |
fe8ab488 A |
14288 | was_nonvolatile = (object->purgable == VM_PURGABLE_NONVOLATILE); |
14289 | ||
91447636 | 14290 | kr = vm_object_purgable_control(object, control, state); |
1c79356b | 14291 | |
fe8ab488 A |
14292 | if (was_nonvolatile && |
14293 | object->purgable != VM_PURGABLE_NONVOLATILE && | |
14294 | map->pmap == kernel_pmap) { | |
14295 | #if DEBUG | |
14296 | object->vo_purgeable_volatilizer = kernel_task; | |
14297 | #endif /* DEBUG */ | |
14298 | } | |
14299 | ||
91447636 | 14300 | vm_object_unlock(object); |
1c79356b | 14301 | |
91447636 A |
14302 | return kr; |
14303 | } | |
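/*
 * Usage sketch (hypothetical): mark a purgeable region volatile so its
 * pages may be reclaimed under memory pressure, then read the state back.
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kr = vm_map_purgable_control(map, addr,
 *				     VM_PURGABLE_SET_STATE, &state);
 *	...
 *	kr = vm_map_purgable_control(map, addr,
 *				     VM_PURGABLE_GET_STATE, &state);
 */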
1c79356b | 14304 | |
91447636 | 14305 | kern_return_t |
b0d623f7 | 14306 | vm_map_page_query_internal( |
2d21ac55 | 14307 | vm_map_t target_map, |
91447636 | 14308 | vm_map_offset_t offset, |
2d21ac55 A |
14309 | int *disposition, |
14310 | int *ref_count) | |
91447636 | 14311 | { |
b0d623f7 A |
14312 | kern_return_t kr; |
14313 | vm_page_info_basic_data_t info; | |
14314 | mach_msg_type_number_t count; | |
14315 | ||
14316 | count = VM_PAGE_INFO_BASIC_COUNT; | |
14317 | kr = vm_map_page_info(target_map, | |
14318 | offset, | |
14319 | VM_PAGE_INFO_BASIC, | |
14320 | (vm_page_info_t) &info, | |
14321 | &count); | |
14322 | if (kr == KERN_SUCCESS) { | |
14323 | *disposition = info.disposition; | |
14324 | *ref_count = info.ref_count; | |
14325 | } else { | |
14326 | *disposition = 0; | |
14327 | *ref_count = 0; | |
14328 | } | |
2d21ac55 | 14329 | |
b0d623f7 A |
14330 | return kr; |
14331 | } | |
14332 | ||
14333 | kern_return_t | |
14334 | vm_map_page_info( | |
14335 | vm_map_t map, | |
14336 | vm_map_offset_t offset, | |
14337 | vm_page_info_flavor_t flavor, | |
14338 | vm_page_info_t info, | |
14339 | mach_msg_type_number_t *count) | |
14340 | { | |
14341 | vm_map_entry_t map_entry; | |
14342 | vm_object_t object; | |
14343 | vm_page_t m; | |
14344 | kern_return_t kr; | |
14345 | kern_return_t retval = KERN_SUCCESS; | |
14346 | boolean_t top_object; | |
14347 | int disposition; | |
14348 | int ref_count; | |
b0d623f7 A |
14349 | vm_page_info_basic_t basic_info; |
14350 | int depth; | |
6d2010ae | 14351 | vm_map_offset_t offset_in_page; |
2d21ac55 | 14352 | |
b0d623f7 A |
14353 | switch (flavor) { |
14354 | case VM_PAGE_INFO_BASIC: | |
14355 | if (*count != VM_PAGE_INFO_BASIC_COUNT) { | |
6d2010ae A |
14356 | /* |
14357 | * The "vm_page_info_basic_data" structure was not | |
14358 | * properly padded, so allow the size to be off by | |
14359 | * one to maintain backwards binary compatibility... | |
14360 | */ | |
14361 | if (*count != VM_PAGE_INFO_BASIC_COUNT - 1) | |
14362 | return KERN_INVALID_ARGUMENT; | |
b0d623f7 A |
14363 | } |
14364 | break; | |
14365 | default: | |
14366 | return KERN_INVALID_ARGUMENT; | |
91447636 | 14367 | } |
2d21ac55 | 14368 | |
b0d623f7 A |
14369 | disposition = 0; |
14370 | ref_count = 0; | |
b0d623f7 A |
14371 | top_object = TRUE; |
14372 | depth = 0; | |
14373 | ||
14374 | retval = KERN_SUCCESS; | |
6d2010ae | 14375 | offset_in_page = offset & PAGE_MASK; |
39236c6e | 14376 | offset = vm_map_trunc_page(offset, PAGE_MASK); |
b0d623f7 A |
14377 | |
14378 | vm_map_lock_read(map); | |
14379 | ||
14380 | /* | |
14381 | * First, find the map entry covering "offset", going down | |
14382 | * submaps if necessary. | |
14383 | */ | |
14384 | for (;;) { | |
14385 | if (!vm_map_lookup_entry(map, offset, &map_entry)) { | |
14386 | vm_map_unlock_read(map); | |
14387 | return KERN_INVALID_ADDRESS; | |
14388 | } | |
14389 | /* compute offset from this map entry's start */ | |
14390 | offset -= map_entry->vme_start; | |
14391 | /* compute offset into this map entry's object (or submap) */ | |
3e170ce0 | 14392 | offset += VME_OFFSET(map_entry); |
b0d623f7 A |
14393 | |
14394 | if (map_entry->is_sub_map) { | |
14395 | vm_map_t sub_map; | |
2d21ac55 | 14396 | |
3e170ce0 | 14397 | sub_map = VME_SUBMAP(map_entry); |
2d21ac55 | 14398 | vm_map_lock_read(sub_map); |
b0d623f7 | 14399 | vm_map_unlock_read(map); |
2d21ac55 | 14400 | |
b0d623f7 A |
14401 | map = sub_map; |
14402 | ||
14403 | ref_count = MAX(ref_count, map->ref_count); | |
14404 | continue; | |
1c79356b | 14405 | } |
b0d623f7 | 14406 | break; |
91447636 | 14407 | } |
b0d623f7 | 14408 | |
3e170ce0 | 14409 | object = VME_OBJECT(map_entry); |
b0d623f7 A |
14410 | if (object == VM_OBJECT_NULL) { |
14411 | /* no object -> no page */ | |
14412 | vm_map_unlock_read(map); | |
14413 | goto done; | |
14414 | } | |
14415 | ||
91447636 | 14416 | vm_object_lock(object); |
b0d623f7 A |
14417 | vm_map_unlock_read(map); |
14418 | ||
14419 | /* | |
14420 | * Go down the VM object shadow chain until we find the page | |
14421 | * we're looking for. | |
14422 | */ | |
14423 | for (;;) { | |
14424 | ref_count = MAX(ref_count, object->ref_count); | |
2d21ac55 | 14425 | |
91447636 | 14426 | m = vm_page_lookup(object, offset); |
2d21ac55 | 14427 | |
91447636 | 14428 | if (m != VM_PAGE_NULL) { |
b0d623f7 | 14429 | disposition |= VM_PAGE_QUERY_PAGE_PRESENT; |
91447636 A |
14430 | break; |
14431 | } else { | |
2d21ac55 A |
14432 | #if MACH_PAGEMAP |
14433 | if (object->existence_map) { | |
b0d623f7 A |
14434 | if (vm_external_state_get(object->existence_map, |
14435 | offset) == | |
14436 | VM_EXTERNAL_STATE_EXISTS) { | |
2d21ac55 A |
14437 | /* |
14438 | * this page has been paged out | |
14439 | */ | |
b0d623f7 | 14440 | disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; |
2d21ac55 A |
14441 | break; |
14442 | } | |
14443 | } else | |
14444 | #endif | |
39236c6e A |
14445 | if (object->internal && |
14446 | object->alive && | |
14447 | !object->terminating && | |
14448 | object->pager_ready) { | |
14449 | ||
14450 | if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) { | |
14451 | if (VM_COMPRESSOR_PAGER_STATE_GET( | |
14452 | object, | |
14453 | offset) | |
14454 | == VM_EXTERNAL_STATE_EXISTS) { | |
14455 | /* the pager has that page */ | |
14456 | disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; | |
14457 | break; | |
14458 | } | |
14459 | } else { | |
b0d623f7 | 14460 | memory_object_t pager; |
2d21ac55 | 14461 | |
b0d623f7 A |
14462 | vm_object_paging_begin(object); |
14463 | pager = object->pager; | |
14464 | vm_object_unlock(object); | |
2d21ac55 | 14465 | |
2d21ac55 | 14466 | /* |
b0d623f7 A |
14467 | * Ask the default pager if |
14468 | * it has this page. | |
2d21ac55 | 14469 | */ |
b0d623f7 A |
14470 | kr = memory_object_data_request( |
14471 | pager, | |
14472 | offset + object->paging_offset, | |
14473 | 0, /* just poke the pager */ | |
14474 | VM_PROT_READ, | |
14475 | NULL); | |
14476 | ||
14477 | vm_object_lock(object); | |
14478 | vm_object_paging_end(object); | |
14479 | ||
14480 | if (kr == KERN_SUCCESS) { | |
14481 | /* the default pager has it */ | |
14482 | disposition |= VM_PAGE_QUERY_PAGE_PAGED_OUT; | |
14483 | break; | |
14484 | } | |
2d21ac55 A |
14485 | } |
14486 | } | |
b0d623f7 | 14487 | |
2d21ac55 A |
14488 | if (object->shadow != VM_OBJECT_NULL) { |
14489 | vm_object_t shadow; | |
14490 | ||
6d2010ae | 14491 | offset += object->vo_shadow_offset; |
2d21ac55 A |
14492 | shadow = object->shadow; |
14493 | ||
14494 | vm_object_lock(shadow); | |
14495 | vm_object_unlock(object); | |
14496 | ||
14497 | object = shadow; | |
14498 | top_object = FALSE; | |
b0d623f7 | 14499 | depth++; |
2d21ac55 | 14500 | } else { |
b0d623f7 A |
14501 | // if (!object->internal) |
14502 | // break; | |
14503 | // retval = KERN_FAILURE; | |
14504 | // goto done_with_object; | |
14505 | break; | |
91447636 | 14506 | } |
91447636 A |
14507 | } |
14508 | } | |
91447636 A |
14509 | /* The ref_count is not strictly accurate: it measures the number */ | |
14510 | /* of entities holding a ref on the object; they may not be mapping */ | |
14511 | /* the object, or may not be mapping the section holding the */ | |
14512 | /* target page, but it's still a ballpark number and, though an over- */ | |
14513 | /* count, it picks up the copy-on-write cases. */ | |
1c79356b | 14514 | |
91447636 A |
14515 | /* We could also get a picture of page sharing from pmap_attributes */ |
14516 | /* but this would undercount, as only faulted-in mappings would */ | |
14517 | /* show up. */ | |
1c79356b | 14518 | |
2d21ac55 | 14519 | if (top_object == TRUE && object->shadow) |
b0d623f7 A |
14520 | disposition |= VM_PAGE_QUERY_PAGE_COPIED; |
14521 | ||
14522 | if (! object->internal) | |
14523 | disposition |= VM_PAGE_QUERY_PAGE_EXTERNAL; | |
2d21ac55 A |
14524 | |
14525 | if (m == VM_PAGE_NULL) | |
b0d623f7 | 14526 | goto done_with_object; |
2d21ac55 | 14527 | |
91447636 | 14528 | if (m->fictitious) { |
b0d623f7 A |
14529 | disposition |= VM_PAGE_QUERY_PAGE_FICTITIOUS; |
14530 | goto done_with_object; | |
91447636 | 14531 | } |
2d21ac55 | 14532 | if (m->dirty || pmap_is_modified(m->phys_page)) |
b0d623f7 | 14533 | disposition |= VM_PAGE_QUERY_PAGE_DIRTY; |
1c79356b | 14534 | |
2d21ac55 | 14535 | if (m->reference || pmap_is_referenced(m->phys_page)) |
b0d623f7 | 14536 | disposition |= VM_PAGE_QUERY_PAGE_REF; |
1c79356b | 14537 | |
2d21ac55 | 14538 | if (m->speculative) |
b0d623f7 | 14539 | disposition |= VM_PAGE_QUERY_PAGE_SPECULATIVE; |
1c79356b | 14540 | |
593a1d5f | 14541 | if (m->cs_validated) |
b0d623f7 | 14542 | disposition |= VM_PAGE_QUERY_PAGE_CS_VALIDATED; |
593a1d5f | 14543 | if (m->cs_tainted) |
b0d623f7 | 14544 | disposition |= VM_PAGE_QUERY_PAGE_CS_TAINTED; |
c18c124e A |
14545 | if (m->cs_nx) |
14546 | disposition |= VM_PAGE_QUERY_PAGE_CS_NX; | |
593a1d5f | 14547 | |
b0d623f7 | 14548 | done_with_object: |
2d21ac55 | 14549 | vm_object_unlock(object); |
b0d623f7 A |
14550 | done: |
14551 | ||
14552 | switch (flavor) { | |
14553 | case VM_PAGE_INFO_BASIC: | |
14554 | basic_info = (vm_page_info_basic_t) info; | |
14555 | basic_info->disposition = disposition; | |
14556 | basic_info->ref_count = ref_count; | |
39236c6e A |
14557 | basic_info->object_id = (vm_object_id_t) (uintptr_t) |
14558 | VM_KERNEL_ADDRPERM(object); | |
6d2010ae A |
14559 | basic_info->offset = |
14560 | (memory_object_offset_t) offset + offset_in_page; | |
b0d623f7 A |
14561 | basic_info->depth = depth; |
14562 | break; | |
14563 | } | |
0c530ab8 | 14564 | |
2d21ac55 | 14565 | return retval; |
91447636 A |
14566 | } |
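/*
 * Usage sketch (hypothetical): fetch the basic info for one page and
 * test whether it is resident and dirty.
 *
 *	vm_page_info_basic_data_t info;
 *	mach_msg_type_number_t count = VM_PAGE_INFO_BASIC_COUNT;
 *
 *	kr = vm_map_page_info(map, addr, VM_PAGE_INFO_BASIC,
 *			      (vm_page_info_t) &info, &count);
 *	if (kr == KERN_SUCCESS &&
 *	    (info.disposition & VM_PAGE_QUERY_PAGE_PRESENT) &&
 *	    (info.disposition & VM_PAGE_QUERY_PAGE_DIRTY)) {
 *		... page is resident and modified ...
 *	}
 */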
14567 | ||
14568 | /* | |
14569 | * vm_map_msync | |
14570 | * | |
14571 | * Synchronizes the specified memory range with its backing store | |
14572 | * image, by either flushing or cleaning the contents to the appropriate | |
14573 | * memory manager, engaging in a memory object synchronize dialog with | |
14574 | * the manager. The client doesn't return until the manager issues the | |
14575 | * m_o_s_completed message. MIG magically converts the user task parameter | |
14576 | * to the task's address map. | |
14577 | * | |
14578 | * interpretation of sync_flags | |
14579 | * VM_SYNC_INVALIDATE - discard pages, only return precious | |
14580 | * pages to manager. | |
14581 | * | |
14582 | * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS) | |
14583 | * - discard pages, write dirty or precious | |
14584 | * pages back to memory manager. | |
14585 | * | |
14586 | * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS | |
14587 | * - write dirty or precious pages back to | |
14588 | * the memory manager. | |
14589 | * | |
14590 | * VM_SYNC_CONTIGUOUS - does everything normally, but if there | |
14591 | * is a hole in the region, and we would | |
14592 | * have returned KERN_SUCCESS, return | |
14593 | * KERN_INVALID_ADDRESS instead. | |
14594 | * | |
14595 | * NOTE | |
14596 | * The memory object attributes have not yet been implemented; this | |
14597 | * function will have to deal with the invalidate attribute. | |
14598 | * | |
14599 | * RETURNS | |
14600 | * KERN_INVALID_TASK Bad task parameter | |
14601 | * KERN_INVALID_ARGUMENT both sync and async were specified. | |
14602 | * KERN_SUCCESS The usual. | |
14603 | * KERN_INVALID_ADDRESS There was a hole in the region. | |
14604 | */ | |
14605 | ||
14606 | kern_return_t | |
14607 | vm_map_msync( | |
14608 | vm_map_t map, | |
14609 | vm_map_address_t address, | |
14610 | vm_map_size_t size, | |
14611 | vm_sync_t sync_flags) | |
14612 | { | |
14613 | msync_req_t msr; | |
14614 | msync_req_t new_msr; | |
14615 | queue_chain_t req_q; /* queue of requests for this msync */ | |
14616 | vm_map_entry_t entry; | |
14617 | vm_map_size_t amount_left; | |
14618 | vm_object_offset_t offset; | |
14619 | boolean_t do_sync_req; | |
91447636 | 14620 | boolean_t had_hole = FALSE; |
2d21ac55 | 14621 | memory_object_t pager; |
3e170ce0 | 14622 | vm_map_offset_t pmap_offset; |
91447636 A |
14623 | |
14624 | if ((sync_flags & VM_SYNC_ASYNCHRONOUS) && | |
14625 | (sync_flags & VM_SYNC_SYNCHRONOUS)) | |
14626 | return(KERN_INVALID_ARGUMENT); | |
1c79356b A |
14627 | |
14628 | /* | |
91447636 | 14629 | * align address and size on page boundaries |
1c79356b | 14630 | */ |
39236c6e A |
14631 | size = (vm_map_round_page(address + size, |
14632 | VM_MAP_PAGE_MASK(map)) - | |
14633 | vm_map_trunc_page(address, | |
14634 | VM_MAP_PAGE_MASK(map))); | |
14635 | address = vm_map_trunc_page(address, | |
14636 | VM_MAP_PAGE_MASK(map)); | |
1c79356b | 14637 | |
91447636 A |
14638 | if (map == VM_MAP_NULL) |
14639 | return(KERN_INVALID_TASK); | |
1c79356b | 14640 | |
91447636 A |
14641 | if (size == 0) |
14642 | return(KERN_SUCCESS); | |
1c79356b | 14643 | |
91447636 A |
14644 | queue_init(&req_q); |
14645 | amount_left = size; | |
1c79356b | 14646 | |
91447636 A |
14647 | while (amount_left > 0) { |
14648 | vm_object_size_t flush_size; | |
14649 | vm_object_t object; | |
1c79356b | 14650 | |
91447636 A |
14651 | vm_map_lock(map); |
14652 | if (!vm_map_lookup_entry(map, | |
3e170ce0 | 14653 | address, |
39236c6e | 14654 | &entry)) { |
91447636 | 14655 | |
2d21ac55 | 14656 | vm_map_size_t skip; |
91447636 A |
14657 | |
14658 | /* | |
14659 | * hole in the address map. | |
14660 | */ | |
14661 | had_hole = TRUE; | |
14662 | ||
14663 | /* | |
14664 | * Check for empty map. | |
14665 | */ | |
14666 | if (entry == vm_map_to_entry(map) && | |
14667 | entry->vme_next == entry) { | |
14668 | vm_map_unlock(map); | |
14669 | break; | |
14670 | } | |
14671 | /* | |
14672 | * Check that we don't wrap and that | |
14673 | * we have at least one real map entry. | |
14674 | */ | |
14675 | if ((map->hdr.nentries == 0) || | |
14676 | (entry->vme_next->vme_start < address)) { | |
14677 | vm_map_unlock(map); | |
14678 | break; | |
14679 | } | |
14680 | /* | |
14681 | * Move up to the next entry if needed | |
14682 | */ | |
14683 | skip = (entry->vme_next->vme_start - address); | |
14684 | if (skip >= amount_left) | |
14685 | amount_left = 0; | |
14686 | else | |
14687 | amount_left -= skip; | |
14688 | address = entry->vme_next->vme_start; | |
14689 | vm_map_unlock(map); | |
14690 | continue; | |
14691 | } | |
1c79356b | 14692 | |
91447636 | 14693 | offset = address - entry->vme_start; |
3e170ce0 | 14694 | pmap_offset = address; |
1c79356b | 14695 | |
91447636 A |
14696 | /* |
14697 | * do we have more to flush than is contained in this | |
14698 | * entry ? | |
14699 | */ | |
14700 | if (amount_left + entry->vme_start + offset > entry->vme_end) { | |
14701 | flush_size = entry->vme_end - | |
2d21ac55 | 14702 | (entry->vme_start + offset); |
91447636 A |
14703 | } else { |
14704 | flush_size = amount_left; | |
14705 | } | |
14706 | amount_left -= flush_size; | |
14707 | address += flush_size; | |
1c79356b | 14708 | |
91447636 A |
14709 | if (entry->is_sub_map == TRUE) { |
14710 | vm_map_t local_map; | |
14711 | vm_map_offset_t local_offset; | |
1c79356b | 14712 | |
3e170ce0 A |
14713 | local_map = VME_SUBMAP(entry); |
14714 | local_offset = VME_OFFSET(entry); | |
91447636 A |
14715 | vm_map_unlock(map); |
14716 | if (vm_map_msync( | |
2d21ac55 A |
14717 | local_map, |
14718 | local_offset, | |
14719 | flush_size, | |
14720 | sync_flags) == KERN_INVALID_ADDRESS) { | |
91447636 A |
14721 | had_hole = TRUE; |
14722 | } | |
14723 | continue; | |
14724 | } | |
3e170ce0 | 14725 | object = VME_OBJECT(entry); |
1c79356b | 14726 | |
91447636 A |
14727 | /* |
14728 | * We can't sync this object if the object has not been | |
14729 | * created yet | |
14730 | */ | |
14731 | if (object == VM_OBJECT_NULL) { | |
14732 | vm_map_unlock(map); | |
14733 | continue; | |
14734 | } | |
3e170ce0 | 14735 | offset += VME_OFFSET(entry); |
1c79356b | 14736 | |
91447636 | 14737 | vm_object_lock(object); |
1c79356b | 14738 | |
91447636 | 14739 | if (sync_flags & (VM_SYNC_KILLPAGES | VM_SYNC_DEACTIVATE)) { |
b0d623f7 A |
14740 | int kill_pages = 0; |
14741 | boolean_t reusable_pages = FALSE; | |
91447636 A |
14742 | |
14743 | if (sync_flags & VM_SYNC_KILLPAGES) { | |
b0d623f7 | 14744 | if (object->ref_count == 1 && !object->shadow) |
91447636 A |
14745 | kill_pages = 1; |
14746 | else | |
14747 | kill_pages = -1; | |
14748 | } | |
14749 | if (kill_pages != -1) | |
3e170ce0 A |
14750 | vm_object_deactivate_pages( |
14751 | object, | |
14752 | offset, | |
14753 | (vm_object_size_t) flush_size, | |
14754 | kill_pages, | |
14755 | reusable_pages, | |
14756 | map->pmap, | |
14757 | pmap_offset); | |
91447636 A |
14758 | vm_object_unlock(object); |
14759 | vm_map_unlock(map); | |
14760 | continue; | |
1c79356b | 14761 | } |
91447636 A |
14762 | /* |
14763 | * We can't sync this object if there isn't a pager. | |
14764 | * Don't bother to sync internal objects, since there can't | |
14765 | * be any "permanent" storage for these objects anyway. | |
14766 | */ | |
14767 | if ((object->pager == MEMORY_OBJECT_NULL) || | |
14768 | (object->internal) || (object->private)) { | |
14769 | vm_object_unlock(object); | |
14770 | vm_map_unlock(map); | |
14771 | continue; | |
14772 | } | |
14773 | /* | |
14774 | * keep reference on the object until syncing is done | |
14775 | */ | |
2d21ac55 | 14776 | vm_object_reference_locked(object); |
91447636 | 14777 | vm_object_unlock(object); |
1c79356b | 14778 | |
91447636 | 14779 | vm_map_unlock(map); |
1c79356b | 14780 | |
91447636 | 14781 | do_sync_req = vm_object_sync(object, |
2d21ac55 A |
14782 | offset, |
14783 | flush_size, | |
14784 | sync_flags & VM_SYNC_INVALIDATE, | |
b0d623f7 A |
14785 | ((sync_flags & VM_SYNC_SYNCHRONOUS) || |
14786 | (sync_flags & VM_SYNC_ASYNCHRONOUS)), | |
2d21ac55 | 14787 | sync_flags & VM_SYNC_SYNCHRONOUS); |
91447636 A |
14788 | /* |
14789 | * only send a m_o_s if we returned pages or if the entry | |
14790 | * is writable (ie dirty pages may have already been sent back) | |
14791 | */ | |
b0d623f7 | 14792 | if (!do_sync_req) { |
2d21ac55 A |
14793 | if ((sync_flags & VM_SYNC_INVALIDATE) && object->resident_page_count == 0) { |
14794 | /* | |
14795 | * clear out the clustering and read-ahead hints | |
14796 | */ | |
14797 | vm_object_lock(object); | |
14798 | ||
14799 | object->pages_created = 0; | |
14800 | object->pages_used = 0; | |
14801 | object->sequential = 0; | |
14802 | object->last_alloc = 0; | |
14803 | ||
14804 | vm_object_unlock(object); | |
14805 | } | |
91447636 A |
14806 | vm_object_deallocate(object); |
14807 | continue; | |
1c79356b | 14808 | } |
91447636 | 14809 | msync_req_alloc(new_msr); |
1c79356b | 14810 | |
91447636 A |
14811 | vm_object_lock(object); |
14812 | offset += object->paging_offset; | |
1c79356b | 14813 | |
91447636 A |
14814 | new_msr->offset = offset; |
14815 | new_msr->length = flush_size; | |
14816 | new_msr->object = object; | |
14817 | new_msr->flag = VM_MSYNC_SYNCHRONIZING; | |
2d21ac55 A |
14818 | re_iterate: |
14819 | ||
14820 | /* | |
14821 | * We can't sync this object if there isn't a pager. The | |
14822 | * pager can disappear anytime we're not holding the object | |
14823 | * lock. So this has to be checked anytime we goto re_iterate. | |
14824 | */ | |
14825 | ||
14826 | pager = object->pager; | |
14827 | ||
14828 | if (pager == MEMORY_OBJECT_NULL) { | |
14829 | vm_object_unlock(object); | |
14830 | vm_object_deallocate(object); | |
39236c6e A |
14831 | msync_req_free(new_msr); |
14832 | new_msr = NULL; | |
2d21ac55 A |
14833 | continue; |
14834 | } | |
14835 | ||
91447636 A |
14836 | queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) { |
14837 | /* | |
14838 | * need to check for overlapping entry, if found, wait | |
14839 | * on overlapping msr to be done, then reiterate | |
14840 | */ | |
14841 | msr_lock(msr); | |
14842 | if (msr->flag == VM_MSYNC_SYNCHRONIZING && | |
14843 | ((offset >= msr->offset && | |
14844 | offset < (msr->offset + msr->length)) || | |
14845 | (msr->offset >= offset && | |
14846 | msr->offset < (offset + flush_size)))) | |
14847 | { | |
14848 | assert_wait((event_t) msr,THREAD_INTERRUPTIBLE); | |
14849 | msr_unlock(msr); | |
14850 | vm_object_unlock(object); | |
14851 | thread_block(THREAD_CONTINUE_NULL); | |
14852 | vm_object_lock(object); | |
14853 | goto re_iterate; | |
14854 | } | |
14855 | msr_unlock(msr); | |
14856 | }/* queue_iterate */ | |
1c79356b | 14857 | |
91447636 | 14858 | queue_enter(&object->msr_q, new_msr, msync_req_t, msr_q); |
2d21ac55 A |
14859 | |
14860 | vm_object_paging_begin(object); | |
91447636 | 14861 | vm_object_unlock(object); |
1c79356b | 14862 | |
91447636 A |
14863 | queue_enter(&req_q, new_msr, msync_req_t, req_q); |
14864 | ||
14865 | (void) memory_object_synchronize( | |
2d21ac55 A |
14866 | pager, |
14867 | offset, | |
14868 | flush_size, | |
14869 | sync_flags & ~VM_SYNC_CONTIGUOUS); | |
14870 | ||
14871 | vm_object_lock(object); | |
14872 | vm_object_paging_end(object); | |
14873 | vm_object_unlock(object); | |
91447636 A |
14874 | }/* while */ |
14875 | ||
14876 | /* | |
14877 | * wait for memory_object_synchronize_completed messages from pager(s) | |
14878 | */ | |
14879 | ||
14880 | while (!queue_empty(&req_q)) { | |
14881 | msr = (msync_req_t)queue_first(&req_q); | |
14882 | msr_lock(msr); | |
14883 | while(msr->flag != VM_MSYNC_DONE) { | |
14884 | assert_wait((event_t) msr, THREAD_INTERRUPTIBLE); | |
14885 | msr_unlock(msr); | |
14886 | thread_block(THREAD_CONTINUE_NULL); | |
14887 | msr_lock(msr); | |
14888 | }/* while */ | |
14889 | queue_remove(&req_q, msr, msync_req_t, req_q); | |
14890 | msr_unlock(msr); | |
14891 | vm_object_deallocate(msr->object); | |
14892 | msync_req_free(msr); | |
14893 | }/* queue_iterate */ | |
14894 | ||
14895 | /* for proper msync() behaviour */ | |
14896 | if (had_hole == TRUE && (sync_flags & VM_SYNC_CONTIGUOUS)) | |
14897 | return(KERN_INVALID_ADDRESS); | |
14898 | ||
14899 | return(KERN_SUCCESS); | |
14900 | }/* vm_msync */ | |
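/*
 * Usage sketch (hypothetical): synchronously flush a mapped-file range
 * back to its pager, failing if the range contains a hole (roughly what
 * msync(addr, len, MS_SYNC) requests).
 *
 *	kr = vm_map_msync(map, addr, len,
 *			  VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
 *	if (kr == KERN_INVALID_ADDRESS)
 *		... there was a hole in the region ...
 */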
1c79356b | 14901 | |
1c79356b | 14902 | /* |
91447636 A |
14903 | * Routine: convert_port_entry_to_map |
14904 | * Purpose: | |
14905 | * Convert from a port specifying an entry or a task | |
14906 | * to a map. Doesn't consume the port ref; produces a map ref, | |
14907 | * which may be null. Unlike convert_port_to_map, the | |
14908 | * port may be backed by a task or a named entry. | |
14909 | * Conditions: | |
14910 | * Nothing locked. | |
1c79356b | 14911 | */ |
1c79356b | 14912 | |
1c79356b | 14913 | |
91447636 A |
14914 | vm_map_t |
14915 | convert_port_entry_to_map( | |
14916 | ipc_port_t port) | |
14917 | { | |
14918 | vm_map_t map; | |
14919 | vm_named_entry_t named_entry; | |
2d21ac55 | 14920 | uint32_t try_failed_count = 0; |
1c79356b | 14921 | |
91447636 A |
14922 | if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { |
14923 | while(TRUE) { | |
14924 | ip_lock(port); | |
14925 | if(ip_active(port) && (ip_kotype(port) | |
2d21ac55 | 14926 | == IKOT_NAMED_ENTRY)) { |
91447636 | 14927 | named_entry = |
2d21ac55 | 14928 | (vm_named_entry_t)port->ip_kobject; |
b0d623f7 | 14929 | if (!(lck_mtx_try_lock(&(named_entry)->Lock))) { |
91447636 | 14930 | ip_unlock(port); |
2d21ac55 A |
14931 | |
14932 | try_failed_count++; | |
14933 | mutex_pause(try_failed_count); | |
91447636 A |
14934 | continue; |
14935 | } | |
14936 | named_entry->ref_count++; | |
b0d623f7 | 14937 | lck_mtx_unlock(&(named_entry)->Lock); |
91447636 A |
14938 | ip_unlock(port); |
14939 | if ((named_entry->is_sub_map) && | |
2d21ac55 A |
14940 | (named_entry->protection |
14941 | & VM_PROT_WRITE)) { | |
91447636 A |
14942 | map = named_entry->backing.map; |
14943 | } else { | |
14944 | mach_destroy_memory_entry(port); | |
14945 | return VM_MAP_NULL; | |
14946 | } | |
14947 | vm_map_reference_swap(map); | |
14948 | mach_destroy_memory_entry(port); | |
14949 | break; | |
14950 | } | |
14951 | else | |
14952 | return VM_MAP_NULL; | |
14953 | } | |
1c79356b | 14954 | } |
91447636 A |
14955 | else |
14956 | map = convert_port_to_map(port); | |
1c79356b | 14957 | |
91447636 A |
14958 | return map; |
14959 | } | |
1c79356b | 14960 | |
91447636 A |
14961 | /* |
14962 | * Routine: convert_port_entry_to_object | |
14963 | * Purpose: | |
14964 | * Convert from a port specifying a named entry to an | |
14965 | * object. Doesn't consume the port ref; produces an object ref, | |
14966 | * which may be null. | |
14967 | * Conditions: | |
14968 | * Nothing locked. | |
14969 | */ | |
1c79356b | 14970 | |
1c79356b | 14971 | |
91447636 A |
14972 | vm_object_t |
14973 | convert_port_entry_to_object( | |
14974 | ipc_port_t port) | |
14975 | { | |
39236c6e | 14976 | vm_object_t object = VM_OBJECT_NULL; |
91447636 | 14977 | vm_named_entry_t named_entry; |
39236c6e A |
14978 | uint32_t try_failed_count = 0; |
14979 | ||
14980 | if (IP_VALID(port) && | |
14981 | (ip_kotype(port) == IKOT_NAMED_ENTRY)) { | |
14982 | try_again: | |
14983 | ip_lock(port); | |
14984 | if (ip_active(port) && | |
14985 | (ip_kotype(port) == IKOT_NAMED_ENTRY)) { | |
14986 | named_entry = (vm_named_entry_t)port->ip_kobject; | |
14987 | if (!(lck_mtx_try_lock(&(named_entry)->Lock))) { | |
91447636 | 14988 | ip_unlock(port); |
39236c6e A |
14989 | try_failed_count++; |
14990 | mutex_pause(try_failed_count); | |
14991 | goto try_again; | |
14992 | } | |
14993 | named_entry->ref_count++; | |
14994 | lck_mtx_unlock(&(named_entry)->Lock); | |
14995 | ip_unlock(port); | |
14996 | if (!(named_entry->is_sub_map) && | |
14997 | !(named_entry->is_pager) && | |
14998 | !(named_entry->is_copy) && | |
14999 | (named_entry->protection & VM_PROT_WRITE)) { | |
15000 | object = named_entry->backing.object; | |
15001 | vm_object_reference(object); | |
91447636 | 15002 | } |
39236c6e | 15003 | mach_destroy_memory_entry(port); |
1c79356b | 15004 | } |
1c79356b | 15005 | } |
91447636 A |
15006 | |
15007 | return object; | |
1c79356b | 15008 | } |
9bccf70c A |
15009 | |
15010 | /* | |
91447636 A |
15011 | * Export routines to other components for the things we access locally through |
15012 | * macros. | |
9bccf70c | 15013 | */ |
91447636 A |
15014 | #undef current_map |
15015 | vm_map_t | |
15016 | current_map(void) | |
9bccf70c | 15017 | { |
91447636 | 15018 | return (current_map_fast()); |
9bccf70c A |
15019 | } |
15020 | ||
15021 | /* | |
15022 | * vm_map_reference: | |
15023 | * | |
15024 | * Most code internal to the osfmk will go through a | |
15025 | * macro defining this. This is always here for the | |
15026 | * use of other kernel components. | |
15027 | */ | |
15028 | #undef vm_map_reference | |
15029 | void | |
15030 | vm_map_reference( | |
15031 | register vm_map_t map) | |
15032 | { | |
15033 | if (map == VM_MAP_NULL) | |
15034 | return; | |
15035 | ||
b0d623f7 | 15036 | lck_mtx_lock(&map->s_lock); |
9bccf70c A |
15037 | #if TASK_SWAPPER |
15038 | assert(map->res_count > 0); | |
15039 | assert(map->ref_count >= map->res_count); | |
15040 | map->res_count++; | |
15041 | #endif | |
15042 | map->ref_count++; | |
b0d623f7 | 15043 | lck_mtx_unlock(&map->s_lock); |
9bccf70c A |
15044 | } |
15045 | ||
15046 | /* | |
15047 | * vm_map_deallocate: | |
15048 | * | |
15049 | * Removes a reference from the specified map, | |
15050 | * destroying it if no references remain. | |
15051 | * The map should not be locked. | |
15052 | */ | |
15053 | void | |
15054 | vm_map_deallocate( | |
15055 | register vm_map_t map) | |
15056 | { | |
15057 | unsigned int ref; | |
15058 | ||
15059 | if (map == VM_MAP_NULL) | |
15060 | return; | |
15061 | ||
b0d623f7 | 15062 | lck_mtx_lock(&map->s_lock); |
9bccf70c A |
15063 | ref = --map->ref_count; |
15064 | if (ref > 0) { | |
15065 | vm_map_res_deallocate(map); | |
b0d623f7 | 15066 | lck_mtx_unlock(&map->s_lock); |
9bccf70c A |
15067 | return; |
15068 | } | |
15069 | assert(map->ref_count == 0); | |
b0d623f7 | 15070 | lck_mtx_unlock(&map->s_lock); |
9bccf70c A |
15071 | |
15072 | #if TASK_SWAPPER | |
15073 | /* | |
15074 | * The map residence count isn't decremented here because | |
15075 | * the vm_map_delete below will traverse the entire map, | |
15076 | * deleting entries, and the residence counts on objects | |
15077 | * and sharing maps will go away then. | |
15078 | */ | |
15079 | #endif | |
15080 | ||
2d21ac55 | 15081 | vm_map_destroy(map, VM_MAP_NO_FLAGS); |
0c530ab8 | 15082 | } |
91447636 | 15083 | |
91447636 | 15084 | |
0c530ab8 A |
15085 | void |
15086 | vm_map_disable_NX(vm_map_t map) | |
15087 | { | |
15088 | if (map == NULL) | |
15089 | return; | |
15090 | if (map->pmap == NULL) | |
15091 | return; | |
15092 | ||
15093 | pmap_disable_NX(map->pmap); | |
15094 | } | |
15095 | ||
6d2010ae A |
15096 | void |
15097 | vm_map_disallow_data_exec(vm_map_t map) | |
15098 | { | |
15099 | if (map == NULL) | |
15100 | return; | |
15101 | ||
15102 | map->map_disallow_data_exec = TRUE; | |
15103 | } | |
15104 | ||
0c530ab8 A |
15105 | /* XXX Consider making these constants (VM_MAX_ADDRESS and MACH_VM_MAX_ADDRESS) |
15106 | * more descriptive. | |
15107 | */ | |
15108 | void | |
15109 | vm_map_set_32bit(vm_map_t map) | |
15110 | { | |
15111 | map->max_offset = (vm_map_offset_t)VM_MAX_ADDRESS; | |
15112 | } | |
15113 | ||
15114 | ||
15115 | void | |
15116 | vm_map_set_64bit(vm_map_t map) | |
15117 | { | |
15118 | map->max_offset = (vm_map_offset_t)MACH_VM_MAX_ADDRESS; | |
15119 | } | |
15120 | ||
15121 | vm_map_offset_t | |
3e170ce0 | 15122 | vm_compute_max_offset(boolean_t is64) |
0c530ab8 A |
15123 | { |
15124 | return (is64 ? (vm_map_offset_t)MACH_VM_MAX_ADDRESS : (vm_map_offset_t)VM_MAX_ADDRESS); | |
15125 | } | |
15126 | ||
39236c6e A |
15127 | uint64_t |
15128 | vm_map_get_max_aslr_slide_pages(vm_map_t map) | |
15129 | { | |
15130 | return (1 << (vm_map_is_64bit(map) ? 16 : 8)); | |
15131 | } | |
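/*
 * Example: a 64-bit map allows 1 << 16 == 65536 distinct slide positions;
 * with 4KB pages that is a 256MB ASLR slide range (1MB for 32-bit maps).
 */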
15132 | ||
0c530ab8 | 15133 | boolean_t |
2d21ac55 A |
15134 | vm_map_is_64bit( |
15135 | vm_map_t map) | |
15136 | { | |
15137 | return map->max_offset > ((vm_map_offset_t)VM_MAX_ADDRESS); | |
15138 | } | |
15139 | ||
15140 | boolean_t | |
316670eb A |
15141 | vm_map_has_hard_pagezero( |
15142 | vm_map_t map, | |
15143 | vm_map_offset_t pagezero_size) | |
0c530ab8 A |
15144 | { |
15145 | /* | |
15146 | * XXX FBDP | |
15147 | * We should lock the VM map (for read) here but we can get away | |
15148 | * with it for now because there can't really be any race condition: | |
15149 | * the VM map's min_offset is changed only when the VM map is created | |
15150 | * and when the zero page is established (when the binary gets loaded), | |
15151 | * and this routine gets called only when the task terminates and the | |
15152 | * VM map is being torn down, and when a new map is created via | |
15153 | * load_machfile()/execve(). | |
15154 | */ | |
316670eb | 15155 | return (map->min_offset >= pagezero_size); |
0c530ab8 A |
15156 | } |
15157 | ||
316670eb A |
15158 | /* |
15159 | * Raise a VM map's maximum offset. | |
15160 | */ | |
15161 | kern_return_t | |
15162 | vm_map_raise_max_offset( | |
15163 | vm_map_t map, | |
15164 | vm_map_offset_t new_max_offset) | |
15165 | { | |
15166 | kern_return_t ret; | |
15167 | ||
15168 | vm_map_lock(map); | |
15169 | ret = KERN_INVALID_ADDRESS; | |
15170 | ||
15171 | if (new_max_offset >= map->max_offset) { | |
15172 | if (!vm_map_is_64bit(map)) { | |
15173 | if (new_max_offset <= (vm_map_offset_t)VM_MAX_ADDRESS) { | |
15174 | map->max_offset = new_max_offset; | |
15175 | ret = KERN_SUCCESS; | |
15176 | } | |
15177 | } else { | |
15178 | if (new_max_offset <= (vm_map_offset_t)MACH_VM_MAX_ADDRESS) { | |
15179 | map->max_offset = new_max_offset; | |
15180 | ret = KERN_SUCCESS; | |
15181 | } | |
15182 | } | |
15183 | } | |
15184 | ||
15185 | vm_map_unlock(map); | |
15186 | return ret; | |
15187 | } | |
15188 | ||
15189 | ||
0c530ab8 A |
15190 | /* |
15191 | * Raise a VM map's minimum offset. | |
15192 | * To strictly enforce "page zero" reservation. | |
15193 | */ | |
15194 | kern_return_t | |
15195 | vm_map_raise_min_offset( | |
15196 | vm_map_t map, | |
15197 | vm_map_offset_t new_min_offset) | |
15198 | { | |
15199 | vm_map_entry_t first_entry; | |
15200 | ||
39236c6e A |
15201 | new_min_offset = vm_map_round_page(new_min_offset, |
15202 | VM_MAP_PAGE_MASK(map)); | |
0c530ab8 A |
15203 | |
15204 | vm_map_lock(map); | |
15205 | ||
15206 | if (new_min_offset < map->min_offset) { | |
15207 | /* | |
15208 | * Can't move min_offset backwards, as that would expose | |
15209 | * a part of the address space that was previously, and for | |
15210 | * possibly good reasons, inaccessible. | |
15211 | */ | |
15212 | vm_map_unlock(map); | |
15213 | return KERN_INVALID_ADDRESS; | |
15214 | } | |
3e170ce0 A |
15215 | if (new_min_offset >= map->max_offset) { |
15216 | /* can't go beyond the end of the address space */ | |
15217 | vm_map_unlock(map); | |
15218 | return KERN_INVALID_ADDRESS; | |
15219 | } | |
0c530ab8 A |
15220 | |
15221 | first_entry = vm_map_first_entry(map); | |
15222 | if (first_entry != vm_map_to_entry(map) && | |
15223 | first_entry->vme_start < new_min_offset) { | |
15224 | /* | |
15225 | * Some memory was already allocated below the new | |
15226 | * minimum offset. It's too late to change it now... | |
15227 | */ | |
15228 | vm_map_unlock(map); | |
15229 | return KERN_NO_SPACE; | |
15230 | } | |
15231 | ||
15232 | map->min_offset = new_min_offset; | |
15233 | ||
3e170ce0 A |
15234 | assert(map->holes_list); |
15235 | map->holes_list->start = new_min_offset; | |
15236 | assert(new_min_offset < map->holes_list->end); | |
15237 | ||
0c530ab8 A |
15238 | vm_map_unlock(map); |
15239 | ||
15240 | return KERN_SUCCESS; | |
15241 | } | |
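/*
 * Usage sketch (hypothetical): enforce a 4GB "page zero" for a 64-bit
 * process by raising the map's minimum offset at exec time.
 *
 *	kr = vm_map_raise_min_offset(map, (vm_map_offset_t)0x100000000ULL);
 *	if (kr == KERN_NO_SPACE)
 *		... something is already mapped below the new minimum ...
 */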
2d21ac55 A |
15242 | |
15243 | /* | |
15244 | * Set the limit on the maximum amount of user wired memory allowed for this map. | |
15245 | * This is basically a copy of the MEMLOCK rlimit value maintained by the BSD side of | |
15246 | * the kernel. The limits are checked in the mach VM side, so we keep a copy so we | |
15247 | * don't have to reach over to the BSD data structures. | |
15248 | */ | |
15249 | ||
15250 | void | |
15251 | vm_map_set_user_wire_limit(vm_map_t map, | |
15252 | vm_size_t limit) | |
15253 | { | |
15254 | map->user_wire_limit = limit; | |
15255 | } | |
593a1d5f | 15256 | |
b0d623f7 A |
15257 | |
15258 | void vm_map_switch_protect(vm_map_t map, | |
15259 | boolean_t val) | |
593a1d5f A |
15260 | { |
15261 | vm_map_lock(map); | |
b0d623f7 | 15262 | map->switch_protect=val; |
593a1d5f | 15263 | vm_map_unlock(map); |
b0d623f7 | 15264 | } |
b7266188 | 15265 | |
39236c6e A |
15266 | /* |
15267 | * IOKit has mapped a region into this map; adjust the pmap's ledgers appropriately. | |
15268 | * phys_footprint is a composite limit consisting of iokit + physmem, so we need to | |
15269 | * bump both counters. | |
15270 | */ | |
15271 | void | |
15272 | vm_map_iokit_mapped_region(vm_map_t map, vm_size_t bytes) | |
15273 | { | |
15274 | pmap_t pmap = vm_map_pmap(map); | |
15275 | ||
fe8ab488 | 15276 | ledger_credit(pmap->ledger, task_ledgers.iokit_mapped, bytes); |
39236c6e A |
15277 | ledger_credit(pmap->ledger, task_ledgers.phys_footprint, bytes); |
15278 | } | |
15279 | ||
15280 | void | |
15281 | vm_map_iokit_unmapped_region(vm_map_t map, vm_size_t bytes) | |
15282 | { | |
15283 | pmap_t pmap = vm_map_pmap(map); | |
15284 | ||
fe8ab488 | 15285 | ledger_debit(pmap->ledger, task_ledgers.iokit_mapped, bytes); |
39236c6e A |
15286 | ledger_debit(pmap->ledger, task_ledgers.phys_footprint, bytes); |
15287 | } | |
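/*
 * Usage sketch (hypothetical): callers are expected to balance the
 * ledger credit and debit around the lifetime of an IOKit mapping.
 *
 *	vm_map_iokit_mapped_region(map, bytes);
 *	... mapping is live ...
 *	vm_map_iokit_unmapped_region(map, bytes);
 */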
15288 | ||
b7266188 A |
15289 | /* Add (generate) code signature for memory range */ |
15290 | #if CONFIG_DYNAMIC_CODE_SIGNING | |
15291 | kern_return_t vm_map_sign(vm_map_t map, | |
15292 | vm_map_offset_t start, | |
15293 | vm_map_offset_t end) | |
15294 | { | |
15295 | vm_map_entry_t entry; | |
15296 | vm_page_t m; | |
15297 | vm_object_t object; | |
15298 | ||
15299 | /* | |
15300 | * Vet all the input parameters and current type and state of the | |
15301 | * underlying object. Return with an error if anything is amiss. | |
15302 | */ | |
15303 | if (map == VM_MAP_NULL) | |
15304 | return(KERN_INVALID_ARGUMENT); | |
15305 | ||
15306 | vm_map_lock_read(map); | |
15307 | ||
15308 | if (!vm_map_lookup_entry(map, start, &entry) || entry->is_sub_map) { | |
15309 | /* | |
15310 | * Must pass a valid non-submap address. | |
15311 | */ | |
15312 | vm_map_unlock_read(map); | |
15313 | return(KERN_INVALID_ADDRESS); | |
15314 | } | |
15315 | ||
15316 | if((entry->vme_start > start) || (entry->vme_end < end)) { | |
15317 | /* | |
15318 | * Map entry doesn't cover the requested range. Not handling | |
15319 | * this situation currently. | |
15320 | */ | |
15321 | vm_map_unlock_read(map); | |
15322 | return(KERN_INVALID_ARGUMENT); | |
15323 | } | |
15324 | ||
3e170ce0 | 15325 | object = VME_OBJECT(entry); |
b7266188 A |
15326 | if (object == VM_OBJECT_NULL) { |
15327 | /* | |
15328 | * Object must already be present or we can't sign. | |
15329 | */ | |
15330 | vm_map_unlock_read(map); | |
15331 | return KERN_INVALID_ARGUMENT; | |
15332 | } | |
15333 | ||
15334 | vm_object_lock(object); | |
15335 | vm_map_unlock_read(map); | |
15336 | ||
15337 | while(start < end) { | |
15338 | uint32_t refmod; | |
15339 | ||
3e170ce0 A |
15340 | m = vm_page_lookup(object, |
15341 | start - entry->vme_start + VME_OFFSET(entry)); | |
b7266188 A |
15342 | if (m==VM_PAGE_NULL) { |
15343 | /* should we try to fault a page here? we can probably | |
15344 | * demand it exists and is locked for this request */ | |
15345 | vm_object_unlock(object); | |
15346 | return KERN_FAILURE; | |
15347 | } | |
15348 | /* deal with special page status */ | |
15349 | if (m->busy || | |
15350 | (m->unusual && (m->error || m->restart || m->private || m->absent))) { | |
15351 | vm_object_unlock(object); | |
15352 | return KERN_FAILURE; | |
15353 | } | |
15354 | ||
15355 | /* Page is OK... now "validate" it */ | |
15356 | /* This is the place where we'll call out to create a code | |
15357 | * directory, later */ | |
15358 | m->cs_validated = TRUE; | |
15359 | ||
15360 | /* The page is now "clean" for codesigning purposes. That means | |
15361 | * we don't consider it as modified (wpmapped) anymore. But | |
15362 | * we'll disconnect the page so we note any future modification | |
15363 | * attempts. */ | |
15364 | m->wpmapped = FALSE; | |
15365 | refmod = pmap_disconnect(m->phys_page); | |
15366 | ||
15367 | /* Pull the dirty status from the pmap, since we cleared the | |
15368 | * wpmapped bit */ | |
15369 | if ((refmod & VM_MEM_MODIFIED) && !m->dirty) { | |
316670eb | 15370 | SET_PAGE_DIRTY(m, FALSE); |
b7266188 A |
15371 | } |
15372 | ||
15373 | /* On to the next page */ | |
15374 | start += PAGE_SIZE; | |
15375 | } | |
15376 | vm_object_unlock(object); | |
15377 | ||
15378 | return KERN_SUCCESS; | |
15379 | } | |
15380 | #endif | |
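/*
 * A hedged usage sketch for vm_map_sign(): a JIT-style caller that has
 * just emitted code into [code_start, code_end) and wants those pages
 * marked cs_validated before executing them.  code_start and code_end
 * are assumptions; note that a single map entry must cover the whole
 * range, per the checks above.
 */
#if 0	/* illustrative sketch, requires CONFIG_DYNAMIC_CODE_SIGNING */
	vm_map_t	map = current_map();
	kern_return_t	kr;

	kr = vm_map_sign(map,
			 vm_map_trunc_page(code_start, vm_map_page_mask(map)),
			 vm_map_round_page(code_end, vm_map_page_mask(map)));
	if (kr != KERN_SUCCESS) {
		/* invalid range, no backing object, or a busy/absent page */
	}
#endif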
6d2010ae | 15381 | |
fe8ab488 A |
15382 | kern_return_t vm_map_partial_reap(vm_map_t map, unsigned int *reclaimed_resident, unsigned int *reclaimed_compressed) |
15383 | { | |
15384 | vm_map_entry_t entry = VM_MAP_ENTRY_NULL; | |
15385 | vm_map_entry_t next_entry; | |
15386 | kern_return_t kr = KERN_SUCCESS; | |
15387 | vm_map_t zap_map; | |
15388 | ||
15389 | vm_map_lock(map); | |
15390 | ||
15391 | /* | |
15392 | * We use a "zap_map" to avoid having to unlock | |
15393 | * the "map" in vm_map_delete(). | |
15394 | */ | |
15395 | zap_map = vm_map_create(PMAP_NULL, | |
15396 | map->min_offset, | |
15397 | map->max_offset, | |
15398 | map->hdr.entries_pageable); | |
15399 | ||
15400 | if (zap_map == VM_MAP_NULL) { | |
 | vm_map_unlock(map);	/* drop the map lock taken above before returning */ | |
15401 | return KERN_RESOURCE_SHORTAGE; | |
15402 | } | |
15403 | ||
15404 | vm_map_set_page_shift(zap_map, | |
15405 | VM_MAP_PAGE_SHIFT(map)); | |
3e170ce0 | 15406 | vm_map_disable_hole_optimization(zap_map); |
fe8ab488 A |
15407 | |
15408 | for (entry = vm_map_first_entry(map); | |
15409 | entry != vm_map_to_entry(map); | |
15410 | entry = next_entry) { | |
15411 | next_entry = entry->vme_next; | |
15412 | ||
3e170ce0 A |
15413 | if (VME_OBJECT(entry) && |
15414 | !entry->is_sub_map && | |
15415 | (VME_OBJECT(entry)->internal == TRUE) && | |
15416 | (VME_OBJECT(entry)->ref_count == 1)) { | |
fe8ab488 | 15417 | |
3e170ce0 A |
15418 | *reclaimed_resident += VME_OBJECT(entry)->resident_page_count; |
15419 | *reclaimed_compressed += vm_compressor_pager_get_count(VME_OBJECT(entry)->pager); | |
fe8ab488 A |
15420 | |
15421 | (void)vm_map_delete(map, | |
15422 | entry->vme_start, | |
15423 | entry->vme_end, | |
15424 | VM_MAP_REMOVE_SAVE_ENTRIES, | |
15425 | zap_map); | |
15426 | } | |
15427 | } | |
15428 | ||
15429 | vm_map_unlock(map); | |
15430 | ||
15431 | /* | |
15432 | * Get rid of the "zap_map" and all the map entries that | |
15433 | * it may still contain. | |
15434 | */ | |
15435 | if (zap_map != VM_MAP_NULL) { | |
15436 | vm_map_destroy(zap_map, VM_MAP_REMOVE_NO_PMAP_CLEANUP); | |
15437 | zap_map = VM_MAP_NULL; | |
15438 | } | |
15439 | ||
15440 | return kr; | |
15441 | } | |
15442 | ||
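/*
 * Sketch of a memory-pressure caller for vm_map_partial_reap().  The
 * task handle is an assumption.  Note that the counters are only ever
 * incremented above, so the caller must zero them first; both are in
 * units of pages.
 */
#if 0	/* illustrative sketch */
	unsigned int reclaimed_resident = 0;
	unsigned int reclaimed_compressed = 0;

	(void)vm_map_partial_reap(task->map,
				  &reclaimed_resident,
				  &reclaimed_compressed);
	/* reclaimed_resident/_compressed now hold page counts */
#endif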
6d2010ae A |
15443 | #if CONFIG_FREEZE |
15444 | ||
15445 | kern_return_t vm_map_freeze_walk( | |
15446 | vm_map_t map, | |
15447 | unsigned int *purgeable_count, | |
15448 | unsigned int *wired_count, | |
15449 | unsigned int *clean_count, | |
15450 | unsigned int *dirty_count, | |
316670eb | 15451 | unsigned int dirty_budget, |
6d2010ae A |
15452 | boolean_t *has_shared) |
15453 | { | |
15454 | vm_map_entry_t entry; | |
15455 | ||
15456 | vm_map_lock_read(map); | |
15457 | ||
15458 | *purgeable_count = *wired_count = *clean_count = *dirty_count = 0; | |
15459 | *has_shared = FALSE; | |
15460 | ||
15461 | for (entry = vm_map_first_entry(map); | |
15462 | entry != vm_map_to_entry(map); | |
15463 | entry = entry->vme_next) { | |
15464 | unsigned int purgeable, clean, dirty, wired; | |
15465 | boolean_t shared; | |
15466 | ||
3e170ce0 | 15467 | if ((VME_OBJECT(entry) == 0) || |
6d2010ae | 15468 | (entry->is_sub_map) || |
3e170ce0 | 15469 | (VME_OBJECT(entry)->phys_contiguous)) { |
6d2010ae A |
15470 | continue; |
15471 | } | |
15472 | ||
3e170ce0 | 15473 | default_freezer_pack(&purgeable, &wired, &clean, &dirty, dirty_budget, &shared, VME_OBJECT(entry), NULL); |
6d2010ae A |
15474 | |
15475 | *purgeable_count += purgeable; | |
15476 | *wired_count += wired; | |
15477 | *clean_count += clean; | |
15478 | *dirty_count += dirty; | |
15479 | ||
15480 | if (shared) { | |
15481 | *has_shared = TRUE; | |
15482 | } | |
316670eb A |
15483 | |
15484 | /* Adjust pageout budget and finish up if reached */ | |
15485 | if (dirty_budget) { | |
15486 | dirty_budget -= dirty; | |
15487 | if (dirty_budget == 0) { | |
15488 | break; | |
15489 | } | |
15490 | } | |
6d2010ae A |
15491 | } |
15492 | ||
15493 | vm_map_unlock_read(map); | |
15494 | ||
15495 | return KERN_SUCCESS; | |
15496 | } | |
15497 | ||
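/*
 * Sketch: using the read-only walk above as a dry run before freezing.
 * The threshold policy and the DIRTY_BUDGET value are invented; the
 * real decision logic lives with the freezer's caller.
 */
#if 0	/* illustrative sketch */
	unsigned int purgeable, wired, clean, dirty;
	boolean_t has_shared;

	if (vm_map_freeze_walk(map, &purgeable, &wired, &clean, &dirty,
			       DIRTY_BUDGET, &has_shared) == KERN_SUCCESS &&
	    !has_shared && dirty <= DIRTY_BUDGET) {
		/* worth committing: proceed to vm_map_freeze() */
	}
#endif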
3e170ce0 A |
15498 | int c_freezer_swapout_count; |
15499 | int c_freezer_compression_count = 0; | |
15500 | AbsoluteTime c_freezer_last_yield_ts = 0; | |
15501 | ||
6d2010ae A |
15502 | kern_return_t vm_map_freeze( |
15503 | vm_map_t map, | |
15504 | unsigned int *purgeable_count, | |
15505 | unsigned int *wired_count, | |
15506 | unsigned int *clean_count, | |
15507 | unsigned int *dirty_count, | |
316670eb | 15508 | unsigned int dirty_budget, |
6d2010ae A |
15509 | boolean_t *has_shared) |
15510 | { | |
39236c6e A |
15511 | vm_map_entry_t entry2 = VM_MAP_ENTRY_NULL; |
15512 | kern_return_t kr = KERN_SUCCESS; | |
15513 | boolean_t default_freezer_active = TRUE; | |
6d2010ae A |
15514 | |
15515 | *purgeable_count = *wired_count = *clean_count = *dirty_count = 0; | |
15516 | *has_shared = FALSE; | |
15517 | ||
6d2010ae A |
15518 | /* |
15519 | * We need the exclusive lock here so that we can | |
15520 | * block any page faults or lookups while we are | |
15521 | * in the middle of freezing this vm map. | |
15522 | */ | |
15523 | vm_map_lock(map); | |
15524 | ||
39236c6e A |
15525 | if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) { |
15526 | default_freezer_active = FALSE; | |
3e170ce0 A |
15527 | |
15528 | if (vm_compressor_low_on_space() || vm_swap_low_on_space()) { | |
15529 | kr = KERN_NO_SPACE; | |
15530 | goto done; | |
15531 | } | |
316670eb | 15532 | } |
3e170ce0 | 15533 | assert(default_freezer_active == FALSE); /* the compressor path is expected to be active, so the default-freezer branches below are effectively dead */ |
316670eb | 15534 | |
39236c6e A |
15535 | if (default_freezer_active) { |
15536 | if (map->default_freezer_handle == NULL) { | |
15537 | map->default_freezer_handle = default_freezer_handle_allocate(); | |
15538 | } | |
15539 | ||
15540 | if ((kr = default_freezer_handle_init(map->default_freezer_handle)) != KERN_SUCCESS) { | |
15541 | /* | |
15542 | * This can happen if the default_freezer_handle passed in is NULL, | |
15543 | * or if a table has already been allocated and associated | |
15544 | * with this handle, i.e. the map is already frozen. | |
15545 | */ | |
15546 | goto done; | |
15547 | } | |
6d2010ae | 15548 | } |
3e170ce0 A |
15549 | c_freezer_compression_count = 0; |
15550 | clock_get_uptime(&c_freezer_last_yield_ts); | |
15551 | ||
6d2010ae A |
15552 | for (entry2 = vm_map_first_entry(map); |
15553 | entry2 != vm_map_to_entry(map); | |
15554 | entry2 = entry2->vme_next) { | |
15555 | ||
3e170ce0 | 15556 | vm_object_t src_object = VME_OBJECT(entry2); |
6d2010ae | 15557 | |
3e170ce0 A |
15558 | if (VME_OBJECT(entry2) && |
15559 | !entry2->is_sub_map && | |
15560 | !VME_OBJECT(entry2)->phys_contiguous) { | |
39236c6e A |
15561 | /* If eligible, scan the entry, moving eligible pages over to our parent object */ |
15562 | if (default_freezer_active) { | |
15563 | unsigned int purgeable, clean, dirty, wired; | |
15564 | boolean_t shared; | |
316670eb | 15565 | |
39236c6e A |
15566 | default_freezer_pack(&purgeable, &wired, &clean, &dirty, dirty_budget, &shared, |
15567 | src_object, map->default_freezer_handle); | |
15568 | ||
15569 | *purgeable_count += purgeable; | |
15570 | *wired_count += wired; | |
15571 | *clean_count += clean; | |
15572 | *dirty_count += dirty; | |
15573 | ||
15574 | /* Adjust pageout budget and finish up if reached */ | |
15575 | if (dirty_budget) { | |
15576 | dirty_budget -= dirty; | |
15577 | if (dirty_budget == 0) { | |
15578 | break; | |
15579 | } | |
316670eb | 15580 | } |
6d2010ae | 15581 | |
39236c6e A |
15582 | if (shared) { |
15583 | *has_shared = TRUE; | |
15584 | } | |
15585 | } else { | |
3e170ce0 A |
15586 | if (VME_OBJECT(entry2)->internal == TRUE) { |
15587 | ||
15588 | if (DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED) { | |
15589 | /* | |
15590 | * Pages belonging to this object could be swapped to disk. | |
15591 | * Make sure it's not a shared object because we could end | |
15592 | * up just bringing it back in again. | |
15593 | */ | |
15594 | if (VME_OBJECT(entry2)->ref_count > 1) { | |
15595 | continue; | |
15596 | } | |
15597 | } | |
15598 | vm_object_compressed_freezer_pageout(VME_OBJECT(entry2)); | |
15599 | } | |
15600 | ||
15601 | if (vm_compressor_low_on_space() || vm_swap_low_on_space()) { | |
15602 | kr = KERN_NO_SPACE; | |
15603 | break; | |
39236c6e | 15604 | } |
6d2010ae A |
15605 | } |
15606 | } | |
15607 | } | |
15608 | ||
39236c6e A |
15609 | if (default_freezer_active) { |
15610 | /* Finally, throw out the pages to swap */ | |
15611 | default_freezer_pageout(map->default_freezer_handle); | |
15612 | } | |
6d2010ae A |
15613 | |
15614 | done: | |
15615 | vm_map_unlock(map); | |
6d2010ae | 15616 | |
3e170ce0 A |
15617 | if (!default_freezer_active) { |
15618 | vm_object_compressed_freezer_done(); | |
15619 | } | |
15620 | if (DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPBACKED) { | |
15621 | /* | |
15622 | * reset the counter tracking the # of swapped c_segs | |
15623 | * because we are now done with this freeze session and task. | |
15624 | */ | |
15625 | c_freezer_swapout_count = 0; | |
15626 | } | |
6d2010ae A |
15627 | return kr; |
15628 | } | |
15629 | ||
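/*
 * Sketch of a freeze/thaw cycle.  With the compressed pager active,
 * vm_map_thaw() below is effectively a no-op and pages return on
 * demand; with the default freezer, the thaw unpacks the frozen table.
 * The budget and counter variables are caller-chosen.
 */
#if 0	/* illustrative sketch, requires CONFIG_FREEZE */
	unsigned int purgeable, wired, clean, dirty;
	boolean_t has_shared;

	if (vm_map_freeze(map, &purgeable, &wired, &clean, &dirty,
			  dirty_budget, &has_shared) == KERN_SUCCESS) {
		/* ... task stays suspended while frozen ... */
		(void)vm_map_thaw(map);
	}
#endif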
316670eb | 15630 | kern_return_t |
6d2010ae A |
15631 | vm_map_thaw( |
15632 | vm_map_t map) | |
15633 | { | |
316670eb | 15634 | kern_return_t kr = KERN_SUCCESS; |
6d2010ae | 15635 | |
39236c6e A |
15636 | if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) { |
15637 | /* | |
15638 | * We thaw on demand when the compressed pager is active. | |
15639 | */ | |
15640 | return kr; | |
15641 | } | |
15642 | ||
6d2010ae A |
15643 | vm_map_lock(map); |
15644 | ||
316670eb | 15645 | if (map->default_freezer_handle == NULL) { |
6d2010ae A |
15646 | /* |
15647 | * This map is not in a frozen state. | |
15648 | */ | |
316670eb | 15649 | kr = KERN_FAILURE; |
6d2010ae A |
15650 | goto out; |
15651 | } | |
6d2010ae | 15652 | |
39236c6e | 15653 | kr = default_freezer_unpack(map->default_freezer_handle); |
6d2010ae A |
15654 | out: |
15655 | vm_map_unlock(map); | |
316670eb A |
15656 | |
15657 | return kr; | |
6d2010ae A |
15658 | } |
15659 | #endif | |
e2d2fc5c | 15660 | |
e2d2fc5c A |
15661 | /* |
15662 | * vm_map_entry_should_cow_for_true_share: | |
15663 | * | |
15664 | * Determines if the map entry should be clipped and setup for copy-on-write | |
15665 | * to avoid applying "true_share" to a large VM object when only a subset is | |
15666 | * targeted. | |
15667 | * | |
15668 | * For now, we target only the map entries created for the Objective C | |
15669 | * Garbage Collector, which initially have the following properties: | |
15670 | * - alias == VM_MEMORY_MALLOC | |
15671 | * - wired_count == 0 | |
15672 | * - !needs_copy | |
15673 | * and a VM object with: | |
15674 | * - internal | |
15675 | * - copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC | |
15676 | * - !true_share | |
15677 | * - vo_size == ANON_CHUNK_SIZE | |
3e170ce0 A |
15678 | * |
15679 | * Only non-kernel map entries. | |
e2d2fc5c A |
15680 | */ |
15681 | boolean_t | |
15682 | vm_map_entry_should_cow_for_true_share( | |
15683 | vm_map_entry_t entry) | |
15684 | { | |
15685 | vm_object_t object; | |
15686 | ||
15687 | if (entry->is_sub_map) { | |
15688 | /* entry does not point at a VM object */ | |
15689 | return FALSE; | |
15690 | } | |
15691 | ||
15692 | if (entry->needs_copy) { | |
15693 | /* already set for copy_on_write: done! */ | |
15694 | return FALSE; | |
15695 | } | |
15696 | ||
3e170ce0 A |
15697 | if (VME_ALIAS(entry) != VM_MEMORY_MALLOC && |
15698 | VME_ALIAS(entry) != VM_MEMORY_MALLOC_SMALL) { | |
fe8ab488 | 15699 | /* not a malloc heap or Obj-C Garbage Collector heap */ |
e2d2fc5c A |
15700 | return FALSE; |
15701 | } | |
15702 | ||
15703 | if (entry->wired_count) { | |
15704 | /* wired: can't change the map entry... */ | |
fe8ab488 | 15705 | vm_counters.should_cow_but_wired++; |
e2d2fc5c A |
15706 | return FALSE; |
15707 | } | |
15708 | ||
3e170ce0 | 15709 | object = VME_OBJECT(entry); |
e2d2fc5c A |
15710 | |
15711 | if (object == VM_OBJECT_NULL) { | |
15712 | /* no object yet... */ | |
15713 | return FALSE; | |
15714 | } | |
15715 | ||
15716 | if (!object->internal) { | |
15717 | /* not an internal object */ | |
15718 | return FALSE; | |
15719 | } | |
15720 | ||
15721 | if (object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC) { | |
15722 | /* not the default copy strategy */ | |
15723 | return FALSE; | |
15724 | } | |
15725 | ||
15726 | if (object->true_share) { | |
15727 | /* already true_share: too late to avoid it */ | |
15728 | return FALSE; | |
15729 | } | |
15730 | ||
3e170ce0 | 15731 | if (VME_ALIAS(entry) == VM_MEMORY_MALLOC && |
fe8ab488 A |
15732 | object->vo_size != ANON_CHUNK_SIZE) { |
15733 | /* ... not an object created for the ObjC Garbage Collector */ | |
15734 | return FALSE; | |
15735 | } | |
15736 | ||
3e170ce0 | 15737 | if (VME_ALIAS(entry) == VM_MEMORY_MALLOC_SMALL && |
fe8ab488 A |
15738 | object->vo_size != 2048 * 4096) { |
15739 | /* ... not a "MALLOC_SMALL" heap */ | |
e2d2fc5c A |
15740 | return FALSE; |
15741 | } | |
15742 | ||
15743 | /* | |
15744 | * All the criteria match: we have a large object being targeted for "true_share". | |
15745 | * To limit the adverse side effects of "true_share", tell the caller to | |
15746 | * try to avoid setting up the entire object for "true_share" by clipping the | |
15747 | * targeted range and setting it up for copy-on-write instead. | |
15748 | */ | |
15749 | return TRUE; | |
15750 | } | |
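/*
 * Sketch of the caller-side pattern this predicate enables: clip the
 * entry down to the targeted range and mark just that clip needs_copy,
 * instead of letting "true_share" spread to the whole object.  The
 * range bounds are assumptions and the map must be locked for write.
 */
#if 0	/* illustrative sketch */
	if (vm_map_entry_should_cow_for_true_share(entry)) {
		vm_map_clip_start(map, entry,
		    vm_map_trunc_page(range_start, VM_MAP_PAGE_MASK(map)));
		vm_map_clip_end(map, entry,
		    vm_map_round_page(range_end, VM_MAP_PAGE_MASK(map)));
		entry->needs_copy = TRUE;	/* copy-on-write this clip only */
	}
#endif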
39236c6e A |
15751 | |
15752 | vm_map_offset_t | |
15753 | vm_map_round_page_mask( | |
15754 | vm_map_offset_t offset, | |
15755 | vm_map_offset_t mask) | |
15756 | { | |
15757 | return VM_MAP_ROUND_PAGE(offset, mask); | |
15758 | } | |
15759 | ||
15760 | vm_map_offset_t | |
15761 | vm_map_trunc_page_mask( | |
15762 | vm_map_offset_t offset, | |
15763 | vm_map_offset_t mask) | |
15764 | { | |
15765 | return VM_MAP_TRUNC_PAGE(offset, mask); | |
15766 | } | |
15767 | ||
3e170ce0 A |
15768 | boolean_t |
15769 | vm_map_page_aligned( | |
15770 | vm_map_offset_t offset, | |
15771 | vm_map_offset_t mask) | |
15772 | { | |
15773 | return ((offset) & mask) == 0; | |
15774 | } | |
15775 | ||
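/*
 * Worked example of the mask-based helpers above, assuming a 16 KB page
 * map (page_shift 14, mask 0x3fff):
 *
 *   vm_map_trunc_page_mask(0x5123, 0x3fff) == 0x4000
 *   vm_map_round_page_mask(0x5123, 0x3fff) == 0x8000
 *   vm_map_page_aligned(0x8000, 0x3fff)    == TRUE
 *
 * i.e. trunc clears the low bits, and round is trunc(offset + mask).
 */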
39236c6e A |
15776 | int |
15777 | vm_map_page_shift( | |
15778 | vm_map_t map) | |
15779 | { | |
15780 | return VM_MAP_PAGE_SHIFT(map); | |
15781 | } | |
15782 | ||
15783 | int | |
15784 | vm_map_page_size( | |
15785 | vm_map_t map) | |
15786 | { | |
15787 | return VM_MAP_PAGE_SIZE(map); | |
15788 | } | |
15789 | ||
3e170ce0 | 15790 | vm_map_offset_t |
39236c6e A |
15791 | vm_map_page_mask( |
15792 | vm_map_t map) | |
15793 | { | |
15794 | return VM_MAP_PAGE_MASK(map); | |
15795 | } | |
15796 | ||
15797 | kern_return_t | |
15798 | vm_map_set_page_shift( | |
15799 | vm_map_t map, | |
15800 | int pageshift) | |
15801 | { | |
15802 | if (map->hdr.nentries != 0) { | |
15803 | /* too late to change page size */ | |
15804 | return KERN_FAILURE; | |
15805 | } | |
15806 | ||
15807 | map->hdr.page_shift = pageshift; | |
15808 | ||
15809 | return KERN_SUCCESS; | |
15810 | } | |
15811 | ||
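/*
 * Sketch: the shift can only change while the map is empty
 * (hdr.nentries == 0), so the usual pattern mirrors the zap_map setup
 * in vm_map_partial_reap() above: set it right after vm_map_create().
 * parent_map and the offset bounds are assumptions.
 */
#if 0	/* illustrative sketch */
	vm_map_t new_map;

	new_map = vm_map_create(PMAP_NULL, min_off, max_off, TRUE);
	if (new_map != VM_MAP_NULL) {
		(void)vm_map_set_page_shift(new_map,
					    VM_MAP_PAGE_SHIFT(parent_map));
	}
#endif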
fe8ab488 A |
15812 | int |
15813 | vm_map_purge( | |
15814 | vm_map_t map) | |
15815 | { | |
15816 | int num_object_purged; | |
15817 | vm_map_entry_t entry; | |
15818 | vm_map_offset_t next_address; | |
15819 | vm_object_t object; | |
15820 | int state; | |
15821 | kern_return_t kr; | |
15822 | ||
15823 | num_object_purged = 0; | |
15824 | ||
15825 | vm_map_lock_read(map); | |
15826 | entry = vm_map_first_entry(map); | |
15827 | while (entry != vm_map_to_entry(map)) { | |
15828 | if (entry->is_sub_map) { | |
15829 | goto next; | |
15830 | } | |
15831 | if (! (entry->protection & VM_PROT_WRITE)) { | |
15832 | goto next; | |
15833 | } | |
3e170ce0 | 15834 | object = VME_OBJECT(entry); |
fe8ab488 A |
15835 | if (object == VM_OBJECT_NULL) { |
15836 | goto next; | |
15837 | } | |
15838 | if (object->purgable != VM_PURGABLE_VOLATILE) { | |
15839 | goto next; | |
15840 | } | |
15841 | ||
15842 | vm_object_lock(object); | |
15843 | #if 0 /* disabled: would restrict purging to entries mapping the whole object */ | |
3e170ce0 | 15844 | if (VME_OFFSET(entry) != 0 || |
fe8ab488 A |
15845 | (entry->vme_end - entry->vme_start) != object->vo_size) { |
15846 | vm_object_unlock(object); | |
15847 | goto next; | |
15848 | } | |
15849 | #endif | |
15850 | next_address = entry->vme_end; | |
15851 | vm_map_unlock_read(map); | |
15852 | state = VM_PURGABLE_EMPTY; | |
15853 | kr = vm_object_purgable_control(object, | |
15854 | VM_PURGABLE_SET_STATE, | |
15855 | &state); | |
15856 | if (kr == KERN_SUCCESS) { | |
15857 | num_object_purged++; | |
15858 | } | |
15859 | vm_object_unlock(object); | |
15860 | ||
15861 | vm_map_lock_read(map); | |
15862 | if (vm_map_lookup_entry(map, next_address, &entry)) { | |
15863 | continue; | |
15864 | } | |
15865 | next: | |
15866 | entry = entry->vme_next; | |
15867 | } | |
15868 | vm_map_unlock_read(map); | |
15869 | ||
15870 | return num_object_purged; | |
15871 | } | |
15872 | ||
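/*
 * Sketch of a pressure-time caller: vm_map_purge() walks the map and
 * empties every writable VM_PURGABLE_VOLATILE object, returning the
 * number of objects (not pages) purged.  The task handle is an
 * assumption.
 */
#if 0	/* illustrative sketch */
	int purged;

	purged = vm_map_purge(task->map);
	/* 'purged' counts objects forced to VM_PURGABLE_EMPTY */
#endif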
39236c6e A |
15873 | kern_return_t |
15874 | vm_map_query_volatile( | |
15875 | vm_map_t map, | |
15876 | mach_vm_size_t *volatile_virtual_size_p, | |
15877 | mach_vm_size_t *volatile_resident_size_p, | |
3e170ce0 A |
15878 | mach_vm_size_t *volatile_compressed_size_p, |
15879 | mach_vm_size_t *volatile_pmap_size_p, | |
15880 | mach_vm_size_t *volatile_compressed_pmap_size_p) | |
39236c6e A |
15881 | { |
15882 | mach_vm_size_t volatile_virtual_size; | |
15883 | mach_vm_size_t volatile_resident_count; | |
3e170ce0 | 15884 | mach_vm_size_t volatile_compressed_count; |
39236c6e | 15885 | mach_vm_size_t volatile_pmap_count; |
3e170ce0 | 15886 | mach_vm_size_t volatile_compressed_pmap_count; |
39236c6e | 15887 | mach_vm_size_t resident_count; |
3e170ce0 | 15888 | unsigned int compressed_count; |
39236c6e A |
15889 | vm_map_entry_t entry; |
15890 | vm_object_t object; | |
15891 | ||
15892 | /* map should be locked by caller */ | |
15893 | ||
15894 | volatile_virtual_size = 0; | |
15895 | volatile_resident_count = 0; | |
3e170ce0 | 15896 | volatile_compressed_count = 0; |
39236c6e | 15897 | volatile_pmap_count = 0; |
3e170ce0 | 15898 | volatile_compressed_pmap_count = 0; |
39236c6e A |
15899 | |
15900 | for (entry = vm_map_first_entry(map); | |
15901 | entry != vm_map_to_entry(map); | |
15902 | entry = entry->vme_next) { | |
15903 | if (entry->is_sub_map) { | |
15904 | continue; | |
15905 | } | |
15906 | if (! (entry->protection & VM_PROT_WRITE)) { | |
15907 | continue; | |
15908 | } | |
3e170ce0 | 15909 | object = VME_OBJECT(entry); |
39236c6e A |
15910 | if (object == VM_OBJECT_NULL) { |
15911 | continue; | |
15912 | } | |
3e170ce0 A |
15913 | if (object->purgable != VM_PURGABLE_VOLATILE && |
15914 | object->purgable != VM_PURGABLE_EMPTY) { | |
39236c6e A |
15915 | continue; |
15916 | } | |
3e170ce0 | 15917 | if (VME_OFFSET(entry)) { |
39236c6e A |
15918 | /* |
15919 | * If the map entry has been split and the object now | |
15920 | * appears several times in the VM map, we don't want | |
15921 | * to count the object's resident_page_count more than | |
15922 | * once. We count it only for the first map entry, the one | |
15923 | * starting at offset 0, and ignore the other VM map entries. | |
15924 | */ | |
15925 | continue; | |
15926 | } | |
15927 | resident_count = object->resident_page_count; | |
3e170ce0 | 15928 | if ((VME_OFFSET(entry) / PAGE_SIZE) >= resident_count) { |
39236c6e A |
15929 | resident_count = 0; |
15930 | } else { | |
3e170ce0 | 15931 | resident_count -= (VME_OFFSET(entry) / PAGE_SIZE); |
39236c6e A |
15932 | } |
15933 | ||
15934 | volatile_virtual_size += entry->vme_end - entry->vme_start; | |
15935 | volatile_resident_count += resident_count; | |
3e170ce0 A |
15936 | if (object->pager) { |
15937 | volatile_compressed_count += | |
15938 | vm_compressor_pager_get_count(object->pager); | |
15939 | } | |
15940 | compressed_count = 0; | |
39236c6e A |
15941 | volatile_pmap_count += pmap_query_resident(map->pmap, |
15942 | entry->vme_start, | |
3e170ce0 A |
15943 | entry->vme_end, |
15944 | &compressed_count); | |
15945 | volatile_compressed_pmap_count += compressed_count; | |
39236c6e A |
15946 | } |
15947 | ||
15948 | /* map is still locked on return */ | |
15949 | ||
15950 | *volatile_virtual_size_p = volatile_virtual_size; | |
15951 | *volatile_resident_size_p = volatile_resident_count * PAGE_SIZE; | |
3e170ce0 | 15952 | *volatile_compressed_size_p = volatile_compressed_count * PAGE_SIZE; |
39236c6e | 15953 | *volatile_pmap_size_p = volatile_pmap_count * PAGE_SIZE; |
3e170ce0 | 15954 | *volatile_compressed_pmap_size_p = volatile_compressed_pmap_count * PAGE_SIZE; |
39236c6e A |
15955 | |
15956 | return KERN_SUCCESS; | |
15957 | } | |
fe8ab488 | 15958 | |
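/*
 * Sketch of a caller honoring the locking contract noted above: the
 * map is locked before the call and remains locked on return; a read
 * lock appears sufficient for this read-only traversal.  All five
 * outputs are byte sizes.
 */
#if 0	/* illustrative sketch */
	mach_vm_size_t virt, res, comp, pmapped, pmapped_comp;

	vm_map_lock_read(map);
	(void)vm_map_query_volatile(map, &virt, &res, &comp,
				    &pmapped, &pmapped_comp);
	vm_map_unlock_read(map);
#endif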
3e170ce0 A |
15959 | void |
15960 | vm_map_sizes(vm_map_t map, | |
15961 | vm_map_size_t * psize, | |
15962 | vm_map_size_t * pfree, | |
15963 | vm_map_size_t * plargest_free) | |
15964 | { | |
15965 | vm_map_entry_t entry; | |
15966 | vm_map_offset_t prev; | |
15967 | vm_map_size_t free, total_free, largest_free; | |
15968 | boolean_t end; | |
15969 | ||
15970 | total_free = largest_free = 0; | |
15971 | ||
15972 | vm_map_lock_read(map); | |
15973 | if (psize) *psize = map->max_offset - map->min_offset; | |
15974 | ||
15975 | prev = map->min_offset; | |
15976 | for (entry = vm_map_first_entry(map);; entry = entry->vme_next) | |
15977 | { | |
15978 | end = (entry == vm_map_to_entry(map)); | |
15979 | ||
15980 | if (end) free = entry->vme_end - prev; | |
15981 | else free = entry->vme_start - prev; | |
15982 | ||
15983 | total_free += free; | |
15984 | if (free > largest_free) largest_free = free; | |
15985 | ||
15986 | if (end) break; | |
15987 | prev = entry->vme_end; | |
15988 | } | |
15989 | vm_map_unlock_read(map); | |
15990 | if (pfree) *pfree = total_free; | |
15991 | if (plargest_free) *plargest_free = largest_free; | |
15992 | } | |
15993 | ||
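/*
 * Sketch: the out-parameters above are individually optional (each is
 * NULL-checked), so a caller needing only the largest free gap can
 * pass NULL for the rest.
 */
#if 0	/* illustrative sketch */
	vm_map_size_t largest_free;

	vm_map_sizes(map, NULL, NULL, &largest_free);
	/* largest_free: biggest hole between entries, in bytes */
#endif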
fe8ab488 A |
15994 | #if VM_SCAN_FOR_SHADOW_CHAIN |
15995 | int vm_map_shadow_max(vm_map_t map); | |
15996 | int vm_map_shadow_max( | |
15997 | vm_map_t map) | |
15998 | { | |
15999 | int shadows, shadows_max; | |
16000 | vm_map_entry_t entry; | |
16001 | vm_object_t object, next_object; | |
16002 | ||
16003 | if (map == NULL) | |
16004 | return 0; | |
16005 | ||
16006 | shadows_max = 0; | |
16007 | ||
16008 | vm_map_lock_read(map); | |
16009 | ||
16010 | for (entry = vm_map_first_entry(map); | |
16011 | entry != vm_map_to_entry(map); | |
16012 | entry = entry->vme_next) { | |
16013 | if (entry->is_sub_map) { | |
16014 | continue; | |
16015 | } | |
3e170ce0 | 16016 | object = VME_OBJECT(entry); |
fe8ab488 A |
16017 | if (object == NULL) { |
16018 | continue; | |
16019 | } | |
16020 | vm_object_lock_shared(object); | |
16021 | for (shadows = 0; | |
16022 | object->shadow != NULL; | |
16023 | shadows++, object = next_object) { | |
16024 | next_object = object->shadow; | |
16025 | vm_object_lock_shared(next_object); | |
16026 | vm_object_unlock(object); | |
16027 | } | |
16028 | vm_object_unlock(object); | |
16029 | if (shadows > shadows_max) { | |
16030 | shadows_max = shadows; | |
16031 | } | |
16032 | } | |
16033 | ||
16034 | vm_map_unlock_read(map); | |
16035 | ||
16036 | return shadows_max; | |
16037 | } | |
16038 | #endif /* VM_SCAN_FOR_SHADOW_CHAIN */ |
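/*
 * Sketch: the debug-only probe above reports the deepest shadow chain
 * in a map; a diagnostic caller might flag tasks above some threshold.
 * The threshold value is invented.
 */
#if 0	/* illustrative sketch, requires VM_SCAN_FOR_SHADOW_CHAIN */
	if (vm_map_shadow_max(task->map) > 10) {
		/* unusually deep copy-on-write shadow chain */
	}
#endif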