/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the
 * kernel task can be 32 or 64. mach_vm_allocate makes sense everywhere, and
 * is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly, because
 * the calling task and kernel_task are unlikely to use the same size
 * parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
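
/*
 * Illustrative sketch (editor's addition, not compiled as part of this
 * file): how a user-space caller reaches the subsystem 4800 entry points
 * below through the preferred "wide" mach_vm_* API.  The target task here
 * is the caller itself; any task port with the right rights would do.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_vm.h>
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t size = 4096;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr, size,
 *	    VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS) {
 *		// ...use the zero-filled memory at addr...
 *		(void) mach_vm_deallocate(mach_task_self(), addr, size);
 *	}
 */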
#include <debug.h>

#include <vm_cpm.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>    /* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>     /* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/memory_entry_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_init.h>

#include <san/kasan.h>

#include <libkern/OSDebug.h>
#include <IOKit/IOBSD.h>

vm_size_t upl_offset_to_pagelist = 0;

#if VM_CPM
#include <vm/cpm.h>
#endif  /* VM_CPM */

/*
 * mach_vm_allocate allocates "zero fill" memory in the specified
 * map.
 */
kern_return_t
mach_vm_allocate_external(
    vm_map_t                map,
    mach_vm_offset_t        *addr,
    mach_vm_size_t          size,
    int                     flags)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return mach_vm_allocate_kernel(map, addr, size, flags, tag);
}

kern_return_t
mach_vm_allocate_kernel(
    vm_map_t                map,
    mach_vm_offset_t        *addr,
    mach_vm_size_t          size,
    int                     flags,
    vm_tag_t                tag)
{
    vm_map_offset_t map_addr;
    vm_map_size_t   map_size;
    kern_return_t   result;
    boolean_t       anywhere;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_ALLOCATE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }
    if (size == 0) {
        *addr = 0;
        return KERN_SUCCESS;
    }

    anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
    if (anywhere) {
        /*
         * No specific address requested, so start candidate address
         * search at the minimum address in the map.  However, if that
         * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
         * allocations of PAGEZERO to explicit requests since its
         * normal use is to catch dereferences of NULL and many
         * applications also treat pointers with a value of 0 as
         * special and suddenly having address 0 contain usable
         * memory would tend to confuse those applications.
         */
        map_addr = vm_map_min(map);
        if (map_addr == 0) {
            map_addr += VM_MAP_PAGE_SIZE(map);
        }
    } else {
        map_addr = vm_map_trunc_page(*addr,
            VM_MAP_PAGE_MASK(map));
    }
    map_size = vm_map_round_page(size,
        VM_MAP_PAGE_MASK(map));
    if (map_size == 0) {
        return KERN_INVALID_ARGUMENT;
    }

    result = vm_map_enter(
        map,
        &map_addr,
        map_size,
        (vm_map_offset_t)0,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        VM_OBJECT_NULL,
        (vm_object_offset_t)0,
        FALSE,
        VM_PROT_DEFAULT,
        VM_PROT_ALL,
        VM_INHERIT_DEFAULT);

    *addr = map_addr;
    return result;
}

/*
 * vm_allocate
 * Legacy routine that allocates "zero fill" memory in the specified
 * map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate_external(
    vm_map_t        map,
    vm_offset_t     *addr,
    vm_size_t       size,
    int             flags)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_allocate_kernel(map, addr, size, flags, tag);
}

kern_return_t
vm_allocate_kernel(
    vm_map_t        map,
    vm_offset_t     *addr,
    vm_size_t       size,
    int             flags,
    vm_tag_t        tag)
{
    vm_map_offset_t map_addr;
    vm_map_size_t   map_size;
    kern_return_t   result;
    boolean_t       anywhere;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_ALLOCATE) {
        return KERN_INVALID_ARGUMENT;
    }

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }
    if (size == 0) {
        *addr = 0;
        return KERN_SUCCESS;
    }

    anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
    if (anywhere) {
        /*
         * No specific address requested, so start candidate address
         * search at the minimum address in the map.  However, if that
         * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
         * allocations of PAGEZERO to explicit requests since its
         * normal use is to catch dereferences of NULL and many
         * applications also treat pointers with a value of 0 as
         * special and suddenly having address 0 contain usable
         * memory would tend to confuse those applications.
         */
        map_addr = vm_map_min(map);
        if (map_addr == 0) {
            map_addr += VM_MAP_PAGE_SIZE(map);
        }
    } else {
        map_addr = vm_map_trunc_page(*addr,
            VM_MAP_PAGE_MASK(map));
    }
    map_size = vm_map_round_page(size,
        VM_MAP_PAGE_MASK(map));
    if (map_size == 0) {
        return KERN_INVALID_ARGUMENT;
    }

    result = vm_map_enter(
        map,
        &map_addr,
        map_size,
        (vm_map_offset_t)0,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        VM_OBJECT_NULL,
        (vm_object_offset_t)0,
        FALSE,
        VM_PROT_DEFAULT,
        VM_PROT_ALL,
        VM_INHERIT_DEFAULT);

#if KASAN
    if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
        kasan_notify_address(map_addr, map_size);
    }
#endif

    *addr = CAST_DOWN(vm_offset_t, map_addr);
    return result;
}
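
/*
 * Illustrative in-kernel sketch (editor's addition, assumptions: the
 * tagged entry point above and kernel_map are visible to the caller):
 * allocating pageable, zero-filled memory in the kernel map so the
 * allocation is attributed to a VM tag in memory accounting.
 *
 *	mach_vm_offset_t kaddr = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate_kernel(kernel_map, &kaddr, PAGE_SIZE,
 *	    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_OSFMK);
 *	// ...use the memory...
 *	(void) mach_vm_deallocate(kernel_map, kaddr, PAGE_SIZE);
 */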

/*
 * mach_vm_deallocate -
 * deallocates the specified range of addresses in the
 * specified address map.
 */
kern_return_t
mach_vm_deallocate(
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size)
{
    if ((map == VM_MAP_NULL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == (mach_vm_offset_t) 0) {
        return KERN_SUCCESS;
    }

    return vm_map_remove(map,
        vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
        VM_MAP_REMOVE_NO_FLAGS);
}

/*
 * vm_deallocate -
 * deallocates the specified range of addresses in the
 * specified address map (limited to addresses the same
 * size as the kernel).
 */
kern_return_t
vm_deallocate(
    vm_map_t        map,
    vm_offset_t     start,
    vm_size_t       size)
{
    if ((map == VM_MAP_NULL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == (vm_offset_t) 0) {
        return KERN_SUCCESS;
    }

    return vm_map_remove(map,
        vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
        VM_MAP_REMOVE_NO_FLAGS);
}

/*
 * mach_vm_inherit -
 * Sets the inheritance of the specified range in the
 * specified map.
 */
kern_return_t
mach_vm_inherit(
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    vm_inherit_t            new_inheritance)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_inheritance > VM_INHERIT_LAST_VALID)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_inherit(map,
        vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
        new_inheritance);
}
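
/*
 * Illustrative user-space sketch (editor's addition): marking a region
 * VM_INHERIT_NONE so that a child created by fork() sees an unallocated
 * hole there instead of a copy of the pages.
 *
 *	kr = mach_vm_inherit(mach_task_self(), addr, size, VM_INHERIT_NONE);
 */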

/*
 * vm_inherit -
 * Sets the inheritance of the specified range in the
 * specified map (range limited to addresses the same
 * size as the kernel).
 */
kern_return_t
vm_inherit(
    vm_map_t        map,
    vm_offset_t     start,
    vm_size_t       size,
    vm_inherit_t    new_inheritance)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_inheritance > VM_INHERIT_LAST_VALID)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_inherit(map,
        vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
        new_inheritance);
}

/*
 * mach_vm_protect -
 * Sets the protection of the specified range in the
 * specified map.
 */

kern_return_t
mach_vm_protect(
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    boolean_t               set_maximum,
    vm_prot_t               new_protection)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_protect(map,
        vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
        new_protection,
        set_maximum);
}
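
/*
 * Illustrative user-space sketch (editor's addition): the classic W^X
 * dance for a JIT-style buffer -- fill while writable, then drop to
 * read/execute.  set_maximum == FALSE changes only the current
 * protection, so the pages can be made writable again later.
 *
 *	kr = mach_vm_protect(mach_task_self(), addr, size,
 *	    FALSE, VM_PROT_READ | VM_PROT_WRITE);
 *	// ...emit code into the buffer...
 *	kr = mach_vm_protect(mach_task_self(), addr, size,
 *	    FALSE, VM_PROT_READ | VM_PROT_EXECUTE);
 */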

/*
 * vm_protect -
 * Sets the protection of the specified range in the
 * specified map. Addressability of the range limited
 * to the same size as the kernel.
 */

kern_return_t
vm_protect(
    vm_map_t        map,
    vm_offset_t     start,
    vm_size_t       size,
    boolean_t       set_maximum,
    vm_prot_t       new_protection)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_protect(map,
        vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
        new_protection,
        set_maximum);
}

/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cacheability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
    vm_map_t                        map,
    mach_vm_address_t               addr,
    mach_vm_size_t                  size,
    vm_machine_attribute_t          attribute,
    vm_machine_attribute_val_t      *value)         /* IN/OUT */
{
    if ((map == VM_MAP_NULL) || (addr + size < addr)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_machine_attribute(
        map,
        vm_map_trunc_page(addr,
            VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(addr + size,
            VM_MAP_PAGE_MASK(map)),
        attribute,
        value);
}

/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cacheability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
    vm_map_t                        map,
    vm_address_t                    addr,
    vm_size_t                       size,
    vm_machine_attribute_t          attribute,
    vm_machine_attribute_val_t      *value)         /* IN/OUT */
{
    if ((map == VM_MAP_NULL) || (addr + size < addr)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        return KERN_SUCCESS;
    }

    return vm_map_machine_attribute(
        map,
        vm_map_trunc_page(addr,
            VM_MAP_PAGE_MASK(map)),
        vm_map_round_page(addr + size,
            VM_MAP_PAGE_MASK(map)),
        attribute,
        value);
}

/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 */
kern_return_t
mach_vm_read(
    vm_map_t                map,
    mach_vm_address_t       addr,
    mach_vm_size_t          size,
    pointer_t               *data,
    mach_msg_type_number_t  *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   ipc_address;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((mach_msg_type_number_t) size != size) {
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map,
        (vm_map_address_t)addr,
        (vm_map_size_t)size,
        FALSE,      /* src_destroy */
        &ipc_address);

    if (KERN_SUCCESS == error) {
        *data = (pointer_t) ipc_address;
        *data_size = (mach_msg_type_number_t) size;
        assert(*data_size == size);
    }
    return error;
}
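
/*
 * Illustrative user-space sketch (editor's addition): reading another
 * task's memory.  The kernel maps the copied data into the caller's
 * address space, so the returned buffer must be deallocated when done.
 *
 *	vm_offset_t data;
 *	mach_msg_type_number_t count;
 *
 *	kr = mach_vm_read(target_task, remote_addr, len, &data, &count);
 *	if (kr == KERN_SUCCESS) {
 *		// ...inspect count bytes at data...
 *		(void) mach_vm_deallocate(mach_task_self(), data, count);
 *	}
 */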

/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
    vm_map_t                map,
    vm_address_t            addr,
    vm_size_t               size,
    pointer_t               *data,
    mach_msg_type_number_t  *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   ipc_address;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    mach_msg_type_number_t dsize;
    if (os_convert_overflow(size, &dsize)) {
        /*
         * The kernel could handle a 64-bit "size" value, but
         * it could not return the size of the data in "*data_size"
         * without overflowing.
         * Let's reject this "size" as invalid.
         */
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map,
        (vm_map_address_t)addr,
        (vm_map_size_t)size,
        FALSE,      /* src_destroy */
        &ipc_address);

    if (KERN_SUCCESS == error) {
        *data = (pointer_t) ipc_address;
        *data_size = dsize;
        assert(*data_size == size);
    }
    return error;
}

/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
    vm_map_t                map,
    mach_vm_read_entry_t    data_list,
    natural_t               count)
{
    mach_msg_type_number_t  i;
    kern_return_t           error;
    vm_map_copy_t           copy;

    if (map == VM_MAP_NULL ||
        count > VM_MAP_ENTRY_MAX) {
        return KERN_INVALID_ARGUMENT;
    }

    error = KERN_SUCCESS;
    for (i = 0; i < count; i++) {
        vm_map_address_t map_addr;
        vm_map_size_t map_size;

        map_addr = (vm_map_address_t)(data_list[i].address);
        map_size = (vm_map_size_t)(data_list[i].size);

        if (map_size != 0) {
            error = vm_map_copyin(map,
                map_addr,
                map_size,
                FALSE,      /* src_destroy */
                &copy);
            if (KERN_SUCCESS == error) {
                error = vm_map_copyout(
                    current_task()->map,
                    &map_addr,
                    copy);
                if (KERN_SUCCESS == error) {
                    data_list[i].address = map_addr;
                    continue;
                }
                vm_map_copy_discard(copy);
            }
        }
        data_list[i].address = (mach_vm_address_t)0;
        data_list[i].size = (mach_vm_size_t)0;
    }
    return error;
}

/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */

kern_return_t
vm_read_list(
    vm_map_t                map,
    vm_read_entry_t         data_list,
    natural_t               count)
{
    mach_msg_type_number_t  i;
    kern_return_t           error;
    vm_map_copy_t           copy;

    if (map == VM_MAP_NULL ||
        count > VM_MAP_ENTRY_MAX) {
        return KERN_INVALID_ARGUMENT;
    }

    error = KERN_SUCCESS;
    for (i = 0; i < count; i++) {
        vm_map_address_t map_addr;
        vm_map_size_t map_size;

        map_addr = (vm_map_address_t)(data_list[i].address);
        map_size = (vm_map_size_t)(data_list[i].size);

        if (map_size != 0) {
            error = vm_map_copyin(map,
                map_addr,
                map_size,
                FALSE,      /* src_destroy */
                &copy);
            if (KERN_SUCCESS == error) {
                error = vm_map_copyout(current_task()->map,
                    &map_addr,
                    copy);
                if (KERN_SUCCESS == error) {
                    data_list[i].address =
                        CAST_DOWN(vm_offset_t, map_addr);
                    continue;
                }
                vm_map_copy_discard(copy);
            }
        }
        data_list[i].address = (mach_vm_address_t)0;
        data_list[i].size = (mach_vm_size_t)0;
    }
    return error;
}

/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore, so this is moot).
 */

kern_return_t
mach_vm_read_overwrite(
    vm_map_t                map,
    mach_vm_address_t       address,
    mach_vm_size_t          size,
    mach_vm_address_t       data,
    mach_vm_size_t          *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   copy;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map, (vm_map_address_t)address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == error) {
        if (copy) {
            assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
        }

        error = vm_map_copy_overwrite(current_thread()->map,
            (vm_map_address_t)data,
            copy, (vm_map_size_t) size, FALSE);
        if (KERN_SUCCESS == error) {
            *data_size = size;
            return error;
        }
        vm_map_copy_discard(copy);
    }
    return error;
}
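
/*
 * Illustrative user-space sketch (editor's addition): a debugger-style
 * read into a caller-supplied buffer, avoiding the extra mapping that
 * mach_vm_read() sets up.
 *
 *	char buf[256];
 *	mach_vm_size_t outsize = 0;
 *
 *	kr = mach_vm_read_overwrite(target_task, remote_addr, sizeof(buf),
 *	    (mach_vm_address_t)buf, &outsize);
 */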

/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least the
 * ranges are in that first portion of the respective address
 * spaces).
 */

kern_return_t
vm_read_overwrite(
    vm_map_t        map,
    vm_address_t    address,
    vm_size_t       size,
    vm_address_t    data,
    vm_size_t       *data_size)
{
    kern_return_t   error;
    vm_map_copy_t   copy;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    error = vm_map_copyin(map, (vm_map_address_t)address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == error) {
        if (copy) {
            assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
        }

        error = vm_map_copy_overwrite(current_thread()->map,
            (vm_map_address_t)data,
            copy, (vm_map_size_t) size, FALSE);
        if (KERN_SUCCESS == error) {
            *data_size = size;
            return error;
        }
        vm_map_copy_discard(copy);
    }
    return error;
}


/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
    vm_map_t                        map,
    mach_vm_address_t               address,
    pointer_t                       data,
    mach_msg_type_number_t          size)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_copy_overwrite(map, (vm_map_address_t)address,
        (vm_map_copy_t) data, size, FALSE /* interruptible XXX */);
}

/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
    vm_map_t                        map,
    vm_address_t                    address,
    pointer_t                       data,
    mach_msg_type_number_t          size)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return vm_map_copy_overwrite(map, (vm_map_address_t)address,
        (vm_map_copy_t) data, size, FALSE /* interruptible XXX */);
}

/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
    vm_map_t                map,
    mach_vm_address_t       source_address,
    mach_vm_size_t          size,
    mach_vm_address_t       dest_address)
{
    vm_map_copy_t copy;
    kern_return_t kr;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_copyin(map, (vm_map_address_t)source_address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == kr) {
        if (copy) {
            assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
        }

        kr = vm_map_copy_overwrite(map,
            (vm_map_address_t)dest_address,
            copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);

        if (KERN_SUCCESS != kr) {
            vm_map_copy_discard(copy);
        }
    }
    return kr;
}
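
/*
 * Illustrative user-space sketch (editor's addition): duplicating one
 * mapped range over another within the same task.  Both ranges live in
 * target_task; the destination must already be allocated and writable.
 *
 *	kr = mach_vm_copy(target_task, src_addr, len, dst_addr);
 */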

kern_return_t
vm_copy(
    vm_map_t        map,
    vm_address_t    source_address,
    vm_size_t       size,
    vm_address_t    dest_address)
{
    vm_map_copy_t copy;
    kern_return_t kr;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_copyin(map, (vm_map_address_t)source_address,
        (vm_map_size_t)size, FALSE, &copy);

    if (KERN_SUCCESS == kr) {
        if (copy) {
            assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
        }

        kr = vm_map_copy_overwrite(map,
            (vm_map_address_t)dest_address,
            copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);

        if (KERN_SUCCESS != kr) {
            vm_map_copy_discard(copy);
        }
    }
    return kr;
}

/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 * NULL - anonymous memory
 * a named entry - a range within another address space
 *                 or a range within a memory object
 * a whole memory object
 */
kern_return_t
mach_vm_map_external(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return mach_vm_map_kernel(target_map, address, initial_size, mask,
        flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
        port, offset, copy,
        cur_protection, max_protection,
        inheritance);
}

kern_return_t
mach_vm_map_kernel(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          initial_size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    kern_return_t   kr;
    vm_map_offset_t vmmaddr;

    vmmaddr = (vm_map_offset_t) *address;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_MAP) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_enter_mem_object(target_map,
        &vmmaddr,
        initial_size,
        mask,
        flags,
        vmk_flags,
        tag,
        port,
        offset,
        copy,
        cur_protection,
        max_protection,
        inheritance);

#if KASAN
    if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
        kasan_notify_address(vmmaddr, initial_size);
    }
#endif

    *address = vmmaddr;
    return kr;
}
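
/*
 * Illustrative user-space sketch (editor's addition): mapping a named
 * entry (here assumed to have been made with mach_make_memory_entry_64())
 * into the caller's address space via mach_vm_map().  Passing
 * MACH_PORT_NULL for the port would map fresh anonymous memory instead.
 *
 *	kr = mach_vm_map(mach_task_self(), &addr, len, 0,
 *	    VM_FLAGS_ANYWHERE, mem_entry_port, 0, FALSE,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_INHERIT_NONE);
 */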

/* legacy interface */
kern_return_t
vm_map_64_external(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_64_kernel(target_map, address, size, mask,
        flags, VM_MAP_KERNEL_FLAGS_NONE,
        tag, port, offset, copy,
        cur_protection, max_protection,
        inheritance);
}

kern_return_t
vm_map_64_kernel(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_object_offset_t      offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    mach_vm_address_t map_addr;
    mach_vm_size_t map_size;
    mach_vm_offset_t map_mask;
    kern_return_t kr;

    map_addr = (mach_vm_address_t)*address;
    map_size = (mach_vm_size_t)size;
    map_mask = (mach_vm_offset_t)mask;

    kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
        flags, vmk_flags, tag,
        port, offset, copy,
        cur_protection, max_protection, inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}

/* temporary, until world build */
kern_return_t
vm_map_external(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;

    VM_GET_FLAGS_ALIAS(flags, tag);
    return vm_map_kernel(target_map, address, size, mask,
        flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
        port, offset, copy,
        cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map_kernel(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_kernel_flags_t   vmk_flags,
    vm_tag_t                tag,
    ipc_port_t              port,
    vm_offset_t             offset,
    boolean_t               copy,
    vm_prot_t               cur_protection,
    vm_prot_t               max_protection,
    vm_inherit_t            inheritance)
{
    mach_vm_address_t map_addr;
    mach_vm_size_t map_size;
    mach_vm_offset_t map_mask;
    vm_object_offset_t obj_offset;
    kern_return_t kr;

    map_addr = (mach_vm_address_t)*address;
    map_size = (mach_vm_size_t)size;
    map_mask = (mach_vm_offset_t)mask;
    obj_offset = (vm_object_offset_t)offset;

    kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
        flags, vmk_flags, tag,
        port, obj_offset, copy,
        cur_protection, max_protection, inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}

/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap_external(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;
    VM_GET_FLAGS_ALIAS(flags, tag);

    return mach_vm_remap_kernel(target_map, address, size, mask, flags,
        tag, src_map, memory_address,
        copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap_kernel(
    vm_map_t                target_map,
    mach_vm_offset_t        *address,
    mach_vm_size_t          size,
    mach_vm_offset_t        mask,
    int                     flags,
    vm_tag_t                tag,
    vm_map_t                src_map,
    mach_vm_offset_t        memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance)
{
    vm_map_offset_t map_addr;
    kern_return_t   kr;

    if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
        return KERN_INVALID_ARGUMENT;
    }

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_REMAP) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;

    kr = vm_map_remap(target_map,
        &map_addr,
        size,
        mask,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        src_map,
        memory_address,
        copy,
        cur_protection,
        max_protection,
        inheritance);
    *address = map_addr;
    return kr;
}
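
/*
 * Illustrative user-space sketch (editor's addition): sharing a range
 * between tasks by remapping it from src_task into the caller.  With
 * copy == FALSE the two tasks end up referencing the same pages; the
 * effective protections come back in cur/max.
 *
 *	mach_vm_address_t local = 0;
 *	vm_prot_t cur, max;
 *
 *	kr = mach_vm_remap(mach_task_self(), &local, len, 0,
 *	    VM_FLAGS_ANYWHERE, src_task, remote_addr, FALSE,
 *	    &cur, &max, VM_INHERIT_NONE);
 */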

/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap_external(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_map_t                src_map,
    vm_offset_t             memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance)
{
    vm_tag_t tag;
    VM_GET_FLAGS_ALIAS(flags, tag);

    return vm_remap_kernel(target_map, address, size, mask, flags, tag,
        src_map, memory_address, copy,
        cur_protection, max_protection, inheritance);
}

kern_return_t
vm_remap_kernel(
    vm_map_t                target_map,
    vm_offset_t             *address,
    vm_size_t               size,
    vm_offset_t             mask,
    int                     flags,
    vm_tag_t                tag,
    vm_map_t                src_map,
    vm_offset_t             memory_address,
    boolean_t               copy,
    vm_prot_t               *cur_protection,
    vm_prot_t               *max_protection,
    vm_inherit_t            inheritance)
{
    vm_map_offset_t map_addr;
    kern_return_t   kr;

    if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
        return KERN_INVALID_ARGUMENT;
    }

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_REMAP) {
        return KERN_INVALID_ARGUMENT;
    }

    map_addr = (vm_map_offset_t)*address;

    kr = vm_map_remap(target_map,
        &map_addr,
        size,
        mask,
        flags,
        VM_MAP_KERNEL_FLAGS_NONE,
        tag,
        src_map,
        memory_address,
        copy,
        cur_protection,
        max_protection,
        inheritance);
    *address = CAST_DOWN(vm_offset_t, map_addr);
    return kr;
}

/*
 * NOTE: these routines (and this file) will no longer require
 * mach_host_server.h when mach_vm_wire and vm_wire are changed to
 * use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 * mach_vm_wire
 * Specify that the range of the virtual address space
 * of the target task must not cause page faults for
 * the indicated accesses.
 *
 * [ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire_external(
    host_priv_t             host_priv,
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    vm_prot_t               access)
{
    return mach_vm_wire_kernel(host_priv, map, start, size, access,
        VM_KERN_MEMORY_MLOCK);
}

kern_return_t
mach_vm_wire_kernel(
    host_priv_t             host_priv,
    vm_map_t                map,
    mach_vm_offset_t        start,
    mach_vm_size_t          size,
    vm_prot_t               access,
    vm_tag_t                tag)
{
    kern_return_t rc;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_HOST;
    }

    assert(host_priv == &realhost);

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    if (access & ~VM_PROT_ALL || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (access != VM_PROT_NONE) {
        rc = vm_map_wire_kernel(map,
            vm_map_trunc_page(start,
                VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
                VM_MAP_PAGE_MASK(map)),
            access, tag,
            TRUE);
    } else {
        rc = vm_map_unwire(map,
            vm_map_trunc_page(start,
                VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
                VM_MAP_PAGE_MASK(map)),
            TRUE);
    }
    return rc;
}

/*
 * vm_wire -
 * Specify that the range of the virtual address space
 * of the target task must not cause page faults for
 * the indicated accesses.
 *
 * [ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
    host_priv_t     host_priv,
    vm_map_t        map,
    vm_offset_t     start,
    vm_size_t       size,
    vm_prot_t       access)
{
    kern_return_t rc;

    if (host_priv == HOST_PRIV_NULL) {
        return KERN_INVALID_HOST;
    }

    assert(host_priv == &realhost);

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    if ((access & ~VM_PROT_ALL) || (start + size < start)) {
        return KERN_INVALID_ARGUMENT;
    }

    if (size == 0) {
        rc = KERN_SUCCESS;
    } else if (access != VM_PROT_NONE) {
        rc = vm_map_wire_kernel(map,
            vm_map_trunc_page(start,
                VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
                VM_MAP_PAGE_MASK(map)),
            access, VM_KERN_MEMORY_OSFMK,
            TRUE);
    } else {
        rc = vm_map_unwire(map,
            vm_map_trunc_page(start,
                VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
                VM_MAP_PAGE_MASK(map)),
            TRUE);
    }
    return rc;
}

/*
 * mach_vm_msync
 *
 * Synchronises the memory range specified with its backing store
 * image by either flushing or cleaning the contents to the appropriate
 * memory manager.
 *
 * interpretation of sync_flags
 * VM_SYNC_INVALIDATE - discard pages, only return precious
 *                      pages to manager.
 *
 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *                    - discard pages, write dirty or precious
 *                      pages back to memory manager.
 *
 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *                    - write dirty or precious pages back to
 *                      the memory manager.
 *
 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
 *                      is a hole in the region, and we would
 *                      have returned KERN_SUCCESS, return
 *                      KERN_INVALID_ADDRESS instead.
 *
 * RETURNS
 * KERN_INVALID_TASK        Bad task parameter
 * KERN_INVALID_ARGUMENT    both sync and async were specified.
 * KERN_SUCCESS             The usual.
 * KERN_INVALID_ADDRESS     There was a hole in the region.
 */

kern_return_t
mach_vm_msync(
    vm_map_t                map,
    mach_vm_address_t       address,
    mach_vm_size_t          size,
    vm_sync_t               sync_flags)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    return vm_map_msync(map, (vm_map_address_t)address,
        (vm_map_size_t)size, sync_flags);
}

/*
 * vm_msync
 *
 * Synchronises the memory range specified with its backing store
 * image by either flushing or cleaning the contents to the appropriate
 * memory manager.
 *
 * interpretation of sync_flags
 * VM_SYNC_INVALIDATE - discard pages, only return precious
 *                      pages to manager.
 *
 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *                    - discard pages, write dirty or precious
 *                      pages back to memory manager.
 *
 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *                    - write dirty or precious pages back to
 *                      the memory manager.
 *
 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
 *                      is a hole in the region, and we would
 *                      have returned KERN_SUCCESS, return
 *                      KERN_INVALID_ADDRESS instead.
 *
 * The addressability of the range is limited to that which can
 * be described by a vm_address_t.
 *
 * RETURNS
 * KERN_INVALID_TASK        Bad task parameter
 * KERN_INVALID_ARGUMENT    both sync and async were specified.
 * KERN_SUCCESS             The usual.
 * KERN_INVALID_ADDRESS     There was a hole in the region.
 */

kern_return_t
vm_msync(
    vm_map_t        map,
    vm_address_t    address,
    vm_size_t       size,
    vm_sync_t       sync_flags)
{
    if (map == VM_MAP_NULL) {
        return KERN_INVALID_TASK;
    }

    return vm_map_msync(map, (vm_map_address_t)address,
        (vm_map_size_t)size, sync_flags);
}
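
/*
 * Illustrative user-space sketch (editor's addition): pushing dirty
 * pages of a mapped region back to the pager and waiting for
 * completion, with VM_SYNC_CONTIGUOUS requesting KERN_INVALID_ADDRESS
 * if the range contains a hole.
 *
 *	kr = mach_vm_msync(mach_task_self(), addr, len,
 *	    VM_SYNC_SYNCHRONOUS | VM_SYNC_CONTIGUOUS);
 */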
1c79356b | 1554 | |
91447636 | 1555 | |
6d2010ae A |
1556 | int |
1557 | vm_toggle_entry_reuse(int toggle, int *old_value) | |
1558 | { | |
1559 | vm_map_t map = current_map(); | |
0a7de745 | 1560 | |
39037602 | 1561 | assert(!map->is_nested_map); |
0a7de745 | 1562 | if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) { |
6d2010ae | 1563 | *old_value = map->disable_vmentry_reuse; |
0a7de745 | 1564 | } else if (toggle == VM_TOGGLE_SET) { |
3e170ce0 A |
1565 | vm_map_entry_t map_to_entry; |
1566 | ||
6d2010ae | 1567 | vm_map_lock(map); |
3e170ce0 | 1568 | vm_map_disable_hole_optimization(map); |
6d2010ae | 1569 | map->disable_vmentry_reuse = TRUE; |
3e170ce0 A |
1570 | __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map)); |
1571 | if (map->first_free == map_to_entry) { | |
6d2010ae A |
1572 | map->highest_entry_end = vm_map_min(map); |
1573 | } else { | |
1574 | map->highest_entry_end = map->first_free->vme_end; | |
1575 | } | |
1576 | vm_map_unlock(map); | |
0a7de745 | 1577 | } else if (toggle == VM_TOGGLE_CLEAR) { |
6d2010ae A |
1578 | vm_map_lock(map); |
1579 | map->disable_vmentry_reuse = FALSE; | |
1580 | vm_map_unlock(map); | |
0a7de745 | 1581 | } else { |
6d2010ae | 1582 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 1583 | } |
6d2010ae A |
1584 | |
1585 | return KERN_SUCCESS; | |
1586 | } | |
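
/*
 * In-kernel usage sketch (an illustrative assumption, not a caller
 * that exists in this file): read the current setting for the
 * current map, then disable vm_map entry reuse if still enabled.
 *
 *	int old_value = 0;
 *	vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &old_value);
 *	if (!old_value) {
 *		vm_toggle_entry_reuse(VM_TOGGLE_SET, NULL);
 *	}
 */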
1587 | ||
91447636 | 1588 | /* |
0a7de745 | 1589 | * mach_vm_behavior_set |
91447636 A |
1590 | * |
1591 | * Sets the paging behavior attribute for the specified range | |
1592 | * in the specified map. | |
1593 | * | |
1594 | * This routine will fail with KERN_INVALID_ADDRESS if any address | |
1595 | * in [start,start+size) is not a valid allocated memory region. | |
1596 | */ | |
0a7de745 | 1597 | kern_return_t |
91447636 | 1598 | mach_vm_behavior_set( |
0a7de745 A |
1599 | vm_map_t map, |
1600 | mach_vm_offset_t start, | |
1601 | mach_vm_size_t size, | |
1602 | vm_behavior_t new_behavior) | |
91447636 | 1603 | { |
0a7de745 | 1604 | vm_map_offset_t align_mask; |
39037602 | 1605 | |
0a7de745 A |
1606 | if ((map == VM_MAP_NULL) || (start + size < start)) { |
1607 | return KERN_INVALID_ARGUMENT; | |
1608 | } | |
1c79356b | 1609 | |
0a7de745 | 1610 | if (size == 0) { |
91447636 | 1611 | return KERN_SUCCESS; |
0a7de745 | 1612 | } |
1c79356b | 1613 | |
39037602 A |
1614 | switch (new_behavior) { |
1615 | case VM_BEHAVIOR_REUSABLE: | |
1616 | case VM_BEHAVIOR_REUSE: | |
1617 | case VM_BEHAVIOR_CAN_REUSE: | |
1618 | /* | |
1619 | * Align to the hardware page size, to allow | |
1620 | * malloc() to maximize the amount of re-usability, | |
1621 | * even on systems with larger software page size. | |
1622 | */ | |
1623 | align_mask = PAGE_MASK; | |
1624 | break; | |
1625 | default: | |
1626 | align_mask = VM_MAP_PAGE_MASK(map); | |
1627 | break; | |
1628 | } | |
1629 | ||
1630 | return vm_map_behavior_set(map, | |
0a7de745 A |
1631 | vm_map_trunc_page(start, align_mask), |
1632 | vm_map_round_page(start + size, align_mask), | |
1633 | new_behavior); | |
91447636 | 1634 | } |
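
/*
 * Usage sketch (user-space caller; "buf" and "len" are illustrative
 * assumptions): advise the VM system, much like madvise(2), that a
 * cached allocation range may be reused.
 *
 *	kr = mach_vm_behavior_set(mach_task_self(),
 *	    (mach_vm_offset_t)(uintptr_t)buf, len,
 *	    VM_BEHAVIOR_REUSABLE);
 */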
1c79356b | 1635 | |
91447636 | 1636 | /* |
0a7de745 | 1637 | * vm_behavior_set |
91447636 A |
1638 | * |
1639 | * Sets the paging behavior attribute for the specified range | |
1640 | * in the specified map. | |
1641 | * | |
1642 | * This routine will fail with KERN_INVALID_ADDRESS if any address | |
1643 | * in [start,start+size) is not a valid allocated memory region. | |
1644 | * | |
 * This routine is potentially limited in addressability by the
 * use of vm_offset_t (if the map provided is larger than the
 * kernel's).
1648 | */ | |
0a7de745 | 1649 | kern_return_t |
91447636 | 1650 | vm_behavior_set( |
0a7de745 A |
1651 | vm_map_t map, |
1652 | vm_offset_t start, | |
1653 | vm_size_t size, | |
1654 | vm_behavior_t new_behavior) | |
91447636 | 1655 | { |
0a7de745 | 1656 | if (start + size < start) { |
39037602 | 1657 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 1658 | } |
1c79356b | 1659 | |
39037602 | 1660 | return mach_vm_behavior_set(map, |
0a7de745 A |
1661 | (mach_vm_offset_t) start, |
1662 | (mach_vm_size_t) size, | |
1663 | new_behavior); | |
91447636 | 1664 | } |
1c79356b | 1665 | |
91447636 A |
1666 | /* |
1667 | * mach_vm_region: | |
1668 | * | |
1669 | * User call to obtain information about a region in | |
1670 | * a task's address map. Currently, only one flavor is | |
1671 | * supported. | |
1672 | * | |
1673 | * XXX The reserved and behavior fields cannot be filled | |
1674 | * in until the vm merge from the IK is completed, and | |
1675 | * vm_reserve is implemented. | |
1676 | * | |
1677 | * XXX Dependency: syscall_vm_region() also supports only one flavor. | |
1678 | */ | |
1c79356b | 1679 | |
91447636 A |
1680 | kern_return_t |
1681 | mach_vm_region( | |
0a7de745 A |
1682 | vm_map_t map, |
1683 | mach_vm_offset_t *address, /* IN/OUT */ | |
1684 | mach_vm_size_t *size, /* OUT */ | |
1685 | vm_region_flavor_t flavor, /* IN */ | |
1686 | vm_region_info_t info, /* OUT */ | |
1687 | mach_msg_type_number_t *count, /* IN/OUT */ | |
1688 | mach_port_t *object_name) /* OUT */ | |
91447636 | 1689 | { |
0a7de745 A |
1690 | vm_map_offset_t map_addr; |
1691 | vm_map_size_t map_size; | |
1692 | kern_return_t kr; | |
1c79356b | 1693 | |
0a7de745 | 1694 | if (VM_MAP_NULL == map) { |
91447636 | 1695 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 1696 | } |
1c79356b | 1697 | |
91447636 A |
1698 | map_addr = (vm_map_offset_t)*address; |
1699 | map_size = (vm_map_size_t)*size; | |
1c79356b | 1700 | |
91447636 | 1701 | /* legacy conversion */ |
0a7de745 | 1702 | if (VM_REGION_BASIC_INFO == flavor) { |
91447636 | 1703 | flavor = VM_REGION_BASIC_INFO_64; |
0a7de745 | 1704 | } |
1c79356b | 1705 | |
91447636 | 1706 | kr = vm_map_region(map, |
0a7de745 A |
1707 | &map_addr, &map_size, |
1708 | flavor, info, count, | |
1709 | object_name); | |
1c79356b | 1710 | |
91447636 A |
1711 | *address = map_addr; |
1712 | *size = map_size; | |
1713 | return kr; | |
1714 | } | |
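
/*
 * Usage sketch (user-space caller; the loop and its names are
 * illustrative assumptions): walk every region in the caller's map.
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t size = 0;
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t object_name = MACH_PORT_NULL;
 *
 *	while (mach_vm_region(mach_task_self(), &addr, &size,
 *	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
 *	    &count, &object_name) == KERN_SUCCESS) {
 *		... inspect [addr, addr + size), info.protection ...
 *		addr += size;
 *		count = VM_REGION_BASIC_INFO_COUNT_64;
 *	}
 */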
1c79356b | 1715 | |
91447636 A |
1716 | /* |
1717 | * vm_region_64 and vm_region: | |
1718 | * | |
1719 | * User call to obtain information about a region in | |
1720 | * a task's address map. Currently, only one flavor is | |
1721 | * supported. | |
1722 | * | |
1723 | * XXX The reserved and behavior fields cannot be filled | |
1724 | * in until the vm merge from the IK is completed, and | |
1725 | * vm_reserve is implemented. | |
1726 | * | |
1727 | * XXX Dependency: syscall_vm_region() also supports only one flavor. | |
1728 | */ | |
1c79356b | 1729 | |
91447636 A |
1730 | kern_return_t |
1731 | vm_region_64( | |
0a7de745 A |
1732 | vm_map_t map, |
1733 | vm_offset_t *address, /* IN/OUT */ | |
1734 | vm_size_t *size, /* OUT */ | |
1735 | vm_region_flavor_t flavor, /* IN */ | |
1736 | vm_region_info_t info, /* OUT */ | |
1737 | mach_msg_type_number_t *count, /* IN/OUT */ | |
1738 | mach_port_t *object_name) /* OUT */ | |
91447636 | 1739 | { |
0a7de745 A |
1740 | vm_map_offset_t map_addr; |
1741 | vm_map_size_t map_size; | |
1742 | kern_return_t kr; | |
1c79356b | 1743 | |
0a7de745 | 1744 | if (VM_MAP_NULL == map) { |
91447636 | 1745 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 1746 | } |
1c79356b | 1747 | |
91447636 A |
1748 | map_addr = (vm_map_offset_t)*address; |
1749 | map_size = (vm_map_size_t)*size; | |
1c79356b | 1750 | |
91447636 | 1751 | /* legacy conversion */ |
0a7de745 | 1752 | if (VM_REGION_BASIC_INFO == flavor) { |
91447636 | 1753 | flavor = VM_REGION_BASIC_INFO_64; |
0a7de745 | 1754 | } |
1c79356b | 1755 | |
91447636 | 1756 | kr = vm_map_region(map, |
0a7de745 A |
1757 | &map_addr, &map_size, |
1758 | flavor, info, count, | |
1759 | object_name); | |
1c79356b | 1760 | |
91447636 A |
1761 | *address = CAST_DOWN(vm_offset_t, map_addr); |
1762 | *size = CAST_DOWN(vm_size_t, map_size); | |
1c79356b | 1763 | |
0a7de745 | 1764 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { |
91447636 | 1765 | return KERN_INVALID_ADDRESS; |
0a7de745 | 1766 | } |
91447636 A |
1767 | return kr; |
1768 | } | |
1c79356b | 1769 | |
91447636 A |
1770 | kern_return_t |
1771 | vm_region( | |
0a7de745 A |
1772 | vm_map_t map, |
1773 | vm_address_t *address, /* IN/OUT */ | |
1774 | vm_size_t *size, /* OUT */ | |
1775 | vm_region_flavor_t flavor, /* IN */ | |
1776 | vm_region_info_t info, /* OUT */ | |
1777 | mach_msg_type_number_t *count, /* IN/OUT */ | |
1778 | mach_port_t *object_name) /* OUT */ | |
91447636 | 1779 | { |
0a7de745 A |
1780 | vm_map_address_t map_addr; |
1781 | vm_map_size_t map_size; | |
1782 | kern_return_t kr; | |
1c79356b | 1783 | |
0a7de745 | 1784 | if (VM_MAP_NULL == map) { |
91447636 | 1785 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 1786 | } |
1c79356b | 1787 | |
91447636 A |
1788 | map_addr = (vm_map_address_t)*address; |
1789 | map_size = (vm_map_size_t)*size; | |
1c79356b | 1790 | |
91447636 | 1791 | kr = vm_map_region(map, |
0a7de745 A |
1792 | &map_addr, &map_size, |
1793 | flavor, info, count, | |
1794 | object_name); | |
1c79356b | 1795 | |
91447636 A |
1796 | *address = CAST_DOWN(vm_address_t, map_addr); |
1797 | *size = CAST_DOWN(vm_size_t, map_size); | |
1c79356b | 1798 | |
0a7de745 | 1799 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { |
91447636 | 1800 | return KERN_INVALID_ADDRESS; |
0a7de745 | 1801 | } |
91447636 A |
1802 | return kr; |
1803 | } | |
1c79356b A |
1804 | |
1805 | /* | |
91447636 A |
 * mach_vm_region_recurse: A form of mach_vm_region which follows the
 * submaps in a target map
1c79356b | 1808 | * |
1c79356b A |
1809 | */ |
1810 | kern_return_t | |
91447636 | 1811 | mach_vm_region_recurse( |
0a7de745 A |
1812 | vm_map_t map, |
1813 | mach_vm_address_t *address, | |
1814 | mach_vm_size_t *size, | |
1815 | uint32_t *depth, | |
1816 | vm_region_recurse_info_t info, | |
1817 | mach_msg_type_number_t *infoCnt) | |
1c79356b | 1818 | { |
0a7de745 A |
1819 | vm_map_address_t map_addr; |
1820 | vm_map_size_t map_size; | |
1821 | kern_return_t kr; | |
1c79356b | 1822 | |
0a7de745 | 1823 | if (VM_MAP_NULL == map) { |
91447636 | 1824 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 1825 | } |
1c79356b | 1826 | |
91447636 A |
1827 | map_addr = (vm_map_address_t)*address; |
1828 | map_size = (vm_map_size_t)*size; | |
1829 | ||
1830 | kr = vm_map_region_recurse_64( | |
0a7de745 A |
1831 | map, |
1832 | &map_addr, | |
1833 | &map_size, | |
1834 | depth, | |
1835 | (vm_region_submap_info_64_t)info, | |
1836 | infoCnt); | |
91447636 A |
1837 | |
1838 | *address = map_addr; | |
1839 | *size = map_size; | |
1840 | return kr; | |
1c79356b A |
1841 | } |
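
/*
 * Usage sketch (user-space caller; illustrative): same shape as the
 * mach_vm_region() loop above, but descending into submaps.  "depth"
 * is IN/OUT: the maximum nesting to consider on the way in, and the
 * depth at which the returned region was found on the way out.
 *
 *	mach_vm_address_t addr = 0;
 *	mach_vm_size_t size = 0;
 *	uint32_t depth = 0;
 *	vm_region_submap_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
 *
 *	kr = mach_vm_region_recurse(mach_task_self(), &addr, &size,
 *	    &depth, (vm_region_recurse_info_t)&info, &count);
 *	if (kr == KERN_SUCCESS && info.is_submap) {
 *		depth++;	... and re-query to look inside ...
 *	}
 */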
1842 | ||
1843 | /* | |
91447636 A |
1844 | * vm_region_recurse: A form of vm_region which follows the |
1845 | * submaps in a target map | |
1846 | * | |
1c79356b | 1847 | */ |
91447636 A |
1848 | kern_return_t |
1849 | vm_region_recurse_64( | |
0a7de745 A |
1850 | vm_map_t map, |
1851 | vm_address_t *address, | |
1852 | vm_size_t *size, | |
1853 | uint32_t *depth, | |
1854 | vm_region_recurse_info_64_t info, | |
1855 | mach_msg_type_number_t *infoCnt) | |
1c79356b | 1856 | { |
0a7de745 A |
1857 | vm_map_address_t map_addr; |
1858 | vm_map_size_t map_size; | |
1859 | kern_return_t kr; | |
91447636 | 1860 | |
0a7de745 | 1861 | if (VM_MAP_NULL == map) { |
91447636 | 1862 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 1863 | } |
91447636 A |
1864 | |
1865 | map_addr = (vm_map_address_t)*address; | |
1866 | map_size = (vm_map_size_t)*size; | |
1867 | ||
1868 | kr = vm_map_region_recurse_64( | |
0a7de745 A |
1869 | map, |
1870 | &map_addr, | |
1871 | &map_size, | |
1872 | depth, | |
1873 | (vm_region_submap_info_64_t)info, | |
1874 | infoCnt); | |
1c79356b | 1875 | |
91447636 A |
1876 | *address = CAST_DOWN(vm_address_t, map_addr); |
1877 | *size = CAST_DOWN(vm_size_t, map_size); | |
1878 | ||
0a7de745 | 1879 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { |
91447636 | 1880 | return KERN_INVALID_ADDRESS; |
0a7de745 | 1881 | } |
91447636 | 1882 | return kr; |
1c79356b A |
1883 | } |
1884 | ||
91447636 A |
1885 | kern_return_t |
1886 | vm_region_recurse( | |
0a7de745 A |
1887 | vm_map_t map, |
1888 | vm_offset_t *address, /* IN/OUT */ | |
1889 | vm_size_t *size, /* OUT */ | |
1890 | natural_t *depth, /* IN/OUT */ | |
1891 | vm_region_recurse_info_t info32, /* IN/OUT */ | |
1892 | mach_msg_type_number_t *infoCnt) /* IN/OUT */ | |
91447636 A |
1893 | { |
1894 | vm_region_submap_info_data_64_t info64; | |
1895 | vm_region_submap_info_t info; | |
0a7de745 A |
1896 | vm_map_address_t map_addr; |
1897 | vm_map_size_t map_size; | |
1898 | kern_return_t kr; | |
91447636 | 1899 | |
0a7de745 | 1900 | if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) { |
91447636 | 1901 | return KERN_INVALID_ARGUMENT; |
0a7de745 A |
1902 | } |
1903 | ||
91447636 | 1904 | |
91447636 A |
1905 | map_addr = (vm_map_address_t)*address; |
1906 | map_size = (vm_map_size_t)*size; | |
1907 | info = (vm_region_submap_info_t)info32; | |
1908 | *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64; | |
1909 | ||
0a7de745 A |
1910 | kr = vm_map_region_recurse_64(map, &map_addr, &map_size, |
1911 | depth, &info64, infoCnt); | |
91447636 A |
1912 | |
1913 | info->protection = info64.protection; | |
1914 | info->max_protection = info64.max_protection; | |
1915 | info->inheritance = info64.inheritance; | |
1916 | info->offset = (uint32_t)info64.offset; /* trouble-maker */ | |
0a7de745 A |
1917 | info->user_tag = info64.user_tag; |
1918 | info->pages_resident = info64.pages_resident; | |
1919 | info->pages_shared_now_private = info64.pages_shared_now_private; | |
1920 | info->pages_swapped_out = info64.pages_swapped_out; | |
1921 | info->pages_dirtied = info64.pages_dirtied; | |
1922 | info->ref_count = info64.ref_count; | |
1923 | info->shadow_depth = info64.shadow_depth; | |
1924 | info->external_pager = info64.external_pager; | |
1925 | info->share_mode = info64.share_mode; | |
91447636 A |
1926 | info->is_submap = info64.is_submap; |
1927 | info->behavior = info64.behavior; | |
1928 | info->object_id = info64.object_id; | |
0a7de745 | 1929 | info->user_wired_count = info64.user_wired_count; |
91447636 A |
1930 | |
1931 | *address = CAST_DOWN(vm_address_t, map_addr); | |
1932 | *size = CAST_DOWN(vm_size_t, map_size); | |
1933 | *infoCnt = VM_REGION_SUBMAP_INFO_COUNT; | |
1934 | ||
0a7de745 | 1935 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { |
91447636 | 1936 | return KERN_INVALID_ADDRESS; |
0a7de745 | 1937 | } |
91447636 A |
1938 | return kr; |
1939 | } | |
1940 | ||
2d21ac55 A |
1941 | kern_return_t |
1942 | mach_vm_purgable_control( | |
0a7de745 A |
1943 | vm_map_t map, |
1944 | mach_vm_offset_t address, | |
1945 | vm_purgable_t control, | |
1946 | int *state) | |
2d21ac55 | 1947 | { |
0a7de745 | 1948 | if (VM_MAP_NULL == map) { |
2d21ac55 | 1949 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 1950 | } |
2d21ac55 | 1951 | |
5ba3f43e A |
1952 | if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { |
1953 | /* not allowed from user-space */ | |
1954 | return KERN_INVALID_ARGUMENT; | |
1955 | } | |
1956 | ||
2d21ac55 | 1957 | return vm_map_purgable_control(map, |
0a7de745 A |
1958 | vm_map_trunc_page(address, PAGE_MASK), |
1959 | control, | |
1960 | state); | |
2d21ac55 A |
1961 | } |
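
/*
 * Usage sketch (user-space caller; "addr" is assumed to point into a
 * purgeable region, e.g. one allocated with VM_FLAGS_PURGABLE): mark
 * the region volatile, then later try to take it back for use.  The
 * state argument is IN/OUT and returns the previous state.
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
 *		... contents were purged while volatile ...
 *	}
 */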
1962 | ||
91447636 A |
1963 | kern_return_t |
1964 | vm_purgable_control( | |
0a7de745 A |
1965 | vm_map_t map, |
1966 | vm_offset_t address, | |
1967 | vm_purgable_t control, | |
1968 | int *state) | |
91447636 | 1969 | { |
0a7de745 | 1970 | if (VM_MAP_NULL == map) { |
91447636 | 1971 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 1972 | } |
91447636 | 1973 | |
5ba3f43e A |
1974 | if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { |
1975 | /* not allowed from user-space */ | |
1976 | return KERN_INVALID_ARGUMENT; | |
1977 | } | |
1978 | ||
91447636 | 1979 | return vm_map_purgable_control(map, |
0a7de745 A |
1980 | vm_map_trunc_page(address, PAGE_MASK), |
1981 | control, | |
1982 | state); | |
91447636 | 1983 | } |
0a7de745 | 1984 | |
1c79356b A |
1985 | |
1986 | /* | |
1987 | * Ordinarily, the right to allocate CPM is restricted | |
1988 | * to privileged applications (those that can gain access | |
91447636 A |
1989 | * to the host priv port). Set this variable to zero if |
1990 | * you want to let any application allocate CPM. | |
1c79356b | 1991 | */ |
0a7de745 | 1992 | unsigned int vm_allocate_cpm_privileged = 0; |
1c79356b A |
1993 | |
1994 | /* | |
1995 | * Allocate memory in the specified map, with the caveat that | |
1996 | * the memory is physically contiguous. This call may fail | |
1997 | * if the system can't find sufficient contiguous memory. | |
1998 | * This call may cause or lead to heart-stopping amounts of | |
1999 | * paging activity. | |
2000 | * | |
2001 | * Memory obtained from this call should be freed in the | |
2002 | * normal way, viz., via vm_deallocate. | |
2003 | */ | |
2004 | kern_return_t | |
2005 | vm_allocate_cpm( | |
0a7de745 A |
2006 | host_priv_t host_priv, |
2007 | vm_map_t map, | |
2008 | vm_address_t *addr, | |
2009 | vm_size_t size, | |
2010 | int flags) | |
1c79356b | 2011 | { |
0a7de745 A |
2012 | vm_map_address_t map_addr; |
2013 | vm_map_size_t map_size; | |
2014 | kern_return_t kr; | |
1c79356b | 2015 | |
0a7de745 | 2016 | if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) { |
1c79356b | 2017 | return KERN_INVALID_HOST; |
0a7de745 | 2018 | } |
1c79356b | 2019 | |
0a7de745 | 2020 | if (VM_MAP_NULL == map) { |
1c79356b | 2021 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 2022 | } |
1c79356b | 2023 | |
91447636 A |
2024 | map_addr = (vm_map_address_t)*addr; |
2025 | map_size = (vm_map_size_t)size; | |
1c79356b | 2026 | |
91447636 | 2027 | kr = vm_map_enter_cpm(map, |
0a7de745 A |
2028 | &map_addr, |
2029 | map_size, | |
2030 | flags); | |
1c79356b | 2031 | |
91447636 | 2032 | *addr = CAST_DOWN(vm_address_t, map_addr); |
1c79356b A |
2033 | return kr; |
2034 | } | |
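
/*
 * Usage sketch (a caller that holds the host-priv port, which is
 * required when vm_allocate_cpm_privileged is set; names are
 * illustrative assumptions):
 *
 *	vm_address_t addr = 0;
 *	kr = vm_allocate_cpm(host_priv, mach_task_self(),
 *	    &addr, size, VM_FLAGS_ANYWHERE);
 *	...
 *	vm_deallocate(mach_task_self(), addr, size);
 */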
2035 | ||
2036 | ||
91447636 A |
2037 | kern_return_t |
2038 | mach_vm_page_query( | |
0a7de745 A |
2039 | vm_map_t map, |
2040 | mach_vm_offset_t offset, | |
2041 | int *disposition, | |
2042 | int *ref_count) | |
91447636 | 2043 | { |
0a7de745 | 2044 | if (VM_MAP_NULL == map) { |
91447636 | 2045 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 2046 | } |
1c79356b | 2047 | |
39236c6e A |
2048 | return vm_map_page_query_internal( |
2049 | map, | |
2050 | vm_map_trunc_page(offset, PAGE_MASK), | |
2051 | disposition, ref_count); | |
91447636 | 2052 | } |
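
/*
 * Usage sketch (user-space caller): test whether the page containing
 * "addr" is resident.  The disposition is a mask of the
 * VM_PAGE_QUERY_PAGE_* bits from <mach/vm_statistics.h>.
 *
 *	int disp = 0, refcnt = 0;
 *	kr = mach_vm_page_query(mach_task_self(), addr,
 *	    &disp, &refcnt);
 *	if (kr == KERN_SUCCESS &&
 *	    (disp & VM_PAGE_QUERY_PAGE_PRESENT)) {
 *		... page is resident ...
 *	}
 */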
1c79356b A |
2053 | |
2054 | kern_return_t | |
91447636 | 2055 | vm_map_page_query( |
0a7de745 A |
2056 | vm_map_t map, |
2057 | vm_offset_t offset, | |
2058 | int *disposition, | |
2059 | int *ref_count) | |
1c79356b | 2060 | { |
0a7de745 | 2061 | if (VM_MAP_NULL == map) { |
91447636 | 2062 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 2063 | } |
91447636 | 2064 | |
39236c6e A |
2065 | return vm_map_page_query_internal( |
2066 | map, | |
2067 | vm_map_trunc_page(offset, PAGE_MASK), | |
2068 | disposition, ref_count); | |
b0d623f7 A |
2069 | } |
2070 | ||
5ba3f43e A |
2071 | kern_return_t |
2072 | mach_vm_page_range_query( | |
0a7de745 A |
2073 | vm_map_t map, |
2074 | mach_vm_offset_t address, | |
2075 | mach_vm_size_t size, | |
2076 | mach_vm_address_t dispositions_addr, | |
2077 | mach_vm_size_t *dispositions_count) | |
5ba3f43e | 2078 | { |
0a7de745 A |
2079 | kern_return_t kr = KERN_SUCCESS; |
2080 | int num_pages = 0, i = 0; | |
2081 | mach_vm_size_t curr_sz = 0, copy_sz = 0; | |
2082 | mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0; | |
2083 | mach_msg_type_number_t count = 0; | |
5ba3f43e | 2084 | |
0a7de745 A |
2085 | void *info = NULL; |
	void *local_disp = NULL;
2087 | vm_map_size_t info_size = 0, local_disp_size = 0; | |
2088 | mach_vm_offset_t start = 0, end = 0; | |
5ba3f43e A |
2089 | |
2090 | if (map == VM_MAP_NULL || dispositions_count == NULL) { | |
2091 | return KERN_INVALID_ARGUMENT; | |
2092 | } | |
2093 | ||
0a7de745 | 2094 | disp_buf_req_size = (*dispositions_count * sizeof(int)); |
5ba3f43e A |
2095 | start = mach_vm_trunc_page(address); |
2096 | end = mach_vm_round_page(address + size); | |
2097 | ||
2098 | if (end < start) { | |
2099 | return KERN_INVALID_ARGUMENT; | |
2100 | } | |
2101 | ||
0a7de745 A |
2102 | if ((end - start) < size) { |
2103 | /* | |
2104 | * Aligned size is less than unaligned size. | |
2105 | */ | |
2106 | return KERN_INVALID_ARGUMENT; | |
2107 | } | |
2108 | ||
5ba3f43e A |
2109 | if (disp_buf_req_size == 0 || (end == start)) { |
2110 | return KERN_SUCCESS; | |
2111 | } | |
2112 | ||
2113 | /* | |
2114 | * For large requests, we will go through them | |
2115 | * MAX_PAGE_RANGE_QUERY chunk at a time. | |
2116 | */ | |
2117 | ||
2118 | curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY); | |
2119 | num_pages = (int) (curr_sz >> PAGE_SHIFT); | |
2120 | ||
2121 | info_size = num_pages * sizeof(vm_page_info_basic_data_t); | |
2122 | info = kalloc(info_size); | |
2123 | ||
2124 | if (info == NULL) { | |
2125 | return KERN_RESOURCE_SHORTAGE; | |
2126 | } | |
2127 | ||
2128 | local_disp_size = num_pages * sizeof(int); | |
2129 | local_disp = kalloc(local_disp_size); | |
2130 | ||
2131 | if (local_disp == NULL) { | |
5ba3f43e A |
2132 | kfree(info, info_size); |
2133 | info = NULL; | |
2134 | return KERN_RESOURCE_SHORTAGE; | |
2135 | } | |
2136 | ||
2137 | while (size) { | |
5ba3f43e A |
2138 | count = VM_PAGE_INFO_BASIC_COUNT; |
2139 | kr = vm_map_page_range_info_internal( | |
0a7de745 A |
2140 | map, |
2141 | start, | |
2142 | mach_vm_round_page(start + curr_sz), | |
2143 | VM_PAGE_INFO_BASIC, | |
2144 | (vm_page_info_t) info, | |
2145 | &count); | |
5ba3f43e A |
2146 | |
2147 | assert(kr == KERN_SUCCESS); | |
2148 | ||
2149 | for (i = 0; i < num_pages; i++) { | |
5ba3f43e A |
2150 | ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition; |
2151 | } | |
2152 | ||
0a7de745 | 2153 | copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */); |
5ba3f43e A |
2154 | kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz); |
2155 | ||
2156 | start += curr_sz; | |
2157 | disp_buf_req_size -= copy_sz; | |
2158 | disp_buf_total_size += copy_sz; | |
2159 | ||
2160 | if (kr != 0) { | |
2161 | break; | |
2162 | } | |
2163 | ||
2164 | if ((disp_buf_req_size == 0) || (curr_sz >= size)) { | |
5ba3f43e A |
2165 | /* |
			 * We might have inspected the full range, or
			 * even more than that, especially if the user
			 * passed in a non-page-aligned start/size and/or
			 * if we descended into a submap. We are done here.
2170 | */ | |
2171 | ||
2172 | size = 0; | |
5ba3f43e | 2173 | } else { |
5ba3f43e A |
2174 | dispositions_addr += copy_sz; |
2175 | ||
2176 | size -= curr_sz; | |
2177 | ||
2178 | curr_sz = MIN(mach_vm_round_page(size), MAX_PAGE_RANGE_QUERY); | |
2179 | num_pages = (int)(curr_sz >> PAGE_SHIFT); | |
2180 | } | |
2181 | } | |
2182 | ||
2183 | *dispositions_count = disp_buf_total_size / sizeof(int); | |
2184 | ||
2185 | kfree(local_disp, local_disp_size); | |
2186 | local_disp = NULL; | |
2187 | ||
2188 | kfree(info, info_size); | |
2189 | info = NULL; | |
2190 | ||
2191 | return kr; | |
2192 | } | |
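
/*
 * Usage sketch (user-space caller; buffer sizing is an illustrative
 * assumption): fetch one disposition int per page for a range, with
 * *dispositions_count carrying the buffer capacity in and the number
 * of dispositions actually copied out.
 *
 *	mach_vm_size_t npages = len / PAGE_SIZE;
 *	int *disps = malloc(npages * sizeof(int));
 *	mach_vm_size_t count = npages;
 *
 *	kr = mach_vm_page_range_query(mach_task_self(), addr, len,
 *	    (mach_vm_address_t)(uintptr_t)disps, &count);
 */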
2193 | ||
b0d623f7 A |
2194 | kern_return_t |
2195 | mach_vm_page_info( | |
0a7de745 A |
2196 | vm_map_t map, |
2197 | mach_vm_address_t address, | |
2198 | vm_page_info_flavor_t flavor, | |
2199 | vm_page_info_t info, | |
2200 | mach_msg_type_number_t *count) | |
b0d623f7 | 2201 | { |
0a7de745 | 2202 | kern_return_t kr; |
b0d623f7 A |
2203 | |
2204 | if (map == VM_MAP_NULL) { | |
2205 | return KERN_INVALID_ARGUMENT; | |
2206 | } | |
2207 | ||
2208 | kr = vm_map_page_info(map, address, flavor, info, count); | |
2209 | return kr; | |
1c79356b A |
2210 | } |
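
/*
 * Usage sketch (user-space caller; illustrative): the flavor-based
 * counterpart of mach_vm_page_query() above.
 *
 *	vm_page_info_basic_data_t binfo;
 *	mach_msg_type_number_t bcount = VM_PAGE_INFO_BASIC_COUNT;
 *	kr = mach_vm_page_info(mach_task_self(), addr,
 *	    VM_PAGE_INFO_BASIC, (vm_page_info_t)&binfo, &bcount);
 */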
2211 | ||
91447636 | 2212 | /* map a (whole) upl into an address space */ |
1c79356b | 2213 | kern_return_t |
91447636 | 2214 | vm_upl_map( |
0a7de745 A |
2215 | vm_map_t map, |
2216 | upl_t upl, | |
2217 | vm_address_t *dst_addr) | |
1c79356b | 2218 | { |
0a7de745 A |
2219 | vm_map_offset_t map_addr; |
2220 | kern_return_t kr; | |
1c79356b | 2221 | |
0a7de745 | 2222 | if (VM_MAP_NULL == map) { |
91447636 | 2223 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 2224 | } |
1c79356b | 2225 | |
91447636 | 2226 | kr = vm_map_enter_upl(map, upl, &map_addr); |
b0d623f7 | 2227 | *dst_addr = CAST_DOWN(vm_address_t, map_addr); |
91447636 A |
2228 | return kr; |
2229 | } | |
1c79356b | 2230 | |
91447636 A |
2231 | kern_return_t |
2232 | vm_upl_unmap( | |
0a7de745 A |
2233 | vm_map_t map, |
2234 | upl_t upl) | |
91447636 | 2235 | { |
0a7de745 | 2236 | if (VM_MAP_NULL == map) { |
91447636 | 2237 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 2238 | } |
1c79356b | 2239 | |
0a7de745 | 2240 | return vm_map_remove_upl(map, upl); |
91447636 | 2241 | } |
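
/*
 * In-kernel usage sketch (illustrative): vm_upl_map() and
 * vm_upl_unmap() are used as a bracketed pair around access to the
 * UPL's pages through a kernel virtual address.
 *
 *	vm_address_t va = 0;
 *	kr = vm_upl_map(kernel_map, upl, &va);
 *	... touch the pages at "va" ...
 *	kr = vm_upl_unmap(kernel_map, upl);
 */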
1c79356b | 2242 | |
91447636 A |
2243 | /* Retrieve a upl for an object underlying an address range in a map */ |
2244 | ||
2245 | kern_return_t | |
2246 | vm_map_get_upl( | |
0a7de745 A |
2247 | vm_map_t map, |
2248 | vm_map_offset_t map_offset, | |
2249 | upl_size_t *upl_size, | |
2250 | upl_t *upl, | |
2251 | upl_page_info_array_t page_list, | |
2252 | unsigned int *count, | |
2253 | upl_control_flags_t *flags, | |
2254 | vm_tag_t tag, | |
2255 | int force_data_sync) | |
91447636 | 2256 | { |
3e170ce0 | 2257 | upl_control_flags_t map_flags; |
0a7de745 | 2258 | kern_return_t kr; |
1c79356b | 2259 | |
0a7de745 | 2260 | if (VM_MAP_NULL == map) { |
91447636 | 2261 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 2262 | } |
1c79356b | 2263 | |
91447636 | 2264 | map_flags = *flags & ~UPL_NOZEROFILL; |
0a7de745 | 2265 | if (force_data_sync) { |
91447636 | 2266 | map_flags |= UPL_FORCE_DATA_SYNC; |
0a7de745 | 2267 | } |
1c79356b | 2268 | |
91447636 | 2269 | kr = vm_map_create_upl(map, |
0a7de745 A |
2270 | map_offset, |
2271 | upl_size, | |
2272 | upl, | |
2273 | page_list, | |
2274 | count, | |
2275 | &map_flags, | |
2276 | tag); | |
1c79356b | 2277 | |
91447636 A |
2278 | *flags = (map_flags & ~UPL_FORCE_DATA_SYNC); |
2279 | return kr; | |
1c79356b A |
2280 | } |
2281 | ||
5ba3f43e A |
2282 | #if CONFIG_EMBEDDED |
2283 | extern int proc_selfpid(void); | |
2284 | extern char *proc_name_address(void *p); | |
2285 | int cs_executable_mem_entry = 0; | |
2286 | int log_executable_mem_entry = 0; | |
2287 | #endif /* CONFIG_EMBEDDED */ | |
39037602 | 2288 | |
1c79356b | 2289 | /* |
91447636 A |
2290 | * mach_make_memory_entry_64 |
2291 | * | |
2292 | * Think of it as a two-stage vm_remap() operation. First | |
 * you get a handle. Second, you map that handle
 * somewhere else, rather than doing it all at once (and
 * without needing access to the other whole map).
1c79356b | 2296 | */ |
1c79356b A |
2297 | kern_return_t |
2298 | mach_make_memory_entry_64( | |
0a7de745 A |
2299 | vm_map_t target_map, |
2300 | memory_object_size_t *size, | |
91447636 | 2301 | memory_object_offset_t offset, |
0a7de745 A |
2302 | vm_prot_t permission, |
2303 | ipc_port_t *object_handle, | |
2304 | ipc_port_t parent_handle) | |
9d749ea3 | 2305 | { |
cb323159 A |
2306 | vm_named_entry_kernel_flags_t vmne_kflags; |
2307 | ||
9d749ea3 A |
2308 | if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) { |
2309 | /* | |
2310 | * Unknown flag: reject for forward compatibility. | |
2311 | */ | |
2312 | return KERN_INVALID_VALUE; | |
2313 | } | |
2314 | ||
cb323159 A |
2315 | vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE; |
2316 | if (permission & MAP_MEM_LEDGER_TAGGED) { | |
2317 | vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT; | |
2318 | } | |
9d749ea3 | 2319 | return mach_make_memory_entry_internal(target_map, |
0a7de745 A |
2320 | size, |
2321 | offset, | |
2322 | permission, | |
cb323159 | 2323 | vmne_kflags, |
0a7de745 A |
2324 | object_handle, |
2325 | parent_handle); | |
9d749ea3 A |
2326 | } |
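
/*
 * Usage sketch (user-space caller; the names and the target task
 * port are illustrative assumptions), following the two-stage model
 * described above: first make a handle for an existing range, then
 * map that handle into another task with mach_vm_map().
 *
 *	memory_object_size_t entry_size = len;
 *	mach_port_t mem_entry = MACH_PORT_NULL;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
 *	    src_addr, VM_PROT_READ | VM_PROT_WRITE, &mem_entry,
 *	    MACH_PORT_NULL);
 *
 *	mach_vm_address_t dst_addr = 0;
 *	kr = mach_vm_map(target_task, &dst_addr, entry_size, 0,
 *	    VM_FLAGS_ANYWHERE, mem_entry, 0, FALSE,
 *	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_NONE);
 */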
2327 | ||
9d749ea3 A |
2328 | kern_return_t |
2329 | mach_make_memory_entry_internal( | |
0a7de745 A |
2330 | vm_map_t target_map, |
2331 | memory_object_size_t *size, | |
cb323159 | 2332 | memory_object_offset_t offset, |
0a7de745 | 2333 | vm_prot_t permission, |
cb323159 | 2334 | vm_named_entry_kernel_flags_t vmne_kflags, |
0a7de745 A |
2335 | ipc_port_t *object_handle, |
2336 | ipc_port_t parent_handle) | |
1c79356b | 2337 | { |
0a7de745 A |
2338 | vm_map_version_t version; |
2339 | vm_named_entry_t parent_entry; | |
2340 | vm_named_entry_t user_entry; | |
2341 | ipc_port_t user_handle; | |
2342 | kern_return_t kr; | |
2343 | vm_map_t real_map; | |
1c79356b A |
2344 | |
2345 | /* needed for call to vm_map_lookup_locked */ | |
0a7de745 A |
2346 | boolean_t wired; |
2347 | boolean_t iskernel; | |
2348 | vm_object_offset_t obj_off; | |
2349 | vm_prot_t prot; | |
2350 | struct vm_object_fault_info fault_info = {}; | |
2351 | vm_object_t object; | |
2352 | vm_object_t shadow_object; | |
1c79356b A |
2353 | |
2354 | /* needed for direct map entry manipulation */ | |
0a7de745 A |
2355 | vm_map_entry_t map_entry; |
2356 | vm_map_entry_t next_entry; | |
2357 | vm_map_t local_map; | |
2358 | vm_map_t original_map = target_map; | |
2359 | vm_map_size_t total_size, map_size; | |
2360 | vm_map_offset_t map_start, map_end; | |
2361 | vm_map_offset_t local_offset; | |
2362 | vm_object_size_t mappable_size; | |
2363 | ||
2364 | /* | |
39236c6e A |
2365 | * Stash the offset in the page for use by vm_map_enter_mem_object() |
2366 | * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case. | |
2367 | */ | |
0a7de745 | 2368 | vm_object_offset_t offset_in_page; |
39236c6e | 2369 | |
0a7de745 A |
2370 | unsigned int access; |
2371 | vm_prot_t protections; | |
2372 | vm_prot_t original_protections, mask_protections; | |
2373 | unsigned int wimg_mode; | |
91447636 | 2374 | |
0a7de745 A |
2375 | boolean_t force_shadow = FALSE; |
2376 | boolean_t use_data_addr; | |
2377 | boolean_t use_4K_compat; | |
d9a64523 | 2378 | #if VM_NAMED_ENTRY_LIST |
0a7de745 | 2379 | int alias = -1; |
d9a64523 | 2380 | #endif /* VM_NAMED_ENTRY_LIST */ |
e2d2fc5c | 2381 | |
9d749ea3 | 2382 | if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) { |
91447636 A |
2383 | /* |
2384 | * Unknown flag: reject for forward compatibility. | |
2385 | */ | |
2386 | return KERN_INVALID_VALUE; | |
2387 | } | |
2388 | ||
d9a64523 | 2389 | if (IP_VALID(parent_handle) && |
91447636 | 2390 | ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) { |
ea3f0419 | 2391 | parent_entry = (vm_named_entry_t) ip_get_kobject(parent_handle); |
91447636 A |
2392 | } else { |
2393 | parent_entry = NULL; | |
2394 | } | |
55e303ae | 2395 | |
39236c6e A |
2396 | if (parent_entry && parent_entry->is_copy) { |
2397 | return KERN_INVALID_ARGUMENT; | |
2398 | } | |
2399 | ||
6d2010ae A |
2400 | original_protections = permission & VM_PROT_ALL; |
2401 | protections = original_protections; | |
2402 | mask_protections = permission & VM_PROT_IS_MASK; | |
55e303ae | 2403 | access = GET_MAP_MEM(permission); |
39236c6e | 2404 | use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0); |
3e170ce0 | 2405 | use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0); |
55e303ae | 2406 | |
91447636 A |
2407 | user_handle = IP_NULL; |
2408 | user_entry = NULL; | |
2409 | ||
3e170ce0 | 2410 | map_start = vm_map_trunc_page(offset, PAGE_MASK); |
1c79356b | 2411 | |
91447636 | 2412 | if (permission & MAP_MEM_ONLY) { |
0a7de745 | 2413 | boolean_t parent_is_object; |
55e303ae | 2414 | |
3e170ce0 A |
2415 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2416 | map_size = map_end - map_start; | |
0a7de745 | 2417 | |
3e170ce0 | 2418 | if (use_data_addr || use_4K_compat || parent_entry == NULL) { |
55e303ae A |
2419 | return KERN_INVALID_ARGUMENT; |
2420 | } | |
91447636 | 2421 | |
5ba3f43e | 2422 | parent_is_object = !parent_entry->is_sub_map; |
91447636 | 2423 | object = parent_entry->backing.object; |
0a7de745 | 2424 | if (parent_is_object && object != VM_OBJECT_NULL) { |
55e303ae | 2425 | wimg_mode = object->wimg_bits; |
0a7de745 | 2426 | } else { |
6d2010ae | 2427 | wimg_mode = VM_WIMG_USE_DEFAULT; |
0a7de745 A |
2428 | } |
2429 | if ((access != GET_MAP_MEM(parent_entry->protection)) && | |
2430 | !(parent_entry->protection & VM_PROT_WRITE)) { | |
55e303ae A |
2431 | return KERN_INVALID_RIGHT; |
2432 | } | |
5ba3f43e | 2433 | vm_prot_to_wimg(access, &wimg_mode); |
0a7de745 | 2434 | if (access != MAP_MEM_NOOP) { |
5ba3f43e | 2435 | SET_MAP_MEM(access, parent_entry->protection); |
0a7de745 | 2436 | } |
6d2010ae | 2437 | if (parent_is_object && object && |
0a7de745 A |
2438 | (access != MAP_MEM_NOOP) && |
2439 | (!(object->nophyscache))) { | |
6d2010ae A |
2440 | if (object->wimg_bits != wimg_mode) { |
2441 | vm_object_lock(object); | |
2442 | vm_object_change_wimg_mode(object, wimg_mode); | |
2443 | vm_object_unlock(object); | |
55e303ae A |
2444 | } |
2445 | } | |
0a7de745 | 2446 | if (object_handle) { |
91447636 | 2447 | *object_handle = IP_NULL; |
0a7de745 | 2448 | } |
55e303ae | 2449 | return KERN_SUCCESS; |
39236c6e | 2450 | } else if (permission & MAP_MEM_NAMED_CREATE) { |
cb323159 A |
2451 | int ledger_flags = 0; |
2452 | task_t owner; | |
2453 | ||
3e170ce0 A |
2454 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2455 | map_size = map_end - map_start; | |
39236c6e | 2456 | |
3e170ce0 | 2457 | if (use_data_addr || use_4K_compat) { |
39236c6e A |
2458 | return KERN_INVALID_ARGUMENT; |
2459 | } | |
55e303ae | 2460 | |
91447636 A |
2461 | kr = mach_memory_entry_allocate(&user_entry, &user_handle); |
2462 | if (kr != KERN_SUCCESS) { | |
2463 | return KERN_FAILURE; | |
2464 | } | |
55e303ae | 2465 | |
91447636 A |
2466 | /* |
2467 | * Force the creation of the VM object now. | |
2468 | */ | |
b0d623f7 | 2469 | if (map_size > (vm_map_size_t) ANON_MAX_SIZE) { |
91447636 | 2470 | /* |
b0d623f7 | 2471 | * LP64todo - for now, we can only allocate 4GB-4096 |
91447636 A |
2472 | * internal objects because the default pager can't |
2473 | * page bigger ones. Remove this when it can. | |
2474 | */ | |
2475 | kr = KERN_FAILURE; | |
2476 | goto make_mem_done; | |
2477 | } | |
1c79356b | 2478 | |
91447636 A |
2479 | object = vm_object_allocate(map_size); |
2480 | assert(object != VM_OBJECT_NULL); | |
1c79356b | 2481 | |
cb323159 A |
2482 | /* |
2483 | * XXX | |
2484 | * We use this path when we want to make sure that | |
2485 | * nobody messes with the object (coalesce, for | |
2486 | * example) before we map it. | |
2487 | * We might want to use these objects for transposition via | |
2488 | * vm_object_transpose() too, so we don't want any copy or | |
2489 | * shadow objects either... | |
2490 | */ | |
2491 | object->copy_strategy = MEMORY_OBJECT_COPY_NONE; | |
2492 | object->true_share = TRUE; | |
d9a64523 | 2493 | |
cb323159 A |
2494 | owner = current_task(); |
2495 | if ((permission & MAP_MEM_PURGABLE) || | |
2496 | vmne_kflags.vmnekf_ledger_tag) { | |
d9a64523 | 2497 | assert(object->vo_owner == NULL); |
fe8ab488 A |
2498 | assert(object->resident_page_count == 0); |
2499 | assert(object->wired_page_count == 0); | |
cb323159 A |
2500 | assert(owner != TASK_NULL); |
2501 | if (vmne_kflags.vmnekf_ledger_no_footprint) { | |
2502 | ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT; | |
2503 | object->vo_no_footprint = TRUE; | |
5ba3f43e | 2504 | } |
cb323159 A |
2505 | if (permission & MAP_MEM_PURGABLE) { |
2506 | if (!(permission & VM_PROT_WRITE)) { | |
2507 | /* if we can't write, we can't purge */ | |
2508 | vm_object_deallocate(object); | |
2509 | kr = KERN_INVALID_ARGUMENT; | |
2510 | goto make_mem_done; | |
2511 | } | |
2512 | object->purgable = VM_PURGABLE_NONVOLATILE; | |
2513 | if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) { | |
2514 | object->purgeable_only_by_kernel = TRUE; | |
2515 | } | |
2516 | #if __arm64__ | |
2517 | if (owner->task_legacy_footprint) { | |
2518 | /* | |
2519 | * For ios11, we failed to account for | |
2520 | * this memory. Keep doing that for | |
2521 | * legacy apps (built before ios12), | |
2522 | * for backwards compatibility's sake... | |
2523 | */ | |
2524 | owner = kernel_task; | |
2525 | } | |
d9a64523 | 2526 | #endif /* __arm64__ */ |
cb323159 A |
2527 | vm_object_lock(object); |
2528 | vm_purgeable_nonvolatile_enqueue(object, owner); | |
2529 | vm_object_unlock(object); | |
2530 | } | |
d9a64523 A |
2531 | } |
2532 | ||
cb323159 A |
2533 | if (vmne_kflags.vmnekf_ledger_tag) { |
2534 | /* | |
2535 | * Bill this object to the current task's | |
2536 | * ledgers for the given tag. | |
2537 | */ | |
2538 | if (vmne_kflags.vmnekf_ledger_no_footprint) { | |
2539 | ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT; | |
2540 | } | |
d9a64523 | 2541 | vm_object_lock(object); |
cb323159 A |
2542 | object->vo_ledger_tag = vmne_kflags.vmnekf_ledger_tag; |
2543 | kr = vm_object_ownership_change( | |
d9a64523 | 2544 | object, |
cb323159 A |
2545 | vmne_kflags.vmnekf_ledger_tag, |
2546 | owner, /* new owner */ | |
2547 | ledger_flags, | |
d9a64523 | 2548 | FALSE); /* task_objq locked? */ |
fe8ab488 | 2549 | vm_object_unlock(object); |
cb323159 A |
2550 | if (kr != KERN_SUCCESS) { |
2551 | vm_object_deallocate(object); | |
2552 | goto make_mem_done; | |
2553 | } | |
91447636 | 2554 | } |
1c79356b | 2555 | |
39037602 A |
2556 | #if CONFIG_SECLUDED_MEMORY |
2557 | if (secluded_for_iokit && /* global boot-arg */ | |
2558 | ((permission & MAP_MEM_GRAB_SECLUDED) | |
2559 | #if 11 | |
0a7de745 A |
2560 | /* XXX FBDP for my testing only */ |
2561 | || (secluded_for_fbdp && map_size == 97550336) | |
39037602 | 2562 | #endif |
0a7de745 | 2563 | )) { |
39037602 A |
2564 | #if 11 |
2565 | if (!(permission & MAP_MEM_GRAB_SECLUDED) && | |
2566 | secluded_for_fbdp) { | |
2567 | printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size); | |
2568 | } | |
2569 | #endif | |
2570 | object->can_grab_secluded = TRUE; | |
2571 | assert(!object->eligible_for_secluded); | |
2572 | } | |
2573 | #endif /* CONFIG_SECLUDED_MEMORY */ | |
2574 | ||
91447636 A |
2575 | /* |
2576 | * The VM object is brand new and nobody else knows about it, | |
2577 | * so we don't need to lock it. | |
2578 | */ | |
1c79356b | 2579 | |
91447636 | 2580 | wimg_mode = object->wimg_bits; |
5ba3f43e | 2581 | vm_prot_to_wimg(access, &wimg_mode); |
0a7de745 A |
2582 | if (access != MAP_MEM_NOOP) { |
2583 | object->wimg_bits = wimg_mode; | |
2584 | } | |
5ba3f43e | 2585 | |
91447636 | 2586 | /* the object has no pages, so no WIMG bits to update here */ |
1c79356b | 2587 | |
91447636 A |
2588 | user_entry->backing.object = object; |
2589 | user_entry->internal = TRUE; | |
2590 | user_entry->is_sub_map = FALSE; | |
91447636 | 2591 | user_entry->offset = 0; |
39236c6e | 2592 | user_entry->data_offset = 0; |
91447636 A |
2593 | user_entry->protection = protections; |
2594 | SET_MAP_MEM(access, user_entry->protection); | |
2595 | user_entry->size = map_size; | |
55e303ae A |
2596 | |
2597 | /* user_object pager and internal fields are not used */ | |
2598 | /* when the object field is filled in. */ | |
2599 | ||
3e170ce0 | 2600 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
0a7de745 | 2601 | user_entry->data_offset)); |
55e303ae A |
2602 | *object_handle = user_handle; |
2603 | return KERN_SUCCESS; | |
2604 | } | |
2605 | ||
39236c6e | 2606 | if (permission & MAP_MEM_VM_COPY) { |
0a7de745 | 2607 | vm_map_copy_t copy; |
39236c6e A |
2608 | |
2609 | if (target_map == VM_MAP_NULL) { | |
2610 | return KERN_INVALID_TASK; | |
2611 | } | |
2612 | ||
3e170ce0 A |
2613 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2614 | map_size = map_end - map_start; | |
2615 | if (use_data_addr || use_4K_compat) { | |
2616 | offset_in_page = offset - map_start; | |
0a7de745 | 2617 | if (use_4K_compat) { |
3e170ce0 | 2618 | offset_in_page &= ~((signed)(0xFFF)); |
0a7de745 | 2619 | } |
39236c6e | 2620 | } else { |
39236c6e A |
2621 | offset_in_page = 0; |
2622 | } | |
2623 | ||
4bd07ac2 | 2624 | kr = vm_map_copyin_internal(target_map, |
0a7de745 A |
2625 | map_start, |
2626 | map_size, | |
2627 | VM_MAP_COPYIN_ENTRY_LIST, | |
2628 | ©); | |
39236c6e A |
2629 | if (kr != KERN_SUCCESS) { |
2630 | return kr; | |
2631 | } | |
0a7de745 | 2632 | |
39236c6e A |
2633 | kr = mach_memory_entry_allocate(&user_entry, &user_handle); |
2634 | if (kr != KERN_SUCCESS) { | |
2635 | vm_map_copy_discard(copy); | |
2636 | return KERN_FAILURE; | |
2637 | } | |
2638 | ||
2639 | user_entry->backing.copy = copy; | |
2640 | user_entry->internal = FALSE; | |
2641 | user_entry->is_sub_map = FALSE; | |
39236c6e A |
2642 | user_entry->is_copy = TRUE; |
2643 | user_entry->offset = 0; | |
2644 | user_entry->protection = protections; | |
2645 | user_entry->size = map_size; | |
2646 | user_entry->data_offset = offset_in_page; | |
2647 | ||
3e170ce0 | 2648 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
0a7de745 | 2649 | user_entry->data_offset)); |
39236c6e A |
2650 | *object_handle = user_handle; |
2651 | return KERN_SUCCESS; | |
2652 | } | |
2653 | ||
2654 | if (permission & MAP_MEM_VM_SHARE) { | |
0a7de745 A |
2655 | vm_map_copy_t copy; |
2656 | vm_prot_t cur_prot, max_prot; | |
39236c6e A |
2657 | |
2658 | if (target_map == VM_MAP_NULL) { | |
2659 | return KERN_INVALID_TASK; | |
2660 | } | |
2661 | ||
3e170ce0 A |
2662 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2663 | map_size = map_end - map_start; | |
2664 | if (use_data_addr || use_4K_compat) { | |
2665 | offset_in_page = offset - map_start; | |
0a7de745 | 2666 | if (use_4K_compat) { |
3e170ce0 | 2667 | offset_in_page &= ~((signed)(0xFFF)); |
0a7de745 | 2668 | } |
39236c6e | 2669 | } else { |
39236c6e A |
2670 | offset_in_page = 0; |
2671 | } | |
2672 | ||
39037602 | 2673 | cur_prot = VM_PROT_ALL; |
39236c6e | 2674 | kr = vm_map_copy_extract(target_map, |
0a7de745 A |
2675 | map_start, |
2676 | map_size, | |
2677 | ©, | |
2678 | &cur_prot, | |
2679 | &max_prot); | |
39236c6e A |
2680 | if (kr != KERN_SUCCESS) { |
2681 | return kr; | |
2682 | } | |
2683 | ||
2684 | if (mask_protections) { | |
2685 | /* | |
0a7de745 | 2686 | * We just want as much of "original_protections" |
39236c6e A |
2687 | * as we can get out of the actual "cur_prot". |
2688 | */ | |
2689 | protections &= cur_prot; | |
2690 | if (protections == VM_PROT_NONE) { | |
2691 | /* no access at all: fail */ | |
2692 | vm_map_copy_discard(copy); | |
2693 | return KERN_PROTECTION_FAILURE; | |
2694 | } | |
2695 | } else { | |
2696 | /* | |
2697 | * We want exactly "original_protections" | |
2698 | * out of "cur_prot". | |
2699 | */ | |
2700 | if ((cur_prot & protections) != protections) { | |
2701 | vm_map_copy_discard(copy); | |
2702 | return KERN_PROTECTION_FAILURE; | |
2703 | } | |
2704 | } | |
2705 | ||
2706 | kr = mach_memory_entry_allocate(&user_entry, &user_handle); | |
2707 | if (kr != KERN_SUCCESS) { | |
2708 | vm_map_copy_discard(copy); | |
2709 | return KERN_FAILURE; | |
2710 | } | |
2711 | ||
2712 | user_entry->backing.copy = copy; | |
2713 | user_entry->internal = FALSE; | |
2714 | user_entry->is_sub_map = FALSE; | |
39236c6e A |
2715 | user_entry->is_copy = TRUE; |
2716 | user_entry->offset = 0; | |
2717 | user_entry->protection = protections; | |
2718 | user_entry->size = map_size; | |
2719 | user_entry->data_offset = offset_in_page; | |
2720 | ||
3e170ce0 | 2721 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
0a7de745 | 2722 | user_entry->data_offset)); |
39236c6e A |
2723 | *object_handle = user_handle; |
2724 | return KERN_SUCCESS; | |
2725 | } | |
2726 | ||
91447636 A |
2727 | if (parent_entry == NULL || |
2728 | (permission & MAP_MEM_NAMED_REUSE)) { | |
3e170ce0 A |
2729 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2730 | map_size = map_end - map_start; | |
2731 | if (use_data_addr || use_4K_compat) { | |
2732 | offset_in_page = offset - map_start; | |
0a7de745 | 2733 | if (use_4K_compat) { |
3e170ce0 | 2734 | offset_in_page &= ~((signed)(0xFFF)); |
0a7de745 | 2735 | } |
39236c6e | 2736 | } else { |
39236c6e A |
2737 | offset_in_page = 0; |
2738 | } | |
2739 | ||
91447636 A |
2740 | /* Create a named object based on address range within the task map */ |
2741 | /* Go find the object at given address */ | |
1c79356b | 2742 | |
2d21ac55 A |
2743 | if (target_map == VM_MAP_NULL) { |
2744 | return KERN_INVALID_TASK; | |
2745 | } | |
2746 | ||
91447636 | 2747 | redo_lookup: |
6d2010ae | 2748 | protections = original_protections; |
1c79356b A |
2749 | vm_map_lock_read(target_map); |
2750 | ||
2751 | /* get the object associated with the target address */ | |
2752 | /* note we check the permission of the range against */ | |
2753 | /* that requested by the caller */ | |
2754 | ||
0a7de745 A |
2755 | kr = vm_map_lookup_locked(&target_map, map_start, |
2756 | protections | mask_protections, | |
2757 | OBJECT_LOCK_EXCLUSIVE, &version, | |
2758 | &object, &obj_off, &prot, &wired, | |
2759 | &fault_info, | |
2760 | &real_map); | |
1c79356b A |
2761 | if (kr != KERN_SUCCESS) { |
2762 | vm_map_unlock_read(target_map); | |
2763 | goto make_mem_done; | |
2764 | } | |
6d2010ae A |
2765 | if (mask_protections) { |
2766 | /* | |
2767 | * The caller asked us to use the "protections" as | |
2768 | * a mask, so restrict "protections" to what this | |
2769 | * mapping actually allows. | |
2770 | */ | |
2771 | protections &= prot; | |
2772 | } | |
5ba3f43e A |
2773 | #if CONFIG_EMBEDDED |
2774 | /* | |
2775 | * Wiring would copy the pages to a shadow object. | |
2776 | * The shadow object would not be code-signed so | |
2777 | * attempting to execute code from these copied pages | |
2778 | * would trigger a code-signing violation. | |
2779 | */ | |
2780 | if (prot & VM_PROT_EXECUTE) { | |
2781 | if (log_executable_mem_entry) { | |
2782 | void *bsd_info; | |
2783 | bsd_info = current_task()->bsd_info; | |
2784 | printf("pid %d[%s] making memory entry out of " | |
0a7de745 A |
2785 | "executable range from 0x%llx to 0x%llx:" |
2786 | "might cause code-signing issues " | |
2787 | "later\n", | |
2788 | proc_selfpid(), | |
2789 | (bsd_info != NULL | |
2790 | ? proc_name_address(bsd_info) | |
2791 | : "?"), | |
2792 | (uint64_t) map_start, | |
2793 | (uint64_t) map_end); | |
5ba3f43e A |
2794 | } |
2795 | DTRACE_VM2(cs_executable_mem_entry, | |
0a7de745 A |
2796 | uint64_t, (uint64_t)map_start, |
2797 | uint64_t, (uint64_t)map_end); | |
5ba3f43e A |
2798 | cs_executable_mem_entry++; |
2799 | ||
2800 | #if 11 | |
2801 | /* | |
2802 | * We don't know how the memory entry will be used. | |
2803 | * It might never get wired and might not cause any | |
2804 | * trouble, so let's not reject this request... | |
2805 | */ | |
2806 | #else /* 11 */ | |
2807 | kr = KERN_PROTECTION_FAILURE; | |
2808 | vm_object_unlock(object); | |
2809 | vm_map_unlock_read(target_map); | |
0a7de745 | 2810 | if (real_map != target_map) { |
5ba3f43e | 2811 | vm_map_unlock_read(real_map); |
0a7de745 | 2812 | } |
5ba3f43e A |
2813 | goto make_mem_done; |
2814 | #endif /* 11 */ | |
5ba3f43e A |
2815 | } |
2816 | #endif /* CONFIG_EMBEDDED */ | |
39037602 | 2817 | |
0a7de745 | 2818 | if (((prot & protections) != protections) |
39037602 | 2819 | || (object == kernel_object)) { |
1c79356b A |
2820 | kr = KERN_INVALID_RIGHT; |
2821 | vm_object_unlock(object); | |
2822 | vm_map_unlock_read(target_map); | |
0a7de745 | 2823 | if (real_map != target_map) { |
91447636 | 2824 | vm_map_unlock_read(real_map); |
0a7de745 A |
2825 | } |
2826 | if (object == kernel_object) { | |
9bccf70c | 2827 | printf("Warning: Attempt to create a named" |
0a7de745 | 2828 | " entry from the kernel_object\n"); |
9bccf70c | 2829 | } |
1c79356b A |
2830 | goto make_mem_done; |
2831 | } | |
2832 | ||
2833 | /* We have an object, now check to see if this object */ | |
2834 | /* is suitable. If not, create a shadow and share that */ | |
91447636 A |
2835 | |
2836 | /* | |
2837 | * We have to unlock the VM object to avoid deadlocking with | |
2838 | * a VM map lock (the lock ordering is map, the object), if we | |
2839 | * need to modify the VM map to create a shadow object. Since | |
2840 | * we might release the VM map lock below anyway, we have | |
2841 | * to release the VM map lock now. | |
2842 | * XXX FBDP There must be a way to avoid this double lookup... | |
2843 | * | |
2844 | * Take an extra reference on the VM object to make sure it's | |
2845 | * not going to disappear. | |
2846 | */ | |
2847 | vm_object_reference_locked(object); /* extra ref to hold obj */ | |
2848 | vm_object_unlock(object); | |
2849 | ||
9bccf70c | 2850 | local_map = original_map; |
3e170ce0 | 2851 | local_offset = map_start; |
0a7de745 | 2852 | if (target_map != local_map) { |
9bccf70c | 2853 | vm_map_unlock_read(target_map); |
0a7de745 | 2854 | if (real_map != target_map) { |
91447636 | 2855 | vm_map_unlock_read(real_map); |
0a7de745 | 2856 | } |
9bccf70c A |
2857 | vm_map_lock_read(local_map); |
2858 | target_map = local_map; | |
91447636 | 2859 | real_map = local_map; |
9bccf70c | 2860 | } |
0a7de745 A |
2861 | while (TRUE) { |
2862 | if (!vm_map_lookup_entry(local_map, | |
2863 | local_offset, &map_entry)) { | |
2864 | kr = KERN_INVALID_ARGUMENT; | |
2865 | vm_map_unlock_read(target_map); | |
2866 | if (real_map != target_map) { | |
2867 | vm_map_unlock_read(real_map); | |
2868 | } | |
2869 | vm_object_deallocate(object); /* release extra ref */ | |
2870 | object = VM_OBJECT_NULL; | |
2871 | goto make_mem_done; | |
2872 | } | |
2873 | iskernel = (local_map->pmap == kernel_pmap); | |
2874 | if (!(map_entry->is_sub_map)) { | |
2875 | if (VME_OBJECT(map_entry) != object) { | |
2876 | kr = KERN_INVALID_ARGUMENT; | |
2877 | vm_map_unlock_read(target_map); | |
2878 | if (real_map != target_map) { | |
2879 | vm_map_unlock_read(real_map); | |
2880 | } | |
2881 | vm_object_deallocate(object); /* release extra ref */ | |
2882 | object = VM_OBJECT_NULL; | |
2883 | goto make_mem_done; | |
2884 | } | |
2885 | break; | |
2886 | } else { | |
2887 | vm_map_t tmap; | |
2888 | tmap = local_map; | |
2889 | local_map = VME_SUBMAP(map_entry); | |
2890 | ||
2891 | vm_map_lock_read(local_map); | |
2892 | vm_map_unlock_read(tmap); | |
2893 | target_map = local_map; | |
2894 | real_map = local_map; | |
2895 | local_offset = local_offset - map_entry->vme_start; | |
2896 | local_offset += VME_OFFSET(map_entry); | |
2897 | } | |
1c79356b | 2898 | } |
91447636 | 2899 | |
d9a64523 A |
2900 | #if VM_NAMED_ENTRY_LIST |
2901 | alias = VME_ALIAS(map_entry); | |
2902 | #endif /* VM_NAMED_ENTRY_LIST */ | |
2903 | ||
91447636 A |
2904 | /* |
2905 | * We found the VM map entry, lock the VM object again. | |
2906 | */ | |
2907 | vm_object_lock(object); | |
0a7de745 A |
2908 | if (map_entry->wired_count) { |
2909 | /* JMM - The check below should be reworked instead. */ | |
2910 | object->true_share = TRUE; | |
2911 | } | |
6d2010ae A |
2912 | if (mask_protections) { |
2913 | /* | |
2914 | * The caller asked us to use the "protections" as | |
2915 | * a mask, so restrict "protections" to what this | |
2916 | * mapping actually allows. | |
2917 | */ | |
2918 | protections &= map_entry->max_protection; | |
2919 | } | |
0a7de745 A |
2920 | if (((map_entry->max_protection) & protections) != protections) { |
2921 | kr = KERN_INVALID_RIGHT; | |
2922 | vm_object_unlock(object); | |
2923 | vm_map_unlock_read(target_map); | |
2924 | if (real_map != target_map) { | |
91447636 | 2925 | vm_map_unlock_read(real_map); |
0a7de745 A |
2926 | } |
2927 | vm_object_deallocate(object); | |
2928 | object = VM_OBJECT_NULL; | |
2929 | goto make_mem_done; | |
1c79356b | 2930 | } |
9bccf70c | 2931 | |
2d21ac55 | 2932 | mappable_size = fault_info.hi_offset - obj_off; |
9bccf70c | 2933 | total_size = map_entry->vme_end - map_entry->vme_start; |
0a7de745 | 2934 | if (map_size > mappable_size) { |
9bccf70c A |
2935 | /* try to extend mappable size if the entries */ |
2936 | /* following are from the same object and are */ | |
2937 | /* compatible */ | |
2938 | next_entry = map_entry->vme_next; | |
2939 | /* lets see if the next map entry is still */ | |
2940 | /* pointing at this object and is contiguous */ | |
0a7de745 | 2941 | while (map_size > mappable_size) { |
3e170ce0 | 2942 | if ((VME_OBJECT(next_entry) == object) && |
0a7de745 A |
2943 | (next_entry->vme_start == |
2944 | next_entry->vme_prev->vme_end) && | |
2945 | (VME_OFFSET(next_entry) == | |
2946 | (VME_OFFSET(next_entry->vme_prev) + | |
2947 | (next_entry->vme_prev->vme_end - | |
2948 | next_entry->vme_prev->vme_start)))) { | |
6d2010ae A |
2949 | if (mask_protections) { |
2950 | /* | |
2951 | * The caller asked us to use | |
2952 | * the "protections" as a mask, | |
2953 | * so restrict "protections" to | |
2954 | * what this mapping actually | |
2955 | * allows. | |
2956 | */ | |
2957 | protections &= next_entry->max_protection; | |
2958 | } | |
316670eb A |
2959 | if ((next_entry->wired_count) && |
2960 | (map_entry->wired_count == 0)) { | |
2961 | break; | |
2962 | } | |
0a7de745 A |
2963 | if (((next_entry->max_protection) |
2964 | & protections) != protections) { | |
2965 | break; | |
9bccf70c | 2966 | } |
55e303ae | 2967 | if (next_entry->needs_copy != |
0a7de745 | 2968 | map_entry->needs_copy) { |
55e303ae | 2969 | break; |
0a7de745 | 2970 | } |
9bccf70c | 2971 | mappable_size += next_entry->vme_end |
0a7de745 | 2972 | - next_entry->vme_start; |
9bccf70c | 2973 | total_size += next_entry->vme_end |
0a7de745 | 2974 | - next_entry->vme_start; |
9bccf70c A |
2975 | next_entry = next_entry->vme_next; |
2976 | } else { | |
2977 | break; | |
2978 | } | |
9bccf70c A |
2979 | } |
2980 | } | |
2981 | ||
3e170ce0 | 2982 | /* vm_map_entry_should_cow_for_true_share() checks for malloc tags, |
0a7de745 | 2983 | * never true in kernel */ |
3e170ce0 | 2984 | if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) && |
e2d2fc5c A |
2985 | object->vo_size > map_size && |
2986 | map_size != 0) { | |
2987 | /* | |
2988 | * Set up the targeted range for copy-on-write to | |
2989 | * limit the impact of "true_share"/"copy_delay" to | |
2990 | * that range instead of the entire VM object... | |
2991 | */ | |
0a7de745 | 2992 | |
e2d2fc5c A |
2993 | vm_object_unlock(object); |
2994 | if (vm_map_lock_read_to_write(target_map)) { | |
2995 | vm_object_deallocate(object); | |
2996 | target_map = original_map; | |
2997 | goto redo_lookup; | |
2998 | } | |
2999 | ||
39236c6e | 3000 | vm_map_clip_start(target_map, |
0a7de745 A |
3001 | map_entry, |
3002 | vm_map_trunc_page(map_start, | |
3003 | VM_MAP_PAGE_MASK(target_map))); | |
39236c6e | 3004 | vm_map_clip_end(target_map, |
0a7de745 A |
3005 | map_entry, |
3006 | (vm_map_round_page(map_end, | |
3007 | VM_MAP_PAGE_MASK(target_map)))); | |
e2d2fc5c A |
3008 | force_shadow = TRUE; |
3009 | ||
fe8ab488 | 3010 | if ((map_entry->vme_end - offset) < map_size) { |
3e170ce0 | 3011 | map_size = map_entry->vme_end - map_start; |
fe8ab488 A |
3012 | } |
3013 | total_size = map_entry->vme_end - map_entry->vme_start; | |
e2d2fc5c A |
3014 | |
3015 | vm_map_lock_write_to_read(target_map); | |
3016 | vm_object_lock(object); | |
3017 | } | |
e2d2fc5c | 3018 | |
39236c6e | 3019 | if (object->internal) { |
0a7de745 A |
3020 | /* vm_map_lookup_locked will create a shadow if */ |
3021 | /* needs_copy is set but does not check for the */ | |
3022 | /* other two conditions shown. It is important to */ | |
1c79356b A |
3023 | /* set up an object which will not be pulled from */ |
3024 | /* under us. */ | |
3025 | ||
0a7de745 A |
3026 | if (force_shadow || |
3027 | ((map_entry->needs_copy || | |
3028 | object->shadowed || | |
3029 | (object->vo_size > total_size && | |
3030 | (VME_OFFSET(map_entry) != 0 || | |
3031 | object->vo_size > | |
3032 | vm_map_round_page(total_size, | |
3033 | VM_MAP_PAGE_MASK(target_map))))) | |
3034 | && !object->true_share | |
3035 | && object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)) { | |
91447636 A |
3036 | /* |
3037 | * We have to unlock the VM object before | |
3038 | * trying to upgrade the VM map lock, to | |
3039 | * honor lock ordering (map then object). | |
3040 | * Otherwise, we would deadlock if another | |
3041 | * thread holds a read lock on the VM map and | |
3042 | * is trying to acquire the VM object's lock. | |
3043 | * We still hold an extra reference on the | |
3044 | * VM object, guaranteeing that it won't | |
3045 | * disappear. | |
3046 | */ | |
3047 | vm_object_unlock(object); | |
3048 | ||
0a7de745 | 3049 | if (vm_map_lock_read_to_write(target_map)) { |
91447636 A |
3050 | /* |
3051 | * We couldn't upgrade our VM map lock | |
3052 | * from "read" to "write" and we lost | |
3053 | * our "read" lock. | |
3054 | * Start all over again... | |
3055 | */ | |
3056 | vm_object_deallocate(object); /* extra ref */ | |
3057 | target_map = original_map; | |
0a7de745 A |
3058 | goto redo_lookup; |
3059 | } | |
fe8ab488 | 3060 | #if 00 |
91447636 | 3061 | vm_object_lock(object); |
fe8ab488 | 3062 | #endif |
1c79356b | 3063 | |
0a7de745 | 3064 | /* |
55e303ae A |
3065 | * JMM - We need to avoid coming here when the object |
3066 | * is wired by anybody, not just the current map. Why | |
3067 | * couldn't we use the standard vm_object_copy_quickly() | |
3068 | * approach here? | |
3069 | */ | |
0a7de745 A |
3070 | |
3071 | /* create a shadow object */ | |
3e170ce0 A |
3072 | VME_OBJECT_SHADOW(map_entry, total_size); |
3073 | shadow_object = VME_OBJECT(map_entry); | |
fe8ab488 | 3074 | #if 00 |
9bccf70c | 3075 | vm_object_unlock(object); |
fe8ab488 | 3076 | #endif |
91447636 | 3077 | |
0c530ab8 | 3078 | prot = map_entry->protection & ~VM_PROT_WRITE; |
2d21ac55 | 3079 | |
3e170ce0 | 3080 | if (override_nx(target_map, |
0a7de745 A |
3081 | VME_ALIAS(map_entry)) |
3082 | && prot) { | |
3083 | prot |= VM_PROT_EXECUTE; | |
3084 | } | |
2d21ac55 | 3085 | |
9bccf70c | 3086 | vm_object_pmap_protect( |
3e170ce0 | 3087 | object, VME_OFFSET(map_entry), |
9bccf70c | 3088 | total_size, |
0a7de745 A |
3089 | ((map_entry->is_shared |
3090 | || target_map->mapped_in_other_pmaps) | |
3091 | ? PMAP_NULL : | |
3092 | target_map->pmap), | |
9bccf70c | 3093 | map_entry->vme_start, |
0c530ab8 | 3094 | prot); |
0a7de745 A |
3095 | total_size -= (map_entry->vme_end |
3096 | - map_entry->vme_start); | |
9bccf70c A |
3097 | next_entry = map_entry->vme_next; |
3098 | map_entry->needs_copy = FALSE; | |
2d21ac55 A |
3099 | |
3100 | vm_object_lock(shadow_object); | |
9bccf70c | 3101 | while (total_size) { |
0a7de745 A |
3102 | assert((next_entry->wired_count == 0) || |
3103 | (map_entry->wired_count)); | |
3104 | ||
3105 | if (VME_OBJECT(next_entry) == object) { | |
3106 | vm_object_reference_locked(shadow_object); | |
3107 | VME_OBJECT_SET(next_entry, | |
3108 | shadow_object); | |
3109 | vm_object_deallocate(object); | |
3110 | VME_OFFSET_SET( | |
3111 | next_entry, | |
3112 | (VME_OFFSET(next_entry->vme_prev) + | |
3113 | (next_entry->vme_prev->vme_end | |
3114 | - next_entry->vme_prev->vme_start))); | |
3115 | next_entry->use_pmap = TRUE; | |
9bccf70c A |
3116 | next_entry->needs_copy = FALSE; |
3117 | } else { | |
3118 | panic("mach_make_memory_entry_64:" | |
0a7de745 | 3119 | " map entries out of sync\n"); |
9bccf70c | 3120 | } |
0a7de745 A |
3121 | total_size -= |
3122 | next_entry->vme_end | |
3123 | - next_entry->vme_start; | |
9bccf70c A |
3124 | next_entry = next_entry->vme_next; |
3125 | } | |
3126 | ||
91447636 A |
3127 | /* |
3128 | * Transfer our extra reference to the | |
3129 | * shadow object. | |
3130 | */ | |
3131 | vm_object_reference_locked(shadow_object); | |
3132 | vm_object_deallocate(object); /* extra ref */ | |
9bccf70c | 3133 | object = shadow_object; |
91447636 | 3134 | |
3e170ce0 | 3135 | obj_off = ((local_offset - map_entry->vme_start) |
0a7de745 | 3136 | + VME_OFFSET(map_entry)); |
1c79356b | 3137 | |
91447636 | 3138 | vm_map_lock_write_to_read(target_map); |
0a7de745 A |
3139 | } |
3140 | } | |
1c79356b A |
3141 | |
3142 | /* note: in the future we can (if necessary) allow for */ | |
3143 | /* memory object lists, this will better support */ | |
3144 | /* fragmentation, but is it necessary? The user should */ | |
3145 | /* be encouraged to create address space oriented */ | |
3146 | /* shared objects from CLEAN memory regions which have */ | |
3147 | /* a known and defined history. i.e. no inheritance */ | |
3148 | /* share, make this call before making the region the */ | |
3149 | /* target of ipc's, etc. The code above, protecting */ | |
3150 | /* against delayed copy, etc. is mostly defensive. */ | |
3151 | ||
55e303ae | 3152 | wimg_mode = object->wimg_bits; |
0a7de745 | 3153 | if (!(object->nophyscache)) { |
5ba3f43e | 3154 | vm_prot_to_wimg(access, &wimg_mode); |
0a7de745 | 3155 | } |
d7e50217 | 3156 | |
fe8ab488 A |
3157 | #if VM_OBJECT_TRACKING_OP_TRUESHARE |
3158 | if (!object->true_share && | |
3159 | vm_object_tracking_inited) { | |
3160 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; | |
3161 | int num = 0; | |
3162 | ||
3163 | num = OSBacktrace(bt, | |
0a7de745 | 3164 | VM_OBJECT_TRACKING_BTDEPTH); |
fe8ab488 | 3165 | btlog_add_entry(vm_object_tracking_btlog, |
0a7de745 A |
3166 | object, |
3167 | VM_OBJECT_TRACKING_OP_TRUESHARE, | |
3168 | bt, | |
3169 | num); | |
fe8ab488 A |
3170 | } |
3171 | #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ | |
3172 | ||
39037602 | 3173 | vm_object_lock_assert_exclusive(object); |
de355530 | 3174 | object->true_share = TRUE; |
0a7de745 | 3175 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { |
55e303ae | 3176 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; |
0a7de745 | 3177 | } |
55e303ae | 3178 | |
91447636 A |
3179 | /* |
3180 | * The memory entry now points to this VM object and we | |
3181 | * need to hold a reference on the VM object. Use the extra | |
3182 | * reference we took earlier to keep the object alive when we | |
3183 | * had to unlock it. | |
3184 | */ | |
3185 | ||
55e303ae | 3186 | vm_map_unlock_read(target_map); |
0a7de745 | 3187 | if (real_map != target_map) { |
91447636 | 3188 | vm_map_unlock_read(real_map); |
0a7de745 | 3189 | } |
55e303ae | 3190 | |
0a7de745 | 3191 | if (object->wimg_bits != wimg_mode) { |
6d2010ae | 3192 | vm_object_change_wimg_mode(object, wimg_mode); |
0a7de745 | 3193 | } |
1c79356b A |
3194 | |
3195 | /* the size of the mapped entry that overlaps with our region */ | |
3196 | /* (the region targeted for sharing) is: */ | |
3197 | /* (entry_end - entry_start) - */ | |
3198 | /* (offset of our beginning address within the entry); */ | |
3199 | /* it corresponds to this: */ | |
3200 | ||
0a7de745 | 3201 | if (map_size > mappable_size) { |
91447636 | 3202 | map_size = mappable_size; |
0a7de745 | 3203 | } |
91447636 A |
3204 | |
3205 | if (permission & MAP_MEM_NAMED_REUSE) { | |
3206 | /* | |
3207 | * Compare what we got with the "parent_entry". | |
3208 | * If they match, re-use the "parent_entry" instead | |
3209 | * of creating a new one. | |
3210 | */ | |
3211 | if (parent_entry != NULL && | |
3212 | parent_entry->backing.object == object && | |
3213 | parent_entry->internal == object->internal && | |
3214 | parent_entry->is_sub_map == FALSE && | |
91447636 A |
3215 | parent_entry->offset == obj_off && |
3216 | parent_entry->protection == protections && | |
39236c6e | 3217 | parent_entry->size == map_size && |
3e170ce0 | 3218 | ((!(use_data_addr || use_4K_compat) && |
0a7de745 A |
3219 | (parent_entry->data_offset == 0)) || |
3220 | ((use_data_addr || use_4K_compat) && | |
3221 | (parent_entry->data_offset == offset_in_page)))) { | |
91447636 A |
3222 | /* |
3223 | * We have a match: re-use "parent_entry". | |
3224 | */ | |
3225 | /* release our extra reference on object */ | |
3226 | vm_object_unlock(object); | |
3227 | vm_object_deallocate(object); | |
3228 | /* parent_entry->ref_count++; XXX ? */ | |
3229 | /* Get an extra send-right on handle */ | |
3230 | ipc_port_copy_send(parent_handle); | |
fe8ab488 | 3231 | |
3e170ce0 | 3232 | *size = CAST_DOWN(vm_size_t, |
0a7de745 A |
3233 | (parent_entry->size - |
3234 | parent_entry->data_offset)); | |
91447636 A |
3235 | *object_handle = parent_handle; |
3236 | return KERN_SUCCESS; | |
3237 | } else { | |
3238 | /* | |
3239 | * No match: we need to create a new entry. | |
3240 | * fall through... | |
3241 | */ | |
3242 | } | |
3243 | } | |
3244 | ||
3245 | vm_object_unlock(object); | |
3246 | if (mach_memory_entry_allocate(&user_entry, &user_handle) | |
3247 | != KERN_SUCCESS) { | |
3248 | /* release our unused reference on the object */ | |
3249 | vm_object_deallocate(object); | |
3250 | return KERN_FAILURE; | |
3251 | } | |
1c79356b | 3252 | |
91447636 A |
3253 | user_entry->backing.object = object; |
3254 | user_entry->internal = object->internal; | |
3255 | user_entry->is_sub_map = FALSE; | |
91447636 | 3256 | user_entry->offset = obj_off; |
39236c6e | 3257 | user_entry->data_offset = offset_in_page; |
6d2010ae A |
3258 | user_entry->protection = protections; |
3259 | SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection); | |
91447636 | 3260 | user_entry->size = map_size; |
d9a64523 A |
3261 | #if VM_NAMED_ENTRY_LIST |
3262 | user_entry->named_entry_alias = alias; | |
3263 | #endif /* VM_NAMED_ENTRY_LIST */ | |
1c79356b A |
3264 | |
3265 | /* user_object pager and internal fields are not used */ | |
3266 | /* when the object field is filled in. */ | |
3267 | ||
3e170ce0 | 3268 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
0a7de745 | 3269 | user_entry->data_offset)); |
1c79356b | 3270 | *object_handle = user_handle; |
1c79356b | 3271 | return KERN_SUCCESS; |
91447636 | 3272 | } else { |
1c79356b | 3273 | /* The new object will be based on an existing named object */ |
91447636 | 3274 | if (parent_entry == NULL) { |
1c79356b A |
3275 | kr = KERN_INVALID_ARGUMENT; |
3276 | goto make_mem_done; | |
3277 | } | |
39236c6e | 3278 | |
3e170ce0 | 3279 | if (use_data_addr || use_4K_compat) { |
39236c6e A |
3280 | /* |
3281 | * submaps and pagers should only be accessible from within | |
3282 | * the kernel, which shouldn't use the data address flag, so we can fail here. | |
3283 | */ | |
5ba3f43e A |
3284 | if (parent_entry->is_sub_map) { |
3285 | panic("Shouldn't be using data address with a parent entry that is a submap."); | |
39236c6e A |
3286 | } |
3287 | /* | |
3288 | * Account for offset to data in parent entry and | |
3289 | * compute our own offset to data. | |
3290 | */ | |
0a7de745 | 3291 | if ((offset + *size + parent_entry->data_offset) > parent_entry->size) { |
39236c6e A |
3292 | kr = KERN_INVALID_ARGUMENT; |
3293 | goto make_mem_done; | |
3294 | } | |
3295 | ||
3e170ce0 A |
3296 | map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK); |
3297 | offset_in_page = (offset + parent_entry->data_offset) - map_start; | |
0a7de745 | 3298 | if (use_4K_compat) { |
3e170ce0 | 3299 | offset_in_page &= ~((signed)(0xFFF)); |
0a7de745 | 3300 | } |
3e170ce0 A |
3301 | map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK); |
3302 | map_size = map_end - map_start; | |
39236c6e | 3303 | } else { |
3e170ce0 A |
3304 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
3305 | map_size = map_end - map_start; | |
39236c6e A |
3306 | offset_in_page = 0; |
3307 | ||
0a7de745 | 3308 | if ((offset + map_size) > parent_entry->size) { |
39236c6e A |
3309 | kr = KERN_INVALID_ARGUMENT; |
3310 | goto make_mem_done; | |
3311 | } | |
1c79356b A |
3312 | } |
3313 | ||
6d2010ae A |
3314 | if (mask_protections) { |
3315 | /* | |
3316 | * The caller asked us to use the "protections" as | |
3317 | * a mask, so restrict "protections" to what this | |
3318 | * mapping actually allows. | |
3319 | */ | |
3320 | protections &= parent_entry->protection; | |
3321 | } | |
0a7de745 | 3322 | if ((protections & parent_entry->protection) != protections) { |
91447636 A |
3323 | kr = KERN_PROTECTION_FAILURE; |
3324 | goto make_mem_done; | |
3325 | } | |
3326 | ||
3327 | if (mach_memory_entry_allocate(&user_entry, &user_handle) | |
3328 | != KERN_SUCCESS) { | |
3329 | kr = KERN_FAILURE; | |
3330 | goto make_mem_done; | |
55e303ae | 3331 | } |
91447636 A |
3332 | |
3333 | user_entry->size = map_size; | |
3e170ce0 | 3334 | user_entry->offset = parent_entry->offset + map_start; |
0a7de745 | 3335 | user_entry->data_offset = offset_in_page; |
91447636 | 3336 | user_entry->is_sub_map = parent_entry->is_sub_map; |
39236c6e | 3337 | user_entry->is_copy = parent_entry->is_copy; |
91447636 A |
3338 | user_entry->internal = parent_entry->internal; |
3339 | user_entry->protection = protections; | |
3340 | ||
0a7de745 A |
3341 | if (access != MAP_MEM_NOOP) { |
3342 | SET_MAP_MEM(access, user_entry->protection); | |
1c79356b | 3343 | } |
91447636 | 3344 | |
0a7de745 | 3345 | if (parent_entry->is_sub_map) { |
cb323159 A |
3346 | vm_map_t map = parent_entry->backing.map; |
3347 | user_entry->backing.map = map; | |
3348 | lck_mtx_lock(&map->s_lock); | |
3349 | os_ref_retain_locked(&map->map_refcnt); | |
3350 | lck_mtx_unlock(&map->s_lock); | |
91447636 | 3351 | } else { |
0a7de745 A |
3352 | object = parent_entry->backing.object; |
3353 | assert(object != VM_OBJECT_NULL); | |
3354 | user_entry->backing.object = object; | |
3355 | /* we now point to this object, hold on */ | |
3356 | vm_object_lock(object); | |
3357 | vm_object_reference_locked(object); | |
fe8ab488 | 3358 | #if VM_OBJECT_TRACKING_OP_TRUESHARE |
0a7de745 A |
3359 | if (!object->true_share && |
3360 | vm_object_tracking_inited) { | |
3361 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; | |
3362 | int num = 0; | |
3363 | ||
3364 | num = OSBacktrace(bt, | |
3365 | VM_OBJECT_TRACKING_BTDEPTH); | |
3366 | btlog_add_entry(vm_object_tracking_btlog, | |
3367 | object, | |
3368 | VM_OBJECT_TRACKING_OP_TRUESHARE, | |
3369 | bt, | |
3370 | num); | |
3371 | } | |
fe8ab488 A |
3372 | #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ |
3373 | ||
0a7de745 A |
3374 | object->true_share = TRUE; |
3375 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { | |
3376 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
3377 | } | |
3378 | vm_object_unlock(object); | |
1c79356b | 3379 | } |
3e170ce0 | 3380 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
0a7de745 | 3381 | user_entry->data_offset)); |
1c79356b A |
3382 | *object_handle = user_handle; |
3383 | return KERN_SUCCESS; | |
3384 | } | |
3385 | ||
1c79356b | 3386 | make_mem_done: |
91447636 | 3387 | if (user_handle != IP_NULL) { |
0b4c1975 A |
3388 | /* |
3389 | * Releasing "user_handle" causes the kernel object | |
3390 | * associated with it ("user_entry" here) to also be | |
3391 | * released and freed. | |
3392 | */ | |
3393 | mach_memory_entry_port_release(user_handle); | |
91447636 A |
3394 | } |
3395 | return kr; | |
3396 | } | |
3397 | ||
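/*
 * Illustrative sketch (user-space, not part of this file): the usual
 * consumer of mach_make_memory_entry_64() names an existing region and
 * then maps it a second time through the returned handle, sharing the
 * pages. All calls below are standard Mach user APIs; error handling
 * is abbreviated.
 */
#if 0 /* example only */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
share_region_twice(void)
{
	mach_vm_address_t       addr = 0, addr2 = 0;
	mach_vm_size_t          size = 0x4000;
	memory_object_size_t    entry_size;
	mach_port_t             entry = MACH_PORT_NULL;
	kern_return_t           kr;

	kr = mach_vm_allocate(mach_task_self(), &addr, size,
	    VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* name the region; entry_size may be rounded by the kernel */
	entry_size = size;
	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
	    (memory_object_offset_t)addr, VM_PROT_READ | VM_PROT_WRITE,
	    &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* second mapping of the same pages, via the named entry */
	kr = mach_vm_map(mach_task_self(), &addr2, size, 0,
	    VM_FLAGS_ANYWHERE, entry, 0, FALSE,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_NONE);
	return kr;
}
#endif /* example only */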
3398 | kern_return_t | |
3399 | _mach_make_memory_entry( | |
0a7de745 A |
3400 | vm_map_t target_map, |
3401 | memory_object_size_t *size, | |
3402 | memory_object_offset_t offset, | |
3403 | vm_prot_t permission, | |
3404 | ipc_port_t *object_handle, | |
3405 | ipc_port_t parent_entry) | |
91447636 | 3406 | { |
0a7de745 A |
3407 | memory_object_size_t mo_size; |
3408 | kern_return_t kr; | |
3409 | ||
2d21ac55 | 3410 | mo_size = (memory_object_size_t)*size; |
0a7de745 A |
3411 | kr = mach_make_memory_entry_64(target_map, &mo_size, |
3412 | (memory_object_offset_t)offset, permission, object_handle, | |
3413 | parent_entry); | |
91447636 | 3414 | *size = mo_size; |
1c79356b A |
3415 | return kr; |
3416 | } | |
3417 | ||
3418 | kern_return_t | |
3419 | mach_make_memory_entry( | |
0a7de745 A |
3420 | vm_map_t target_map, |
3421 | vm_size_t *size, | |
3422 | vm_offset_t offset, | |
3423 | vm_prot_t permission, | |
3424 | ipc_port_t *object_handle, | |
3425 | ipc_port_t parent_entry) | |
3426 | { | |
3427 | memory_object_size_t mo_size; | |
3428 | kern_return_t kr; | |
3429 | ||
2d21ac55 | 3430 | mo_size = (memory_object_size_t)*size; |
0a7de745 A |
3431 | kr = mach_make_memory_entry_64(target_map, &mo_size, |
3432 | (memory_object_offset_t)offset, permission, object_handle, | |
3433 | parent_entry); | |
91447636 | 3434 | *size = CAST_DOWN(vm_size_t, mo_size); |
1c79356b A |
3435 | return kr; |
3436 | } | |
3437 | ||
3438 | /* | |
91447636 A |
3439 | * task_wire |
3440 | * | |
3441 | * Set or clear the map's wiring_required flag. This flag, if set, | |
3442 | * will cause all future virtual memory allocation to allocate | |
3443 | * user wired memory. Unwiring pages wired down as a result of | |
3444 | * this routine is done with the vm_wire interface. | |
1c79356b | 3445 | */ |
1c79356b | 3446 | kern_return_t |
91447636 | 3447 | task_wire( |
0a7de745 A |
3448 | vm_map_t map, |
3449 | boolean_t must_wire) | |
91447636 | 3450 | { |
0a7de745 A |
3451 | if (map == VM_MAP_NULL) { |
3452 | return KERN_INVALID_ARGUMENT; | |
3453 | } | |
91447636 | 3454 | |
d9a64523 A |
3455 | vm_map_lock(map); |
3456 | map->wiring_required = (must_wire == TRUE); | |
3457 | vm_map_unlock(map); | |
91447636 | 3458 | |
0a7de745 | 3459 | return KERN_SUCCESS; |
91447636 A |
3460 | } |
3461 | ||
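/*
 * Illustrative sketch: bracketing an allocation so it comes back
 * user-wired. Assumes the caller holds a reference on "map"; note the
 * flag is map-global, so concurrent allocators are also affected, and
 * unwiring of the pages themselves goes through the vm_wire interface.
 */
#if 0 /* example only */
	kern_return_t kr;

	kr = task_wire(map, TRUE);      /* only fails for VM_MAP_NULL */
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* ... allocations made in "map" now come back user-wired ... */
	(void) task_wire(map, FALSE);   /* restore default behavior */
#endif /* example only */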
a39ff7e2 A |
3462 | kern_return_t |
3463 | vm_map_exec_lockdown( | |
0a7de745 | 3464 | vm_map_t map) |
a39ff7e2 | 3465 | { |
0a7de745 A |
3466 | if (map == VM_MAP_NULL) { |
3467 | return KERN_INVALID_ARGUMENT; | |
3468 | } | |
a39ff7e2 A |
3469 | |
3470 | vm_map_lock(map); | |
3471 | map->map_disallow_new_exec = TRUE; | |
3472 | vm_map_unlock(map); | |
3473 | ||
0a7de745 | 3474 | return KERN_SUCCESS; |
a39ff7e2 A |
3475 | } |
3476 | ||
d9a64523 | 3477 | #if VM_NAMED_ENTRY_LIST |
0a7de745 A |
3478 | queue_head_t vm_named_entry_list; |
3479 | int vm_named_entry_count = 0; | |
3480 | lck_mtx_t vm_named_entry_list_lock_data; | |
3481 | lck_mtx_ext_t vm_named_entry_list_lock_data_ext; | |
d9a64523 A |
3482 | #endif /* VM_NAMED_ENTRY_LIST */ |
3483 | ||
3484 | void vm_named_entry_init(void); | |
3485 | void | |
3486 | vm_named_entry_init(void) | |
3487 | { | |
3488 | #if VM_NAMED_ENTRY_LIST | |
3489 | queue_init(&vm_named_entry_list); | |
3490 | vm_named_entry_count = 0; | |
3491 | lck_mtx_init_ext(&vm_named_entry_list_lock_data, | |
0a7de745 A |
3492 | &vm_named_entry_list_lock_data_ext, |
3493 | &vm_object_lck_grp, | |
3494 | &vm_object_lck_attr); | |
d9a64523 A |
3495 | #endif /* VM_NAMED_ENTRY_LIST */ |
3496 | } | |
3497 | ||
91447636 A |
3498 | __private_extern__ kern_return_t |
3499 | mach_memory_entry_allocate( | |
0a7de745 A |
3500 | vm_named_entry_t *user_entry_p, |
3501 | ipc_port_t *user_handle_p) | |
1c79356b | 3502 | { |
0a7de745 A |
3503 | vm_named_entry_t user_entry; |
3504 | ipc_port_t user_handle; | |
1c79356b | 3505 | |
91447636 | 3506 | user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry); |
0a7de745 | 3507 | if (user_entry == NULL) { |
1c79356b | 3508 | return KERN_FAILURE; |
0a7de745 A |
3509 | } |
3510 | bzero(user_entry, sizeof(*user_entry)); | |
1c79356b | 3511 | |
91447636 | 3512 | named_entry_lock_init(user_entry); |
1c79356b | 3513 | |
5ba3f43e | 3514 | user_entry->backing.object = NULL; |
91447636 | 3515 | user_entry->is_sub_map = FALSE; |
39236c6e | 3516 | user_entry->is_copy = FALSE; |
91447636 | 3517 | user_entry->internal = FALSE; |
2d21ac55 A |
3518 | user_entry->size = 0; |
3519 | user_entry->offset = 0; | |
39236c6e | 3520 | user_entry->data_offset = 0; |
2d21ac55 | 3521 | user_entry->protection = VM_PROT_NONE; |
91447636 | 3522 | user_entry->ref_count = 1; |
1c79356b | 3523 | |
cb323159 A |
3524 | user_handle = ipc_kobject_alloc_port((ipc_kobject_t)user_entry, |
3525 | IKOT_NAMED_ENTRY, | |
3526 | IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST); | |
1c79356b | 3527 | |
91447636 A |
3528 | *user_entry_p = user_entry; |
3529 | *user_handle_p = user_handle; | |
1c79356b | 3530 | |
d9a64523 A |
3531 | #if VM_NAMED_ENTRY_LIST |
3532 | /* keep a loose (no reference) pointer to the Mach port, for debugging only */ | |
3533 | user_entry->named_entry_port = user_handle; | |
3534 | /* backtrace at allocation time, for debugging only */ | |
3535 | OSBacktrace(&user_entry->named_entry_bt[0], | |
0a7de745 | 3536 | NAMED_ENTRY_BT_DEPTH); |
d9a64523 A |
3537 | |
3538 | /* add this new named entry to the global list */ | |
3539 | lck_mtx_lock_spin(&vm_named_entry_list_lock_data); | |
3540 | queue_enter(&vm_named_entry_list, user_entry, | |
0a7de745 | 3541 | vm_named_entry_t, named_entry_list); |
d9a64523 A |
3542 | vm_named_entry_count++; |
3543 | lck_mtx_unlock(&vm_named_entry_list_lock_data); | |
3544 | #endif /* VM_NAMED_ENTRY_LIST */ | |
3545 | ||
91447636 A |
3546 | return KERN_SUCCESS; |
3547 | } | |
1c79356b | 3548 | |
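/*
 * Illustrative sketch of the caller-side pattern used throughout this
 * file: allocate the entry and its port together, fill in the backing,
 * and on any later failure release the port, which also tears down the
 * entry (see the make_mem_done path above).
 */
#if 0 /* example only */
	vm_named_entry_t        user_entry;
	ipc_port_t              user_handle;

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}
	/* ... fill in user_entry->backing, size, offset, protection ... */
	/* on a later error path: */
	mach_memory_entry_port_release(user_handle); /* frees the entry too */
#endif /* example only */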
91447636 A |
3549 | /* |
3550 | * mach_memory_object_memory_entry_64 | |
3551 | * | |
3552 | * Create a named entry backed by the provided pager. | |
3553 | * | |
91447636 A |
3554 | */ |
3555 | kern_return_t | |
3556 | mach_memory_object_memory_entry_64( | |
0a7de745 A |
3557 | host_t host, |
3558 | boolean_t internal, | |
3559 | vm_object_offset_t size, | |
3560 | vm_prot_t permission, | |
3561 | memory_object_t pager, | |
3562 | ipc_port_t *entry_handle) | |
91447636 | 3563 | { |
0a7de745 A |
3564 | unsigned int access; |
3565 | vm_named_entry_t user_entry; | |
3566 | ipc_port_t user_handle; | |
3567 | vm_object_t object; | |
91447636 | 3568 | |
0a7de745 A |
3569 | if (host == HOST_NULL) { |
3570 | return KERN_INVALID_HOST; | |
3571 | } | |
91447636 | 3572 | |
5ba3f43e A |
3573 | if (pager == MEMORY_OBJECT_NULL && internal) { |
3574 | object = vm_object_allocate(size); | |
5c9f4661 A |
3575 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { |
3576 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
3577 | } | |
5ba3f43e A |
3578 | } else { |
3579 | object = memory_object_to_vm_object(pager); | |
3580 | if (object != VM_OBJECT_NULL) { | |
3581 | vm_object_reference(object); | |
3582 | } | |
3583 | } | |
3584 | if (object == VM_OBJECT_NULL) { | |
3585 | return KERN_INVALID_ARGUMENT; | |
3586 | } | |
3587 | ||
91447636 A |
3588 | if (mach_memory_entry_allocate(&user_entry, &user_handle) |
3589 | != KERN_SUCCESS) { | |
5ba3f43e | 3590 | vm_object_deallocate(object); |
91447636 A |
3591 | return KERN_FAILURE; |
3592 | } | |
3593 | ||
91447636 A |
3594 | user_entry->size = size; |
3595 | user_entry->offset = 0; | |
3596 | user_entry->protection = permission & VM_PROT_ALL; | |
3597 | access = GET_MAP_MEM(permission); | |
3598 | SET_MAP_MEM(access, user_entry->protection); | |
91447636 | 3599 | user_entry->is_sub_map = FALSE; |
91447636 A |
3600 | assert(user_entry->ref_count == 1); |
3601 | ||
5ba3f43e A |
3602 | user_entry->backing.object = object; |
3603 | user_entry->internal = object->internal; | |
3604 | assert(object->internal == internal); | |
3605 | ||
91447636 | 3606 | *entry_handle = user_handle; |
1c79356b | 3607 | return KERN_SUCCESS; |
5ba3f43e | 3608 | } |
91447636 A |
3609 | |
3610 | kern_return_t | |
3611 | mach_memory_object_memory_entry( | |
0a7de745 A |
3612 | host_t host, |
3613 | boolean_t internal, | |
3614 | vm_size_t size, | |
3615 | vm_prot_t permission, | |
3616 | memory_object_t pager, | |
3617 | ipc_port_t *entry_handle) | |
91447636 | 3618 | { |
0a7de745 A |
3619 | return mach_memory_object_memory_entry_64( host, internal, |
3620 | (vm_object_offset_t)size, permission, pager, entry_handle); | |
91447636 A |
3621 | } |
3622 | ||
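/*
 * Illustrative sketch: with a NULL pager and internal == TRUE, the
 * routine above conjures a fresh zero-filled object of the given size
 * and wraps it in a named entry. "host" is assumed to be a valid host
 * port already held by the caller.
 */
#if 0 /* example only */
	ipc_port_t      entry = IP_NULL;
	kern_return_t   kr;

	kr = mach_memory_object_memory_entry_64(host, TRUE,
	    (vm_object_offset_t)(4 * PAGE_SIZE),
	    VM_PROT_READ | VM_PROT_WRITE, MEMORY_OBJECT_NULL, &entry);
#endif /* example only */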
3623 | ||
3624 | kern_return_t | |
3625 | mach_memory_entry_purgable_control( | |
0a7de745 A |
3626 | ipc_port_t entry_port, |
3627 | vm_purgable_t control, | |
3628 | int *state) | |
5ba3f43e A |
3629 | { |
3630 | if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { | |
3631 | /* not allowed from user-space */ | |
3632 | return KERN_INVALID_ARGUMENT; | |
3633 | } | |
3634 | ||
3635 | return memory_entry_purgeable_control_internal(entry_port, control, state); | |
3636 | } | |
3637 | ||
3638 | kern_return_t | |
3639 | memory_entry_purgeable_control_internal( | |
0a7de745 A |
3640 | ipc_port_t entry_port, |
3641 | vm_purgable_t control, | |
3642 | int *state) | |
91447636 | 3643 | { |
0a7de745 A |
3644 | kern_return_t kr; |
3645 | vm_named_entry_t mem_entry; | |
3646 | vm_object_t object; | |
1c79356b | 3647 | |
d9a64523 | 3648 | if (!IP_VALID(entry_port) || |
91447636 A |
3649 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { |
3650 | return KERN_INVALID_ARGUMENT; | |
3651 | } | |
2d21ac55 | 3652 | if (control != VM_PURGABLE_SET_STATE && |
5ba3f43e | 3653 | control != VM_PURGABLE_GET_STATE && |
0a7de745 A |
3654 | control != VM_PURGABLE_SET_STATE_FROM_KERNEL) { |
3655 | return KERN_INVALID_ARGUMENT; | |
3656 | } | |
2d21ac55 | 3657 | |
5ba3f43e | 3658 | if ((control == VM_PURGABLE_SET_STATE || |
0a7de745 | 3659 | control == VM_PURGABLE_SET_STATE_FROM_KERNEL) && |
b0d623f7 | 3660 | (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) || |
0a7de745 A |
3661 | ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) { |
3662 | return KERN_INVALID_ARGUMENT; | |
3663 | } | |
1c79356b | 3664 | |
ea3f0419 | 3665 | mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); |
1c79356b | 3666 | |
91447636 | 3667 | named_entry_lock(mem_entry); |
1c79356b | 3668 | |
39236c6e | 3669 | if (mem_entry->is_sub_map || |
39236c6e | 3670 | mem_entry->is_copy) { |
91447636 | 3671 | named_entry_unlock(mem_entry); |
1c79356b A |
3672 | return KERN_INVALID_ARGUMENT; |
3673 | } | |
91447636 A |
3674 | |
3675 | object = mem_entry->backing.object; | |
3676 | if (object == VM_OBJECT_NULL) { | |
3677 | named_entry_unlock(mem_entry); | |
1c79356b A |
3678 | return KERN_INVALID_ARGUMENT; |
3679 | } | |
91447636 A |
3680 | |
3681 | vm_object_lock(object); | |
3682 | ||
3683 | /* check that named entry covers entire object ? */ | |
6d2010ae | 3684 | if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) { |
91447636 A |
3685 | vm_object_unlock(object); |
3686 | named_entry_unlock(mem_entry); | |
3687 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 3688 | } |
91447636 A |
3689 | |
3690 | named_entry_unlock(mem_entry); | |
3691 | ||
3692 | kr = vm_object_purgable_control(object, control, state); | |
3693 | ||
3694 | vm_object_unlock(object); | |
3695 | ||
3696 | return kr; | |
1c79356b A |
3697 | } |
3698 | ||
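/*
 * Illustrative sketch: querying purgeable state through a named entry.
 * Per the checks above, the entry must cover the entire VM object
 * (offset 0, full size) or the call fails with KERN_INVALID_ARGUMENT.
 */
#if 0 /* example only */
	int             state = VM_PURGABLE_NONVOLATILE;
	kern_return_t   kr;

	kr = mach_memory_entry_purgable_control(entry_port,
	    VM_PURGABLE_GET_STATE, &state);
	if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
		/* contents were reclaimed; treat them as lost */
	}
#endif /* example only */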
d9a64523 A |
3699 | kern_return_t |
3700 | mach_memory_entry_access_tracking( | |
0a7de745 A |
3701 | ipc_port_t entry_port, |
3702 | int *access_tracking, | |
3703 | uint32_t *access_tracking_reads, | |
3704 | uint32_t *access_tracking_writes) | |
d9a64523 A |
3705 | { |
3706 | return memory_entry_access_tracking_internal(entry_port, | |
0a7de745 A |
3707 | access_tracking, |
3708 | access_tracking_reads, | |
3709 | access_tracking_writes); | |
d9a64523 A |
3710 | } |
3711 | ||
3712 | kern_return_t | |
3713 | memory_entry_access_tracking_internal( | |
0a7de745 A |
3714 | ipc_port_t entry_port, |
3715 | int *access_tracking, | |
3716 | uint32_t *access_tracking_reads, | |
3717 | uint32_t *access_tracking_writes) | |
d9a64523 | 3718 | { |
0a7de745 A |
3719 | vm_named_entry_t mem_entry; |
3720 | vm_object_t object; | |
3721 | kern_return_t kr; | |
d9a64523 A |
3722 | |
3723 | if (!IP_VALID(entry_port) || | |
3724 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { | |
3725 | return KERN_INVALID_ARGUMENT; | |
3726 | } | |
3727 | ||
ea3f0419 | 3728 | mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); |
d9a64523 A |
3729 | |
3730 | named_entry_lock(mem_entry); | |
3731 | ||
3732 | if (mem_entry->is_sub_map || | |
3733 | mem_entry->is_copy) { | |
3734 | named_entry_unlock(mem_entry); | |
3735 | return KERN_INVALID_ARGUMENT; | |
3736 | } | |
3737 | ||
3738 | object = mem_entry->backing.object; | |
3739 | if (object == VM_OBJECT_NULL) { | |
3740 | named_entry_unlock(mem_entry); | |
3741 | return KERN_INVALID_ARGUMENT; | |
3742 | } | |
3743 | ||
3744 | #if VM_OBJECT_ACCESS_TRACKING | |
3745 | vm_object_access_tracking(object, | |
0a7de745 A |
3746 | access_tracking, |
3747 | access_tracking_reads, | |
3748 | access_tracking_writes); | |
d9a64523 A |
3749 | kr = KERN_SUCCESS; |
3750 | #else /* VM_OBJECT_ACCESS_TRACKING */ | |
3751 | (void) access_tracking; | |
3752 | (void) access_tracking_reads; | |
3753 | (void) access_tracking_writes; | |
3754 | kr = KERN_NOT_SUPPORTED; | |
3755 | #endif /* VM_OBJECT_ACCESS_TRACKING */ | |
3756 | ||
3757 | named_entry_unlock(mem_entry); | |
3758 | ||
3759 | return kr; | |
3760 | } | |
3761 | ||
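/*
 * Illustrative sketch: callers must tolerate KERN_NOT_SUPPORTED on
 * kernels built without VM_OBJECT_ACCESS_TRACKING.
 */
#if 0 /* example only */
	int             access_tracking = 1;    /* enable */
	uint32_t        reads = 0, writes = 0;
	kern_return_t   kr;

	kr = mach_memory_entry_access_tracking(entry_port,
	    &access_tracking, &reads, &writes);
	if (kr == KERN_NOT_SUPPORTED) {
		/* tracking is compiled out of this kernel */
	}
#endif /* example only */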
cb323159 A |
3762 | kern_return_t |
3763 | mach_memory_entry_ownership( | |
3764 | ipc_port_t entry_port, | |
3765 | task_t owner, | |
3766 | int ledger_tag, | |
3767 | int ledger_flags) | |
3768 | { | |
3769 | task_t cur_task; | |
3770 | kern_return_t kr; | |
3771 | vm_named_entry_t mem_entry; | |
3772 | vm_object_t object; | |
3773 | ||
3774 | cur_task = current_task(); | |
3775 | if (cur_task != kernel_task && | |
3776 | (owner != cur_task || | |
3777 | (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) || | |
3778 | ledger_tag == VM_LEDGER_TAG_NETWORK)) { | |
3779 | /* | |
3780 | * An entitlement is required to: | |
3781 | * + transfer memory ownership to someone else, | |
3782 | * + request that the memory not count against the footprint, | |
3783 | * + tag as "network" (since that implies "no footprint") | |
3784 | */ | |
3785 | if (!cur_task->task_can_transfer_memory_ownership && | |
3786 | IOTaskHasEntitlement(cur_task, | |
3787 | "com.apple.private.memory.ownership_transfer")) { | |
3788 | cur_task->task_can_transfer_memory_ownership = TRUE; | |
3789 | } | |
3790 | if (!cur_task->task_can_transfer_memory_ownership) { | |
3791 | return KERN_NO_ACCESS; | |
3792 | } | |
3793 | } | |
3794 | ||
3795 | if (ledger_flags & ~VM_LEDGER_FLAGS) { | |
3796 | return KERN_INVALID_ARGUMENT; | |
3797 | } | |
3798 | if (ledger_tag <= 0 || | |
3799 | ledger_tag > VM_LEDGER_TAG_MAX) { | |
3800 | return KERN_INVALID_ARGUMENT; | |
3801 | } | |
3802 | ||
3803 | if (!IP_VALID(entry_port) || | |
3804 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { | |
3805 | return KERN_INVALID_ARGUMENT; | |
3806 | } | |
ea3f0419 | 3807 | mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); |
cb323159 A |
3808 | |
3809 | named_entry_lock(mem_entry); | |
3810 | ||
3811 | if (mem_entry->is_sub_map || | |
3812 | mem_entry->is_copy) { | |
3813 | named_entry_unlock(mem_entry); | |
3814 | return KERN_INVALID_ARGUMENT; | |
3815 | } | |
3816 | ||
3817 | object = mem_entry->backing.object; | |
3818 | if (object == VM_OBJECT_NULL) { | |
3819 | named_entry_unlock(mem_entry); | |
3820 | return KERN_INVALID_ARGUMENT; | |
3821 | } | |
3822 | ||
3823 | vm_object_lock(object); | |
3824 | ||
3825 | /* check that named entry covers entire object ? */ | |
3826 | if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) { | |
3827 | vm_object_unlock(object); | |
3828 | named_entry_unlock(mem_entry); | |
3829 | return KERN_INVALID_ARGUMENT; | |
3830 | } | |
3831 | ||
3832 | named_entry_unlock(mem_entry); | |
3833 | ||
3834 | kr = vm_object_ownership_change(object, | |
3835 | ledger_tag, | |
3836 | owner, | |
3837 | ledger_flags, | |
3838 | FALSE); /* task_objq_locked */ | |
3839 | vm_object_unlock(object); | |
3840 | ||
3841 | return kr; | |
3842 | } | |
3843 | ||
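/*
 * Illustrative sketch: re-assigning the ledger accounting of an
 * entry's backing object. VM_LEDGER_TAG_DEFAULT is assumed to be
 * available; per the checks above, a non-kernel caller may only act
 * on itself unless it holds the transfer entitlement.
 */
#if 0 /* example only */
	kern_return_t kr;

	kr = mach_memory_entry_ownership(entry_port, current_task(),
	    VM_LEDGER_TAG_DEFAULT, 0 /* ledger_flags */);
#endif /* example only */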
39236c6e A |
3844 | kern_return_t |
3845 | mach_memory_entry_get_page_counts( | |
0a7de745 A |
3846 | ipc_port_t entry_port, |
3847 | unsigned int *resident_page_count, | |
3848 | unsigned int *dirty_page_count) | |
39236c6e | 3849 | { |
0a7de745 A |
3850 | kern_return_t kr; |
3851 | vm_named_entry_t mem_entry; | |
3852 | vm_object_t object; | |
3853 | vm_object_offset_t offset; | |
3854 | vm_object_size_t size; | |
39236c6e | 3855 | |
d9a64523 | 3856 | if (!IP_VALID(entry_port) || |
39236c6e A |
3857 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { |
3858 | return KERN_INVALID_ARGUMENT; | |
3859 | } | |
3860 | ||
ea3f0419 | 3861 | mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); |
39236c6e A |
3862 | |
3863 | named_entry_lock(mem_entry); | |
3864 | ||
3865 | if (mem_entry->is_sub_map || | |
39236c6e A |
3866 | mem_entry->is_copy) { |
3867 | named_entry_unlock(mem_entry); | |
3868 | return KERN_INVALID_ARGUMENT; | |
3869 | } | |
3870 | ||
3871 | object = mem_entry->backing.object; | |
3872 | if (object == VM_OBJECT_NULL) { | |
3873 | named_entry_unlock(mem_entry); | |
3874 | return KERN_INVALID_ARGUMENT; | |
3875 | } | |
3876 | ||
3877 | vm_object_lock(object); | |
3878 | ||
3879 | offset = mem_entry->offset; | |
3880 | size = mem_entry->size; | |
3881 | ||
3882 | named_entry_unlock(mem_entry); | |
3883 | ||
3884 | kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count); | |
3885 | ||
3886 | vm_object_unlock(object); | |
3887 | ||
3888 | return kr; | |
3889 | } | |
3890 | ||
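/*
 * Illustrative sketch: a cheap diagnostic snapshot of how much of the
 * entry's object is resident and how much of that is dirty.
 */
#if 0 /* example only */
	unsigned int    resident = 0, dirty = 0;
	kern_return_t   kr;

	kr = mach_memory_entry_get_page_counts(entry_port,
	    &resident, &dirty);
#endif /* example only */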
91447636 A |
3891 | /* |
3892 | * mach_memory_entry_port_release: | |
3893 | * | |
3894 | * Release a send right on a named entry port. This is the correct | |
3895 | * way to destroy a named entry. When the last right on the port is | |
3896 | * released, ipc_kobject_destroy() will call mach_destroy_memory_entry(). | |
3897 | */ | |
3898 | void | |
3899 | mach_memory_entry_port_release( | |
0a7de745 | 3900 | ipc_port_t port) |
91447636 A |
3901 | { |
3902 | assert(ip_kotype(port) == IKOT_NAMED_ENTRY); | |
3903 | ipc_port_release_send(port); | |
3904 | } | |
1c79356b | 3905 | |
91447636 A |
3906 | /* |
3907 | * mach_destroy_memory_entry: | |
3908 | * | |
3909 | * Drops a reference on a memory entry and destroys the memory entry if | |
3910 | * there are no more references on it. | |
3911 | * NOTE: This routine should not be called to destroy a memory entry from the | |
3912 | * kernel, as it will not release the Mach port associated with the memory | |
3913 | * entry. The proper way to destroy a memory entry in the kernel is to | |
3914 | * call mach_memory_entry_port_release() to release the kernel's send-right on | |
3915 | * the memory entry's port. When the last send right is released, the memory | |
3916 | * entry will be destroyed via ipc_kobject_destroy(). | |
3917 | */ | |
1c79356b A |
3918 | void |
3919 | mach_destroy_memory_entry( | |
0a7de745 | 3920 | ipc_port_t port) |
1c79356b | 3921 | { |
0a7de745 | 3922 | vm_named_entry_t named_entry; |
1c79356b A |
3923 | #if MACH_ASSERT |
3924 | assert(ip_kotype(port) == IKOT_NAMED_ENTRY); | |
3925 | #endif /* MACH_ASSERT */ | |
ea3f0419 | 3926 | named_entry = (vm_named_entry_t) ip_get_kobject(port); |
316670eb A |
3927 | |
3928 | named_entry_lock(named_entry); | |
91447636 | 3929 | named_entry->ref_count -= 1; |
316670eb | 3930 | |
0a7de745 | 3931 | if (named_entry->ref_count == 0) { |
91447636 | 3932 | if (named_entry->is_sub_map) { |
1c79356b | 3933 | vm_map_deallocate(named_entry->backing.map); |
39236c6e A |
3934 | } else if (named_entry->is_copy) { |
3935 | vm_map_copy_discard(named_entry->backing.copy); | |
3936 | } else { | |
3937 | /* release the VM object we've been pointing to */ | |
91447636 | 3938 | vm_object_deallocate(named_entry->backing.object); |
39236c6e | 3939 | } |
91447636 | 3940 | |
316670eb A |
3941 | named_entry_unlock(named_entry); |
3942 | named_entry_lock_destroy(named_entry); | |
91447636 | 3943 | |
d9a64523 A |
3944 | #if VM_NAMED_ENTRY_LIST |
3945 | lck_mtx_lock_spin(&vm_named_entry_list_lock_data); | |
3946 | queue_remove(&vm_named_entry_list, named_entry, | |
0a7de745 | 3947 | vm_named_entry_t, named_entry_list); |
d9a64523 A |
3948 | assert(vm_named_entry_count > 0); |
3949 | vm_named_entry_count--; | |
3950 | lck_mtx_unlock(&vm_named_entry_list_lock_data); | |
3951 | #endif /* VM_NAMED_ENTRY_LIST */ | |
3952 | ||
ea3f0419 | 3953 | kfree(named_entry, sizeof(struct vm_named_entry)); |
0a7de745 | 3954 | } else { |
316670eb | 3955 | named_entry_unlock(named_entry); |
0a7de745 | 3956 | } |
1c79356b A |
3957 | } |
3958 | ||
0c530ab8 A |
3959 | /* Allow manipulation of individual page state. This is actually part of */ |
3960 | /* the UPL regimen but takes place on the memory entry rather than on a UPL */ | |
3961 | ||
3962 | kern_return_t | |
3963 | mach_memory_entry_page_op( | |
0a7de745 A |
3964 | ipc_port_t entry_port, |
3965 | vm_object_offset_t offset, | |
3966 | int ops, | |
3967 | ppnum_t *phys_entry, | |
3968 | int *flags) | |
0c530ab8 | 3969 | { |
0a7de745 A |
3970 | vm_named_entry_t mem_entry; |
3971 | vm_object_t object; | |
3972 | kern_return_t kr; | |
0c530ab8 | 3973 | |
d9a64523 | 3974 | if (!IP_VALID(entry_port) || |
0c530ab8 A |
3975 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { |
3976 | return KERN_INVALID_ARGUMENT; | |
3977 | } | |
3978 | ||
ea3f0419 | 3979 | mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); |
0c530ab8 A |
3980 | |
3981 | named_entry_lock(mem_entry); | |
3982 | ||
39236c6e | 3983 | if (mem_entry->is_sub_map || |
39236c6e | 3984 | mem_entry->is_copy) { |
0c530ab8 A |
3985 | named_entry_unlock(mem_entry); |
3986 | return KERN_INVALID_ARGUMENT; | |
3987 | } | |
3988 | ||
3989 | object = mem_entry->backing.object; | |
3990 | if (object == VM_OBJECT_NULL) { | |
3991 | named_entry_unlock(mem_entry); | |
3992 | return KERN_INVALID_ARGUMENT; | |
3993 | } | |
3994 | ||
3995 | vm_object_reference(object); | |
3996 | named_entry_unlock(mem_entry); | |
3997 | ||
3998 | kr = vm_object_page_op(object, offset, ops, phys_entry, flags); | |
3999 | ||
0a7de745 | 4000 | vm_object_deallocate(object); |
0c530ab8 A |
4001 | |
4002 | return kr; | |
4003 | } | |
4004 | ||
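/*
 * Illustrative sketch: fetching the physical page number behind one
 * page of the entry without building a UPL. UPL_POP_PHYSICAL is
 * assumed here as the op accepted by vm_object_page_op() for this.
 */
#if 0 /* example only */
	ppnum_t         phys = 0;
	int             pflags = 0;
	kern_return_t   kr;

	kr = mach_memory_entry_page_op(entry_port, 0 /* offset */,
	    UPL_POP_PHYSICAL, &phys, &pflags);
#endif /* example only */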
4005 | /* | |
0a7de745 A |
4006 | * mach_memory_entry_range_op offers a performance enhancement over | |
4007 | * mach_memory_entry_page_op for page_op functions which do not require page | |
4008 | * level state to be returned from the call. Page_op was created to provide | |
4009 | * a low-cost alternative to page manipulation via UPLs when only a single | |
4010 | * page was involved. The range_op call establishes the ability in the _op | |
0c530ab8 A |
4011 | * family of functions to work on multiple pages where the lack of page level |
4012 | * state handling allows the caller to avoid the overhead of the upl structures. | |
4013 | */ | |
4014 | ||
4015 | kern_return_t | |
4016 | mach_memory_entry_range_op( | |
0a7de745 A |
4017 | ipc_port_t entry_port, |
4018 | vm_object_offset_t offset_beg, | |
4019 | vm_object_offset_t offset_end, | |
0c530ab8 A |
4020 | int ops, |
4021 | int *range) | |
4022 | { | |
0a7de745 A |
4023 | vm_named_entry_t mem_entry; |
4024 | vm_object_t object; | |
4025 | kern_return_t kr; | |
0c530ab8 | 4026 | |
d9a64523 | 4027 | if (!IP_VALID(entry_port) || |
0c530ab8 A |
4028 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { |
4029 | return KERN_INVALID_ARGUMENT; | |
4030 | } | |
4031 | ||
ea3f0419 | 4032 | mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port); |
0c530ab8 A |
4033 | |
4034 | named_entry_lock(mem_entry); | |
4035 | ||
39236c6e | 4036 | if (mem_entry->is_sub_map || |
39236c6e | 4037 | mem_entry->is_copy) { |
0c530ab8 A |
4038 | named_entry_unlock(mem_entry); |
4039 | return KERN_INVALID_ARGUMENT; | |
4040 | } | |
4041 | ||
4042 | object = mem_entry->backing.object; | |
4043 | if (object == VM_OBJECT_NULL) { | |
4044 | named_entry_unlock(mem_entry); | |
4045 | return KERN_INVALID_ARGUMENT; | |
4046 | } | |
4047 | ||
4048 | vm_object_reference(object); | |
4049 | named_entry_unlock(mem_entry); | |
4050 | ||
4051 | kr = vm_object_range_op(object, | |
0a7de745 A |
4052 | offset_beg, |
4053 | offset_end, | |
4054 | ops, | |
4055 | (uint32_t *) range); | |
0c530ab8 A |
4056 | |
4057 | vm_object_deallocate(object); | |
4058 | ||
4059 | return kr; | |
4060 | } | |
1c79356b | 4061 | |
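/*
 * Illustrative sketch: scanning a range in one call. With
 * UPL_ROP_ABSENT (assumed per the _op flag family), "range" is taken
 * to report the extent of the leading absent run. "entry_size" stands
 * for the caller's known length of the entry.
 */
#if 0 /* example only */
	int             range = 0;
	kern_return_t   kr;

	kr = mach_memory_entry_range_op(entry_port, 0, entry_size,
	    UPL_ROP_ABSENT, &range);
#endif /* example only */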
91447636 | 4062 | /* ******* Temporary Internal calls to UPL for BSD ***** */ |
1c79356b | 4063 | |
91447636 A |
4064 | extern int kernel_upl_map( |
4065 | vm_map_t map, | |
4066 | upl_t upl, | |
4067 | vm_offset_t *dst_addr); | |
1c79356b | 4068 | |
91447636 A |
4069 | extern int kernel_upl_unmap( |
4070 | vm_map_t map, | |
4071 | upl_t upl); | |
150bd074 | 4072 | |
91447636 A |
4073 | extern int kernel_upl_commit( |
4074 | upl_t upl, | |
4075 | upl_page_info_t *pl, | |
0a7de745 | 4076 | mach_msg_type_number_t count); |
1c79356b | 4077 | |
91447636 A |
4078 | extern int kernel_upl_commit_range( |
4079 | upl_t upl, | |
4080 | upl_offset_t offset, | |
0a7de745 A |
4081 | upl_size_t size, |
4082 | int flags, | |
4083 | upl_page_info_array_t pl, | |
4084 | mach_msg_type_number_t count); | |
1c79356b | 4085 | |
91447636 A |
4086 | extern int kernel_upl_abort( |
4087 | upl_t upl, | |
4088 | int abort_type); | |
1c79356b | 4089 | |
91447636 A |
4090 | extern int kernel_upl_abort_range( |
4091 | upl_t upl, | |
4092 | upl_offset_t offset, | |
4093 | upl_size_t size, | |
4094 | int abort_flags); | |
1c79356b | 4095 | |
1c79356b | 4096 | |
1c79356b A |
4097 | kern_return_t |
4098 | kernel_upl_map( | |
0a7de745 A |
4099 | vm_map_t map, |
4100 | upl_t upl, | |
4101 | vm_offset_t *dst_addr) | |
1c79356b | 4102 | { |
91447636 | 4103 | return vm_upl_map(map, upl, dst_addr); |
1c79356b A |
4104 | } |
4105 | ||
4106 | ||
4107 | kern_return_t | |
4108 | kernel_upl_unmap( | |
0a7de745 A |
4109 | vm_map_t map, |
4110 | upl_t upl) | |
1c79356b | 4111 | { |
91447636 | 4112 | return vm_upl_unmap(map, upl); |
1c79356b A |
4113 | } |
4114 | ||
4115 | kern_return_t | |
4116 | kernel_upl_commit( | |
91447636 A |
4117 | upl_t upl, |
4118 | upl_page_info_t *pl, | |
0b4e3aa0 | 4119 | mach_msg_type_number_t count) |
1c79356b | 4120 | { |
0a7de745 | 4121 | kern_return_t kr; |
0b4e3aa0 A |
4122 | |
4123 | kr = upl_commit(upl, pl, count); | |
4124 | upl_deallocate(upl); | |
1c79356b A |
4125 | return kr; |
4126 | } | |
4127 | ||
0b4e3aa0 | 4128 | |
1c79356b A |
4129 | kern_return_t |
4130 | kernel_upl_commit_range( | |
0a7de745 A |
4131 | upl_t upl, |
4132 | upl_offset_t offset, | |
4133 | upl_size_t size, | |
4134 | int flags, | |
0b4e3aa0 A |
4135 | upl_page_info_array_t pl, |
4136 | mach_msg_type_number_t count) | |
1c79356b | 4137 | { |
0a7de745 A |
4138 | boolean_t finished = FALSE; |
4139 | kern_return_t kr; | |
0b4e3aa0 | 4140 | |
0a7de745 | 4141 | if (flags & UPL_COMMIT_FREE_ON_EMPTY) { |
0b4e3aa0 | 4142 | flags |= UPL_COMMIT_NOTIFY_EMPTY; |
0a7de745 | 4143 | } |
0b4e3aa0 | 4144 | |
593a1d5f A |
4145 | if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) { |
4146 | return KERN_INVALID_ARGUMENT; | |
4147 | } | |
4148 | ||
0b4e3aa0 A |
4149 | kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished); |
4150 | ||
0a7de745 | 4151 | if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) { |
0b4e3aa0 | 4152 | upl_deallocate(upl); |
0a7de745 | 4153 | } |
0b4e3aa0 | 4154 | |
1c79356b A |
4155 | return kr; |
4156 | } | |
0a7de745 | 4157 | |
1c79356b A |
4158 | kern_return_t |
4159 | kernel_upl_abort_range( | |
0a7de745 A |
4160 | upl_t upl, |
4161 | upl_offset_t offset, | |
4162 | upl_size_t size, | |
4163 | int abort_flags) | |
1c79356b | 4164 | { |
0a7de745 A |
4165 | kern_return_t kr; |
4166 | boolean_t finished = FALSE; | |
1c79356b | 4167 | |
0a7de745 | 4168 | if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) { |
0b4e3aa0 | 4169 | abort_flags |= UPL_COMMIT_NOTIFY_EMPTY; |
0a7de745 | 4170 | } |
1c79356b | 4171 | |
0b4e3aa0 | 4172 | kr = upl_abort_range(upl, offset, size, abort_flags, &finished); |
1c79356b | 4173 | |
0a7de745 | 4174 | if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) { |
0b4e3aa0 | 4175 | upl_deallocate(upl); |
0a7de745 | 4176 | } |
1c79356b | 4177 | |
0b4e3aa0 | 4178 | return kr; |
1c79356b A |
4179 | } |
4180 | ||
1c79356b | 4181 | kern_return_t |
0b4e3aa0 | 4182 | kernel_upl_abort( |
0a7de745 A |
4183 | upl_t upl, |
4184 | int abort_type) | |
1c79356b | 4185 | { |
0a7de745 | 4186 | kern_return_t kr; |
1c79356b | 4187 | |
0b4e3aa0 A |
4188 | kr = upl_abort(upl, abort_type); |
4189 | upl_deallocate(upl); | |
4190 | return kr; | |
1c79356b A |
4191 | } |
4192 | ||
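/*
 * Illustrative sketch of the FREE_ON_EMPTY convention these wrappers
 * add on top of the raw upl_* calls: once the UPL is fully committed
 * or aborted, it is deallocated on the caller's behalf.
 */
#if 0 /* example only */
	kern_return_t kr;

	kr = kernel_upl_commit_range(upl, 0, size,
	    UPL_COMMIT_FREE_ON_EMPTY, pl, count);
	/* no upl_deallocate() here: the wrapper already dropped the UPL
	 * if this commit emptied it */
#endif /* example only */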
91447636 A |
4193 | /* |
4194 | * Now a kernel-private interface (for BootCache | |
4195 | * use only). Need a cleaner way to create an | |
4196 | * empty vm_map() and return a handle to it. | |
4197 | */ | |
1c79356b A |
4198 | |
4199 | kern_return_t | |
91447636 | 4200 | vm_region_object_create( |
0a7de745 A |
4201 | __unused vm_map_t target_map, |
4202 | vm_size_t size, | |
4203 | ipc_port_t *object_handle) | |
1c79356b | 4204 | { |
0a7de745 A |
4205 | vm_named_entry_t user_entry; |
4206 | ipc_port_t user_handle; | |
4207 | ||
4208 | vm_map_t new_map; | |
1c79356b | 4209 | |
91447636 A |
4210 | if (mach_memory_entry_allocate(&user_entry, &user_handle) |
4211 | != KERN_SUCCESS) { | |
1c79356b | 4212 | return KERN_FAILURE; |
91447636 | 4213 | } |
1c79356b | 4214 | |
91447636 | 4215 | /* Create a named object based on a submap of specified size */ |
1c79356b | 4216 | |
91447636 | 4217 | new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS, |
0a7de745 A |
4218 | vm_map_round_page(size, |
4219 | VM_MAP_PAGE_MASK(target_map)), | |
4220 | TRUE); | |
39236c6e | 4221 | vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map)); |
1c79356b | 4222 | |
91447636 A |
4223 | user_entry->backing.map = new_map; |
4224 | user_entry->internal = TRUE; | |
4225 | user_entry->is_sub_map = TRUE; | |
4226 | user_entry->offset = 0; | |
4227 | user_entry->protection = VM_PROT_ALL; | |
4228 | user_entry->size = size; | |
4229 | assert(user_entry->ref_count == 1); | |
1c79356b | 4230 | |
91447636 | 4231 | *object_handle = user_handle; |
1c79356b | 4232 | return KERN_SUCCESS; |
55e303ae A |
4233 | } |
4234 | ||
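/*
 * Illustrative sketch (BootCache-style): create an empty submap-backed
 * handle that can later serve as the target of vm_map() calls.
 */
#if 0 /* example only */
	ipc_port_t      handle = IP_NULL;
	kern_return_t   kr;

	kr = vm_region_object_create(kernel_map,
	    (vm_size_t)(32 * 1024 * 1024), &handle);
#endif /* example only */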
0a7de745 A |
4235 | ppnum_t vm_map_get_phys_page( /* forward */ |
4236 | vm_map_t map, | |
4237 | vm_offset_t offset); | |
91447636 | 4238 | |
55e303ae | 4239 | ppnum_t |
1c79356b | 4240 | vm_map_get_phys_page( |
0a7de745 A |
4241 | vm_map_t map, |
4242 | vm_offset_t addr) | |
1c79356b | 4243 | { |
0a7de745 A |
4244 | vm_object_offset_t offset; |
4245 | vm_object_t object; | |
4246 | vm_map_offset_t map_offset; | |
4247 | vm_map_entry_t entry; | |
4248 | ppnum_t phys_page = 0; | |
91447636 | 4249 | |
39236c6e | 4250 | map_offset = vm_map_trunc_page(addr, PAGE_MASK); |
1c79356b A |
4251 | |
4252 | vm_map_lock(map); | |
91447636 | 4253 | while (vm_map_lookup_entry(map, map_offset, &entry)) { |
3e170ce0 | 4254 | if (VME_OBJECT(entry) == VM_OBJECT_NULL) { |
1c79356b | 4255 | vm_map_unlock(map); |
91447636 | 4256 | return (ppnum_t) 0; |
1c79356b A |
4257 | } |
4258 | if (entry->is_sub_map) { | |
0a7de745 | 4259 | vm_map_t old_map; |
3e170ce0 | 4260 | vm_map_lock(VME_SUBMAP(entry)); |
1c79356b | 4261 | old_map = map; |
3e170ce0 A |
4262 | map = VME_SUBMAP(entry); |
4263 | map_offset = (VME_OFFSET(entry) + | |
0a7de745 | 4264 | (map_offset - entry->vme_start)); |
1c79356b A |
4265 | vm_map_unlock(old_map); |
4266 | continue; | |
4267 | } | |
3e170ce0 | 4268 | if (VME_OBJECT(entry)->phys_contiguous) { |
9bccf70c A |
4269 | /* These are not standard pageable memory mappings */ |
4270 | /* If they are not present in the object they will */ | |
4271 | /* have to be picked up from the pager through the */ | |
4272 | /* fault mechanism. */ | |
3e170ce0 | 4273 | if (VME_OBJECT(entry)->vo_shadow_offset == 0) { |
9bccf70c A |
4274 | /* need to call vm_fault */ |
4275 | vm_map_unlock(map); | |
0a7de745 A |
4276 | vm_fault(map, map_offset, VM_PROT_NONE, |
4277 | FALSE /* change_wiring */, VM_KERN_MEMORY_NONE, | |
4278 | THREAD_UNINT, NULL, 0); | |
9bccf70c A |
4279 | vm_map_lock(map); |
4280 | continue; | |
4281 | } | |
3e170ce0 | 4282 | offset = (VME_OFFSET(entry) + |
0a7de745 | 4283 | (map_offset - entry->vme_start)); |
55e303ae | 4284 | phys_page = (ppnum_t) |
0a7de745 A |
4285 | ((VME_OBJECT(entry)->vo_shadow_offset |
4286 | + offset) >> PAGE_SHIFT); | |
9bccf70c | 4287 | break; |
9bccf70c | 4288 | } |
3e170ce0 A |
4289 | offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start)); |
4290 | object = VME_OBJECT(entry); | |
1c79356b A |
4291 | vm_object_lock(object); |
4292 | while (TRUE) { | |
0a7de745 A |
4293 | vm_page_t dst_page = vm_page_lookup(object, offset); |
4294 | if (dst_page == VM_PAGE_NULL) { | |
4295 | if (object->shadow) { | |
1c79356b A |
4296 | vm_object_t old_object; |
4297 | vm_object_lock(object->shadow); | |
4298 | old_object = object; | |
6d2010ae | 4299 | offset = offset + object->vo_shadow_offset; |
1c79356b A |
4300 | object = object->shadow; |
4301 | vm_object_unlock(old_object); | |
4302 | } else { | |
4303 | vm_object_unlock(object); | |
4304 | break; | |
4305 | } | |
4306 | } else { | |
39037602 | 4307 | phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)); |
1c79356b A |
4308 | vm_object_unlock(object); |
4309 | break; | |
4310 | } | |
4311 | } | |
4312 | break; | |
0a7de745 | 4313 | } |
1c79356b A |
4314 | |
4315 | vm_map_unlock(map); | |
55e303ae A |
4316 | return phys_page; |
4317 | } | |
4318 | ||
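/*
 * Illustrative sketch: translating a mapped address to a physical page
 * number; a return of 0 means no page is resident at that address.
 */
#if 0 /* example only */
	ppnum_t pn;

	pn = vm_map_get_phys_page(kernel_map, addr);
	if (pn == 0) {
		/* nothing resident there */
	}
#endif /* example only */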
3e170ce0 | 4319 | #if 0 |
0a7de745 A |
4320 | kern_return_t kernel_object_iopl_request( /* forward */ |
4321 | vm_named_entry_t named_entry, | |
4322 | memory_object_offset_t offset, | |
4323 | upl_size_t *upl_size, | |
4324 | upl_t *upl_ptr, | |
4325 | upl_page_info_array_t user_page_list, | |
4326 | unsigned int *page_list_count, | |
4327 | int *flags); | |
91447636 | 4328 | |
55e303ae A |
4329 | kern_return_t |
4330 | kernel_object_iopl_request( | |
0a7de745 A |
4331 | vm_named_entry_t named_entry, |
4332 | memory_object_offset_t offset, | |
4333 | upl_size_t *upl_size, | |
4334 | upl_t *upl_ptr, | |
4335 | upl_page_info_array_t user_page_list, | |
4336 | unsigned int *page_list_count, | |
4337 | int *flags) | |
55e303ae | 4338 | { |
0a7de745 A |
4339 | vm_object_t object; |
4340 | kern_return_t ret; | |
55e303ae | 4341 | |
0a7de745 | 4342 | int caller_flags; |
55e303ae A |
4343 | |
4344 | caller_flags = *flags; | |
4345 | ||
91447636 A |
4346 | if (caller_flags & ~UPL_VALID_FLAGS) { |
4347 | /* | |
4348 | * For forward compatibility's sake, | |
4349 | * reject any unknown flag. | |
4350 | */ | |
4351 | return KERN_INVALID_VALUE; | |
4352 | } | |
4353 | ||
55e303ae | 4354 | /* a few checks to make sure user is obeying rules */ |
0a7de745 A |
4355 | if (*upl_size == 0) { |
4356 | if (offset >= named_entry->size) { | |
4357 | return KERN_INVALID_RIGHT; | |
4358 | } | |
b0d623f7 | 4359 | *upl_size = (upl_size_t) (named_entry->size - offset); |
0a7de745 | 4360 | if (*upl_size != named_entry->size - offset) { |
b0d623f7 | 4361 | return KERN_INVALID_ARGUMENT; |
0a7de745 | 4362 | } |
55e303ae | 4363 | } |
0a7de745 A |
4364 | if (caller_flags & UPL_COPYOUT_FROM) { |
4365 | if ((named_entry->protection & VM_PROT_READ) | |
4366 | != VM_PROT_READ) { | |
4367 | return KERN_INVALID_RIGHT; | |
55e303ae A |
4368 | } |
4369 | } else { | |
0a7de745 A |
4370 | if ((named_entry->protection & |
4371 | (VM_PROT_READ | VM_PROT_WRITE)) | |
4372 | != (VM_PROT_READ | VM_PROT_WRITE)) { | |
4373 | return KERN_INVALID_RIGHT; | |
55e303ae A |
4374 | } |
4375 | } | |
0a7de745 A |
4376 | if (named_entry->size < (offset + *upl_size)) { |
4377 | return KERN_INVALID_ARGUMENT; | |
4378 | } | |
55e303ae A |
4379 | |
4380 | /* the caller's parameter "offset" is relative to the start of the */ |
4381 | /* named entry; convert it to an offset within the backing object */ |
4382 | offset = offset + named_entry->offset; | |
4383 | ||
39236c6e | 4384 | if (named_entry->is_sub_map || |
0a7de745 | 4385 | named_entry->is_copy) { |
39236c6e | 4386 | return KERN_INVALID_ARGUMENT; |
0a7de745 A |
4387 | } |
4388 | ||
55e303ae A |
4389 | named_entry_lock(named_entry); |
4390 | ||
5ba3f43e A |
4391 | /* This is the case where we are going to operate */ |
4392 | /* on an already known object. If the object is */ | |
4393 | /* not ready it is internal. An external */ | |
4394 | /* object cannot be mapped until it is ready */ | |
4395 | /* we can therefore avoid the ready check */ | |
4396 | /* in this case. */ | |
4397 | object = named_entry->backing.object; | |
4398 | vm_object_reference(object); | |
4399 | named_entry_unlock(named_entry); | |
55e303ae A |
4400 | |
4401 | if (!object->private) { | |
0a7de745 | 4402 | if (*upl_size > MAX_UPL_TRANSFER_BYTES) { |
fe8ab488 | 4403 | *upl_size = MAX_UPL_TRANSFER_BYTES; |
0a7de745 | 4404 | } |
55e303ae A |
4405 | if (object->phys_contiguous) { |
4406 | *flags = UPL_PHYS_CONTIG; | |
4407 | } else { | |
4408 | *flags = 0; | |
4409 | } | |
4410 | } else { | |
4411 | *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG; | |
4412 | } | |
4413 | ||
4414 | ret = vm_object_iopl_request(object, | |
0a7de745 A |
4415 | offset, |
4416 | *upl_size, | |
4417 | upl_ptr, | |
4418 | user_page_list, | |
4419 | page_list_count, | |
4420 | (upl_control_flags_t)(unsigned int)caller_flags); | |
55e303ae A |
4421 | vm_object_deallocate(object); |
4422 | return ret; | |
1c79356b | 4423 | } |
3e170ce0 | 4424 | #endif |
5ba3f43e A |
4425 | |
4426 | /* | |
4427 | * These symbols are looked up at runtime by VMware and VirtualBox, | |
4428 | * despite not being exported in the symbol sets. | |
4429 | */ | |
4430 | ||
4431 | #if defined(__x86_64__) | |
4432 | ||
4433 | kern_return_t | |
4434 | mach_vm_map( | |
0a7de745 A |
4435 | vm_map_t target_map, |
4436 | mach_vm_offset_t *address, | |
4437 | mach_vm_size_t initial_size, | |
4438 | mach_vm_offset_t mask, | |
4439 | int flags, | |
4440 | ipc_port_t port, | |
4441 | vm_object_offset_t offset, | |
4442 | boolean_t copy, | |
4443 | vm_prot_t cur_protection, | |
4444 | vm_prot_t max_protection, | |
4445 | vm_inherit_t inheritance); | |
5ba3f43e A |
4446 | |
4447 | kern_return_t | |
4448 | mach_vm_remap( | |
0a7de745 A |
4449 | vm_map_t target_map, |
4450 | mach_vm_offset_t *address, | |
4451 | mach_vm_size_t size, | |
4452 | mach_vm_offset_t mask, | |
4453 | int flags, | |
4454 | vm_map_t src_map, | |
4455 | mach_vm_offset_t memory_address, | |
4456 | boolean_t copy, | |
4457 | vm_prot_t *cur_protection, | |
4458 | vm_prot_t *max_protection, | |
4459 | vm_inherit_t inheritance); | |
5ba3f43e A |
4460 | |
4461 | kern_return_t | |
4462 | mach_vm_map( | |
0a7de745 A |
4463 | vm_map_t target_map, |
4464 | mach_vm_offset_t *address, | |
4465 | mach_vm_size_t initial_size, | |
4466 | mach_vm_offset_t mask, | |
4467 | int flags, | |
4468 | ipc_port_t port, | |
4469 | vm_object_offset_t offset, | |
4470 | boolean_t copy, | |
4471 | vm_prot_t cur_protection, | |
4472 | vm_prot_t max_protection, | |
4473 | vm_inherit_t inheritance) | |
5ba3f43e | 4474 | { |
0a7de745 A |
4475 | return mach_vm_map_external(target_map, address, initial_size, mask, flags, port, |
4476 | offset, copy, cur_protection, max_protection, inheritance); | |
5ba3f43e A |
4477 | } |
4478 | ||
4479 | kern_return_t | |
4480 | mach_vm_remap( | |
0a7de745 A |
4481 | vm_map_t target_map, |
4482 | mach_vm_offset_t *address, | |
4483 | mach_vm_size_t size, | |
4484 | mach_vm_offset_t mask, | |
4485 | int flags, | |
4486 | vm_map_t src_map, | |
4487 | mach_vm_offset_t memory_address, | |
4488 | boolean_t copy, | |
4489 | vm_prot_t *cur_protection, | |
4490 | vm_prot_t *max_protection, | |
4491 | vm_inherit_t inheritance) | |
5ba3f43e | 4492 | { |
0a7de745 A |
4493 | return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address, |
4494 | copy, cur_protection, max_protection, inheritance); | |
5ba3f43e A |
4495 | } |
4496 | ||
4497 | kern_return_t | |
4498 | vm_map( | |
0a7de745 A |
4499 | vm_map_t target_map, |
4500 | vm_offset_t *address, | |
4501 | vm_size_t size, | |
4502 | vm_offset_t mask, | |
4503 | int flags, | |
4504 | ipc_port_t port, | |
4505 | vm_offset_t offset, | |
4506 | boolean_t copy, | |
4507 | vm_prot_t cur_protection, | |
4508 | vm_prot_t max_protection, | |
4509 | vm_inherit_t inheritance); | |
5ba3f43e A |
4510 | |
4511 | kern_return_t | |
4512 | vm_map( | |
0a7de745 A |
4513 | vm_map_t target_map, |
4514 | vm_offset_t *address, | |
4515 | vm_size_t size, | |
4516 | vm_offset_t mask, | |
4517 | int flags, | |
4518 | ipc_port_t port, | |
4519 | vm_offset_t offset, | |
4520 | boolean_t copy, | |
4521 | vm_prot_t cur_protection, | |
4522 | vm_prot_t max_protection, | |
4523 | vm_inherit_t inheritance) | |
5ba3f43e A |
4524 | { |
4525 | vm_tag_t tag; | |
4526 | ||
4527 | VM_GET_FLAGS_ALIAS(flags, tag); | |
d9a64523 | 4528 | return vm_map_kernel(target_map, address, size, mask, |
0a7de745 A |
4529 | flags, VM_MAP_KERNEL_FLAGS_NONE, tag, |
4530 | port, offset, copy, | |
4531 | cur_protection, max_protection, inheritance); | |
5ba3f43e A |
4532 | } |
4533 | ||
4534 | #endif /* __x86_64__ */ |