/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    vm/vm_user.c
 * Author:  Avadis Tevanian, Jr., Michael Wayne Young
 *
 * User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can be either 32-bit or 64-bit,
 * or the kernel task can be 32-bit or 64-bit. mach_vm_allocate makes sense
 * everywhere, and is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly, because
 * the calling task and kernel_task are unlikely to use the same size
 * parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */
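
/*
 * Illustrative user-space sketch (not part of this file's build): the
 * preferred "wide" interface from <mach/mach_vm.h>, which reaches
 * subsystem 4800 regardless of the caller's pointer size. The
 * mach_vm_allocate()/mach_vm_deallocate() signatures and flags are the
 * standard user-level ones; the 16KB size is arbitrary.
 *
 *    #include <mach/mach.h>
 *    #include <mach/mach_vm.h>
 *
 *    mach_vm_address_t addr = 0;
 *    kern_return_t kr;
 *
 *    kr = mach_vm_allocate(mach_task_self(), &addr, 0x4000,
 *        VM_FLAGS_ANYWHERE);
 *    if (kr == KERN_SUCCESS) {
 *        ((char *)(uintptr_t)addr)[0] = 1;   // pages arrive zero-filled
 *        mach_vm_deallocate(mach_task_self(), addr, 0x4000);
 *    }
 */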

#include <debug.h>

#include <vm_cpm.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>    /* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>     /* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>
#include <mach/sdt.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/memory_entry_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_init.h>

#include <san/kasan.h>

#include <libkern/OSDebug.h>
#include <IOKit/IOBSD.h>

vm_size_t upl_offset_to_pagelist = 0;

#if VM_CPM
#include <vm/cpm.h>
#endif  /* VM_CPM */

/*
 * mach_vm_allocate allocates "zero fill" memory in the specified
 * map.
 */
kern_return_t
mach_vm_allocate_external(
        vm_map_t map,
        mach_vm_offset_t *addr,
        mach_vm_size_t size,
        int flags)
{
        vm_tag_t tag;

        VM_GET_FLAGS_ALIAS(flags, tag);
        return mach_vm_allocate_kernel(map, addr, size, flags, tag);
}

kern_return_t
mach_vm_allocate_kernel(
        vm_map_t map,
        mach_vm_offset_t *addr,
        mach_vm_size_t size,
        int flags,
        vm_tag_t tag)
{
        vm_map_offset_t map_addr;
        vm_map_size_t map_size;
        kern_return_t result;
        boolean_t anywhere;

        /* filter out any kernel-only flags */
        if (flags & ~VM_FLAGS_USER_ALLOCATE) {
                return KERN_INVALID_ARGUMENT;
        }

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        if (size == 0) {
                *addr = 0;
                return KERN_SUCCESS;
        }

        anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
        if (anywhere) {
                /*
                 * No specific address requested, so start candidate address
                 * search at the minimum address in the map.  However, if that
                 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
                 * allocations of PAGEZERO to explicit requests since its
                 * normal use is to catch dereferences of NULL and many
                 * applications also treat pointers with a value of 0 as
                 * special and suddenly having address 0 contain usable
                 * memory would tend to confuse those applications.
                 */
                map_addr = vm_map_min(map);
                if (map_addr == 0) {
                        map_addr += VM_MAP_PAGE_SIZE(map);
                }
        } else {
                map_addr = vm_map_trunc_page(*addr,
                    VM_MAP_PAGE_MASK(map));
        }
        map_size = vm_map_round_page(size,
            VM_MAP_PAGE_MASK(map));
        if (map_size == 0) {
                return KERN_INVALID_ARGUMENT;
        }

        result = vm_map_enter(
                map,
                &map_addr,
                map_size,
                (vm_map_offset_t)0,
                flags,
                VM_MAP_KERNEL_FLAGS_NONE,
                tag,
                VM_OBJECT_NULL,
                (vm_object_offset_t)0,
                FALSE,
                VM_PROT_DEFAULT,
                VM_PROT_ALL,
                VM_INHERIT_DEFAULT);

        *addr = map_addr;
        return result;
}

/*
 * vm_allocate
 * Legacy routine that allocates "zero fill" memory in the specified
 * map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate_external(
        vm_map_t map,
        vm_offset_t *addr,
        vm_size_t size,
        int flags)
{
        vm_tag_t tag;

        VM_GET_FLAGS_ALIAS(flags, tag);
        return vm_allocate_kernel(map, addr, size, flags, tag);
}

kern_return_t
vm_allocate_kernel(
        vm_map_t map,
        vm_offset_t *addr,
        vm_size_t size,
        int flags,
        vm_tag_t tag)
{
        vm_map_offset_t map_addr;
        vm_map_size_t map_size;
        kern_return_t result;
        boolean_t anywhere;

        /* filter out any kernel-only flags */
        if (flags & ~VM_FLAGS_USER_ALLOCATE) {
                return KERN_INVALID_ARGUMENT;
        }

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
        if (size == 0) {
                *addr = 0;
                return KERN_SUCCESS;
        }

        anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
        if (anywhere) {
                /*
                 * No specific address requested, so start candidate address
                 * search at the minimum address in the map.  However, if that
                 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
                 * allocations of PAGEZERO to explicit requests since its
                 * normal use is to catch dereferences of NULL and many
                 * applications also treat pointers with a value of 0 as
                 * special and suddenly having address 0 contain usable
                 * memory would tend to confuse those applications.
                 */
                map_addr = vm_map_min(map);
                if (map_addr == 0) {
                        map_addr += VM_MAP_PAGE_SIZE(map);
                }
        } else {
                map_addr = vm_map_trunc_page(*addr,
                    VM_MAP_PAGE_MASK(map));
        }
        map_size = vm_map_round_page(size,
            VM_MAP_PAGE_MASK(map));
        if (map_size == 0) {
                return KERN_INVALID_ARGUMENT;
        }

        result = vm_map_enter(
                map,
                &map_addr,
                map_size,
                (vm_map_offset_t)0,
                flags,
                VM_MAP_KERNEL_FLAGS_NONE,
                tag,
                VM_OBJECT_NULL,
                (vm_object_offset_t)0,
                FALSE,
                VM_PROT_DEFAULT,
                VM_PROT_ALL,
                VM_INHERIT_DEFAULT);

#if KASAN
        if (result == KERN_SUCCESS && map->pmap == kernel_pmap) {
                kasan_notify_address(map_addr, map_size);
        }
#endif

        *addr = CAST_DOWN(vm_offset_t, map_addr);
        return result;
}

/*
 * mach_vm_deallocate -
 * deallocates the specified range of addresses in the
 * specified address map.
 */
kern_return_t
mach_vm_deallocate(
        vm_map_t map,
        mach_vm_offset_t start,
        mach_vm_size_t size)
{
        if ((map == VM_MAP_NULL) || (start + size < start)) {
                return KERN_INVALID_ARGUMENT;
        }

        if (size == (mach_vm_offset_t) 0) {
                return KERN_SUCCESS;
        }

        return vm_map_remove(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            VM_MAP_REMOVE_NO_FLAGS);
}

/*
 * vm_deallocate -
 * deallocates the specified range of addresses in the
 * specified address map (limited to addresses the same
 * size as the kernel).
 */
kern_return_t
vm_deallocate(
        vm_map_t map,
        vm_offset_t start,
        vm_size_t size)
{
        if ((map == VM_MAP_NULL) || (start + size < start)) {
                return KERN_INVALID_ARGUMENT;
        }

        if (size == (vm_offset_t) 0) {
                return KERN_SUCCESS;
        }

        return vm_map_remove(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            VM_MAP_REMOVE_NO_FLAGS);
}

/*
 * mach_vm_inherit -
 * Sets the inheritance of the specified range in the
 * specified map.
 */
kern_return_t
mach_vm_inherit(
        vm_map_t map,
        mach_vm_offset_t start,
        mach_vm_size_t size,
        vm_inherit_t new_inheritance)
{
        if ((map == VM_MAP_NULL) || (start + size < start) ||
            (new_inheritance > VM_INHERIT_LAST_VALID)) {
                return KERN_INVALID_ARGUMENT;
        }

        if (size == 0) {
                return KERN_SUCCESS;
        }

        return vm_map_inherit(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            new_inheritance);
}
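
/*
 * Usage sketch (user space, hedged): marking a range VM_INHERIT_NONE so
 * that a subsequently fork()ed child sees an unallocated hole rather
 * than a copy of the pages. "addr" and "size" are assumed to describe
 * an existing allocation in the caller's map.
 *
 *    kr = mach_vm_inherit(mach_task_self(), addr, size,
 *        VM_INHERIT_NONE);
 */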

/*
 * vm_inherit -
 * Sets the inheritance of the specified range in the
 * specified map (range limited to addresses the same
 * size as the kernel).
 */
kern_return_t
vm_inherit(
        vm_map_t map,
        vm_offset_t start,
        vm_size_t size,
        vm_inherit_t new_inheritance)
{
        if ((map == VM_MAP_NULL) || (start + size < start) ||
            (new_inheritance > VM_INHERIT_LAST_VALID)) {
                return KERN_INVALID_ARGUMENT;
        }

        if (size == 0) {
                return KERN_SUCCESS;
        }

        return vm_map_inherit(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            new_inheritance);
}

/*
 * mach_vm_protect -
 * Sets the protection of the specified range in the
 * specified map.
 */

kern_return_t
mach_vm_protect(
        vm_map_t map,
        mach_vm_offset_t start,
        mach_vm_size_t size,
        boolean_t set_maximum,
        vm_prot_t new_protection)
{
        if ((map == VM_MAP_NULL) || (start + size < start) ||
            (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
                return KERN_INVALID_ARGUMENT;
        }

        if (size == 0) {
                return KERN_SUCCESS;
        }

        return vm_map_protect(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            new_protection,
            set_maximum);
}
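
/*
 * Usage sketch (user space): dropping a region to read-only. Passing
 * set_maximum = TRUE would instead lower the maximum protection, which
 * can never be raised back afterwards.
 *
 *    kr = mach_vm_protect(mach_task_self(), addr, size,
 *        FALSE, VM_PROT_READ);
 */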

/*
 * vm_protect -
 * Sets the protection of the specified range in the
 * specified map. Addressability of the range limited
 * to the same size as the kernel.
 */

kern_return_t
vm_protect(
        vm_map_t map,
        vm_offset_t start,
        vm_size_t size,
        boolean_t set_maximum,
        vm_prot_t new_protection)
{
        if ((map == VM_MAP_NULL) || (start + size < start) ||
            (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) {
                return KERN_INVALID_ARGUMENT;
        }

        if (size == 0) {
                return KERN_SUCCESS;
        }

        return vm_map_protect(map,
            vm_map_trunc_page(start,
            VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(start + size,
            VM_MAP_PAGE_MASK(map)),
            new_protection,
            set_maximum);
}

/*
 * mach_vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cacheability, migrability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
        vm_map_t map,
        mach_vm_address_t addr,
        mach_vm_size_t size,
        vm_machine_attribute_t attribute,
        vm_machine_attribute_val_t *value)      /* IN/OUT */
{
        if ((map == VM_MAP_NULL) || (addr + size < addr)) {
                return KERN_INVALID_ARGUMENT;
        }

        if (size == 0) {
                return KERN_SUCCESS;
        }

        return vm_map_machine_attribute(
                map,
                vm_map_trunc_page(addr,
                VM_MAP_PAGE_MASK(map)),
                vm_map_round_page(addr + size,
                VM_MAP_PAGE_MASK(map)),
                attribute,
                value);
}

/*
 * vm_machine_attribute -
 * Handle machine-specific attributes for a mapping, such
 * as cacheability, migrability, etc. Limited addressability
 * (same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
        vm_map_t map,
        vm_address_t addr,
        vm_size_t size,
        vm_machine_attribute_t attribute,
        vm_machine_attribute_val_t *value)      /* IN/OUT */
{
        if ((map == VM_MAP_NULL) || (addr + size < addr)) {
                return KERN_INVALID_ARGUMENT;
        }

        if (size == 0) {
                return KERN_SUCCESS;
        }

        return vm_map_machine_attribute(
                map,
                vm_map_trunc_page(addr,
                VM_MAP_PAGE_MASK(map)),
                vm_map_round_page(addr + size,
                VM_MAP_PAGE_MASK(map)),
                attribute,
                value);
}

/*
 * mach_vm_read -
 * Read/copy a range from one address space and return it to the caller.
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 *
 * JMM - because of mach_msg_type_number_t, this call is limited to a
 * single 4GB region at this time.
 */
kern_return_t
mach_vm_read(
        vm_map_t map,
        mach_vm_address_t addr,
        mach_vm_size_t size,
        pointer_t *data,
        mach_msg_type_number_t *data_size)
{
        kern_return_t error;
        vm_map_copy_t ipc_address;

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        if ((mach_msg_type_number_t) size != size) {
                return KERN_INVALID_ARGUMENT;
        }

        error = vm_map_copyin(map,
            (vm_map_address_t)addr,
            (vm_map_size_t)size,
            FALSE,      /* src_destroy */
            &ipc_address);

        if (KERN_SUCCESS == error) {
                *data = (pointer_t) ipc_address;
                *data_size = (mach_msg_type_number_t) size;
                assert(*data_size == size);
        }
        return error;
}
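
/*
 * Usage sketch (user space): "task" is assumed to be a send right to
 * the target task, and "remote_addr"/"len" are assumptions. The reply
 * message materializes the copy in the caller's map, so the returned
 * buffer must be deallocated when done.
 *
 *    vm_offset_t data;
 *    mach_msg_type_number_t data_cnt;
 *
 *    kr = mach_vm_read(task, remote_addr, len, &data, &data_cnt);
 *    if (kr == KERN_SUCCESS) {
 *        // ... use (const void *)data, data_cnt bytes ...
 *        mach_vm_deallocate(mach_task_self(), data, data_cnt);
 *    }
 */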

/*
 * vm_read -
 * Read/copy a range from one address space and return it to the caller.
 * Limited addressability (same range limits as for the native kernel map).
 *
 * It is assumed that the address for the returned memory is selected by
 * the IPC implementation as part of receiving the reply to this call.
 * If IPC isn't used, the caller must deal with the vm_map_copy_t object
 * that gets returned.
 */
kern_return_t
vm_read(
        vm_map_t map,
        vm_address_t addr,
        vm_size_t size,
        pointer_t *data,
        mach_msg_type_number_t *data_size)
{
        kern_return_t error;
        vm_map_copy_t ipc_address;

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        mach_msg_type_number_t dsize;
        if (os_convert_overflow(size, &dsize)) {
                /*
                 * The kernel could handle a 64-bit "size" value, but
                 * it could not return the size of the data in "*data_size"
                 * without overflowing.
                 * Let's reject this "size" as invalid.
                 */
                return KERN_INVALID_ARGUMENT;
        }

        error = vm_map_copyin(map,
            (vm_map_address_t)addr,
            (vm_map_size_t)size,
            FALSE,      /* src_destroy */
            &ipc_address);

        if (KERN_SUCCESS == error) {
                *data = (pointer_t) ipc_address;
                *data_size = dsize;
                assert(*data_size == size);
        }
        return error;
}

/*
 * mach_vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 */
kern_return_t
mach_vm_read_list(
        vm_map_t map,
        mach_vm_read_entry_t data_list,
        natural_t count)
{
        mach_msg_type_number_t i;
        kern_return_t error;
        vm_map_copy_t copy;

        if (map == VM_MAP_NULL ||
            count > VM_MAP_ENTRY_MAX) {
                return KERN_INVALID_ARGUMENT;
        }

        error = KERN_SUCCESS;
        for (i = 0; i < count; i++) {
                vm_map_address_t map_addr;
                vm_map_size_t map_size;

                map_addr = (vm_map_address_t)(data_list[i].address);
                map_size = (vm_map_size_t)(data_list[i].size);

                if (map_size != 0) {
                        error = vm_map_copyin(map,
                            map_addr,
                            map_size,
                            FALSE,      /* src_destroy */
                            &copy);
                        if (KERN_SUCCESS == error) {
                                error = vm_map_copyout(
                                        current_task()->map,
                                        &map_addr,
                                        copy);
                                if (KERN_SUCCESS == error) {
                                        data_list[i].address = map_addr;
                                        continue;
                                }
                                vm_map_copy_discard(copy);
                        }
                }
                data_list[i].address = (mach_vm_address_t)0;
                data_list[i].size = (mach_vm_size_t)0;
        }
        return error;
}

/*
 * vm_read_list -
 * Read/copy a list of address ranges from specified map.
 *
 * MIG does not know how to deal with a returned array of
 * vm_map_copy_t structures, so we have to do the copyout
 * manually here.
 *
 * The source and destination ranges are limited to those
 * that can be described with a vm_address_t (i.e. same
 * size map as the kernel).
 *
 * JMM - If the result of the copyout is an address range
 * that cannot be described with a vm_address_t (i.e. the
 * caller had a larger address space but used this call
 * anyway), it will result in a truncated address being
 * returned (and a likely confused caller).
 */

kern_return_t
vm_read_list(
        vm_map_t map,
        vm_read_entry_t data_list,
        natural_t count)
{
        mach_msg_type_number_t i;
        kern_return_t error;
        vm_map_copy_t copy;

        if (map == VM_MAP_NULL ||
            count > VM_MAP_ENTRY_MAX) {
                return KERN_INVALID_ARGUMENT;
        }

        error = KERN_SUCCESS;
        for (i = 0; i < count; i++) {
                vm_map_address_t map_addr;
                vm_map_size_t map_size;

                map_addr = (vm_map_address_t)(data_list[i].address);
                map_size = (vm_map_size_t)(data_list[i].size);

                if (map_size != 0) {
                        error = vm_map_copyin(map,
                            map_addr,
                            map_size,
                            FALSE,      /* src_destroy */
                            &copy);
                        if (KERN_SUCCESS == error) {
                                error = vm_map_copyout(current_task()->map,
                                    &map_addr,
                                    copy);
                                if (KERN_SUCCESS == error) {
                                        data_list[i].address =
                                            CAST_DOWN(vm_offset_t, map_addr);
                                        continue;
                                }
                                vm_map_copy_discard(copy);
                        }
                }
                data_list[i].address = (mach_vm_address_t)0;
                data_list[i].size = (mach_vm_size_t)0;
        }
        return error;
}

/*
 * mach_vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * In making an assumption that the current thread is local, it is
 * no longer cluster-safe without a fully supportive local proxy
 * thread/task (but we don't support clusters anymore so this is moot).
 */

kern_return_t
mach_vm_read_overwrite(
        vm_map_t map,
        mach_vm_address_t address,
        mach_vm_size_t size,
        mach_vm_address_t data,
        mach_vm_size_t *data_size)
{
        kern_return_t error;
        vm_map_copy_t copy;

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        error = vm_map_copyin(map, (vm_map_address_t)address,
            (vm_map_size_t)size, FALSE, &copy);

        if (KERN_SUCCESS == error) {
                if (copy) {
                        assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
                }

                error = vm_map_copy_overwrite(current_thread()->map,
                    (vm_map_address_t)data,
                    copy, (vm_map_size_t) size, FALSE);
                if (KERN_SUCCESS == error) {
                        *data_size = size;
                        return error;
                }
                vm_map_copy_discard(copy);
        }
        return error;
}

/*
 * vm_read_overwrite -
 * Overwrite a range of the current map with data from the specified
 * map/address range.
 *
 * This routine adds the additional limitation that the source and
 * destination ranges must be describable with vm_address_t values
 * (i.e. the same size address spaces as the kernel, or at least the
 * ranges are in that first portion of the respective address spaces).
 */

kern_return_t
vm_read_overwrite(
        vm_map_t map,
        vm_address_t address,
        vm_size_t size,
        vm_address_t data,
        vm_size_t *data_size)
{
        kern_return_t error;
        vm_map_copy_t copy;

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        error = vm_map_copyin(map, (vm_map_address_t)address,
            (vm_map_size_t)size, FALSE, &copy);

        if (KERN_SUCCESS == error) {
                if (copy) {
                        assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
                }

                error = vm_map_copy_overwrite(current_thread()->map,
                    (vm_map_address_t)data,
                    copy, (vm_map_size_t) size, FALSE);
                if (KERN_SUCCESS == error) {
                        *data_size = size;
                        return error;
                }
                vm_map_copy_discard(copy);
        }
        return error;
}


/*
 * mach_vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 */
kern_return_t
mach_vm_write(
        vm_map_t map,
        mach_vm_address_t address,
        pointer_t data,
        mach_msg_type_number_t size)
{
        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        return vm_map_copy_overwrite(map, (vm_map_address_t)address,
            (vm_map_copy_t) data, size, FALSE /* interruptible XXX */);
}
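
/*
 * Usage sketch (user space): the "data" argument is the address of a
 * buffer in the caller's own map; MIG converts it into the
 * vm_map_copy_t consumed above. "task" and "remote_addr" are assumed.
 *
 *    char buf[128];
 *    memset(buf, 0xAA, sizeof(buf));
 *    kr = mach_vm_write(task, remote_addr,
 *        (vm_offset_t)buf, sizeof(buf));
 */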

/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
        vm_map_t map,
        vm_address_t address,
        pointer_t data,
        mach_msg_type_number_t size)
{
        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        return vm_map_copy_overwrite(map, (vm_map_address_t)address,
            (vm_map_copy_t) data, size, FALSE /* interruptible XXX */);
}

/*
 * mach_vm_copy -
 * Overwrite one range of the specified map with the contents of
 * another range within that same map (i.e. both address ranges
 * are "over there").
 */
kern_return_t
mach_vm_copy(
        vm_map_t map,
        mach_vm_address_t source_address,
        mach_vm_size_t size,
        mach_vm_address_t dest_address)
{
        vm_map_copy_t copy;
        kern_return_t kr;

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        kr = vm_map_copyin(map, (vm_map_address_t)source_address,
            (vm_map_size_t)size, FALSE, &copy);

        if (KERN_SUCCESS == kr) {
                if (copy) {
                        assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
                }

                kr = vm_map_copy_overwrite(map,
                    (vm_map_address_t)dest_address,
                    copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);

                if (KERN_SUCCESS != kr) {
                        vm_map_copy_discard(copy);
                }
        }
        return kr;
}

kern_return_t
vm_copy(
        vm_map_t map,
        vm_address_t source_address,
        vm_size_t size,
        vm_address_t dest_address)
{
        vm_map_copy_t copy;
        kern_return_t kr;

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        kr = vm_map_copyin(map, (vm_map_address_t)source_address,
            (vm_map_size_t)size, FALSE, &copy);

        if (KERN_SUCCESS == kr) {
                if (copy) {
                        assertf(copy->size == (vm_map_size_t) size, "Req size: 0x%llx, Copy size: 0x%llx\n", (uint64_t) size, (uint64_t) copy->size);
                }

                kr = vm_map_copy_overwrite(map,
                    (vm_map_address_t)dest_address,
                    copy, (vm_map_size_t) size, FALSE /* interruptible XXX */);

                if (KERN_SUCCESS != kr) {
                        vm_map_copy_discard(copy);
                }
        }
        return kr;
}

/*
 * mach_vm_map -
 * Map some range of an object into an address space.
 *
 * The object can be one of several types of objects:
 *    NULL - anonymous memory
 *    a named entry - a range within another address space
 *                    or a range within a memory object
 *    a whole memory object
 */
kern_return_t
mach_vm_map_external(
        vm_map_t target_map,
        mach_vm_offset_t *address,
        mach_vm_size_t initial_size,
        mach_vm_offset_t mask,
        int flags,
        ipc_port_t port,
        vm_object_offset_t offset,
        boolean_t copy,
        vm_prot_t cur_protection,
        vm_prot_t max_protection,
        vm_inherit_t inheritance)
{
        vm_tag_t tag;

        VM_GET_FLAGS_ALIAS(flags, tag);
        return mach_vm_map_kernel(target_map, address, initial_size, mask,
                   flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
                   port, offset, copy,
                   cur_protection, max_protection,
                   inheritance);
}

kern_return_t
mach_vm_map_kernel(
        vm_map_t target_map,
        mach_vm_offset_t *address,
        mach_vm_size_t initial_size,
        mach_vm_offset_t mask,
        int flags,
        vm_map_kernel_flags_t vmk_flags,
        vm_tag_t tag,
        ipc_port_t port,
        vm_object_offset_t offset,
        boolean_t copy,
        vm_prot_t cur_protection,
        vm_prot_t max_protection,
        vm_inherit_t inheritance)
{
        kern_return_t kr;
        vm_map_offset_t vmmaddr;

        vmmaddr = (vm_map_offset_t) *address;

        /* filter out any kernel-only flags */
        if (flags & ~VM_FLAGS_USER_MAP) {
                return KERN_INVALID_ARGUMENT;
        }

        kr = vm_map_enter_mem_object(target_map,
            &vmmaddr,
            initial_size,
            mask,
            flags,
            vmk_flags,
            tag,
            port,
            offset,
            copy,
            cur_protection,
            max_protection,
            inheritance);

#if KASAN
        if (kr == KERN_SUCCESS && target_map->pmap == kernel_pmap) {
                kasan_notify_address(vmmaddr, initial_size);
        }
#endif

        *address = vmmaddr;
        return kr;
}
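
/*
 * Usage sketch (user space): mapping a named entry, previously created
 * with mach_make_memory_entry_64(), into the caller's own map.
 * "mem_entry" and "entry_size" are assumed to come from that earlier
 * call.
 *
 *    mach_vm_address_t where = 0;
 *    kr = mach_vm_map(mach_task_self(), &where, entry_size, 0,
 *        VM_FLAGS_ANYWHERE, mem_entry, 0, FALSE,
 *        VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
 */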


/* legacy interface */
kern_return_t
vm_map_64_external(
        vm_map_t target_map,
        vm_offset_t *address,
        vm_size_t size,
        vm_offset_t mask,
        int flags,
        ipc_port_t port,
        vm_object_offset_t offset,
        boolean_t copy,
        vm_prot_t cur_protection,
        vm_prot_t max_protection,
        vm_inherit_t inheritance)
{
        vm_tag_t tag;

        VM_GET_FLAGS_ALIAS(flags, tag);
        return vm_map_64_kernel(target_map, address, size, mask,
                   flags, VM_MAP_KERNEL_FLAGS_NONE,
                   tag, port, offset, copy,
                   cur_protection, max_protection,
                   inheritance);
}

kern_return_t
vm_map_64_kernel(
        vm_map_t target_map,
        vm_offset_t *address,
        vm_size_t size,
        vm_offset_t mask,
        int flags,
        vm_map_kernel_flags_t vmk_flags,
        vm_tag_t tag,
        ipc_port_t port,
        vm_object_offset_t offset,
        boolean_t copy,
        vm_prot_t cur_protection,
        vm_prot_t max_protection,
        vm_inherit_t inheritance)
{
        mach_vm_address_t map_addr;
        mach_vm_size_t map_size;
        mach_vm_offset_t map_mask;
        kern_return_t kr;

        map_addr = (mach_vm_address_t)*address;
        map_size = (mach_vm_size_t)size;
        map_mask = (mach_vm_offset_t)mask;

        kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
            flags, vmk_flags, tag,
            port, offset, copy,
            cur_protection, max_protection, inheritance);
        *address = CAST_DOWN(vm_offset_t, map_addr);
        return kr;
}

/* temporary, until world build */
kern_return_t
vm_map_external(
        vm_map_t target_map,
        vm_offset_t *address,
        vm_size_t size,
        vm_offset_t mask,
        int flags,
        ipc_port_t port,
        vm_offset_t offset,
        boolean_t copy,
        vm_prot_t cur_protection,
        vm_prot_t max_protection,
        vm_inherit_t inheritance)
{
        vm_tag_t tag;

        VM_GET_FLAGS_ALIAS(flags, tag);
        return vm_map_kernel(target_map, address, size, mask,
                   flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
                   port, offset, copy,
                   cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map_kernel(
        vm_map_t target_map,
        vm_offset_t *address,
        vm_size_t size,
        vm_offset_t mask,
        int flags,
        vm_map_kernel_flags_t vmk_flags,
        vm_tag_t tag,
        ipc_port_t port,
        vm_offset_t offset,
        boolean_t copy,
        vm_prot_t cur_protection,
        vm_prot_t max_protection,
        vm_inherit_t inheritance)
{
        mach_vm_address_t map_addr;
        mach_vm_size_t map_size;
        mach_vm_offset_t map_mask;
        vm_object_offset_t obj_offset;
        kern_return_t kr;

        map_addr = (mach_vm_address_t)*address;
        map_size = (mach_vm_size_t)size;
        map_mask = (mach_vm_offset_t)mask;
        obj_offset = (vm_object_offset_t)offset;

        kr = mach_vm_map_kernel(target_map, &map_addr, map_size, map_mask,
            flags, vmk_flags, tag,
            port, obj_offset, copy,
            cur_protection, max_protection, inheritance);
        *address = CAST_DOWN(vm_offset_t, map_addr);
        return kr;
}

/*
 * mach_vm_remap_new -
 * Behaves like mach_vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
mach_vm_remap_new_external(
        vm_map_t target_map,
        mach_vm_offset_t *address,
        mach_vm_size_t size,
        mach_vm_offset_t mask,
        int flags,
        mach_port_t src_tport,
        mach_vm_offset_t memory_address,
        boolean_t copy,
        vm_prot_t *cur_protection,      /* IN/OUT */
        vm_prot_t *max_protection,      /* IN/OUT */
        vm_inherit_t inheritance)
{
        vm_tag_t tag;
        vm_map_offset_t map_addr;
        kern_return_t kr;
        vm_map_t src_map;

        flags |= VM_FLAGS_RETURN_DATA_ADDR;
        VM_GET_FLAGS_ALIAS(flags, tag);

        /* filter out any kernel-only flags */
        if (flags & ~VM_FLAGS_USER_REMAP) {
                return KERN_INVALID_ARGUMENT;
        }

        if (target_map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        if ((*cur_protection & ~VM_PROT_ALL) ||
            (*max_protection & ~VM_PROT_ALL) ||
            (*cur_protection & *max_protection) != *cur_protection) {
                return KERN_INVALID_ARGUMENT;
        }
        if ((*max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
            (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
                /*
                 * XXX FBDP TODO
                 * enforce target's "wx" policies
                 */
                return KERN_PROTECTION_FAILURE;
        }

        if (copy || *max_protection == VM_PROT_READ || *max_protection == VM_PROT_NONE) {
                src_map = convert_port_to_map_read(src_tport);
        } else {
                src_map = convert_port_to_map(src_tport);
        }

        if (src_map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        map_addr = (vm_map_offset_t)*address;

        kr = vm_map_remap(target_map,
            &map_addr,
            size,
            mask,
            flags,
            VM_MAP_KERNEL_FLAGS_NONE,
            tag,
            src_map,
            memory_address,
            copy,
            cur_protection,     /* IN/OUT */
            max_protection,     /* IN/OUT */
            inheritance);

        *address = map_addr;
        vm_map_deallocate(src_map);

        if (kr == KERN_SUCCESS) {
                ipc_port_release_send(src_tport);       /* consume on success */
        }
        return kr;
}

/*
 * mach_vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 */
kern_return_t
mach_vm_remap_external(
        vm_map_t target_map,
        mach_vm_offset_t *address,
        mach_vm_size_t size,
        mach_vm_offset_t mask,
        int flags,
        vm_map_t src_map,
        mach_vm_offset_t memory_address,
        boolean_t copy,
        vm_prot_t *cur_protection,      /* OUT */
        vm_prot_t *max_protection,      /* OUT */
        vm_inherit_t inheritance)
{
        vm_tag_t tag;
        VM_GET_FLAGS_ALIAS(flags, tag);

        return mach_vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map, memory_address,
                   copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap_kernel(
        vm_map_t target_map,
        mach_vm_offset_t *address,
        mach_vm_size_t size,
        mach_vm_offset_t mask,
        int flags,
        vm_tag_t tag,
        vm_map_t src_map,
        mach_vm_offset_t memory_address,
        boolean_t copy,
        vm_prot_t *cur_protection,      /* OUT */
        vm_prot_t *max_protection,      /* OUT */
        vm_inherit_t inheritance)
{
        vm_map_offset_t map_addr;
        kern_return_t kr;

        if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
                return KERN_INVALID_ARGUMENT;
        }

        /* filter out any kernel-only flags */
        if (flags & ~VM_FLAGS_USER_REMAP) {
                return KERN_INVALID_ARGUMENT;
        }

        map_addr = (vm_map_offset_t)*address;

        *cur_protection = VM_PROT_NONE;
        *max_protection = VM_PROT_NONE;

        kr = vm_map_remap(target_map,
            &map_addr,
            size,
            mask,
            flags,
            VM_MAP_KERNEL_FLAGS_NONE,
            tag,
            src_map,
            memory_address,
            copy,
            cur_protection,     /* IN/OUT */
            max_protection,     /* IN/OUT */
            inheritance);
        *address = map_addr;
        return kr;
}
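
/*
 * Usage sketch (user space): aliasing a range of another task into the
 * caller's map with copy = FALSE, so both tasks share the same pages;
 * on return cur/max report what the new mapping actually permits.
 * "src_task", "remote_addr" and "len" are assumptions.
 *
 *    mach_vm_address_t local = 0;
 *    vm_prot_t cur, max;
 *    kr = mach_vm_remap(mach_task_self(), &local, len, 0,
 *        VM_FLAGS_ANYWHERE, src_task, remote_addr, FALSE,
 *        &cur, &max, VM_INHERIT_NONE);
 */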

/*
 * vm_remap_new -
 * Behaves like vm_remap, except that VM_FLAGS_RETURN_DATA_ADDR is always set
 * and {cur,max}_protection are in/out.
 */
kern_return_t
vm_remap_new_external(
        vm_map_t target_map,
        vm_offset_t *address,
        vm_size_t size,
        vm_offset_t mask,
        int flags,
        mach_port_t src_tport,
        vm_offset_t memory_address,
        boolean_t copy,
        vm_prot_t *cur_protection,      /* IN/OUT */
        vm_prot_t *max_protection,      /* IN/OUT */
        vm_inherit_t inheritance)
{
        vm_tag_t tag;
        vm_map_offset_t map_addr;
        kern_return_t kr;
        vm_map_t src_map;

        flags |= VM_FLAGS_RETURN_DATA_ADDR;
        VM_GET_FLAGS_ALIAS(flags, tag);

        /* filter out any kernel-only flags */
        if (flags & ~VM_FLAGS_USER_REMAP) {
                return KERN_INVALID_ARGUMENT;
        }

        if (target_map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        if ((*cur_protection & ~VM_PROT_ALL) ||
            (*max_protection & ~VM_PROT_ALL) ||
            (*cur_protection & *max_protection) != *cur_protection) {
                return KERN_INVALID_ARGUMENT;
        }
        if ((*max_protection & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
            (VM_PROT_WRITE | VM_PROT_EXECUTE)) {
                /*
                 * XXX FBDP TODO
                 * enforce target's "wx" policies
                 */
                return KERN_PROTECTION_FAILURE;
        }

        if (copy || *max_protection == VM_PROT_READ || *max_protection == VM_PROT_NONE) {
                src_map = convert_port_to_map_read(src_tport);
        } else {
                src_map = convert_port_to_map(src_tport);
        }

        if (src_map == VM_MAP_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        map_addr = (vm_map_offset_t)*address;

        kr = vm_map_remap(target_map,
            &map_addr,
            size,
            mask,
            flags,
            VM_MAP_KERNEL_FLAGS_NONE,
            tag,
            src_map,
            memory_address,
            copy,
            cur_protection,     /* IN/OUT */
            max_protection,     /* IN/OUT */
            inheritance);

        *address = CAST_DOWN(vm_offset_t, map_addr);
        vm_map_deallocate(src_map);

        if (kr == KERN_SUCCESS) {
                ipc_port_release_send(src_tport);       /* consume on success */
        }
        return kr;
}

/*
 * vm_remap -
 * Remap a range of memory from one task into another,
 * to another address range within the same task, or
 * over top of itself (with altered permissions and/or
 * as an in-place copy of itself).
 *
 * The addressability of the source and target address
 * range is limited by the size of vm_address_t (in the
 * kernel context).
 */
kern_return_t
vm_remap_external(
        vm_map_t target_map,
        vm_offset_t *address,
        vm_size_t size,
        vm_offset_t mask,
        int flags,
        vm_map_t src_map,
        vm_offset_t memory_address,
        boolean_t copy,
        vm_prot_t *cur_protection,      /* OUT */
        vm_prot_t *max_protection,      /* OUT */
        vm_inherit_t inheritance)
{
        vm_tag_t tag;
        VM_GET_FLAGS_ALIAS(flags, tag);

        return vm_remap_kernel(target_map, address, size, mask, flags, tag, src_map,
                   memory_address, copy, cur_protection, max_protection, inheritance);
}

kern_return_t
vm_remap_kernel(
        vm_map_t target_map,
        vm_offset_t *address,
        vm_size_t size,
        vm_offset_t mask,
        int flags,
        vm_tag_t tag,
        vm_map_t src_map,
        vm_offset_t memory_address,
        boolean_t copy,
        vm_prot_t *cur_protection,      /* OUT */
        vm_prot_t *max_protection,      /* OUT */
        vm_inherit_t inheritance)
{
        vm_map_offset_t map_addr;
        kern_return_t kr;

        if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) {
                return KERN_INVALID_ARGUMENT;
        }

        /* filter out any kernel-only flags */
        if (flags & ~VM_FLAGS_USER_REMAP) {
                return KERN_INVALID_ARGUMENT;
        }

        map_addr = (vm_map_offset_t)*address;

        *cur_protection = VM_PROT_NONE;
        *max_protection = VM_PROT_NONE;

        kr = vm_map_remap(target_map,
            &map_addr,
            size,
            mask,
            flags,
            VM_MAP_KERNEL_FLAGS_NONE,
            tag,
            src_map,
            memory_address,
            copy,
            cur_protection,     /* IN/OUT */
            max_protection,     /* IN/OUT */
            inheritance);
        *address = CAST_DOWN(vm_offset_t, map_addr);
        return kr;
}

/*
 * NOTE: these routines (and this file) will no longer require
 * mach_host_server.h when mach_vm_wire and vm_wire are changed to use
 * ledgers.
 */
#include <mach/mach_host_server.h>
/*
 * mach_vm_wire
 * Specify that the range of the virtual address space
 * of the target task must not cause page faults for
 * the indicated accesses.
 *
 * [ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire_external(
        host_priv_t host_priv,
        vm_map_t map,
        mach_vm_offset_t start,
        mach_vm_size_t size,
        vm_prot_t access)
{
        return mach_vm_wire_kernel(host_priv, map, start, size, access, VM_KERN_MEMORY_MLOCK);
}

kern_return_t
mach_vm_wire_kernel(
        host_priv_t host_priv,
        vm_map_t map,
        mach_vm_offset_t start,
        mach_vm_size_t size,
        vm_prot_t access,
        vm_tag_t tag)
{
        kern_return_t rc;

        if (host_priv == HOST_PRIV_NULL) {
                return KERN_INVALID_HOST;
        }

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_TASK;
        }

        if (access & ~VM_PROT_ALL || (start + size < start)) {
                return KERN_INVALID_ARGUMENT;
        }

        if (access != VM_PROT_NONE) {
                rc = vm_map_wire_kernel(map,
                    vm_map_trunc_page(start,
                    VM_MAP_PAGE_MASK(map)),
                    vm_map_round_page(start + size,
                    VM_MAP_PAGE_MASK(map)),
                    access, tag,
                    TRUE);
        } else {
                rc = vm_map_unwire(map,
                    vm_map_trunc_page(start,
                    VM_MAP_PAGE_MASK(map)),
                    vm_map_round_page(start + size,
                    VM_MAP_PAGE_MASK(map)),
                    TRUE);
        }
        return rc;
}

/*
 * vm_wire -
 * Specify that the range of the virtual address space
 * of the target task must not cause page faults for
 * the indicated accesses.
 *
 * [ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
        host_priv_t host_priv,
        vm_map_t map,
        vm_offset_t start,
        vm_size_t size,
        vm_prot_t access)
{
        kern_return_t rc;

        if (host_priv == HOST_PRIV_NULL) {
                return KERN_INVALID_HOST;
        }

        if (map == VM_MAP_NULL) {
                return KERN_INVALID_TASK;
        }

        if ((access & ~VM_PROT_ALL) || (start + size < start)) {
                return KERN_INVALID_ARGUMENT;
        }

        if (size == 0) {
                rc = KERN_SUCCESS;
        } else if (access != VM_PROT_NONE) {
                rc = vm_map_wire_kernel(map,
                    vm_map_trunc_page(start,
                    VM_MAP_PAGE_MASK(map)),
                    vm_map_round_page(start + size,
                    VM_MAP_PAGE_MASK(map)),
                    access, VM_KERN_MEMORY_OSFMK,
                    TRUE);
        } else {
                rc = vm_map_unwire(map,
                    vm_map_trunc_page(start,
                    VM_MAP_PAGE_MASK(map)),
                    vm_map_round_page(start + size,
                    VM_MAP_PAGE_MASK(map)),
                    TRUE);
        }
        return rc;
}

/*
 * mach_vm_msync
 *
 * Synchronises the memory range specified with its backing store
 * image by either flushing or cleaning the contents to the appropriate
 * memory manager.
 *
 * interpretation of sync_flags
 * VM_SYNC_INVALIDATE - discard pages, only return precious
 *                      pages to manager.
 *
 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *                    - discard pages, write dirty or precious
 *                      pages back to memory manager.
 *
 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *                    - write dirty or precious pages back to
 *                      the memory manager.
 *
 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
 *                      is a hole in the region, and we would
 *                      have returned KERN_SUCCESS, return
 *                      KERN_INVALID_ADDRESS instead.
 *
 * RETURNS
 * KERN_INVALID_TASK       Bad task parameter
 * KERN_INVALID_ARGUMENT   both sync and async were specified.
 * KERN_SUCCESS            The usual.
 * KERN_INVALID_ADDRESS    There was a hole in the region.
 */

kern_return_t
mach_vm_msync(
        vm_map_t map,
        mach_vm_address_t address,
        mach_vm_size_t size,
        vm_sync_t sync_flags)
{
        if (map == VM_MAP_NULL) {
                return KERN_INVALID_TASK;
        }

        return vm_map_msync(map, (vm_map_address_t)address,
                   (vm_map_size_t)size, sync_flags);
}
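
/*
 * Usage sketch (user space, assuming the mach_vm_msync routine exported
 * through <mach/mach_vm.h>): synchronously pushing any dirty pages in a
 * range back to their pager before the range is unmapped.
 *
 *    kr = mach_vm_msync(mach_task_self(), addr, size,
 *        VM_SYNC_SYNCHRONOUS);
 */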

/*
 * vm_msync
 *
 * Synchronises the memory range specified with its backing store
 * image by either flushing or cleaning the contents to the appropriate
 * memory manager.
 *
 * interpretation of sync_flags
 * VM_SYNC_INVALIDATE - discard pages, only return precious
 *                      pages to manager.
 *
 * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *                    - discard pages, write dirty or precious
 *                      pages back to memory manager.
 *
 * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *                    - write dirty or precious pages back to
 *                      the memory manager.
 *
 * VM_SYNC_CONTIGUOUS - does everything normally, but if there
 *                      is a hole in the region, and we would
 *                      have returned KERN_SUCCESS, return
 *                      KERN_INVALID_ADDRESS instead.
 *
 * The addressability of the range is limited to that which can
 * be described by a vm_address_t.
 *
 * RETURNS
 * KERN_INVALID_TASK       Bad task parameter
 * KERN_INVALID_ARGUMENT   both sync and async were specified.
 * KERN_SUCCESS            The usual.
 * KERN_INVALID_ADDRESS    There was a hole in the region.
 */

kern_return_t
vm_msync(
        vm_map_t map,
        vm_address_t address,
        vm_size_t size,
        vm_sync_t sync_flags)
{
        if (map == VM_MAP_NULL) {
                return KERN_INVALID_TASK;
        }

        return vm_map_msync(map, (vm_map_address_t)address,
                   (vm_map_size_t)size, sync_flags);
}


int
vm_toggle_entry_reuse(int toggle, int *old_value)
{
        vm_map_t map = current_map();

        assert(!map->is_nested_map);
        if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
                *old_value = map->disable_vmentry_reuse;
        } else if (toggle == VM_TOGGLE_SET) {
                vm_map_entry_t map_to_entry;

                vm_map_lock(map);
                vm_map_disable_hole_optimization(map);
                map->disable_vmentry_reuse = TRUE;
                __IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
                if (map->first_free == map_to_entry) {
                        map->highest_entry_end = vm_map_min(map);
                } else {
                        map->highest_entry_end = map->first_free->vme_end;
                }
                vm_map_unlock(map);
        } else if (toggle == VM_TOGGLE_CLEAR) {
                vm_map_lock(map);
                map->disable_vmentry_reuse = FALSE;
                vm_map_unlock(map);
        } else {
                return KERN_INVALID_ARGUMENT;
        }

        return KERN_SUCCESS;
}
1759 | ||
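/*
 * Usage sketch (illustrative only): reading the current setting before
 * disabling map-entry reuse for the current map. VM_TOGGLE_GETVALUE,
 * VM_TOGGLE_SET and VM_TOGGLE_CLEAR are the only accepted toggles:
 *
 *	int old_value = 0;
 *
 *	vm_toggle_entry_reuse(VM_TOGGLE_GETVALUE, &old_value);
 *	if (!old_value) {
 *		vm_toggle_entry_reuse(VM_TOGGLE_SET, NULL);
 *	}
 */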
1760 | /* | |
1761 | * mach_vm_behavior_set | |
1762 | * | |
1763 | * Sets the paging behavior attribute for the specified range | |
1764 | * in the specified map. | |
1765 | * | |
1766 | * This routine will fail with KERN_INVALID_ADDRESS if any address | |
1767 | * in [start,start+size) is not a valid allocated memory region. | |
1768 | */ | |
1769 | kern_return_t | |
1770 | mach_vm_behavior_set( | |
1771 | vm_map_t map, | |
1772 | mach_vm_offset_t start, | |
1773 | mach_vm_size_t size, | |
1774 | vm_behavior_t new_behavior) | |
1775 | { | |
1776 | vm_map_offset_t align_mask; | |
1777 | ||
1778 | if ((map == VM_MAP_NULL) || (start + size < start)) { | |
1779 | return KERN_INVALID_ARGUMENT; | |
1780 | } | |
1781 | ||
1782 | if (size == 0) { | |
1783 | return KERN_SUCCESS; | |
1784 | } | |
1785 | ||
1786 | switch (new_behavior) { | |
1787 | case VM_BEHAVIOR_REUSABLE: | |
1788 | case VM_BEHAVIOR_REUSE: | |
1789 | case VM_BEHAVIOR_CAN_REUSE: | |
1790 | /* | |
1791 | * Align to the hardware page size, to allow | |
1792 | * malloc() to maximize the amount of re-usability, | |
1793 | * even on systems with larger software page size. | |
1794 | */ | |
1795 | align_mask = PAGE_MASK; | |
1796 | break; | |
1797 | default: | |
1798 | align_mask = VM_MAP_PAGE_MASK(map); | |
1799 | break; | |
1800 | } | |
1801 | ||
1802 | return vm_map_behavior_set(map, | |
1803 | vm_map_trunc_page(start, align_mask), | |
1804 | vm_map_round_page(start + size, align_mask), | |
1805 | new_behavior); | |
1806 | } | |
1807 | ||
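/*
 * Usage sketch (illustrative only): a user-space allocator marking a
 * free range as reusable, then reclaiming it before handing it out
 * again. Assumes "addr"/"len" cover pages previously touched by the
 * caller:
 *
 *	kern_return_t kr;
 *
 *	kr = mach_vm_behavior_set(mach_task_self(), addr, len,
 *	    VM_BEHAVIOR_REUSABLE);
 *	...
 *	kr = mach_vm_behavior_set(mach_task_self(), addr, len,
 *	    VM_BEHAVIOR_REUSE);
 *
 * Because of the hardware-page alignment above, this works even when
 * the map's software page size is larger than the hardware page size.
 */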
1808 | /* | |
1809 | * vm_behavior_set | |
1810 | * | |
1811 | * Sets the paging behavior attribute for the specified range | |
1812 | * in the specified map. | |
1813 | * | |
1814 | * This routine will fail with KERN_INVALID_ADDRESS if any address | |
1815 | * in [start,start+size) is not a valid allocated memory region. | |
1816 | * | |
1817 | * This routine is potentially limited in addressability by the | |
1818 | * use of vm_offset_t (if the map provided is larger than the | |
1819 | * kernel's). | |
1820 | */ | |
1821 | kern_return_t | |
1822 | vm_behavior_set( | |
1823 | vm_map_t map, | |
1824 | vm_offset_t start, | |
1825 | vm_size_t size, | |
1826 | vm_behavior_t new_behavior) | |
1827 | { | |
1828 | if (start + size < start) { | |
1829 | return KERN_INVALID_ARGUMENT; | |
1830 | } | |
1831 | ||
1832 | return mach_vm_behavior_set(map, | |
1833 | (mach_vm_offset_t) start, | |
1834 | (mach_vm_size_t) size, | |
1835 | new_behavior); | |
1836 | } | |
1837 | ||
1838 | /* | |
1839 | * mach_vm_region: | |
1840 | * | |
1841 | * User call to obtain information about a region in | |
1842 | * a task's address map. Currently, only one flavor is | |
1843 | * supported. | |
1844 | * | |
1845 | * XXX The reserved and behavior fields cannot be filled | |
1846 | * in until the vm merge from the IK is completed, and | |
1847 | * vm_reserve is implemented. | |
1848 | * | |
1849 | * XXX Dependency: syscall_vm_region() also supports only one flavor. | |
1850 | */ | |
1851 | ||
1852 | kern_return_t | |
1853 | mach_vm_region( | |
1854 | vm_map_t map, | |
1855 | mach_vm_offset_t *address, /* IN/OUT */ | |
1856 | mach_vm_size_t *size, /* OUT */ | |
1857 | vm_region_flavor_t flavor, /* IN */ | |
1858 | vm_region_info_t info, /* OUT */ | |
1859 | mach_msg_type_number_t *count, /* IN/OUT */ | |
1860 | mach_port_t *object_name) /* OUT */ | |
1861 | { | |
1862 | vm_map_offset_t map_addr; | |
1863 | vm_map_size_t map_size; | |
1864 | kern_return_t kr; | |
1865 | ||
1866 | if (VM_MAP_NULL == map) { | |
1867 | return KERN_INVALID_ARGUMENT; | |
1868 | } | |
1869 | ||
1870 | map_addr = (vm_map_offset_t)*address; | |
1871 | map_size = (vm_map_size_t)*size; | |
1872 | ||
1873 | /* legacy conversion */ | |
1874 | if (VM_REGION_BASIC_INFO == flavor) { | |
1875 | flavor = VM_REGION_BASIC_INFO_64; | |
1876 | } | |
1877 | ||
1878 | kr = vm_map_region(map, | |
1879 | &map_addr, &map_size, | |
1880 | flavor, info, count, | |
1881 | object_name); | |
1882 | ||
1883 | *address = map_addr; | |
1884 | *size = map_size; | |
1885 | return kr; | |
1886 | } | |
1887 | ||
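/*
 * Usage sketch (illustrative only): looking up the first region at or
 * after address 0 in the caller's own map:
 *
 *	mach_vm_address_t address = 0;
 *	mach_vm_size_t size = 0;
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t object_name = MACH_PORT_NULL;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_region(mach_task_self(), &address, &size,
 *	    VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info,
 *	    &count, &object_name);
 *
 * On success "address"/"size" describe the region actually found,
 * which may start above the address passed in.
 */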
1888 | /* | |
1889 | * vm_region_64 and vm_region: | |
1890 | * | |
1891 | * User call to obtain information about a region in | |
1892 | * a task's address map. Currently, only one flavor is | |
1893 | * supported. | |
1894 | * | |
1895 | * XXX The reserved and behavior fields cannot be filled | |
1896 | * in until the vm merge from the IK is completed, and | |
1897 | * vm_reserve is implemented. | |
1898 | * | |
1899 | * XXX Dependency: syscall_vm_region() also supports only one flavor. | |
1900 | */ | |
1901 | ||
1902 | kern_return_t | |
1903 | vm_region_64( | |
1904 | vm_map_t map, | |
1905 | vm_offset_t *address, /* IN/OUT */ | |
1906 | vm_size_t *size, /* OUT */ | |
1907 | vm_region_flavor_t flavor, /* IN */ | |
1908 | vm_region_info_t info, /* OUT */ | |
1909 | mach_msg_type_number_t *count, /* IN/OUT */ | |
1910 | mach_port_t *object_name) /* OUT */ | |
1911 | { | |
1912 | vm_map_offset_t map_addr; | |
1913 | vm_map_size_t map_size; | |
1914 | kern_return_t kr; | |
1915 | ||
1916 | if (VM_MAP_NULL == map) { | |
1917 | return KERN_INVALID_ARGUMENT; | |
1918 | } | |
1919 | ||
1920 | map_addr = (vm_map_offset_t)*address; | |
1921 | map_size = (vm_map_size_t)*size; | |
1922 | ||
1923 | /* legacy conversion */ | |
1924 | if (VM_REGION_BASIC_INFO == flavor) { | |
1925 | flavor = VM_REGION_BASIC_INFO_64; | |
1926 | } | |
1927 | ||
1928 | kr = vm_map_region(map, | |
1929 | &map_addr, &map_size, | |
1930 | flavor, info, count, | |
1931 | object_name); | |
1932 | ||
1933 | *address = CAST_DOWN(vm_offset_t, map_addr); | |
1934 | *size = CAST_DOWN(vm_size_t, map_size); | |
1935 | ||
1936 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { | |
1937 | return KERN_INVALID_ADDRESS; | |
1938 | } | |
1939 | return kr; | |
1940 | } | |
1941 | ||
1942 | kern_return_t | |
1943 | vm_region( | |
1944 | vm_map_t map, | |
1945 | vm_address_t *address, /* IN/OUT */ | |
1946 | vm_size_t *size, /* OUT */ | |
1947 | vm_region_flavor_t flavor, /* IN */ | |
1948 | vm_region_info_t info, /* OUT */ | |
1949 | mach_msg_type_number_t *count, /* IN/OUT */ | |
1950 | mach_port_t *object_name) /* OUT */ | |
1951 | { | |
1952 | vm_map_address_t map_addr; | |
1953 | vm_map_size_t map_size; | |
1954 | kern_return_t kr; | |
1955 | ||
1956 | if (VM_MAP_NULL == map) { | |
1957 | return KERN_INVALID_ARGUMENT; | |
1958 | } | |
1959 | ||
1960 | map_addr = (vm_map_address_t)*address; | |
1961 | map_size = (vm_map_size_t)*size; | |
1962 | ||
1963 | kr = vm_map_region(map, | |
1964 | &map_addr, &map_size, | |
1965 | flavor, info, count, | |
1966 | object_name); | |
1967 | ||
1968 | *address = CAST_DOWN(vm_address_t, map_addr); | |
1969 | *size = CAST_DOWN(vm_size_t, map_size); | |
1970 | ||
1971 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { | |
1972 | return KERN_INVALID_ADDRESS; | |
1973 | } | |
1974 | return kr; | |
1975 | } | |
1976 | ||
1977 | /* | |
1978 | * vm_region_recurse: A form of vm_region which follows the | |
1979 | * submaps in a target map | |
1980 | * | |
1981 | */ | |
1982 | kern_return_t | |
1983 | mach_vm_region_recurse( | |
1984 | vm_map_t map, | |
1985 | mach_vm_address_t *address, | |
1986 | mach_vm_size_t *size, | |
1987 | uint32_t *depth, | |
1988 | vm_region_recurse_info_t info, | |
1989 | mach_msg_type_number_t *infoCnt) | |
1990 | { | |
1991 | vm_map_address_t map_addr; | |
1992 | vm_map_size_t map_size; | |
1993 | kern_return_t kr; | |
1994 | ||
1995 | if (VM_MAP_NULL == map) { | |
1996 | return KERN_INVALID_ARGUMENT; | |
1997 | } | |
1998 | ||
1999 | map_addr = (vm_map_address_t)*address; | |
2000 | map_size = (vm_map_size_t)*size; | |
2001 | ||
2002 | kr = vm_map_region_recurse_64( | |
2003 | map, | |
2004 | &map_addr, | |
2005 | &map_size, | |
2006 | depth, | |
2007 | (vm_region_submap_info_64_t)info, | |
2008 | infoCnt); | |
2009 | ||
2010 | *address = map_addr; | |
2011 | *size = map_size; | |
2012 | return kr; | |
2013 | } | |
2014 | ||
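/*
 * Usage sketch (illustrative only): like mach_vm_region(), but "depth"
 * bounds how far into submaps the lookup may descend, and is updated
 * to the depth at which the returned range was found:
 *
 *	mach_vm_address_t address = 0;
 *	mach_vm_size_t size = 0;
 *	natural_t depth = 1;
 *	vm_region_submap_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_region_recurse(mach_task_self(), &address, &size,
 *	    &depth, (vm_region_recurse_info_t)&info, &count);
 */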
2015 | /* | |
2016 | * vm_region_recurse: A form of vm_region which follows the | |
2017 | * submaps in a target map | |
2018 | * | |
2019 | */ | |
2020 | kern_return_t | |
2021 | vm_region_recurse_64( | |
2022 | vm_map_t map, | |
2023 | vm_address_t *address, | |
2024 | vm_size_t *size, | |
2025 | uint32_t *depth, | |
2026 | vm_region_recurse_info_64_t info, | |
2027 | mach_msg_type_number_t *infoCnt) | |
2028 | { | |
2029 | vm_map_address_t map_addr; | |
2030 | vm_map_size_t map_size; | |
2031 | kern_return_t kr; | |
2032 | ||
2033 | if (VM_MAP_NULL == map) { | |
2034 | return KERN_INVALID_ARGUMENT; | |
2035 | } | |
2036 | ||
2037 | map_addr = (vm_map_address_t)*address; | |
2038 | map_size = (vm_map_size_t)*size; | |
2039 | ||
2040 | kr = vm_map_region_recurse_64( | |
2041 | map, | |
2042 | &map_addr, | |
2043 | &map_size, | |
2044 | depth, | |
2045 | (vm_region_submap_info_64_t)info, | |
2046 | infoCnt); | |
2047 | ||
2048 | *address = CAST_DOWN(vm_address_t, map_addr); | |
2049 | *size = CAST_DOWN(vm_size_t, map_size); | |
2050 | ||
2051 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { | |
2052 | return KERN_INVALID_ADDRESS; | |
2053 | } | |
2054 | return kr; | |
2055 | } | |
2056 | ||
2057 | kern_return_t | |
2058 | vm_region_recurse( | |
2059 | vm_map_t map, | |
2060 | vm_offset_t *address, /* IN/OUT */ | |
2061 | vm_size_t *size, /* OUT */ | |
2062 | natural_t *depth, /* IN/OUT */ | |
2063 | vm_region_recurse_info_t info32, /* IN/OUT */ | |
2064 | mach_msg_type_number_t *infoCnt) /* IN/OUT */ | |
2065 | { | |
2066 | vm_region_submap_info_data_64_t info64; | |
2067 | vm_region_submap_info_t info; | |
2068 | vm_map_address_t map_addr; | |
2069 | vm_map_size_t map_size; | |
2070 | kern_return_t kr; | |
2071 | ||
2072 | if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) { | |
2073 | return KERN_INVALID_ARGUMENT; | |
2074 | } | |
2075 | ||
2076 | ||
2077 | map_addr = (vm_map_address_t)*address; | |
2078 | map_size = (vm_map_size_t)*size; | |
2079 | info = (vm_region_submap_info_t)info32; | |
2080 | *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64; | |
2081 | ||
2082 | kr = vm_map_region_recurse_64(map, &map_addr, &map_size, | |
2083 | depth, &info64, infoCnt); | |
2084 | ||
2085 | info->protection = info64.protection; | |
2086 | info->max_protection = info64.max_protection; | |
2087 | info->inheritance = info64.inheritance; | |
2088 | info->offset = (uint32_t)info64.offset; /* trouble-maker */ | |
2089 | info->user_tag = info64.user_tag; | |
2090 | info->pages_resident = info64.pages_resident; | |
2091 | info->pages_shared_now_private = info64.pages_shared_now_private; | |
2092 | info->pages_swapped_out = info64.pages_swapped_out; | |
2093 | info->pages_dirtied = info64.pages_dirtied; | |
2094 | info->ref_count = info64.ref_count; | |
2095 | info->shadow_depth = info64.shadow_depth; | |
2096 | info->external_pager = info64.external_pager; | |
2097 | info->share_mode = info64.share_mode; | |
2098 | info->is_submap = info64.is_submap; | |
2099 | info->behavior = info64.behavior; | |
2100 | info->object_id = info64.object_id; | |
2101 | info->user_wired_count = info64.user_wired_count; | |
2102 | ||
2103 | *address = CAST_DOWN(vm_address_t, map_addr); | |
2104 | *size = CAST_DOWN(vm_size_t, map_size); | |
2105 | *infoCnt = VM_REGION_SUBMAP_INFO_COUNT; | |
2106 | ||
2107 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) { | |
2108 | return KERN_INVALID_ADDRESS; | |
2109 | } | |
2110 | return kr; | |
2111 | } | |
2112 | ||
2113 | kern_return_t | |
2114 | mach_vm_purgable_control( | |
2115 | vm_map_t map, | |
2116 | mach_vm_offset_t address, | |
2117 | vm_purgable_t control, | |
2118 | int *state) | |
2119 | { | |
2120 | if (VM_MAP_NULL == map) { | |
2121 | return KERN_INVALID_ARGUMENT; | |
2122 | } | |
2123 | ||
2124 | if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { | |
2125 | /* not allowed from user-space */ | |
2126 | return KERN_INVALID_ARGUMENT; | |
2127 | } | |
2128 | ||
2129 | return vm_map_purgable_control(map, | |
2130 | vm_map_trunc_page(address, VM_MAP_PAGE_MASK(map)), | |
2131 | control, | |
2132 | state); | |
2133 | } | |
2134 | ||
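/*
 * Usage sketch (illustrative only): marking a purgeable allocation
 * volatile so the system may reclaim it under memory pressure, then
 * taking it back. Assumes "addr" was allocated with VM_FLAGS_PURGABLE:
 *
 *	int state;
 *	kern_return_t kr;
 *
 *	state = VM_PURGABLE_VOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	kr = mach_vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *
 * After the second call, "state" reports VM_PURGABLE_EMPTY if the
 * contents were reclaimed while volatile.
 */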
2135 | kern_return_t | |
2136 | vm_purgable_control( | |
2137 | vm_map_t map, | |
2138 | vm_offset_t address, | |
2139 | vm_purgable_t control, | |
2140 | int *state) | |
2141 | { | |
2142 | if (VM_MAP_NULL == map) { | |
2143 | return KERN_INVALID_ARGUMENT; | |
2144 | } | |
2145 | ||
2146 | if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) { | |
2147 | /* not allowed from user-space */ | |
2148 | return KERN_INVALID_ARGUMENT; | |
2149 | } | |
2150 | ||
2151 | return vm_map_purgable_control(map, | |
2152 | vm_map_trunc_page(address, VM_MAP_PAGE_MASK(map)), | |
2153 | control, | |
2154 | state); | |
2155 | } | |
2156 | ||
2157 | ||
2158 | /* | |
2159 | * Ordinarily, the right to allocate CPM is restricted | |
2160 | * to privileged applications (those that can gain access | |
2161 | * to the host priv port). Set this variable to zero if | |
2162 | * you want to let any application allocate CPM. | |
2163 | */ | |
2164 | unsigned int vm_allocate_cpm_privileged = 0; | |
2165 | ||
2166 | /* | |
2167 | * Allocate memory in the specified map, with the caveat that | |
2168 | * the memory is physically contiguous. This call may fail | |
2169 | * if the system can't find sufficient contiguous memory. | |
2170 | * This call may cause or lead to heart-stopping amounts of | |
2171 | * paging activity. | |
2172 | * | |
2173 | * Memory obtained from this call should be freed in the | |
2174 | * normal way, viz., via vm_deallocate. | |
2175 | */ | |
2176 | kern_return_t | |
2177 | vm_allocate_cpm( | |
2178 | host_priv_t host_priv, | |
2179 | vm_map_t map, | |
2180 | vm_address_t *addr, | |
2181 | vm_size_t size, | |
2182 | int flags) | |
2183 | { | |
2184 | vm_map_address_t map_addr; | |
2185 | vm_map_size_t map_size; | |
2186 | kern_return_t kr; | |
2187 | ||
2188 | if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) { | |
2189 | return KERN_INVALID_HOST; | |
2190 | } | |
2191 | ||
2192 | if (VM_MAP_NULL == map) { | |
2193 | return KERN_INVALID_ARGUMENT; | |
2194 | } | |
2195 | ||
2196 | map_addr = (vm_map_address_t)*addr; | |
2197 | map_size = (vm_map_size_t)size; | |
2198 | ||
2199 | kr = vm_map_enter_cpm(map, | |
2200 | &map_addr, | |
2201 | map_size, | |
2202 | flags); | |
2203 | ||
2204 | *addr = CAST_DOWN(vm_address_t, map_addr); | |
2205 | return kr; | |
2206 | } | |
2207 | ||
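/*
 * Usage sketch (illustrative only): a privileged caller allocating a
 * physically contiguous buffer and releasing it the normal way.
 * Obtaining "host_priv" (e.g. via the host privilege port) is assumed:
 *
 *	vm_address_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = vm_allocate_cpm(host_priv, current_map(), &addr, size,
 *	    VM_FLAGS_ANYWHERE);
 *	...
 *	vm_deallocate(current_map(), addr, size);
 */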
2208 | ||
2209 | kern_return_t | |
2210 | mach_vm_page_query( | |
2211 | vm_map_t map, | |
2212 | mach_vm_offset_t offset, | |
2213 | int *disposition, | |
2214 | int *ref_count) | |
2215 | { | |
2216 | if (VM_MAP_NULL == map) { | |
2217 | return KERN_INVALID_ARGUMENT; | |
2218 | } | |
2219 | ||
2220 | return vm_map_page_query_internal( | |
2221 | map, | |
2222 | vm_map_trunc_page(offset, PAGE_MASK), | |
2223 | disposition, ref_count); | |
2224 | } | |
2225 | ||
2226 | kern_return_t | |
2227 | vm_map_page_query( | |
2228 | vm_map_t map, | |
2229 | vm_offset_t offset, | |
2230 | int *disposition, | |
2231 | int *ref_count) | |
2232 | { | |
2233 | if (VM_MAP_NULL == map) { | |
2234 | return KERN_INVALID_ARGUMENT; | |
2235 | } | |
2236 | ||
2237 | return vm_map_page_query_internal( | |
2238 | map, | |
2239 | vm_map_trunc_page(offset, PAGE_MASK), | |
2240 | disposition, ref_count); | |
2241 | } | |
2242 | ||
2243 | kern_return_t | |
2244 | mach_vm_page_range_query( | |
2245 | vm_map_t map, | |
2246 | mach_vm_offset_t address, | |
2247 | mach_vm_size_t size, | |
2248 | mach_vm_address_t dispositions_addr, | |
2249 | mach_vm_size_t *dispositions_count) | |
2250 | { | |
2251 | kern_return_t kr = KERN_SUCCESS; | |
2252 | int num_pages = 0, i = 0; | |
2253 | mach_vm_size_t curr_sz = 0, copy_sz = 0; | |
2254 | mach_vm_size_t disp_buf_req_size = 0, disp_buf_total_size = 0; | |
2255 | mach_msg_type_number_t count = 0; | |
2256 | ||
2257 | void *info = NULL; | |
2258 | void *local_disp = NULL; | |
2259 | vm_map_size_t info_size = 0, local_disp_size = 0; | |
2260 | mach_vm_offset_t start = 0, end = 0; | |
2261 | int effective_page_shift, effective_page_size, effective_page_mask; | |
2262 | ||
2263 | if (map == VM_MAP_NULL || dispositions_count == NULL) { | |
2264 | return KERN_INVALID_ARGUMENT; | |
2265 | } | |
2266 | ||
2267 | effective_page_shift = vm_self_region_page_shift_safely(map); | |
2268 | if (effective_page_shift == -1) { | |
2269 | return KERN_INVALID_ARGUMENT; | |
2270 | } | |
2271 | effective_page_size = (1 << effective_page_shift); | |
2272 | effective_page_mask = effective_page_size - 1; | |
2273 | ||
2274 | if (os_mul_overflow(*dispositions_count, sizeof(int), &disp_buf_req_size)) { | |
2275 | return KERN_INVALID_ARGUMENT; | |
2276 | } | |
2277 | ||
2278 | start = vm_map_trunc_page(address, effective_page_mask); | |
2279 | end = vm_map_round_page(address + size, effective_page_mask); | |
2280 | ||
2281 | if (end < start) { | |
2282 | return KERN_INVALID_ARGUMENT; | |
2283 | } | |
2284 | ||
2285 | if ((end - start) < size) { | |
2286 | /* | |
2287 | * Aligned size is less than unaligned size. | |
2288 | */ | |
2289 | return KERN_INVALID_ARGUMENT; | |
2290 | } | |
2291 | ||
2292 | if (disp_buf_req_size == 0 || (end == start)) { | |
2293 | return KERN_SUCCESS; | |
2294 | } | |
2295 | ||
2296 | /* | |
2297 | * For large requests, we will go through them | |
2298 | * one MAX_PAGE_RANGE_QUERY-sized chunk at a time. | |
2299 | */ | |
2300 | ||
2301 | curr_sz = MIN(end - start, MAX_PAGE_RANGE_QUERY); | |
2302 | num_pages = (int) (curr_sz >> effective_page_shift); | |
2303 | ||
2304 | info_size = num_pages * sizeof(vm_page_info_basic_data_t); | |
2305 | info = kheap_alloc(KHEAP_TEMP, info_size, Z_WAITOK); | |
2306 | ||
2307 | local_disp_size = num_pages * sizeof(int); | |
2308 | local_disp = kheap_alloc(KHEAP_TEMP, local_disp_size, Z_WAITOK); | |
2309 | ||
2310 | if (info == NULL || local_disp == NULL) { | |
2311 | kr = KERN_RESOURCE_SHORTAGE; | |
2312 | goto out; | |
2313 | } | |
2314 | ||
2315 | while (size) { | |
2316 | count = VM_PAGE_INFO_BASIC_COUNT; | |
2317 | kr = vm_map_page_range_info_internal( | |
2318 | map, | |
2319 | start, | |
2320 | vm_map_round_page(start + curr_sz, effective_page_mask), | |
2321 | effective_page_shift, | |
2322 | VM_PAGE_INFO_BASIC, | |
2323 | (vm_page_info_t) info, | |
2324 | &count); | |
2325 | ||
2326 | assert(kr == KERN_SUCCESS); | |
2327 | ||
2328 | for (i = 0; i < num_pages; i++) { | |
2329 | ((int*)local_disp)[i] = ((vm_page_info_basic_t)info)[i].disposition; | |
2330 | } | |
2331 | ||
2332 | copy_sz = MIN(disp_buf_req_size, num_pages * sizeof(int) /* an int per page */); | |
2333 | kr = copyout(local_disp, (mach_vm_address_t)dispositions_addr, copy_sz); | |
2334 | ||
2335 | start += curr_sz; | |
2336 | disp_buf_req_size -= copy_sz; | |
2337 | disp_buf_total_size += copy_sz; | |
2338 | ||
2339 | if (kr != 0) { | |
2340 | break; | |
2341 | } | |
2342 | ||
2343 | if ((disp_buf_req_size == 0) || (curr_sz >= size)) { | |
2344 | /* | |
2345 | * We might have inspected the full range, or even | |
2346 | * more than that, esp. if the user passed in | |
2347 | * a non-page-aligned start/size and/or if we | |
2348 | * descended into a submap. We are done here. | |
2349 | */ | |
2350 | ||
2351 | size = 0; | |
2352 | } else { | |
2353 | dispositions_addr += copy_sz; | |
2354 | ||
2355 | size -= curr_sz; | |
2356 | ||
2357 | curr_sz = MIN(vm_map_round_page(size, effective_page_mask), MAX_PAGE_RANGE_QUERY); | |
2358 | num_pages = (int)(curr_sz >> effective_page_shift); | |
2359 | } | |
2360 | } | |
2361 | ||
2362 | *dispositions_count = disp_buf_total_size / sizeof(int); | |
2363 | ||
2364 | out: | |
2365 | if (local_disp) { | |
2366 | kheap_free(KHEAP_TEMP, local_disp, local_disp_size); | |
2367 | } | |
2368 | if (info) { | |
2369 | kheap_free(KHEAP_TEMP, info, info_size); | |
2370 | } | |
2371 | return kr; | |
2372 | } | |
2373 | ||
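/*
 * Usage sketch (illustrative only): querying the disposition of each
 * page in a small range of the caller's map. The dispositions buffer
 * holds one int per page, and its count is updated to the number of
 * entries actually filled in:
 *
 *	#define NPAGES 16
 *	int dispositions[NPAGES];
 *	mach_vm_size_t disp_count = NPAGES;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_page_range_query(mach_task_self(),
 *	    (mach_vm_offset_t)addr,
 *	    (mach_vm_size_t)(NPAGES * vm_page_size),
 *	    (mach_vm_address_t)(uintptr_t)dispositions,
 *	    &disp_count);
 *
 * Each entry can then be tested against the VM_PAGE_QUERY_PAGE_*
 * disposition bits (present, dirty, etc.).
 */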
2374 | kern_return_t | |
2375 | mach_vm_page_info( | |
2376 | vm_map_t map, | |
2377 | mach_vm_address_t address, | |
2378 | vm_page_info_flavor_t flavor, | |
2379 | vm_page_info_t info, | |
2380 | mach_msg_type_number_t *count) | |
2381 | { | |
2382 | kern_return_t kr; | |
2383 | ||
2384 | if (map == VM_MAP_NULL) { | |
2385 | return KERN_INVALID_ARGUMENT; | |
2386 | } | |
2387 | ||
2388 | kr = vm_map_page_info(map, address, flavor, info, count); | |
2389 | return kr; | |
2390 | } | |
2391 | ||
2392 | /* map a (whole) upl into an address space */ | |
2393 | kern_return_t | |
2394 | vm_upl_map( | |
2395 | vm_map_t map, | |
2396 | upl_t upl, | |
2397 | vm_address_t *dst_addr) | |
2398 | { | |
2399 | vm_map_offset_t map_addr; | |
2400 | kern_return_t kr; | |
2401 | ||
2402 | if (VM_MAP_NULL == map) { | |
2403 | return KERN_INVALID_ARGUMENT; | |
2404 | } | |
2405 | ||
2406 | kr = vm_map_enter_upl(map, upl, &map_addr); | |
2407 | *dst_addr = CAST_DOWN(vm_address_t, map_addr); | |
2408 | return kr; | |
2409 | } | |
2410 | ||
2411 | kern_return_t | |
2412 | vm_upl_unmap( | |
2413 | vm_map_t map, | |
2414 | upl_t upl) | |
2415 | { | |
2416 | if (VM_MAP_NULL == map) { | |
2417 | return KERN_INVALID_ARGUMENT; | |
2418 | } | |
2419 | ||
2420 | return vm_map_remove_upl(map, upl); | |
2421 | } | |
2422 | ||
2423 | /* Retrieve a upl for an object underlying an address range in a map */ | |
2424 | ||
2425 | kern_return_t | |
2426 | vm_map_get_upl( | |
2427 | vm_map_t map, | |
2428 | vm_map_offset_t map_offset, | |
2429 | upl_size_t *upl_size, | |
2430 | upl_t *upl, | |
2431 | upl_page_info_array_t page_list, | |
2432 | unsigned int *count, | |
2433 | upl_control_flags_t *flags, | |
2434 | vm_tag_t tag, | |
2435 | int force_data_sync) | |
2436 | { | |
2437 | upl_control_flags_t map_flags; | |
2438 | kern_return_t kr; | |
2439 | ||
2440 | if (VM_MAP_NULL == map) { | |
2441 | return KERN_INVALID_ARGUMENT; | |
2442 | } | |
2443 | ||
2444 | map_flags = *flags & ~UPL_NOZEROFILL; | |
2445 | if (force_data_sync) { | |
2446 | map_flags |= UPL_FORCE_DATA_SYNC; | |
2447 | } | |
2448 | ||
2449 | kr = vm_map_create_upl(map, | |
2450 | map_offset, | |
2451 | upl_size, | |
2452 | upl, | |
2453 | page_list, | |
2454 | count, | |
2455 | &map_flags, | |
2456 | tag); | |
2457 | ||
2458 | *flags = (map_flags & ~UPL_FORCE_DATA_SYNC); | |
2459 | return kr; | |
2460 | } | |
2461 | ||
2462 | /* | |
2463 | * mach_make_memory_entry_64 | |
2464 | * | |
2465 | * Think of it as a two-stage vm_remap() operation. First | |
2466 | * you get a handle. Second, you map that handle | |
2467 | * somewhere else. Rather than doing it all at once (and | |
2468 | * without needing access to the other whole map). | |
2469 | */ | |
2470 | kern_return_t | |
2471 | mach_make_memory_entry_64( | |
2472 | vm_map_t target_map, | |
2473 | memory_object_size_t *size, | |
2474 | memory_object_offset_t offset, | |
2475 | vm_prot_t permission, | |
2476 | ipc_port_t *object_handle, | |
2477 | ipc_port_t parent_handle) | |
2478 | { | |
2479 | vm_named_entry_kernel_flags_t vmne_kflags; | |
2480 | ||
2481 | if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_USER) { | |
2482 | /* | |
2483 | * Unknown flag: reject for forward compatibility. | |
2484 | */ | |
2485 | return KERN_INVALID_VALUE; | |
2486 | } | |
2487 | ||
2488 | vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE; | |
2489 | if (permission & MAP_MEM_LEDGER_TAGGED) { | |
2490 | vmne_kflags.vmnekf_ledger_tag = VM_LEDGER_TAG_DEFAULT; | |
2491 | } | |
2492 | return mach_make_memory_entry_internal(target_map, | |
2493 | size, | |
2494 | offset, | |
2495 | permission, | |
2496 | vmne_kflags, | |
2497 | object_handle, | |
2498 | parent_handle); | |
2499 | } | |
2500 | ||
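/*
 * Usage sketch (illustrative only) of the two-stage operation described
 * above: create a handle for a range of the current task, then map that
 * handle into another task ("target_task" is assumed to be a valid task
 * port the caller holds):
 *
 *	memory_object_size_t entry_size = len;
 *	mach_port_t entry = MACH_PORT_NULL;
 *	mach_vm_address_t target_addr = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
 *	    (memory_object_offset_t)addr,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    &entry, MACH_PORT_NULL);
 *	...
 *	kr = mach_vm_map(target_task, &target_addr, entry_size, 0,
 *	    VM_FLAGS_ANYWHERE, entry, 0, FALSE,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_INHERIT_NONE);
 */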
2501 | kern_return_t | |
2502 | mach_make_memory_entry_internal( | |
2503 | vm_map_t target_map, | |
2504 | memory_object_size_t *size, | |
2505 | memory_object_offset_t offset, | |
2506 | vm_prot_t permission, | |
2507 | vm_named_entry_kernel_flags_t vmne_kflags, | |
2508 | ipc_port_t *object_handle, | |
2509 | ipc_port_t parent_handle) | |
2510 | { | |
2511 | vm_named_entry_t parent_entry; | |
2512 | vm_named_entry_t user_entry; | |
2513 | ipc_port_t user_handle; | |
2514 | kern_return_t kr; | |
2515 | vm_object_t object; | |
2516 | vm_map_size_t map_size; | |
2517 | vm_map_offset_t map_start, map_end; | |
2518 | ||
2519 | /* | |
2520 | * Stash the offset in the page for use by vm_map_enter_mem_object() | |
2521 | * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case. | |
2522 | */ | |
2523 | vm_object_offset_t offset_in_page; | |
2524 | ||
2525 | unsigned int access; | |
2526 | vm_prot_t protections; | |
2527 | vm_prot_t original_protections, mask_protections; | |
2528 | unsigned int wimg_mode; | |
2529 | boolean_t use_data_addr; | |
2530 | boolean_t use_4K_compat; | |
2531 | ||
2532 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x\n", target_map, offset, *size, permission); | |
2533 | ||
2534 | user_entry = NULL; | |
2535 | ||
2536 | if ((permission & MAP_MEM_FLAGS_MASK) & ~MAP_MEM_FLAGS_ALL) { | |
2537 | /* | |
2538 | * Unknown flag: reject for forward compatibility. | |
2539 | */ | |
2540 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_VALUE); | |
2541 | return KERN_INVALID_VALUE; | |
2542 | } | |
2543 | ||
2544 | if (IP_VALID(parent_handle) && | |
2545 | ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) { | |
2546 | parent_entry = (vm_named_entry_t) ip_get_kobject(parent_handle); | |
2547 | } else { | |
2548 | parent_entry = NULL; | |
2549 | } | |
2550 | ||
2551 | if (parent_entry && parent_entry->is_copy) { | |
2552 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); | |
2553 | return KERN_INVALID_ARGUMENT; | |
2554 | } | |
2555 | ||
2556 | original_protections = permission & VM_PROT_ALL; | |
2557 | protections = original_protections; | |
2558 | mask_protections = permission & VM_PROT_IS_MASK; | |
2559 | access = GET_MAP_MEM(permission); | |
2560 | use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0); | |
2561 | use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0); | |
2562 | ||
2563 | user_handle = IP_NULL; | |
2564 | user_entry = NULL; | |
2565 | ||
2566 | map_start = vm_map_trunc_page(offset, VM_MAP_PAGE_MASK(target_map)); | |
2567 | ||
2568 | if (permission & MAP_MEM_ONLY) { | |
2569 | boolean_t parent_is_object; | |
2570 | ||
2571 | map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map)); | |
2572 | map_size = map_end - map_start; | |
2573 | ||
2574 | if (use_data_addr || use_4K_compat || parent_entry == NULL) { | |
2575 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); | |
2576 | return KERN_INVALID_ARGUMENT; | |
2577 | } | |
2578 | ||
2579 | parent_is_object = parent_entry->is_object; | |
2580 | if (!parent_is_object) { | |
2581 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); | |
2582 | return KERN_INVALID_ARGUMENT; | |
2583 | } | |
2584 | object = vm_named_entry_to_vm_object(parent_entry); | |
2585 | if (parent_is_object && object != VM_OBJECT_NULL) { | |
2586 | wimg_mode = object->wimg_bits; | |
2587 | } else { | |
2588 | wimg_mode = VM_WIMG_USE_DEFAULT; | |
2589 | } | |
2590 | if ((access != GET_MAP_MEM(parent_entry->protection)) && | |
2591 | !(parent_entry->protection & VM_PROT_WRITE)) { | |
2592 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_RIGHT); | |
2593 | return KERN_INVALID_RIGHT; | |
2594 | } | |
2595 | vm_prot_to_wimg(access, &wimg_mode); | |
2596 | if (access != MAP_MEM_NOOP) { | |
2597 | SET_MAP_MEM(access, parent_entry->protection); | |
2598 | } | |
2599 | if (parent_is_object && object && | |
2600 | (access != MAP_MEM_NOOP) && | |
2601 | (!(object->nophyscache))) { | |
2602 | if (object->wimg_bits != wimg_mode) { | |
2603 | vm_object_lock(object); | |
2604 | vm_object_change_wimg_mode(object, wimg_mode); | |
2605 | vm_object_unlock(object); | |
2606 | } | |
2607 | } | |
2608 | if (object_handle) { | |
2609 | *object_handle = IP_NULL; | |
2610 | } | |
2611 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); | |
2612 | return KERN_SUCCESS; | |
2613 | } else if (permission & MAP_MEM_NAMED_CREATE) { | |
2614 | int ledger_flags = 0; | |
2615 | task_t owner; | |
2616 | ||
2617 | map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map)); | |
2618 | map_size = map_end - map_start; | |
2619 | ||
2620 | if (use_data_addr || use_4K_compat) { | |
2621 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); | |
2622 | return KERN_INVALID_ARGUMENT; | |
2623 | } | |
2624 | ||
2625 | if (map_size == 0) { | |
2626 | *size = 0; | |
2627 | *object_handle = IPC_PORT_NULL; | |
2628 | return KERN_SUCCESS; | |
2629 | } | |
2630 | ||
2631 | kr = mach_memory_entry_allocate(&user_entry, &user_handle); | |
2632 | if (kr != KERN_SUCCESS) { | |
2633 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE); | |
2634 | return KERN_FAILURE; | |
2635 | } | |
2636 | ||
2637 | /* | |
2638 | * Force the creation of the VM object now. | |
2639 | */ | |
2640 | if (map_size > (vm_map_size_t) ANON_MAX_SIZE) { | |
2641 | /* | |
2642 | * LP64todo - for now, we can only allocate 4GB-4096 | |
2643 | * internal objects because the default pager can't | |
2644 | * page bigger ones. Remove this when it can. | |
2645 | */ | |
2646 | kr = KERN_FAILURE; | |
2647 | goto make_mem_done; | |
2648 | } | |
2649 | ||
2650 | object = vm_object_allocate(map_size); | |
2651 | assert(object != VM_OBJECT_NULL); | |
2652 | ||
2653 | /* | |
2654 | * XXX | |
2655 | * We use this path when we want to make sure that | |
2656 | * nobody messes with the object (coalesce, for | |
2657 | * example) before we map it. | |
2658 | * We might want to use these objects for transposition via | |
2659 | * vm_object_transpose() too, so we don't want any copy or | |
2660 | * shadow objects either... | |
2661 | */ | |
2662 | object->copy_strategy = MEMORY_OBJECT_COPY_NONE; | |
2663 | object->true_share = TRUE; | |
2664 | ||
2665 | owner = current_task(); | |
2666 | if ((permission & MAP_MEM_PURGABLE) || | |
2667 | vmne_kflags.vmnekf_ledger_tag) { | |
2668 | assert(object->vo_owner == NULL); | |
2669 | assert(object->resident_page_count == 0); | |
2670 | assert(object->wired_page_count == 0); | |
2671 | assert(owner != TASK_NULL); | |
2672 | if (vmne_kflags.vmnekf_ledger_no_footprint) { | |
2673 | ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT; | |
2674 | object->vo_no_footprint = TRUE; | |
2675 | } | |
2676 | if (permission & MAP_MEM_PURGABLE) { | |
2677 | if (!(permission & VM_PROT_WRITE)) { | |
2678 | /* if we can't write, we can't purge */ | |
2679 | vm_object_deallocate(object); | |
2680 | kr = KERN_INVALID_ARGUMENT; | |
2681 | goto make_mem_done; | |
2682 | } | |
2683 | object->purgable = VM_PURGABLE_NONVOLATILE; | |
2684 | if (permission & MAP_MEM_PURGABLE_KERNEL_ONLY) { | |
2685 | object->purgeable_only_by_kernel = TRUE; | |
2686 | } | |
2687 | #if __arm64__ | |
2688 | if (owner->task_legacy_footprint) { | |
2689 | /* | |
2690 | * For ios11, we failed to account for | |
2691 | * this memory. Keep doing that for | |
2692 | * legacy apps (built before ios12), | |
2693 | * for backwards compatibility's sake... | |
2694 | */ | |
2695 | owner = kernel_task; | |
2696 | } | |
2697 | #endif /* __arm64__ */ | |
2698 | vm_object_lock(object); | |
2699 | vm_purgeable_nonvolatile_enqueue(object, owner); | |
2700 | vm_object_unlock(object); | |
2701 | } | |
2702 | } | |
2703 | ||
2704 | if (vmne_kflags.vmnekf_ledger_tag) { | |
2705 | /* | |
2706 | * Bill this object to the current task's | |
2707 | * ledgers for the given tag. | |
2708 | */ | |
2709 | if (vmne_kflags.vmnekf_ledger_no_footprint) { | |
2710 | ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT; | |
2711 | } | |
2712 | vm_object_lock(object); | |
2713 | object->vo_ledger_tag = vmne_kflags.vmnekf_ledger_tag; | |
2714 | kr = vm_object_ownership_change( | |
2715 | object, | |
2716 | vmne_kflags.vmnekf_ledger_tag, | |
2717 | owner, /* new owner */ | |
2718 | ledger_flags, | |
2719 | FALSE); /* task_objq locked? */ | |
2720 | vm_object_unlock(object); | |
2721 | if (kr != KERN_SUCCESS) { | |
2722 | vm_object_deallocate(object); | |
2723 | goto make_mem_done; | |
2724 | } | |
2725 | } | |
2726 | ||
2727 | #if CONFIG_SECLUDED_MEMORY | |
2728 | if (secluded_for_iokit && /* global boot-arg */ | |
2729 | ((permission & MAP_MEM_GRAB_SECLUDED) | |
2730 | #if 11 | |
2731 | /* XXX FBDP for my testing only */ | |
2732 | || (secluded_for_fbdp && map_size == 97550336) | |
2733 | #endif | |
2734 | )) { | |
2735 | #if 11 | |
2736 | if (!(permission & MAP_MEM_GRAB_SECLUDED) && | |
2737 | secluded_for_fbdp) { | |
2738 | printf("FBDP: object %p size %lld can grab secluded\n", object, (uint64_t) map_size); | |
2739 | } | |
2740 | #endif | |
2741 | object->can_grab_secluded = TRUE; | |
2742 | assert(!object->eligible_for_secluded); | |
2743 | } | |
2744 | #endif /* CONFIG_SECLUDED_MEMORY */ | |
2745 | ||
2746 | /* | |
2747 | * The VM object is brand new and nobody else knows about it, | |
2748 | * so we don't need to lock it. | |
2749 | */ | |
2750 | ||
2751 | wimg_mode = object->wimg_bits; | |
2752 | vm_prot_to_wimg(access, &wimg_mode); | |
2753 | if (access != MAP_MEM_NOOP) { | |
2754 | object->wimg_bits = wimg_mode; | |
2755 | } | |
2756 | ||
2757 | /* the object has no pages, so no WIMG bits to update here */ | |
2758 | ||
2759 | kr = vm_named_entry_from_vm_object( | |
2760 | user_entry, | |
2761 | object, | |
2762 | 0, | |
2763 | map_size, | |
2764 | (protections & VM_PROT_ALL)); | |
2765 | if (kr != KERN_SUCCESS) { | |
2766 | vm_object_deallocate(object); | |
2767 | goto make_mem_done; | |
2768 | } | |
2769 | user_entry->internal = TRUE; | |
2770 | user_entry->is_sub_map = FALSE; | |
2771 | user_entry->offset = 0; | |
2772 | user_entry->data_offset = 0; | |
2773 | user_entry->protection = protections; | |
2774 | SET_MAP_MEM(access, user_entry->protection); | |
2775 | user_entry->size = map_size; | |
2776 | ||
2777 | /* user_object pager and internal fields are not used */ | |
2778 | /* when the object field is filled in. */ | |
2779 | ||
2780 | *size = CAST_DOWN(vm_size_t, (user_entry->size - | |
2781 | user_entry->data_offset)); | |
2782 | *object_handle = user_handle; | |
2783 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); | |
2784 | return KERN_SUCCESS; | |
2785 | } | |
2786 | ||
2787 | if (permission & MAP_MEM_VM_COPY) { | |
2788 | vm_map_copy_t copy; | |
2789 | ||
2790 | if (target_map == VM_MAP_NULL) { | |
2791 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK); | |
2792 | return KERN_INVALID_TASK; | |
2793 | } | |
2794 | ||
2795 | map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map)); | |
2796 | map_size = map_end - map_start; | |
2797 | if (map_size == 0) { | |
2798 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); | |
2799 | return KERN_INVALID_ARGUMENT; | |
2800 | } | |
2801 | ||
2802 | if (use_data_addr || use_4K_compat) { | |
2803 | offset_in_page = offset - map_start; | |
2804 | if (use_4K_compat) { | |
2805 | offset_in_page &= ~((signed)(0xFFF)); | |
2806 | } | |
2807 | } else { | |
2808 | offset_in_page = 0; | |
2809 | } | |
2810 | ||
2811 | kr = vm_map_copyin_internal(target_map, | |
2812 | map_start, | |
2813 | map_size, | |
2814 | VM_MAP_COPYIN_ENTRY_LIST, | |
2815 | ©); | |
2816 | if (kr != KERN_SUCCESS) { | |
2817 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr); | |
2818 | return kr; | |
2819 | } | |
2820 | assert(copy != VM_MAP_COPY_NULL); | |
2821 | ||
2822 | kr = mach_memory_entry_allocate(&user_entry, &user_handle); | |
2823 | if (kr != KERN_SUCCESS) { | |
2824 | vm_map_copy_discard(copy); | |
2825 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE); | |
2826 | return KERN_FAILURE; | |
2827 | } | |
2828 | ||
2829 | user_entry->backing.copy = copy; | |
2830 | user_entry->internal = FALSE; | |
2831 | user_entry->is_sub_map = FALSE; | |
2832 | user_entry->is_copy = TRUE; | |
2833 | user_entry->offset = 0; | |
2834 | user_entry->protection = protections; | |
2835 | user_entry->size = map_size; | |
2836 | user_entry->data_offset = offset_in_page; | |
2837 | ||
2838 | *size = CAST_DOWN(vm_size_t, (user_entry->size - | |
2839 | user_entry->data_offset)); | |
2840 | *object_handle = user_handle; | |
2841 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); | |
2842 | return KERN_SUCCESS; | |
2843 | } | |
2844 | ||
2845 | if ((permission & MAP_MEM_VM_SHARE) | |
2846 | || parent_entry == NULL | |
2847 | || (permission & MAP_MEM_NAMED_REUSE)) { | |
2848 | vm_map_copy_t copy; | |
2849 | vm_prot_t cur_prot, max_prot; | |
2850 | vm_map_kernel_flags_t vmk_flags; | |
2851 | vm_map_entry_t parent_copy_entry; | |
2852 | ||
2853 | if (target_map == VM_MAP_NULL) { | |
2854 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_TASK); | |
2855 | return KERN_INVALID_TASK; | |
2856 | } | |
2857 | ||
2858 | map_end = vm_map_round_page(offset + *size, VM_MAP_PAGE_MASK(target_map)); | |
2859 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; | |
2860 | parent_copy_entry = VM_MAP_ENTRY_NULL; | |
2861 | if (!(permission & MAP_MEM_VM_SHARE)) { | |
2862 | vm_map_t tmp_map, real_map; | |
2863 | vm_map_version_t version; | |
2864 | vm_object_t tmp_object; | |
2865 | vm_object_offset_t obj_off; | |
2866 | vm_prot_t prot; | |
2867 | boolean_t wired; | |
2868 | bool contended; | |
2869 | ||
2870 | /* resolve any pending submap copy-on-write... */ | |
2871 | if (protections & VM_PROT_WRITE) { | |
2872 | tmp_map = target_map; | |
2873 | vm_map_lock_read(tmp_map); | |
2874 | kr = vm_map_lookup_locked(&tmp_map, | |
2875 | map_start, | |
2876 | protections | mask_protections, | |
2877 | OBJECT_LOCK_EXCLUSIVE, | |
2878 | &version, | |
2879 | &tmp_object, | |
2880 | &obj_off, | |
2881 | &prot, | |
2882 | &wired, | |
2883 | NULL, /* fault_info */ | |
2884 | &real_map, | |
2885 | &contended); | |
2886 | if (kr != KERN_SUCCESS) { | |
2887 | vm_map_unlock_read(tmp_map); | |
2888 | } else { | |
2889 | vm_object_unlock(tmp_object); | |
2890 | vm_map_unlock_read(tmp_map); | |
2891 | if (real_map != tmp_map) { | |
2892 | vm_map_unlock_read(real_map); | |
2893 | } | |
2894 | } | |
2895 | } | |
2896 | /* ... and carry on */ | |
2897 | ||
2898 | /* stop extracting if VM object changes */ | |
2899 | vmk_flags.vmkf_copy_single_object = TRUE; | |
2900 | if ((permission & MAP_MEM_NAMED_REUSE) && | |
2901 | parent_entry != NULL && | |
2902 | parent_entry->is_object) { | |
2903 | vm_map_copy_t parent_copy; | |
2904 | parent_copy = parent_entry->backing.copy; | |
2905 | assert(parent_copy->cpy_hdr.nentries == 1); | |
2906 | parent_copy_entry = vm_map_copy_first_entry(parent_copy); | |
2907 | assert(!parent_copy_entry->is_sub_map); | |
2908 | } | |
2909 | } | |
2910 | ||
2911 | map_size = map_end - map_start; | |
2912 | if (map_size == 0) { | |
2913 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_INVALID_ARGUMENT); | |
2914 | return KERN_INVALID_ARGUMENT; | |
2915 | } | |
2916 | ||
2917 | if (use_data_addr || use_4K_compat) { | |
2918 | offset_in_page = offset - map_start; | |
2919 | if (use_4K_compat) { | |
2920 | offset_in_page &= ~((signed)(0xFFF)); | |
2921 | } | |
2922 | } else { | |
2923 | offset_in_page = 0; | |
2924 | } | |
2925 | ||
2926 | if (mask_protections) { | |
2927 | /* | |
2928 | * caller is asking for whichever protections are | |
2929 | * available: no required protections. | |
2930 | */ | |
2931 | cur_prot = VM_PROT_NONE; | |
2932 | max_prot = VM_PROT_NONE; | |
2933 | } else { | |
2934 | /* | |
2935 | * Caller wants a memory entry with "protections". | |
2936 | * Make sure we extract only memory that matches that. | |
2937 | */ | |
2938 | cur_prot = protections; | |
2939 | max_prot = protections; | |
2940 | } | |
2941 | if (target_map->pmap == kernel_pmap) { | |
2942 | /* | |
2943 | * Get "reserved" map entries to avoid deadlocking | |
2944 | * on the kernel map or a kernel submap if we | |
2945 | * run out of VM map entries and need to refill that | |
2946 | * zone. | |
2947 | */ | |
2948 | vmk_flags.vmkf_copy_pageable = FALSE; | |
2949 | } else { | |
2950 | vmk_flags.vmkf_copy_pageable = TRUE; | |
2951 | } | |
2952 | vmk_flags.vmkf_copy_same_map = FALSE; | |
2953 | assert(map_size != 0); | |
2954 | kr = vm_map_copy_extract(target_map, | |
2955 | map_start, | |
2956 | map_size, | |
2957 | FALSE, /* copy */ | |
2958 | ©, | |
2959 | &cur_prot, | |
2960 | &max_prot, | |
2961 | VM_INHERIT_SHARE, | |
2962 | vmk_flags); | |
2963 | if (kr != KERN_SUCCESS) { | |
2964 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr); | |
2965 | if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { | |
2966 | // panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr); | |
2967 | } | |
2968 | return kr; | |
2969 | } | |
2970 | assert(copy != VM_MAP_COPY_NULL); | |
2971 | ||
2972 | if (mask_protections) { | |
2973 | /* | |
2974 | * We just want as much of "original_protections" | |
2975 | * as we can get out of the actual "cur_prot". | |
2976 | */ | |
2977 | protections &= cur_prot; | |
2978 | if (protections == VM_PROT_NONE) { | |
2979 | /* no access at all: fail */ | |
2980 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE); | |
2981 | if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { | |
2982 | // panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr); | |
2983 | } | |
2984 | vm_map_copy_discard(copy); | |
2985 | return KERN_PROTECTION_FAILURE; | |
2986 | } | |
2987 | } else { | |
2988 | /* | |
2989 | * We want exactly "original_protections" | |
2990 | * out of "cur_prot". | |
2991 | */ | |
2992 | assert((cur_prot & protections) == protections); | |
2993 | assert((max_prot & protections) == protections); | |
2994 | /* XXX FBDP TODO: no longer needed? */ | |
2995 | if ((cur_prot & protections) != protections) { | |
2996 | if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { | |
2997 | // panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, KERN_PROTECTION_FAILURE); | |
2998 | } | |
2999 | vm_map_copy_discard(copy); | |
3000 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_PROTECTION_FAILURE); | |
3001 | return KERN_PROTECTION_FAILURE; | |
3002 | } | |
3003 | } | |
3004 | ||
3005 | if (!(permission & MAP_MEM_VM_SHARE)) { | |
3006 | vm_map_entry_t copy_entry; | |
3007 | ||
3008 | /* limit size to what's actually covered by "copy" */ | |
3009 | assert(copy->cpy_hdr.nentries == 1); | |
3010 | copy_entry = vm_map_copy_first_entry(copy); | |
3011 | map_size = copy_entry->vme_end - copy_entry->vme_start; | |
3012 | ||
3013 | if ((permission & MAP_MEM_NAMED_REUSE) && | |
3014 | parent_copy_entry != VM_MAP_ENTRY_NULL && | |
3015 | VME_OBJECT(copy_entry) == VME_OBJECT(parent_copy_entry) && | |
3016 | VME_OFFSET(copy_entry) == VME_OFFSET(parent_copy_entry) && | |
3017 | parent_entry->offset == 0 && | |
3018 | parent_entry->size == map_size && | |
3019 | (parent_entry->data_offset == offset_in_page)) { | |
3020 | /* we have a match: re-use "parent_entry" */ | |
3021 | ||
3022 | /* release our new "copy" */ | |
3023 | vm_map_copy_discard(copy); | |
3024 | /* get extra send right on handle */ | |
3025 | ipc_port_copy_send(parent_handle); | |
3026 | ||
3027 | *size = CAST_DOWN(vm_size_t, | |
3028 | (parent_entry->size - | |
3029 | parent_entry->data_offset)); | |
3030 | *object_handle = parent_handle; | |
3031 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); | |
3032 | return KERN_SUCCESS; | |
3033 | } | |
3034 | ||
3035 | /* no match: we need to create a new entry */ | |
3036 | object = VME_OBJECT(copy_entry); | |
3037 | vm_object_lock(object); | |
3038 | wimg_mode = object->wimg_bits; | |
3039 | if (!(object->nophyscache)) { | |
3040 | vm_prot_to_wimg(access, &wimg_mode); | |
3041 | } | |
3042 | if (object->wimg_bits != wimg_mode) { | |
3043 | vm_object_change_wimg_mode(object, wimg_mode); | |
3044 | } | |
3045 | vm_object_unlock(object); | |
3046 | } | |
3047 | ||
3048 | kr = mach_memory_entry_allocate(&user_entry, &user_handle); | |
3049 | if (kr != KERN_SUCCESS) { | |
3050 | if (VM_MAP_PAGE_SHIFT(target_map) < PAGE_SHIFT) { | |
3051 | // panic("DEBUG4K %s:%d kr 0x%x\n", __FUNCTION__, __LINE__, kr); | |
3052 | } | |
3053 | vm_map_copy_discard(copy); | |
3054 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_FAILURE); | |
3055 | return KERN_FAILURE; | |
3056 | } | |
3057 | ||
3058 | user_entry->backing.copy = copy; | |
3059 | user_entry->is_sub_map = FALSE; | |
3060 | user_entry->is_object = FALSE; | |
3061 | user_entry->internal = FALSE; | |
3062 | user_entry->protection = protections; | |
3063 | user_entry->size = map_size; | |
3064 | user_entry->data_offset = offset_in_page; | |
3065 | ||
3066 | if (permission & MAP_MEM_VM_SHARE) { | |
3067 | user_entry->is_copy = TRUE; | |
3068 | user_entry->offset = 0; | |
3069 | } else { | |
3070 | user_entry->is_object = TRUE; | |
3071 | user_entry->internal = object->internal; | |
3072 | user_entry->offset = VME_OFFSET(vm_map_copy_first_entry(copy)); | |
3073 | SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection); | |
3074 | } | |
3075 | ||
3076 | *size = CAST_DOWN(vm_size_t, (user_entry->size - | |
3077 | user_entry->data_offset)); | |
3078 | *object_handle = user_handle; | |
3079 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); | |
3080 | return KERN_SUCCESS; | |
3081 | } | |
3082 | ||
3083 | /* The new object will be based on an existing named object */ | |
3084 | if (parent_entry == NULL) { | |
3085 | kr = KERN_INVALID_ARGUMENT; | |
3086 | goto make_mem_done; | |
3087 | } | |
3088 | ||
3089 | if (parent_entry->is_copy) { | |
3090 | panic("parent_entry %p is_copy not supported\n", parent_entry); | |
3091 | kr = KERN_INVALID_ARGUMENT; | |
3092 | goto make_mem_done; | |
3093 | } | |
3094 | ||
3095 | if (use_data_addr || use_4K_compat) { | |
3096 | /* | |
3097 | * submaps and pagers should only be accessible from within | |
3098 | * the kernel, which shouldn't use the data address flag, so we can fail here. | |
3099 | */ | |
3100 | if (parent_entry->is_sub_map) { | |
3101 | panic("Shouldn't be using data address with a parent entry that is a submap."); | |
3102 | } | |
3103 | /* | |
3104 | * Account for offset to data in parent entry and | |
3105 | * compute our own offset to data. | |
3106 | */ | |
3107 | if ((offset + *size + parent_entry->data_offset) > parent_entry->size) { | |
3108 | kr = KERN_INVALID_ARGUMENT; | |
3109 | goto make_mem_done; | |
3110 | } | |
3111 | ||
3112 | map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK); | |
3113 | offset_in_page = (offset + parent_entry->data_offset) - map_start; | |
3114 | if (use_4K_compat) { | |
3115 | offset_in_page &= ~((signed)(0xFFF)); | |
3116 | } | |
3117 | map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK); | |
3118 | map_size = map_end - map_start; | |
3119 | } else { | |
3120 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); | |
3121 | map_size = map_end - map_start; | |
3122 | offset_in_page = 0; | |
3123 | ||
3124 | if ((offset + map_size) > parent_entry->size) { | |
3125 | kr = KERN_INVALID_ARGUMENT; | |
3126 | goto make_mem_done; | |
3127 | } | |
3128 | } | |
3129 | ||
3130 | if (mask_protections) { | |
3131 | /* | |
3132 | * The caller asked us to use the "protections" as | |
3133 | * a mask, so restrict "protections" to what this | |
3134 | * mapping actually allows. | |
3135 | */ | |
3136 | protections &= parent_entry->protection; | |
3137 | } | |
3138 | if ((protections & parent_entry->protection) != protections) { | |
3139 | kr = KERN_PROTECTION_FAILURE; | |
3140 | goto make_mem_done; | |
3141 | } | |
3142 | ||
3143 | if (mach_memory_entry_allocate(&user_entry, &user_handle) | |
3144 | != KERN_SUCCESS) { | |
3145 | kr = KERN_FAILURE; | |
3146 | goto make_mem_done; | |
3147 | } | |
3148 | ||
3149 | user_entry->size = map_size; | |
3150 | user_entry->offset = parent_entry->offset + map_start; | |
3151 | user_entry->data_offset = offset_in_page; | |
3152 | user_entry->is_sub_map = parent_entry->is_sub_map; | |
3153 | user_entry->is_copy = parent_entry->is_copy; | |
3154 | user_entry->internal = parent_entry->internal; | |
3155 | user_entry->protection = protections; | |
3156 | ||
3157 | if (access != MAP_MEM_NOOP) { | |
3158 | SET_MAP_MEM(access, user_entry->protection); | |
3159 | } | |
3160 | ||
3161 | if (parent_entry->is_sub_map) { | |
3162 | vm_map_t map = parent_entry->backing.map; | |
3163 | vm_map_reference(map); | |
3164 | user_entry->backing.map = map; | |
3165 | } else { | |
3166 | object = vm_named_entry_to_vm_object(parent_entry); | |
3167 | assert(object != VM_OBJECT_NULL); | |
3168 | assert(object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC); | |
3169 | kr = vm_named_entry_from_vm_object( | |
3170 | user_entry, | |
3171 | object, | |
3172 | user_entry->offset, | |
3173 | user_entry->size, | |
3174 | (user_entry->protection & VM_PROT_ALL)); | |
3175 | if (kr != KERN_SUCCESS) { | |
3176 | goto make_mem_done; | |
3177 | } | |
3178 | assert(user_entry->is_object); | |
3179 | /* we now point to this object, hold on */ | |
3180 | vm_object_lock(object); | |
3181 | vm_object_reference_locked(object); | |
3182 | #if VM_OBJECT_TRACKING_OP_TRUESHARE | |
3183 | if (!object->true_share && | |
3184 | vm_object_tracking_inited) { | |
3185 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; | |
3186 | int num = 0; | |
3187 | ||
3188 | num = OSBacktrace(bt, | |
3189 | VM_OBJECT_TRACKING_BTDEPTH); | |
3190 | btlog_add_entry(vm_object_tracking_btlog, | |
3191 | object, | |
3192 | VM_OBJECT_TRACKING_OP_TRUESHARE, | |
3193 | bt, | |
3194 | num); | |
3195 | } | |
3196 | #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ | |
3197 | ||
3198 | object->true_share = TRUE; | |
3199 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { | |
3200 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
3201 | } | |
3202 | vm_object_unlock(object); | |
3203 | } | |
3204 | *size = CAST_DOWN(vm_size_t, (user_entry->size - | |
3205 | user_entry->data_offset)); | |
3206 | *object_handle = user_handle; | |
3207 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, KERN_SUCCESS); | |
3208 | return KERN_SUCCESS; | |
3209 | ||
3210 | make_mem_done: | |
3211 | if (user_handle != IP_NULL) { | |
3212 | /* | |
3213 | * Releasing "user_handle" causes the kernel object | |
3214 | * associated with it ("user_entry" here) to also be | |
3215 | * released and freed. | |
3216 | */ | |
3217 | mach_memory_entry_port_release(user_handle); | |
3218 | } | |
3219 | DEBUG4K_MEMENTRY("map %p offset 0x%llx size 0x%llx prot 0x%x -> entry %p kr 0x%x\n", target_map, offset, *size, permission, user_entry, kr); | |
3220 | return kr; | |
3221 | } | |
3222 | ||
3223 | kern_return_t | |
3224 | _mach_make_memory_entry( | |
3225 | vm_map_t target_map, | |
3226 | memory_object_size_t *size, | |
3227 | memory_object_offset_t offset, | |
3228 | vm_prot_t permission, | |
3229 | ipc_port_t *object_handle, | |
3230 | ipc_port_t parent_entry) | |
3231 | { | |
3232 | memory_object_size_t mo_size; | |
3233 | kern_return_t kr; | |
3234 | ||
3235 | mo_size = (memory_object_size_t)*size; | |
3236 | kr = mach_make_memory_entry_64(target_map, &mo_size, | |
3237 | (memory_object_offset_t)offset, permission, object_handle, | |
3238 | parent_entry); | |
3239 | *size = mo_size; | |
3240 | return kr; | |
3241 | } | |
3242 | ||
3243 | kern_return_t | |
3244 | mach_make_memory_entry( | |
3245 | vm_map_t target_map, | |
3246 | vm_size_t *size, | |
3247 | vm_offset_t offset, | |
3248 | vm_prot_t permission, | |
3249 | ipc_port_t *object_handle, | |
3250 | ipc_port_t parent_entry) | |
3251 | { | |
3252 | memory_object_size_t mo_size; | |
3253 | kern_return_t kr; | |
3254 | ||
3255 | mo_size = (memory_object_size_t)*size; | |
3256 | kr = mach_make_memory_entry_64(target_map, &mo_size, | |
3257 | (memory_object_offset_t)offset, permission, object_handle, | |
3258 | parent_entry); | |
3259 | *size = CAST_DOWN(vm_size_t, mo_size); | |
3260 | return kr; | |
3261 | } | |
3262 | ||
3263 | /* | |
3264 | * task_wire | |
3265 | * | |
3266 | * Set or clear the map's wiring_required flag. This flag, if set, | |
3267 | * will cause all future virtual memory allocation to allocate | |
3268 | * user wired memory. Unwiring pages wired down as a result of | |
3269 | * this routine is done with the vm_wire interface. | |
3270 | */ | |
3271 | kern_return_t | |
3272 | task_wire( | |
3273 | vm_map_t map, | |
3274 | boolean_t must_wire) | |
3275 | { | |
3276 | if (map == VM_MAP_NULL) { | |
3277 | return KERN_INVALID_ARGUMENT; | |
3278 | } | |
3279 | ||
3280 | vm_map_lock(map); | |
3281 | map->wiring_required = (must_wire == TRUE); | |
3282 | vm_map_unlock(map); | |
3283 | ||
3284 | return KERN_SUCCESS; | |
3285 | } | |
3286 | ||
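/*
 * Usage sketch (illustrative only): with wiring_required set, every
 * subsequent allocation in that map comes back user-wired, and the
 * pages are unwired through the vm_wire interface or by deallocation,
 * like any other user-wired memory:
 *
 *	kern_return_t kr;
 *
 *	kr = task_wire(map, TRUE);
 *	... allocations made here are wired ...
 *	kr = task_wire(map, FALSE);
 */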
3287 | kern_return_t | |
3288 | vm_map_exec_lockdown( | |
3289 | vm_map_t map) | |
3290 | { | |
3291 | if (map == VM_MAP_NULL) { | |
3292 | return KERN_INVALID_ARGUMENT; | |
3293 | } | |
3294 | ||
3295 | vm_map_lock(map); | |
3296 | map->map_disallow_new_exec = TRUE; | |
3297 | vm_map_unlock(map); | |
3298 | ||
3299 | return KERN_SUCCESS; | |
3300 | } | |
3301 | ||
3302 | #if VM_NAMED_ENTRY_LIST | |
3303 | queue_head_t vm_named_entry_list = QUEUE_HEAD_INITIALIZER(vm_named_entry_list); | |
3304 | int vm_named_entry_count = 0; | |
3305 | LCK_MTX_EARLY_DECLARE_ATTR(vm_named_entry_list_lock_data, | |
3306 | &vm_object_lck_grp, &vm_object_lck_attr); | |
3307 | #endif /* VM_NAMED_ENTRY_LIST */ | |
3308 | ||
3309 | __private_extern__ kern_return_t | |
3310 | mach_memory_entry_allocate( | |
3311 | vm_named_entry_t *user_entry_p, | |
3312 | ipc_port_t *user_handle_p) | |
3313 | { | |
3314 | vm_named_entry_t user_entry; | |
3315 | ipc_port_t user_handle; | |
3316 | ||
3317 | user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry); | |
3318 | if (user_entry == NULL) { | |
3319 | return KERN_FAILURE; | |
3320 | } | |
3321 | bzero(user_entry, sizeof(*user_entry)); | |
3322 | ||
3323 | named_entry_lock_init(user_entry); | |
3324 | ||
3325 | user_entry->backing.copy = NULL; | |
3326 | user_entry->is_object = FALSE; | |
3327 | user_entry->is_sub_map = FALSE; | |
3328 | user_entry->is_copy = FALSE; | |
3329 | user_entry->internal = FALSE; | |
3330 | user_entry->size = 0; | |
3331 | user_entry->offset = 0; | |
3332 | user_entry->data_offset = 0; | |
3333 | user_entry->protection = VM_PROT_NONE; | |
3334 | user_entry->ref_count = 1; | |
3335 | ||
3336 | user_handle = ipc_kobject_alloc_port((ipc_kobject_t)user_entry, | |
3337 | IKOT_NAMED_ENTRY, | |
3338 | IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST); | |
3339 | ||
3340 | *user_entry_p = user_entry; | |
3341 | *user_handle_p = user_handle; | |
3342 | ||
3343 | #if VM_NAMED_ENTRY_LIST | |
3344 | /* keep a loose (no reference) pointer to the Mach port, for debugging only */ | |
3345 | user_entry->named_entry_port = user_handle; | |
3346 | /* backtrace at allocation time, for debugging only */ | |
3347 | OSBacktrace(&user_entry->named_entry_bt[0], | |
3348 | NAMED_ENTRY_BT_DEPTH); | |
3349 | ||
3350 | /* add this new named entry to the global list */ | |
3351 | lck_mtx_lock_spin(&vm_named_entry_list_lock_data); | |
3352 | queue_enter(&vm_named_entry_list, user_entry, | |
3353 | vm_named_entry_t, named_entry_list); | |
3354 | vm_named_entry_count++; | |
3355 | lck_mtx_unlock(&vm_named_entry_list_lock_data); | |
3356 | #endif /* VM_NAMED_ENTRY_LIST */ | |
3357 | ||
3358 | return KERN_SUCCESS; | |
3359 | } | |
3360 | ||
3361 | /* | |
3362 | * mach_memory_object_memory_entry_64 | |
3363 | * | |
3364 | * Create a named entry backed by the provided pager. | |
3365 | * | |
3366 | */ | |
3367 | kern_return_t | |
3368 | mach_memory_object_memory_entry_64( | |
3369 | host_t host, | |
3370 | boolean_t internal, | |
3371 | vm_object_offset_t size, | |
3372 | vm_prot_t permission, | |
3373 | memory_object_t pager, | |
3374 | ipc_port_t *entry_handle) | |
3375 | { | |
3376 | unsigned int access; | |
3377 | vm_named_entry_t user_entry; | |
3378 | ipc_port_t user_handle; | |
3379 | vm_object_t object; | |
3380 | kern_return_t kr; | |
3381 | ||
3382 | if (host == HOST_NULL) { | |
3383 | return KERN_INVALID_HOST; | |
3384 | } | |
3385 | ||
3386 | if (pager == MEMORY_OBJECT_NULL && internal) { | |
3387 | object = vm_object_allocate(size); | |
3388 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) { | |
3389 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
3390 | } | |
3391 | } else { | |
3392 | object = memory_object_to_vm_object(pager); | |
3393 | if (object != VM_OBJECT_NULL) { | |
3394 | vm_object_reference(object); | |
3395 | } | |
3396 | } | |
3397 | if (object == VM_OBJECT_NULL) { | |
3398 | return KERN_INVALID_ARGUMENT; | |
3399 | } | |
3400 | ||
3401 | if (mach_memory_entry_allocate(&user_entry, &user_handle) | |
3402 | != KERN_SUCCESS) { | |
3403 | vm_object_deallocate(object); | |
3404 | return KERN_FAILURE; | |
3405 | } | |
3406 | ||
3407 | user_entry->size = size; | |
3408 | user_entry->offset = 0; | |
3409 | user_entry->protection = permission & VM_PROT_ALL; | |
3410 | access = GET_MAP_MEM(permission); | |
3411 | SET_MAP_MEM(access, user_entry->protection); | |
3412 | user_entry->is_sub_map = FALSE; | |
3413 | assert(user_entry->ref_count == 1); | |
3414 | ||
3415 | kr = vm_named_entry_from_vm_object(user_entry, object, 0, size, | |
3416 | (user_entry->protection & VM_PROT_ALL)); | |
3417 | if (kr != KERN_SUCCESS) { | |
3418 | return kr; | |
3419 | } | |
3420 | user_entry->internal = object->internal; | |
3421 | assert(object->internal == internal); | |
3422 | ||
3423 | *entry_handle = user_handle; | |
3424 | return KERN_SUCCESS; | |
3425 | } | |
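
/*
 * Usage sketch (illustrative only): passing MEMORY_OBJECT_NULL with
 * internal == TRUE takes the vm_object_allocate() path above and yields
 * a named entry backed by a fresh anonymous object:
 *
 *	ipc_port_t entry;
 *	kern_return_t kr;
 *
 *	kr = mach_memory_object_memory_entry_64(host_self(),
 *	    TRUE,                          ... internal: allocate a new object
 *	    (vm_object_offset_t)PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    MEMORY_OBJECT_NULL,            ... no pager provided
 *	    &entry);
 *
 * The returned send right can then be mapped via vm_map()/mach_vm_map()
 * and must eventually be released with mach_memory_entry_port_release().
 */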

kern_return_t
mach_memory_object_memory_entry(
	host_t                  host,
	boolean_t               internal,
	vm_size_t               size,
	vm_prot_t               permission,
	memory_object_t         pager,
	ipc_port_t              *entry_handle)
{
	return mach_memory_object_memory_entry_64(host, internal,
	           (vm_object_offset_t)size, permission, pager, entry_handle);
}


kern_return_t
mach_memory_entry_purgable_control(
	ipc_port_t      entry_port,
	vm_purgable_t   control,
	int             *state)
{
	if (control == VM_PURGABLE_SET_STATE_FROM_KERNEL) {
		/* not allowed from user-space */
		return KERN_INVALID_ARGUMENT;
	}

	return memory_entry_purgeable_control_internal(entry_port, control, state);
}
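
/*
 * Usage sketch (illustrative only): marking the memory behind a named
 * entry volatile so the system may reclaim it under pressure, then
 * reading the state back:
 *
 *	int state = VM_PURGABLE_VOLATILE;
 *	kr = mach_memory_entry_purgable_control(entry_port,
 *	    VM_PURGABLE_SET_STATE, &state);
 *
 *	state = 0;
 *	kr = mach_memory_entry_purgable_control(entry_port,
 *	    VM_PURGABLE_GET_STATE, &state);
 *
 * As checked in the internal routine below, the named entry must cover
 * its entire object (offset 0, full size) or the call fails with
 * KERN_INVALID_ARGUMENT.
 */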

kern_return_t
memory_entry_purgeable_control_internal(
	ipc_port_t      entry_port,
	vm_purgable_t   control,
	int             *state)
{
	kern_return_t           kr;
	vm_named_entry_t        mem_entry;
	vm_object_t             object;

	if (!IP_VALID(entry_port) ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}
	if (control != VM_PURGABLE_SET_STATE &&
	    control != VM_PURGABLE_GET_STATE &&
	    control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
		return KERN_INVALID_ARGUMENT;
	}

	if ((control == VM_PURGABLE_SET_STATE ||
	    control == VM_PURGABLE_SET_STATE_FROM_KERNEL) &&
	    (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
	    ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	assert(mem_entry->is_object);
	object = vm_named_entry_to_vm_object(mem_entry);
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/* check that named entry covers entire object ? */
	if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
		vm_object_unlock(object);
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_unlock(mem_entry);

	kr = vm_object_purgable_control(object, control, state);

	vm_object_unlock(object);

	return kr;
}

kern_return_t
mach_memory_entry_access_tracking(
	ipc_port_t      entry_port,
	int             *access_tracking,
	uint32_t        *access_tracking_reads,
	uint32_t        *access_tracking_writes)
{
	return memory_entry_access_tracking_internal(entry_port,
	           access_tracking,
	           access_tracking_reads,
	           access_tracking_writes);
}

kern_return_t
memory_entry_access_tracking_internal(
	ipc_port_t      entry_port,
	int             *access_tracking,
	uint32_t        *access_tracking_reads,
	uint32_t        *access_tracking_writes)
{
	vm_named_entry_t        mem_entry;
	vm_object_t             object;
	kern_return_t           kr;

	if (!IP_VALID(entry_port) ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	assert(mem_entry->is_object);
	object = vm_named_entry_to_vm_object(mem_entry);
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

#if VM_OBJECT_ACCESS_TRACKING
	vm_object_access_tracking(object,
	    access_tracking,
	    access_tracking_reads,
	    access_tracking_writes);
	kr = KERN_SUCCESS;
#else /* VM_OBJECT_ACCESS_TRACKING */
	(void) access_tracking;
	(void) access_tracking_reads;
	(void) access_tracking_writes;
	kr = KERN_NOT_SUPPORTED;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	named_entry_unlock(mem_entry);

	return kr;
}

kern_return_t
mach_memory_entry_ownership(
	ipc_port_t      entry_port,
	task_t          owner,
	int             ledger_tag,
	int             ledger_flags)
{
	task_t                  cur_task;
	kern_return_t           kr;
	vm_named_entry_t        mem_entry;
	vm_object_t             object;

	cur_task = current_task();
	if (cur_task != kernel_task &&
	    (owner != cur_task ||
	    (ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) ||
	    ledger_tag == VM_LEDGER_TAG_NETWORK)) {
		/*
		 * An entitlement is required to:
		 * + transfer memory ownership to someone else,
		 * + request that the memory not count against the footprint,
		 * + tag as "network" (since that implies "no footprint")
		 */
		if (!cur_task->task_can_transfer_memory_ownership &&
		    IOTaskHasEntitlement(cur_task,
		    "com.apple.private.memory.ownership_transfer")) {
			cur_task->task_can_transfer_memory_ownership = TRUE;
		}
		if (!cur_task->task_can_transfer_memory_ownership) {
			return KERN_NO_ACCESS;
		}
	}

	if (ledger_flags & ~VM_LEDGER_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}
	if (ledger_tag <= 0 ||
	    ledger_tag > VM_LEDGER_TAG_MAX) {
		return KERN_INVALID_ARGUMENT;
	}

	if (!IP_VALID(entry_port) ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}
	mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	assert(mem_entry->is_object);
	object = vm_named_entry_to_vm_object(mem_entry);
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/* check that named entry covers entire object ? */
	if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
		vm_object_unlock(object);
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_unlock(mem_entry);

	kr = vm_object_ownership_change(object,
	    ledger_tag,
	    owner,
	    ledger_flags,
	    FALSE); /* task_objq_locked */
	vm_object_unlock(object);

	return kr;
}
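
/*
 * Usage sketch (illustrative only): a task charging the memory behind a
 * named entry to itself under a specific ledger tag, with no special
 * ledger flags:
 *
 *	kr = mach_memory_entry_ownership(entry_port,
 *	    current_task(),            ... new owner
 *	    VM_LEDGER_TAG_DEFAULT,     ... ledger tag
 *	    0);                        ... no ledger flags
 *
 * Transferring ownership to another task, passing
 * VM_LEDGER_FLAG_NO_FOOTPRINT, or tagging as VM_LEDGER_TAG_NETWORK all
 * require the ownership-transfer entitlement checked above.
 */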

kern_return_t
mach_memory_entry_get_page_counts(
	ipc_port_t      entry_port,
	unsigned int    *resident_page_count,
	unsigned int    *dirty_page_count)
{
	kern_return_t           kr;
	vm_named_entry_t        mem_entry;
	vm_object_t             object;
	vm_object_offset_t      offset;
	vm_object_size_t        size;

	if (!IP_VALID(entry_port) ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	assert(mem_entry->is_object);
	object = vm_named_entry_to_vm_object(mem_entry);
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	offset = mem_entry->offset;
	size = mem_entry->size;
	size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
	offset = vm_object_trunc_page(offset);

	named_entry_unlock(mem_entry);

	kr = vm_object_get_page_counts(object, offset, size,
	    resident_page_count, dirty_page_count);

	vm_object_unlock(object);

	return kr;
}
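
/*
 * Usage sketch (illustrative only): querying how many pages of a named
 * entry's object are resident and how many of those are dirty:
 *
 *	unsigned int resident = 0, dirty = 0;
 *
 *	kr = mach_memory_entry_get_page_counts(entry_port,
 *	    &resident, &dirty);
 *
 * As computed above, the queried range is first expanded to whole pages
 * around the entry's offset and size, so the counts cover every page the
 * entry touches.
 */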

kern_return_t
mach_memory_entry_phys_page_offset(
	ipc_port_t              entry_port,
	vm_object_offset_t      *offset_p)
{
	vm_named_entry_t        mem_entry;
	vm_object_t             object;
	vm_object_offset_t      offset;
	vm_object_offset_t      data_offset;

	if (!IP_VALID(entry_port) ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) ipc_kobject_get(entry_port);

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	assert(mem_entry->is_object);
	object = vm_named_entry_to_vm_object(mem_entry);
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	offset = mem_entry->offset;
	data_offset = mem_entry->data_offset;

	named_entry_unlock(mem_entry);

	*offset_p = offset - vm_object_trunc_page(offset) + data_offset;
	return KERN_SUCCESS;
}
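
/*
 * Worked example (illustrative only): for an entry at object offset
 * 0x1800 with data_offset 0x200 on a 4K-page system,
 * vm_object_trunc_page(0x1800) is 0x1000, so the routine reports
 * 0x1800 - 0x1000 + 0x200 = 0xa00: the entry's data begins 0xa00 bytes
 * into its first physical page.
 */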

kern_return_t
mach_memory_entry_map_size(
	ipc_port_t              entry_port,
	vm_map_t                map,
	memory_object_offset_t  offset,
	memory_object_offset_t  size,
	mach_vm_size_t          *map_size)
{
	vm_named_entry_t        mem_entry;
	vm_object_t             object;
	vm_object_offset_t      object_offset_start, object_offset_end;
	vm_map_copy_t           copy_map, target_copy_map;
	vm_map_offset_t         overmap_start, overmap_end, trimmed_start;
	kern_return_t           kr;

	if (!IP_VALID(entry_port) ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) ipc_kobject_get(entry_port);
	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	if (mem_entry->is_object) {
		object = vm_named_entry_to_vm_object(mem_entry);
		if (object == VM_OBJECT_NULL) {
			named_entry_unlock(mem_entry);
			return KERN_INVALID_ARGUMENT;
		}

		object_offset_start = mem_entry->offset;
		object_offset_start += mem_entry->data_offset;
		object_offset_start += offset;
		object_offset_end = object_offset_start + size;
		object_offset_start = vm_map_trunc_page(object_offset_start,
		    VM_MAP_PAGE_MASK(map));
		object_offset_end = vm_map_round_page(object_offset_end,
		    VM_MAP_PAGE_MASK(map));

		named_entry_unlock(mem_entry);

		*map_size = object_offset_end - object_offset_start;
		return KERN_SUCCESS;
	}

	if (!mem_entry->is_copy) {
		panic("unsupported type of mem_entry %p\n", mem_entry);
	}

	assert(mem_entry->is_copy);
	if (VM_MAP_COPY_PAGE_MASK(mem_entry->backing.copy) == VM_MAP_PAGE_MASK(map)) {
		*map_size = vm_map_round_page(mem_entry->offset + mem_entry->data_offset + offset + size, VM_MAP_PAGE_MASK(map)) -
		    vm_map_trunc_page(mem_entry->offset + mem_entry->data_offset + offset, VM_MAP_PAGE_MASK(map));
		DEBUG4K_SHARE("map %p (%d) mem_entry %p offset 0x%llx + 0x%llx + 0x%llx size 0x%llx -> map_size 0x%llx\n", map, VM_MAP_PAGE_MASK(map), mem_entry, mem_entry->offset, mem_entry->data_offset, offset, size, *map_size);
		named_entry_unlock(mem_entry);
		return KERN_SUCCESS;
	}

	DEBUG4K_SHARE("mem_entry %p copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx\n", mem_entry, mem_entry->backing.copy, VM_MAP_COPY_PAGE_SHIFT(mem_entry->backing.copy), map, VM_MAP_PAGE_SHIFT(map), offset, size);
	copy_map = mem_entry->backing.copy;
	target_copy_map = VM_MAP_COPY_NULL;
	DEBUG4K_ADJUST("adjusting...\n");
	kr = vm_map_copy_adjust_to_target(copy_map,
	    mem_entry->data_offset + offset,
	    size,
	    map,
	    FALSE,
	    &target_copy_map,
	    &overmap_start,
	    &overmap_end,
	    &trimmed_start);
	if (kr == KERN_SUCCESS) {
		if (target_copy_map->size != copy_map->size) {
			DEBUG4K_ADJUST("copy %p (%d) map %p (%d) offset 0x%llx size 0x%llx overmap_start 0x%llx overmap_end 0x%llx trimmed_start 0x%llx map_size 0x%llx -> 0x%llx\n", copy_map, VM_MAP_COPY_PAGE_SHIFT(copy_map), map, VM_MAP_PAGE_SHIFT(map), (uint64_t)offset, (uint64_t)size, (uint64_t)overmap_start, (uint64_t)overmap_end, (uint64_t)trimmed_start, (uint64_t)copy_map->size, (uint64_t)target_copy_map->size);
		}
		*map_size = target_copy_map->size;
		if (target_copy_map != copy_map) {
			vm_map_copy_discard(target_copy_map);
		}
		target_copy_map = VM_MAP_COPY_NULL;
	}
	named_entry_unlock(mem_entry);
	return kr;
}

/*
 *	mach_memory_entry_port_release:
 *
 *	Release a send right on a named entry port.  This is the correct
 *	way to destroy a named entry.  When the last right on the port is
 *	released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
	ipc_port_t      port)
{
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
	ipc_port_release_send(port);
}
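
/*
 * Usage sketch (illustrative only): the typical kernel-side lifecycle of
 * a named entry handle:
 *
 *	ipc_port_t entry = IPC_PORT_NULL;
 *	memory_object_size_t size = PAGE_SIZE;
 *
 *	kr = mach_make_memory_entry_64(map, &size, 0,
 *	    MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
 *	    &entry, IPC_PORT_NULL);
 *	...map or share the entry...
 *	mach_memory_entry_port_release(entry);
 *
 * Releasing the send right is the supported teardown path; calling
 * mach_destroy_memory_entry() directly would leak the port, as the note
 * below explains.
 */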

/*
 *	mach_destroy_memory_entry:
 *
 *	Drops a reference on a memory entry and destroys the memory entry if
 *	there are no more references on it.
 *	NOTE: This routine should not be called to destroy a memory entry from the
 *	kernel, as it will not release the Mach port associated with the memory
 *	entry.  The proper way to destroy a memory entry in the kernel is to
 *	call mach_memory_entry_port_release() to release the kernel's send-right on
 *	the memory entry's port.  When the last send right is released, the memory
 *	entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
	ipc_port_t      port)
{
	vm_named_entry_t        named_entry;
#if MACH_ASSERT
	assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
	named_entry = (vm_named_entry_t) ip_get_kobject(port);

	named_entry_lock(named_entry);
	named_entry->ref_count -= 1;

	if (named_entry->ref_count == 0) {
		if (named_entry->is_sub_map) {
			vm_map_deallocate(named_entry->backing.map);
		} else if (named_entry->is_copy) {
			vm_map_copy_discard(named_entry->backing.copy);
		} else if (named_entry->is_object) {
			assert(named_entry->backing.copy->cpy_hdr.nentries == 1);
			vm_map_copy_discard(named_entry->backing.copy);
		} else {
			assert(named_entry->backing.copy == VM_MAP_COPY_NULL);
		}

		named_entry_unlock(named_entry);
		named_entry_lock_destroy(named_entry);

#if VM_NAMED_ENTRY_LIST
		lck_mtx_lock_spin(&vm_named_entry_list_lock_data);
		queue_remove(&vm_named_entry_list, named_entry,
		    vm_named_entry_t, named_entry_list);
		assert(vm_named_entry_count > 0);
		vm_named_entry_count--;
		lck_mtx_unlock(&vm_named_entry_list_lock_data);
#endif /* VM_NAMED_ENTRY_LIST */

		kfree(named_entry, sizeof(struct vm_named_entry));
	} else {
		named_entry_unlock(named_entry);
	}
}

/*
 * Allow manipulation of individual page state.  This is actually part of
 * the UPL regimen but takes place on the memory entry rather than on a UPL.
 */
kern_return_t
mach_memory_entry_page_op(
	ipc_port_t              entry_port,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags)
{
	vm_named_entry_t        mem_entry;
	vm_object_t             object;
	kern_return_t           kr;

	if (!IP_VALID(entry_port) ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	assert(mem_entry->is_object);
	object = vm_named_entry_to_vm_object(mem_entry);
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

	vm_object_deallocate(object);

	return kr;
}
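
/*
 * Usage sketch (illustrative only): querying the physical page number
 * backing the first page of a named entry's object, without building a
 * UPL around it:
 *
 *	ppnum_t phys = 0;
 *	int pflags = 0;
 *
 *	kr = mach_memory_entry_page_op(entry_port,
 *	    0,                  ... offset of the page in the object
 *	    UPL_POP_PHYSADDR,   ... query only, no state change
 *	    &phys, &pflags);
 *
 * Other UPL_POP_* ops can set or clear per-page state (dirty, busy,
 * and so on) through the same entry point.
 */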

/*
 * mach_memory_entry_range_op is a faster alternative to
 * mach_memory_entry_page_op for operations that do not need per-page
 * state returned from the call.  page_op was created as a low-cost
 * alternative to page manipulation via UPLs when only a single page is
 * involved; range_op extends the _op family to multiple pages, where
 * forgoing per-page state lets the caller avoid the overhead of the
 * upl structures.
 */

kern_return_t
mach_memory_entry_range_op(
	ipc_port_t              entry_port,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	int                     *range)
{
	vm_named_entry_t        mem_entry;
	vm_object_t             object;
	kern_return_t           kr;

	if (!IP_VALID(entry_port) ||
	    ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
		return KERN_INVALID_ARGUMENT;
	}

	mem_entry = (vm_named_entry_t) ip_get_kobject(entry_port);

	named_entry_lock(mem_entry);

	if (mem_entry->is_sub_map ||
	    mem_entry->is_copy) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	assert(mem_entry->is_object);
	object = vm_named_entry_to_vm_object(mem_entry);
	if (object == VM_OBJECT_NULL) {
		named_entry_unlock(mem_entry);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_reference(object);
	named_entry_unlock(mem_entry);

	kr = vm_object_range_op(object,
	    offset_beg,
	    offset_end,
	    ops,
	    (uint32_t *) range);

	vm_object_deallocate(object);

	return kr;
}
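
/*
 * Usage sketch (illustrative only): scanning a range of a named entry in
 * a single call instead of one page_op per page.  With UPL_ROP_PRESENT,
 * "range" comes back holding the extent of the leading resident run:
 *
 *	int range = 0;
 *
 *	kr = mach_memory_entry_range_op(entry_port,
 *	    0,                   ... start of range
 *	    16 * PAGE_SIZE,      ... end of range
 *	    UPL_ROP_PRESENT,
 *	    &range);
 *
 * No per-page state is returned, which is precisely what makes this
 * cheaper than building a UPL over the same range.
 */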

/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
	vm_map_t        map,
	upl_t           upl,
	vm_offset_t     *dst_addr);

extern int kernel_upl_unmap(
	vm_map_t        map,
	upl_t           upl);

extern int kernel_upl_commit(
	upl_t                   upl,
	upl_page_info_t         *pl,
	mach_msg_type_number_t  count);

extern int kernel_upl_commit_range(
	upl_t                   upl,
	upl_offset_t            offset,
	upl_size_t              size,
	int                     flags,
	upl_page_info_array_t   pl,
	mach_msg_type_number_t  count);

extern int kernel_upl_abort(
	upl_t   upl,
	int     abort_type);

extern int kernel_upl_abort_range(
	upl_t           upl,
	upl_offset_t    offset,
	upl_size_t      size,
	int             abort_flags);


kern_return_t
kernel_upl_map(
	vm_map_t        map,
	upl_t           upl,
	vm_offset_t     *dst_addr)
{
	return vm_upl_map(map, upl, dst_addr);
}


kern_return_t
kernel_upl_unmap(
	vm_map_t        map,
	upl_t           upl)
{
	return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
	upl_t                   upl,
	upl_page_info_t         *pl,
	mach_msg_type_number_t  count)
{
	kern_return_t   kr;

	kr = upl_commit(upl, pl, count);
	upl_deallocate(upl);
	return kr;
}


kern_return_t
kernel_upl_commit_range(
	upl_t                   upl,
	upl_offset_t            offset,
	upl_size_t              size,
	int                     flags,
	upl_page_info_array_t   pl,
	mach_msg_type_number_t  count)
{
	boolean_t       finished = FALSE;
	kern_return_t   kr;

	if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
		flags |= UPL_COMMIT_NOTIFY_EMPTY;
	}

	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

	if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) {
		upl_deallocate(upl);
	}

	return kr;
}

kern_return_t
kernel_upl_abort_range(
	upl_t           upl,
	upl_offset_t    offset,
	upl_size_t      size,
	int             abort_flags)
{
	kern_return_t   kr;
	boolean_t       finished = FALSE;

	if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) {
		abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;
	}

	kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

	if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) {
		upl_deallocate(upl);
	}

	return kr;
}

kern_return_t
kernel_upl_abort(
	upl_t           upl,
	int             abort_type)
{
	kern_return_t   kr;

	kr = upl_abort(upl, abort_type);
	upl_deallocate(upl);
	return kr;
}
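
/*
 * Usage sketch (illustrative only; upl, upl_size, pl and pl_count stand
 * in for state the caller already holds from creating the UPL): the
 * BSD-side pattern these wrappers support.  On success the pages are
 * committed and the UPL is freed once it empties; on failure the whole
 * UPL is aborted and deallocated:
 *
 *	if (io_succeeded) {
 *		kr = kernel_upl_commit_range(upl, 0, upl_size,
 *		    UPL_COMMIT_FREE_ON_EMPTY, pl, pl_count);
 *	} else {
 *		kr = kernel_upl_abort(upl, UPL_ABORT_ERROR);
 *	}
 */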

/*
 * This is now a kernel-private interface (for BootCache use only).
 * We need a cleaner way to create an empty vm_map() and return a
 * handle to it.
 */

kern_return_t
vm_region_object_create(
	vm_map_t        target_map,
	vm_size_t       size,
	ipc_port_t      *object_handle)
{
	vm_named_entry_t        user_entry;
	ipc_port_t              user_handle;

	vm_map_t        new_map;

	if (mach_memory_entry_allocate(&user_entry, &user_handle)
	    != KERN_SUCCESS) {
		return KERN_FAILURE;
	}

	/* Create a named object based on a submap of specified size */

	new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
	    vm_map_round_page(size,
	    VM_MAP_PAGE_MASK(target_map)),
	    TRUE);
	vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map));

	user_entry->backing.map = new_map;
	user_entry->internal = TRUE;
	user_entry->is_sub_map = TRUE;
	user_entry->offset = 0;
	user_entry->protection = VM_PROT_ALL;
	user_entry->size = size;
	assert(user_entry->ref_count == 1);

	*object_handle = user_handle;
	return KERN_SUCCESS;
}
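
/*
 * Usage sketch (illustrative only): how a client such as BootCache could
 * obtain a handle to a fresh, empty submap-backed named entry and hand
 * it to a mapping call:
 *
 *	ipc_port_t handle = IPC_PORT_NULL;
 *
 *	kr = vm_region_object_create(target_map, 32 * PAGE_SIZE, &handle);
 *	if (kr == KERN_SUCCESS) {
 *		...pass "handle" as the port argument of vm_map()...
 *		mach_memory_entry_port_release(handle);
 *	}
 */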

ppnum_t vm_map_get_phys_page(           /* forward */
	vm_map_t        map,
	vm_offset_t     offset);

ppnum_t
vm_map_get_phys_page(
	vm_map_t        map,
	vm_offset_t     addr)
{
	vm_object_offset_t      offset;
	vm_object_t             object;
	vm_map_offset_t         map_offset;
	vm_map_entry_t          entry;
	ppnum_t                 phys_page = 0;

	map_offset = vm_map_trunc_page(addr, PAGE_MASK);

	vm_map_lock(map);
	while (vm_map_lookup_entry(map, map_offset, &entry)) {
		if (VME_OBJECT(entry) == VM_OBJECT_NULL) {
			vm_map_unlock(map);
			return (ppnum_t) 0;
		}
		if (entry->is_sub_map) {
			vm_map_t        old_map;

			vm_map_lock(VME_SUBMAP(entry));
			old_map = map;
			map = VME_SUBMAP(entry);
			map_offset = (VME_OFFSET(entry) +
			    (map_offset - entry->vme_start));
			vm_map_unlock(old_map);
			continue;
		}
		if (VME_OBJECT(entry)->phys_contiguous) {
			/*
			 * These are not standard pageable memory mappings.
			 * If they are not present in the object they will
			 * have to be picked up from the pager through the
			 * fault mechanism.
			 */
			if (VME_OBJECT(entry)->vo_shadow_offset == 0) {
				/* need to call vm_fault */
				vm_map_unlock(map);
				vm_fault(map, map_offset, VM_PROT_NONE,
				    FALSE /* change_wiring */, VM_KERN_MEMORY_NONE,
				    THREAD_UNINT, NULL, 0);
				vm_map_lock(map);
				continue;
			}
			offset = (VME_OFFSET(entry) +
			    (map_offset - entry->vme_start));
			phys_page = (ppnum_t)
			    ((VME_OBJECT(entry)->vo_shadow_offset
			    + offset) >> PAGE_SHIFT);
			break;
		}
		offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start));
		object = VME_OBJECT(entry);
		vm_object_lock(object);
		while (TRUE) {
			vm_page_t dst_page = vm_page_lookup(object, offset);
			if (dst_page == VM_PAGE_NULL) {
				if (object->shadow) {
					vm_object_t old_object;

					vm_object_lock(object->shadow);
					old_object = object;
					offset = offset + object->vo_shadow_offset;
					object = object->shadow;
					vm_object_unlock(old_object);
				} else {
					vm_object_unlock(object);
					break;
				}
			} else {
				phys_page = (ppnum_t)(VM_PAGE_GET_PHYS_PAGE(dst_page));
				vm_object_unlock(object);
				break;
			}
		}
		break;
	}

	vm_map_unlock(map);
	return phys_page;
}

#if 0
kern_return_t kernel_object_iopl_request(       /* forward */
	vm_named_entry_t        named_entry,
	memory_object_offset_t  offset,
	upl_size_t              *upl_size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	int                     *flags);

kern_return_t
kernel_object_iopl_request(
	vm_named_entry_t        named_entry,
	memory_object_offset_t  offset,
	upl_size_t              *upl_size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	int                     *flags)
{
	vm_object_t             object;
	kern_return_t           ret;
	int                     caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	/* a few checks to make sure user is obeying rules */
	if (*upl_size == 0) {
		if (offset >= named_entry->size) {
			return KERN_INVALID_RIGHT;
		}
		*upl_size = (upl_size_t) (named_entry->size - offset);
		if (*upl_size != named_entry->size - offset) {
			return KERN_INVALID_ARGUMENT;
		}
	}
	if (caller_flags & UPL_COPYOUT_FROM) {
		if ((named_entry->protection & VM_PROT_READ)
		    != VM_PROT_READ) {
			return KERN_INVALID_RIGHT;
		}
	} else {
		if ((named_entry->protection &
		    (VM_PROT_READ | VM_PROT_WRITE))
		    != (VM_PROT_READ | VM_PROT_WRITE)) {
			return KERN_INVALID_RIGHT;
		}
	}
	if (named_entry->size < (offset + *upl_size)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * The caller's "offset" parameter is defined to be the offset
	 * from the beginning of the named entry's offset in the object.
	 */
	offset = offset + named_entry->offset;

	if (named_entry->is_sub_map ||
	    named_entry->is_copy) {
		return KERN_INVALID_ARGUMENT;
	}

	named_entry_lock(named_entry);

	/*
	 * This is the case where we are going to operate on an already
	 * known object.  If the object is not ready it is internal.
	 * An external object cannot be mapped until it is ready, so we
	 * can avoid the ready check in this case.
	 */
	assert(named_entry->is_object);
	object = vm_named_entry_to_vm_object(named_entry);
	vm_object_reference(object);
	named_entry_unlock(named_entry);

	if (!object->private) {
		if (*upl_size > MAX_UPL_TRANSFER_BYTES) {
			*upl_size = MAX_UPL_TRANSFER_BYTES;
		}
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
	    offset,
	    *upl_size,
	    upl_ptr,
	    user_page_list,
	    page_list_count,
	    (upl_control_flags_t)(unsigned int)caller_flags);
	vm_object_deallocate(object);
	return ret;
}
#endif

/*
 * These symbols are looked up at runtime by VMware and VirtualBox,
 * despite not being exported in the symbol sets.
 */

#if defined(__x86_64__)

kern_return_t
mach_vm_map(
	vm_map_t                target_map,
	mach_vm_offset_t        *address,
	mach_vm_size_t          initial_size,
	mach_vm_offset_t        mask,
	int                     flags,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance);

kern_return_t
mach_vm_remap(
	vm_map_t                target_map,
	mach_vm_offset_t        *address,
	mach_vm_size_t          size,
	mach_vm_offset_t        mask,
	int                     flags,
	vm_map_t                src_map,
	mach_vm_offset_t        memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,
	vm_prot_t               *max_protection,
	vm_inherit_t            inheritance);

kern_return_t
mach_vm_map(
	vm_map_t                target_map,
	mach_vm_offset_t        *address,
	mach_vm_size_t          initial_size,
	mach_vm_offset_t        mask,
	int                     flags,
	ipc_port_t              port,
	vm_object_offset_t      offset,
	boolean_t               copy,
	vm_prot_t               cur_protection,
	vm_prot_t               max_protection,
	vm_inherit_t            inheritance)
{
	return mach_vm_map_external(target_map, address, initial_size, mask, flags, port,
	           offset, copy, cur_protection, max_protection, inheritance);
}

kern_return_t
mach_vm_remap(
	vm_map_t                target_map,
	mach_vm_offset_t        *address,
	mach_vm_size_t          size,
	mach_vm_offset_t        mask,
	int                     flags,
	vm_map_t                src_map,
	mach_vm_offset_t        memory_address,
	boolean_t               copy,
	vm_prot_t               *cur_protection,        /* OUT */
	vm_prot_t               *max_protection,        /* OUT */
	vm_inherit_t            inheritance)
{
	return mach_vm_remap_external(target_map, address, size, mask, flags, src_map, memory_address,
	           copy, cur_protection, max_protection, inheritance);
}

kern_return_t
vm_map(
	vm_map_t        target_map,
	vm_offset_t     *address,
	vm_size_t       size,
	vm_offset_t     mask,
	int             flags,
	ipc_port_t      port,
	vm_offset_t     offset,
	boolean_t       copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance);

kern_return_t
vm_map(
	vm_map_t        target_map,
	vm_offset_t     *address,
	vm_size_t       size,
	vm_offset_t     mask,
	int             flags,
	ipc_port_t      port,
	vm_offset_t     offset,
	boolean_t       copy,
	vm_prot_t       cur_protection,
	vm_prot_t       max_protection,
	vm_inherit_t    inheritance)
{
	vm_tag_t tag;

	VM_GET_FLAGS_ALIAS(flags, tag);
	return vm_map_kernel(target_map, address, size, mask,
	           flags, VM_MAP_KERNEL_FLAGS_NONE, tag,
	           port, offset, copy,
	           cur_protection, max_protection, inheritance);
}

#endif /* __x86_64__ */