/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_user.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can either be 32 or 64, or the
 * kernel task can be 32 or 64. mach_vm_allocate makes sense everywhere, and
 * is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters, and they will
 * always be 32-bit and call into the vm32_vm_allocate APIs. On non-U32/K32
 * platforms, the MIG glue should never call into vm_allocate directly, because
 * the calling task and kernel_task are unlikely to use the same size
 * parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */

#include <debug.h>

#include <vm_cpm.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>	/* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>	/* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

vm_size_t	upl_offset_to_pagelist = 0;

#if VM_CPM
#include <vm/cpm.h>
#endif	/* VM_CPM */

ipc_port_t	dynamic_pager_control_port = NULL;

/*
 *	mach_vm_allocate allocates "zero fill" memory in the specified
 *	map.
 */
kern_return_t
mach_vm_allocate(
	vm_map_t		map,
	mach_vm_offset_t	*addr,
	mach_vm_size_t		size,
	int			flags)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain usable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += VM_MAP_PAGE_SIZE(map);
	} else
		map_addr = vm_map_trunc_page(*addr,
					     VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
	if (map_size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = map_addr;
	return(result);
}

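/*
 * Illustrative sketch (not part of the kernel sources): user space
 * reaches this entry point through the mach_vm_allocate() MIG stub.
 * A minimal caller, assuming <mach/mach.h> and <mach/mach_vm.h>,
 * might look like:
 *
 *	mach_vm_address_t addr = 0;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_allocate(mach_task_self(), &addr,
 *			      vm_page_size, VM_FLAGS_ANYWHERE);
 *	if (kr == KERN_SUCCESS)
 *		kr = mach_vm_deallocate(mach_task_self(), addr,
 *					vm_page_size);
 *
 * With VM_FLAGS_ANYWHERE the kernel picks the address; with
 * VM_FLAGS_FIXED the caller's *addr is used as given.
 */
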
/*
 *	vm_allocate
 *	Legacy routine that allocates "zero fill" memory in the specified
 *	map (which is limited to the same size as the kernel).
 */
kern_return_t
vm_allocate(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	size,
	int		flags)
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	result;
	boolean_t	anywhere;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_ALLOCATE)
		return KERN_INVALID_ARGUMENT;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);
	if (size == 0) {
		*addr = 0;
		return(KERN_SUCCESS);
	}

	anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
	if (anywhere) {
		/*
		 * No specific address requested, so start candidate address
		 * search at the minimum address in the map.  However, if that
		 * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
		 * allocations of PAGEZERO to explicit requests since its
		 * normal use is to catch dereferences of NULL and many
		 * applications also treat pointers with a value of 0 as
		 * special and suddenly having address 0 contain usable
		 * memory would tend to confuse those applications.
		 */
		map_addr = vm_map_min(map);
		if (map_addr == 0)
			map_addr += VM_MAP_PAGE_SIZE(map);
	} else
		map_addr = vm_map_trunc_page(*addr,
					     VM_MAP_PAGE_MASK(map));
	map_size = vm_map_round_page(size,
				     VM_MAP_PAGE_MASK(map));
	if (map_size == 0) {
		return(KERN_INVALID_ARGUMENT);
	}

	result = vm_map_enter(
			map,
			&map_addr,
			map_size,
			(vm_map_offset_t)0,
			flags,
			VM_OBJECT_NULL,
			(vm_object_offset_t)0,
			FALSE,
			VM_PROT_DEFAULT,
			VM_PROT_ALL,
			VM_INHERIT_DEFAULT);

	*addr = CAST_DOWN(vm_offset_t, map_addr);
	return(result);
}

/*
 *	mach_vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map.
 */
kern_return_t
mach_vm_deallocate(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (mach_vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
			     vm_map_trunc_page(start,
					       VM_MAP_PAGE_MASK(map)),
			     vm_map_round_page(start+size,
					       VM_MAP_PAGE_MASK(map)),
			     VM_MAP_NO_FLAGS));
}

/*
 *	vm_deallocate -
 *	deallocates the specified range of addresses in the
 *	specified address map (limited to addresses the same
 *	size as the kernel).
 */
kern_return_t
vm_deallocate(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == (vm_offset_t) 0)
		return(KERN_SUCCESS);

	return(vm_map_remove(map,
			     vm_map_trunc_page(start,
					       VM_MAP_PAGE_MASK(map)),
			     vm_map_round_page(start+size,
					       VM_MAP_PAGE_MASK(map)),
			     VM_MAP_NO_FLAGS));
}

/*
 *	mach_vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map.
 */
kern_return_t
mach_vm_inherit(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
			      new_inheritance));
}

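/*
 * Illustrative sketch (not part of the kernel sources): a user-space
 * caller can keep a sensitive buffer out of future children by marking
 * it VM_INHERIT_NONE before fork(); the range then simply does not
 * appear in the child's address map.  Assuming <mach/mach_vm.h>:
 *
 *	kr = mach_vm_inherit(mach_task_self(), addr, size,
 *			     VM_INHERIT_NONE);
 */
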
/*
 *	vm_inherit -
 *	Sets the inheritance of the specified range in the
 *	specified map (range limited to addresses that can be
 *	described by a vm_address_t).
 */
kern_return_t
vm_inherit(
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_inherit_t		new_inheritance)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_inheritance > VM_INHERIT_LAST_VALID))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_inherit(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
			      new_inheritance));
}

/*
 *	mach_vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.
 */

kern_return_t
mach_vm_protect(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
			      new_protection,
			      set_maximum));
}

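/*
 * Illustrative sketch (not part of the kernel sources): dropping a
 * range to read-only from user space.  Assuming <mach/mach_vm.h>:
 *
 *	boolean_t set_maximum = FALSE;
 *
 *	kr = mach_vm_protect(mach_task_self(), addr, size,
 *			     set_maximum, VM_PROT_READ);
 *
 * Passing set_maximum == TRUE would lower the maximum protection
 * instead, which can never be raised again for that range.
 */
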
/*
 *	vm_protect -
 *	Sets the protection of the specified range in the
 *	specified map.  Addressability of the range limited
 *	to the same size as the kernel.
 */

kern_return_t
vm_protect(
	vm_map_t		map,
	vm_offset_t		start,
	vm_size_t		size,
	boolean_t		set_maximum,
	vm_prot_t		new_protection)
{
	if ((map == VM_MAP_NULL) || (start + size < start) ||
	    (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY)))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_protect(map,
			      vm_map_trunc_page(start,
						VM_MAP_PAGE_MASK(map)),
			      vm_map_round_page(start+size,
						VM_MAP_PAGE_MASK(map)),
			      new_protection,
			      set_maximum));
}

/*
 *	mach_vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cacheability, migratability, etc.
 */
kern_return_t
mach_vm_machine_attribute(
	vm_map_t			map,
	mach_vm_address_t		addr,
	mach_vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(
		map,
		vm_map_trunc_page(addr,
				  VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size,
				  VM_MAP_PAGE_MASK(map)),
		attribute,
		value);
}

/*
 *	vm_machine_attribute -
 *	Handle machine-specific attributes for a mapping, such
 *	as cacheability, migratability, etc.  Limited addressability
 *	(same range limits as for the native kernel map).
 */
kern_return_t
vm_machine_attribute(
	vm_map_t			map,
	vm_address_t			addr,
	vm_size_t			size,
	vm_machine_attribute_t		attribute,
	vm_machine_attribute_val_t	*value)		/* IN/OUT */
{
	if ((map == VM_MAP_NULL) || (addr + size < addr))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return vm_map_machine_attribute(
		map,
		vm_map_trunc_page(addr,
				  VM_MAP_PAGE_MASK(map)),
		vm_map_round_page(addr+size,
				  VM_MAP_PAGE_MASK(map)),
		attribute,
		value);
}

/*
 *	mach_vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 *
 *	JMM - because of mach_msg_type_number_t, this call is limited to a
 *	single 4GB region at this time.
 *
 */
kern_return_t
mach_vm_read(
	vm_map_t		map,
	mach_vm_address_t	addr,
	mach_vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if ((mach_msg_type_number_t) size != size)
		return KERN_INVALID_ARGUMENT;

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
	}
	return(error);
}

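/*
 * Illustrative sketch (not part of the kernel sources): reading another
 * task's memory through the MIG stub.  The reply buffer is allocated in
 * the caller's address space by the IPC machinery, so it must be
 * released with mach_vm_deallocate() when done.  "task" is assumed to
 * be a task port obtained elsewhere (e.g. via task_for_pid(), which
 * requires privileges):
 *
 *	vm_offset_t data;
 *	mach_msg_type_number_t data_size;
 *
 *	kr = mach_vm_read(task, remote_addr, remote_size,
 *			  &data, &data_size);
 *	if (kr == KERN_SUCCESS) {
 *		... use data_size bytes at data ...
 *		mach_vm_deallocate(mach_task_self(), data, data_size);
 *	}
 */
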
/*
 *	vm_read -
 *	Read/copy a range from one address space and return it to the caller.
 *	Limited addressability (same range limits as for the native kernel map).
 *
 *	It is assumed that the address for the returned memory is selected by
 *	the IPC implementation as part of receiving the reply to this call.
 *	If IPC isn't used, the caller must deal with the vm_map_copy_t object
 *	that gets returned.
 */
kern_return_t
vm_read(
	vm_map_t		map,
	vm_address_t		addr,
	vm_size_t		size,
	pointer_t		*data,
	mach_msg_type_number_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	ipc_address;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	if (size > (unsigned)(mach_msg_type_number_t) -1) {
		/*
		 * The kernel could handle a 64-bit "size" value, but
		 * it could not return the size of the data in "*data_size"
		 * without overflowing.
		 * Let's reject this "size" as invalid.
		 */
		return KERN_INVALID_ARGUMENT;
	}

	error = vm_map_copyin(map,
			(vm_map_address_t)addr,
			(vm_map_size_t)size,
			FALSE,	/* src_destroy */
			&ipc_address);

	if (KERN_SUCCESS == error) {
		*data = (pointer_t) ipc_address;
		*data_size = (mach_msg_type_number_t) size;
		assert(*data_size == size);
	}
	return(error);
}

/*
 *	mach_vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 */
kern_return_t
mach_vm_read_list(
	vm_map_t		map,
	mach_vm_read_entry_t	data_list,
	natural_t		count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(
						current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address = map_addr;
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}

/*
 *	vm_read_list -
 *	Read/copy a list of address ranges from specified map.
 *
 *	MIG does not know how to deal with a returned array of
 *	vm_map_copy_t structures, so we have to do the copyout
 *	manually here.
 *
 *	The source and destination ranges are limited to those
 *	that can be described with a vm_address_t (i.e. same
 *	size map as the kernel).
 *
 *	JMM - If the result of the copyout is an address range
 *	that cannot be described with a vm_address_t (i.e. the
 *	caller had a larger address space but used this call
 *	anyway), it will result in a truncated address being
 *	returned (and a likely confused caller).
 */

kern_return_t
vm_read_list(
	vm_map_t	map,
	vm_read_entry_t	data_list,
	natural_t	count)
{
	mach_msg_type_number_t	i;
	kern_return_t		error;
	vm_map_copy_t		copy;

	if (map == VM_MAP_NULL ||
	    count > VM_MAP_ENTRY_MAX)
		return(KERN_INVALID_ARGUMENT);

	error = KERN_SUCCESS;
	for(i=0; i<count; i++) {
		vm_map_address_t map_addr;
		vm_map_size_t map_size;

		map_addr = (vm_map_address_t)(data_list[i].address);
		map_size = (vm_map_size_t)(data_list[i].size);

		if(map_size != 0) {
			error = vm_map_copyin(map,
					map_addr,
					map_size,
					FALSE,	/* src_destroy */
					&copy);
			if (KERN_SUCCESS == error) {
				error = vm_map_copyout(current_task()->map,
						&map_addr,
						copy);
				if (KERN_SUCCESS == error) {
					data_list[i].address =
						CAST_DOWN(vm_offset_t, map_addr);
					continue;
				}
				vm_map_copy_discard(copy);
			}
		}
		data_list[i].address = (mach_vm_address_t)0;
		data_list[i].size = (mach_vm_size_t)0;
	}
	return(error);
}

/*
 *	mach_vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	In making an assumption that the current thread is local, it is
 *	no longer cluster-safe without a fully supportive local proxy
 *	thread/task (but we don't support clusters anymore, so this is moot).
 */

kern_return_t
mach_vm_read_overwrite(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	mach_vm_address_t	data,
	mach_vm_size_t		*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
					      copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}

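/*
 * Illustrative sketch (not part of the kernel sources): unlike
 * mach_vm_read(), the _overwrite variant copies into a buffer the
 * caller already owns, avoiding the extra allocation; debuggers use
 * this pattern heavily.  "task" is assumed to be a task port obtained
 * elsewhere:
 *
 *	char buf[256];
 *	mach_vm_size_t nread = 0;
 *
 *	kr = mach_vm_read_overwrite(task, remote_addr, sizeof(buf),
 *				    (mach_vm_address_t)buf, &nread);
 */
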
/*
 *	vm_read_overwrite -
 *	Overwrite a range of the current map with data from the specified
 *	map/address range.
 *
 *	This routine adds the additional limitation that the source and
 *	destination ranges must be describable with vm_address_t values
 *	(i.e. the same size address spaces as the kernel, or at least the
 *	ranges are in that first portion of the respective address spaces).
 */

kern_return_t
vm_read_overwrite(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_address_t	data,
	vm_size_t	*data_size)
{
	kern_return_t	error;
	vm_map_copy_t	copy;

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_ARGUMENT);

	error = vm_map_copyin(map, (vm_map_address_t)address,
			      (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == error) {
		error = vm_map_copy_overwrite(current_thread()->map,
					      (vm_map_address_t)data,
					      copy, FALSE);
		if (KERN_SUCCESS == error) {
			*data_size = size;
			return error;
		}
		vm_map_copy_discard(copy);
	}
	return(error);
}


/*
 *	mach_vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 */
kern_return_t
mach_vm_write(
	vm_map_t			map,
	mach_vm_address_t		address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}

/*
 *	vm_write -
 *	Overwrite the specified address range with the data provided
 *	(from the current map).
 *
 *	The addressability of the range of addresses to overwrite is
 *	limited by the use of a vm_address_t (same size as kernel map).
 *	Either the target map is also small, or the range is in the
 *	low addresses within it.
 */
kern_return_t
vm_write(
	vm_map_t			map,
	vm_address_t			address,
	pointer_t			data,
	__unused mach_msg_type_number_t	size)
{
	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	return vm_map_copy_overwrite(map, (vm_map_address_t)address,
		(vm_map_copy_t) data, FALSE /* interruptible XXX */);
}

/*
 *	mach_vm_copy -
 *	Overwrite one range of the specified map with the contents of
 *	another range within that same map (i.e. both address ranges
 *	are "over there").
 */
kern_return_t
mach_vm_copy(
	vm_map_t		map,
	mach_vm_address_t	source_address,
	mach_vm_size_t		size,
	mach_vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}

kern_return_t
vm_copy(
	vm_map_t	map,
	vm_address_t	source_address,
	vm_size_t	size,
	vm_address_t	dest_address)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_copyin(map, (vm_map_address_t)source_address,
			   (vm_map_size_t)size, FALSE, &copy);

	if (KERN_SUCCESS == kr) {
		kr = vm_map_copy_overwrite(map,
				(vm_map_address_t)dest_address,
				copy, FALSE /* interruptible XXX */);

		if (KERN_SUCCESS != kr)
			vm_map_copy_discard(copy);
	}
	return kr;
}

/*
 *	mach_vm_map -
 *	Map some range of an object into an address space.
 *
 *	The object can be one of several types of objects:
 *		NULL - anonymous memory
 *		a named entry - a range within another address space
 *				or a range within a memory object
 *		a whole memory object
 *
 */
kern_return_t
mach_vm_map(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		initial_size,
	mach_vm_offset_t	mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	kern_return_t	kr;
	vm_map_offset_t	vmmaddr;

	vmmaddr = (vm_map_offset_t) *address;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_MAP)
		return KERN_INVALID_ARGUMENT;

	kr = vm_map_enter_mem_object(target_map,
				     &vmmaddr,
				     initial_size,
				     mask,
				     flags,
				     port,
				     offset,
				     copy,
				     cur_protection,
				     max_protection,
				     inheritance);

	*address = vmmaddr;
	return kr;
}

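/*
 * Illustrative sketch (not part of the kernel sources): the "port"
 * argument is typically a named entry.  A common user-space pattern
 * creates a named entry over an existing range and maps a second view
 * of it, assuming <mach/mach.h> and <mach/mach_vm.h>:
 *
 *	mach_port_t entry = MACH_PORT_NULL;
 *	memory_object_size_t entry_size = size;
 *	mach_vm_address_t mapped_addr = 0;
 *
 *	kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
 *				       addr, VM_PROT_READ | VM_PROT_WRITE,
 *				       &entry, MACH_PORT_NULL);
 *	if (kr == KERN_SUCCESS)
 *		kr = mach_vm_map(mach_task_self(), &mapped_addr,
 *				 entry_size, 0, VM_FLAGS_ANYWHERE,
 *				 entry, 0, FALSE, VM_PROT_DEFAULT,
 *				 VM_PROT_ALL, VM_INHERIT_DEFAULT);
 *
 * Passing MACH_PORT_NULL as the port instead maps fresh anonymous
 * memory, making this a superset of mach_vm_allocate().
 */
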

/* legacy interface */
kern_return_t
vm_map_64(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_object_offset_t	offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

/* temporary, until world build */
kern_return_t
vm_map(
	vm_map_t		target_map,
	vm_offset_t		*address,
	vm_size_t		size,
	vm_offset_t		mask,
	int			flags,
	ipc_port_t		port,
	vm_offset_t		offset,
	boolean_t		copy,
	vm_prot_t		cur_protection,
	vm_prot_t		max_protection,
	vm_inherit_t		inheritance)
{
	mach_vm_address_t map_addr;
	mach_vm_size_t map_size;
	mach_vm_offset_t map_mask;
	vm_object_offset_t obj_offset;
	kern_return_t kr;

	map_addr = (mach_vm_address_t)*address;
	map_size = (mach_vm_size_t)size;
	map_mask = (mach_vm_offset_t)mask;
	obj_offset = (vm_object_offset_t)offset;

	kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags,
			 port, obj_offset, copy,
			 cur_protection, max_protection, inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

/*
 *	mach_vm_remap -
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 */

kern_return_t
mach_vm_remap(
	vm_map_t		target_map,
	mach_vm_offset_t	*address,
	mach_vm_size_t		size,
	mach_vm_offset_t	mask,
	int			flags,
	vm_map_t		src_map,
	mach_vm_offset_t	memory_address,
	boolean_t		copy,
	vm_prot_t		*cur_protection,
	vm_prot_t		*max_protection,
	vm_inherit_t		inheritance)
{
	vm_map_offset_t	map_addr;
	kern_return_t	kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  flags,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = map_addr;
	return kr;
}

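/*
 * Illustrative sketch (not part of the kernel sources): remapping an
 * existing writable buffer to a second address in the same task, e.g.
 * to build a read-only alias of it:
 *
 *	mach_vm_address_t ro_view = 0;
 *	vm_prot_t cur = VM_PROT_NONE, max = VM_PROT_NONE;
 *
 *	kr = mach_vm_remap(mach_task_self(), &ro_view, size, 0,
 *			   VM_FLAGS_ANYWHERE, mach_task_self(), addr,
 *			   FALSE, &cur, &max, VM_INHERIT_NONE);
 *
 * copy == FALSE means the two ranges share the same physical pages;
 * cur/max return the protections actually granted, after which the
 * caller can mach_vm_protect() the new view down to VM_PROT_READ.
 */
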
/*
 *	vm_remap -
 *	Remap a range of memory from one task into another,
 *	to another address range within the same task, or
 *	over top of itself (with altered permissions and/or
 *	as an in-place copy of itself).
 *
 *	The addressability of the source and target address
 *	range is limited by the size of vm_address_t (in the
 *	kernel context).
 */
kern_return_t
vm_remap(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	int		flags,
	vm_map_t	src_map,
	vm_offset_t	memory_address,
	boolean_t	copy,
	vm_prot_t	*cur_protection,
	vm_prot_t	*max_protection,
	vm_inherit_t	inheritance)
{
	vm_map_offset_t	map_addr;
	kern_return_t	kr;

	if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map)
		return KERN_INVALID_ARGUMENT;

	/* filter out any kernel-only flags */
	if (flags & ~VM_FLAGS_USER_REMAP)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;

	kr = vm_map_remap(target_map,
			  &map_addr,
			  size,
			  mask,
			  flags,
			  src_map,
			  memory_address,
			  copy,
			  cur_protection,
			  max_protection,
			  inheritance);
	*address = CAST_DOWN(vm_offset_t, map_addr);
	return kr;
}

/*
 * NOTE: these routines (and this file) will no longer require
 * mach_host_server.h when mach_vm_wire and vm_wire are changed to
 * use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 *	mach_vm_wire
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire(
	host_priv_t		host_priv,
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if (access & ~VM_PROT_ALL || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map,
				 vm_map_trunc_page(start,
						   VM_MAP_PAGE_MASK(map)),
				 vm_map_round_page(start+size,
						   VM_MAP_PAGE_MASK(map)),
				 access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_MLOCK),
				 TRUE);
	} else {
		rc = vm_map_unwire(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
				   TRUE);
	}
	return rc;
}

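/*
 * Illustrative sketch (not part of the kernel sources): wiring needs
 * the host privilege port, so unprivileged code normally gets here via
 * mlock(2) instead.  A privileged caller holding a host_priv port
 * could do:
 *
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *			  VM_PROT_READ | VM_PROT_WRITE);
 *	...
 *	kr = mach_vm_wire(host_priv, mach_task_self(), addr, size,
 *			  VM_PROT_NONE);
 *
 * The second call, with VM_PROT_NONE, unwires the range per the note
 * above.
 */
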
/*
 *	vm_wire -
 *	Specify that the range of the virtual address space
 *	of the target task must not cause page faults for
 *	the indicated accesses.
 *
 *	[ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
vm_wire(
	host_priv_t		host_priv,
	register vm_map_t	map,
	vm_offset_t		start,
	vm_size_t		size,
	vm_prot_t		access)
{
	kern_return_t		rc;

	if (host_priv == HOST_PRIV_NULL)
		return KERN_INVALID_HOST;

	assert(host_priv == &realhost);

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	if ((access & ~VM_PROT_ALL) || (start + size < start))
		return KERN_INVALID_ARGUMENT;

	if (size == 0) {
		rc = KERN_SUCCESS;
	} else if (access != VM_PROT_NONE) {
		rc = vm_map_wire(map,
				 vm_map_trunc_page(start,
						   VM_MAP_PAGE_MASK(map)),
				 vm_map_round_page(start+size,
						   VM_MAP_PAGE_MASK(map)),
				 access | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
				 TRUE);
	} else {
		rc = vm_map_unwire(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
				   TRUE);
	}
	return rc;
}

/*
 *	mach_vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
mach_vm_msync(
	vm_map_t		map,
	mach_vm_address_t	address,
	mach_vm_size_t		size,
	vm_sync_t		sync_flags)
{

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}

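/*
 * Illustrative sketch (not part of the kernel sources): flushing a
 * mapped-file range synchronously back to its pager from user space
 * (cf. msync(2), which is the usual route to this call):
 *
 *	kr = mach_vm_msync(mach_task_self(), addr, size,
 *			   VM_SYNC_SYNCHRONOUS);
 *
 * Or'ing in VM_SYNC_INVALIDATE would also discard the cached pages,
 * per the flag combinations described above.
 */
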
/*
 *	vm_msync
 *
 *	Synchronises the memory range specified with its backing store
 *	image by either flushing or cleaning the contents to the appropriate
 *	memory manager.
 *
 *	interpretation of sync_flags
 *	VM_SYNC_INVALIDATE	- discard pages, only return precious
 *				  pages to manager.
 *
 *	VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS)
 *				- discard pages, write dirty or precious
 *				  pages back to memory manager.
 *
 *	VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS
 *				- write dirty or precious pages back to
 *				  the memory manager.
 *
 *	VM_SYNC_CONTIGUOUS	- does everything normally, but if there
 *				  is a hole in the region, and we would
 *				  have returned KERN_SUCCESS, return
 *				  KERN_INVALID_ADDRESS instead.
 *
 *	The addressability of the range is limited to that which can
 *	be described by a vm_address_t.
 *
 *	RETURNS
 *	KERN_INVALID_TASK		Bad task parameter
 *	KERN_INVALID_ARGUMENT		both sync and async were specified.
 *	KERN_SUCCESS			The usual.
 *	KERN_INVALID_ADDRESS		There was a hole in the region.
 */

kern_return_t
vm_msync(
	vm_map_t	map,
	vm_address_t	address,
	vm_size_t	size,
	vm_sync_t	sync_flags)
{

	if (map == VM_MAP_NULL)
		return(KERN_INVALID_TASK);

	return vm_map_msync(map, (vm_map_address_t)address,
			(vm_map_size_t)size, sync_flags);
}


int
vm_toggle_entry_reuse(int toggle, int *old_value)
{
	vm_map_t map = current_map();

	if(toggle == VM_TOGGLE_GETVALUE && old_value != NULL){
		*old_value = map->disable_vmentry_reuse;
	} else if(toggle == VM_TOGGLE_SET){
		vm_map_entry_t map_to_entry;

		vm_map_lock(map);
		vm_map_disable_hole_optimization(map);
		map->disable_vmentry_reuse = TRUE;
		__IGNORE_WCASTALIGN(map_to_entry = vm_map_to_entry(map));
		if (map->first_free == map_to_entry) {
			map->highest_entry_end = vm_map_min(map);
		} else {
			map->highest_entry_end = map->first_free->vme_end;
		}
		vm_map_unlock(map);
	} else if (toggle == VM_TOGGLE_CLEAR){
		vm_map_lock(map);
		map->disable_vmentry_reuse = FALSE;
		vm_map_unlock(map);
	} else
		return KERN_INVALID_ARGUMENT;

	return KERN_SUCCESS;
}

/*
 *	mach_vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 */
kern_return_t
mach_vm_behavior_set(
	vm_map_t		map,
	mach_vm_offset_t	start,
	mach_vm_size_t		size,
	vm_behavior_t		new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
				   new_behavior));
}

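/*
 * Illustrative sketch (not part of the kernel sources): advising the VM
 * system that a range will be touched sequentially (cf. madvise(2),
 * the usual route to this call):
 *
 *	kr = mach_vm_behavior_set(mach_task_self(), addr, size,
 *				  VM_BEHAVIOR_SEQUENTIAL);
 */
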
/*
 *	vm_behavior_set
 *
 *	Sets the paging behavior attribute for the specified range
 *	in the specified map.
 *
 *	This routine will fail with KERN_INVALID_ADDRESS if any address
 *	in [start,start+size) is not a valid allocated memory region.
 *
 *	This routine is potentially limited in addressability by the
 *	use of vm_offset_t (if the map provided is larger than the
 *	kernel's).
 */
kern_return_t
vm_behavior_set(
	vm_map_t	map,
	vm_offset_t	start,
	vm_size_t	size,
	vm_behavior_t	new_behavior)
{
	if ((map == VM_MAP_NULL) || (start + size < start))
		return(KERN_INVALID_ARGUMENT);

	if (size == 0)
		return KERN_SUCCESS;

	return(vm_map_behavior_set(map,
				   vm_map_trunc_page(start,
						     VM_MAP_PAGE_MASK(map)),
				   vm_map_round_page(start+size,
						     VM_MAP_PAGE_MASK(map)),
				   new_behavior));
}

/*
 *	mach_vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
mach_vm_region(
	vm_map_t		map,
	mach_vm_offset_t	*address,	/* IN/OUT */
	mach_vm_size_t		*size,		/* OUT */
	vm_region_flavor_t	flavor,		/* IN */
	vm_region_info_t	info,		/* OUT */
	mach_msg_type_number_t	*count,		/* IN/OUT */
	mach_port_t		*object_name)	/* OUT */
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = map_addr;
	*size = map_size;
	return kr;
}

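/*
 * Illustrative sketch (not part of the kernel sources): walking a
 * task's regions from user space with the basic-info flavor:
 *
 *	mach_vm_address_t address = 0;
 *	mach_vm_size_t size = 0;
 *	vm_region_basic_info_data_64_t info;
 *	mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
 *	mach_port_t object_name = MACH_PORT_NULL;
 *
 *	while (mach_vm_region(task, &address, &size,
 *			      VM_REGION_BASIC_INFO_64,
 *			      (vm_region_info_t)&info,
 *			      &count, &object_name) == KERN_SUCCESS) {
 *		... inspect [address, address + size) ...
 *		address += size;
 *	}
 */
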
/*
 *	vm_region_64 and vm_region:
 *
 *	User call to obtain information about a region in
 *	a task's address map. Currently, only one flavor is
 *	supported.
 *
 *	XXX The reserved and behavior fields cannot be filled
 *	in until the vm merge from the IK is completed, and
 *	vm_reserve is implemented.
 *
 *	XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
vm_region_64(
	vm_map_t		map,
	vm_offset_t		*address,	/* IN/OUT */
	vm_size_t		*size,		/* OUT */
	vm_region_flavor_t	flavor,		/* IN */
	vm_region_info_t	info,		/* OUT */
	mach_msg_type_number_t	*count,		/* IN/OUT */
	mach_port_t		*object_name)	/* OUT */
{
	vm_map_offset_t	map_addr;
	vm_map_size_t	map_size;
	kern_return_t	kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_offset_t)*address;
	map_size = (vm_map_size_t)*size;

	/* legacy conversion */
	if (VM_REGION_BASIC_INFO == flavor)
		flavor = VM_REGION_BASIC_INFO_64;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_offset_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

kern_return_t
vm_region(
	vm_map_t		map,
	vm_address_t		*address,	/* IN/OUT */
	vm_size_t		*size,		/* OUT */
	vm_region_flavor_t	flavor,		/* IN */
	vm_region_info_t	info,		/* OUT */
	mach_msg_type_number_t	*count,		/* IN/OUT */
	mach_port_t		*object_name)	/* OUT */
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region(map,
			   &map_addr, &map_size,
			   flavor, info, count,
			   object_name);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
	vm_map_t			map,
	mach_vm_address_t		*address,
	mach_vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = map_addr;
	*size = map_size;
	return kr;
}

/*
 *	vm_region_recurse: A form of vm_region which follows the
 *	submaps in a target map
 *
 */
kern_return_t
vm_region_recurse_64(
	vm_map_t			map,
	vm_address_t			*address,
	vm_size_t			*size,
	uint32_t			*depth,
	vm_region_recurse_info_64_t	info,
	mach_msg_type_number_t		*infoCnt)
{
	vm_map_address_t	map_addr;
	vm_map_size_t		map_size;
	kern_return_t		kr;

	if (VM_MAP_NULL == map)
		return KERN_INVALID_ARGUMENT;

	map_addr = (vm_map_address_t)*address;
	map_size = (vm_map_size_t)*size;

	kr = vm_map_region_recurse_64(
			map,
			&map_addr,
			&map_size,
			depth,
			(vm_region_submap_info_64_t)info,
			infoCnt);

	*address = CAST_DOWN(vm_address_t, map_addr);
	*size = CAST_DOWN(vm_size_t, map_size);

	if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
		return KERN_INVALID_ADDRESS;
	return kr;
}

91447636 A |
1604 | kern_return_t |
1605 | vm_region_recurse( | |
1606 | vm_map_t map, | |
1607 | vm_offset_t *address, /* IN/OUT */ | |
1608 | vm_size_t *size, /* OUT */ | |
1609 | natural_t *depth, /* IN/OUT */ | |
1610 | vm_region_recurse_info_t info32, /* IN/OUT */ | |
1611 | mach_msg_type_number_t *infoCnt) /* IN/OUT */ | |
1612 | { | |
1613 | vm_region_submap_info_data_64_t info64; | |
1614 | vm_region_submap_info_t info; | |
1615 | vm_map_address_t map_addr; | |
1616 | vm_map_size_t map_size; | |
1617 | kern_return_t kr; | |
1618 | ||
1619 | if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) | |
1620 | return KERN_INVALID_ARGUMENT; | |
1621 | ||
1622 | ||
1623 | map_addr = (vm_map_address_t)*address; | |
1624 | map_size = (vm_map_size_t)*size; | |
1625 | info = (vm_region_submap_info_t)info32; | |
1626 | *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64; | |
1627 | ||
1628 | kr = vm_map_region_recurse_64(map, &map_addr,&map_size, | |
1629 | depth, &info64, infoCnt); | |
1630 | ||
1631 | info->protection = info64.protection; | |
1632 | info->max_protection = info64.max_protection; | |
1633 | info->inheritance = info64.inheritance; | |
1634 | info->offset = (uint32_t)info64.offset; /* trouble-maker */ | |
1635 | info->user_tag = info64.user_tag; | |
1636 | info->pages_resident = info64.pages_resident; | |
1637 | info->pages_shared_now_private = info64.pages_shared_now_private; | |
1638 | info->pages_swapped_out = info64.pages_swapped_out; | |
1639 | info->pages_dirtied = info64.pages_dirtied; | |
1640 | info->ref_count = info64.ref_count; | |
1641 | info->shadow_depth = info64.shadow_depth; | |
1642 | info->external_pager = info64.external_pager; | |
1643 | info->share_mode = info64.share_mode; | |
1644 | info->is_submap = info64.is_submap; | |
1645 | info->behavior = info64.behavior; | |
1646 | info->object_id = info64.object_id; | |
1647 | info->user_wired_count = info64.user_wired_count; | |
1648 | ||
1649 | *address = CAST_DOWN(vm_address_t, map_addr); | |
1650 | *size = CAST_DOWN(vm_size_t, map_size); | |
1651 | *infoCnt = VM_REGION_SUBMAP_INFO_COUNT; | |
1652 | ||
1653 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) | |
1654 | return KERN_INVALID_ADDRESS; | |
1655 | return kr; | |
1656 | } | |
1657 | ||
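/*
 * Illustrative sketch (not part of this file): walking a task's
 * regions with mach_vm_region_recurse() from user space, where the
 * MIG interface accepts a task port in place of the vm_map_t.  The
 * header names and the descend-on-submap pattern are assumptions
 * based on common vmmap-style usage.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <stdio.h>

static void
dump_regions(task_t task)
{
	mach_vm_address_t addr = 0;
	mach_vm_size_t size;
	uint32_t depth = 0;		/* 0 = top map; larger values descend */
	vm_region_submap_info_data_64_t info;
	mach_msg_type_number_t count;
	kern_return_t kr;

	for (;;) {
		count = VM_REGION_SUBMAP_INFO_COUNT_64;
		kr = mach_vm_region_recurse(task, &addr, &size, &depth,
		    (vm_region_recurse_info_t)&info, &count);
		if (kr != KERN_SUCCESS)
			break;	/* no region at or above "addr" */
		if (info.is_submap) {
			/* re-query inside the submap instead of skipping it */
			depth++;
			continue;
		}
		printf("0x%016llx-0x%016llx prot %d/%d\n",
		    (unsigned long long)addr,
		    (unsigned long long)(addr + size),
		    info.protection, info.max_protection);
		addr += size;
	}
}
#endif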
2d21ac55 A |
1658 | kern_return_t |
1659 | mach_vm_purgable_control( | |
1660 | vm_map_t map, | |
1661 | mach_vm_offset_t address, | |
1662 | vm_purgable_t control, | |
1663 | int *state) | |
1664 | { | |
1665 | if (VM_MAP_NULL == map) | |
1666 | return KERN_INVALID_ARGUMENT; | |
1667 | ||
1668 | return vm_map_purgable_control(map, | |
39236c6e | 1669 | vm_map_trunc_page(address, PAGE_MASK), |
2d21ac55 A |
1670 | control, |
1671 | state); | |
1672 | } | |
1673 | ||
91447636 A |
1674 | kern_return_t |
1675 | vm_purgable_control( | |
1676 | vm_map_t map, | |
1677 | vm_offset_t address, | |
1678 | vm_purgable_t control, | |
1679 | int *state) | |
1680 | { | |
1681 | if (VM_MAP_NULL == map) | |
1682 | return KERN_INVALID_ARGUMENT; | |
1683 | ||
1684 | return vm_map_purgable_control(map, | |
39236c6e | 1685 | vm_map_trunc_page(address, PAGE_MASK), |
91447636 A |
1686 | control, |
1687 | state); | |
1688 | } | |
1689 | ||
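/*
 * Illustrative user-space sketch (assumptions noted): the wrappers
 * above are reached via MIG with a task port standing in for the
 * vm_map_t.  A region allocated with VM_FLAGS_PURGABLE can then be
 * flipped between volatile and nonvolatile; on VM_PURGABLE_SET_STATE
 * the previous state is returned through "state".
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
purgeable_region_example(void)
{
	mach_vm_address_t addr = 0;
	kern_return_t kr;
	int state;

	kr = mach_vm_allocate(mach_task_self(), &addr, vm_page_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* Contents may now be reclaimed under memory pressure. */
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);
	if (kr != KERN_SUCCESS)
		return kr;

	/* Take it back for use; if "state" comes back VM_PURGABLE_EMPTY, */
	/* the pages were discarded and now read as zero-fill. */
	state = VM_PURGABLE_NONVOLATILE;
	return mach_vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);
}
#endif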
1c79356b A |
1690 | |
1691 | /* | |
1692 | * Ordinarily, the right to allocate CPM is restricted | |
1693 | * to privileged applications (those that can gain access | |
91447636 A |
1694 | * to the host priv port). Set this variable to zero if |
1695 | * you want to let any application allocate CPM. | |
1c79356b A |
1696 | */ |
1697 | unsigned int vm_allocate_cpm_privileged = 0; | |
1698 | ||
1699 | /* | |
1700 | * Allocate memory in the specified map, with the caveat that | |
1701 | * the memory is physically contiguous. This call may fail | |
1702 | * if the system can't find sufficient contiguous memory. | |
1703 | * This call may cause or lead to heart-stopping amounts of | |
1704 | * paging activity. | |
1705 | * | |
1706 | * Memory obtained from this call should be freed in the | |
1707 | * normal way, viz., via vm_deallocate. | |
1708 | */ | |
1709 | kern_return_t | |
1710 | vm_allocate_cpm( | |
1711 | host_priv_t host_priv, | |
91447636 A |
1712 | vm_map_t map, |
1713 | vm_address_t *addr, | |
1714 | vm_size_t size, | |
1c79356b A |
1715 | int flags) |
1716 | { | |
91447636 A |
1717 | vm_map_address_t map_addr; |
1718 | vm_map_size_t map_size; | |
1c79356b | 1719 | kern_return_t kr; |
1c79356b | 1720 | |
91447636 | 1721 | if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv) |
1c79356b A |
1722 | return KERN_INVALID_HOST; |
1723 | ||
91447636 | 1724 | if (VM_MAP_NULL == map) |
1c79356b | 1725 | return KERN_INVALID_ARGUMENT; |
1c79356b | 1726 | |
91447636 A |
1727 | map_addr = (vm_map_address_t)*addr; |
1728 | map_size = (vm_map_size_t)size; | |
1c79356b | 1729 | |
91447636 A |
1730 | kr = vm_map_enter_cpm(map, |
1731 | &map_addr, | |
1732 | map_size, | |
1733 | flags); | |
1c79356b | 1734 | |
91447636 | 1735 | *addr = CAST_DOWN(vm_address_t, map_addr); |
1c79356b A |
1736 | return kr; |
1737 | } | |
1738 | ||
1739 | ||
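/*
 * Hedged usage sketch: vm_allocate_cpm() is reached via MIG; the
 * host_priv port normally requires a privileged caller, although with
 * vm_allocate_cpm_privileged left at zero (as above) HOST_PRIV_NULL
 * is accepted.  Names outside this file are assumptions.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
contiguous_buffer_example(host_priv_t host_priv, vm_size_t size)
{
	vm_address_t addr = 0;
	kern_return_t kr;

	kr = vm_allocate_cpm(host_priv, mach_task_self(), &addr, size,
	    VM_FLAGS_ANYWHERE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the physically contiguous range at "addr" ... */

	/* freed in the normal way, per the comment above */
	return vm_deallocate(mach_task_self(), addr, size);
}
#endif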
91447636 A |
1740 | kern_return_t |
1741 | mach_vm_page_query( | |
1742 | vm_map_t map, | |
1743 | mach_vm_offset_t offset, | |
1744 | int *disposition, | |
1745 | int *ref_count) | |
1746 | { | |
1747 | if (VM_MAP_NULL == map) | |
1748 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 1749 | |
39236c6e A |
1750 | return vm_map_page_query_internal( |
1751 | map, | |
1752 | vm_map_trunc_page(offset, PAGE_MASK), | |
1753 | disposition, ref_count); | |
91447636 | 1754 | } |
1c79356b A |
1755 | |
1756 | kern_return_t | |
91447636 A |
1757 | vm_map_page_query( |
1758 | vm_map_t map, | |
1759 | vm_offset_t offset, | |
1760 | int *disposition, | |
1761 | int *ref_count) | |
1c79356b | 1762 | { |
91447636 A |
1763 | if (VM_MAP_NULL == map) |
1764 | return KERN_INVALID_ARGUMENT; | |
1765 | ||
39236c6e A |
1766 | return vm_map_page_query_internal( |
1767 | map, | |
1768 | vm_map_trunc_page(offset, PAGE_MASK), | |
1769 | disposition, ref_count); | |
b0d623f7 A |
1770 | } |
1771 | ||
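/*
 * Illustrative sketch (assumed user-space prototype from the Mach
 * headers): ask for the disposition of a single page in the current
 * task.  VM_PAGE_QUERY_PAGE_PRESENT is the disposition bit reported
 * by vm_map_page_query_internal() for resident pages.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static boolean_t
page_is_resident(mach_vm_address_t addr)
{
	int disposition = 0;
	int ref_count = 0;

	if (mach_vm_page_query(mach_task_self(), addr,
	    &disposition, &ref_count) != KERN_SUCCESS)
		return FALSE;
	return (disposition & VM_PAGE_QUERY_PAGE_PRESENT) ? TRUE : FALSE;
}
#endif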
1772 | kern_return_t | |
1773 | mach_vm_page_info( | |
1774 | vm_map_t map, | |
1775 | mach_vm_address_t address, | |
1776 | vm_page_info_flavor_t flavor, | |
1777 | vm_page_info_t info, | |
1778 | mach_msg_type_number_t *count) | |
1779 | { | |
1780 | kern_return_t kr; | |
1781 | ||
1782 | if (map == VM_MAP_NULL) { | |
1783 | return KERN_INVALID_ARGUMENT; | |
1784 | } | |
1785 | ||
1786 | kr = vm_map_page_info(map, address, flavor, info, count); | |
1787 | return kr; | |
1c79356b A |
1788 | } |
1789 | ||
91447636 | 1790 | /* map a (whole) upl into an address space */ |
1c79356b | 1791 | kern_return_t |
91447636 A |
1792 | vm_upl_map( |
1793 | vm_map_t map, | |
1794 | upl_t upl, | |
b0d623f7 | 1795 | vm_address_t *dst_addr) |
1c79356b | 1796 | { |
91447636 | 1797 | vm_map_offset_t map_addr; |
1c79356b A |
1798 | kern_return_t kr; |
1799 | ||
91447636 A |
1800 | if (VM_MAP_NULL == map) |
1801 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 1802 | |
91447636 | 1803 | kr = vm_map_enter_upl(map, upl, &map_addr); |
b0d623f7 | 1804 | *dst_addr = CAST_DOWN(vm_address_t, map_addr); |
91447636 A |
1805 | return kr; |
1806 | } | |
1c79356b | 1807 | |
91447636 A |
1808 | kern_return_t |
1809 | vm_upl_unmap( | |
1810 | vm_map_t map, | |
1811 | upl_t upl) | |
1812 | { | |
1813 | if (VM_MAP_NULL == map) | |
1814 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 1815 | |
91447636 A |
1816 | return (vm_map_remove_upl(map, upl)); |
1817 | } | |
1c79356b | 1818 | |
91447636 A |
1819 | /* Retrieve a upl for an object underlying an address range in a map */ |
1820 | ||
1821 | kern_return_t | |
1822 | vm_map_get_upl( | |
1823 | vm_map_t map, | |
cc9f6e38 | 1824 | vm_map_offset_t map_offset, |
91447636 A |
1825 | upl_size_t *upl_size, |
1826 | upl_t *upl, | |
1827 | upl_page_info_array_t page_list, | |
1828 | unsigned int *count, | |
3e170ce0 | 1829 | upl_control_flags_t *flags, |
91447636 A |
1830 | int force_data_sync) |
1831 | { | |
3e170ce0 A |
1832 | upl_control_flags_t map_flags; |
1833 | kern_return_t kr; | |
1c79356b | 1834 | |
91447636 A |
1835 | if (VM_MAP_NULL == map) |
1836 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 1837 | |
91447636 A |
1838 | map_flags = *flags & ~UPL_NOZEROFILL; |
1839 | if (force_data_sync) | |
1840 | map_flags |= UPL_FORCE_DATA_SYNC; | |
1c79356b | 1841 | |
91447636 A |
1842 | kr = vm_map_create_upl(map, |
1843 | map_offset, | |
1844 | upl_size, | |
1845 | upl, | |
1846 | page_list, | |
1847 | count, | |
1848 | &map_flags); | |
1c79356b | 1849 | |
91447636 A |
1850 | *flags = (map_flags & ~UPL_FORCE_DATA_SYNC); |
1851 | return kr; | |
1c79356b A |
1852 | } |
1853 | ||
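/*
 * Rough kernel-side sketch of the UPL helpers above; the flag and
 * sizing choices are assumptions for illustration, not a recipe.
 * The flow is: vm_map_get_upl() to wire and describe the pages,
 * vm_upl_map() to get a kernel window, then vm_upl_unmap(),
 * upl_abort() (or upl_commit()) and upl_deallocate() to tear down.
 */
#if 0
static kern_return_t
upl_window_example(vm_map_t user_map, vm_map_offset_t user_addr)
{
	upl_size_t size = PAGE_SIZE;
	upl_t upl = NULL;
	upl_page_info_t page_info[1];
	unsigned int count = 1;
	upl_control_flags_t flags = UPL_SET_LITE | UPL_SET_IO_WIRE;
	vm_address_t kaddr;
	kern_return_t kr;

	kr = vm_map_get_upl(user_map, user_addr, &size, &upl,
	    page_info, &count, &flags, 0 /* force_data_sync */);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_upl_map(kernel_map, upl, &kaddr);	/* maps the whole upl */
	if (kr == KERN_SUCCESS) {
		/* ... touch the pages through "kaddr" ... */
		(void) vm_upl_unmap(kernel_map, upl);
	}
	(void) upl_abort(upl, 0);	/* release the pages unmodified */
	upl_deallocate(upl);
	return kr;
}
#endif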
1c79356b | 1854 | /* |
91447636 A |
1855 | * mach_make_memory_entry_64 |
1856 | * | |
1857 | * Think of it as a two-stage vm_remap() operation. First |
1858 | * you get a handle. Second, you map that handle somewhere |
1859 | * else, rather than doing it all at once (and without |
1860 | * needing access to the other whole map). |
1c79356b A |
1861 | */ |
1862 | ||
1863 | kern_return_t | |
1864 | mach_make_memory_entry_64( | |
1865 | vm_map_t target_map, | |
91447636 A |
1866 | memory_object_size_t *size, |
1867 | memory_object_offset_t offset, | |
1c79356b A |
1868 | vm_prot_t permission, |
1869 | ipc_port_t *object_handle, | |
91447636 | 1870 | ipc_port_t parent_handle) |
1c79356b A |
1871 | { |
1872 | vm_map_version_t version; | |
91447636 A |
1873 | vm_named_entry_t parent_entry; |
1874 | vm_named_entry_t user_entry; | |
1c79356b | 1875 | ipc_port_t user_handle; |
1c79356b | 1876 | kern_return_t kr; |
91447636 | 1877 | vm_map_t real_map; |
1c79356b A |
1878 | |
1879 | /* needed for call to vm_map_lookup_locked */ | |
91447636 | 1880 | boolean_t wired; |
3e170ce0 | 1881 | boolean_t iskernel; |
1c79356b | 1882 | vm_object_offset_t obj_off; |
91447636 | 1883 | vm_prot_t prot; |
2d21ac55 | 1884 | struct vm_object_fault_info fault_info; |
91447636 A |
1885 | vm_object_t object; |
1886 | vm_object_t shadow_object; | |
1c79356b A |
1887 | |
1888 | /* needed for direct map entry manipulation */ | |
1889 | vm_map_entry_t map_entry; | |
9bccf70c | 1890 | vm_map_entry_t next_entry; |
91447636 A |
1891 | vm_map_t local_map; |
1892 | vm_map_t original_map = target_map; | |
3e170ce0 A |
1893 | vm_map_size_t total_size, map_size; |
1894 | vm_map_offset_t map_start, map_end; | |
91447636 | 1895 | vm_map_offset_t local_offset; |
1c79356b | 1896 | vm_object_size_t mappable_size; |
9bccf70c | 1897 | |
39236c6e A |
1898 | /* |
1899 | * Stash the offset in the page for use by vm_map_enter_mem_object() | |
1900 | * in the VM_FLAGS_RETURN_DATA_ADDR/MAP_MEM_USE_DATA_ADDR case. | |
1901 | */ | |
1902 | vm_object_offset_t offset_in_page; | |
1903 | ||
91447636 A |
1904 | unsigned int access; |
1905 | vm_prot_t protections; | |
6d2010ae | 1906 | vm_prot_t original_protections, mask_protections; |
91447636 | 1907 | unsigned int wimg_mode; |
91447636 | 1908 | |
e2d2fc5c | 1909 | boolean_t force_shadow = FALSE; |
39236c6e | 1910 | boolean_t use_data_addr; |
3e170ce0 | 1911 | boolean_t use_4K_compat; |
e2d2fc5c | 1912 | |
91447636 A |
1913 | if (((permission & 0x00FF0000) & |
1914 | ~(MAP_MEM_ONLY | | |
1915 | MAP_MEM_NAMED_CREATE | | |
1916 | MAP_MEM_PURGABLE | | |
39236c6e A |
1917 | MAP_MEM_NAMED_REUSE | |
1918 | MAP_MEM_USE_DATA_ADDR | | |
1919 | MAP_MEM_VM_COPY | | |
3e170ce0 | 1920 | MAP_MEM_4K_DATA_ADDR | |
39236c6e | 1921 | MAP_MEM_VM_SHARE))) { |
91447636 A |
1922 | /* |
1923 | * Unknown flag: reject for forward compatibility. | |
1924 | */ | |
1925 | return KERN_INVALID_VALUE; | |
1926 | } | |
1927 | ||
1928 | if (parent_handle != IP_NULL && | |
1929 | ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) { | |
1930 | parent_entry = (vm_named_entry_t) parent_handle->ip_kobject; | |
1931 | } else { | |
1932 | parent_entry = NULL; | |
1933 | } | |
55e303ae | 1934 | |
39236c6e A |
1935 | if (parent_entry && parent_entry->is_copy) { |
1936 | return KERN_INVALID_ARGUMENT; | |
1937 | } | |
1938 | ||
6d2010ae A |
1939 | original_protections = permission & VM_PROT_ALL; |
1940 | protections = original_protections; | |
1941 | mask_protections = permission & VM_PROT_IS_MASK; | |
55e303ae | 1942 | access = GET_MAP_MEM(permission); |
39236c6e | 1943 | use_data_addr = ((permission & MAP_MEM_USE_DATA_ADDR) != 0); |
3e170ce0 | 1944 | use_4K_compat = ((permission & MAP_MEM_4K_DATA_ADDR) != 0); |
55e303ae | 1945 | |
91447636 A |
1946 | user_handle = IP_NULL; |
1947 | user_entry = NULL; | |
1948 | ||
3e170ce0 | 1949 | map_start = vm_map_trunc_page(offset, PAGE_MASK); |
1c79356b | 1950 | |
91447636 A |
1951 | if (permission & MAP_MEM_ONLY) { |
1952 | boolean_t parent_is_object; | |
55e303ae | 1953 | |
3e170ce0 A |
1954 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
1955 | map_size = map_end - map_start; | |
39236c6e | 1956 | |
3e170ce0 | 1957 | if (use_data_addr || use_4K_compat || parent_entry == NULL) { |
55e303ae A |
1958 | return KERN_INVALID_ARGUMENT; |
1959 | } | |
91447636 | 1960 | |
39236c6e A |
1961 | parent_is_object = !(parent_entry->is_sub_map || |
1962 | parent_entry->is_pager); | |
91447636 A |
1963 | object = parent_entry->backing.object; |
1964 | if(parent_is_object && object != VM_OBJECT_NULL) | |
55e303ae | 1965 | wimg_mode = object->wimg_bits; |
91447636 | 1966 | else |
6d2010ae | 1967 | wimg_mode = VM_WIMG_USE_DEFAULT; |
91447636 A |
1968 | if((access != GET_MAP_MEM(parent_entry->protection)) && |
1969 | !(parent_entry->protection & VM_PROT_WRITE)) { | |
55e303ae A |
1970 | return KERN_INVALID_RIGHT; |
1971 | } | |
1972 | if(access == MAP_MEM_IO) { | |
91447636 | 1973 | SET_MAP_MEM(access, parent_entry->protection); |
55e303ae A |
1974 | wimg_mode = VM_WIMG_IO; |
1975 | } else if (access == MAP_MEM_COPYBACK) { | |
91447636 | 1976 | SET_MAP_MEM(access, parent_entry->protection); |
6d2010ae | 1977 | wimg_mode = VM_WIMG_USE_DEFAULT; |
316670eb A |
1978 | } else if (access == MAP_MEM_INNERWBACK) { |
1979 | SET_MAP_MEM(access, parent_entry->protection); | |
1980 | wimg_mode = VM_WIMG_INNERWBACK; | |
55e303ae | 1981 | } else if (access == MAP_MEM_WTHRU) { |
91447636 | 1982 | SET_MAP_MEM(access, parent_entry->protection); |
55e303ae A |
1983 | wimg_mode = VM_WIMG_WTHRU; |
1984 | } else if (access == MAP_MEM_WCOMB) { | |
91447636 | 1985 | SET_MAP_MEM(access, parent_entry->protection); |
55e303ae A |
1986 | wimg_mode = VM_WIMG_WCOMB; |
1987 | } | |
6d2010ae | 1988 | if (parent_is_object && object && |
55e303ae A |
1989 | (access != MAP_MEM_NOOP) && |
1990 | (!(object->nophyscache))) { | |
6d2010ae A |
1991 | |
1992 | if (object->wimg_bits != wimg_mode) { | |
1993 | vm_object_lock(object); | |
1994 | vm_object_change_wimg_mode(object, wimg_mode); | |
1995 | vm_object_unlock(object); | |
55e303ae A |
1996 | } |
1997 | } | |
91447636 A |
1998 | if (object_handle) |
1999 | *object_handle = IP_NULL; | |
55e303ae | 2000 | return KERN_SUCCESS; |
39236c6e | 2001 | } else if (permission & MAP_MEM_NAMED_CREATE) { |
3e170ce0 A |
2002 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2003 | map_size = map_end - map_start; | |
39236c6e | 2004 | |
3e170ce0 | 2005 | if (use_data_addr || use_4K_compat) { |
39236c6e A |
2006 | return KERN_INVALID_ARGUMENT; |
2007 | } | |
55e303ae | 2008 | |
91447636 A |
2009 | kr = mach_memory_entry_allocate(&user_entry, &user_handle); |
2010 | if (kr != KERN_SUCCESS) { | |
2011 | return KERN_FAILURE; | |
2012 | } | |
55e303ae | 2013 | |
91447636 A |
2014 | /* |
2015 | * Force the creation of the VM object now. | |
2016 | */ | |
b0d623f7 | 2017 | if (map_size > (vm_map_size_t) ANON_MAX_SIZE) { |
91447636 | 2018 | /* |
b0d623f7 | 2019 | * LP64todo - for now, we can only allocate 4GB-4096 |
91447636 A |
2020 | * internal objects because the default pager can't |
2021 | * page bigger ones. Remove this when it can. | |
2022 | */ | |
2023 | kr = KERN_FAILURE; | |
2024 | goto make_mem_done; | |
2025 | } | |
1c79356b | 2026 | |
91447636 A |
2027 | object = vm_object_allocate(map_size); |
2028 | assert(object != VM_OBJECT_NULL); | |
1c79356b | 2029 | |
91447636 A |
2030 | if (permission & MAP_MEM_PURGABLE) { |
2031 | if (! (permission & VM_PROT_WRITE)) { | |
2032 | /* if we can't write, we can't purge */ | |
2033 | vm_object_deallocate(object); | |
2034 | kr = KERN_INVALID_ARGUMENT; | |
2035 | goto make_mem_done; | |
2036 | } | |
2d21ac55 | 2037 | object->purgable = VM_PURGABLE_NONVOLATILE; |
fe8ab488 A |
2038 | assert(object->vo_purgeable_owner == NULL); |
2039 | assert(object->resident_page_count == 0); | |
2040 | assert(object->wired_page_count == 0); | |
2041 | vm_object_lock(object); | |
2042 | vm_purgeable_nonvolatile_enqueue(object, | |
2043 | current_task()); | |
2044 | vm_object_unlock(object); | |
91447636 | 2045 | } |
1c79356b | 2046 | |
91447636 A |
2047 | /* |
2048 | * The VM object is brand new and nobody else knows about it, | |
2049 | * so we don't need to lock it. | |
2050 | */ | |
1c79356b | 2051 | |
91447636 A |
2052 | wimg_mode = object->wimg_bits; |
2053 | if (access == MAP_MEM_IO) { | |
2054 | wimg_mode = VM_WIMG_IO; | |
2055 | } else if (access == MAP_MEM_COPYBACK) { | |
6d2010ae | 2056 | wimg_mode = VM_WIMG_USE_DEFAULT; |
316670eb A |
2057 | } else if (access == MAP_MEM_INNERWBACK) { |
2058 | wimg_mode = VM_WIMG_INNERWBACK; | |
91447636 A |
2059 | } else if (access == MAP_MEM_WTHRU) { |
2060 | wimg_mode = VM_WIMG_WTHRU; | |
2061 | } else if (access == MAP_MEM_WCOMB) { | |
2062 | wimg_mode = VM_WIMG_WCOMB; | |
2063 | } | |
2064 | if (access != MAP_MEM_NOOP) { | |
2065 | object->wimg_bits = wimg_mode; | |
2066 | } | |
2067 | /* the object has no pages, so no WIMG bits to update here */ | |
1c79356b | 2068 | |
91447636 A |
2069 | /* |
2070 | * XXX | |
2071 | * We use this path when we want to make sure that | |
2072 | * nobody messes with the object (coalesce, for | |
2073 | * example) before we map it. | |
2074 | * We might want to use these objects for transposition via | |
2075 | * vm_object_transpose() too, so we don't want any copy or | |
2076 | * shadow objects either... | |
2077 | */ | |
2078 | object->copy_strategy = MEMORY_OBJECT_COPY_NONE; | |
fe8ab488 | 2079 | object->true_share = TRUE; |
1c79356b | 2080 | |
91447636 A |
2081 | user_entry->backing.object = object; |
2082 | user_entry->internal = TRUE; | |
2083 | user_entry->is_sub_map = FALSE; | |
2084 | user_entry->is_pager = FALSE; | |
2085 | user_entry->offset = 0; | |
39236c6e | 2086 | user_entry->data_offset = 0; |
91447636 A |
2087 | user_entry->protection = protections; |
2088 | SET_MAP_MEM(access, user_entry->protection); | |
2089 | user_entry->size = map_size; | |
55e303ae A |
2090 | |
2091 | /* user_object pager and internal fields are not used */ | |
2092 | /* when the object field is filled in. */ | |
2093 | ||
3e170ce0 A |
2094 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
2095 | user_entry->data_offset)); | |
55e303ae A |
2096 | *object_handle = user_handle; |
2097 | return KERN_SUCCESS; | |
2098 | } | |
2099 | ||
39236c6e A |
2100 | if (permission & MAP_MEM_VM_COPY) { |
2101 | vm_map_copy_t copy; | |
2102 | ||
2103 | if (target_map == VM_MAP_NULL) { | |
2104 | return KERN_INVALID_TASK; | |
2105 | } | |
2106 | ||
3e170ce0 A |
2107 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2108 | map_size = map_end - map_start; | |
2109 | if (use_data_addr || use_4K_compat) { | |
2110 | offset_in_page = offset - map_start; | |
2111 | if (use_4K_compat) | |
2112 | offset_in_page &= ~((signed)(0xFFF)); | |
39236c6e | 2113 | } else { |
39236c6e A |
2114 | offset_in_page = 0; |
2115 | } | |
2116 | ||
2117 | kr = vm_map_copyin(target_map, | |
3e170ce0 | 2118 | map_start, |
39236c6e A |
2119 | map_size, |
2120 | FALSE, | |
2121 | ©); | |
2122 | if (kr != KERN_SUCCESS) { | |
2123 | return kr; | |
2124 | } | |
2125 | ||
2126 | kr = mach_memory_entry_allocate(&user_entry, &user_handle); | |
2127 | if (kr != KERN_SUCCESS) { | |
2128 | vm_map_copy_discard(copy); | |
2129 | return KERN_FAILURE; | |
2130 | } | |
2131 | ||
2132 | user_entry->backing.copy = copy; | |
2133 | user_entry->internal = FALSE; | |
2134 | user_entry->is_sub_map = FALSE; | |
2135 | user_entry->is_pager = FALSE; | |
2136 | user_entry->is_copy = TRUE; | |
2137 | user_entry->offset = 0; | |
2138 | user_entry->protection = protections; | |
2139 | user_entry->size = map_size; | |
2140 | user_entry->data_offset = offset_in_page; | |
2141 | ||
3e170ce0 A |
2142 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
2143 | user_entry->data_offset)); | |
39236c6e A |
2144 | *object_handle = user_handle; |
2145 | return KERN_SUCCESS; | |
2146 | } | |
2147 | ||
2148 | if (permission & MAP_MEM_VM_SHARE) { | |
2149 | vm_map_copy_t copy; | |
2150 | vm_prot_t cur_prot, max_prot; | |
2151 | ||
2152 | if (target_map == VM_MAP_NULL) { | |
2153 | return KERN_INVALID_TASK; | |
2154 | } | |
2155 | ||
3e170ce0 A |
2156 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2157 | map_size = map_end - map_start; | |
2158 | if (use_data_addr || use_4K_compat) { | |
2159 | offset_in_page = offset - map_start; | |
2160 | if (use_4K_compat) | |
2161 | offset_in_page &= ~((signed)(0xFFF)); | |
39236c6e | 2162 | } else { |
39236c6e A |
2163 | offset_in_page = 0; |
2164 | } | |
2165 | ||
2166 | kr = vm_map_copy_extract(target_map, | |
3e170ce0 | 2167 | map_start, |
39236c6e A |
2168 | map_size, |
2169 | ©, | |
2170 | &cur_prot, | |
2171 | &max_prot); | |
2172 | if (kr != KERN_SUCCESS) { | |
2173 | return kr; | |
2174 | } | |
2175 | ||
2176 | if (mask_protections) { | |
2177 | /* | |
2178 | * We just want as much of "original_protections" | |
2179 | * as we can get out of the actual "cur_prot". | |
2180 | */ | |
2181 | protections &= cur_prot; | |
2182 | if (protections == VM_PROT_NONE) { | |
2183 | /* no access at all: fail */ | |
2184 | vm_map_copy_discard(copy); | |
2185 | return KERN_PROTECTION_FAILURE; | |
2186 | } | |
2187 | } else { | |
2188 | /* | |
2189 | * We want exactly "original_protections" | |
2190 | * out of "cur_prot". | |
2191 | */ | |
2192 | if ((cur_prot & protections) != protections) { | |
2193 | vm_map_copy_discard(copy); | |
2194 | return KERN_PROTECTION_FAILURE; | |
2195 | } | |
2196 | } | |
2197 | ||
2198 | kr = mach_memory_entry_allocate(&user_entry, &user_handle); | |
2199 | if (kr != KERN_SUCCESS) { | |
2200 | vm_map_copy_discard(copy); | |
2201 | return KERN_FAILURE; | |
2202 | } | |
2203 | ||
2204 | user_entry->backing.copy = copy; | |
2205 | user_entry->internal = FALSE; | |
2206 | user_entry->is_sub_map = FALSE; | |
2207 | user_entry->is_pager = FALSE; | |
2208 | user_entry->is_copy = TRUE; | |
2209 | user_entry->offset = 0; | |
2210 | user_entry->protection = protections; | |
2211 | user_entry->size = map_size; | |
2212 | user_entry->data_offset = offset_in_page; | |
2213 | ||
3e170ce0 A |
2214 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
2215 | user_entry->data_offset)); | |
39236c6e A |
2216 | *object_handle = user_handle; |
2217 | return KERN_SUCCESS; | |
2218 | } | |
2219 | ||
91447636 A |
2220 | if (parent_entry == NULL || |
2221 | (permission & MAP_MEM_NAMED_REUSE)) { | |
2222 | ||
3e170ce0 A |
2223 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2224 | map_size = map_end - map_start; | |
2225 | if (use_data_addr || use_4K_compat) { | |
2226 | offset_in_page = offset - map_start; | |
2227 | if (use_4K_compat) | |
2228 | offset_in_page &= ~((signed)(0xFFF)); | |
39236c6e | 2229 | } else { |
39236c6e A |
2230 | offset_in_page = 0; |
2231 | } | |
2232 | ||
91447636 A |
2233 | /* Create a named object based on address range within the task map */ |
2234 | /* Go find the object at given address */ | |
1c79356b | 2235 | |
2d21ac55 A |
2236 | if (target_map == VM_MAP_NULL) { |
2237 | return KERN_INVALID_TASK; | |
2238 | } | |
2239 | ||
91447636 | 2240 | redo_lookup: |
6d2010ae | 2241 | protections = original_protections; |
1c79356b A |
2242 | vm_map_lock_read(target_map); |
2243 | ||
2244 | /* get the object associated with the target address */ | |
2245 | /* note we check the permission of the range against */ | |
2246 | /* that requested by the caller */ | |
2247 | ||
3e170ce0 | 2248 | kr = vm_map_lookup_locked(&target_map, map_start, |
6d2010ae A |
2249 | protections | mask_protections, |
2250 | OBJECT_LOCK_EXCLUSIVE, &version, | |
2251 | &object, &obj_off, &prot, &wired, | |
2252 | &fault_info, | |
2253 | &real_map); | |
1c79356b A |
2254 | if (kr != KERN_SUCCESS) { |
2255 | vm_map_unlock_read(target_map); | |
2256 | goto make_mem_done; | |
2257 | } | |
6d2010ae A |
2258 | if (mask_protections) { |
2259 | /* | |
2260 | * The caller asked us to use the "protections" as | |
2261 | * a mask, so restrict "protections" to what this | |
2262 | * mapping actually allows. | |
2263 | */ | |
2264 | protections &= prot; | |
2265 | } | |
55e303ae | 2266 | if (((prot & protections) != protections) |
9bccf70c | 2267 | || (object == kernel_object)) { |
1c79356b A |
2268 | kr = KERN_INVALID_RIGHT; |
2269 | vm_object_unlock(object); | |
2270 | vm_map_unlock_read(target_map); | |
91447636 A |
2271 | if(real_map != target_map) |
2272 | vm_map_unlock_read(real_map); | |
9bccf70c A |
2273 | if(object == kernel_object) { |
2274 | printf("Warning: Attempt to create a named" | |
2275 | " entry from the kernel_object\n"); | |
2276 | } | |
1c79356b A |
2277 | goto make_mem_done; |
2278 | } | |
2279 | ||
2280 | /* We have an object, now check to see if this object */ | |
2281 | /* is suitable. If not, create a shadow and share that */ | |
91447636 A |
2282 | |
2283 | /* | |
2284 | * We have to unlock the VM object to avoid deadlocking with | |
2285 | * a VM map lock (the lock ordering is map, the object), if we | |
2286 | * need to modify the VM map to create a shadow object. Since | |
2287 | * we might release the VM map lock below anyway, we have | |
2288 | * to release the VM map lock now. | |
2289 | * XXX FBDP There must be a way to avoid this double lookup... | |
2290 | * | |
2291 | * Take an extra reference on the VM object to make sure it's | |
2292 | * not going to disappear. | |
2293 | */ | |
2294 | vm_object_reference_locked(object); /* extra ref to hold obj */ | |
2295 | vm_object_unlock(object); | |
2296 | ||
9bccf70c | 2297 | local_map = original_map; |
3e170ce0 | 2298 | local_offset = map_start; |
9bccf70c A |
2299 | if(target_map != local_map) { |
2300 | vm_map_unlock_read(target_map); | |
91447636 A |
2301 | if(real_map != target_map) |
2302 | vm_map_unlock_read(real_map); | |
9bccf70c A |
2303 | vm_map_lock_read(local_map); |
2304 | target_map = local_map; | |
91447636 | 2305 | real_map = local_map; |
9bccf70c | 2306 | } |
1c79356b | 2307 | while(TRUE) { |
9bccf70c A |
2308 | if(!vm_map_lookup_entry(local_map, |
2309 | local_offset, &map_entry)) { | |
1c79356b | 2310 | kr = KERN_INVALID_ARGUMENT; |
1c79356b | 2311 | vm_map_unlock_read(target_map); |
91447636 A |
2312 | if(real_map != target_map) |
2313 | vm_map_unlock_read(real_map); | |
2314 | vm_object_deallocate(object); /* release extra ref */ | |
2315 | object = VM_OBJECT_NULL; | |
1c79356b A |
2316 | goto make_mem_done; |
2317 | } | |
3e170ce0 | 2318 | iskernel = (local_map->pmap == kernel_pmap); |
1c79356b | 2319 | if(!(map_entry->is_sub_map)) { |
3e170ce0 | 2320 | if (VME_OBJECT(map_entry) != object) { |
1c79356b | 2321 | kr = KERN_INVALID_ARGUMENT; |
1c79356b | 2322 | vm_map_unlock_read(target_map); |
91447636 A |
2323 | if(real_map != target_map) |
2324 | vm_map_unlock_read(real_map); | |
2325 | vm_object_deallocate(object); /* release extra ref */ | |
2326 | object = VM_OBJECT_NULL; | |
1c79356b A |
2327 | goto make_mem_done; |
2328 | } | |
2329 | break; | |
2330 | } else { | |
9bccf70c A |
2331 | vm_map_t tmap; |
2332 | tmap = local_map; | |
3e170ce0 | 2333 | local_map = VME_SUBMAP(map_entry); |
9bccf70c | 2334 | |
1c79356b | 2335 | vm_map_lock_read(local_map); |
9bccf70c | 2336 | vm_map_unlock_read(tmap); |
1c79356b | 2337 | target_map = local_map; |
91447636 | 2338 | real_map = local_map; |
9bccf70c | 2339 | local_offset = local_offset - map_entry->vme_start; |
3e170ce0 | 2340 | local_offset += VME_OFFSET(map_entry); |
1c79356b A |
2341 | } |
2342 | } | |
91447636 A |
2343 | |
2344 | /* | |
2345 | * We found the VM map entry, lock the VM object again. | |
2346 | */ | |
2347 | vm_object_lock(object); | |
2348 | if(map_entry->wired_count) { | |
2349 | /* JMM - The check below should be reworked instead. */ | |
2350 | object->true_share = TRUE; | |
2351 | } | |
6d2010ae A |
2352 | if (mask_protections) { |
2353 | /* | |
2354 | * The caller asked us to use the "protections" as | |
2355 | * a mask, so restrict "protections" to what this | |
2356 | * mapping actually allows. | |
2357 | */ | |
2358 | protections &= map_entry->max_protection; | |
2359 | } | |
55e303ae | 2360 | if(((map_entry->max_protection) & protections) != protections) { |
1c79356b A |
2361 | kr = KERN_INVALID_RIGHT; |
2362 | vm_object_unlock(object); | |
2363 | vm_map_unlock_read(target_map); | |
91447636 A |
2364 | if(real_map != target_map) |
2365 | vm_map_unlock_read(real_map); | |
2366 | vm_object_deallocate(object); | |
2367 | object = VM_OBJECT_NULL; | |
1c79356b A |
2368 | goto make_mem_done; |
2369 | } | |
9bccf70c | 2370 | |
2d21ac55 | 2371 | mappable_size = fault_info.hi_offset - obj_off; |
9bccf70c | 2372 | total_size = map_entry->vme_end - map_entry->vme_start; |
91447636 | 2373 | if(map_size > mappable_size) { |
9bccf70c A |
2374 | /* try to extend mappable size if the entries */ |
2375 | /* following are from the same object and are */ | |
2376 | /* compatible */ | |
2377 | next_entry = map_entry->vme_next; | |
2378 | /* lets see if the next map entry is still */ | |
2379 | /* pointing at this object and is contiguous */ | |
91447636 | 2380 | while(map_size > mappable_size) { |
3e170ce0 A |
2381 | if ((VME_OBJECT(next_entry) == object) && |
2382 | (next_entry->vme_start == | |
2383 | next_entry->vme_prev->vme_end) && | |
2384 | (VME_OFFSET(next_entry) == | |
2385 | (VME_OFFSET(next_entry->vme_prev) + | |
2386 | (next_entry->vme_prev->vme_end - | |
2387 | next_entry->vme_prev->vme_start)))) { | |
6d2010ae A |
2388 | if (mask_protections) { |
2389 | /* | |
2390 | * The caller asked us to use | |
2391 | * the "protections" as a mask, | |
2392 | * so restrict "protections" to | |
2393 | * what this mapping actually | |
2394 | * allows. | |
2395 | */ | |
2396 | protections &= next_entry->max_protection; | |
2397 | } | |
316670eb A |
2398 | if ((next_entry->wired_count) && |
2399 | (map_entry->wired_count == 0)) { | |
2400 | break; | |
2401 | } | |
9bccf70c | 2402 | if(((next_entry->max_protection) |
55e303ae | 2403 | & protections) != protections) { |
9bccf70c A |
2404 | break; |
2405 | } | |
55e303ae A |
2406 | if (next_entry->needs_copy != |
2407 | map_entry->needs_copy) | |
2408 | break; | |
9bccf70c A |
2409 | mappable_size += next_entry->vme_end |
2410 | - next_entry->vme_start; | |
2411 | total_size += next_entry->vme_end | |
2412 | - next_entry->vme_start; | |
2413 | next_entry = next_entry->vme_next; | |
2414 | } else { | |
2415 | break; | |
2416 | } | |
2417 | ||
2418 | } | |
2419 | } | |
2420 | ||
3e170ce0 A |
2421 | /* vm_map_entry_should_cow_for_true_share() checks for malloc tags, |
2422 | * so this is never true in the kernel */ |
2423 | if (!iskernel && vm_map_entry_should_cow_for_true_share(map_entry) && | |
e2d2fc5c A |
2424 | object->vo_size > map_size && |
2425 | map_size != 0) { | |
2426 | /* | |
2427 | * Set up the targeted range for copy-on-write to | |
2428 | * limit the impact of "true_share"/"copy_delay" to | |
2429 | * that range instead of the entire VM object... | |
2430 | */ | |
2431 | ||
2432 | vm_object_unlock(object); | |
2433 | if (vm_map_lock_read_to_write(target_map)) { | |
2434 | vm_object_deallocate(object); | |
2435 | target_map = original_map; | |
2436 | goto redo_lookup; | |
2437 | } | |
2438 | ||
39236c6e A |
2439 | vm_map_clip_start(target_map, |
2440 | map_entry, | |
3e170ce0 | 2441 | vm_map_trunc_page(map_start, |
39236c6e A |
2442 | VM_MAP_PAGE_MASK(target_map))); |
2443 | vm_map_clip_end(target_map, | |
2444 | map_entry, | |
3e170ce0 | 2445 | (vm_map_round_page(map_end, |
fe8ab488 | 2446 | VM_MAP_PAGE_MASK(target_map)))); |
e2d2fc5c A |
2447 | force_shadow = TRUE; |
2448 | ||
fe8ab488 | 2449 | if ((map_entry->vme_end - offset) < map_size) { |
3e170ce0 | 2450 | map_size = map_entry->vme_end - map_start; |
fe8ab488 A |
2451 | } |
2452 | total_size = map_entry->vme_end - map_entry->vme_start; | |
e2d2fc5c A |
2453 | |
2454 | vm_map_lock_write_to_read(target_map); | |
2455 | vm_object_lock(object); | |
2456 | } | |
e2d2fc5c | 2457 | |
39236c6e | 2458 | if (object->internal) { |
1c79356b A |
2459 | /* vm_map_lookup_locked will create a shadow if */ |
2460 | /* needs_copy is set but does not check for the */ | |
2461 | /* other two conditions shown. It is important to */ | |
2462 | /* set up an object which will not be pulled from */ | |
2463 | /* under us. */ | |
2464 | ||
e2d2fc5c A |
2465 | if (force_shadow || |
2466 | ((map_entry->needs_copy || | |
2467 | object->shadowed || | |
39236c6e | 2468 | (object->vo_size > total_size && |
3e170ce0 | 2469 | (VME_OFFSET(map_entry) != 0 || |
39236c6e A |
2470 | object->vo_size > |
2471 | vm_map_round_page(total_size, | |
2472 | VM_MAP_PAGE_MASK(target_map))))) | |
2473 | && !object->true_share)) { | |
91447636 A |
2474 | /* |
2475 | * We have to unlock the VM object before | |
2476 | * trying to upgrade the VM map lock, to | |
2477 | * honor lock ordering (map then object). | |
2478 | * Otherwise, we would deadlock if another | |
2479 | * thread holds a read lock on the VM map and | |
2480 | * is trying to acquire the VM object's lock. | |
2481 | * We still hold an extra reference on the | |
2482 | * VM object, guaranteeing that it won't | |
2483 | * disappear. | |
2484 | */ | |
2485 | vm_object_unlock(object); | |
2486 | ||
1c79356b | 2487 | if (vm_map_lock_read_to_write(target_map)) { |
91447636 A |
2488 | /* |
2489 | * We couldn't upgrade our VM map lock | |
2490 | * from "read" to "write" and we lost | |
2491 | * our "read" lock. | |
2492 | * Start all over again... | |
2493 | */ | |
2494 | vm_object_deallocate(object); /* extra ref */ | |
2495 | target_map = original_map; | |
1c79356b A |
2496 | goto redo_lookup; |
2497 | } | |
fe8ab488 | 2498 | #if 00 |
91447636 | 2499 | vm_object_lock(object); |
fe8ab488 | 2500 | #endif |
1c79356b | 2501 | |
55e303ae A |
2502 | /* |
2503 | * JMM - We need to avoid coming here when the object | |
2504 | * is wired by anybody, not just the current map. Why | |
2505 | * couldn't we use the standard vm_object_copy_quickly() | |
2506 | * approach here? | |
2507 | */ | |
2508 | ||
1c79356b | 2509 | /* create a shadow object */ |
3e170ce0 A |
2510 | VME_OBJECT_SHADOW(map_entry, total_size); |
2511 | shadow_object = VME_OBJECT(map_entry); | |
fe8ab488 | 2512 | #if 00 |
9bccf70c | 2513 | vm_object_unlock(object); |
fe8ab488 | 2514 | #endif |
91447636 | 2515 | |
0c530ab8 | 2516 | prot = map_entry->protection & ~VM_PROT_WRITE; |
2d21ac55 | 2517 | |
3e170ce0 A |
2518 | if (override_nx(target_map, |
2519 | VME_ALIAS(map_entry)) | |
2520 | && prot) | |
0c530ab8 | 2521 | prot |= VM_PROT_EXECUTE; |
2d21ac55 | 2522 | |
9bccf70c | 2523 | vm_object_pmap_protect( |
3e170ce0 | 2524 | object, VME_OFFSET(map_entry), |
9bccf70c A |
2525 | total_size, |
2526 | ((map_entry->is_shared | |
316670eb | 2527 | || target_map->mapped_in_other_pmaps) |
9bccf70c A |
2528 | ? PMAP_NULL : |
2529 | target_map->pmap), | |
2530 | map_entry->vme_start, | |
0c530ab8 | 2531 | prot); |
9bccf70c A |
2532 | total_size -= (map_entry->vme_end |
2533 | - map_entry->vme_start); | |
2534 | next_entry = map_entry->vme_next; | |
2535 | map_entry->needs_copy = FALSE; | |
2d21ac55 A |
2536 | |
2537 | vm_object_lock(shadow_object); | |
9bccf70c | 2538 | while (total_size) { |
316670eb A |
2539 | assert((next_entry->wired_count == 0) || |
2540 | (map_entry->wired_count)); | |
2541 | ||
3e170ce0 | 2542 | if (VME_OBJECT(next_entry) == object) { |
2d21ac55 | 2543 | vm_object_reference_locked(shadow_object); |
3e170ce0 A |
2544 | VME_OBJECT_SET(next_entry, |
2545 | shadow_object); | |
55e303ae | 2546 | vm_object_deallocate(object); |
3e170ce0 A |
2547 | VME_OFFSET_SET( |
2548 | next_entry, | |
2549 | (VME_OFFSET(next_entry->vme_prev) + | |
2550 | (next_entry->vme_prev->vme_end | |
2551 | - next_entry->vme_prev->vme_start))); | |
9bccf70c A |
2552 | next_entry->needs_copy = FALSE; |
2553 | } else { | |
2554 | panic("mach_make_memory_entry_64:" | |
2555 | " map entries out of sync\n"); | |
2556 | } | |
2557 | total_size -= | |
2558 | next_entry->vme_end | |
2559 | - next_entry->vme_start; | |
2560 | next_entry = next_entry->vme_next; | |
2561 | } | |
2562 | ||
91447636 A |
2563 | /* |
2564 | * Transfer our extra reference to the | |
2565 | * shadow object. | |
2566 | */ | |
2567 | vm_object_reference_locked(shadow_object); | |
2568 | vm_object_deallocate(object); /* extra ref */ | |
9bccf70c | 2569 | object = shadow_object; |
91447636 | 2570 | |
3e170ce0 A |
2571 | obj_off = ((local_offset - map_entry->vme_start) |
2572 | + VME_OFFSET(map_entry)); | |
1c79356b | 2573 | |
91447636 | 2574 | vm_map_lock_write_to_read(target_map); |
1c79356b A |
2575 | } |
2576 | } | |
2577 | ||
2578 | /* note: in the future we can (if necessary) allow for */ |
2579 | /* memory object lists; this would better support */ |
2580 | /* fragmentation, but is it necessary? The user should */ |
2581 | /* be encouraged to create address-space-oriented */ |
2582 | /* shared objects from CLEAN memory regions which have */ |
2583 | /* a known and defined history, i.e. no inheritance */ |
2584 | /* share; make this call before making the region the */ |
2585 | /* target of IPCs, etc. The code above, protecting */ |
2586 | /* against delayed copy, etc., is mostly defensive. */ |
2587 | ||
55e303ae A |
2588 | wimg_mode = object->wimg_bits; |
2589 | if(!(object->nophyscache)) { | |
2590 | if(access == MAP_MEM_IO) { | |
2591 | wimg_mode = VM_WIMG_IO; | |
2592 | } else if (access == MAP_MEM_COPYBACK) { | |
2593 | wimg_mode = VM_WIMG_USE_DEFAULT; | |
316670eb A |
2594 | } else if (access == MAP_MEM_INNERWBACK) { |
2595 | wimg_mode = VM_WIMG_INNERWBACK; | |
55e303ae A |
2596 | } else if (access == MAP_MEM_WTHRU) { |
2597 | wimg_mode = VM_WIMG_WTHRU; | |
2598 | } else if (access == MAP_MEM_WCOMB) { | |
2599 | wimg_mode = VM_WIMG_WCOMB; | |
2600 | } | |
2601 | } | |
d7e50217 | 2602 | |
fe8ab488 A |
2603 | #if VM_OBJECT_TRACKING_OP_TRUESHARE |
2604 | if (!object->true_share && | |
2605 | vm_object_tracking_inited) { | |
2606 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; | |
2607 | int num = 0; | |
2608 | ||
2609 | num = OSBacktrace(bt, | |
2610 | VM_OBJECT_TRACKING_BTDEPTH); | |
2611 | btlog_add_entry(vm_object_tracking_btlog, | |
2612 | object, | |
2613 | VM_OBJECT_TRACKING_OP_TRUESHARE, | |
2614 | bt, | |
2615 | num); | |
2616 | } | |
2617 | #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ | |
2618 | ||
de355530 | 2619 | object->true_share = TRUE; |
55e303ae A |
2620 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) |
2621 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
2622 | ||
91447636 A |
2623 | /* |
2624 | * The memory entry now points to this VM object and we | |
2625 | * need to hold a reference on the VM object. Use the extra | |
2626 | * reference we took earlier to keep the object alive when we | |
2627 | * had to unlock it. | |
2628 | */ | |
2629 | ||
55e303ae | 2630 | vm_map_unlock_read(target_map); |
91447636 A |
2631 | if(real_map != target_map) |
2632 | vm_map_unlock_read(real_map); | |
55e303ae | 2633 | |
6d2010ae A |
2634 | if (object->wimg_bits != wimg_mode) |
2635 | vm_object_change_wimg_mode(object, wimg_mode); | |
1c79356b A |
2636 | |
2637 | /* The size of the mapped entry that overlaps with our */ |
2638 | /* region targeted for sharing corresponds to: */ |
2639 | /* (entry_end - entry_start) - */ |
2640 | /* (offset of our beginning address within the entry); */ |
2641 | /* this is the "mappable_size" computed above: */ |
2642 | ||
91447636 A |
2643 | if(map_size > mappable_size) |
2644 | map_size = mappable_size; | |
2645 | ||
2646 | if (permission & MAP_MEM_NAMED_REUSE) { | |
2647 | /* | |
2648 | * Compare what we got with the "parent_entry". | |
2649 | * If they match, re-use the "parent_entry" instead | |
2650 | * of creating a new one. | |
2651 | */ | |
2652 | if (parent_entry != NULL && | |
2653 | parent_entry->backing.object == object && | |
2654 | parent_entry->internal == object->internal && | |
2655 | parent_entry->is_sub_map == FALSE && | |
2656 | parent_entry->is_pager == FALSE && | |
2657 | parent_entry->offset == obj_off && | |
2658 | parent_entry->protection == protections && | |
39236c6e | 2659 | parent_entry->size == map_size && |
3e170ce0 A |
2660 | ((!(use_data_addr || use_4K_compat) && |
2661 | (parent_entry->data_offset == 0)) || | |
2662 | ((use_data_addr || use_4K_compat) && | |
2663 | (parent_entry->data_offset == offset_in_page)))) { | |
91447636 A |
2664 | /* |
2665 | * We have a match: re-use "parent_entry". | |
2666 | */ | |
2667 | /* release our extra reference on object */ | |
2668 | vm_object_unlock(object); | |
2669 | vm_object_deallocate(object); | |
2670 | /* parent_entry->ref_count++; XXX ? */ | |
2671 | /* Get an extra send-right on handle */ | |
2672 | ipc_port_copy_send(parent_handle); | |
fe8ab488 | 2673 | |
3e170ce0 A |
2674 | *size = CAST_DOWN(vm_size_t, |
2675 | (parent_entry->size - | |
2676 | parent_entry->data_offset)); | |
91447636 A |
2677 | *object_handle = parent_handle; |
2678 | return KERN_SUCCESS; | |
2679 | } else { | |
2680 | /* | |
2681 | * No match: we need to create a new entry. | |
2682 | * fall through... | |
2683 | */ | |
2684 | } | |
2685 | } | |
2686 | ||
2687 | vm_object_unlock(object); | |
2688 | if (mach_memory_entry_allocate(&user_entry, &user_handle) | |
2689 | != KERN_SUCCESS) { | |
2690 | /* release our unused reference on the object */ | |
2691 | vm_object_deallocate(object); | |
2692 | return KERN_FAILURE; | |
2693 | } | |
1c79356b | 2694 | |
91447636 A |
2695 | user_entry->backing.object = object; |
2696 | user_entry->internal = object->internal; | |
2697 | user_entry->is_sub_map = FALSE; | |
2698 | user_entry->is_pager = FALSE; | |
2699 | user_entry->offset = obj_off; | |
39236c6e | 2700 | user_entry->data_offset = offset_in_page; |
6d2010ae A |
2701 | user_entry->protection = protections; |
2702 | SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection); | |
91447636 | 2703 | user_entry->size = map_size; |
1c79356b A |
2704 | |
2705 | /* user_object pager and internal fields are not used */ | |
2706 | /* when the object field is filled in. */ | |
2707 | ||
3e170ce0 A |
2708 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
2709 | user_entry->data_offset)); | |
1c79356b | 2710 | *object_handle = user_handle; |
1c79356b | 2711 | return KERN_SUCCESS; |
1c79356b | 2712 | |
91447636 | 2713 | } else { |
1c79356b | 2714 | /* The new object will be based on an existing named object */ |
91447636 | 2715 | if (parent_entry == NULL) { |
1c79356b A |
2716 | kr = KERN_INVALID_ARGUMENT; |
2717 | goto make_mem_done; | |
2718 | } | |
39236c6e | 2719 | |
3e170ce0 | 2720 | if (use_data_addr || use_4K_compat) { |
39236c6e A |
2721 | /* |
2722 | * Submaps and pagers should only be accessible from within |
2723 | * the kernel, which shouldn't use the data address flag, so we can safely panic here. |
2724 | */ | |
2725 | if (parent_entry->is_pager || parent_entry->is_sub_map) { | |
2726 | panic("Shouldn't be using data address with a parent entry that is a submap or pager."); | |
2727 | } | |
2728 | /* | |
2729 | * Account for offset to data in parent entry and | |
2730 | * compute our own offset to data. | |
2731 | */ | |
2732 | if((offset + *size + parent_entry->data_offset) > parent_entry->size) { | |
2733 | kr = KERN_INVALID_ARGUMENT; | |
2734 | goto make_mem_done; | |
2735 | } | |
2736 | ||
3e170ce0 A |
2737 | map_start = vm_map_trunc_page(offset + parent_entry->data_offset, PAGE_MASK); |
2738 | offset_in_page = (offset + parent_entry->data_offset) - map_start; | |
2739 | if (use_4K_compat) | |
2740 | offset_in_page &= ~((signed)(0xFFF)); | |
2741 | map_end = vm_map_round_page(offset + parent_entry->data_offset + *size, PAGE_MASK); | |
2742 | map_size = map_end - map_start; | |
39236c6e | 2743 | } else { |
3e170ce0 A |
2744 | map_end = vm_map_round_page(offset + *size, PAGE_MASK); |
2745 | map_size = map_end - map_start; | |
39236c6e A |
2746 | offset_in_page = 0; |
2747 | ||
2748 | if((offset + map_size) > parent_entry->size) { | |
2749 | kr = KERN_INVALID_ARGUMENT; | |
2750 | goto make_mem_done; | |
2751 | } | |
1c79356b A |
2752 | } |
2753 | ||
6d2010ae A |
2754 | if (mask_protections) { |
2755 | /* | |
2756 | * The caller asked us to use the "protections" as | |
2757 | * a mask, so restrict "protections" to what this | |
2758 | * mapping actually allows. | |
2759 | */ | |
2760 | protections &= parent_entry->protection; | |
2761 | } | |
91447636 A |
2762 | if((protections & parent_entry->protection) != protections) { |
2763 | kr = KERN_PROTECTION_FAILURE; | |
2764 | goto make_mem_done; | |
2765 | } | |
2766 | ||
2767 | if (mach_memory_entry_allocate(&user_entry, &user_handle) | |
2768 | != KERN_SUCCESS) { | |
2769 | kr = KERN_FAILURE; | |
2770 | goto make_mem_done; | |
55e303ae | 2771 | } |
91447636 A |
2772 | |
2773 | user_entry->size = map_size; | |
3e170ce0 | 2774 | user_entry->offset = parent_entry->offset + map_start; |
39236c6e | 2775 | user_entry->data_offset = offset_in_page; |
91447636 A |
2776 | user_entry->is_sub_map = parent_entry->is_sub_map; |
2777 | user_entry->is_pager = parent_entry->is_pager; | |
39236c6e | 2778 | user_entry->is_copy = parent_entry->is_copy; |
91447636 A |
2779 | user_entry->internal = parent_entry->internal; |
2780 | user_entry->protection = protections; | |
2781 | ||
2782 | if(access != MAP_MEM_NOOP) { | |
2783 | SET_MAP_MEM(access, user_entry->protection); | |
1c79356b | 2784 | } |
91447636 A |
2785 | |
2786 | if(parent_entry->is_sub_map) { | |
2787 | user_entry->backing.map = parent_entry->backing.map; | |
2788 | vm_map_lock(user_entry->backing.map); | |
2789 | user_entry->backing.map->ref_count++; | |
2790 | vm_map_unlock(user_entry->backing.map); | |
1c79356b | 2791 | } |
91447636 A |
2792 | else if (parent_entry->is_pager) { |
2793 | user_entry->backing.pager = parent_entry->backing.pager; | |
2794 | /* JMM - don't we need a reference here? */ | |
2795 | } else { | |
2796 | object = parent_entry->backing.object; | |
2797 | assert(object != VM_OBJECT_NULL); | |
2798 | user_entry->backing.object = object; | |
2799 | /* we now point to this object, hold on */ | |
2800 | vm_object_reference(object); | |
2801 | vm_object_lock(object); | |
fe8ab488 A |
2802 | #if VM_OBJECT_TRACKING_OP_TRUESHARE |
2803 | if (!object->true_share && | |
2804 | vm_object_tracking_inited) { | |
2805 | void *bt[VM_OBJECT_TRACKING_BTDEPTH]; | |
2806 | int num = 0; | |
2807 | ||
2808 | num = OSBacktrace(bt, | |
2809 | VM_OBJECT_TRACKING_BTDEPTH); | |
2810 | btlog_add_entry(vm_object_tracking_btlog, | |
2811 | object, | |
2812 | VM_OBJECT_TRACKING_OP_TRUESHARE, | |
2813 | bt, | |
2814 | num); | |
2815 | } | |
2816 | #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */ | |
2817 | ||
91447636 A |
2818 | object->true_share = TRUE; |
2819 | if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) | |
2820 | object->copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
2821 | vm_object_unlock(object); | |
1c79356b | 2822 | } |
3e170ce0 A |
2823 | *size = CAST_DOWN(vm_size_t, (user_entry->size - |
2824 | user_entry->data_offset)); | |
1c79356b A |
2825 | *object_handle = user_handle; |
2826 | return KERN_SUCCESS; | |
2827 | } | |
2828 | ||
1c79356b | 2829 | make_mem_done: |
91447636 | 2830 | if (user_handle != IP_NULL) { |
0b4c1975 A |
2831 | /* |
2832 | * Releasing "user_handle" causes the kernel object | |
2833 | * associated with it ("user_entry" here) to also be | |
2834 | * released and freed. | |
2835 | */ | |
2836 | mach_memory_entry_port_release(user_handle); | |
91447636 A |
2837 | } |
2838 | return kr; | |
2839 | } | |
2840 | ||
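/*
 * Illustrative user-space sketch of the two-stage pattern described
 * in the comment above mach_make_memory_entry_64(): take out a handle
 * on a range of our own address space, then map that handle at a new
 * address (here, back into the same task).  Header names and the
 * mach_vm_map() prototype are assumptions from the Mach user headers.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
remap_via_entry(mach_vm_address_t src, mach_vm_size_t len,
    mach_vm_address_t *dst)
{
	memory_object_size_t size = len;
	mach_port_t entry = MACH_PORT_NULL;
	kern_return_t kr;

	/* Stage 1: get a handle on [src, src + len). */
	kr = mach_make_memory_entry_64(mach_task_self(), &size, src,
	    VM_PROT_READ | VM_PROT_WRITE, &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* Stage 2: map the handle somewhere else. */
	*dst = 0;
	kr = mach_vm_map(mach_task_self(), dst, size, 0 /* mask */,
	    VM_FLAGS_ANYWHERE, entry, 0 /* offset */, FALSE /* copy */,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);

	mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
#endif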
2841 | kern_return_t | |
2842 | _mach_make_memory_entry( | |
2843 | vm_map_t target_map, | |
2844 | memory_object_size_t *size, | |
2845 | memory_object_offset_t offset, | |
2846 | vm_prot_t permission, | |
2847 | ipc_port_t *object_handle, | |
2848 | ipc_port_t parent_entry) | |
2849 | { | |
2d21ac55 | 2850 | memory_object_size_t mo_size; |
91447636 A |
2851 | kern_return_t kr; |
2852 | ||
2d21ac55 | 2853 | mo_size = (memory_object_size_t)*size; |
91447636 A |
2854 | kr = mach_make_memory_entry_64(target_map, &mo_size, |
2855 | (memory_object_offset_t)offset, permission, object_handle, | |
2856 | parent_entry); | |
2857 | *size = mo_size; | |
1c79356b A |
2858 | return kr; |
2859 | } | |
2860 | ||
2861 | kern_return_t | |
2862 | mach_make_memory_entry( | |
2863 | vm_map_t target_map, | |
2864 | vm_size_t *size, | |
2865 | vm_offset_t offset, | |
2866 | vm_prot_t permission, | |
2867 | ipc_port_t *object_handle, | |
2868 | ipc_port_t parent_entry) | |
91447636 | 2869 | { |
2d21ac55 | 2870 | memory_object_size_t mo_size; |
1c79356b A |
2871 | kern_return_t kr; |
2872 | ||
2d21ac55 | 2873 | mo_size = (memory_object_size_t)*size; |
91447636 A |
2874 | kr = mach_make_memory_entry_64(target_map, &mo_size, |
2875 | (memory_object_offset_t)offset, permission, object_handle, | |
1c79356b | 2876 | parent_entry); |
91447636 | 2877 | *size = CAST_DOWN(vm_size_t, mo_size); |
1c79356b A |
2878 | return kr; |
2879 | } | |
2880 | ||
2881 | /* | |
91447636 A |
2882 | * task_wire |
2883 | * | |
2884 | * Set or clear the map's wiring_required flag. This flag, if set, | |
2885 | * will cause all future virtual memory allocation to allocate | |
2886 | * user wired memory. Unwiring pages wired down as a result of | |
2887 | * this routine is done with the vm_wire interface. | |
1c79356b | 2888 | */ |
1c79356b | 2889 | kern_return_t |
91447636 A |
2890 | task_wire( |
2891 | vm_map_t map, | |
2892 | boolean_t must_wire) | |
2893 | { | |
2894 | if (map == VM_MAP_NULL) | |
2895 | return(KERN_INVALID_ARGUMENT); | |
2896 | ||
2897 | if (must_wire) | |
2898 | map->wiring_required = TRUE; | |
2899 | else | |
2900 | map->wiring_required = FALSE; | |
2901 | ||
2902 | return(KERN_SUCCESS); | |
2903 | } | |
2904 | ||
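/*
 * Hedged sketch: task_wire() is assumed to be reachable over MIG with
 * a task port standing in for the vm_map_t.  While the flag is set,
 * subsequent allocations in the task come back user-wired and are
 * unwired via the vm_wire interface, per the comment above.
 */
#if 0
#include <mach/mach.h>

static void
wire_future_allocations(boolean_t on)
{
	(void) task_wire(mach_task_self(), on);
}
#endif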
2905 | __private_extern__ kern_return_t | |
2906 | mach_memory_entry_allocate( | |
2907 | vm_named_entry_t *user_entry_p, | |
2908 | ipc_port_t *user_handle_p) | |
1c79356b | 2909 | { |
91447636 | 2910 | vm_named_entry_t user_entry; |
1c79356b | 2911 | ipc_port_t user_handle; |
91447636 | 2912 | ipc_port_t previous; |
1c79356b | 2913 | |
91447636 A |
2914 | user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry); |
2915 | if (user_entry == NULL) | |
1c79356b | 2916 | return KERN_FAILURE; |
1c79356b | 2917 | |
91447636 | 2918 | named_entry_lock_init(user_entry); |
1c79356b | 2919 | |
91447636 A |
2920 | user_handle = ipc_port_alloc_kernel(); |
2921 | if (user_handle == IP_NULL) { | |
2922 | kfree(user_entry, sizeof *user_entry); | |
2923 | return KERN_FAILURE; | |
2924 | } | |
1c79356b A |
2925 | ip_lock(user_handle); |
2926 | ||
2927 | /* make a sonce right */ | |
2928 | user_handle->ip_sorights++; | |
2929 | ip_reference(user_handle); | |
2930 | ||
2931 | user_handle->ip_destination = IP_NULL; | |
2932 | user_handle->ip_receiver_name = MACH_PORT_NULL; | |
2933 | user_handle->ip_receiver = ipc_space_kernel; | |
2934 | ||
2935 | /* make a send right */ | |
2936 | user_handle->ip_mscount++; | |
2937 | user_handle->ip_srights++; | |
2938 | ip_reference(user_handle); | |
2939 | ||
2940 | ipc_port_nsrequest(user_handle, 1, user_handle, &previous); | |
2941 | /* nsrequest unlocks user_handle */ | |
2942 | ||
91447636 A |
2943 | user_entry->backing.pager = NULL; |
2944 | user_entry->is_sub_map = FALSE; | |
2945 | user_entry->is_pager = FALSE; | |
39236c6e | 2946 | user_entry->is_copy = FALSE; |
91447636 | 2947 | user_entry->internal = FALSE; |
2d21ac55 A |
2948 | user_entry->size = 0; |
2949 | user_entry->offset = 0; | |
39236c6e | 2950 | user_entry->data_offset = 0; |
2d21ac55 | 2951 | user_entry->protection = VM_PROT_NONE; |
91447636 | 2952 | user_entry->ref_count = 1; |
1c79356b | 2953 | |
91447636 A |
2954 | ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry, |
2955 | IKOT_NAMED_ENTRY); | |
1c79356b | 2956 | |
91447636 A |
2957 | *user_entry_p = user_entry; |
2958 | *user_handle_p = user_handle; | |
1c79356b | 2959 | |
91447636 A |
2960 | return KERN_SUCCESS; |
2961 | } | |
1c79356b | 2962 | |
91447636 A |
2963 | /* |
2964 | * mach_memory_object_memory_entry_64 | |
2965 | * | |
2966 | * Create a named entry backed by the provided pager. | |
2967 | * | |
2968 | * JMM - we need to hold a reference on the pager - | |
2969 | * and release it when the named entry is destroyed. | |
2970 | */ | |
2971 | kern_return_t | |
2972 | mach_memory_object_memory_entry_64( | |
2973 | host_t host, | |
2974 | boolean_t internal, | |
2975 | vm_object_offset_t size, | |
2976 | vm_prot_t permission, | |
2977 | memory_object_t pager, | |
2978 | ipc_port_t *entry_handle) | |
2979 | { | |
2980 | unsigned int access; | |
2981 | vm_named_entry_t user_entry; | |
2982 | ipc_port_t user_handle; | |
2983 | ||
2984 | if (host == HOST_NULL) | |
2985 | return(KERN_INVALID_HOST); | |
2986 | ||
2987 | if (mach_memory_entry_allocate(&user_entry, &user_handle) | |
2988 | != KERN_SUCCESS) { | |
2989 | return KERN_FAILURE; | |
2990 | } | |
2991 | ||
2992 | user_entry->backing.pager = pager; | |
2993 | user_entry->size = size; | |
2994 | user_entry->offset = 0; | |
2995 | user_entry->protection = permission & VM_PROT_ALL; | |
2996 | access = GET_MAP_MEM(permission); | |
2997 | SET_MAP_MEM(access, user_entry->protection); | |
2998 | user_entry->internal = internal; | |
2999 | user_entry->is_sub_map = FALSE; | |
3000 | user_entry->is_pager = TRUE; | |
3001 | assert(user_entry->ref_count == 1); | |
3002 | ||
3003 | *entry_handle = user_handle; | |
1c79356b | 3004 | return KERN_SUCCESS; |
91447636 A |
3005 | } |
3006 | ||
3007 | kern_return_t | |
3008 | mach_memory_object_memory_entry( | |
3009 | host_t host, | |
3010 | boolean_t internal, | |
3011 | vm_size_t size, | |
3012 | vm_prot_t permission, | |
3013 | memory_object_t pager, | |
3014 | ipc_port_t *entry_handle) | |
3015 | { | |
3016 | return mach_memory_object_memory_entry_64( host, internal, | |
3017 | (vm_object_offset_t)size, permission, pager, entry_handle); | |
3018 | } | |
3019 | ||
3020 | ||
3021 | kern_return_t | |
3022 | mach_memory_entry_purgable_control( | |
3023 | ipc_port_t entry_port, | |
3024 | vm_purgable_t control, | |
3025 | int *state) | |
3026 | { | |
3027 | kern_return_t kr; | |
3028 | vm_named_entry_t mem_entry; | |
3029 | vm_object_t object; | |
1c79356b | 3030 | |
91447636 A |
3031 | if (entry_port == IP_NULL || |
3032 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { | |
3033 | return KERN_INVALID_ARGUMENT; | |
3034 | } | |
2d21ac55 A |
3035 | if (control != VM_PURGABLE_SET_STATE && |
3036 | control != VM_PURGABLE_GET_STATE) | |
3037 | return(KERN_INVALID_ARGUMENT); | |
3038 | ||
3039 | if (control == VM_PURGABLE_SET_STATE && | |
b0d623f7 | 3040 | (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) || |
2d21ac55 A |
3041 | ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK))) |
3042 | return(KERN_INVALID_ARGUMENT); | |
1c79356b | 3043 | |
91447636 | 3044 | mem_entry = (vm_named_entry_t) entry_port->ip_kobject; |
1c79356b | 3045 | |
91447636 | 3046 | named_entry_lock(mem_entry); |
1c79356b | 3047 | |
39236c6e A |
3048 | if (mem_entry->is_sub_map || |
3049 | mem_entry->is_pager || | |
3050 | mem_entry->is_copy) { | |
91447636 | 3051 | named_entry_unlock(mem_entry); |
1c79356b A |
3052 | return KERN_INVALID_ARGUMENT; |
3053 | } | |
91447636 A |
3054 | |
3055 | object = mem_entry->backing.object; | |
3056 | if (object == VM_OBJECT_NULL) { | |
3057 | named_entry_unlock(mem_entry); | |
1c79356b A |
3058 | return KERN_INVALID_ARGUMENT; |
3059 | } | |
91447636 A |
3060 | |
3061 | vm_object_lock(object); | |
3062 | ||
3063 | /* check that named entry covers entire object ? */ | |
6d2010ae | 3064 | if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) { |
91447636 A |
3065 | vm_object_unlock(object); |
3066 | named_entry_unlock(mem_entry); | |
3067 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 3068 | } |
91447636 A |
3069 | |
3070 | named_entry_unlock(mem_entry); | |
3071 | ||
3072 | kr = vm_object_purgable_control(object, control, state); | |
3073 | ||
3074 | vm_object_unlock(object); | |
3075 | ||
3076 | return kr; | |
1c79356b A |
3077 | } |
3078 | ||
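/*
 * Illustrative sketch tying two paths in this file together: create a
 * purgeable named entry via the MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE
 * case of mach_make_memory_entry_64() above, then drive its state
 * through the entry port.  User-space header names are assumptions.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
purgeable_entry_example(memory_object_size_t size)
{
	mach_port_t entry = MACH_PORT_NULL;
	kern_return_t kr;
	int state;

	/* MAP_MEM_PURGABLE requires VM_PROT_WRITE, per the checks above. */
	kr = mach_make_memory_entry_64(mach_task_self(), &size, 0,
	    MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE |
	    VM_PROT_READ | VM_PROT_WRITE,
	    &entry, MACH_PORT_NULL);
	if (kr != KERN_SUCCESS)
		return kr;

	/* Mark the backing object volatile; old state comes back in "state". */
	state = VM_PURGABLE_VOLATILE;
	kr = mach_memory_entry_purgable_control(entry,
	    VM_PURGABLE_SET_STATE, &state);

	mach_port_deallocate(mach_task_self(), entry);
	return kr;
}
#endif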
39236c6e A |
3079 | kern_return_t |
3080 | mach_memory_entry_get_page_counts( | |
3081 | ipc_port_t entry_port, | |
3082 | unsigned int *resident_page_count, | |
3083 | unsigned int *dirty_page_count) | |
3084 | { | |
3085 | kern_return_t kr; | |
3086 | vm_named_entry_t mem_entry; | |
3087 | vm_object_t object; | |
3088 | vm_object_offset_t offset; | |
3089 | vm_object_size_t size; | |
3090 | ||
3091 | if (entry_port == IP_NULL || | |
3092 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { | |
3093 | return KERN_INVALID_ARGUMENT; | |
3094 | } | |
3095 | ||
3096 | mem_entry = (vm_named_entry_t) entry_port->ip_kobject; | |
3097 | ||
3098 | named_entry_lock(mem_entry); | |
3099 | ||
3100 | if (mem_entry->is_sub_map || | |
3101 | mem_entry->is_pager || | |
3102 | mem_entry->is_copy) { | |
3103 | named_entry_unlock(mem_entry); | |
3104 | return KERN_INVALID_ARGUMENT; | |
3105 | } | |
3106 | ||
3107 | object = mem_entry->backing.object; | |
3108 | if (object == VM_OBJECT_NULL) { | |
3109 | named_entry_unlock(mem_entry); | |
3110 | return KERN_INVALID_ARGUMENT; | |
3111 | } | |
3112 | ||
3113 | vm_object_lock(object); | |
3114 | ||
3115 | offset = mem_entry->offset; | |
3116 | size = mem_entry->size; | |
3117 | ||
3118 | named_entry_unlock(mem_entry); | |
3119 | ||
3120 | kr = vm_object_get_page_counts(object, offset, size, resident_page_count, dirty_page_count); | |
3121 | ||
3122 | vm_object_unlock(object); | |
3123 | ||
3124 | return kr; | |
3125 | } | |
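/*
 * Illustrative sketch, not part of the original file: sampling the
 * resident/dirty footprint of a named entry's backing object.  Both
 * out-parameters are forwarded to vm_object_get_page_counts(), which
 * is assumed to tolerate a NULL pointer for a count the caller does
 * not want.  The example_ name is hypothetical.
 */
static void
example_log_page_counts(ipc_port_t entry_port)
{
	unsigned int	resident = 0;
	unsigned int	dirty = 0;

	if (mach_memory_entry_get_page_counts(entry_port,
	        &resident, &dirty) == KERN_SUCCESS) {
		printf("named entry: %u resident, %u dirty\n",
		    resident, dirty);
	}
}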
3126 | ||
91447636 A |
3127 | /* |
3128 | * mach_memory_entry_port_release: | |
3129 | * | |
3130 | * Release a send right on a named entry port. This is the correct | |
3131 | * way to destroy a named entry. When the last right on the port is | |
3132 | * released, ipc_kobject_destroy() will call mach_destroy_memory_entry(). | |
3133 | */ | |
3134 | void | |
3135 | mach_memory_entry_port_release( | |
3136 | ipc_port_t port) | |
3137 | { | |
3138 | assert(ip_kotype(port) == IKOT_NAMED_ENTRY); | |
3139 | ipc_port_release_send(port); | |
3140 | } | |
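/*
 * Illustrative sketch, not part of the original file: the in-kernel
 * life cycle the comment above describes.  `entry_port` is assumed
 * to be a send right returned by mach_make_memory_entry_64(); when
 * the holder is done with it, it releases the right instead of
 * calling mach_destroy_memory_entry() directly (see the NOTE below).
 */
static void
example_release_entry(ipc_port_t entry_port)
{
	/* ... map or query the entry while the right is held ... */

	/*
	 * Dropping the last send right lets ipc_kobject_destroy()
	 * invoke mach_destroy_memory_entry() for us.
	 */
	mach_memory_entry_port_release(entry_port);
}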
1c79356b | 3141 | |
91447636 A |
3142 | /* |
3143 | * mach_destroy_memory_entry: | |
3144 | * | |
3145 | * Drops a reference on a memory entry and destroys the memory entry if | |
3146 | * there are no more references on it. | |
3147 | * NOTE: This routine should not be called to destroy a memory entry from the | |
3148 | * kernel, as it will not release the Mach port associated with the memory | |
3149 | * entry. The proper way to destroy a memory entry in the kernel is to | |
3150 | * call mach_memory_entry_port_release() to release the kernel's send right on | 
3151 | * the memory entry's port. When the last send right is released, the memory | |
3152 | * entry will be destroyed via ipc_kobject_destroy(). | |
3153 | */ | |
1c79356b A |
3154 | void |
3155 | mach_destroy_memory_entry( | |
3156 | ipc_port_t port) | |
3157 | { | |
3158 | vm_named_entry_t named_entry; | |
3159 | #if MACH_ASSERT | |
3160 | assert(ip_kotype(port) == IKOT_NAMED_ENTRY); | |
3161 | #endif /* MACH_ASSERT */ | |
3162 | named_entry = (vm_named_entry_t)port->ip_kobject; | |
316670eb A |
3163 | |
3164 | named_entry_lock(named_entry); | |
91447636 | 3165 | named_entry->ref_count -= 1; |
316670eb | 3166 | |
1c79356b | 3167 | if(named_entry->ref_count == 0) { |
91447636 | 3168 | if (named_entry->is_sub_map) { |
1c79356b | 3169 | vm_map_deallocate(named_entry->backing.map); |
39236c6e A |
3170 | } else if (named_entry->is_pager) { |
3171 | /* JMM - need to drop reference on pager in that case */ | |
3172 | } else if (named_entry->is_copy) { | |
3173 | vm_map_copy_discard(named_entry->backing.copy); | |
3174 | } else { | |
3175 | /* release the VM object we've been pointing to */ | |
91447636 | 3176 | vm_object_deallocate(named_entry->backing.object); |
39236c6e | 3177 | } |
91447636 | 3178 | |
316670eb A |
3179 | named_entry_unlock(named_entry); |
3180 | named_entry_lock_destroy(named_entry); | |
91447636 A |
3181 | |
3182 | kfree((void *) port->ip_kobject, | |
3183 | sizeof (struct vm_named_entry)); | |
1c79356b | 3184 | } else |
316670eb | 3185 | named_entry_unlock(named_entry); |
1c79356b A |
3186 | } |
3187 | ||
0c530ab8 A |
3188 | /* Allow manipulation of individual page state. This is actually part of */ |
3189 | /* the UPL regimen but takes place on the memory entry rather than on a UPL */ | |
3190 | ||
3191 | kern_return_t | |
3192 | mach_memory_entry_page_op( | |
3193 | ipc_port_t entry_port, | |
3194 | vm_object_offset_t offset, | |
3195 | int ops, | |
3196 | ppnum_t *phys_entry, | |
3197 | int *flags) | |
3198 | { | |
3199 | vm_named_entry_t mem_entry; | |
3200 | vm_object_t object; | |
3201 | kern_return_t kr; | |
3202 | ||
3203 | if (entry_port == IP_NULL || | |
3204 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { | |
3205 | return KERN_INVALID_ARGUMENT; | |
3206 | } | |
3207 | ||
3208 | mem_entry = (vm_named_entry_t) entry_port->ip_kobject; | |
3209 | ||
3210 | named_entry_lock(mem_entry); | |
3211 | ||
39236c6e A |
3212 | if (mem_entry->is_sub_map || |
3213 | mem_entry->is_pager || | |
3214 | mem_entry->is_copy) { | |
0c530ab8 A |
3215 | named_entry_unlock(mem_entry); |
3216 | return KERN_INVALID_ARGUMENT; | |
3217 | } | |
3218 | ||
3219 | object = mem_entry->backing.object; | |
3220 | if (object == VM_OBJECT_NULL) { | |
3221 | named_entry_unlock(mem_entry); | |
3222 | return KERN_INVALID_ARGUMENT; | |
3223 | } | |
3224 | ||
3225 | vm_object_reference(object); | |
3226 | named_entry_unlock(mem_entry); | |
3227 | ||
3228 | kr = vm_object_page_op(object, offset, ops, phys_entry, flags); | |
3229 | ||
3230 | vm_object_deallocate(object); | |
3231 | ||
3232 | return kr; | |
3233 | } | |
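/*
 * Illustrative sketch, not part of the original file: a pure query
 * through the page-op path.  With no modifying ops requested (ops ==
 * 0), vm_object_page_op() is assumed to simply report the page's
 * state bits (UPL_POP_BUSY, UPL_POP_DIRTY, ...) and its physical
 * page number, failing if the page is not resident.  The example_
 * name is hypothetical.
 */
static kern_return_t
example_query_page(ipc_port_t entry_port, vm_object_offset_t offset,
    ppnum_t *pnum, int *flags)
{
	*pnum = 0;
	*flags = 0;
	return mach_memory_entry_page_op(entry_port, offset,
	    0 /* query only */, pnum, flags);
}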
3234 | ||
3235 | /* | |
3236 | * mach_memory_entry_range_op offers a performance enhancement over | 
3237 | * mach_memory_entry_page_op for operations that do not require per-page | 
3238 | * state to be returned to the caller. page_op was created as a low-cost | 
3239 | * alternative to page manipulation via UPLs when only a single page is | 
3240 | * involved. range_op extends the _op family of functions to multiple | 
3241 | * pages; because no per-page state is handled, the caller avoids the | 
3242 | * overhead of the UPL structures. | 
3243 | */ | |
3244 | ||
3245 | kern_return_t | |
3246 | mach_memory_entry_range_op( | |
3247 | ipc_port_t entry_port, | |
3248 | vm_object_offset_t offset_beg, | |
3249 | vm_object_offset_t offset_end, | |
3250 | int ops, | |
3251 | int *range) | |
3252 | { | |
3253 | vm_named_entry_t mem_entry; | |
3254 | vm_object_t object; | |
3255 | kern_return_t kr; | |
3256 | ||
3257 | if (entry_port == IP_NULL || | |
3258 | ip_kotype(entry_port) != IKOT_NAMED_ENTRY) { | |
3259 | return KERN_INVALID_ARGUMENT; | |
3260 | } | |
3261 | ||
3262 | mem_entry = (vm_named_entry_t) entry_port->ip_kobject; | |
3263 | ||
3264 | named_entry_lock(mem_entry); | |
3265 | ||
39236c6e A |
3266 | if (mem_entry->is_sub_map || |
3267 | mem_entry->is_pager || | |
3268 | mem_entry->is_copy) { | |
0c530ab8 A |
3269 | named_entry_unlock(mem_entry); |
3270 | return KERN_INVALID_ARGUMENT; | |
3271 | } | |
3272 | ||
3273 | object = mem_entry->backing.object; | |
3274 | if (object == VM_OBJECT_NULL) { | |
3275 | named_entry_unlock(mem_entry); | |
3276 | return KERN_INVALID_ARGUMENT; | |
3277 | } | |
3278 | ||
3279 | vm_object_reference(object); | |
3280 | named_entry_unlock(mem_entry); | |
3281 | ||
3282 | kr = vm_object_range_op(object, | |
3283 | offset_beg, | |
3284 | offset_end, | |
3285 | ops, | |
b0d623f7 | 3286 | (uint32_t *) range); |
0c530ab8 A |
3287 | |
3288 | vm_object_deallocate(object); | |
3289 | ||
3290 | return kr; | |
3291 | } | |
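/*
 * Illustrative sketch, not part of the original file: measuring how
 * much of a range is resident without paying for a UPL.  UPL_ROP_PRESENT
 * is assumed to report, through `range`, the extent of the leading
 * run of resident pages in [offset_beg, offset_end).  The example_
 * name is hypothetical.
 */
static int
example_present_extent(ipc_port_t entry_port,
    vm_object_offset_t beg, vm_object_offset_t end)
{
	int	range = 0;

	if (mach_memory_entry_range_op(entry_port, beg, end,
	        UPL_ROP_PRESENT, &range) != KERN_SUCCESS)
		return 0;
	return range;	/* bytes, counted from offset_beg */
}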
1c79356b | 3292 | |
1c79356b A |
3293 | |
3294 | kern_return_t | |
3295 | set_dp_control_port( | |
3296 | host_priv_t host_priv, | |
3297 | ipc_port_t control_port) | |
3298 | { | |
3299 | if (host_priv == HOST_PRIV_NULL) | |
3300 | return (KERN_INVALID_HOST); | |
0b4e3aa0 A |
3301 | |
3302 | if (IP_VALID(dynamic_pager_control_port)) | |
3303 | ipc_port_release_send(dynamic_pager_control_port); | |
3304 | ||
1c79356b A |
3305 | dynamic_pager_control_port = control_port; |
3306 | return KERN_SUCCESS; | |
3307 | } | |
3308 | ||
3309 | kern_return_t | |
3310 | get_dp_control_port( | |
3311 | host_priv_t host_priv, | |
3312 | ipc_port_t *control_port) | |
3313 | { | |
3314 | if (host_priv == HOST_PRIV_NULL) | |
3315 | return (KERN_INVALID_HOST); | |
0b4e3aa0 A |
3316 | |
3317 | *control_port = ipc_port_copy_send(dynamic_pager_control_port); | |
1c79356b A |
3318 | return KERN_SUCCESS; |
3319 | ||
3320 | } | |
3321 | ||
91447636 | 3322 | /* ******* Temporary Internal calls to UPL for BSD ***** */ |
1c79356b | 3323 | |
91447636 A |
3324 | extern int kernel_upl_map( |
3325 | vm_map_t map, | |
3326 | upl_t upl, | |
3327 | vm_offset_t *dst_addr); | |
1c79356b | 3328 | |
91447636 A |
3329 | extern int kernel_upl_unmap( |
3330 | vm_map_t map, | |
3331 | upl_t upl); | |
150bd074 | 3332 | |
91447636 A |
3333 | extern int kernel_upl_commit( |
3334 | upl_t upl, | |
3335 | upl_page_info_t *pl, | |
3336 | mach_msg_type_number_t count); | |
1c79356b | 3337 | |
91447636 A |
3338 | extern int kernel_upl_commit_range( |
3339 | upl_t upl, | |
3340 | upl_offset_t offset, | |
3341 | upl_size_t size, | |
3342 | int flags, | |
3343 | upl_page_info_array_t pl, | |
3344 | mach_msg_type_number_t count); | |
1c79356b | 3345 | |
91447636 A |
3346 | extern int kernel_upl_abort( |
3347 | upl_t upl, | |
3348 | int abort_type); | |
1c79356b | 3349 | |
91447636 A |
3350 | extern int kernel_upl_abort_range( |
3351 | upl_t upl, | |
3352 | upl_offset_t offset, | |
3353 | upl_size_t size, | |
3354 | int abort_flags); | |
1c79356b | 3355 | |
1c79356b | 3356 | |
1c79356b A |
3357 | kern_return_t |
3358 | kernel_upl_map( | |
3359 | vm_map_t map, | |
3360 | upl_t upl, | |
3361 | vm_offset_t *dst_addr) | |
3362 | { | |
91447636 | 3363 | return vm_upl_map(map, upl, dst_addr); |
1c79356b A |
3364 | } |
3365 | ||
3366 | ||
3367 | kern_return_t | |
3368 | kernel_upl_unmap( | |
3369 | vm_map_t map, | |
0b4e3aa0 | 3370 | upl_t upl) |
1c79356b | 3371 | { |
91447636 | 3372 | return vm_upl_unmap(map, upl); |
1c79356b A |
3373 | } |
3374 | ||
3375 | kern_return_t | |
3376 | kernel_upl_commit( | |
91447636 A |
3377 | upl_t upl, |
3378 | upl_page_info_t *pl, | |
0b4e3aa0 | 3379 | mach_msg_type_number_t count) |
1c79356b | 3380 | { |
0b4e3aa0 A |
3381 | kern_return_t kr; |
3382 | ||
3383 | kr = upl_commit(upl, pl, count); | |
3384 | upl_deallocate(upl); | |
1c79356b A |
3385 | return kr; |
3386 | } | |
3387 | ||
0b4e3aa0 | 3388 | |
1c79356b A |
3389 | kern_return_t |
3390 | kernel_upl_commit_range( | |
3391 | upl_t upl, | |
91447636 A |
3392 | upl_offset_t offset, |
3393 | upl_size_t size, | |
1c79356b | 3394 | int flags, |
0b4e3aa0 A |
3395 | upl_page_info_array_t pl, |
3396 | mach_msg_type_number_t count) | |
1c79356b | 3397 | { |
0b4e3aa0 A |
3398 | boolean_t finished = FALSE; |
3399 | kern_return_t kr; | |
3400 | ||
3401 | if (flags & UPL_COMMIT_FREE_ON_EMPTY) | |
3402 | flags |= UPL_COMMIT_NOTIFY_EMPTY; | |
3403 | ||
593a1d5f A |
3404 | if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) { |
3405 | return KERN_INVALID_ARGUMENT; | |
3406 | } | |
3407 | ||
0b4e3aa0 A |
3408 | kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished); |
3409 | ||
3410 | if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished) | |
3411 | upl_deallocate(upl); | |
3412 | ||
1c79356b A |
3413 | return kr; |
3414 | } | |
3415 | ||
3416 | kern_return_t | |
3417 | kernel_upl_abort_range( | |
0b4e3aa0 | 3418 | upl_t upl, |
91447636 A |
3419 | upl_offset_t offset, |
3420 | upl_size_t size, | |
0b4e3aa0 | 3421 | int abort_flags) |
1c79356b | 3422 | { |
0b4e3aa0 A |
3423 | kern_return_t kr; |
3424 | boolean_t finished = FALSE; | |
1c79356b | 3425 | |
0b4e3aa0 A |
3426 | if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY) |
3427 | abort_flags |= UPL_COMMIT_NOTIFY_EMPTY; | |
1c79356b | 3428 | |
0b4e3aa0 | 3429 | kr = upl_abort_range(upl, offset, size, abort_flags, &finished); |
1c79356b | 3430 | |
0b4e3aa0 A |
3431 | if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished) |
3432 | upl_deallocate(upl); | |
1c79356b | 3433 | |
0b4e3aa0 | 3434 | return kr; |
1c79356b A |
3435 | } |
3436 | ||
1c79356b | 3437 | kern_return_t |
0b4e3aa0 A |
3438 | kernel_upl_abort( |
3439 | upl_t upl, | |
3440 | int abort_type) | |
1c79356b | 3441 | { |
0b4e3aa0 | 3442 | kern_return_t kr; |
1c79356b | 3443 | |
0b4e3aa0 A |
3444 | kr = upl_abort(upl, abort_type); |
3445 | upl_deallocate(upl); | |
3446 | return kr; | |
1c79356b A |
3447 | } |
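/*
 * Illustrative sketch, not part of the original file: the typical
 * BSD-side pattern these wrappers support.  The UPL is assumed to
 * have been created elsewhere (e.g. by the cluster I/O layer), and
 * passing an empty page-info list (NULL, 0) to commit_range is
 * assumed to be acceptable.  The example_ name is hypothetical.
 */
static void
example_finish_upl(vm_map_t map, upl_t upl, upl_size_t size, boolean_t success)
{
	vm_offset_t	addr;

	if (kernel_upl_map(map, upl, &addr) != KERN_SUCCESS)
		return;

	/* ... read or fill the mapped pages at `addr` ... */

	(void) kernel_upl_unmap(map, upl);

	if (success) {
		/* the UPL is deallocated once the whole range drains */
		(void) kernel_upl_commit_range(upl, 0, size,
		    UPL_COMMIT_FREE_ON_EMPTY, NULL, 0);
	} else {
		/* kernel_upl_abort() always deallocates the UPL */
		(void) kernel_upl_abort(upl, UPL_ABORT_ERROR);
	}
}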
3448 | ||
91447636 A |
3449 | /* |
3450 | * Now a kernel-private interface (for BootCache | |
3451 | * use only). Need a cleaner way to create an | |
3452 | * empty vm_map() and return a handle to it. | |
3453 | */ | |
1c79356b A |
3454 | |
3455 | kern_return_t | |
91447636 A |
3456 | vm_region_object_create( |
3457 | vm_map_t target_map, | 
3458 | vm_size_t size, | |
3459 | ipc_port_t *object_handle) | |
1c79356b | 3460 | { |
91447636 A |
3461 | vm_named_entry_t user_entry; |
3462 | ipc_port_t user_handle; | |
1c79356b | 3463 | |
91447636 | 3464 | vm_map_t new_map; |
1c79356b | 3465 | |
91447636 A |
3466 | if (mach_memory_entry_allocate(&user_entry, &user_handle) |
3467 | != KERN_SUCCESS) { | |
1c79356b | 3468 | return KERN_FAILURE; |
91447636 | 3469 | } |
1c79356b | 3470 | |
91447636 | 3471 | /* Create a named object based on a submap of specified size */ |
1c79356b | 3472 | |
91447636 | 3473 | new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS, |
39236c6e A |
3474 | vm_map_round_page(size, |
3475 | VM_MAP_PAGE_MASK(target_map)), | |
3476 | TRUE); | |
3477 | vm_map_set_page_shift(new_map, VM_MAP_PAGE_SHIFT(target_map)); | |
1c79356b | 3478 | |
91447636 A |
3479 | user_entry->backing.map = new_map; |
3480 | user_entry->internal = TRUE; | |
3481 | user_entry->is_sub_map = TRUE; | |
3482 | user_entry->offset = 0; | |
3483 | user_entry->protection = VM_PROT_ALL; | |
3484 | user_entry->size = size; | |
3485 | assert(user_entry->ref_count == 1); | |
1c79356b | 3486 | |
91447636 | 3487 | *object_handle = user_handle; |
1c79356b | 3488 | return KERN_SUCCESS; |
1c79356b | 3489 | |
55e303ae A |
3490 | } |
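/*
 * Illustrative sketch, not part of the original file: how the
 * BootCache-style caller mentioned above might obtain and later
 * dispose of an empty-submap handle.  The example_ name is
 * hypothetical.
 */
static ipc_port_t
example_create_region(vm_map_t target_map, vm_size_t size)
{
	ipc_port_t	handle = IP_NULL;

	if (vm_region_object_create(target_map, size, &handle)
	    != KERN_SUCCESS)
		return IP_NULL;

	/* when done, drop the right as with any named entry */
	/* mach_memory_entry_port_release(handle); */
	return handle;
}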
3491 | ||
91447636 A |
3492 | ppnum_t vm_map_get_phys_page( /* forward */ |
3493 | vm_map_t map, | |
3494 | vm_offset_t offset); | |
3495 | ||
55e303ae | 3496 | ppnum_t |
1c79356b | 3497 | vm_map_get_phys_page( |
91447636 A |
3498 | vm_map_t map, |
3499 | vm_offset_t addr) | |
1c79356b | 3500 | { |
91447636 A |
3501 | vm_object_offset_t offset; |
3502 | vm_object_t object; | |
3503 | vm_map_offset_t map_offset; | |
3504 | vm_map_entry_t entry; | |
3505 | ppnum_t phys_page = 0; | |
3506 | ||
39236c6e | 3507 | map_offset = vm_map_trunc_page(addr, PAGE_MASK); |
1c79356b A |
3508 | |
3509 | vm_map_lock(map); | |
91447636 | 3510 | while (vm_map_lookup_entry(map, map_offset, &entry)) { |
1c79356b | 3511 | |
3e170ce0 | 3512 | if (VME_OBJECT(entry) == VM_OBJECT_NULL) { |
1c79356b | 3513 | vm_map_unlock(map); |
91447636 | 3514 | return (ppnum_t) 0; |
1c79356b A |
3515 | } |
3516 | if (entry->is_sub_map) { | |
3517 | vm_map_t old_map; | |
3e170ce0 | 3518 | vm_map_lock(VME_SUBMAP(entry)); |
1c79356b | 3519 | old_map = map; |
3e170ce0 A |
3520 | map = VME_SUBMAP(entry); |
3521 | map_offset = (VME_OFFSET(entry) + | |
3522 | (map_offset - entry->vme_start)); | |
1c79356b A |
3523 | vm_map_unlock(old_map); |
3524 | continue; | |
3525 | } | |
3e170ce0 | 3526 | if (VME_OBJECT(entry)->phys_contiguous) { |
9bccf70c A |
3527 | /* These are not standard pageable memory mappings */ |
3528 | /* If they are not present in the object they will */ | |
3529 | /* have to be picked up from the pager through the */ | |
3530 | /* fault mechanism. */ | |
3e170ce0 | 3531 | if (VME_OBJECT(entry)->vo_shadow_offset == 0) { |
9bccf70c A |
3532 | /* need to call vm_fault */ |
3533 | vm_map_unlock(map); | |
91447636 | 3534 | vm_fault(map, map_offset, VM_PROT_NONE, |
9bccf70c A |
3535 | FALSE, THREAD_UNINT, NULL, 0); |
3536 | vm_map_lock(map); | |
3537 | continue; | |
3538 | } | |
3e170ce0 A |
3539 | offset = (VME_OFFSET(entry) + |
3540 | (map_offset - entry->vme_start)); | |
55e303ae | 3541 | phys_page = (ppnum_t) |
3e170ce0 A |
3542 | ((VME_OBJECT(entry)->vo_shadow_offset |
3543 | + offset) >> PAGE_SHIFT); | |
9bccf70c A |
3544 | break; |
3545 | ||
3546 | } | |
3e170ce0 A |
3547 | offset = (VME_OFFSET(entry) + (map_offset - entry->vme_start)); |
3548 | object = VME_OBJECT(entry); | |
1c79356b A |
3549 | vm_object_lock(object); |
3550 | while (TRUE) { | |
3551 | vm_page_t dst_page = vm_page_lookup(object,offset); | |
3552 | if(dst_page == VM_PAGE_NULL) { | |
3553 | if(object->shadow) { | |
3554 | vm_object_t old_object; | |
3555 | vm_object_lock(object->shadow); | |
3556 | old_object = object; | |
6d2010ae | 3557 | offset = offset + object->vo_shadow_offset; |
1c79356b A |
3558 | object = object->shadow; |
3559 | vm_object_unlock(old_object); | |
3560 | } else { | |
3561 | vm_object_unlock(object); | |
3562 | break; | |
3563 | } | |
3564 | } else { | |
55e303ae | 3565 | phys_page = (ppnum_t)(dst_page->phys_page); |
1c79356b A |
3566 | vm_object_unlock(object); |
3567 | break; | |
3568 | } | |
3569 | } | |
3570 | break; | |
3571 | ||
3572 | } | |
3573 | ||
3574 | vm_map_unlock(map); | |
55e303ae A |
3575 | return phys_page; |
3576 | } | |
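/*
 * Illustrative sketch, not part of the original file: converting a
 * mapped address to a physical byte address.  vm_map_get_phys_page()
 * returns 0 when no page is resident at the address, so 0 must be
 * treated as "no translation".  The example_ name is hypothetical.
 */
static uint64_t
example_phys_addr(vm_map_t map, vm_offset_t addr)
{
	ppnum_t	pnum = vm_map_get_phys_page(map, addr);

	if (pnum == 0)
		return 0;
	/* page number -> byte address, keeping the in-page offset */
	return ((uint64_t)pnum << PAGE_SHIFT) | (addr & PAGE_MASK);
}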
3577 | ||
3578 | ||
3e170ce0 | 3579 | #if 0 |
91447636 A |
3580 | kern_return_t kernel_object_iopl_request( /* forward */ |
3581 | vm_named_entry_t named_entry, | |
3582 | memory_object_offset_t offset, | |
b0d623f7 | 3583 | upl_size_t *upl_size, |
91447636 A |
3584 | upl_t *upl_ptr, |
3585 | upl_page_info_array_t user_page_list, | |
3586 | unsigned int *page_list_count, | |
3587 | int *flags); | |
3588 | ||
55e303ae A |
3589 | kern_return_t |
3590 | kernel_object_iopl_request( | |
3591 | vm_named_entry_t named_entry, | |
3592 | memory_object_offset_t offset, | |
b0d623f7 | 3593 | upl_size_t *upl_size, |
55e303ae A |
3594 | upl_t *upl_ptr, |
3595 | upl_page_info_array_t user_page_list, | |
3596 | unsigned int *page_list_count, | |
3597 | int *flags) | |
3598 | { | |
3599 | vm_object_t object; | |
3600 | kern_return_t ret; | |
3601 | ||
3602 | int caller_flags; | |
3603 | ||
3604 | caller_flags = *flags; | |
3605 | ||
91447636 A |
3606 | if (caller_flags & ~UPL_VALID_FLAGS) { |
3607 | /* | |
3608 | * For forward compatibility's sake, | |
3609 | * reject any unknown flag. | |
3610 | */ | |
3611 | return KERN_INVALID_VALUE; | |
3612 | } | |
3613 | ||
55e303ae A |
3614 | /* a few checks to make sure user is obeying rules */ |
3615 | if(*upl_size == 0) { | |
3616 | if(offset >= named_entry->size) | |
3617 | return(KERN_INVALID_RIGHT); | |
b0d623f7 A |
3618 | *upl_size = (upl_size_t) (named_entry->size - offset); |
3619 | if (*upl_size != named_entry->size - offset) | |
3620 | return KERN_INVALID_ARGUMENT; | |
55e303ae A |
3621 | } |
3622 | if(caller_flags & UPL_COPYOUT_FROM) { | |
3623 | if((named_entry->protection & VM_PROT_READ) | |
3624 | != VM_PROT_READ) { | |
3625 | return(KERN_INVALID_RIGHT); | |
3626 | } | |
3627 | } else { | |
3628 | if((named_entry->protection & | |
3629 | (VM_PROT_READ | VM_PROT_WRITE)) | |
3630 | != (VM_PROT_READ | VM_PROT_WRITE)) { | |
3631 | return(KERN_INVALID_RIGHT); | |
3632 | } | |
3633 | } | |
3634 | if(named_entry->size < (offset + *upl_size)) | |
3635 | return(KERN_INVALID_ARGUMENT); | |
3636 | ||
3637 | /* the caller's parameter offset is defined to be the */ | 
3638 | /* offset from beginning of named entry offset in object */ | |
3639 | offset = offset + named_entry->offset; | |
3640 | ||
39236c6e A |
3641 | if (named_entry->is_sub_map || |
3642 | named_entry->is_copy) | |
3643 | return KERN_INVALID_ARGUMENT; | |
55e303ae A |
3644 | |
3645 | named_entry_lock(named_entry); | |
3646 | ||
91447636 | 3647 | if (named_entry->is_pager) { |
55e303ae A |
3648 | object = vm_object_enter(named_entry->backing.pager, |
3649 | named_entry->offset + named_entry->size, | |
3650 | named_entry->internal, | |
3651 | FALSE, | |
3652 | FALSE); | |
3653 | if (object == VM_OBJECT_NULL) { | |
3654 | named_entry_unlock(named_entry); | |
3655 | return(KERN_INVALID_OBJECT); | |
3656 | } | |
55e303ae | 3657 | |
91447636 A |
3658 | /* JMM - drop reference on the pager here? */ |
3659 | ||
3660 | /* create an extra reference for the object */ | |
3661 | vm_object_lock(object); | |
55e303ae | 3662 | vm_object_reference_locked(object); |
91447636 A |
3663 | named_entry->backing.object = object; |
3664 | named_entry->is_pager = FALSE; | |
55e303ae A |
3665 | named_entry_unlock(named_entry); |
3666 | ||
3667 | /* wait for object (if any) to be ready */ | |
91447636 A |
3668 | if (!named_entry->internal) { |
3669 | while (!object->pager_ready) { | |
3670 | vm_object_wait(object, | |
3671 | VM_OBJECT_EVENT_PAGER_READY, | |
3672 | THREAD_UNINT); | |
3673 | vm_object_lock(object); | |
3674 | } | |
55e303ae A |
3675 | } |
3676 | vm_object_unlock(object); | |
91447636 A |
3677 | |
3678 | } else { | |
3679 | /* This is the case where we are going to operate */ | |
3680 | /* on an already known object. If the object is */ | 
3681 | /* not ready it is internal. An external */ | 
3682 | /* object cannot be mapped until it is ready, */ | 
3683 | /* so we can avoid the ready check */ | 
3684 | /* in this case. */ | 
3685 | object = named_entry->backing.object; | |
3686 | vm_object_reference(object); | |
3687 | named_entry_unlock(named_entry); | |
55e303ae A |
3688 | } |
3689 | ||
3690 | if (!object->private) { | |
fe8ab488 A |
3691 | if (*upl_size > MAX_UPL_TRANSFER_BYTES) |
3692 | *upl_size = MAX_UPL_TRANSFER_BYTES; | |
55e303ae A |
3693 | if (object->phys_contiguous) { |
3694 | *flags = UPL_PHYS_CONTIG; | |
3695 | } else { | |
3696 | *flags = 0; | |
3697 | } | |
3698 | } else { | |
3699 | *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG; | |
3700 | } | |
3701 | ||
3702 | ret = vm_object_iopl_request(object, | |
3703 | offset, | |
3704 | *upl_size, | |
3705 | upl_ptr, | |
3706 | user_page_list, | |
3707 | page_list_count, | |
3e170ce0 | 3708 | (upl_control_flags_t)(unsigned int)caller_flags); |
55e303ae A |
3709 | vm_object_deallocate(object); |
3710 | return ret; | |
1c79356b | 3711 | } |
3e170ce0 | 3712 | #endif |