/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    vm/vm_user.c
 * Author:  Avadis Tevanian, Jr., Michael Wayne Young
 *
 * User-exported virtual memory functions.
 */

/*
 * There are three implementations of the "XXX_allocate" functionality in
 * the kernel: mach_vm_allocate (for any task on the platform), vm_allocate
 * (for a task with the same address space size, especially the current task),
 * and vm32_vm_allocate (for the specific case of a 32-bit task). vm_allocate
 * in the kernel should only be used on the kernel_task. vm32_vm_allocate only
 * makes sense on platforms where a user task can be either 32- or 64-bit, or
 * the kernel task can be 32- or 64-bit. mach_vm_allocate makes sense
 * everywhere, and is preferred for new code.
 *
 * The entrypoints into the kernel are more complex. All platforms support a
 * mach_vm_allocate-style API (subsystem 4800) which operates with the largest
 * size types for the platform. On platforms that only support U32/K32,
 * subsystem 4800 is all you need. On platforms that support both U32 and U64,
 * subsystem 3800 is used to disambiguate the size of parameters; its
 * parameters are always 32-bit and it calls into the vm32_vm_allocate APIs.
 * On non-U32/K32 platforms, the MIG glue should never call into vm_allocate
 * directly, because the calling task and the kernel_task are unlikely to use
 * the same size parameters.
 *
 * New VM call implementations should be added here and to mach_vm.defs
 * (subsystem 4800), and use mach_vm_* "wide" types.
 */

#include <debug.h>

#include <vm_cpm.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>    /* to get vm_address_t */
#include <mach/memory_object.h>
#include <mach/std_types.h>     /* to get pointer_t */
#include <mach/upl.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/mach_syscalls.h>

#include <mach/host_priv_server.h>
#include <mach/mach_vm_server.h>
#include <mach/vm_map_server.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/misc_protos.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/memory_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>

vm_size_t upl_offset_to_pagelist = 0;

#if VM_CPM
#include <vm/cpm.h>
#endif  /* VM_CPM */

ipc_port_t dynamic_pager_control_port = NULL;

/*
 * mach_vm_allocate allocates "zero fill" memory in the specified
 * map.
 */
kern_return_t
mach_vm_allocate(
    vm_map_t            map,
    mach_vm_offset_t    *addr,
    mach_vm_size_t      size,
    int                 flags)
{
    vm_map_offset_t map_addr;
    vm_map_size_t   map_size;
    kern_return_t   result;
    boolean_t       anywhere;

    /* filter out any kernel-only flags */
    if (flags & ~VM_FLAGS_USER_ALLOCATE)
        return KERN_INVALID_ARGUMENT;

    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);
    if (size == 0) {
        *addr = 0;
        return(KERN_SUCCESS);
    }

    anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0);
    if (anywhere) {
        /*
         * No specific address requested, so start candidate address
         * search at the minimum address in the map.  However, if that
         * minimum is 0, bump it up by PAGE_SIZE.  We want to limit
         * allocations of PAGEZERO to explicit requests since its
         * normal use is to catch dereferences of NULL and many
         * applications also treat pointers with a value of 0 as
         * special and suddenly having address 0 contain usable
         * memory would tend to confuse those applications.
         */
        map_addr = vm_map_min(map);
        if (map_addr == 0)
            map_addr += PAGE_SIZE;
    } else
        map_addr = vm_map_trunc_page(*addr);
    map_size = vm_map_round_page(size);
    if (map_size == 0) {
        return(KERN_INVALID_ARGUMENT);
    }

    result = vm_map_enter(
            map,
            &map_addr,
            map_size,
            (vm_map_offset_t)0,
            flags,
            VM_OBJECT_NULL,
            (vm_object_offset_t)0,
            FALSE,
            VM_PROT_DEFAULT,
            VM_PROT_ALL,
            VM_INHERIT_DEFAULT);

    *addr = map_addr;
    return(result);
}
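
/*
 * Illustrative sketch (not part of the original file): the two addressing
 * modes accepted above. With VM_FLAGS_ANYWHERE the kernel picks an address
 * (skipping PAGEZERO, per the comment above); with VM_FLAGS_FIXED the caller
 * supplies one and the call fails if that range is unavailable. The fixed
 * address below is a hypothetical example value.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static void
allocate_modes_example(void)
{
    mach_vm_address_t addr  = 0;
    mach_vm_size_t    size  = 2 * vm_page_size;
    mach_vm_address_t fixed = 0x140000000ULL;   /* hypothetical address */

    /* kernel chooses the address; addr is an output parameter */
    (void)mach_vm_allocate(mach_task_self(), &addr, size, VM_FLAGS_ANYWHERE);

    /* caller demands a specific address; fails if the range is busy */
    (void)mach_vm_allocate(mach_task_self(), &fixed, size, VM_FLAGS_FIXED);
}
#endif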
191 | ||
192 | /* | |
193 | * vm_allocate | |
194 | * Legacy routine that allocates "zero fill" memory in the specfied | |
195 | * map (which is limited to the same size as the kernel). | |
196 | */ | |
197 | kern_return_t | |
198 | vm_allocate( | |
199 | vm_map_t map, | |
200 | vm_offset_t *addr, | |
201 | vm_size_t size, | |
202 | int flags) | |
203 | { | |
204 | vm_map_offset_t map_addr; | |
205 | vm_map_size_t map_size; | |
206 | kern_return_t result; | |
2d21ac55 A |
207 | boolean_t anywhere; |
208 | ||
209 | /* filter out any kernel-only flags */ | |
210 | if (flags & ~VM_FLAGS_USER_ALLOCATE) | |
211 | return KERN_INVALID_ARGUMENT; | |
91447636 A |
212 | |
213 | if (map == VM_MAP_NULL) | |
214 | return(KERN_INVALID_ARGUMENT); | |
1c79356b | 215 | if (size == 0) { |
91447636 A |
216 | *addr = 0; |
217 | return(KERN_SUCCESS); | |
218 | } | |
219 | ||
2d21ac55 | 220 | anywhere = ((VM_FLAGS_ANYWHERE & flags) != 0); |
91447636 A |
221 | if (anywhere) { |
222 | /* | |
223 | * No specific address requested, so start candidate address | |
224 | * search at the minimum address in the map. However, if that | |
225 | * minimum is 0, bump it up by PAGE_SIZE. We want to limit | |
226 | * allocations of PAGEZERO to explicit requests since its | |
227 | * normal use is to catch dereferences of NULL and many | |
228 | * applications also treat pointers with a value of 0 as | |
229 | * special and suddenly having address 0 contain useable | |
230 | * memory would tend to confuse those applications. | |
231 | */ | |
232 | map_addr = vm_map_min(map); | |
233 | if (map_addr == 0) | |
234 | map_addr += PAGE_SIZE; | |
235 | } else | |
236 | map_addr = vm_map_trunc_page(*addr); | |
237 | map_size = vm_map_round_page(size); | |
238 | if (map_size == 0) { | |
1c79356b A |
239 | return(KERN_INVALID_ARGUMENT); |
240 | } | |
241 | ||
242 | result = vm_map_enter( | |
243 | map, | |
91447636 A |
244 | &map_addr, |
245 | map_size, | |
246 | (vm_map_offset_t)0, | |
1c79356b A |
247 | flags, |
248 | VM_OBJECT_NULL, | |
249 | (vm_object_offset_t)0, | |
250 | FALSE, | |
251 | VM_PROT_DEFAULT, | |
252 | VM_PROT_ALL, | |
253 | VM_INHERIT_DEFAULT); | |
254 | ||
91447636 | 255 | *addr = CAST_DOWN(vm_offset_t, map_addr); |
1c79356b A |
256 | return(result); |
257 | } | |

/*
 * mach_vm_deallocate -
 * deallocates the specified range of addresses in the
 * specified address map.
 */
kern_return_t
mach_vm_deallocate(
    vm_map_t            map,
    mach_vm_offset_t    start,
    mach_vm_size_t      size)
{
    if ((map == VM_MAP_NULL) || (start + size < start))
        return(KERN_INVALID_ARGUMENT);

    if (size == (mach_vm_offset_t) 0)
        return(KERN_SUCCESS);

    return(vm_map_remove(map, vm_map_trunc_page(start),
                         vm_map_round_page(start+size), VM_MAP_NO_FLAGS));
}
279 | ||
280 | /* | |
281 | * vm_deallocate - | |
282 | * deallocates the specified range of addresses in the | |
283 | * specified address map (limited to addresses the same | |
284 | * size as the kernel). | |
285 | */ | |
286 | kern_return_t | |
1c79356b A |
287 | vm_deallocate( |
288 | register vm_map_t map, | |
289 | vm_offset_t start, | |
290 | vm_size_t size) | |
291 | { | |
91447636 | 292 | if ((map == VM_MAP_NULL) || (start + size < start)) |
1c79356b A |
293 | return(KERN_INVALID_ARGUMENT); |
294 | ||
295 | if (size == (vm_offset_t) 0) | |
296 | return(KERN_SUCCESS); | |
297 | ||
91447636 A |
298 | return(vm_map_remove(map, vm_map_trunc_page(start), |
299 | vm_map_round_page(start+size), VM_MAP_NO_FLAGS)); | |
1c79356b A |
300 | } |

/*
 * mach_vm_inherit -
 * Sets the inheritance of the specified range in the
 * specified map.
 */
kern_return_t
mach_vm_inherit(
    vm_map_t            map,
    mach_vm_offset_t    start,
    mach_vm_size_t      size,
    vm_inherit_t        new_inheritance)
{
    if ((map == VM_MAP_NULL) || (start + size < start) ||
        (new_inheritance > VM_INHERIT_LAST_VALID))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return(vm_map_inherit(map,
                          vm_map_trunc_page(start),
                          vm_map_round_page(start+size),
                          new_inheritance));
}
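
/*
 * Illustrative sketch (not part of the original file): inheritance controls
 * what a child task sees after fork(). Marking a range VM_INHERIT_SHARE
 * makes parent and child share the same physical pages instead of each
 * getting copy-on-write copies. Assumes "base" and "size" describe an
 * existing allocation in the caller's map.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
share_across_fork_example(mach_vm_address_t base, mach_vm_size_t size)
{
    /* after fork(), writes by either side are visible to the other */
    return mach_vm_inherit(mach_task_self(), base, size, VM_INHERIT_SHARE);
}
#endif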
326 | ||
327 | /* | |
328 | * vm_inherit - | |
329 | * Sets the inheritance of the specified range in the | |
330 | * specified map (range limited to addresses | |
331 | */ | |
332 | kern_return_t | |
1c79356b A |
333 | vm_inherit( |
334 | register vm_map_t map, | |
335 | vm_offset_t start, | |
336 | vm_size_t size, | |
337 | vm_inherit_t new_inheritance) | |
338 | { | |
91447636 A |
339 | if ((map == VM_MAP_NULL) || (start + size < start) || |
340 | (new_inheritance > VM_INHERIT_LAST_VALID)) | |
1c79356b A |
341 | return(KERN_INVALID_ARGUMENT); |
342 | ||
91447636 A |
343 | if (size == 0) |
344 | return KERN_SUCCESS; | |
345 | ||
1c79356b | 346 | return(vm_map_inherit(map, |
91447636 A |
347 | vm_map_trunc_page(start), |
348 | vm_map_round_page(start+size), | |
1c79356b A |
349 | new_inheritance)); |
350 | } | |
351 | ||
352 | /* | |
91447636 A |
353 | * mach_vm_protect - |
354 | * Sets the protection of the specified range in the | |
1c79356b A |
355 | * specified map. |
356 | */ | |
357 | ||
91447636 A |
358 | kern_return_t |
359 | mach_vm_protect( | |
360 | vm_map_t map, | |
361 | mach_vm_offset_t start, | |
362 | mach_vm_size_t size, | |
363 | boolean_t set_maximum, | |
364 | vm_prot_t new_protection) | |
365 | { | |
366 | if ((map == VM_MAP_NULL) || (start + size < start) || | |
367 | (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) | |
368 | return(KERN_INVALID_ARGUMENT); | |
369 | ||
370 | if (size == 0) | |
371 | return KERN_SUCCESS; | |
372 | ||
373 | return(vm_map_protect(map, | |
374 | vm_map_trunc_page(start), | |
375 | vm_map_round_page(start+size), | |
376 | new_protection, | |
377 | set_maximum)); | |
378 | } | |
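
/*
 * Illustrative sketch (not part of the original file): a common use of
 * mach_vm_protect() is to turn the first page of an allocation into an
 * inaccessible guard page. Passing set_maximum=FALSE changes only the
 * current protection, so the page can later be restored.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
guard_page_example(mach_vm_address_t base)
{
    /* base is page-aligned memory obtained from mach_vm_allocate() */
    return mach_vm_protect(mach_task_self(), base, vm_page_size,
                           FALSE /* set_maximum */, VM_PROT_NONE);
}
#endif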
379 | ||
380 | /* | |
381 | * vm_protect - | |
382 | * Sets the protection of the specified range in the | |
383 | * specified map. Addressability of the range limited | |
384 | * to the same size as the kernel. | |
385 | */ | |
386 | ||
1c79356b A |
387 | kern_return_t |
388 | vm_protect( | |
91447636 | 389 | vm_map_t map, |
1c79356b A |
390 | vm_offset_t start, |
391 | vm_size_t size, | |
392 | boolean_t set_maximum, | |
393 | vm_prot_t new_protection) | |
394 | { | |
91447636 A |
395 | if ((map == VM_MAP_NULL) || (start + size < start) || |
396 | (new_protection & ~(VM_PROT_ALL | VM_PROT_COPY))) | |
1c79356b A |
397 | return(KERN_INVALID_ARGUMENT); |
398 | ||
91447636 A |
399 | if (size == 0) |
400 | return KERN_SUCCESS; | |
401 | ||
1c79356b | 402 | return(vm_map_protect(map, |
91447636 A |
403 | vm_map_trunc_page(start), |
404 | vm_map_round_page(start+size), | |
1c79356b A |
405 | new_protection, |
406 | set_maximum)); | |
407 | } | |
408 | ||
409 | /* | |
91447636 | 410 | * mach_vm_machine_attributes - |
1c79356b A |
411 | * Handle machine-specific attributes for a mapping, such |
412 | * as cachability, migrability, etc. | |
413 | */ | |
414 | kern_return_t | |
91447636 A |
415 | mach_vm_machine_attribute( |
416 | vm_map_t map, | |
417 | mach_vm_address_t addr, | |
418 | mach_vm_size_t size, | |
419 | vm_machine_attribute_t attribute, | |
420 | vm_machine_attribute_val_t* value) /* IN/OUT */ | |
421 | { | |
422 | if ((map == VM_MAP_NULL) || (addr + size < addr)) | |
423 | return(KERN_INVALID_ARGUMENT); | |
424 | ||
425 | if (size == 0) | |
426 | return KERN_SUCCESS; | |
427 | ||
428 | return vm_map_machine_attribute(map, | |
429 | vm_map_trunc_page(addr), | |
430 | vm_map_round_page(addr+size), | |
431 | attribute, | |
432 | value); | |
433 | } | |
434 | ||
435 | /* | |
436 | * vm_machine_attribute - | |
437 | * Handle machine-specific attributes for a mapping, such | |
438 | * as cachability, migrability, etc. Limited addressability | |
439 | * (same range limits as for the native kernel map). | |
440 | */ | |
441 | kern_return_t | |
1c79356b A |
442 | vm_machine_attribute( |
443 | vm_map_t map, | |
91447636 | 444 | vm_address_t addr, |
1c79356b A |
445 | vm_size_t size, |
446 | vm_machine_attribute_t attribute, | |
447 | vm_machine_attribute_val_t* value) /* IN/OUT */ | |
448 | { | |
91447636 A |
449 | if ((map == VM_MAP_NULL) || (addr + size < addr)) |
450 | return(KERN_INVALID_ARGUMENT); | |
451 | ||
452 | if (size == 0) | |
453 | return KERN_SUCCESS; | |
454 | ||
455 | return vm_map_machine_attribute(map, | |
456 | vm_map_trunc_page(addr), | |
457 | vm_map_round_page(addr+size), | |
458 | attribute, | |
459 | value); | |
460 | } | |
461 | ||
462 | /* | |
463 | * mach_vm_read - | |
464 | * Read/copy a range from one address space and return it to the caller. | |
465 | * | |
466 | * It is assumed that the address for the returned memory is selected by | |
467 | * the IPC implementation as part of receiving the reply to this call. | |
468 | * If IPC isn't used, the caller must deal with the vm_map_copy_t object | |
469 | * that gets returned. | |
470 | * | |
471 | * JMM - because of mach_msg_type_number_t, this call is limited to a | |
472 | * single 4GB region at this time. | |
473 | * | |
474 | */ | |
475 | kern_return_t | |
476 | mach_vm_read( | |
477 | vm_map_t map, | |
478 | mach_vm_address_t addr, | |
479 | mach_vm_size_t size, | |
480 | pointer_t *data, | |
481 | mach_msg_type_number_t *data_size) | |
482 | { | |
483 | kern_return_t error; | |
484 | vm_map_copy_t ipc_address; | |
485 | ||
1c79356b A |
486 | if (map == VM_MAP_NULL) |
487 | return(KERN_INVALID_ARGUMENT); | |
488 | ||
b0d623f7 A |
489 | if ((mach_msg_type_number_t) size != size) |
490 | return KERN_INVALID_ARGUMENT; | |
91447636 A |
491 | |
492 | error = vm_map_copyin(map, | |
493 | (vm_map_address_t)addr, | |
494 | (vm_map_size_t)size, | |
495 | FALSE, /* src_destroy */ | |
496 | &ipc_address); | |
497 | ||
498 | if (KERN_SUCCESS == error) { | |
499 | *data = (pointer_t) ipc_address; | |
b0d623f7 A |
500 | *data_size = (mach_msg_type_number_t) size; |
501 | assert(*data_size == size); | |
91447636 A |
502 | } |
503 | return(error); | |
1c79356b A |
504 | } |
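
/*
 * Illustrative sketch (not part of the original file): reading another
 * task's memory from user space. The copy lands at a kernel-chosen
 * address in the caller's map (delivered out-of-line by MIG) and must be
 * deallocated when done. Obtaining "task" (e.g. via task_for_pid())
 * requires privilege/entitlements and is assumed here.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
read_remote_example(mach_port_t task, mach_vm_address_t remote_addr,
                    mach_vm_size_t length)
{
    vm_offset_t            data = 0;
    mach_msg_type_number_t data_cnt = 0;
    kern_return_t          kr;

    kr = mach_vm_read(task, remote_addr, length, &data, &data_cnt);
    if (kr == KERN_SUCCESS) {
        /* ... inspect data_cnt bytes at data ... */
        mach_vm_deallocate(mach_task_self(),
                           (mach_vm_address_t)data, data_cnt);
    }
    return kr;
}
#endif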
505 | ||
91447636 A |
506 | /* |
507 | * vm_read - | |
508 | * Read/copy a range from one address space and return it to the caller. | |
509 | * Limited addressability (same range limits as for the native kernel map). | |
510 | * | |
511 | * It is assumed that the address for the returned memory is selected by | |
512 | * the IPC implementation as part of receiving the reply to this call. | |
513 | * If IPC isn't used, the caller must deal with the vm_map_copy_t object | |
514 | * that gets returned. | |
515 | */ | |
1c79356b A |
516 | kern_return_t |
517 | vm_read( | |
518 | vm_map_t map, | |
91447636 | 519 | vm_address_t addr, |
1c79356b A |
520 | vm_size_t size, |
521 | pointer_t *data, | |
522 | mach_msg_type_number_t *data_size) | |
523 | { | |
524 | kern_return_t error; | |
525 | vm_map_copy_t ipc_address; | |
526 | ||
527 | if (map == VM_MAP_NULL) | |
528 | return(KERN_INVALID_ARGUMENT); | |
529 | ||
b0d623f7 A |
530 | if (size > (unsigned)(mach_msg_type_number_t) -1) { |
531 | /* | |
532 | * The kernel could handle a 64-bit "size" value, but | |
533 | * it could not return the size of the data in "*data_size" | |
534 | * without overflowing. | |
535 | * Let's reject this "size" as invalid. | |
536 | */ | |
537 | return KERN_INVALID_ARGUMENT; | |
538 | } | |
539 | ||
91447636 A |
540 | error = vm_map_copyin(map, |
541 | (vm_map_address_t)addr, | |
542 | (vm_map_size_t)size, | |
543 | FALSE, /* src_destroy */ | |
544 | &ipc_address); | |
545 | ||
546 | if (KERN_SUCCESS == error) { | |
1c79356b | 547 | *data = (pointer_t) ipc_address; |
b0d623f7 A |
548 | *data_size = (mach_msg_type_number_t) size; |
549 | assert(*data_size == size); | |
1c79356b A |
550 | } |
551 | return(error); | |
552 | } | |
553 | ||
91447636 A |
554 | /* |
555 | * mach_vm_read_list - | |
556 | * Read/copy a list of address ranges from specified map. | |
557 | * | |
558 | * MIG does not know how to deal with a returned array of | |
559 | * vm_map_copy_t structures, so we have to do the copyout | |
560 | * manually here. | |
561 | */ | |
562 | kern_return_t | |
563 | mach_vm_read_list( | |
564 | vm_map_t map, | |
565 | mach_vm_read_entry_t data_list, | |
566 | natural_t count) | |
567 | { | |
568 | mach_msg_type_number_t i; | |
569 | kern_return_t error; | |
570 | vm_map_copy_t copy; | |
571 | ||
8ad349bb A |
572 | if (map == VM_MAP_NULL || |
573 | count > VM_MAP_ENTRY_MAX) | |
91447636 A |
574 | return(KERN_INVALID_ARGUMENT); |
575 | ||
576 | error = KERN_SUCCESS; | |
577 | for(i=0; i<count; i++) { | |
578 | vm_map_address_t map_addr; | |
579 | vm_map_size_t map_size; | |
580 | ||
581 | map_addr = (vm_map_address_t)(data_list[i].address); | |
582 | map_size = (vm_map_size_t)(data_list[i].size); | |
583 | ||
584 | if(map_size != 0) { | |
585 | error = vm_map_copyin(map, | |
586 | map_addr, | |
587 | map_size, | |
588 | FALSE, /* src_destroy */ | |
589 | ©); | |
590 | if (KERN_SUCCESS == error) { | |
591 | error = vm_map_copyout( | |
592 | current_task()->map, | |
593 | &map_addr, | |
594 | copy); | |
595 | if (KERN_SUCCESS == error) { | |
596 | data_list[i].address = map_addr; | |
597 | continue; | |
598 | } | |
599 | vm_map_copy_discard(copy); | |
600 | } | |
601 | } | |
602 | data_list[i].address = (mach_vm_address_t)0; | |
603 | data_list[i].size = (mach_vm_size_t)0; | |
604 | } | |
605 | return(error); | |
606 | } | |
607 | ||
608 | /* | |
609 | * vm_read_list - | |
610 | * Read/copy a list of address ranges from specified map. | |
611 | * | |
612 | * MIG does not know how to deal with a returned array of | |
613 | * vm_map_copy_t structures, so we have to do the copyout | |
614 | * manually here. | |
615 | * | |
616 | * The source and destination ranges are limited to those | |
617 | * that can be described with a vm_address_t (i.e. same | |
618 | * size map as the kernel). | |
619 | * | |
620 | * JMM - If the result of the copyout is an address range | |
621 | * that cannot be described with a vm_address_t (i.e. the | |
622 | * caller had a larger address space but used this call | |
623 | * anyway), it will result in a truncated address being | |
624 | * returned (and a likely confused caller). | |
625 | */ | |
626 | ||
1c79356b A |
627 | kern_return_t |
628 | vm_read_list( | |
629 | vm_map_t map, | |
91447636 A |
630 | vm_read_entry_t data_list, |
631 | natural_t count) | |
1c79356b A |
632 | { |
633 | mach_msg_type_number_t i; | |
634 | kern_return_t error; | |
91447636 | 635 | vm_map_copy_t copy; |
1c79356b | 636 | |
8ad349bb A |
637 | if (map == VM_MAP_NULL || |
638 | count > VM_MAP_ENTRY_MAX) | |
1c79356b A |
639 | return(KERN_INVALID_ARGUMENT); |
640 | ||
91447636 | 641 | error = KERN_SUCCESS; |
1c79356b | 642 | for(i=0; i<count; i++) { |
91447636 A |
643 | vm_map_address_t map_addr; |
644 | vm_map_size_t map_size; | |
645 | ||
646 | map_addr = (vm_map_address_t)(data_list[i].address); | |
647 | map_size = (vm_map_size_t)(data_list[i].size); | |
648 | ||
649 | if(map_size != 0) { | |
650 | error = vm_map_copyin(map, | |
651 | map_addr, | |
652 | map_size, | |
653 | FALSE, /* src_destroy */ | |
654 | ©); | |
655 | if (KERN_SUCCESS == error) { | |
656 | error = vm_map_copyout(current_task()->map, | |
657 | &map_addr, | |
658 | copy); | |
659 | if (KERN_SUCCESS == error) { | |
660 | data_list[i].address = | |
661 | CAST_DOWN(vm_offset_t, map_addr); | |
662 | continue; | |
663 | } | |
664 | vm_map_copy_discard(copy); | |
1c79356b A |
665 | } |
666 | } | |
91447636 A |
667 | data_list[i].address = (mach_vm_address_t)0; |
668 | data_list[i].size = (mach_vm_size_t)0; | |
1c79356b A |
669 | } |
670 | return(error); | |
671 | } | |
672 | ||
673 | /* | |
91447636 A |
674 | * mach_vm_read_overwrite - |
675 | * Overwrite a range of the current map with data from the specified | |
676 | * map/address range. | |
677 | * | |
678 | * In making an assumption that the current thread is local, it is | |
679 | * no longer cluster-safe without a fully supportive local proxy | |
680 | * thread/task (but we don't support cluster's anymore so this is moot). | |
1c79356b A |
681 | */ |
682 | ||
1c79356b | 683 | kern_return_t |
91447636 A |
684 | mach_vm_read_overwrite( |
685 | vm_map_t map, | |
686 | mach_vm_address_t address, | |
687 | mach_vm_size_t size, | |
688 | mach_vm_address_t data, | |
689 | mach_vm_size_t *data_size) | |
690 | { | |
691 | kern_return_t error; | |
1c79356b A |
692 | vm_map_copy_t copy; |
693 | ||
694 | if (map == VM_MAP_NULL) | |
695 | return(KERN_INVALID_ARGUMENT); | |
696 | ||
91447636 A |
697 | error = vm_map_copyin(map, (vm_map_address_t)address, |
698 | (vm_map_size_t)size, FALSE, ©); | |
699 | ||
700 | if (KERN_SUCCESS == error) { | |
701 | error = vm_map_copy_overwrite(current_thread()->map, | |
702 | (vm_map_address_t)data, | |
703 | copy, FALSE); | |
704 | if (KERN_SUCCESS == error) { | |
705 | *data_size = size; | |
706 | return error; | |
1c79356b | 707 | } |
91447636 | 708 | vm_map_copy_discard(copy); |
1c79356b | 709 | } |
91447636 A |
710 | return(error); |
711 | } | |
712 | ||
713 | /* | |
714 | * vm_read_overwrite - | |
715 | * Overwrite a range of the current map with data from the specified | |
716 | * map/address range. | |
717 | * | |
718 | * This routine adds the additional limitation that the source and | |
719 | * destination ranges must be describable with vm_address_t values | |
720 | * (i.e. the same size address spaces as the kernel, or at least the | |
721 | * the ranges are in that first portion of the respective address | |
722 | * spaces). | |
723 | */ | |
724 | ||
725 | kern_return_t | |
726 | vm_read_overwrite( | |
727 | vm_map_t map, | |
728 | vm_address_t address, | |
729 | vm_size_t size, | |
730 | vm_address_t data, | |
731 | vm_size_t *data_size) | |
732 | { | |
733 | kern_return_t error; | |
734 | vm_map_copy_t copy; | |
735 | ||
736 | if (map == VM_MAP_NULL) | |
737 | return(KERN_INVALID_ARGUMENT); | |
738 | ||
739 | error = vm_map_copyin(map, (vm_map_address_t)address, | |
740 | (vm_map_size_t)size, FALSE, ©); | |
741 | ||
742 | if (KERN_SUCCESS == error) { | |
743 | error = vm_map_copy_overwrite(current_thread()->map, | |
744 | (vm_map_address_t)data, | |
745 | copy, FALSE); | |
746 | if (KERN_SUCCESS == error) { | |
747 | *data_size = size; | |
748 | return error; | |
1c79356b | 749 | } |
91447636 | 750 | vm_map_copy_discard(copy); |
1c79356b | 751 | } |
1c79356b A |
752 | return(error); |
753 | } | |
754 | ||
755 | ||
91447636 A |
756 | /* |
757 | * mach_vm_write - | |
758 | * Overwrite the specified address range with the data provided | |
759 | * (from the current map). | |
760 | */ | |
761 | kern_return_t | |
762 | mach_vm_write( | |
763 | vm_map_t map, | |
764 | mach_vm_address_t address, | |
765 | pointer_t data, | |
766 | __unused mach_msg_type_number_t size) | |
767 | { | |
768 | if (map == VM_MAP_NULL) | |
769 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 770 | |
91447636 A |
771 | return vm_map_copy_overwrite(map, (vm_map_address_t)address, |
772 | (vm_map_copy_t) data, FALSE /* interruptible XXX */); | |
773 | } | |
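
/*
 * Illustrative sketch (not part of the original file): the user-space
 * counterpart of mach_vm_write() passes a local buffer; MIG ships it
 * out-of-line, which is why the in-kernel "data" parameter above arrives
 * as a vm_map_copy_t. The target range is assumed to already be allocated
 * and writable in the destination task.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
write_remote_example(mach_port_t task, mach_vm_address_t remote_addr)
{
    char buf[] = "patched!";

    return mach_vm_write(task, remote_addr,
                         (vm_offset_t)buf,
                         (mach_msg_type_number_t)sizeof(buf));
}
#endif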

/*
 * vm_write -
 * Overwrite the specified address range with the data provided
 * (from the current map).
 *
 * The addressability of the range of addresses to overwrite is
 * limited by the use of a vm_address_t (same size as kernel map).
 * Either the target map is also small, or the range is in the
 * low addresses within it.
 */
kern_return_t
vm_write(
    vm_map_t                        map,
    vm_address_t                    address,
    pointer_t                       data,
    __unused mach_msg_type_number_t size)
{
    if (map == VM_MAP_NULL)
        return KERN_INVALID_ARGUMENT;

    return vm_map_copy_overwrite(map, (vm_map_address_t)address,
                                 (vm_map_copy_t) data, FALSE /* interruptible XXX */);
}
798 | ||
799 | /* | |
800 | * mach_vm_copy - | |
801 | * Overwrite one range of the specified map with the contents of | |
802 | * another range within that same map (i.e. both address ranges | |
803 | * are "over there"). | |
804 | */ | |
805 | kern_return_t | |
806 | mach_vm_copy( | |
1c79356b | 807 | vm_map_t map, |
91447636 A |
808 | mach_vm_address_t source_address, |
809 | mach_vm_size_t size, | |
810 | mach_vm_address_t dest_address) | |
1c79356b | 811 | { |
91447636 A |
812 | vm_map_copy_t copy; |
813 | kern_return_t kr; | |
814 | ||
1c79356b A |
815 | if (map == VM_MAP_NULL) |
816 | return KERN_INVALID_ARGUMENT; | |
817 | ||
91447636 A |
818 | kr = vm_map_copyin(map, (vm_map_address_t)source_address, |
819 | (vm_map_size_t)size, FALSE, ©); | |
820 | ||
821 | if (KERN_SUCCESS == kr) { | |
822 | kr = vm_map_copy_overwrite(map, | |
823 | (vm_map_address_t)dest_address, | |
824 | copy, FALSE /* interruptible XXX */); | |
825 | ||
826 | if (KERN_SUCCESS != kr) | |
827 | vm_map_copy_discard(copy); | |
828 | } | |
829 | return kr; | |
1c79356b A |
830 | } |
831 | ||
832 | kern_return_t | |
833 | vm_copy( | |
834 | vm_map_t map, | |
835 | vm_address_t source_address, | |
836 | vm_size_t size, | |
837 | vm_address_t dest_address) | |
838 | { | |
839 | vm_map_copy_t copy; | |
840 | kern_return_t kr; | |
841 | ||
842 | if (map == VM_MAP_NULL) | |
843 | return KERN_INVALID_ARGUMENT; | |
844 | ||
91447636 A |
845 | kr = vm_map_copyin(map, (vm_map_address_t)source_address, |
846 | (vm_map_size_t)size, FALSE, ©); | |
1c79356b | 847 | |
91447636 A |
848 | if (KERN_SUCCESS == kr) { |
849 | kr = vm_map_copy_overwrite(map, | |
850 | (vm_map_address_t)dest_address, | |
851 | copy, FALSE /* interruptible XXX */); | |
1c79356b | 852 | |
91447636 A |
853 | if (KERN_SUCCESS != kr) |
854 | vm_map_copy_discard(copy); | |
855 | } | |
856 | return kr; | |
1c79356b A |
857 | } |
858 | ||
859 | /* | |
91447636 A |
860 | * mach_vm_map - |
861 | * Map some range of an object into an address space. | |
862 | * | |
863 | * The object can be one of several types of objects: | |
864 | * NULL - anonymous memory | |
865 | * a named entry - a range within another address space | |
866 | * or a range within a memory object | |
867 | * a whole memory object | |
868 | * | |
1c79356b A |
869 | */ |
870 | kern_return_t | |
91447636 | 871 | mach_vm_map( |
1c79356b | 872 | vm_map_t target_map, |
91447636 A |
873 | mach_vm_offset_t *address, |
874 | mach_vm_size_t initial_size, | |
875 | mach_vm_offset_t mask, | |
1c79356b A |
876 | int flags, |
877 | ipc_port_t port, | |
878 | vm_object_offset_t offset, | |
879 | boolean_t copy, | |
880 | vm_prot_t cur_protection, | |
881 | vm_prot_t max_protection, | |
882 | vm_inherit_t inheritance) | |
883 | { | |
2d21ac55 A |
884 | /* filter out any kernel-only flags */ |
885 | if (flags & ~VM_FLAGS_USER_MAP) | |
886 | return KERN_INVALID_ARGUMENT; | |
1c79356b | 887 | |
2d21ac55 A |
888 | return vm_map_enter_mem_object(target_map, |
889 | address, | |
890 | initial_size, | |
891 | mask, | |
892 | flags, | |
893 | port, | |
894 | offset, | |
895 | copy, | |
896 | cur_protection, | |
897 | max_protection, | |
898 | inheritance); | |
1c79356b A |
899 | } |
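
/*
 * Illustrative sketch (not part of the original file): one of the object
 * types listed above is a named entry. Here a handle is made for an
 * existing range and then mapped a second time through mach_vm_map(),
 * yielding two addresses backed by the same memory. "base" and "size"
 * are assumed to describe an existing allocation.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
named_entry_map_example(mach_vm_address_t base, memory_object_size_t size,
                        mach_vm_address_t *second)
{
    mach_port_t          entry = MACH_PORT_NULL;
    memory_object_size_t entry_size = size;
    kern_return_t        kr;

    kr = mach_make_memory_entry_64(mach_task_self(), &entry_size, base,
                                   VM_PROT_READ | VM_PROT_WRITE,
                                   &entry, MACH_PORT_NULL);
    if (kr != KERN_SUCCESS)
        return kr;

    *second = 0;
    return mach_vm_map(mach_task_self(), second, entry_size, 0,
                       VM_FLAGS_ANYWHERE, entry, 0, FALSE,
                       VM_PROT_READ | VM_PROT_WRITE,
                       VM_PROT_READ | VM_PROT_WRITE,
                       VM_INHERIT_NONE);
}
#endif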
900 | ||
91447636 A |
901 | |
902 | /* legacy interface */ | |
903 | kern_return_t | |
904 | vm_map_64( | |
905 | vm_map_t target_map, | |
906 | vm_offset_t *address, | |
907 | vm_size_t size, | |
908 | vm_offset_t mask, | |
909 | int flags, | |
910 | ipc_port_t port, | |
911 | vm_object_offset_t offset, | |
912 | boolean_t copy, | |
913 | vm_prot_t cur_protection, | |
914 | vm_prot_t max_protection, | |
915 | vm_inherit_t inheritance) | |
916 | { | |
917 | mach_vm_address_t map_addr; | |
918 | mach_vm_size_t map_size; | |
919 | mach_vm_offset_t map_mask; | |
920 | kern_return_t kr; | |
921 | ||
922 | map_addr = (mach_vm_address_t)*address; | |
923 | map_size = (mach_vm_size_t)size; | |
924 | map_mask = (mach_vm_offset_t)mask; | |
925 | ||
926 | kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags, | |
927 | port, offset, copy, | |
928 | cur_protection, max_protection, inheritance); | |
b0d623f7 | 929 | *address = CAST_DOWN(vm_offset_t, map_addr); |
91447636 A |
930 | return kr; |
931 | } | |
932 | ||
1c79356b | 933 | /* temporary, until world build */ |
55e303ae | 934 | kern_return_t |
1c79356b A |
935 | vm_map( |
936 | vm_map_t target_map, | |
937 | vm_offset_t *address, | |
938 | vm_size_t size, | |
939 | vm_offset_t mask, | |
940 | int flags, | |
941 | ipc_port_t port, | |
942 | vm_offset_t offset, | |
943 | boolean_t copy, | |
944 | vm_prot_t cur_protection, | |
945 | vm_prot_t max_protection, | |
946 | vm_inherit_t inheritance) | |
947 | { | |
91447636 A |
948 | mach_vm_address_t map_addr; |
949 | mach_vm_size_t map_size; | |
950 | mach_vm_offset_t map_mask; | |
951 | vm_object_offset_t obj_offset; | |
952 | kern_return_t kr; | |
953 | ||
954 | map_addr = (mach_vm_address_t)*address; | |
955 | map_size = (mach_vm_size_t)size; | |
956 | map_mask = (mach_vm_offset_t)mask; | |
957 | obj_offset = (vm_object_offset_t)offset; | |
958 | ||
959 | kr = mach_vm_map(target_map, &map_addr, map_size, map_mask, flags, | |
960 | port, obj_offset, copy, | |
961 | cur_protection, max_protection, inheritance); | |
b0d623f7 | 962 | *address = CAST_DOWN(vm_offset_t, map_addr); |
91447636 A |
963 | return kr; |
964 | } | |
965 | ||
966 | /* | |
967 | * mach_vm_remap - | |
968 | * Remap a range of memory from one task into another, | |
969 | * to another address range within the same task, or | |
970 | * over top of itself (with altered permissions and/or | |
971 | * as an in-place copy of itself). | |
972 | */ | |
973 | ||
974 | kern_return_t | |
975 | mach_vm_remap( | |
976 | vm_map_t target_map, | |
977 | mach_vm_offset_t *address, | |
978 | mach_vm_size_t size, | |
979 | mach_vm_offset_t mask, | |
060df5ea | 980 | int flags, |
91447636 A |
981 | vm_map_t src_map, |
982 | mach_vm_offset_t memory_address, | |
983 | boolean_t copy, | |
984 | vm_prot_t *cur_protection, | |
985 | vm_prot_t *max_protection, | |
986 | vm_inherit_t inheritance) | |
987 | { | |
988 | vm_map_offset_t map_addr; | |
989 | kern_return_t kr; | |
990 | ||
991 | if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) | |
992 | return KERN_INVALID_ARGUMENT; | |
993 | ||
060df5ea A |
994 | /* filter out any kernel-only flags */ |
995 | if (flags & ~VM_FLAGS_USER_REMAP) | |
996 | return KERN_INVALID_ARGUMENT; | |
997 | ||
91447636 A |
998 | map_addr = (vm_map_offset_t)*address; |
999 | ||
1000 | kr = vm_map_remap(target_map, | |
1001 | &map_addr, | |
1002 | size, | |
1003 | mask, | |
060df5ea | 1004 | flags, |
91447636 A |
1005 | src_map, |
1006 | memory_address, | |
1007 | copy, | |
1008 | cur_protection, | |
1009 | max_protection, | |
1010 | inheritance); | |
1011 | *address = map_addr; | |
1012 | return kr; | |
1c79356b A |
1013 | } |
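
/*
 * Illustrative sketch (not part of the original file): remapping a range
 * onto a new address in the same task creates an alias; with copy=FALSE
 * both mappings reference the same pages. The resulting protections are
 * reported back through cur/max. "base" and "size" are assumed to
 * describe an existing allocation.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
alias_example(mach_vm_address_t base, mach_vm_size_t size,
              mach_vm_address_t *alias)
{
    vm_prot_t cur, max;

    *alias = 0;
    return mach_vm_remap(mach_task_self(), alias, size, 0,
                         VM_FLAGS_ANYWHERE,
                         mach_task_self(), base,
                         FALSE,     /* share, do not copy */
                         &cur, &max, VM_INHERIT_NONE);
}
#endif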
1014 | ||
91447636 A |
1015 | /* |
1016 | * vm_remap - | |
1017 | * Remap a range of memory from one task into another, | |
1018 | * to another address range within the same task, or | |
1019 | * over top of itself (with altered permissions and/or | |
1020 | * as an in-place copy of itself). | |
1021 | * | |
1022 | * The addressability of the source and target address | |
1023 | * range is limited by the size of vm_address_t (in the | |
1024 | * kernel context). | |
1025 | */ | |
1026 | kern_return_t | |
1027 | vm_remap( | |
1028 | vm_map_t target_map, | |
1029 | vm_offset_t *address, | |
1030 | vm_size_t size, | |
1031 | vm_offset_t mask, | |
060df5ea | 1032 | int flags, |
91447636 A |
1033 | vm_map_t src_map, |
1034 | vm_offset_t memory_address, | |
1035 | boolean_t copy, | |
1036 | vm_prot_t *cur_protection, | |
1037 | vm_prot_t *max_protection, | |
1038 | vm_inherit_t inheritance) | |
1039 | { | |
1040 | vm_map_offset_t map_addr; | |
1041 | kern_return_t kr; | |
1042 | ||
1043 | if (VM_MAP_NULL == target_map || VM_MAP_NULL == src_map) | |
1044 | return KERN_INVALID_ARGUMENT; | |
1045 | ||
060df5ea A |
1046 | /* filter out any kernel-only flags */ |
1047 | if (flags & ~VM_FLAGS_USER_REMAP) | |
1048 | return KERN_INVALID_ARGUMENT; | |
1049 | ||
91447636 A |
1050 | map_addr = (vm_map_offset_t)*address; |
1051 | ||
1052 | kr = vm_map_remap(target_map, | |
1053 | &map_addr, | |
1054 | size, | |
1055 | mask, | |
060df5ea | 1056 | flags, |
91447636 A |
1057 | src_map, |
1058 | memory_address, | |
1059 | copy, | |
1060 | cur_protection, | |
1061 | max_protection, | |
1062 | inheritance); | |
1063 | *address = CAST_DOWN(vm_offset_t, map_addr); | |
1064 | return kr; | |
1065 | } | |

/*
 * NOTE: these routines (and this file) will no longer require
 * mach_host_server.h when mach_vm_wire and vm_wire are changed
 * to use ledgers.
 */
#include <mach/mach_host_server.h>
/*
 * mach_vm_wire
 * Specify that the range of the virtual address space
 * of the target task must not cause page faults for
 * the indicated accesses.
 *
 * [ To unwire the pages, specify VM_PROT_NONE. ]
 */
kern_return_t
mach_vm_wire(
    host_priv_t         host_priv,
    vm_map_t            map,
    mach_vm_offset_t    start,
    mach_vm_size_t      size,
    vm_prot_t           access)
{
    kern_return_t   rc;

    if (host_priv == HOST_PRIV_NULL)
        return KERN_INVALID_HOST;

    assert(host_priv == &realhost);

    if (map == VM_MAP_NULL)
        return KERN_INVALID_TASK;

    if (access & ~VM_PROT_ALL || (start + size < start))
        return KERN_INVALID_ARGUMENT;

    if (access != VM_PROT_NONE) {
        rc = vm_map_wire(map, vm_map_trunc_page(start),
                         vm_map_round_page(start+size), access, TRUE);
    } else {
        rc = vm_map_unwire(map, vm_map_trunc_page(start),
                           vm_map_round_page(start+size), TRUE);
    }
    return rc;
}
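
/*
 * Illustrative sketch (not part of the original file): wiring pins pages
 * resident for the given access, and VM_PROT_NONE unwires them, per the
 * comment above. The host_priv port is only available to privileged
 * callers; "host_priv" below is assumed to have been obtained elsewhere.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
wire_example(mach_port_t host_priv, mach_vm_address_t base,
             mach_vm_size_t size)
{
    /* wire: pin the pages resident for read/write access */
    kern_return_t kr = mach_vm_wire(host_priv, mach_task_self(), base, size,
                                    VM_PROT_READ | VM_PROT_WRITE);
    if (kr != KERN_SUCCESS)
        return kr;
    /* ... operate on memory that must not fault ... */
    /* unwire: VM_PROT_NONE releases the wiring */
    return mach_vm_wire(host_priv, mach_task_self(), base, size,
                        VM_PROT_NONE);
}
#endif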
1110 | ||
1111 | /* | |
1112 | * vm_wire - | |
1c79356b A |
1113 | * Specify that the range of the virtual address space |
1114 | * of the target task must not cause page faults for | |
1115 | * the indicated accesses. | |
1116 | * | |
1117 | * [ To unwire the pages, specify VM_PROT_NONE. ] | |
1118 | */ | |
1119 | kern_return_t | |
1120 | vm_wire( | |
1121 | host_priv_t host_priv, | |
1122 | register vm_map_t map, | |
1123 | vm_offset_t start, | |
1124 | vm_size_t size, | |
1125 | vm_prot_t access) | |
1126 | { | |
1127 | kern_return_t rc; | |
1128 | ||
1129 | if (host_priv == HOST_PRIV_NULL) | |
1130 | return KERN_INVALID_HOST; | |
1131 | ||
1132 | assert(host_priv == &realhost); | |
1133 | ||
1134 | if (map == VM_MAP_NULL) | |
1135 | return KERN_INVALID_TASK; | |
1136 | ||
91447636 | 1137 | if ((access & ~VM_PROT_ALL) || (start + size < start)) |
1c79356b A |
1138 | return KERN_INVALID_ARGUMENT; |
1139 | ||
91447636 A |
1140 | if (size == 0) { |
1141 | rc = KERN_SUCCESS; | |
1142 | } else if (access != VM_PROT_NONE) { | |
1143 | rc = vm_map_wire(map, vm_map_trunc_page(start), | |
1144 | vm_map_round_page(start+size), access, TRUE); | |
1c79356b | 1145 | } else { |
91447636 A |
1146 | rc = vm_map_unwire(map, vm_map_trunc_page(start), |
1147 | vm_map_round_page(start+size), TRUE); | |
1c79356b A |
1148 | } |
1149 | return rc; | |
1150 | } | |
1151 | ||
1152 | /* | |
1153 | * vm_msync | |
1154 | * | |
1155 | * Synchronises the memory range specified with its backing store | |
1156 | * image by either flushing or cleaning the contents to the appropriate | |
91447636 A |
1157 | * memory manager. |
1158 | * | |
1159 | * interpretation of sync_flags | |
1160 | * VM_SYNC_INVALIDATE - discard pages, only return precious | |
1161 | * pages to manager. | |
1162 | * | |
1163 | * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS) | |
1164 | * - discard pages, write dirty or precious | |
1165 | * pages back to memory manager. | |
1166 | * | |
1167 | * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS | |
1168 | * - write dirty or precious pages back to | |
1169 | * the memory manager. | |
1170 | * | |
1171 | * VM_SYNC_CONTIGUOUS - does everything normally, but if there | |
1172 | * is a hole in the region, and we would | |
1173 | * have returned KERN_SUCCESS, return | |
1174 | * KERN_INVALID_ADDRESS instead. | |
1175 | * | |
1176 | * RETURNS | |
1177 | * KERN_INVALID_TASK Bad task parameter | |
1178 | * KERN_INVALID_ARGUMENT both sync and async were specified. | |
1179 | * KERN_SUCCESS The usual. | |
1180 | * KERN_INVALID_ADDRESS There was a hole in the region. | |
1181 | */ | |
1182 | ||
1183 | kern_return_t | |
1184 | mach_vm_msync( | |
1185 | vm_map_t map, | |
1186 | mach_vm_address_t address, | |
1187 | mach_vm_size_t size, | |
1188 | vm_sync_t sync_flags) | |
1189 | { | |
1190 | ||
1191 | if (map == VM_MAP_NULL) | |
1192 | return(KERN_INVALID_TASK); | |
1193 | ||
1194 | return vm_map_msync(map, (vm_map_address_t)address, | |
1195 | (vm_map_size_t)size, sync_flags); | |
1196 | } | |
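
/*
 * Illustrative sketch (not part of the original file): synchronously
 * pushing dirty pages of a memory-mapped file back to its pager, per the
 * flag table above. Adding VM_SYNC_INVALIDATE would also discard the
 * cached pages after they are written. "mapped_addr"/"mapped_size" are
 * assumed to describe an existing file mapping.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static kern_return_t
msync_example(mach_vm_address_t mapped_addr, mach_vm_size_t mapped_size)
{
    return mach_vm_msync(mach_task_self(), mapped_addr, mapped_size,
                         VM_SYNC_SYNCHRONOUS);
}
#endif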
1197 | ||
1198 | /* | |
1199 | * vm_msync | |
1200 | * | |
1201 | * Synchronises the memory range specified with its backing store | |
1202 | * image by either flushing or cleaning the contents to the appropriate | |
1203 | * memory manager. | |
1c79356b A |
1204 | * |
1205 | * interpretation of sync_flags | |
1206 | * VM_SYNC_INVALIDATE - discard pages, only return precious | |
1207 | * pages to manager. | |
1208 | * | |
1209 | * VM_SYNC_INVALIDATE & (VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS) | |
1210 | * - discard pages, write dirty or precious | |
1211 | * pages back to memory manager. | |
1212 | * | |
1213 | * VM_SYNC_SYNCHRONOUS | VM_SYNC_ASYNCHRONOUS | |
1214 | * - write dirty or precious pages back to | |
1215 | * the memory manager. | |
1216 | * | |
91447636 A |
1217 | * VM_SYNC_CONTIGUOUS - does everything normally, but if there |
1218 | * is a hole in the region, and we would | |
1219 | * have returned KERN_SUCCESS, return | |
1220 | * KERN_INVALID_ADDRESS instead. | |
1221 | * | |
1222 | * The addressability of the range is limited to that which can | |
1223 | * be described by a vm_address_t. | |
1c79356b A |
1224 | * |
1225 | * RETURNS | |
1226 | * KERN_INVALID_TASK Bad task parameter | |
1227 | * KERN_INVALID_ARGUMENT both sync and async were specified. | |
1228 | * KERN_SUCCESS The usual. | |
91447636 | 1229 | * KERN_INVALID_ADDRESS There was a hole in the region. |
1c79356b A |
1230 | */ |
1231 | ||
1232 | kern_return_t | |
1233 | vm_msync( | |
1234 | vm_map_t map, | |
1235 | vm_address_t address, | |
1236 | vm_size_t size, | |
1237 | vm_sync_t sync_flags) | |
1238 | { | |
1c79356b | 1239 | |
91447636 A |
1240 | if (map == VM_MAP_NULL) |
1241 | return(KERN_INVALID_TASK); | |
1c79356b | 1242 | |
91447636 A |
1243 | return vm_map_msync(map, (vm_map_address_t)address, |
1244 | (vm_map_size_t)size, sync_flags); | |
1245 | } | |


int
vm_toggle_entry_reuse(int toggle, int *old_value)
{
    vm_map_t map = current_map();

    if (toggle == VM_TOGGLE_GETVALUE && old_value != NULL) {
        *old_value = map->disable_vmentry_reuse;
    } else if (toggle == VM_TOGGLE_SET) {
        vm_map_lock(map);
        map->disable_vmentry_reuse = TRUE;
        if (map->first_free == vm_map_to_entry(map)) {
            map->highest_entry_end = vm_map_min(map);
        } else {
            map->highest_entry_end = map->first_free->vme_end;
        }
        vm_map_unlock(map);
    } else if (toggle == VM_TOGGLE_CLEAR) {
        vm_map_lock(map);
        map->disable_vmentry_reuse = FALSE;
        vm_map_unlock(map);
    } else
        return KERN_INVALID_ARGUMENT;

    return KERN_SUCCESS;
}
1273 | ||
91447636 A |
1274 | /* |
1275 | * mach_vm_behavior_set | |
1276 | * | |
1277 | * Sets the paging behavior attribute for the specified range | |
1278 | * in the specified map. | |
1279 | * | |
1280 | * This routine will fail with KERN_INVALID_ADDRESS if any address | |
1281 | * in [start,start+size) is not a valid allocated memory region. | |
1282 | */ | |
1283 | kern_return_t | |
1284 | mach_vm_behavior_set( | |
1285 | vm_map_t map, | |
1286 | mach_vm_offset_t start, | |
1287 | mach_vm_size_t size, | |
1288 | vm_behavior_t new_behavior) | |
1289 | { | |
1290 | if ((map == VM_MAP_NULL) || (start + size < start)) | |
1291 | return(KERN_INVALID_ARGUMENT); | |
1c79356b A |
1292 | |
1293 | if (size == 0) | |
91447636 | 1294 | return KERN_SUCCESS; |
1c79356b | 1295 | |
91447636 A |
1296 | return(vm_map_behavior_set(map, vm_map_trunc_page(start), |
1297 | vm_map_round_page(start+size), new_behavior)); | |
1298 | } | |
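
/*
 * Illustrative sketch (not part of the original file): behavior hints let
 * the pager tune read-ahead and reclamation. VM_BEHAVIOR_SEQUENTIAL suits
 * a one-pass scan of a large mapping; VM_BEHAVIOR_DEFAULT restores the
 * normal policy afterwards.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static void
sequential_scan_example(mach_vm_address_t base, mach_vm_size_t size)
{
    (void)mach_vm_behavior_set(mach_task_self(), base, size,
                               VM_BEHAVIOR_SEQUENTIAL);
    /* ... stream through the mapping once ... */
    (void)mach_vm_behavior_set(mach_task_self(), base, size,
                               VM_BEHAVIOR_DEFAULT);
}
#endif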

/*
 * vm_behavior_set
 *
 * Sets the paging behavior attribute for the specified range
 * in the specified map.
 *
 * This routine will fail with KERN_INVALID_ADDRESS if any address
 * in [start,start+size) is not a valid allocated memory region.
 *
 * This routine is potentially limited in addressability by the
 * use of vm_offset_t (if the map provided is larger than the
 * kernel's).
 */
kern_return_t
vm_behavior_set(
    vm_map_t        map,
    vm_offset_t     start,
    vm_size_t       size,
    vm_behavior_t   new_behavior)
{
    if ((map == VM_MAP_NULL) || (start + size < start))
        return(KERN_INVALID_ARGUMENT);

    if (size == 0)
        return KERN_SUCCESS;

    return(vm_map_behavior_set(map, vm_map_trunc_page(start),
                               vm_map_round_page(start+size), new_behavior));
}

/*
 * mach_vm_region:
 *
 * User call to obtain information about a region in
 * a task's address map. Currently, only one flavor is
 * supported.
 *
 * XXX The reserved and behavior fields cannot be filled
 * in until the vm merge from the IK is completed, and
 * vm_reserve is implemented.
 *
 * XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
mach_vm_region(
    vm_map_t                map,
    mach_vm_offset_t        *address,       /* IN/OUT */
    mach_vm_size_t          *size,          /* OUT */
    vm_region_flavor_t      flavor,         /* IN */
    vm_region_info_t        info,           /* OUT */
    mach_msg_type_number_t  *count,         /* IN/OUT */
    mach_port_t             *object_name)   /* OUT */
{
    vm_map_offset_t     map_addr;
    vm_map_size_t       map_size;
    kern_return_t       kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_offset_t)*address;
    map_size = (vm_map_size_t)*size;

    /* legacy conversion */
    if (VM_REGION_BASIC_INFO == flavor)
        flavor = VM_REGION_BASIC_INFO_64;

    kr = vm_map_region(map,
                       &map_addr, &map_size,
                       flavor, info, count,
                       object_name);

    *address = map_addr;
    *size = map_size;
    return kr;
}
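
/*
 * Illustrative sketch (not part of the original file): walking an address
 * space region by region with the one supported flavor. Each call rounds
 * the input address up to the start of the next region and returns its
 * extent and protections.
 */
#if 0   /* user-space example, not kernel code */
#include <mach/mach.h>
#include <mach/mach_vm.h>

static void
region_walk_example(void)
{
    mach_vm_address_t              addr = 0;
    mach_vm_size_t                 rsize = 0;
    vm_region_basic_info_data_64_t info;
    mach_msg_type_number_t         count;
    mach_port_t                    object_name = MACH_PORT_NULL;

    for (;;) {
        count = VM_REGION_BASIC_INFO_COUNT_64;
        if (mach_vm_region(mach_task_self(), &addr, &rsize,
                           VM_REGION_BASIC_INFO_64,
                           (vm_region_info_t)&info,
                           &count, &object_name) != KERN_SUCCESS)
            break;
        /* ... record [addr, addr + rsize) and info.protection ... */
        addr += rsize;
    }
}
#endif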

/*
 * vm_region_64 and vm_region:
 *
 * User call to obtain information about a region in
 * a task's address map. Currently, only one flavor is
 * supported.
 *
 * XXX The reserved and behavior fields cannot be filled
 * in until the vm merge from the IK is completed, and
 * vm_reserve is implemented.
 *
 * XXX Dependency: syscall_vm_region() also supports only one flavor.
 */

kern_return_t
vm_region_64(
    vm_map_t                map,
    vm_offset_t             *address,       /* IN/OUT */
    vm_size_t               *size,          /* OUT */
    vm_region_flavor_t      flavor,         /* IN */
    vm_region_info_t        info,           /* OUT */
    mach_msg_type_number_t  *count,         /* IN/OUT */
    mach_port_t             *object_name)   /* OUT */
{
    vm_map_offset_t     map_addr;
    vm_map_size_t       map_size;
    kern_return_t       kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_offset_t)*address;
    map_size = (vm_map_size_t)*size;

    /* legacy conversion */
    if (VM_REGION_BASIC_INFO == flavor)
        flavor = VM_REGION_BASIC_INFO_64;

    kr = vm_map_region(map,
                       &map_addr, &map_size,
                       flavor, info, count,
                       object_name);

    *address = CAST_DOWN(vm_offset_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
        return KERN_INVALID_ADDRESS;
    return kr;
}

kern_return_t
vm_region(
    vm_map_t                map,
    vm_address_t            *address,       /* IN/OUT */
    vm_size_t               *size,          /* OUT */
    vm_region_flavor_t      flavor,         /* IN */
    vm_region_info_t        info,           /* OUT */
    mach_msg_type_number_t  *count,         /* IN/OUT */
    mach_port_t             *object_name)   /* OUT */
{
    vm_map_address_t    map_addr;
    vm_map_size_t       map_size;
    kern_return_t       kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region(map,
                       &map_addr, &map_size,
                       flavor, info, count,
                       object_name);

    *address = CAST_DOWN(vm_address_t, map_addr);
    *size = CAST_DOWN(vm_size_t, map_size);

    if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS)
        return KERN_INVALID_ADDRESS;
    return kr;
}

/*
 * mach_vm_region_recurse: A form of mach_vm_region which follows the
 * submaps in a target map
 *
 */
kern_return_t
mach_vm_region_recurse(
    vm_map_t                    map,
    mach_vm_address_t           *address,
    mach_vm_size_t              *size,
    uint32_t                    *depth,
    vm_region_recurse_info_t    info,
    mach_msg_type_number_t      *infoCnt)
{
    vm_map_address_t    map_addr;
    vm_map_size_t       map_size;
    kern_return_t       kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_address_t)*address;
    map_size = (vm_map_size_t)*size;

    kr = vm_map_region_recurse_64(
            map,
            &map_addr,
            &map_size,
            depth,
            (vm_region_submap_info_64_t)info,
            infoCnt);

    *address = map_addr;
    *size = map_size;
    return kr;
}
1498 | ||
1499 | /* | |
91447636 A |
1500 | * vm_region_recurse: A form of vm_region which follows the |
1501 | * submaps in a target map | |
1502 | * | |
1c79356b | 1503 | */ |
91447636 A |
1504 | kern_return_t |
1505 | vm_region_recurse_64( | |
1506 | vm_map_t map, | |
1507 | vm_address_t *address, | |
1508 | vm_size_t *size, | |
1509 | uint32_t *depth, | |
1510 | vm_region_recurse_info_64_t info, | |
1511 | mach_msg_type_number_t *infoCnt) | |
1c79356b | 1512 | { |
91447636 A |
1513 | vm_map_address_t map_addr; |
1514 | vm_map_size_t map_size; | |
1515 | kern_return_t kr; | |
1516 | ||
1517 | if (VM_MAP_NULL == map) | |
1518 | return KERN_INVALID_ARGUMENT; | |
1519 | ||
1520 | map_addr = (vm_map_address_t)*address; | |
1521 | map_size = (vm_map_size_t)*size; | |
1522 | ||
1523 | kr = vm_map_region_recurse_64( | |
1524 | map, | |
1525 | &map_addr, | |
1526 | &map_size, | |
1527 | depth, | |
1528 | (vm_region_submap_info_64_t)info, | |
1529 | infoCnt); | |
1c79356b | 1530 | |
91447636 A |
1531 | *address = CAST_DOWN(vm_address_t, map_addr); |
1532 | *size = CAST_DOWN(vm_size_t, map_size); | |
1533 | ||
1534 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) | |
1535 | return KERN_INVALID_ADDRESS; | |
1536 | return kr; | |
1c79356b A |
1537 | } |
1538 | ||
91447636 A |
1539 | kern_return_t |
1540 | vm_region_recurse( | |
1541 | vm_map_t map, | |
1542 | vm_offset_t *address, /* IN/OUT */ | |
1543 | vm_size_t *size, /* OUT */ | |
1544 | natural_t *depth, /* IN/OUT */ | |
1545 | vm_region_recurse_info_t info32, /* IN/OUT */ | |
1546 | mach_msg_type_number_t *infoCnt) /* IN/OUT */ | |
1547 | { | |
1548 | vm_region_submap_info_data_64_t info64; | |
1549 | vm_region_submap_info_t info; | |
1550 | vm_map_address_t map_addr; | |
1551 | vm_map_size_t map_size; | |
1552 | kern_return_t kr; | |
1553 | ||
1554 | if (VM_MAP_NULL == map || *infoCnt < VM_REGION_SUBMAP_INFO_COUNT) | |
1555 | return KERN_INVALID_ARGUMENT; | |
1556 | ||
1557 | ||
1558 | map_addr = (vm_map_address_t)*address; | |
1559 | map_size = (vm_map_size_t)*size; | |
1560 | info = (vm_region_submap_info_t)info32; | |
1561 | *infoCnt = VM_REGION_SUBMAP_INFO_COUNT_64; | |
1562 | ||
1563 | kr = vm_map_region_recurse_64(map, &map_addr,&map_size, | |
1564 | depth, &info64, infoCnt); | |
1565 | ||
1566 | info->protection = info64.protection; | |
1567 | info->max_protection = info64.max_protection; | |
1568 | info->inheritance = info64.inheritance; | |
1569 | info->offset = (uint32_t)info64.offset; /* trouble-maker */ | |
1570 | info->user_tag = info64.user_tag; | |
1571 | info->pages_resident = info64.pages_resident; | |
1572 | info->pages_shared_now_private = info64.pages_shared_now_private; | |
1573 | info->pages_swapped_out = info64.pages_swapped_out; | |
1574 | info->pages_dirtied = info64.pages_dirtied; | |
1575 | info->ref_count = info64.ref_count; | |
1576 | info->shadow_depth = info64.shadow_depth; | |
1577 | info->external_pager = info64.external_pager; | |
1578 | info->share_mode = info64.share_mode; | |
1579 | info->is_submap = info64.is_submap; | |
1580 | info->behavior = info64.behavior; | |
1581 | info->object_id = info64.object_id; | |
1582 | info->user_wired_count = info64.user_wired_count; | |
1583 | ||
1584 | *address = CAST_DOWN(vm_address_t, map_addr); | |
1585 | *size = CAST_DOWN(vm_size_t, map_size); | |
1586 | *infoCnt = VM_REGION_SUBMAP_INFO_COUNT; | |
1587 | ||
1588 | if (KERN_SUCCESS == kr && map_addr + map_size > VM_MAX_ADDRESS) | |
1589 | return KERN_INVALID_ADDRESS; | |
1590 | return kr; | |
1591 | } | |
1592 | ||
2d21ac55 A |
1593 | kern_return_t |
1594 | mach_vm_purgable_control( | |
1595 | vm_map_t map, | |
1596 | mach_vm_offset_t address, | |
1597 | vm_purgable_t control, | |
1598 | int *state) | |
1599 | { | |
1600 | if (VM_MAP_NULL == map) | |
1601 | return KERN_INVALID_ARGUMENT; | |
1602 | ||
1603 | return vm_map_purgable_control(map, | |
1604 | vm_map_trunc_page(address), | |
1605 | control, | |
1606 | state); | |
1607 | } | |
1608 | ||
91447636 A |
1609 | kern_return_t |
1610 | vm_purgable_control( | |
1611 | vm_map_t map, | |
1612 | vm_offset_t address, | |
1613 | vm_purgable_t control, | |
1614 | int *state) | |
1615 | { | |
1616 | if (VM_MAP_NULL == map) | |
1617 | return KERN_INVALID_ARGUMENT; | |
1618 | ||
1619 | return vm_map_purgable_control(map, | |
1620 | vm_map_trunc_page(address), | |
1621 | control, | |
1622 | state); | |
1623 | } | |
1624 | ||
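/*
 * Editor's illustrative sketch, not part of the original source: typical
 * user-space use of the purgeable-control interface above.  Assumes a
 * region allocated with VM_FLAGS_PURGABLE; on a SET_STATE call the old
 * state is returned through "state".
 */
#if 0 /* example only */
    vm_address_t    buf = 0;
    int             state;

    /* allocate a purgeable region */
    vm_allocate(mach_task_self(), &buf, 1024 * 1024,
                VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);

    /* mark it volatile: the VM may now empty it instead of paging it out */
    state = VM_PURGABLE_VOLATILE;
    vm_purgable_control(mach_task_self(), buf, VM_PURGABLE_SET_STATE, &state);

    /* before reuse, make it nonvolatile and check whether it was emptied */
    state = VM_PURGABLE_NONVOLATILE;
    vm_purgable_control(mach_task_self(), buf, VM_PURGABLE_SET_STATE, &state);
    if (state == VM_PURGABLE_EMPTY) {
        /* contents were purged; the data must be regenerated */
    }
#endif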

/*
 * Ordinarily, the right to allocate CPM is restricted
 * to privileged applications (those that can gain access
 * to the host priv port).  Set this variable to zero if
 * you want to let any application allocate CPM.
 */
unsigned int    vm_allocate_cpm_privileged = 0;

/*
 * Allocate memory in the specified map, with the caveat that
 * the memory is physically contiguous.  This call may fail
 * if the system can't find sufficient contiguous memory.
 * This call may cause or lead to heart-stopping amounts of
 * paging activity.
 *
 * Memory obtained from this call should be freed in the
 * normal way, viz., via vm_deallocate.
 */
kern_return_t
vm_allocate_cpm(
    host_priv_t     host_priv,
    vm_map_t        map,
    vm_address_t    *addr,
    vm_size_t       size,
    int             flags)
{
    vm_map_address_t    map_addr;
    vm_map_size_t       map_size;
    kern_return_t       kr;

    if (vm_allocate_cpm_privileged && HOST_PRIV_NULL == host_priv)
        return KERN_INVALID_HOST;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_addr = (vm_map_address_t)*addr;
    map_size = (vm_map_size_t)size;

    kr = vm_map_enter_cpm(map,
                          &map_addr,
                          map_size,
                          flags);

    *addr = CAST_DOWN(vm_address_t, map_addr);
    return kr;
}
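/*
 * Editor's illustrative sketch, not part of the original source: a
 * privileged caller using vm_allocate_cpm() above.  How "host_priv" is
 * obtained is outside the scope of this sketch, and the size and flags
 * are arbitrary; per the comment above, contiguous memory is freed with
 * plain vm_deallocate().
 */
#if 0 /* example only */
    vm_address_t    cpm_addr = 0;
    kern_return_t   kr;

    kr = vm_allocate_cpm(host_priv, mach_task_self(),
                         &cpm_addr, 4 * vm_page_size, VM_FLAGS_ANYWHERE);
    if (kr == KERN_SUCCESS) {
        /* ... use the physically contiguous buffer ... */
        vm_deallocate(mach_task_self(), cpm_addr, 4 * vm_page_size);
    }
#endif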

kern_return_t
mach_vm_page_query(
    vm_map_t            map,
    mach_vm_offset_t    offset,
    int                 *disposition,
    int                 *ref_count)
{
    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    return vm_map_page_query_internal(map,
                                      vm_map_trunc_page(offset),
                                      disposition, ref_count);
}

kern_return_t
vm_map_page_query(
    vm_map_t        map,
    vm_offset_t     offset,
    int             *disposition,
    int             *ref_count)
{
    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    return vm_map_page_query_internal(map,
                                      vm_map_trunc_page(offset),
                                      disposition, ref_count);
}

kern_return_t
mach_vm_page_info(
    vm_map_t                map,
    mach_vm_address_t       address,
    vm_page_info_flavor_t   flavor,
    vm_page_info_t          info,
    mach_msg_type_number_t  *count)
{
    kern_return_t   kr;

    if (map == VM_MAP_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = vm_map_page_info(map, address, flavor, info, count);
    return kr;
}
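/*
 * Editor's illustrative sketch, not part of the original source:
 * querying one page's disposition from user space through the
 * mach_vm_page_info() MIG routine above with the VM_PAGE_INFO_BASIC
 * flavor.  "some_addr" is a hypothetical mapped address.
 */
#if 0 /* example only */
    vm_page_info_basic_data_t   binfo;
    mach_msg_type_number_t      bcount = VM_PAGE_INFO_BASIC_COUNT;

    if (mach_vm_page_info(mach_task_self(), (mach_vm_address_t)some_addr,
                          VM_PAGE_INFO_BASIC, (vm_page_info_t)&binfo,
                          &bcount) == KERN_SUCCESS) {
        /* binfo.disposition carries VM_PAGE_QUERY_PAGE_* bits */
        if (binfo.disposition & VM_PAGE_QUERY_PAGE_PRESENT) {
            /* the page is resident */
        }
    }
#endif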
/* map a (whole) upl into an address space */
kern_return_t
vm_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_address_t    *dst_addr)
{
    vm_map_offset_t map_addr;
    kern_return_t   kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    kr = vm_map_enter_upl(map, upl, &map_addr);
    *dst_addr = CAST_DOWN(vm_address_t, map_addr);
    return kr;
}

kern_return_t
vm_upl_unmap(
    vm_map_t    map,
    upl_t       upl)
{
    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    return (vm_map_remove_upl(map, upl));
}

/* Retrieve a upl for an object underlying an address range in a map */

kern_return_t
vm_map_get_upl(
    vm_map_t                map,
    vm_map_offset_t         map_offset,
    upl_size_t              *upl_size,
    upl_t                   *upl,
    upl_page_info_array_t   page_list,
    unsigned int            *count,
    int                     *flags,
    int                     force_data_sync)
{
    int             map_flags;
    kern_return_t   kr;

    if (VM_MAP_NULL == map)
        return KERN_INVALID_ARGUMENT;

    map_flags = *flags & ~UPL_NOZEROFILL;
    if (force_data_sync)
        map_flags |= UPL_FORCE_DATA_SYNC;

    kr = vm_map_create_upl(map,
                           map_offset,
                           upl_size,
                           upl,
                           page_list,
                           count,
                           &map_flags);

    *flags = (map_flags & ~UPL_FORCE_DATA_SYNC);
    return kr;
}
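/*
 * Editor's illustrative kernel-side sketch, not part of the original
 * source: the usual UPL life cycle around vm_map_get_upl()/vm_upl_map()
 * above -- create, map, access, unmap, commit.  "map" and "uaddr" are
 * assumed to be a valid user map and a mapped address; the single-page
 * sizing and UPL_COPYOUT_FROM flag are simplifying assumptions.
 */
#if 0 /* example only */
    upl_t           upl = NULL;
    upl_size_t      upl_size = PAGE_SIZE;
    upl_page_info_t pl[1];
    unsigned int    pl_count = 1;
    int             upl_flags = UPL_COPYOUT_FROM;   /* reading from the map */
    vm_address_t    kaddr;
    kern_return_t   kr;

    kr = vm_map_get_upl(map, vm_map_trunc_page(uaddr), &upl_size,
                        &upl, pl, &pl_count, &upl_flags, 0);
    if (kr == KERN_SUCCESS) {
        vm_upl_map(kernel_map, upl, &kaddr);    /* window into kernel_map */
        /* ... access the wired pages through kaddr ... */
        vm_upl_unmap(kernel_map, upl);
        upl_commit(upl, pl, pl_count);          /* release the pages */
        upl_deallocate(upl);
    }
#endif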

/*
 *  mach_make_memory_entry_64
 *
 *  Think of it as a two-stage vm_remap() operation.  First
 *  you get a handle.  Second, you map that handle somewhere
 *  else.  Rather than doing it all at once (and without
 *  needing access to the other whole map).
 */

kern_return_t
mach_make_memory_entry_64(
    vm_map_t                target_map,
    memory_object_size_t    *size,
    memory_object_offset_t  offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_handle)
{
    vm_map_version_t    version;
    vm_named_entry_t    parent_entry;
    vm_named_entry_t    user_entry;
    ipc_port_t          user_handle;
    kern_return_t       kr;
    vm_map_t            real_map;

    /* needed for call to vm_map_lookup_locked */
    boolean_t                       wired;
    vm_object_offset_t              obj_off;
    vm_prot_t                       prot;
    struct vm_object_fault_info     fault_info;
    vm_object_t                     object;
    vm_object_t                     shadow_object;

    /* needed for direct map entry manipulation */
    vm_map_entry_t      map_entry;
    vm_map_entry_t      next_entry;
    vm_map_t            local_map;
    vm_map_t            original_map = target_map;
    vm_map_size_t       total_size;
    vm_map_size_t       map_size;
    vm_map_offset_t     map_offset;
    vm_map_offset_t     local_offset;
    vm_object_size_t    mappable_size;

    unsigned int        access;
    vm_prot_t           protections;
    vm_prot_t           original_protections, mask_protections;
    unsigned int        wimg_mode;

    boolean_t           force_shadow = FALSE;

    if (((permission & 0x00FF0000) &
         ~(MAP_MEM_ONLY |
           MAP_MEM_NAMED_CREATE |
           MAP_MEM_PURGABLE |
           MAP_MEM_NAMED_REUSE))) {
        /*
         * Unknown flag: reject for forward compatibility.
         */
        return KERN_INVALID_VALUE;
    }

    if (parent_handle != IP_NULL &&
        ip_kotype(parent_handle) == IKOT_NAMED_ENTRY) {
        parent_entry = (vm_named_entry_t) parent_handle->ip_kobject;
    } else {
        parent_entry = NULL;
    }

    original_protections = permission & VM_PROT_ALL;
    protections = original_protections;
    mask_protections = permission & VM_PROT_IS_MASK;
    access = GET_MAP_MEM(permission);

    user_handle = IP_NULL;
    user_entry = NULL;

    map_offset = vm_map_trunc_page(offset);
    map_size = vm_map_round_page(*size);

    if (permission & MAP_MEM_ONLY) {
        boolean_t   parent_is_object;

        if (parent_entry == NULL) {
            return KERN_INVALID_ARGUMENT;
        }

        parent_is_object = !(parent_entry->is_sub_map || parent_entry->is_pager);
        object = parent_entry->backing.object;
        if (parent_is_object && object != VM_OBJECT_NULL)
            wimg_mode = object->wimg_bits;
        else
            wimg_mode = VM_WIMG_USE_DEFAULT;
        if ((access != GET_MAP_MEM(parent_entry->protection)) &&
            !(parent_entry->protection & VM_PROT_WRITE)) {
            return KERN_INVALID_RIGHT;
        }
        if (access == MAP_MEM_IO) {
            SET_MAP_MEM(access, parent_entry->protection);
            wimg_mode = VM_WIMG_IO;
        } else if (access == MAP_MEM_COPYBACK) {
            SET_MAP_MEM(access, parent_entry->protection);
            wimg_mode = VM_WIMG_USE_DEFAULT;
        } else if (access == MAP_MEM_WTHRU) {
            SET_MAP_MEM(access, parent_entry->protection);
            wimg_mode = VM_WIMG_WTHRU;
        } else if (access == MAP_MEM_WCOMB) {
            SET_MAP_MEM(access, parent_entry->protection);
            wimg_mode = VM_WIMG_WCOMB;
        }
        if (parent_is_object && object &&
            (access != MAP_MEM_NOOP) &&
            (!(object->nophyscache))) {

            if (object->wimg_bits != wimg_mode) {
                vm_object_lock(object);
                vm_object_change_wimg_mode(object, wimg_mode);
                vm_object_unlock(object);
            }
        }
        if (object_handle)
            *object_handle = IP_NULL;
        return KERN_SUCCESS;
    }

    if (permission & MAP_MEM_NAMED_CREATE) {
        kr = mach_memory_entry_allocate(&user_entry, &user_handle);
        if (kr != KERN_SUCCESS) {
            return KERN_FAILURE;
        }

        /*
         * Force the creation of the VM object now.
         */
        if (map_size > (vm_map_size_t) ANON_MAX_SIZE) {
            /*
             * LP64todo - for now, we can only allocate 4GB-4096
             * internal objects because the default pager can't
             * page bigger ones.  Remove this when it can.
             */
            kr = KERN_FAILURE;
            goto make_mem_done;
        }

        object = vm_object_allocate(map_size);
        assert(object != VM_OBJECT_NULL);

        if (permission & MAP_MEM_PURGABLE) {
            if (! (permission & VM_PROT_WRITE)) {
                /* if we can't write, we can't purge */
                vm_object_deallocate(object);
                kr = KERN_INVALID_ARGUMENT;
                goto make_mem_done;
            }
            object->purgable = VM_PURGABLE_NONVOLATILE;
        }

        /*
         * The VM object is brand new and nobody else knows about it,
         * so we don't need to lock it.
         */

        wimg_mode = object->wimg_bits;
        if (access == MAP_MEM_IO) {
            wimg_mode = VM_WIMG_IO;
        } else if (access == MAP_MEM_COPYBACK) {
            wimg_mode = VM_WIMG_USE_DEFAULT;
        } else if (access == MAP_MEM_WTHRU) {
            wimg_mode = VM_WIMG_WTHRU;
        } else if (access == MAP_MEM_WCOMB) {
            wimg_mode = VM_WIMG_WCOMB;
        }
        if (access != MAP_MEM_NOOP) {
            object->wimg_bits = wimg_mode;
        }
        /* the object has no pages, so no WIMG bits to update here */

        /*
         * XXX
         * We use this path when we want to make sure that
         * nobody messes with the object (coalesce, for
         * example) before we map it.
         * We might want to use these objects for transposition via
         * vm_object_transpose() too, so we don't want any copy or
         * shadow objects either...
         */
        object->copy_strategy = MEMORY_OBJECT_COPY_NONE;

        user_entry->backing.object = object;
        user_entry->internal = TRUE;
        user_entry->is_sub_map = FALSE;
        user_entry->is_pager = FALSE;
        user_entry->offset = 0;
        user_entry->protection = protections;
        SET_MAP_MEM(access, user_entry->protection);
        user_entry->size = map_size;

        /* user_object pager and internal fields are not used */
        /* when the object field is filled in. */

        *size = CAST_DOWN(vm_size_t, map_size);
        *object_handle = user_handle;
        return KERN_SUCCESS;
    }

    if (parent_entry == NULL ||
        (permission & MAP_MEM_NAMED_REUSE)) {

        /* Create a named object based on address range within the task map */
        /* Go find the object at given address */

        if (target_map == VM_MAP_NULL) {
            return KERN_INVALID_TASK;
        }

redo_lookup:
        protections = original_protections;
        vm_map_lock_read(target_map);

        /* get the object associated with the target address */
        /* note we check the permission of the range against */
        /* that requested by the caller */

        kr = vm_map_lookup_locked(&target_map, map_offset,
                                  protections | mask_protections,
                                  OBJECT_LOCK_EXCLUSIVE, &version,
                                  &object, &obj_off, &prot, &wired,
                                  &fault_info,
                                  &real_map);
        if (kr != KERN_SUCCESS) {
            vm_map_unlock_read(target_map);
            goto make_mem_done;
        }
        if (mask_protections) {
            /*
             * The caller asked us to use the "protections" as
             * a mask, so restrict "protections" to what this
             * mapping actually allows.
             */
            protections &= prot;
        }
        if (((prot & protections) != protections)
            || (object == kernel_object)) {
            kr = KERN_INVALID_RIGHT;
            vm_object_unlock(object);
            vm_map_unlock_read(target_map);
            if (real_map != target_map)
                vm_map_unlock_read(real_map);
            if (object == kernel_object) {
                printf("Warning: Attempt to create a named"
                       " entry from the kernel_object\n");
            }
            goto make_mem_done;
        }

        /* We have an object, now check to see if this object */
        /* is suitable.  If not, create a shadow and share that */

        /*
         * We have to unlock the VM object to avoid deadlocking with
         * a VM map lock (the lock ordering is map, the object), if we
         * need to modify the VM map to create a shadow object.  Since
         * we might release the VM map lock below anyway, we have
         * to release the VM map lock now.
         * XXX FBDP There must be a way to avoid this double lookup...
         *
         * Take an extra reference on the VM object to make sure it's
         * not going to disappear.
         */
        vm_object_reference_locked(object); /* extra ref to hold obj */
        vm_object_unlock(object);

        local_map = original_map;
        local_offset = map_offset;
        if (target_map != local_map) {
            vm_map_unlock_read(target_map);
            if (real_map != target_map)
                vm_map_unlock_read(real_map);
            vm_map_lock_read(local_map);
            target_map = local_map;
            real_map = local_map;
        }
        while (TRUE) {
            if (!vm_map_lookup_entry(local_map,
                                     local_offset, &map_entry)) {
                kr = KERN_INVALID_ARGUMENT;
                vm_map_unlock_read(target_map);
                if (real_map != target_map)
                    vm_map_unlock_read(real_map);
                vm_object_deallocate(object); /* release extra ref */
                object = VM_OBJECT_NULL;
                goto make_mem_done;
            }
            if (!(map_entry->is_sub_map)) {
                if (map_entry->object.vm_object != object) {
                    kr = KERN_INVALID_ARGUMENT;
                    vm_map_unlock_read(target_map);
                    if (real_map != target_map)
                        vm_map_unlock_read(real_map);
                    vm_object_deallocate(object); /* release extra ref */
                    object = VM_OBJECT_NULL;
                    goto make_mem_done;
                }
                break;
            } else {
                vm_map_t    tmap;
                tmap = local_map;
                local_map = map_entry->object.sub_map;

                vm_map_lock_read(local_map);
                vm_map_unlock_read(tmap);
                target_map = local_map;
                real_map = local_map;
                local_offset = local_offset - map_entry->vme_start;
                local_offset += map_entry->offset;
            }
        }

        /*
         * We found the VM map entry, lock the VM object again.
         */
        vm_object_lock(object);
        if (map_entry->wired_count) {
            /* JMM - The check below should be reworked instead. */
            object->true_share = TRUE;
        }
        if (mask_protections) {
            /*
             * The caller asked us to use the "protections" as
             * a mask, so restrict "protections" to what this
             * mapping actually allows.
             */
            protections &= map_entry->max_protection;
        }
        if (((map_entry->max_protection) & protections) != protections) {
            kr = KERN_INVALID_RIGHT;
            vm_object_unlock(object);
            vm_map_unlock_read(target_map);
            if (real_map != target_map)
                vm_map_unlock_read(real_map);
            vm_object_deallocate(object);
            object = VM_OBJECT_NULL;
            goto make_mem_done;
        }

        mappable_size = fault_info.hi_offset - obj_off;
        total_size = map_entry->vme_end - map_entry->vme_start;
        if (map_size > mappable_size) {
            /* try to extend mappable size if the entries */
            /* following are from the same object and are */
            /* compatible */
            next_entry = map_entry->vme_next;
            /* lets see if the next map entry is still */
            /* pointing at this object and is contiguous */
            while (map_size > mappable_size) {
                if ((next_entry->object.vm_object == object) &&
                    (next_entry->vme_start ==
                     next_entry->vme_prev->vme_end) &&
                    (next_entry->offset ==
                     next_entry->vme_prev->offset +
                     (next_entry->vme_prev->vme_end -
                      next_entry->vme_prev->vme_start))) {
                    if (mask_protections) {
                        /*
                         * The caller asked us to use
                         * the "protections" as a mask,
                         * so restrict "protections" to
                         * what this mapping actually
                         * allows.
                         */
                        protections &= next_entry->max_protection;
                    }
                    if (((next_entry->max_protection)
                         & protections) != protections) {
                        break;
                    }
                    if (next_entry->needs_copy !=
                        map_entry->needs_copy)
                        break;
                    mappable_size += next_entry->vme_end
                        - next_entry->vme_start;
                    total_size += next_entry->vme_end
                        - next_entry->vme_start;
                    next_entry = next_entry->vme_next;
                } else {
                    break;
                }

            }
        }

#if !CONFIG_EMBEDDED
        if (vm_map_entry_should_cow_for_true_share(map_entry) &&
            object->vo_size > map_size &&
            map_size != 0) {
            /*
             * Set up the targeted range for copy-on-write to
             * limit the impact of "true_share"/"copy_delay" to
             * that range instead of the entire VM object...
             */

            vm_object_unlock(object);
            if (vm_map_lock_read_to_write(target_map)) {
                vm_object_deallocate(object);
                target_map = original_map;
                goto redo_lookup;
            }

            vm_map_clip_start(target_map, map_entry, vm_map_trunc_page(offset));
            vm_map_clip_end(target_map, map_entry, vm_map_round_page(offset) + map_size);
            force_shadow = TRUE;

            map_size = map_entry->vme_end - map_entry->vme_start;
            total_size = map_size;

            vm_map_lock_write_to_read(target_map);
            vm_object_lock(object);
        }
#endif /* !CONFIG_EMBEDDED */

        if (object->internal) {
            /* vm_map_lookup_locked will create a shadow if   */
            /* needs_copy is set but does not check for the   */
            /* other two conditions shown. It is important to */
            /* set up an object which will not be pulled from */
            /* under us.  */

            if (force_shadow ||
                ((map_entry->needs_copy ||
                  object->shadowed ||
                  (object->vo_size > total_size)) &&
                 !object->true_share)) {
                /*
                 * We have to unlock the VM object before
                 * trying to upgrade the VM map lock, to
                 * honor lock ordering (map then object).
                 * Otherwise, we would deadlock if another
                 * thread holds a read lock on the VM map and
                 * is trying to acquire the VM object's lock.
                 * We still hold an extra reference on the
                 * VM object, guaranteeing that it won't
                 * disappear.
                 */
                vm_object_unlock(object);

                if (vm_map_lock_read_to_write(target_map)) {
                    /*
                     * We couldn't upgrade our VM map lock
                     * from "read" to "write" and we lost
                     * our "read" lock.
                     * Start all over again...
                     */
                    vm_object_deallocate(object); /* extra ref */
                    target_map = original_map;
                    goto redo_lookup;
                }
                vm_object_lock(object);

                /*
                 * JMM - We need to avoid coming here when the object
                 * is wired by anybody, not just the current map.  Why
                 * couldn't we use the standard vm_object_copy_quickly()
                 * approach here?
                 */

                /* create a shadow object */
                vm_object_shadow(&map_entry->object.vm_object,
                                 &map_entry->offset, total_size);
                shadow_object = map_entry->object.vm_object;
                vm_object_unlock(object);

                prot = map_entry->protection & ~VM_PROT_WRITE;

                if (override_nx(target_map, map_entry->alias) && prot)
                    prot |= VM_PROT_EXECUTE;

                vm_object_pmap_protect(
                    object, map_entry->offset,
                    total_size,
                    ((map_entry->is_shared
                      || target_map->mapped)
                     ? PMAP_NULL :
                     target_map->pmap),
                    map_entry->vme_start,
                    prot);
                total_size -= (map_entry->vme_end
                               - map_entry->vme_start);
                next_entry = map_entry->vme_next;
                map_entry->needs_copy = FALSE;

                vm_object_lock(shadow_object);
                while (total_size) {
                    if (next_entry->object.vm_object == object) {
                        vm_object_reference_locked(shadow_object);
                        next_entry->object.vm_object
                            = shadow_object;
                        vm_object_deallocate(object);
                        next_entry->offset
                            = next_entry->vme_prev->offset +
                            (next_entry->vme_prev->vme_end
                             - next_entry->vme_prev->vme_start);
                        next_entry->needs_copy = FALSE;
                    } else {
                        panic("mach_make_memory_entry_64:"
                              " map entries out of sync\n");
                    }
                    total_size -=
                        next_entry->vme_end
                        - next_entry->vme_start;
                    next_entry = next_entry->vme_next;
                }

                /*
                 * Transfer our extra reference to the
                 * shadow object.
                 */
                vm_object_reference_locked(shadow_object);
                vm_object_deallocate(object); /* extra ref */
                object = shadow_object;

                obj_off = (local_offset - map_entry->vme_start)
                    + map_entry->offset;

                vm_map_lock_write_to_read(target_map);
            }
        }

        /* note: in the future we can (if necessary) allow for  */
        /* memory object lists, this will better support        */
        /* fragmentation, but is it necessary?  The user should */
        /* be encouraged to create address space oriented       */
        /* shared objects from CLEAN memory regions which have  */
        /* a known and defined history.  i.e. no inheritance    */
        /* share, make this call before making the region the   */
        /* target of ipc's, etc.  The code above, protecting    */
        /* against delayed copy, etc. is mostly defensive.      */

        wimg_mode = object->wimg_bits;
        if (!(object->nophyscache)) {
            if (access == MAP_MEM_IO) {
                wimg_mode = VM_WIMG_IO;
            } else if (access == MAP_MEM_COPYBACK) {
                wimg_mode = VM_WIMG_USE_DEFAULT;
            } else if (access == MAP_MEM_WTHRU) {
                wimg_mode = VM_WIMG_WTHRU;
            } else if (access == MAP_MEM_WCOMB) {
                wimg_mode = VM_WIMG_WCOMB;
            }
        }

        object->true_share = TRUE;
        if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
            object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;

        /*
         * The memory entry now points to this VM object and we
         * need to hold a reference on the VM object.  Use the extra
         * reference we took earlier to keep the object alive when we
         * had to unlock it.
         */

        vm_map_unlock_read(target_map);
        if (real_map != target_map)
            vm_map_unlock_read(real_map);

        if (object->wimg_bits != wimg_mode)
            vm_object_change_wimg_mode(object, wimg_mode);

        /* the size of mapped entry that overlaps with our region */
        /* which is targeted for share.                           */
        /* (entry_end - entry_start) -                            */
        /*                   offset of our beg addr within entry  */
        /* it corresponds to this:                                */

        if (map_size > mappable_size)
            map_size = mappable_size;

        if (permission & MAP_MEM_NAMED_REUSE) {
            /*
             * Compare what we got with the "parent_entry".
             * If they match, re-use the "parent_entry" instead
             * of creating a new one.
             */
            if (parent_entry != NULL &&
                parent_entry->backing.object == object &&
                parent_entry->internal == object->internal &&
                parent_entry->is_sub_map == FALSE &&
                parent_entry->is_pager == FALSE &&
                parent_entry->offset == obj_off &&
                parent_entry->protection == protections &&
                parent_entry->size == map_size) {
                /*
                 * We have a match: re-use "parent_entry".
                 */
                /* release our extra reference on object */
                vm_object_unlock(object);
                vm_object_deallocate(object);
                /* parent_entry->ref_count++; XXX ? */
                /* Get an extra send-right on handle */
                ipc_port_copy_send(parent_handle);
                *object_handle = parent_handle;
                return KERN_SUCCESS;
            } else {
                /*
                 * No match: we need to create a new entry.
                 * fall through...
                 */
            }
        }

        vm_object_unlock(object);
        if (mach_memory_entry_allocate(&user_entry, &user_handle)
            != KERN_SUCCESS) {
            /* release our unused reference on the object */
            vm_object_deallocate(object);
            return KERN_FAILURE;
        }

        user_entry->backing.object = object;
        user_entry->internal = object->internal;
        user_entry->is_sub_map = FALSE;
        user_entry->is_pager = FALSE;
        user_entry->offset = obj_off;
        user_entry->protection = protections;
        SET_MAP_MEM(GET_MAP_MEM(permission), user_entry->protection);
        user_entry->size = map_size;

        /* user_object pager and internal fields are not used */
        /* when the object field is filled in. */

        *size = CAST_DOWN(vm_size_t, map_size);
        *object_handle = user_handle;
        return KERN_SUCCESS;

    } else {
        /* The new object will be based on an existing named object */

        if (parent_entry == NULL) {
            kr = KERN_INVALID_ARGUMENT;
            goto make_mem_done;
        }
        if ((offset + map_size) > parent_entry->size) {
            kr = KERN_INVALID_ARGUMENT;
            goto make_mem_done;
        }

        if (mask_protections) {
            /*
             * The caller asked us to use the "protections" as
             * a mask, so restrict "protections" to what this
             * mapping actually allows.
             */
            protections &= parent_entry->protection;
        }
        if ((protections & parent_entry->protection) != protections) {
            kr = KERN_PROTECTION_FAILURE;
            goto make_mem_done;
        }

        if (mach_memory_entry_allocate(&user_entry, &user_handle)
            != KERN_SUCCESS) {
            kr = KERN_FAILURE;
            goto make_mem_done;
        }

        user_entry->size = map_size;
        user_entry->offset = parent_entry->offset + map_offset;
        user_entry->is_sub_map = parent_entry->is_sub_map;
        user_entry->is_pager = parent_entry->is_pager;
        user_entry->internal = parent_entry->internal;
        user_entry->protection = protections;

        if (access != MAP_MEM_NOOP) {
            SET_MAP_MEM(access, user_entry->protection);
        }

        if (parent_entry->is_sub_map) {
            user_entry->backing.map = parent_entry->backing.map;
            vm_map_lock(user_entry->backing.map);
            user_entry->backing.map->ref_count++;
            vm_map_unlock(user_entry->backing.map);
        }
        else if (parent_entry->is_pager) {
            user_entry->backing.pager = parent_entry->backing.pager;
            /* JMM - don't we need a reference here? */
        } else {
            object = parent_entry->backing.object;
            assert(object != VM_OBJECT_NULL);
            user_entry->backing.object = object;
            /* we now point to this object, hold on */
            vm_object_reference(object);
            vm_object_lock(object);
            object->true_share = TRUE;
            if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
                object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
            vm_object_unlock(object);
        }
        *size = CAST_DOWN(vm_size_t, map_size);
        *object_handle = user_handle;
        return KERN_SUCCESS;
    }

make_mem_done:
    if (user_handle != IP_NULL) {
        /*
         * Releasing "user_handle" causes the kernel object
         * associated with it ("user_entry" here) to also be
         * released and freed.
         */
        mach_memory_entry_port_release(user_handle);
    }
    return kr;
}

kern_return_t
_mach_make_memory_entry(
    vm_map_t                target_map,
    memory_object_size_t    *size,
    memory_object_offset_t  offset,
    vm_prot_t               permission,
    ipc_port_t              *object_handle,
    ipc_port_t              parent_entry)
{
    memory_object_size_t    mo_size;
    kern_return_t           kr;

    mo_size = (memory_object_size_t)*size;
    kr = mach_make_memory_entry_64(target_map, &mo_size,
            (memory_object_offset_t)offset, permission, object_handle,
            parent_entry);
    *size = mo_size;
    return kr;
}

kern_return_t
mach_make_memory_entry(
    vm_map_t        target_map,
    vm_size_t       *size,
    vm_offset_t     offset,
    vm_prot_t       permission,
    ipc_port_t      *object_handle,
    ipc_port_t      parent_entry)
{
    memory_object_size_t    mo_size;
    kern_return_t           kr;

    mo_size = (memory_object_size_t)*size;
    kr = mach_make_memory_entry_64(target_map, &mo_size,
            (memory_object_offset_t)offset, permission, object_handle,
            parent_entry);
    *size = CAST_DOWN(vm_size_t, mo_size);
    return kr;
}
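/*
 * Editor's illustrative user-space sketch, not part of the original
 * source: the two-stage operation described above
 * mach_make_memory_entry_64().  First obtain a handle on a range of the
 * caller's address space, then map that handle at a second address;
 * "src" is assumed to be a valid page-aligned address in the caller's map.
 */
#if 0 /* example only */
    memory_object_size_t    entry_size = vm_page_size;
    mach_port_t             entry_handle = MACH_PORT_NULL;
    mach_vm_address_t       dst = 0;
    kern_return_t           kr;

    /* stage 1: get a handle on the memory backing "src" */
    kr = mach_make_memory_entry_64(mach_task_self(), &entry_size,
                                   (memory_object_offset_t)src,
                                   VM_PROT_READ | VM_PROT_WRITE,
                                   &entry_handle, MACH_PORT_NULL);

    /* stage 2: map the handle elsewhere; both mappings share the pages */
    if (kr == KERN_SUCCESS) {
        kr = mach_vm_map(mach_task_self(), &dst, entry_size, 0,
                         VM_FLAGS_ANYWHERE, entry_handle, 0, FALSE,
                         VM_PROT_READ | VM_PROT_WRITE,
                         VM_PROT_READ | VM_PROT_WRITE,
                         VM_INHERIT_NONE);
    }
#endif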

/*
 *  task_wire
 *
 *  Set or clear the map's wiring_required flag.  This flag, if set,
 *  will cause all future virtual memory allocation to allocate
 *  user wired memory.  Unwiring pages wired down as a result of
 *  this routine is done with the vm_wire interface.
 */
kern_return_t
task_wire(
    vm_map_t    map,
    boolean_t   must_wire)
{
    if (map == VM_MAP_NULL)
        return(KERN_INVALID_ARGUMENT);

    if (must_wire)
        map->wiring_required = TRUE;
    else
        map->wiring_required = FALSE;

    return(KERN_SUCCESS);
}
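/*
 * Editor's illustrative kernel-side sketch, not part of the original
 * source: bracketing a set of allocations with task_wire() so that they
 * come back user-wired.  Whether and how this routine is reachable from
 * user space (and with what privilege) is not shown here.
 */
#if 0 /* example only */
    vm_map_t my_map = current_map();

    task_wire(my_map, TRUE);    /* future allocations are user-wired */
    /* ... vm_allocate() calls made here return wired memory ... */
    task_wire(my_map, FALSE);   /* back to normal pageable allocations */
#endif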

__private_extern__ kern_return_t
mach_memory_entry_allocate(
    vm_named_entry_t    *user_entry_p,
    ipc_port_t          *user_handle_p)
{
    vm_named_entry_t    user_entry;
    ipc_port_t          user_handle;
    ipc_port_t          previous;

    user_entry = (vm_named_entry_t) kalloc(sizeof *user_entry);
    if (user_entry == NULL)
        return KERN_FAILURE;

    named_entry_lock_init(user_entry);

    user_handle = ipc_port_alloc_kernel();
    if (user_handle == IP_NULL) {
        kfree(user_entry, sizeof *user_entry);
        return KERN_FAILURE;
    }
    ip_lock(user_handle);

    /* make a sonce right */
    user_handle->ip_sorights++;
    ip_reference(user_handle);

    user_handle->ip_destination = IP_NULL;
    user_handle->ip_receiver_name = MACH_PORT_NULL;
    user_handle->ip_receiver = ipc_space_kernel;

    /* make a send right */
    user_handle->ip_mscount++;
    user_handle->ip_srights++;
    ip_reference(user_handle);

    ipc_port_nsrequest(user_handle, 1, user_handle, &previous);
    /* nsrequest unlocks user_handle */

    user_entry->backing.pager = NULL;
    user_entry->is_sub_map = FALSE;
    user_entry->is_pager = FALSE;
    user_entry->internal = FALSE;
    user_entry->size = 0;
    user_entry->offset = 0;
    user_entry->protection = VM_PROT_NONE;
    user_entry->ref_count = 1;

    ipc_kobject_set(user_handle, (ipc_kobject_t) user_entry,
                    IKOT_NAMED_ENTRY);

    *user_entry_p = user_entry;
    *user_handle_p = user_handle;

    return KERN_SUCCESS;
}

/*
 *  mach_memory_object_memory_entry_64
 *
 *  Create a named entry backed by the provided pager.
 *
 *  JMM - we need to hold a reference on the pager -
 *  and release it when the named entry is destroyed.
 */
kern_return_t
mach_memory_object_memory_entry_64(
    host_t              host,
    boolean_t           internal,
    vm_object_offset_t  size,
    vm_prot_t           permission,
    memory_object_t     pager,
    ipc_port_t          *entry_handle)
{
    unsigned int        access;
    vm_named_entry_t    user_entry;
    ipc_port_t          user_handle;

    if (host == HOST_NULL)
        return(KERN_INVALID_HOST);

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        return KERN_FAILURE;
    }

    user_entry->backing.pager = pager;
    user_entry->size = size;
    user_entry->offset = 0;
    user_entry->protection = permission & VM_PROT_ALL;
    access = GET_MAP_MEM(permission);
    SET_MAP_MEM(access, user_entry->protection);
    user_entry->internal = internal;
    user_entry->is_sub_map = FALSE;
    user_entry->is_pager = TRUE;
    assert(user_entry->ref_count == 1);

    *entry_handle = user_handle;
    return KERN_SUCCESS;
}

kern_return_t
mach_memory_object_memory_entry(
    host_t          host,
    boolean_t       internal,
    vm_size_t       size,
    vm_prot_t       permission,
    memory_object_t pager,
    ipc_port_t      *entry_handle)
{
    return mach_memory_object_memory_entry_64( host, internal,
        (vm_object_offset_t)size, permission, pager, entry_handle);
}


kern_return_t
mach_memory_entry_purgable_control(
    ipc_port_t      entry_port,
    vm_purgable_t   control,
    int             *state)
{
    kern_return_t       kr;
    vm_named_entry_t    mem_entry;
    vm_object_t         object;

    if (entry_port == IP_NULL ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }
    if (control != VM_PURGABLE_SET_STATE &&
        control != VM_PURGABLE_GET_STATE)
        return(KERN_INVALID_ARGUMENT);

    if (control == VM_PURGABLE_SET_STATE &&
        (((*state & ~(VM_PURGABLE_ALL_MASKS)) != 0) ||
         ((*state & VM_PURGABLE_STATE_MASK) > VM_PURGABLE_STATE_MASK)))
        return(KERN_INVALID_ARGUMENT);

    mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map || mem_entry->is_pager) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    /* check that named entry covers entire object ? */
    if (mem_entry->offset != 0 || object->vo_size != mem_entry->size) {
        vm_object_unlock(object);
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    named_entry_unlock(mem_entry);

    kr = vm_object_purgable_control(object, control, state);

    vm_object_unlock(object);

    return kr;
}
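/*
 * Editor's illustrative sketch, not part of the original source: creating
 * a purgeable named entry with MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE
 * (see mach_make_memory_entry_64() above) and driving its state through
 * the entry port.  The size is arbitrary and error handling is elided;
 * reaching this control from user space is assumed to go through the
 * corresponding MIG routine.
 */
#if 0 /* example only */
    memory_object_size_t    psize = 16 * vm_page_size;
    mach_port_t             pentry = MACH_PORT_NULL;
    int                     pstate = VM_PURGABLE_VOLATILE;

    mach_make_memory_entry_64(mach_task_self(), &psize, 0,
                              MAP_MEM_NAMED_CREATE | MAP_MEM_PURGABLE |
                              VM_PROT_READ | VM_PROT_WRITE,
                              &pentry, MACH_PORT_NULL);
    mach_memory_entry_purgable_control(pentry, VM_PURGABLE_SET_STATE,
                                       &pstate);
#endif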

/*
 * mach_memory_entry_port_release:
 *
 * Release a send right on a named entry port.  This is the correct
 * way to destroy a named entry.  When the last right on the port is
 * released, ipc_kobject_destroy() will call mach_destroy_memory_entry().
 */
void
mach_memory_entry_port_release(
    ipc_port_t  port)
{
    assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
    ipc_port_release_send(port);
}
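/*
 * Editor's illustrative sketch, not part of the original source: the
 * kernel drops its right with the routine above; a user-space holder of
 * a named entry port achieves the same by deallocating its send right.
 */
#if 0 /* example only */
    /* kernel side */
    mach_memory_entry_port_release(entry_handle);

    /* user-space equivalent */
    mach_port_deallocate(mach_task_self(), entry_handle);
#endif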

/*
 * mach_destroy_memory_entry:
 *
 * Drops a reference on a memory entry and destroys the memory entry if
 * there are no more references on it.
 * NOTE: This routine should not be called to destroy a memory entry from the
 * kernel, as it will not release the Mach port associated with the memory
 * entry.  The proper way to destroy a memory entry in the kernel is to
 * call mach_memory_entry_port_release() to release the kernel's send-right on
 * the memory entry's port.  When the last send right is released, the memory
 * entry will be destroyed via ipc_kobject_destroy().
 */
void
mach_destroy_memory_entry(
    ipc_port_t  port)
{
    vm_named_entry_t    named_entry;
#if MACH_ASSERT
    assert(ip_kotype(port) == IKOT_NAMED_ENTRY);
#endif /* MACH_ASSERT */
    named_entry = (vm_named_entry_t)port->ip_kobject;
    lck_mtx_lock(&(named_entry)->Lock);
    named_entry->ref_count -= 1;
    if (named_entry->ref_count == 0) {
        if (named_entry->is_sub_map) {
            vm_map_deallocate(named_entry->backing.map);
        } else if (!named_entry->is_pager) {
            /* release the memory object we've been pointing to */
            vm_object_deallocate(named_entry->backing.object);
        } /* else JMM - need to drop reference on pager in that case */

        lck_mtx_unlock(&(named_entry)->Lock);

        kfree((void *) port->ip_kobject,
              sizeof (struct vm_named_entry));
    } else
        lck_mtx_unlock(&(named_entry)->Lock);
}

/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the memory entry rather than on a UPL */

kern_return_t
mach_memory_entry_page_op(
    ipc_port_t          entry_port,
    vm_object_offset_t  offset,
    int                 ops,
    ppnum_t             *phys_entry,
    int                 *flags)
{
    vm_named_entry_t    mem_entry;
    vm_object_t         object;
    kern_return_t       kr;

    if (entry_port == IP_NULL ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map || mem_entry->is_pager) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_reference(object);
    named_entry_unlock(mem_entry);

    kr = vm_object_page_op(object, offset, ops, phys_entry, flags);

    vm_object_deallocate(object);

    return kr;
}

/*
 * mach_memory_entry_range_op offers a performance enhancement over
 * mach_memory_entry_page_op for page operations that do not require
 * per-page state to be returned from the call.  Page_op was created as
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page is involved.  The range_op call extends the _op family of functions
 * to work on multiple pages at once: because no per-page state is handled,
 * the caller avoids the overhead of the UPL structures.
 */

kern_return_t
mach_memory_entry_range_op(
    ipc_port_t          entry_port,
    vm_object_offset_t  offset_beg,
    vm_object_offset_t  offset_end,
    int                 ops,
    int                 *range)
{
    vm_named_entry_t    mem_entry;
    vm_object_t         object;
    kern_return_t       kr;

    if (entry_port == IP_NULL ||
        ip_kotype(entry_port) != IKOT_NAMED_ENTRY) {
        return KERN_INVALID_ARGUMENT;
    }

    mem_entry = (vm_named_entry_t) entry_port->ip_kobject;

    named_entry_lock(mem_entry);

    if (mem_entry->is_sub_map || mem_entry->is_pager) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    object = mem_entry->backing.object;
    if (object == VM_OBJECT_NULL) {
        named_entry_unlock(mem_entry);
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_reference(object);
    named_entry_unlock(mem_entry);

    kr = vm_object_range_op(object,
                            offset_beg,
                            offset_end,
                            ops,
                            (uint32_t *) range);

    vm_object_deallocate(object);

    return kr;
}
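/*
 * Editor's illustrative kernel-side sketch, not part of the original
 * source: using the range call above to operate on every page backing a
 * named entry in one shot instead of one mach_memory_entry_page_op()
 * call per page.  UPL_ROP_DUMP as the "discard the resident pages" op,
 * and the meaning of the returned "range" (how far the operation got),
 * are assumptions here.
 */
#if 0 /* example only */
    int range = 0;

    mach_memory_entry_range_op(entry_port, 0, named_entry_size,
                               UPL_ROP_DUMP, &range);
#endif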


kern_return_t
set_dp_control_port(
    host_priv_t host_priv,
    ipc_port_t  control_port)
{
    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    if (IP_VALID(dynamic_pager_control_port))
        ipc_port_release_send(dynamic_pager_control_port);

    dynamic_pager_control_port = control_port;
    return KERN_SUCCESS;
}

kern_return_t
get_dp_control_port(
    host_priv_t host_priv,
    ipc_port_t  *control_port)
{
    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    *control_port = ipc_port_copy_send(dynamic_pager_control_port);
    return KERN_SUCCESS;
}

/* ******* Temporary Internal calls to UPL for BSD ***** */

extern int kernel_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_offset_t     *dst_addr);

extern int kernel_upl_unmap(
    vm_map_t        map,
    upl_t           upl);

extern int kernel_upl_commit(
    upl_t                   upl,
    upl_page_info_t         *pl,
    mach_msg_type_number_t  count);

extern int kernel_upl_commit_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     flags,
    upl_page_info_array_t   pl,
    mach_msg_type_number_t  count);

extern int kernel_upl_abort(
    upl_t           upl,
    int             abort_type);

extern int kernel_upl_abort_range(
    upl_t           upl,
    upl_offset_t    offset,
    upl_size_t      size,
    int             abort_flags);


kern_return_t
kernel_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_offset_t     *dst_addr)
{
    return vm_upl_map(map, upl, dst_addr);
}

kern_return_t
kernel_upl_unmap(
    vm_map_t    map,
    upl_t       upl)
{
    return vm_upl_unmap(map, upl);
}

kern_return_t
kernel_upl_commit(
    upl_t                   upl,
    upl_page_info_t         *pl,
    mach_msg_type_number_t  count)
{
    kern_return_t   kr;

    kr = upl_commit(upl, pl, count);
    upl_deallocate(upl);
    return kr;
}

kern_return_t
kernel_upl_commit_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     flags,
    upl_page_info_array_t   pl,
    mach_msg_type_number_t  count)
{
    boolean_t       finished = FALSE;
    kern_return_t   kr;

    if (flags & UPL_COMMIT_FREE_ON_EMPTY)
        flags |= UPL_COMMIT_NOTIFY_EMPTY;

    if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
        return KERN_INVALID_ARGUMENT;
    }

    kr = upl_commit_range(upl, offset, size, flags, pl, count, &finished);

    if ((flags & UPL_COMMIT_NOTIFY_EMPTY) && finished)
        upl_deallocate(upl);

    return kr;
}

kern_return_t
kernel_upl_abort_range(
    upl_t           upl,
    upl_offset_t    offset,
    upl_size_t      size,
    int             abort_flags)
{
    kern_return_t   kr;
    boolean_t       finished = FALSE;

    if (abort_flags & UPL_COMMIT_FREE_ON_EMPTY)
        abort_flags |= UPL_COMMIT_NOTIFY_EMPTY;

    kr = upl_abort_range(upl, offset, size, abort_flags, &finished);

    if ((abort_flags & UPL_COMMIT_FREE_ON_EMPTY) && finished)
        upl_deallocate(upl);

    return kr;
}

kern_return_t
kernel_upl_abort(
    upl_t   upl,
    int     abort_type)
{
    kern_return_t   kr;

    kr = upl_abort(upl, abort_type);
    upl_deallocate(upl);
    return kr;
}

/*
 * Now a kernel-private interface (for BootCache
 * use only).  Need a cleaner way to create an
 * empty vm_map() and return a handle to it.
 */

kern_return_t
vm_region_object_create(
    __unused vm_map_t   target_map,
    vm_size_t           size,
    ipc_port_t          *object_handle)
{
    vm_named_entry_t    user_entry;
    ipc_port_t          user_handle;

    vm_map_t            new_map;

    if (mach_memory_entry_allocate(&user_entry, &user_handle)
        != KERN_SUCCESS) {
        return KERN_FAILURE;
    }

    /* Create a named object based on a submap of specified size */

    new_map = vm_map_create(PMAP_NULL, VM_MAP_MIN_ADDRESS,
                            vm_map_round_page(size), TRUE);

    user_entry->backing.map = new_map;
    user_entry->internal = TRUE;
    user_entry->is_sub_map = TRUE;
    user_entry->offset = 0;
    user_entry->protection = VM_PROT_ALL;
    user_entry->size = size;
    assert(user_entry->ref_count == 1);

    *object_handle = user_handle;
    return KERN_SUCCESS;

}

ppnum_t vm_map_get_phys_page( /* forward */
    vm_map_t        map,
    vm_offset_t     offset);

ppnum_t
vm_map_get_phys_page(
    vm_map_t        map,
    vm_offset_t     addr)
{
    vm_object_offset_t  offset;
    vm_object_t         object;
    vm_map_offset_t     map_offset;
    vm_map_entry_t      entry;
    ppnum_t             phys_page = 0;

    map_offset = vm_map_trunc_page(addr);

    vm_map_lock(map);
    while (vm_map_lookup_entry(map, map_offset, &entry)) {

        if (entry->object.vm_object == VM_OBJECT_NULL) {
            vm_map_unlock(map);
            return (ppnum_t) 0;
        }
        if (entry->is_sub_map) {
            vm_map_t    old_map;
            vm_map_lock(entry->object.sub_map);
            old_map = map;
            map = entry->object.sub_map;
            map_offset = entry->offset + (map_offset - entry->vme_start);
            vm_map_unlock(old_map);
            continue;
        }
        if (entry->object.vm_object->phys_contiguous) {
            /* These are not standard pageable memory mappings */
            /* If they are not present in the object they will */
            /* have to be picked up from the pager through the */
            /* fault mechanism.  */
            if (entry->object.vm_object->vo_shadow_offset == 0) {
                /* need to call vm_fault */
                vm_map_unlock(map);
                vm_fault(map, map_offset, VM_PROT_NONE,
                         FALSE, THREAD_UNINT, NULL, 0);
                vm_map_lock(map);
                continue;
            }
            offset = entry->offset + (map_offset - entry->vme_start);
            phys_page = (ppnum_t)
                ((entry->object.vm_object->vo_shadow_offset
                  + offset) >> 12);
            break;

        }
        offset = entry->offset + (map_offset - entry->vme_start);
        object = entry->object.vm_object;
        vm_object_lock(object);
        while (TRUE) {
            vm_page_t dst_page = vm_page_lookup(object, offset);
            if (dst_page == VM_PAGE_NULL) {
                if (object->shadow) {
                    vm_object_t old_object;
                    vm_object_lock(object->shadow);
                    old_object = object;
                    offset = offset + object->vo_shadow_offset;
                    object = object->shadow;
                    vm_object_unlock(old_object);
                } else {
                    vm_object_unlock(object);
                    break;
                }
            } else {
                phys_page = (ppnum_t)(dst_page->phys_page);
                vm_object_unlock(object);
                break;
            }
        }
        break;

    }

    vm_map_unlock(map);
    return phys_page;
}

kern_return_t kernel_object_iopl_request(       /* forward */
        vm_named_entry_t        named_entry,
        memory_object_offset_t  offset,
        upl_size_t              *upl_size,
        upl_t                   *upl_ptr,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
        int                     *flags);

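/*
 * Build a UPL (universal page list) directly against the VM object
 * backing a named entry, for kernel-internal I/O.  Validates the
 * caller's flags, protection and bounds against the named entry,
 * resolves a pager-backed entry to its object if necessary, then
 * delegates the actual page wiring to vm_object_iopl_request().
 */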
kern_return_t
kernel_object_iopl_request(
        vm_named_entry_t        named_entry,
        memory_object_offset_t  offset,
        upl_size_t              *upl_size,
        upl_t                   *upl_ptr,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
        int                     *flags)
{
        vm_object_t             object;
        kern_return_t           ret;
        int                     caller_flags;

        caller_flags = *flags;

        if (caller_flags & ~UPL_VALID_FLAGS) {
                /*
                 * For forward compatibility's sake,
                 * reject any unknown flag.
                 */
                return KERN_INVALID_VALUE;
        }

        /* a few checks to make sure the caller is obeying the rules */
        if (*upl_size == 0) {
                if (offset >= named_entry->size)
                        return KERN_INVALID_RIGHT;
                *upl_size = (upl_size_t) (named_entry->size - offset);
                if (*upl_size != named_entry->size - offset)
                        return KERN_INVALID_ARGUMENT;
        }
        if (caller_flags & UPL_COPYOUT_FROM) {
                if ((named_entry->protection & VM_PROT_READ)
                    != VM_PROT_READ) {
                        return KERN_INVALID_RIGHT;
                }
        } else {
                if ((named_entry->protection &
                     (VM_PROT_READ | VM_PROT_WRITE))
                    != (VM_PROT_READ | VM_PROT_WRITE)) {
                        return KERN_INVALID_RIGHT;
                }
        }
        if (named_entry->size < (offset + *upl_size))
                return KERN_INVALID_ARGUMENT;

        /*
         * The caller's "offset" parameter is defined to be the offset
         * from the beginning of the named entry, not of the underlying
         * object.
         */
        offset = offset + named_entry->offset;

        if (named_entry->is_sub_map)
                return KERN_INVALID_ARGUMENT;

        named_entry_lock(named_entry);

        if (named_entry->is_pager) {
                object = vm_object_enter(named_entry->backing.pager,
                                         named_entry->offset + named_entry->size,
                                         named_entry->internal,
                                         FALSE,
                                         FALSE);
                if (object == VM_OBJECT_NULL) {
                        named_entry_unlock(named_entry);
                        return KERN_INVALID_OBJECT;
                }

                /* JMM - drop reference on the pager here? */

                /* create an extra reference for the object */
                vm_object_lock(object);
                vm_object_reference_locked(object);
                named_entry->backing.object = object;
                named_entry->is_pager = FALSE;
                named_entry_unlock(named_entry);

                /* wait for object (if any) to be ready */
                if (!named_entry->internal) {
                        while (!object->pager_ready) {
                                vm_object_wait(object,
                                               VM_OBJECT_EVENT_PAGER_READY,
                                               THREAD_UNINT);
                                vm_object_lock(object);
                        }
                }
                vm_object_unlock(object);

        } else {
                /*
                 * This is the case where we are going to operate on an
                 * already known object.  If the object is not ready it
                 * is internal; an external object cannot be mapped until
                 * it is ready, so we can avoid the ready check in this
                 * case.
                 */
                object = named_entry->backing.object;
                vm_object_reference(object);
                named_entry_unlock(named_entry);
        }

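        /*
         * For ordinary (non-device) memory, clamp the request to the
         * maximum UPL transfer size and report whether the backing
         * object is physically contiguous; device memory is always
         * treated as contiguous.
         */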
        if (!object->private) {
                if (*upl_size > (MAX_UPL_TRANSFER * PAGE_SIZE))
                        *upl_size = (MAX_UPL_TRANSFER * PAGE_SIZE);
                if (object->phys_contiguous) {
                        *flags = UPL_PHYS_CONTIG;
                } else {
                        *flags = 0;
                }
        } else {
                *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
        }

        ret = vm_object_iopl_request(object,
                                     offset,
                                     *upl_size,
                                     upl_ptr,
                                     user_page_list,
                                     page_list_count,
                                     caller_flags);
        vm_object_deallocate(object);
        return ret;
}
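/*
 * Usage sketch (hypothetical kernel-internal caller, not part of this
 * file): wire a named entry's pages for a read, then commit and free
 * the UPL with the standard teardown calls.
 *
 *      upl_t                   upl;
 *      upl_page_info_t         pl[MAX_UPL_TRANSFER];
 *      unsigned int            count = MAX_UPL_TRANSFER;
 *      upl_size_t              size = PAGE_SIZE;
 *      int                     flags = UPL_COPYOUT_FROM;
 *
 *      if (kernel_object_iopl_request(entry, 0, &size, &upl,
 *                                     pl, &count, &flags) == KERN_SUCCESS) {
 *              ... do I/O against the wired pages ...
 *              upl_commit(upl, pl, count);
 *              upl_deallocate(upl);
 *      }
 */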