/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/memory_object.c
 *	Author:	Michael Wayne Young
 *
 *	External memory management interface control functions.
 */
64 | ||
1c79356b A |
65 | #include <advisory_pageout.h> |
66 | ||
67 | /* | |
68 | * Interface dependencies: | |
69 | */ | |
70 | ||
71 | #include <mach/std_types.h> /* For pointer_t */ | |
72 | #include <mach/mach_types.h> | |
73 | ||
0b4e3aa0 | 74 | #include <mach/mig.h> |
1c79356b A |
75 | #include <mach/kern_return.h> |
76 | #include <mach/memory_object.h> | |
77 | #include <mach/memory_object_default.h> | |
78 | #include <mach/memory_object_control_server.h> | |
0b4e3aa0 | 79 | #include <mach/host_priv_server.h> |
1c79356b A |
80 | #include <mach/boolean.h> |
81 | #include <mach/vm_prot.h> | |
82 | #include <mach/message.h> | |
83 | ||
1c79356b A |
84 | /* |
85 | * Implementation dependencies: | |
86 | */ | |
87 | #include <string.h> /* For memcpy() */ | |
88 | ||
0b4e3aa0 A |
89 | #include <kern/xpr.h> |
90 | #include <kern/host.h> | |
91 | #include <kern/thread.h> /* For current_thread() */ | |
92 | #include <kern/ipc_mig.h> | |
93 | #include <kern/misc_protos.h> | |
94 | ||
95 | #include <vm/vm_object.h> | |
96 | #include <vm/vm_fault.h> | |
1c79356b A |
97 | #include <vm/memory_object.h> |
98 | #include <vm/vm_page.h> | |
99 | #include <vm/vm_pageout.h> | |
100 | #include <vm/pmap.h> /* For pmap_clear_modify */ | |
1c79356b A |
101 | #include <vm/vm_kern.h> /* For kernel_map, vm_move */ |
102 | #include <vm/vm_map.h> /* For vm_map_pageable */ | |
2d21ac55 | 103 | #include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */ |
1c79356b A |
104 | |
105 | #if MACH_PAGEMAP | |
106 | #include <vm/vm_external.h> | |
107 | #endif /* MACH_PAGEMAP */ | |
108 | ||
91447636 A |
109 | #include <vm/vm_protos.h> |
110 | ||
111 | ||
memory_object_default_t	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
decl_lck_mtx_data(,	memory_manager_default_lock)


/*
 *	Routine:	memory_object_should_return_page
 *
 *	Description:
 *		Determine whether the given page should be returned,
 *		based on the page's state and on the given return policy.
 *
 *		We should return the page if one of the following is true:
 *
 *		1. Page is dirty and should_return is not RETURN_NONE.
 *		2. Page is precious and should_return is RETURN_ALL.
 *		3. Should_return is RETURN_ANYTHING.
 *
 *		As a side effect, m->dirty will be made consistent
 *		with pmap_is_modified(m), if should_return is not
 *		MEMORY_OBJECT_RETURN_NONE.
 */

#define	memory_object_should_return_page(m, should_return) \
    (should_return != MEMORY_OBJECT_RETURN_NONE && \
     (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
      ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
      (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))

typedef	int	memory_object_lock_result_t;

#define MEMORY_OBJECT_LOCK_RESULT_DONE		0
#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK	1
#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN	2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN	3
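
/*
 * How callers react to these results (see the switch in
 * vm_object_update_extent() below): DONE means the page needs no I/O;
 * MUST_BLOCK means the caller should push out any batched pages and then
 * sleep on this one; MUST_CLEAN and MUST_RETURN both mean the page is to
 * be sent to the pager -- dirty data versus a precious-but-clean copy --
 * typically by batching it into a LIST_REQ_PAGEOUT_PAGES() request.
 */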
146 | ||
147 | memory_object_lock_result_t memory_object_lock_page( | |
148 | vm_page_t m, | |
149 | memory_object_return_t should_return, | |
150 | boolean_t should_flush, | |
151 | vm_prot_t prot); | |
152 | ||
153 | /* | |
154 | * Routine: memory_object_lock_page | |
155 | * | |
156 | * Description: | |
157 | * Perform the appropriate lock operations on the | |
158 | * given page. See the description of | |
159 | * "memory_object_lock_request" for the meanings | |
160 | * of the arguments. | |
161 | * | |
162 | * Returns an indication that the operation | |
163 | * completed, blocked, or that the page must | |
164 | * be cleaned. | |
165 | */ | |
166 | memory_object_lock_result_t | |
167 | memory_object_lock_page( | |
168 | vm_page_t m, | |
169 | memory_object_return_t should_return, | |
170 | boolean_t should_flush, | |
171 | vm_prot_t prot) | |
172 | { | |
173 | XPR(XPR_MEMORY_OBJECT, | |
174 | "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n", | |
b0d623f7 | 175 | m, should_return, should_flush, prot, 0); |
1c79356b A |
176 | |
177 | /* | |
178 | * If we cannot change access to the page, | |
179 | * either because a mapping is in progress | |
180 | * (busy page) or because a mapping has been | |
181 | * wired, then give up. | |
182 | */ | |
183 | ||
b0d623f7 A |
184 | if (m->busy || m->cleaning) { |
185 | if (m->list_req_pending && m->pageout && | |
186 | should_return == MEMORY_OBJECT_RETURN_NONE && | |
187 | should_flush == TRUE) { | |
188 | /* | |
189 | * page was earmarked by vm_pageout_scan | |
190 | * to be cleaned and stolen... we're going | |
191 | * to take it back since we are being asked to | |
192 | * flush the page w/o cleaning it (i.e. we don't | |
193 | * care that it's dirty, we want it gone from | |
194 | * the cache) and we don't want to stall | |
195 | * waiting for it to be cleaned for 2 reasons... | |
196 | * 1 - no use paging it out since we're probably | |
197 | * shrinking the file at this point or we no | |
198 | * longer care about the data in the page | |
199 | * 2 - if we stall, we may casue a deadlock in | |
200 | * the FS trying to acquire its locks | |
201 | * on the VNOP_PAGEOUT path presuming that | |
202 | * those locks are already held on the truncate | |
203 | * path before calling through to this function | |
204 | * | |
205 | * so undo all of the state that vm_pageout_scan | |
206 | * hung on this page | |
207 | */ | |
208 | m->busy = FALSE; | |
209 | ||
210 | vm_pageout_queue_steal(m, FALSE); | |
211 | } else | |
212 | return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK); | |
213 | } | |
1c79356b A |
214 | |
215 | /* | |
216 | * Don't worry about pages for which the kernel | |
217 | * does not have any data. | |
218 | */ | |
219 | ||
765c9de3 A |
220 | if (m->absent || m->error || m->restart) { |
221 | if(m->error && should_flush) { | |
222 | /* dump the page, pager wants us to */ | |
223 | /* clean it up and there is no */ | |
224 | /* relevant data to return */ | |
b0d623f7 | 225 | if ( !VM_PAGE_WIRED(m)) { |
765c9de3 A |
226 | VM_PAGE_FREE(m); |
227 | return(MEMORY_OBJECT_LOCK_RESULT_DONE); | |
228 | } | |
229 | } else { | |
230 | return(MEMORY_OBJECT_LOCK_RESULT_DONE); | |
231 | } | |
232 | } | |
1c79356b A |
233 | |
234 | assert(!m->fictitious); | |
235 | ||
2d21ac55 A |
236 | /* |
237 | * If the page is wired, just clean or return the page if needed. | |
238 | * Wired pages don't get flushed or disconnected from the pmap. | |
239 | */ | |
1c79356b | 240 | |
b0d623f7 | 241 | if (VM_PAGE_WIRED(m)) { |
2d21ac55 A |
242 | if (memory_object_should_return_page(m, should_return)) { |
243 | if (m->dirty) | |
244 | return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); | |
245 | else | |
246 | return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN); | |
1c79356b A |
247 | } |
248 | ||
2d21ac55 | 249 | return(MEMORY_OBJECT_LOCK_RESULT_DONE); |
1c79356b A |
250 | } |
251 | ||
252 | /* | |
253 | * If the page is to be flushed, allow | |
254 | * that to be done as part of the protection. | |
255 | */ | |
256 | ||
257 | if (should_flush) | |
258 | prot = VM_PROT_ALL; | |
259 | ||
260 | /* | |
261 | * Set the page lock. | |
262 | * | |
263 | * If we are decreasing permission, do it now; | |
264 | * let the fault handler take care of increases | |
265 | * (pmap_page_protect may not increase protection). | |
266 | */ | |
267 | ||
268 | if (prot != VM_PROT_NO_CHANGE) { | |
2d21ac55 | 269 | pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot); |
1c79356b | 270 | |
1c79356b A |
271 | PAGE_WAKEUP(m); |
272 | } | |
273 | ||
274 | /* | |
275 | * Handle page returning. | |
276 | */ | |
1c79356b A |
277 | if (memory_object_should_return_page(m, should_return)) { |
278 | ||
279 | /* | |
280 | * If we weren't planning | |
281 | * to flush the page anyway, | |
282 | * we may need to remove the | |
283 | * page from the pageout | |
284 | * system and from physical | |
285 | * maps now. | |
286 | */ | |
287 | ||
2d21ac55 | 288 | vm_page_lockspin_queues(); |
1c79356b A |
289 | VM_PAGE_QUEUES_REMOVE(m); |
290 | vm_page_unlock_queues(); | |
291 | ||
292 | if (!should_flush) | |
91447636 | 293 | pmap_disconnect(m->phys_page); |
1c79356b A |
294 | |
295 | if (m->dirty) | |
296 | return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); | |
297 | else | |
298 | return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN); | |
299 | } | |
300 | ||
301 | /* | |
302 | * Handle flushing | |
303 | */ | |
1c79356b A |
304 | if (should_flush) { |
305 | VM_PAGE_FREE(m); | |
306 | } else { | |
1c79356b A |
307 | /* |
308 | * XXX Make clean but not flush a paging hint, | |
309 | * and deactivate the pages. This is a hack | |
310 | * because it overloads flush/clean with | |
311 | * implementation-dependent meaning. This only | |
312 | * happens to pages that are already clean. | |
313 | */ | |
314 | ||
315 | if (vm_page_deactivate_hint && | |
316 | (should_return != MEMORY_OBJECT_RETURN_NONE)) { | |
b0d623f7 | 317 | vm_page_lockspin_queues(); |
1c79356b A |
318 | vm_page_deactivate(m); |
319 | vm_page_unlock_queues(); | |
320 | } | |
321 | } | |
322 | ||
323 | return(MEMORY_OBJECT_LOCK_RESULT_DONE); | |
324 | } | |

#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po, ro, ioerr, iosync) \
MACRO_BEGIN								\
									\
	register int		upl_flags;				\
	memory_object_t		pager;					\
									\
	if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) {		\
		vm_object_paging_begin(object);				\
		vm_object_unlock(object);				\
									\
		if (iosync)						\
			upl_flags = UPL_MSYNC | UPL_IOSYNC;		\
		else							\
			upl_flags = UPL_MSYNC;				\
									\
		(void) memory_object_data_return(pager,			\
			po,						\
			(memory_object_cluster_size_t)data_cnt,		\
			ro,						\
			ioerr,						\
			(action) == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN,\
			!should_flush,					\
			upl_flags);					\
									\
		vm_object_lock(object);					\
		vm_object_paging_end(object);				\
	}								\
MACRO_END


/*
 *	Routine:	memory_object_lock_request [user interface]
 *
 *	Description:
 *		Control use of the data associated with the given
 *		memory object.  For each page in the given range,
 *		perform the following operations, in order:
 *			1)  restrict access to the page (disallow
 *			    forms specified by "prot");
 *			2)  return data to the manager (if "should_return"
 *			    is RETURN_DIRTY and the page is dirty, or
 *			    "should_return" is RETURN_ALL and the page
 *			    is either dirty or precious); and,
 *			3)  flush the cached copy (if "should_flush"
 *			    is asserted).
 *		The set of pages is defined by a starting offset
 *		("offset") and size ("size").  Only pages with the
 *		same page alignment as the starting offset are
 *		considered.
 *
 *		A single acknowledgement is sent (to the "reply_to"
 *		port) when these actions are complete.  If successful,
 *		the naked send right for reply_to is consumed.
 */

kern_return_t
memory_object_lock_request(
	memory_object_control_t		control,
	memory_object_offset_t		offset,
	memory_object_size_t		size,
	memory_object_offset_t		*resid_offset,
	int				*io_errno,
	memory_object_return_t		should_return,
	int				flags,
	vm_prot_t			prot)
{
	vm_object_t	object;

	/*
	 *	Check for bogus arguments.
	 */
	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
		return (KERN_INVALID_ARGUMENT);

	size = round_page_64(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
		if ((should_return != MEMORY_OBJECT_RETURN_NONE) || offset || object->copy) {
			flags &= ~MEMORY_OBJECT_DATA_FLUSH_ALL;
			flags |= MEMORY_OBJECT_DATA_FLUSH;
		}
	}
	offset -= object->paging_offset;

	if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL)
		vm_object_reap_pages(object, REAP_DATA_FLUSH);
	else
		(void)vm_object_update(object, offset, size, resid_offset,
				       io_errno, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return (KERN_SUCCESS);
}

/*
 *	memory_object_release_name:  [interface]
 *
 *	Enforces the name semantic on a memory object reference count
 *	decrement.  This routine should not be called unless the caller
 *	holds a name reference gained through memory_object_named_create
 *	or memory_object_rename.
 *	If the TERMINATE_IDLE flag is set, the call returns immediately
 *	unless the reference count is 1, i.e. unless the object is idle
 *	with the name being the only remaining reference.
 *	If the decision is made to proceed, the named flag is cleared and
 *	the reference count is decremented.  If the RESPECT_CACHE flag is
 *	set and the reference count has gone to zero, the memory object is
 *	checked to see if it is cacheable; otherwise, when the reference
 *	count reaches zero, it is simply terminated.
 */

kern_return_t
memory_object_release_name(
	memory_object_control_t	control,
	int			flags)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_release_name(object, flags);
}

463 | ||
464 | ||
465 | /* | |
466 | * Routine: memory_object_destroy [user interface] | |
467 | * Purpose: | |
468 | * Shut down a memory object, despite the | |
469 | * presence of address map (or other) references | |
470 | * to the vm_object. | |
471 | */ | |
472 | kern_return_t | |
473 | memory_object_destroy( | |
474 | memory_object_control_t control, | |
475 | kern_return_t reason) | |
476 | { | |
477 | vm_object_t object; | |
478 | ||
479 | object = memory_object_control_to_vm_object(control); | |
480 | if (object == VM_OBJECT_NULL) | |
481 | return (KERN_INVALID_ARGUMENT); | |
482 | ||
483 | return (vm_object_destroy(object, reason)); | |
484 | } | |
485 | ||
486 | /* | |
487 | * Routine: vm_object_sync | |
1c79356b A |
488 | * |
489 | * Kernel internal function to synch out pages in a given | |
490 | * range within an object to its memory manager. Much the | |
491 | * same as memory_object_lock_request but page protection | |
492 | * is not changed. | |
493 | * | |
494 | * If the should_flush and should_return flags are true pages | |
495 | * are flushed, that is dirty & precious pages are written to | |
496 | * the memory manager and then discarded. If should_return | |
497 | * is false, only precious pages are returned to the memory | |
498 | * manager. | |
499 | * | |
500 | * If should flush is false and should_return true, the memory | |
501 | * manager's copy of the pages is updated. If should_return | |
502 | * is also false, only the precious pages are updated. This | |
503 | * last option is of limited utility. | |
504 | * | |
505 | * Returns: | |
506 | * FALSE if no pages were returned to the pager | |
507 | * TRUE otherwise. | |
508 | */ | |
509 | ||
510 | boolean_t | |
0b4e3aa0 | 511 | vm_object_sync( |
1c79356b A |
512 | vm_object_t object, |
513 | vm_object_offset_t offset, | |
91447636 | 514 | vm_object_size_t size, |
1c79356b | 515 | boolean_t should_flush, |
91447636 A |
516 | boolean_t should_return, |
517 | boolean_t should_iosync) | |
1c79356b A |
518 | { |
519 | boolean_t rv; | |
91447636 | 520 | int flags; |
1c79356b | 521 | |
0b4e3aa0 A |
522 | XPR(XPR_VM_OBJECT, |
523 | "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n", | |
b0d623f7 | 524 | object, offset, size, should_flush, should_return); |
1c79356b A |
525 | |
526 | /* | |
527 | * Lock the object, and acquire a paging reference to | |
528 | * prevent the memory_object and control ports from | |
529 | * being destroyed. | |
530 | */ | |
531 | vm_object_lock(object); | |
532 | vm_object_paging_begin(object); | |
533 | ||
91447636 A |
534 | if (should_flush) |
535 | flags = MEMORY_OBJECT_DATA_FLUSH; | |
536 | else | |
537 | flags = 0; | |
538 | ||
539 | if (should_iosync) | |
540 | flags |= MEMORY_OBJECT_IO_SYNC; | |
541 | ||
542 | rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL, | |
1c79356b A |
543 | (should_return) ? |
544 | MEMORY_OBJECT_RETURN_ALL : | |
545 | MEMORY_OBJECT_RETURN_NONE, | |
91447636 | 546 | flags, |
1c79356b A |
547 | VM_PROT_NO_CHANGE); |
548 | ||
549 | ||
550 | vm_object_paging_end(object); | |
551 | vm_object_unlock(object); | |
552 | return rv; | |
553 | } | |
554 | ||
91447636 A |
555 | |
556 | ||
557 | ||
558 | static int | |
559 | vm_object_update_extent( | |
560 | vm_object_t object, | |
561 | vm_object_offset_t offset, | |
562 | vm_object_offset_t offset_end, | |
563 | vm_object_offset_t *offset_resid, | |
564 | int *io_errno, | |
565 | boolean_t should_flush, | |
566 | memory_object_return_t should_return, | |
567 | boolean_t should_iosync, | |
568 | vm_prot_t prot) | |
569 | { | |
570 | vm_page_t m; | |
571 | int retval = 0; | |
b0d623f7 | 572 | memory_object_cluster_size_t data_cnt = 0; |
91447636 | 573 | vm_object_offset_t paging_offset = 0; |
b0d623f7 | 574 | vm_object_offset_t next_offset = offset; |
91447636 A |
575 | memory_object_lock_result_t page_lock_result; |
576 | memory_object_lock_result_t pageout_action; | |
577 | ||
578 | pageout_action = MEMORY_OBJECT_LOCK_RESULT_DONE; | |
579 | ||
580 | for (; | |
581 | offset < offset_end && object->resident_page_count; | |
582 | offset += PAGE_SIZE_64) { | |
583 | ||
584 | /* | |
b0d623f7 A |
585 | * Limit the number of pages to be cleaned at once to a contiguous |
586 | * run, or at most MAX_UPL_TRANSFER size | |
91447636 | 587 | */ |
b0d623f7 A |
588 | if (data_cnt) { |
589 | if ((data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) || (next_offset != offset)) { | |
590 | LIST_REQ_PAGEOUT_PAGES(object, data_cnt, | |
591 | pageout_action, paging_offset, offset_resid, io_errno, should_iosync); | |
592 | data_cnt = 0; | |
593 | } | |
91447636 A |
594 | } |
595 | ||
596 | while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) { | |
597 | page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot); | |
598 | ||
599 | XPR(XPR_MEMORY_OBJECT, | |
600 | "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n", | |
b0d623f7 | 601 | object, offset, page_lock_result, 0, 0); |
91447636 A |
602 | |
603 | switch (page_lock_result) | |
604 | { | |
605 | case MEMORY_OBJECT_LOCK_RESULT_DONE: | |
606 | /* | |
607 | * End of a cluster of dirty pages. | |
608 | */ | |
609 | if (data_cnt) { | |
610 | LIST_REQ_PAGEOUT_PAGES(object, | |
611 | data_cnt, pageout_action, | |
612 | paging_offset, offset_resid, io_errno, should_iosync); | |
613 | data_cnt = 0; | |
614 | continue; | |
615 | } | |
616 | break; | |
617 | ||
618 | case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK: | |
619 | /* | |
620 | * Since it is necessary to block, | |
621 | * clean any dirty pages now. | |
622 | */ | |
623 | if (data_cnt) { | |
624 | LIST_REQ_PAGEOUT_PAGES(object, | |
625 | data_cnt, pageout_action, | |
626 | paging_offset, offset_resid, io_errno, should_iosync); | |
627 | data_cnt = 0; | |
628 | continue; | |
629 | } | |
630 | PAGE_SLEEP(object, m, THREAD_UNINT); | |
631 | continue; | |
632 | ||
633 | case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN: | |
634 | case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN: | |
635 | /* | |
636 | * The clean and return cases are similar. | |
637 | * | |
638 | * if this would form a discontiguous block, | |
639 | * clean the old pages and start anew. | |
91447636 | 640 | */ |
b0d623f7 | 641 | if (data_cnt && pageout_action != page_lock_result) { |
91447636 A |
642 | LIST_REQ_PAGEOUT_PAGES(object, |
643 | data_cnt, pageout_action, | |
644 | paging_offset, offset_resid, io_errno, should_iosync); | |
645 | data_cnt = 0; | |
b0d623f7 | 646 | continue; |
91447636 | 647 | } |
91447636 A |
648 | if (m->cleaning) { |
649 | PAGE_SLEEP(object, m, THREAD_UNINT); | |
650 | continue; | |
651 | } | |
652 | if (data_cnt == 0) { | |
653 | pageout_action = page_lock_result; | |
654 | paging_offset = offset; | |
655 | } | |
656 | data_cnt += PAGE_SIZE; | |
b0d623f7 | 657 | next_offset = offset + PAGE_SIZE_64; |
91447636 | 658 | |
91447636 A |
659 | /* |
660 | * Clean | |
661 | */ | |
662 | m->list_req_pending = TRUE; | |
663 | m->cleaning = TRUE; | |
664 | ||
c910b4d9 | 665 | if (should_flush && |
b0d623f7 A |
666 | /* let's not flush a wired page... */ |
667 | !VM_PAGE_WIRED(m)) { | |
91447636 A |
668 | /* |
669 | * and add additional state | |
670 | * for the flush | |
671 | */ | |
672 | m->busy = TRUE; | |
673 | m->pageout = TRUE; | |
b0d623f7 A |
674 | |
675 | vm_page_lockspin_queues(); | |
91447636 | 676 | vm_page_wire(m); |
b0d623f7 | 677 | vm_page_unlock_queues(); |
91447636 | 678 | } |
91447636 A |
679 | |
680 | retval = 1; | |
681 | break; | |
682 | } | |
683 | break; | |
684 | } | |
685 | } | |
686 | /* | |
687 | * We have completed the scan for applicable pages. | |
688 | * Clean any pages that have been saved. | |
689 | */ | |
690 | if (data_cnt) { | |
691 | LIST_REQ_PAGEOUT_PAGES(object, | |
692 | data_cnt, pageout_action, paging_offset, offset_resid, io_errno, should_iosync); | |
693 | } | |
694 | return (retval); | |
695 | } | |
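
/*
 * Because dirty pages are batched until the run either becomes
 * discontiguous or reaches MAX_UPL_TRANSFER pages, a sync over a
 * contiguously dirty range results in a small number of large
 * memory_object_data_return() calls rather than one call per page.
 */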
696 | ||
697 | ||
698 | ||
1c79356b | 699 | /* |
0b4e3aa0 | 700 | * Routine: vm_object_update |
1c79356b | 701 | * Description: |
0b4e3aa0 | 702 | * Work function for m_o_lock_request(), vm_o_sync(). |
1c79356b A |
703 | * |
704 | * Called with object locked and paging ref taken. | |
705 | */ | |
706 | kern_return_t | |
0b4e3aa0 | 707 | vm_object_update( |
1c79356b A |
708 | register vm_object_t object, |
709 | register vm_object_offset_t offset, | |
91447636 A |
710 | register vm_object_size_t size, |
711 | register vm_object_offset_t *resid_offset, | |
712 | int *io_errno, | |
1c79356b A |
713 | memory_object_return_t should_return, |
714 | int flags, | |
91447636 | 715 | vm_prot_t protection) |
1c79356b | 716 | { |
2d21ac55 | 717 | vm_object_t copy_object = VM_OBJECT_NULL; |
1c79356b A |
718 | boolean_t data_returned = FALSE; |
719 | boolean_t update_cow; | |
91447636 A |
720 | boolean_t should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE; |
721 | boolean_t should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE; | |
b0d623f7 | 722 | vm_fault_return_t result; |
91447636 A |
723 | int num_of_extents; |
724 | int n; | |
725 | #define MAX_EXTENTS 8 | |
726 | #define EXTENT_SIZE (1024 * 1024 * 256) | |
727 | #define RESIDENT_LIMIT (1024 * 32) | |
728 | struct extent { | |
729 | vm_object_offset_t e_base; | |
730 | vm_object_offset_t e_min; | |
731 | vm_object_offset_t e_max; | |
732 | } extents[MAX_EXTENTS]; | |
1c79356b A |
733 | |
734 | /* | |
735 | * To avoid blocking while scanning for pages, save | |
736 | * dirty pages to be cleaned all at once. | |
737 | * | |
738 | * XXXO A similar strategy could be used to limit the | |
739 | * number of times that a scan must be restarted for | |
740 | * other reasons. Those pages that would require blocking | |
741 | * could be temporarily collected in another list, or | |
742 | * their offsets could be recorded in a small array. | |
743 | */ | |
744 | ||
745 | /* | |
746 | * XXX NOTE: May want to consider converting this to a page list | |
747 | * XXX vm_map_copy interface. Need to understand object | |
748 | * XXX coalescing implications before doing so. | |
749 | */ | |
750 | ||
751 | update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH) | |
752 | && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) && | |
753 | !(flags & MEMORY_OBJECT_DATA_PURGE))) | |
754 | || (flags & MEMORY_OBJECT_COPY_SYNC); | |
755 | ||
	if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) {
		int collisions = 0;

		while ((copy_object = object->copy) != VM_OBJECT_NULL) {
			/*
			 * need to do a try here since we're swimming upstream
			 * against the normal lock ordering... however, we need
			 * to hold the object stable until we gain control of the
			 * copy object so we have to be careful how we approach this
			 */
			if (vm_object_lock_try(copy_object)) {
				/*
				 * we 'won' the lock on the copy object...
				 * no need to hold the object lock any longer...
				 * take a real reference on the copy object because
				 * we're going to call vm_fault_page on it which may
				 * under certain conditions drop the lock and the paging
				 * reference we're about to take... the reference
				 * will keep the copy object from going away if that happens
				 */
				vm_object_unlock(object);
				vm_object_reference_locked(copy_object);
				break;
			}
			vm_object_unlock(object);

			collisions++;
			mutex_pause(collisions);

			vm_object_lock(object);
		}
	}
	if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) {
		vm_map_size_t		i;
		vm_map_size_t		copy_size;
		vm_map_offset_t		copy_offset;
		vm_prot_t		prot;
		vm_page_t		page;
		vm_page_t		top_page;
		kern_return_t		error = 0;
		struct vm_object_fault_info fault_info;

		if (copy_object != VM_OBJECT_NULL) {
			/*
			 * translate offset with respect to shadow's offset
			 */
			copy_offset = (offset >= copy_object->shadow_offset) ?
				(vm_map_offset_t)(offset - copy_object->shadow_offset) :
				(vm_map_offset_t) 0;

			if (copy_offset > copy_object->size)
				copy_offset = copy_object->size;

			/*
			 * clip size with respect to shadow offset
			 */
			if (offset >= copy_object->shadow_offset) {
				copy_size = size;
			} else if (size >= copy_object->shadow_offset - offset) {
				copy_size = size - (copy_object->shadow_offset - offset);
			} else {
				copy_size = 0;
			}

			if (copy_offset + copy_size > copy_object->size) {
				if (copy_object->size >= copy_offset) {
					copy_size = copy_object->size - copy_offset;
				} else {
					copy_size = 0;
				}
			}
			copy_size += copy_offset;

		} else {
			copy_object = object;

			copy_size   = offset + size;
			copy_offset = offset;
		}
		fault_info.interruptible = THREAD_UNINT;
		fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
		fault_info.user_tag = 0;
		fault_info.lo_offset = copy_offset;
		fault_info.hi_offset = copy_size;
		fault_info.no_cache = FALSE;
		fault_info.stealth = TRUE;

		vm_object_paging_begin(copy_object);

		for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
	RETRY_COW_OF_LOCK_REQUEST:
			fault_info.cluster_size = (vm_size_t) (copy_size - i);
			assert(fault_info.cluster_size == copy_size - i);

			prot =	VM_PROT_WRITE|VM_PROT_READ;
			result = vm_fault_page(copy_object, i,
					       VM_PROT_WRITE|VM_PROT_READ,
					       FALSE,
					       &prot,
					       &page,
					       &top_page,
					       (int *)0,
					       &error,
					       FALSE,
					       FALSE, &fault_info);

			switch (result) {
			case VM_FAULT_SUCCESS:
				if (top_page) {
					vm_fault_cleanup(
						page->object, top_page);
					vm_object_lock(copy_object);
					vm_object_paging_begin(copy_object);
				}
				if (!page->active &&
				    !page->inactive &&
				    !page->throttled) {
					vm_page_lockspin_queues();
					if (!page->active &&
					    !page->inactive &&
					    !page->throttled)
						vm_page_deactivate(page);
					vm_page_unlock_queues();
				}
				PAGE_WAKEUP_DONE(page);
				break;
			case VM_FAULT_RETRY:
				prot =	VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_INTERRUPTED:
				prot =	VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_MEMORY_SHORTAGE:
				VM_PAGE_WAIT();
				prot =	VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_FICTITIOUS_SHORTAGE:
				vm_page_more_fictitious();
				prot =	VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_SUCCESS_NO_VM_PAGE:
				/* success but no VM page: fail */
				vm_object_paging_end(copy_object);
				vm_object_unlock(copy_object);
				/*FALLTHROUGH*/
			case VM_FAULT_MEMORY_ERROR:
				if (object != copy_object)
					vm_object_deallocate(copy_object);
				vm_object_lock(object);
				goto BYPASS_COW_COPYIN;
			default:
				panic("vm_object_update: unexpected error 0x%x"
				      " from vm_fault_page()\n", result);
			}

		}
		vm_object_paging_end(copy_object);
	}
	if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
		if (copy_object != VM_OBJECT_NULL && copy_object != object) {
			vm_object_unlock(copy_object);
			vm_object_deallocate(copy_object);
			vm_object_lock(object);
		}
		return KERN_SUCCESS;
	}
	if (copy_object != VM_OBJECT_NULL && copy_object != object) {
		if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
			copy_object->shadow_severed = TRUE;
			copy_object->shadowed = FALSE;
			copy_object->shadow = NULL;
			/*
			 * delete the ref the COW was holding on the target object
			 */
			vm_object_deallocate(object);
		}
		vm_object_unlock(copy_object);
		vm_object_deallocate(copy_object);
		vm_object_lock(object);
	}
BYPASS_COW_COPYIN:

	/*
	 * when we have a really large range to check relative
	 * to the number of actual resident pages, we'd like
	 * to use the resident page list to drive our checks
	 * however, the object lock will get dropped while processing
	 * the page which means the resident queue can change which
	 * means we can't walk the queue as we process the pages
	 * we also want to do the processing in offset order to allow
	 * 'runs' of pages to be collected if we're being told to
	 * flush to disk... the resident page queue is NOT ordered.
	 *
	 * a temporary solution (until we figure out how to deal with
	 * large address spaces more generically) is to pre-flight
	 * the resident page queue (if it's small enough) and develop
	 * a collection of extents (that encompass actual resident pages)
	 * to visit.  This will at least allow us to deal with some of the
	 * more pathological cases in a more efficient manner.  The current
	 * worst case (a single resident page at the end of an extremely large
	 * range) can take minutes to complete for ranges in the terabyte
	 * category... since this routine is called when truncating a file,
	 * and we currently support files up to 16 Tbytes in size, this
	 * is not a theoretical problem
	 */
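	/*
	 * To make the tradeoff concrete: with MAX_EXTENTS = 8 and
	 * EXTENT_SIZE = 256MB, the pre-flight can describe up to 8
	 * discontiguous 256MB-aligned regions that actually contain
	 * resident pages.  For a sparsely resident 1TB range, that means
	 * walking the (at most RESIDENT_LIMIT = 32768 entry) resident
	 * queue once instead of probing vm_page_lookup() on each of the
	 * ~268 million page offsets in the range.
	 */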

	if ((object->resident_page_count < RESIDENT_LIMIT) &&
	    (atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) {
		vm_page_t		next;
		vm_object_offset_t	start;
		vm_object_offset_t	end;
		vm_object_size_t	e_mask;
		vm_page_t		m;

		start = offset;
		end   = offset + size;
		num_of_extents = 0;
		e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));

		m = (vm_page_t) queue_first(&object->memq);

		while (!queue_end(&object->memq, (queue_entry_t) m)) {
			next = (vm_page_t) queue_next(&m->listq);

			if ((m->offset >= start) && (m->offset < end)) {
				/*
				 * this is a page we're interested in
				 * try to fit it into a current extent
				 */
				for (n = 0; n < num_of_extents; n++) {
					if ((m->offset & e_mask) == extents[n].e_base) {
						/*
						 * use (PAGE_SIZE - 1) to determine the
						 * max offset so that we don't wrap if
						 * we're at the last page of the space
						 */
						if (m->offset < extents[n].e_min)
							extents[n].e_min = m->offset;
						else if ((m->offset + (PAGE_SIZE - 1)) > extents[n].e_max)
							extents[n].e_max = m->offset + (PAGE_SIZE - 1);
						break;
					}
				}
				if (n == num_of_extents) {
					/*
					 * didn't find a current extent that can encompass
					 * this page
					 */
					if (n < MAX_EXTENTS) {
						/*
						 * if we still have room,
						 * create a new extent
						 */
						extents[n].e_base = m->offset & e_mask;
						extents[n].e_min  = m->offset;
						extents[n].e_max  = m->offset + (PAGE_SIZE - 1);

						num_of_extents++;
					} else {
						/*
						 * no room to create a new extent...
						 * fall back to a single extent based
						 * on the min and max page offsets
						 * we find in the range we're interested in...
						 * first, look through the extent list and
						 * develop the overall min and max for the
						 * pages we've looked at up to this point
						 */
						for (n = 1; n < num_of_extents; n++) {
							if (extents[n].e_min < extents[0].e_min)
								extents[0].e_min = extents[n].e_min;
							if (extents[n].e_max > extents[0].e_max)
								extents[0].e_max = extents[n].e_max;
						}
						/*
						 * now setup to run through the remaining pages
						 * to determine the overall min and max
						 * offset for the specified range
						 */
						extents[0].e_base = 0;
						e_mask = 0;
						num_of_extents = 1;

						/*
						 * by continuing, we'll reprocess the
						 * page that forced us to abandon trying
						 * to develop multiple extents
						 */
						continue;
					}
				}
			}
			m = next;
		}
	} else {
		extents[0].e_min = offset;
		extents[0].e_max = offset + (size - 1);

		num_of_extents = 1;
	}
	for (n = 0; n < num_of_extents; n++) {
		if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
					    should_flush, should_return, should_iosync, protection))
			data_returned = TRUE;
	}
	return (data_returned);
}
1071 | ||
91447636 | 1072 | |
1c79356b A |
1073 | /* |
1074 | * Routine: memory_object_synchronize_completed [user interface] | |
1075 | * | |
1076 | * Tell kernel that previously synchronized data | |
1077 | * (memory_object_synchronize) has been queue or placed on the | |
1078 | * backing storage. | |
1079 | * | |
1080 | * Note: there may be multiple synchronize requests for a given | |
1081 | * memory object outstanding but they will not overlap. | |
1082 | */ | |
1083 | ||
1084 | kern_return_t | |
1085 | memory_object_synchronize_completed( | |
0b4e3aa0 A |
1086 | memory_object_control_t control, |
1087 | memory_object_offset_t offset, | |
b0d623f7 | 1088 | memory_object_size_t length) |
1c79356b | 1089 | { |
0b4e3aa0 A |
1090 | vm_object_t object; |
1091 | msync_req_t msr; | |
1c79356b | 1092 | |
91447636 A |
1093 | object = memory_object_control_to_vm_object(control); |
1094 | ||
1c79356b A |
1095 | XPR(XPR_MEMORY_OBJECT, |
1096 | "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n", | |
b0d623f7 | 1097 | object, offset, length, 0, 0); |
1c79356b A |
1098 | |
1099 | /* | |
1100 | * Look for bogus arguments | |
1101 | */ | |
1102 | ||
0b4e3aa0 A |
1103 | if (object == VM_OBJECT_NULL) |
1104 | return (KERN_INVALID_ARGUMENT); | |
1c79356b A |
1105 | |
1106 | vm_object_lock(object); | |
1107 | ||
1108 | /* | |
1109 | * search for sync request structure | |
1110 | */ | |
1111 | queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) { | |
1112 | if (msr->offset == offset && msr->length == length) { | |
1113 | queue_remove(&object->msr_q, msr, msync_req_t, msr_q); | |
1114 | break; | |
1115 | } | |
1116 | }/* queue_iterate */ | |
1117 | ||
1118 | if (queue_end(&object->msr_q, (queue_entry_t)msr)) { | |
1119 | vm_object_unlock(object); | |
1c79356b A |
1120 | return KERN_INVALID_ARGUMENT; |
1121 | } | |
1122 | ||
1123 | msr_lock(msr); | |
1124 | vm_object_unlock(object); | |
1125 | msr->flag = VM_MSYNC_DONE; | |
1126 | msr_unlock(msr); | |
1127 | thread_wakeup((event_t) msr); | |
1c79356b A |
1128 | |
1129 | return KERN_SUCCESS; | |
1130 | }/* memory_object_synchronize_completed */ | |

static kern_return_t
vm_object_set_attributes_common(
	vm_object_t	object,
	boolean_t	may_cache,
	memory_object_copy_strategy_t copy_strategy,
	boolean_t	temporary,
	boolean_t	silent_overwrite,
	boolean_t	advisory_pageout)
{
	boolean_t	object_became_ready;

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
	    object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);

	if (object == VM_OBJECT_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 *	Verify the attributes of importance
	 */

	switch(copy_strategy) {
	case MEMORY_OBJECT_COPY_NONE:
	case MEMORY_OBJECT_COPY_DELAY:
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

#if	!ADVISORY_PAGEOUT
	if (silent_overwrite || advisory_pageout)
		return(KERN_INVALID_ARGUMENT);

#endif	/* !ADVISORY_PAGEOUT */
	if (may_cache)
		may_cache = TRUE;
	if (temporary)
		temporary = TRUE;

	vm_object_lock(object);

	/*
	 *	Copy the attributes
	 */
	assert(!object->internal);
	object_became_ready = !object->pager_ready;
	object->copy_strategy = copy_strategy;
	object->can_persist = may_cache;
	object->temporary = temporary;
	object->silent_overwrite = silent_overwrite;
	object->advisory_pageout = advisory_pageout;

	/*
	 *	Wake up anyone waiting for the ready attribute
	 *	to become asserted.
	 */

	if (object_became_ready) {
		object->pager_ready = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
	}

	vm_object_unlock(object);

	return(KERN_SUCCESS);
}
1199 | ||
1200 | /* | |
1201 | * Set the memory object attribute as provided. | |
1202 | * | |
1203 | * XXX This routine cannot be completed until the vm_msync, clean | |
1204 | * in place, and cluster work is completed. See ifdef notyet | |
0b4e3aa0 | 1205 | * below and note that vm_object_set_attributes_common() |
1c79356b A |
1206 | * may have to be expanded. |
1207 | */ | |
1208 | kern_return_t | |
1209 | memory_object_change_attributes( | |
0b4e3aa0 A |
1210 | memory_object_control_t control, |
1211 | memory_object_flavor_t flavor, | |
1212 | memory_object_info_t attributes, | |
1213 | mach_msg_type_number_t count) | |
1c79356b | 1214 | { |
0b4e3aa0 A |
1215 | vm_object_t object; |
1216 | kern_return_t result = KERN_SUCCESS; | |
1217 | boolean_t temporary; | |
1218 | boolean_t may_cache; | |
1219 | boolean_t invalidate; | |
1c79356b | 1220 | memory_object_copy_strategy_t copy_strategy; |
0b4e3aa0 | 1221 | boolean_t silent_overwrite; |
1c79356b A |
1222 | boolean_t advisory_pageout; |
1223 | ||
0b4e3aa0 | 1224 | object = memory_object_control_to_vm_object(control); |
1c79356b | 1225 | if (object == VM_OBJECT_NULL) |
0b4e3aa0 | 1226 | return (KERN_INVALID_ARGUMENT); |
1c79356b A |
1227 | |
1228 | vm_object_lock(object); | |
0b4e3aa0 | 1229 | |
1c79356b A |
1230 | temporary = object->temporary; |
1231 | may_cache = object->can_persist; | |
1232 | copy_strategy = object->copy_strategy; | |
1233 | silent_overwrite = object->silent_overwrite; | |
1234 | advisory_pageout = object->advisory_pageout; | |
1235 | #if notyet | |
1236 | invalidate = object->invalidate; | |
1237 | #endif | |
1c79356b A |
1238 | vm_object_unlock(object); |
1239 | ||
1240 | switch (flavor) { | |
1241 | case OLD_MEMORY_OBJECT_BEHAVIOR_INFO: | |
1242 | { | |
1243 | old_memory_object_behave_info_t behave; | |
1244 | ||
1245 | if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) { | |
1246 | result = KERN_INVALID_ARGUMENT; | |
1247 | break; | |
1248 | } | |
1249 | ||
1250 | behave = (old_memory_object_behave_info_t) attributes; | |
1251 | ||
1252 | temporary = behave->temporary; | |
1253 | invalidate = behave->invalidate; | |
1254 | copy_strategy = behave->copy_strategy; | |
1255 | ||
1256 | break; | |
1257 | } | |
1258 | ||
1259 | case MEMORY_OBJECT_BEHAVIOR_INFO: | |
1260 | { | |
1261 | memory_object_behave_info_t behave; | |
1262 | ||
1263 | if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) { | |
1264 | result = KERN_INVALID_ARGUMENT; | |
1265 | break; | |
1266 | } | |
1267 | ||
1268 | behave = (memory_object_behave_info_t) attributes; | |
1269 | ||
1270 | temporary = behave->temporary; | |
1271 | invalidate = behave->invalidate; | |
1272 | copy_strategy = behave->copy_strategy; | |
1273 | silent_overwrite = behave->silent_overwrite; | |
1274 | advisory_pageout = behave->advisory_pageout; | |
1275 | break; | |
1276 | } | |
1277 | ||
1278 | case MEMORY_OBJECT_PERFORMANCE_INFO: | |
1279 | { | |
1280 | memory_object_perf_info_t perf; | |
1281 | ||
1282 | if (count != MEMORY_OBJECT_PERF_INFO_COUNT) { | |
1283 | result = KERN_INVALID_ARGUMENT; | |
1284 | break; | |
1285 | } | |
1286 | ||
1287 | perf = (memory_object_perf_info_t) attributes; | |
1288 | ||
1289 | may_cache = perf->may_cache; | |
1c79356b A |
1290 | |
1291 | break; | |
1292 | } | |
1293 | ||
1294 | case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO: | |
1295 | { | |
1296 | old_memory_object_attr_info_t attr; | |
1297 | ||
1298 | if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) { | |
1299 | result = KERN_INVALID_ARGUMENT; | |
1300 | break; | |
1301 | } | |
1302 | ||
1303 | attr = (old_memory_object_attr_info_t) attributes; | |
1304 | ||
1305 | may_cache = attr->may_cache; | |
1306 | copy_strategy = attr->copy_strategy; | |
1c79356b A |
1307 | |
1308 | break; | |
1309 | } | |
1310 | ||
1311 | case MEMORY_OBJECT_ATTRIBUTE_INFO: | |
1312 | { | |
1313 | memory_object_attr_info_t attr; | |
1314 | ||
1315 | if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) { | |
1316 | result = KERN_INVALID_ARGUMENT; | |
1317 | break; | |
1318 | } | |
1319 | ||
1320 | attr = (memory_object_attr_info_t) attributes; | |
1321 | ||
1322 | copy_strategy = attr->copy_strategy; | |
1323 | may_cache = attr->may_cache_object; | |
1c79356b A |
1324 | temporary = attr->temporary; |
1325 | ||
1326 | break; | |
1327 | } | |
1328 | ||
1329 | default: | |
1330 | result = KERN_INVALID_ARGUMENT; | |
1331 | break; | |
1332 | } | |
1333 | ||
0b4e3aa0 | 1334 | if (result != KERN_SUCCESS) |
1c79356b | 1335 | return(result); |
1c79356b A |
1336 | |
1337 | if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) { | |
1338 | copy_strategy = MEMORY_OBJECT_COPY_DELAY; | |
1339 | temporary = TRUE; | |
1340 | } else { | |
1341 | temporary = FALSE; | |
1342 | } | |
1343 | ||
1344 | /* | |
1c79356b A |
1345 | * XXX may_cache may become a tri-valued variable to handle |
1346 | * XXX uncache if not in use. | |
1347 | */ | |
0b4e3aa0 | 1348 | return (vm_object_set_attributes_common(object, |
1c79356b A |
1349 | may_cache, |
1350 | copy_strategy, | |
1351 | temporary, | |
1c79356b | 1352 | silent_overwrite, |
0b4e3aa0 | 1353 | advisory_pageout)); |
1c79356b A |
1354 | } |
1355 | ||
1356 | kern_return_t | |
1357 | memory_object_get_attributes( | |
0b4e3aa0 | 1358 | memory_object_control_t control, |
1c79356b A |
1359 | memory_object_flavor_t flavor, |
1360 | memory_object_info_t attributes, /* pointer to OUT array */ | |
1361 | mach_msg_type_number_t *count) /* IN/OUT */ | |
1362 | { | |
0b4e3aa0 A |
1363 | kern_return_t ret = KERN_SUCCESS; |
1364 | vm_object_t object; | |
1c79356b | 1365 | |
0b4e3aa0 A |
1366 | object = memory_object_control_to_vm_object(control); |
1367 | if (object == VM_OBJECT_NULL) | |
1368 | return (KERN_INVALID_ARGUMENT); | |
1c79356b A |
1369 | |
1370 | vm_object_lock(object); | |
1371 | ||
1372 | switch (flavor) { | |
1373 | case OLD_MEMORY_OBJECT_BEHAVIOR_INFO: | |
1374 | { | |
1375 | old_memory_object_behave_info_t behave; | |
1376 | ||
1377 | if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) { | |
1378 | ret = KERN_INVALID_ARGUMENT; | |
1379 | break; | |
1380 | } | |
1381 | ||
1382 | behave = (old_memory_object_behave_info_t) attributes; | |
1383 | behave->copy_strategy = object->copy_strategy; | |
1384 | behave->temporary = object->temporary; | |
1385 | #if notyet /* remove when vm_msync complies and clean in place fini */ | |
1386 | behave->invalidate = object->invalidate; | |
1387 | #else | |
1388 | behave->invalidate = FALSE; | |
1389 | #endif | |
1390 | ||
1391 | *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT; | |
1392 | break; | |
1393 | } | |
1394 | ||
1395 | case MEMORY_OBJECT_BEHAVIOR_INFO: | |
1396 | { | |
1397 | memory_object_behave_info_t behave; | |
1398 | ||
1399 | if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) { | |
1400 | ret = KERN_INVALID_ARGUMENT; | |
1401 | break; | |
1402 | } | |
1403 | ||
1404 | behave = (memory_object_behave_info_t) attributes; | |
1405 | behave->copy_strategy = object->copy_strategy; | |
1406 | behave->temporary = object->temporary; | |
1407 | #if notyet /* remove when vm_msync complies and clean in place fini */ | |
1408 | behave->invalidate = object->invalidate; | |
1409 | #else | |
1410 | behave->invalidate = FALSE; | |
1411 | #endif | |
1412 | behave->advisory_pageout = object->advisory_pageout; | |
1413 | behave->silent_overwrite = object->silent_overwrite; | |
1414 | *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT; | |
1415 | break; | |
1416 | } | |
1417 | ||
1418 | case MEMORY_OBJECT_PERFORMANCE_INFO: | |
1419 | { | |
1420 | memory_object_perf_info_t perf; | |
1421 | ||
1422 | if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) { | |
1423 | ret = KERN_INVALID_ARGUMENT; | |
1424 | break; | |
1425 | } | |
1426 | ||
1427 | perf = (memory_object_perf_info_t) attributes; | |
2d21ac55 | 1428 | perf->cluster_size = PAGE_SIZE; |
1c79356b A |
1429 | perf->may_cache = object->can_persist; |
1430 | ||
1431 | *count = MEMORY_OBJECT_PERF_INFO_COUNT; | |
1432 | break; | |
1433 | } | |
1434 | ||
1435 | case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO: | |
1436 | { | |
1437 | old_memory_object_attr_info_t attr; | |
1438 | ||
1439 | if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) { | |
1440 | ret = KERN_INVALID_ARGUMENT; | |
1441 | break; | |
1442 | } | |
1443 | ||
1444 | attr = (old_memory_object_attr_info_t) attributes; | |
1445 | attr->may_cache = object->can_persist; | |
1446 | attr->copy_strategy = object->copy_strategy; | |
1447 | ||
1448 | *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT; | |
1449 | break; | |
1450 | } | |
1451 | ||
1452 | case MEMORY_OBJECT_ATTRIBUTE_INFO: | |
1453 | { | |
1454 | memory_object_attr_info_t attr; | |
1455 | ||
1456 | if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) { | |
1457 | ret = KERN_INVALID_ARGUMENT; | |
1458 | break; | |
1459 | } | |
1460 | ||
1461 | attr = (memory_object_attr_info_t) attributes; | |
1462 | attr->copy_strategy = object->copy_strategy; | |
2d21ac55 | 1463 | attr->cluster_size = PAGE_SIZE; |
1c79356b A |
1464 | attr->may_cache_object = object->can_persist; |
1465 | attr->temporary = object->temporary; | |
1466 | ||
1467 | *count = MEMORY_OBJECT_ATTR_INFO_COUNT; | |
1468 | break; | |
1469 | } | |
1470 | ||
1471 | default: | |
1472 | ret = KERN_INVALID_ARGUMENT; | |
1473 | break; | |
1474 | } | |
1475 | ||
1476 | vm_object_unlock(object); | |
1477 | ||
1c79356b A |
1478 | return(ret); |
1479 | } | |
1480 | ||

kern_return_t
memory_object_iopl_request(
	ipc_port_t		port,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;
	int			caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
		vm_named_entry_t	named_entry;

		named_entry = (vm_named_entry_t)port->ip_kobject;
		/* a few checks to make sure user is obeying rules */
		if (*upl_size == 0) {
			if (offset >= named_entry->size)
				return(KERN_INVALID_RIGHT);
			*upl_size = (upl_size_t)(named_entry->size - offset);
			if (*upl_size != named_entry->size - offset)
				return KERN_INVALID_ARGUMENT;
		}
		if (caller_flags & UPL_COPYOUT_FROM) {
			if ((named_entry->protection & VM_PROT_READ)
			    != VM_PROT_READ) {
				return(KERN_INVALID_RIGHT);
			}
		} else {
			if ((named_entry->protection &
			     (VM_PROT_READ | VM_PROT_WRITE))
			    != (VM_PROT_READ | VM_PROT_WRITE)) {
				return(KERN_INVALID_RIGHT);
			}
		}
		if (named_entry->size < (offset + *upl_size))
			return(KERN_INVALID_ARGUMENT);

		/* the caller's parameter offset is defined to be the */
		/* offset from beginning of named entry offset in object */
		offset = offset + named_entry->offset;

		if (named_entry->is_sub_map)
			return (KERN_INVALID_ARGUMENT);

		named_entry_lock(named_entry);

		if (named_entry->is_pager) {
			object = vm_object_enter(named_entry->backing.pager,
						 named_entry->offset + named_entry->size,
						 named_entry->internal,
						 FALSE,
						 FALSE);
			if (object == VM_OBJECT_NULL) {
				named_entry_unlock(named_entry);
				return(KERN_INVALID_OBJECT);
			}

			/* JMM - drop reference on pager here? */

			/* create an extra reference for the named entry */
			vm_object_lock(object);
			vm_object_reference_locked(object);
			named_entry->backing.object = object;
			named_entry->is_pager = FALSE;
			named_entry_unlock(named_entry);

			/* wait for object to be ready */
			while (!object->pager_ready) {
				vm_object_wait(object,
					       VM_OBJECT_EVENT_PAGER_READY,
					       THREAD_UNINT);
				vm_object_lock(object);
			}
			vm_object_unlock(object);
		} else {
			/* This is the case where we are going to map */
			/* an already mapped object.  If the object is */
			/* not ready it is internal.  An external     */
			/* object cannot be mapped until it is ready  */
			/* we can therefore avoid the ready check     */
			/* in this case.  */
			object = named_entry->backing.object;
			vm_object_reference(object);
			named_entry_unlock(named_entry);
		}
	} else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
		memory_object_control_t	control;
		control = (memory_object_control_t) port;
		if (control == NULL)
			return (KERN_INVALID_ARGUMENT);
		object = memory_object_control_to_vm_object(control);
		if (object == VM_OBJECT_NULL)
			return (KERN_INVALID_ARGUMENT);
		vm_object_reference(object);
	} else {
		return KERN_INVALID_ARGUMENT;
	}
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (!object->private) {
		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     caller_flags);
	vm_object_deallocate(object);
	return ret;
}

/*
 *	Routine:	memory_object_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 */

kern_return_t
memory_object_upl_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_TERMINATED);

	return vm_object_upl_request(object,
				     offset,
				     size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     cntrl_flags);
}
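
/*
 * Illustrative sketch (not part of the original interface): one way a
 * caller might use memory_object_upl_request() to populate a small run
 * of pages.  The helper name and the fixed 4-page request are
 * assumptions for the example; only calls and flags already declared
 * in this file are used.  A real caller would subsequently commit or
 * abort the returned UPL before releasing it.
 */
static __unused kern_return_t
example_upl_populate(memory_object_control_t control)
{
	upl_t		upl;
	upl_page_info_t	page_list[4];
	unsigned int	page_list_count = 4;

	/* ask for the first four pages of the object, for copy-out */
	return memory_object_upl_request(control,
					 (memory_object_offset_t) 0,
					 (upl_size_t)(4 * PAGE_SIZE),
					 &upl,
					 page_list,
					 &page_list_count,
					 UPL_COPYOUT_FROM);
}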

/*
 *	Routine:	memory_object_super_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object
 *		in much the same way as memory_object_upl_request.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		However, the region may be expanded up to the super
 *		cluster size provided.
 */

kern_return_t
memory_object_super_upl_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_super_upl_request(object,
					   offset,
					   size,
					   super_cluster,
					   upl,
					   user_page_list,
					   page_list_count,
					   cntrl_flags);
}
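
/*
 * Illustrative sketch (hypothetical helper): the same kind of request
 * as the previous example, but allowing the region to be expanded up
 * to an 8-page super cluster.  The 1-page size and 8-page super_cluster
 * values are assumptions for the example only.
 */
static __unused kern_return_t
example_super_upl_populate(memory_object_control_t control,
			   memory_object_offset_t offset)
{
	upl_t		upl;
	upl_page_info_t	page_list[8];
	unsigned int	page_list_count = 8;

	return memory_object_super_upl_request(control,
					       offset,
					       (upl_size_t) PAGE_SIZE,
					       (upl_size_t)(8 * PAGE_SIZE),
					       &upl,
					       page_list,
					       &page_list_count,
					       UPL_COPYOUT_FROM);
}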

kern_return_t
memory_object_cluster_size(
	memory_object_control_t	control,
	memory_object_offset_t	*start,
	vm_size_t		*length,
	uint32_t		*io_streaming,
	memory_object_fault_info_t fault_info)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);

	if (object == VM_OBJECT_NULL || object->paging_offset > *start)
		return (KERN_INVALID_ARGUMENT);

	/* translate from a memory-object offset to a vm_object offset */
	*start -= object->paging_offset;

	vm_object_cluster_size(object, (vm_object_offset_t *) start, length,
			       (vm_object_fault_info_t) fault_info, io_streaming);

	/* and back to the caller's memory-object offset space */
	*start += object->paging_offset;

	return (KERN_SUCCESS);
}

int vm_stat_discard_cleared_reply = 0;
int vm_stat_discard_cleared_unset = 0;
int vm_stat_discard_cleared_too_late = 0;

/*
 *	Routine:	host_default_memory_manager [interface]
 *	Purpose:
 *		set/get the default memory manager port and default cluster
 *		size.
 *
 *		If successful, consumes the supplied naked send right.
 */
kern_return_t
host_default_memory_manager(
	host_priv_t		host_priv,
	memory_object_default_t	*default_manager,
	__unused memory_object_cluster_size_t cluster_size)
{
	memory_object_default_t	current_manager;
	memory_object_default_t	new_manager;
	memory_object_default_t	returned_manager;
	kern_return_t		result = KERN_SUCCESS;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	assert(host_priv == &realhost);

	new_manager = *default_manager;
	lck_mtx_lock(&memory_manager_default_lock);
	current_manager = memory_manager_default;
	returned_manager = MEMORY_OBJECT_DEFAULT_NULL;

	if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
		/*
		 *	Retrieve the current value.
		 */
		returned_manager = current_manager;
		memory_object_default_reference(returned_manager);
	} else {
		/*
		 *	If this is the first non-null manager, start
		 *	up the internal pager support.
		 */
		if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
			result = vm_pageout_internal_start();
			if (result != KERN_SUCCESS)
				goto out;
		}

		/*
		 *	Retrieve the current value,
		 *	and replace it with the supplied value.
		 *	We return the old reference to the caller
		 *	but we have to take a reference on the new
		 *	one.
		 */
		returned_manager = current_manager;
		memory_manager_default = new_manager;
		memory_object_default_reference(new_manager);

		/*
		 *	In case anyone's been waiting for a memory
		 *	manager to be established, wake them up.
		 */
		thread_wakeup((event_t) &memory_manager_default);

		/*
		 *	Now that we have a default pager for anonymous memory,
		 *	reactivate all the throttled pages (i.e. dirty pages
		 *	with no pager).
		 */
		if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
			vm_page_reactivate_all_throttled();
		}
	}
out:
	lck_mtx_unlock(&memory_manager_default_lock);

	*default_manager = returned_manager;
	return (result);
}
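
/*
 * Illustrative sketch (hypothetical helper): passing
 * MEMORY_OBJECT_DEFAULT_NULL as the in/out manager argument retrieves
 * the current default manager (taking a new send right) without
 * replacing it, as the retrieve branch above shows.  The cluster_size
 * argument is unused, so 0 is passed.
 */
static __unused kern_return_t
example_query_default_manager(host_priv_t host_priv,
			      memory_object_default_t *manager)
{
	*manager = MEMORY_OBJECT_DEFAULT_NULL;	/* NULL means "just get" */
	return host_default_memory_manager(host_priv, manager, 0);
}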

/*
 *	Routine:	memory_manager_default_reference
 *	Purpose:
 *		Returns a naked send right for the default
 *		memory manager.  The returned right is always
 *		valid (not IP_NULL or IP_DEAD).
 */

__private_extern__ memory_object_default_t
memory_manager_default_reference(void)
{
	memory_object_default_t	current_manager;

	lck_mtx_lock(&memory_manager_default_lock);
	current_manager = memory_manager_default;
	while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
		wait_result_t res;

		res = lck_mtx_sleep(&memory_manager_default_lock,
				    LCK_SLEEP_DEFAULT,
				    (event_t) &memory_manager_default,
				    THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
		current_manager = memory_manager_default;
	}
	memory_object_default_reference(current_manager);
	lck_mtx_unlock(&memory_manager_default_lock);

	return current_manager;
}

/*
 *	Routine:	memory_manager_default_check
 *
 *	Purpose:
 *		Check whether a default memory manager has been set
 *		up yet.  Returns KERN_SUCCESS if the dmm exists,
 *		and KERN_FAILURE if it does not.
 *
 *		If there is no default memory manager, log an error,
 *		but only the first time.
 */
__private_extern__ kern_return_t
memory_manager_default_check(void)
{
	memory_object_default_t	current;

	lck_mtx_lock(&memory_manager_default_lock);
	current = memory_manager_default;
	if (current == MEMORY_OBJECT_DEFAULT_NULL) {
		static boolean_t logged;	/* initialized to 0 */
		boolean_t	complain = !logged;

		logged = TRUE;
		lck_mtx_unlock(&memory_manager_default_lock);
		if (complain)
			printf("Warning: No default memory manager\n");
		return (KERN_FAILURE);
	} else {
		lck_mtx_unlock(&memory_manager_default_lock);
		return (KERN_SUCCESS);
	}
}
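
/*
 * Illustrative sketch (hypothetical helper): unlike
 * memory_manager_default_reference(), which sleeps until a manager
 * appears, the check above returns immediately, so one plausible use
 * is a non-blocking "is anonymous memory pageable yet?" predicate.
 */
static __unused boolean_t
example_default_pager_ready(void)
{
	return (memory_manager_default_check() == KERN_SUCCESS);
}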

__private_extern__ void
memory_manager_default_init(void)
{
	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
	lck_mtx_init(&memory_manager_default_lock,
		     &vm_object_lck_grp, &vm_object_lck_attr);
}


/*
 * Allow manipulation of individual page state.  This is actually part of
 * the UPL regimen but takes place on the object rather than on a UPL.
 */

kern_return_t
memory_object_page_op(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_page_op(object, offset, ops, phys_entry, flags);
}
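
/*
 * Illustrative sketch (hypothetical helper): querying the state of a
 * single page.  With no op bits set, the call changes nothing and the
 * caller just reads back the page's state through flags and, via
 * phys_entry, its physical page number.  The UPL_POP_* op bits (assumed
 * to live in mach/memory_object_types.h) would be passed in "ops" to
 * actually modify page state.
 */
static __unused kern_return_t
example_query_page(memory_object_control_t control,
		   memory_object_offset_t offset,
		   ppnum_t *phys_entry,
		   int *flags)
{
	/* no SET/CLR bits: just report the page's current state */
	return memory_object_page_op(control, offset, 0, phys_entry, flags);
}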

/*
 * memory_object_range_op offers a performance enhancement over
 * memory_object_page_op for page_op functions which do not require
 * page-level state to be returned from the call.  page_op was created
 * to provide a low-cost alternative to page manipulation via UPLs when
 * only a single page was involved.  The range_op call extends the _op
 * family of functions to work on multiple pages, where the lack of
 * page-level state handling allows the caller to avoid the overhead of
 * the UPL structures.
 */

kern_return_t
memory_object_range_op(
	memory_object_control_t	control,
	memory_object_offset_t	offset_beg,
	memory_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_range_op(object,
				  offset_beg,
				  offset_end,
				  ops,
				  (uint32_t *) range);
}
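
/*
 * Illustrative sketch (hypothetical helper): applying one page-op to a
 * whole range in a single call instead of looping over
 * memory_object_page_op() page by page.  The choice of "ops" is left
 * to the caller; the UPL_ROP_* range-op bits are assumed to be defined
 * in mach/memory_object_types.h.
 */
static __unused kern_return_t
example_range_op(memory_object_control_t control,
		 memory_object_offset_t beg,
		 memory_object_offset_t end,
		 int ops)
{
	int range = 0;	/* receives how far the operation progressed */

	return memory_object_range_op(control, beg, end, ops, &range);
}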


kern_return_t
memory_object_pages_resident(
	memory_object_control_t	control,
	boolean_t		*has_pages_resident)
{
	vm_object_t	object;

	*has_pages_resident = FALSE;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (object->resident_page_count)
		*has_pages_resident = TRUE;

	return (KERN_SUCCESS);
}

kern_return_t
memory_object_signed(
	memory_object_control_t	control,
	boolean_t		is_signed)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	vm_object_lock(object);
	object->code_signed = is_signed;
	vm_object_unlock(object);

	return KERN_SUCCESS;
}

static zone_t mem_obj_control_zone;

__private_extern__ void
memory_object_control_bootstrap(void)
{
	int	i;

	i = (vm_size_t) sizeof (struct memory_object_control);
	mem_obj_control_zone = zinit(i, 8192 * i, 4096, "mem_obj_control");
	return;
}

__private_extern__ memory_object_control_t
memory_object_control_allocate(
	vm_object_t	object)
{
	memory_object_control_t	control;

	control = (memory_object_control_t) zalloc(mem_obj_control_zone);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {
		control->moc_object = object;
		control->moc_ikot = IKOT_MEM_OBJ_CONTROL; /* fake ip_kotype */
	}
	return (control);
}

__private_extern__ void
memory_object_control_collapse(
	memory_object_control_t	control,
	vm_object_t		object)
{
	assert((control->moc_object != VM_OBJECT_NULL) &&
	       (control->moc_object != object));
	control->moc_object = object;
}

__private_extern__ vm_object_t
memory_object_control_to_vm_object(
	memory_object_control_t	control)
{
	if (control == MEMORY_OBJECT_CONTROL_NULL ||
	    control->moc_ikot != IKOT_MEM_OBJ_CONTROL)
		return VM_OBJECT_NULL;

	return (control->moc_object);
}

memory_object_control_t
convert_port_to_mo_control(
	__unused mach_port_t	port)
{
	return MEMORY_OBJECT_CONTROL_NULL;
}

mach_port_t
convert_mo_control_to_port(
	__unused memory_object_control_t	control)
{
	return MACH_PORT_NULL;
}

void
memory_object_control_reference(
	__unused memory_object_control_t	control)
{
	return;
}

/*
 * We only ever issue one of these references, so kill it
 * when that gets released (should switch the real reference
 * counting in true port-less EMMI).
 */
void
memory_object_control_deallocate(
	memory_object_control_t	control)
{
	zfree(mem_obj_control_zone, control);
}

void
memory_object_control_disable(
	memory_object_control_t	control)
{
	assert(control->moc_object != VM_OBJECT_NULL);
	control->moc_object = VM_OBJECT_NULL;
}

void
memory_object_default_reference(
	memory_object_default_t	dmm)
{
	ipc_port_make_send(dmm);
}

void
memory_object_default_deallocate(
	memory_object_default_t	dmm)
{
	ipc_port_release_send(dmm);
}

memory_object_t
convert_port_to_memory_object(
	__unused mach_port_t	port)
{
	return (MEMORY_OBJECT_NULL);
}

mach_port_t
convert_memory_object_to_port(
	__unused memory_object_t	object)
{
	return (MACH_PORT_NULL);
}

/* Routine memory_object_reference */
void memory_object_reference(
	memory_object_t memory_object)
{
	(memory_object->mo_pager_ops->memory_object_reference)(
		memory_object);
}

/* Routine memory_object_deallocate */
void memory_object_deallocate(
	memory_object_t memory_object)
{
	(memory_object->mo_pager_ops->memory_object_deallocate)(
		memory_object);
}

/* Routine memory_object_init */
kern_return_t memory_object_init
(
	memory_object_t memory_object,
	memory_object_control_t memory_control,
	memory_object_cluster_size_t memory_object_page_size
)
{
	return (memory_object->mo_pager_ops->memory_object_init)(
		memory_object,
		memory_control,
		memory_object_page_size);
}

/* Routine memory_object_terminate */
kern_return_t memory_object_terminate
(
	memory_object_t memory_object
)
{
	return (memory_object->mo_pager_ops->memory_object_terminate)(
		memory_object);
}

/* Routine memory_object_data_request */
kern_return_t memory_object_data_request
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	vm_prot_t desired_access,
	memory_object_fault_info_t fault_info
)
{
	return (memory_object->mo_pager_ops->memory_object_data_request)(
		memory_object,
		offset,
		length,
		desired_access,
		fault_info);
}

/* Routine memory_object_data_return */
kern_return_t memory_object_data_return
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size,
	memory_object_offset_t *resid_offset,
	int *io_error,
	boolean_t dirty,
	boolean_t kernel_copy,
	int upl_flags
)
{
	return (memory_object->mo_pager_ops->memory_object_data_return)(
		memory_object,
		offset,
		size,
		resid_offset,
		io_error,
		dirty,
		kernel_copy,
		upl_flags);
}

/* Routine memory_object_data_initialize */
kern_return_t memory_object_data_initialize
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size
)
{
	return (memory_object->mo_pager_ops->memory_object_data_initialize)(
		memory_object,
		offset,
		size);
}

/* Routine memory_object_data_unlock */
kern_return_t memory_object_data_unlock
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_prot_t desired_access
)
{
	return (memory_object->mo_pager_ops->memory_object_data_unlock)(
		memory_object,
		offset,
		size,
		desired_access);
}

/* Routine memory_object_synchronize */
kern_return_t memory_object_synchronize
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_sync_t sync_flags
)
{
	return (memory_object->mo_pager_ops->memory_object_synchronize)(
		memory_object,
		offset,
		size,
		sync_flags);
}


/*
 * memory_object_map() is called by VM (in vm_map_enter() and its variants)
 * each time a "named" VM object gets mapped directly or indirectly
 * (copy-on-write mapping).  A "named" VM object has an extra reference held
 * by the pager to keep it alive until the pager decides that the
 * memory object (and its VM object) can be reclaimed.
 * VM calls memory_object_last_unmap() (in vm_object_deallocate()) when all
 * the mappings of that memory object have been removed.
 *
 * For a given VM object, calls to memory_object_map() and
 * memory_object_last_unmap() are serialized (through
 * object->mapping_in_progress), to ensure that the pager gets a consistent
 * view of the mapping status of the memory object.
 *
 * This allows the pager to keep track of how many times a memory object
 * has been mapped and with which protections, to decide when it can be
 * reclaimed.
 */

/* Routine memory_object_map */
kern_return_t memory_object_map
(
	memory_object_t memory_object,
	vm_prot_t prot
)
{
	return (memory_object->mo_pager_ops->memory_object_map)(
		memory_object,
		prot);
}

/* Routine memory_object_last_unmap */
kern_return_t memory_object_last_unmap
(
	memory_object_t memory_object
)
{
	return (memory_object->mo_pager_ops->memory_object_last_unmap)(
		memory_object);
}
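
/*
 * Illustrative sketch (hypothetical sequence): the pairing the comment
 * above describes, as VM would drive it.  A mapping notifies the pager
 * via memory_object_map() with the mapping's protection; once every
 * mapping is gone, memory_object_last_unmap() lets the pager consider
 * reclaiming the memory object.
 */
static __unused void
example_map_unmap_cycle(memory_object_t memory_object)
{
	/* a mapping is established with read protection */
	(void) memory_object_map(memory_object, VM_PROT_READ);

	/* ... the mapping is used, then torn down ... */

	/* the last mapping went away */
	(void) memory_object_last_unmap(memory_object);
}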

/* Routine memory_object_create */
kern_return_t memory_object_create
(
	memory_object_default_t default_memory_manager,
	vm_size_t new_memory_object_size,
	memory_object_t *new_memory_object
)
{
	return default_pager_memory_object_create(default_memory_manager,
						  new_memory_object_size,
						  new_memory_object);
}

upl_t
convert_port_to_upl(
	ipc_port_t	port)
{
	upl_t	upl;

	ip_lock(port);
	if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
		ip_unlock(port);
		return (upl_t) NULL;
	}
	upl = (upl_t) port->ip_kobject;
	ip_unlock(port);
	upl_lock(upl);
	upl->ref_count += 1;
	upl_unlock(upl);
	return upl;
}

mach_port_t
convert_upl_to_port(
	__unused upl_t	upl)
{
	return MACH_PORT_NULL;
}

__private_extern__ void
upl_no_senders(
	__unused ipc_port_t		port,
	__unused mach_port_mscount_t	mscount)
{
	return;
}