/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/memory_object.c
 *	Author:	Michael Wayne Young
 *
 *	External memory management interface control functions.
 */

/*
 *	Interface dependencies:
 */

#include <mach/std_types.h>	/* For pointer_t */
#include <mach/mach_types.h>

#include <mach/mig.h>
#include <mach/kern_return.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/host_priv_server.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/message.h>

/*
 *	Implementation dependencies:
 */
#include <string.h>		/* For memcpy() */

#include <kern/xpr.h>
#include <kern/host.h>
#include <kern/thread.h>	/* For current_thread() */
#include <kern/ipc_mig.h>
#include <kern/misc_protos.h>

#include <vm/vm_object.h>
#include <vm/vm_fault.h>
#include <vm/memory_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>		/* For pmap_clear_modify */
#include <vm/vm_kern.h>		/* For kernel_map, vm_move */
#include <vm/vm_map.h>		/* For vm_map_pageable */
#include <vm/vm_purgeable_internal.h>	/* Needed by some vm_page.h macros */
#include <vm/vm_shared_region.h>

#include <vm/vm_external.h>

#include <vm/vm_protos.h>

memory_object_default_t	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
decl_lck_mtx_data(,	memory_manager_default_lock)


/*
 *	Routine:	memory_object_should_return_page
 *
 *	Description:
 *		Determine whether the given page should be returned,
 *		based on the page's state and on the given return policy.
 *
 *		We should return the page if one of the following is true:
 *
 *		1. Page is dirty and should_return is not RETURN_NONE.
 *		2. Page is precious and should_return is RETURN_ALL.
 *		3. Should_return is RETURN_ANYTHING.
 *
 *		As a side effect, m->dirty will be made consistent
 *		with pmap_is_modified(m), if should_return is not
 *		MEMORY_OBJECT_RETURN_NONE.
 */

#define	memory_object_should_return_page(m, should_return) \
    (should_return != MEMORY_OBJECT_RETURN_NONE && \
     (((m)->dirty || ((m)->dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m)))) || \
      ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
      (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
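
/*
 * Illustrative note (added for exposition, not from the original source):
 * two sample evaluations of the policy above.  A page that is clean as
 * far as the VM layer knows (m->dirty == FALSE) but has been written
 * through the pmap is caught by the pmap_is_modified() term, which also
 * re-synchronizes m->dirty as a side effect:
 *
 *	should_return == MEMORY_OBJECT_RETURN_DIRTY,
 *	m->dirty == FALSE, pmap_is_modified(...) == TRUE
 *		=> m->dirty is set to TRUE and the macro yields TRUE
 *
 *	should_return == MEMORY_OBJECT_RETURN_DIRTY,
 *	m->dirty == FALSE, m->precious == TRUE
 *		=> macro yields FALSE; precious-but-clean pages are
 *		   only returned for RETURN_ALL (or RETURN_ANYTHING)
 */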

typedef	int	memory_object_lock_result_t;

#define MEMORY_OBJECT_LOCK_RESULT_DONE			0
#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK		1
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN		2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_FREE		3

memory_object_lock_result_t memory_object_lock_page(
				vm_page_t		m,
				memory_object_return_t	should_return,
				boolean_t		should_flush,
				vm_prot_t		prot);

/*
 *	Routine:	memory_object_lock_page
 *
 *	Description:
 *		Perform the appropriate lock operations on the
 *		given page.  See the description of
 *		"memory_object_lock_request" for the meanings
 *		of the arguments.
 *
 *		Returns an indication that the operation
 *		completed, blocked, or that the page must
 *		be cleaned.
 */
memory_object_lock_result_t
memory_object_lock_page(
	vm_page_t		m,
	memory_object_return_t	should_return,
	boolean_t		should_flush,
	vm_prot_t		prot)
{
	XPR(XPR_MEMORY_OBJECT,
	    "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
	    m, should_return, should_flush, prot, 0);


	if (m->busy || m->cleaning)
		return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);

	if (m->laundry)
		vm_pageout_steal_laundry(m, FALSE);

	/*
	 *	Don't worry about pages for which the kernel
	 *	does not have any data.
	 */
	if (m->absent || m->error || m->restart) {
		if (m->error && should_flush && !VM_PAGE_WIRED(m)) {
			/*
			 * dump the page, pager wants us to
			 * clean it up and there is no
			 * relevant data to return
			 */
			return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
		}
		return (MEMORY_OBJECT_LOCK_RESULT_DONE);
	}
	assert(!m->fictitious);

	if (VM_PAGE_WIRED(m)) {
		/*
		 * The page is wired... just clean or return the page if needed.
		 * Wired pages don't get flushed or disconnected from the pmap.
		 */
		if (memory_object_should_return_page(m, should_return))
			return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);

		return (MEMORY_OBJECT_LOCK_RESULT_DONE);
	}

	if (should_flush) {
		/*
		 * must do the pmap_disconnect before determining the
		 * need to return the page... otherwise it's possible
		 * for the page to go from the clean to the dirty state
		 * after we've made our decision
		 */
		if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
			SET_PAGE_DIRTY(m, FALSE);
		}
	} else {
		/*
		 * If we are decreasing permission, do it now;
		 * let the fault handler take care of increases
		 * (pmap_page_protect may not increase protection).
		 */
		if (prot != VM_PROT_NO_CHANGE)
			pmap_page_protect(VM_PAGE_GET_PHYS_PAGE(m), VM_PROT_ALL & ~prot);
	}
	/*
	 *	Handle returning dirty or precious pages
	 */
	if (memory_object_should_return_page(m, should_return)) {
		/*
		 * we used to do a pmap_disconnect here in support
		 * of memory_object_lock_request, but that routine
		 * no longer requires this...  in any event, in
		 * our world, it would turn into a big noop since
		 * we don't lock the page in any way and as soon
		 * as we drop the object lock, the page can be
		 * faulted back into an address space
		 *
		 *	if (!should_flush)
		 *		pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
		 */
		return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
	}

	/*
	 *	Handle flushing clean pages
	 */
	if (should_flush)
		return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);

	/*
	 * we used to deactivate clean pages at this point,
	 * but we do not believe that an msync should change
	 * the 'age' of a page in the cache... here is the
	 * original comment and code concerning this...
	 *
	 * XXX Make clean but not flush a paging hint,
	 * and deactivate the pages.  This is a hack
	 * because it overloads flush/clean with
	 * implementation-dependent meaning.  This only
	 * happens to pages that are already clean.
	 *
	 *   if (vm_page_deactivate_hint && (should_return != MEMORY_OBJECT_RETURN_NONE))
	 *	return (MEMORY_OBJECT_LOCK_RESULT_MUST_DEACTIVATE);
	 */

	return (MEMORY_OBJECT_LOCK_RESULT_DONE);
}



/*
 *	Routine:	memory_object_lock_request [user interface]
 *
 *	Description:
 *		Control use of the data associated with the given
 *		memory object.  For each page in the given range,
 *		perform the following operations, in order:
 *			1)  restrict access to the page (disallow
 *			    forms specified by "prot");
 *			2)  return data to the manager (if "should_return"
 *			    is RETURN_DIRTY and the page is dirty, or
 *			    "should_return" is RETURN_ALL and the page
 *			    is either dirty or precious); and,
 *			3)  flush the cached copy (if "should_flush"
 *			    is asserted).
 *		The set of pages is defined by a starting offset
 *		("offset") and size ("size").  Only pages with the
 *		same page alignment as the starting offset are
 *		considered.
 *
 *		A single acknowledgement is sent (to the "reply_to"
 *		port) when these actions are complete.  If successful,
 *		the naked send right for reply_to is consumed.
 */

kern_return_t
memory_object_lock_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	memory_object_size_t	size,
	memory_object_offset_t	*resid_offset,
	int			*io_errno,
	memory_object_return_t	should_return,
	int			flags,
	vm_prot_t		prot)
{
	vm_object_t	object;

	/*
	 *	Check for bogus arguments.
	 */
	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
		return (KERN_INVALID_ARGUMENT);

	size = round_page_64(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
		if ((should_return != MEMORY_OBJECT_RETURN_NONE) || offset || object->copy) {
			flags &= ~MEMORY_OBJECT_DATA_FLUSH_ALL;
			flags |= MEMORY_OBJECT_DATA_FLUSH;
		}
	}
	offset -= object->paging_offset;

	if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL)
		vm_object_reap_pages(object, REAP_DATA_FLUSH);
	else
		(void)vm_object_update(object, offset, size, resid_offset,
				       io_errno, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return (KERN_SUCCESS);
}
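
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * in-kernel caller that writes all dirty and precious pages in the
 * first megabyte of an object back to its pager and flushes the cached
 * copies, leaving protection unchanged.  "ctl" is assumed to be a
 * memory_object_control_t the caller already holds.
 */
#if 0	/* example only */
static kern_return_t
example_flush_first_megabyte(memory_object_control_t ctl)
{
	memory_object_offset_t	resid_offset = 0;
	int			io_errno = 0;

	return memory_object_lock_request(ctl,
					  0,			/* offset */
					  1024 * 1024,		/* size   */
					  &resid_offset,
					  &io_errno,
					  MEMORY_OBJECT_RETURN_ALL,
					  MEMORY_OBJECT_DATA_FLUSH,
					  VM_PROT_NO_CHANGE);
}
#endif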

/*
 *	memory_object_release_name:  [interface]
 *
 *	Enforces the name semantic on a memory_object reference count
 *	decrement.  This routine should not be called unless the caller
 *	holds a name reference gained through the memory_object_named_create
 *	or the memory_object_rename call.
 *	If the TERMINATE_IDLE flag is set, the call will return if the
 *	reference count is not 1, i.e. idle with the only remaining
 *	reference being the name.
 *	If the decision is made to proceed, the name field flag is set to
 *	false and the reference count is decremented.  If the RESPECT_CACHE
 *	flag is set and the reference count has gone to zero, the
 *	memory_object is checked to see if it is cacheable; otherwise, when
 *	the reference count is zero, it is simply terminated.
 */

kern_return_t
memory_object_release_name(
	memory_object_control_t	control,
	int			flags)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_release_name(object, flags);
}



/*
 *	Routine:	memory_object_destroy [user interface]
 *	Purpose:
 *		Shut down a memory object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
memory_object_destroy(
	memory_object_control_t	control,
	kern_return_t		reason)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (vm_object_destroy(object, reason));
}

/*
 *	Routine:	vm_object_sync
 *
 *	Kernel internal function to synch out pages in a given
 *	range within an object to its memory manager.  Much the
 *	same as memory_object_lock_request but page protection
 *	is not changed.
 *
 *	If the should_flush and should_return flags are true pages
 *	are flushed, that is dirty & precious pages are written to
 *	the memory manager and then discarded.  If should_return
 *	is false, only precious pages are returned to the memory
 *	manager.
 *
 *	If should_flush is false and should_return is true, the memory
 *	manager's copy of the pages is updated.  If should_return
 *	is also false, only the precious pages are updated.  This
 *	last option is of limited utility.
 *
 *	Returns:
 *		FALSE		if no pages were returned to the pager
 *		TRUE		otherwise.
 */

boolean_t
vm_object_sync(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		should_flush,
	boolean_t		should_return,
	boolean_t		should_iosync)
{
	boolean_t	rv;
	int		flags;

	XPR(XPR_VM_OBJECT,
	    "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
	    object, offset, size, should_flush, should_return);

	/*
	 * Lock the object, and acquire a paging reference to
	 * prevent the memory_object and control ports from
	 * being destroyed.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	if (should_flush) {
		flags = MEMORY_OBJECT_DATA_FLUSH;
		/*
		 * This flush is from an msync(), not a truncate(), so the
		 * contents of the file are not affected.
		 * MEMORY_OBJECT_DATA_NO_CHANGE lets vm_object_update() know
		 * that the data is not changed and that there's no need to
		 * push the old contents to a copy object.
		 */
		flags |= MEMORY_OBJECT_DATA_NO_CHANGE;
	} else
		flags = 0;

	if (should_iosync)
		flags |= MEMORY_OBJECT_IO_SYNC;

	rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL,
			      (should_return) ?
			      MEMORY_OBJECT_RETURN_ALL :
			      MEMORY_OBJECT_RETURN_NONE,
			      flags,
			      VM_PROT_NO_CHANGE);


	vm_object_paging_end(object);
	vm_object_unlock(object);
	return rv;
}
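
/*
 * Illustrative sketch (not part of the original file): the msync()-style
 * flag combinations described in the block comment above.  "object",
 * "off" and "len" are assumed to be supplied by a hypothetical caller.
 */
#if 0	/* example only */
	/* MS_SYNC-like: push dirty & precious pages and wait for the I/O */
	(void) vm_object_sync(object, off, len,
			      FALSE,	/* should_flush: keep the pages   */
			      TRUE,	/* should_return: write them back */
			      TRUE);	/* should_iosync: synchronous I/O */

	/* invalidate-like: write back, then discard the cached copies */
	(void) vm_object_sync(object, off, len, TRUE, TRUE, FALSE);
#endif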



#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, po, ro, ioerr, iosync)	\
MACRO_BEGIN								\
									\
	int			upl_flags;				\
	memory_object_t		pager;					\
									\
	if (object->object_slid) {					\
		panic("Objects with slid pages not allowed\n");		\
	}								\
									\
	if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) {		\
		vm_object_paging_begin(object);				\
		vm_object_unlock(object);				\
									\
		if (iosync)						\
			upl_flags = UPL_MSYNC | UPL_IOSYNC;		\
		else							\
			upl_flags = UPL_MSYNC;				\
									\
		(void) memory_object_data_return(pager,			\
			po,						\
			(memory_object_cluster_size_t)data_cnt,		\
			ro,						\
			ioerr,						\
			FALSE,						\
			FALSE,						\
			upl_flags);					\
									\
		vm_object_lock(object);					\
		vm_object_paging_end(object);				\
	}								\
MACRO_END

extern struct vnode *
vnode_pager_lookup_vnode(memory_object_t);

static int
vm_object_update_extent(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_offset_t	offset_end,
	vm_object_offset_t	*offset_resid,
	int			*io_errno,
	boolean_t		should_flush,
	memory_object_return_t	should_return,
	boolean_t		should_iosync,
	vm_prot_t		prot)
{
	vm_page_t	m;
	int		retval = 0;
	vm_object_offset_t	paging_offset = 0;
	vm_object_offset_t	next_offset = offset;
	memory_object_lock_result_t	page_lock_result;
	memory_object_cluster_size_t	data_cnt = 0;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	int		dw_count;
	int		dw_limit;
	int		dirty_count;

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	dirty_count = 0;

	for (;
	     offset < offset_end && object->resident_page_count;
	     offset += PAGE_SIZE_64) {

		/*
		 * Limit the number of pages to be cleaned at once to a contiguous
		 * run, or at most MAX_UPL_TRANSFER_BYTES
		 */
		if (data_cnt) {
			if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {

				if (dw_count) {
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
					dwp = &dw_array[0];
					dw_count = 0;
				}
				LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
						       paging_offset, offset_resid, io_errno, should_iosync);
				data_cnt = 0;
			}
		}
		while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {

			dwp->dw_mask = 0;

			page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);

			if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) {
				/*
				 *	End of a run of dirty/precious pages.
				 */
				if (dw_count) {
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
					dwp = &dw_array[0];
					dw_count = 0;
				}
				LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
						       paging_offset, offset_resid, io_errno, should_iosync);
				/*
				 * LIST_REQ_PAGEOUT_PAGES will drop the object lock which will
				 * allow the state of page 'm' to change... we need to re-lookup
				 * the current offset
				 */
				data_cnt = 0;
				continue;
			}

			switch (page_lock_result) {

			case MEMORY_OBJECT_LOCK_RESULT_DONE:
				break;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
				if (m->dirty == TRUE)
					dirty_count++;
				dwp->dw_mask |= DW_vm_page_free;
				break;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
				PAGE_SLEEP(object, m, THREAD_UNINT);
				continue;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
				if (data_cnt == 0)
					paging_offset = offset;

				data_cnt += PAGE_SIZE;
				next_offset = offset + PAGE_SIZE_64;

				/*
				 * wired pages shouldn't be flushed and
				 * since they aren't on any queue,
				 * no need to remove them
				 */
				if (!VM_PAGE_WIRED(m)) {

					if (should_flush) {
						/*
						 * add additional state for the flush
						 */
						m->free_when_done = TRUE;
					}
					/*
					 * we used to remove the page from the queues at this
					 * point, but we do not believe that an msync
					 * should cause the 'age' of a page to be changed
					 *
					 *	else
					 *		dwp->dw_mask |= DW_VM_PAGE_QUEUES_REMOVE;
					 */
				}
				retval = 1;
				break;
			}
			if (dwp->dw_mask) {
				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

				if (dw_count >= dw_limit) {
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
					dwp = &dw_array[0];
					dw_count = 0;
				}
			}
			break;
		}
	}

	if (object->pager)
		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED, vnode_pager_lookup_vnode(object->pager));
	/*
	 *	We have completed the scan for applicable pages.
	 *	Clean any pages that have been saved.
	 */
	if (dw_count)
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);

	if (data_cnt) {
		LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
				       paging_offset, offset_resid, io_errno, should_iosync);
	}
	return (retval);
}


/*
 *	Routine:	vm_object_update
 *	Description:
 *		Work function for m_o_lock_request(), vm_o_sync().
 *
 *		Called with object locked and paging ref taken.
 */
kern_return_t
vm_object_update(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	vm_object_offset_t	*resid_offset,
	int			*io_errno,
	memory_object_return_t	should_return,
	int			flags,
	vm_prot_t		protection)
{
	vm_object_t		copy_object = VM_OBJECT_NULL;
	boolean_t		data_returned = FALSE;
	boolean_t		update_cow;
	boolean_t		should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE;
	boolean_t		should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE;
	vm_fault_return_t	result;
	int			num_of_extents;
	int			n;
#define MAX_EXTENTS	8
#define EXTENT_SIZE	(1024 * 1024 * 256)
#define RESIDENT_LIMIT	(1024 * 32)
	struct extent {
		vm_object_offset_t e_base;
		vm_object_offset_t e_min;
		vm_object_offset_t e_max;
	} extents[MAX_EXTENTS];

	/*
	 *	To avoid blocking while scanning for pages, save
	 *	dirty pages to be cleaned all at once.
	 *
	 *	XXXO A similar strategy could be used to limit the
	 *	number of times that a scan must be restarted for
	 *	other reasons.  Those pages that would require blocking
	 *	could be temporarily collected in another list, or
	 *	their offsets could be recorded in a small array.
	 */

	/*
	 *	XXX NOTE: May want to consider converting this to a page list
	 *	XXX vm_map_copy interface.  Need to understand object
	 *	XXX coalescing implications before doing so.
	 */

	update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
			&& (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
			    !(flags & MEMORY_OBJECT_DATA_PURGE)))
			|| (flags & MEMORY_OBJECT_COPY_SYNC);

	if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) {
		int collisions = 0;

		while ((copy_object = object->copy) != VM_OBJECT_NULL) {
			/*
			 * need to do a try here since we're swimming upstream
			 * against the normal lock ordering... however, we need
			 * to hold the object stable until we gain control of the
			 * copy object so we have to be careful how we approach this
			 */
			if (vm_object_lock_try(copy_object)) {
				/*
				 * we 'won' the lock on the copy object...
				 * no need to hold the object lock any longer...
				 * take a real reference on the copy object because
				 * we're going to call vm_fault_page on it which may
				 * under certain conditions drop the lock and the paging
				 * reference we're about to take... the reference
				 * will keep the copy object from going away if that happens
				 */
				vm_object_unlock(object);
				vm_object_reference_locked(copy_object);
				break;
			}
			vm_object_unlock(object);

			collisions++;
			mutex_pause(collisions);

			vm_object_lock(object);
		}
	}
	if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) {
		vm_map_size_t		i;
		vm_map_size_t		copy_size;
		vm_map_offset_t		copy_offset;
		vm_prot_t		prot;
		vm_page_t		page;
		vm_page_t		top_page;
		kern_return_t		error = 0;
		struct vm_object_fault_info fault_info;

		if (copy_object != VM_OBJECT_NULL) {
			/*
			 * translate offset with respect to shadow's offset
			 */
			copy_offset = (offset >= copy_object->vo_shadow_offset) ?
				(vm_map_offset_t)(offset - copy_object->vo_shadow_offset) :
				(vm_map_offset_t) 0;

			if (copy_offset > copy_object->vo_size)
				copy_offset = copy_object->vo_size;

			/*
			 * clip size with respect to shadow offset
			 */
			if (offset >= copy_object->vo_shadow_offset) {
				copy_size = size;
			} else if (size >= copy_object->vo_shadow_offset - offset) {
				copy_size = size - (copy_object->vo_shadow_offset - offset);
			} else {
				copy_size = 0;
			}

			if (copy_offset + copy_size > copy_object->vo_size) {
				if (copy_object->vo_size >= copy_offset) {
					copy_size = copy_object->vo_size - copy_offset;
				} else {
					copy_size = 0;
				}
			}
			copy_size += copy_offset;

		} else {
			copy_object = object;

			copy_size   = offset + size;
			copy_offset = offset;
		}
		fault_info.interruptible = THREAD_UNINT;
		fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
		fault_info.user_tag = 0;
		fault_info.pmap_options = 0;
		fault_info.lo_offset = copy_offset;
		fault_info.hi_offset = copy_size;
		fault_info.no_cache = FALSE;
		fault_info.stealth = TRUE;
		fault_info.io_sync = FALSE;
		fault_info.cs_bypass = FALSE;
		fault_info.mark_zf_absent = FALSE;
		fault_info.batch_pmap_op = FALSE;

		vm_object_paging_begin(copy_object);

		for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
	RETRY_COW_OF_LOCK_REQUEST:
			fault_info.cluster_size = (vm_size_t) (copy_size - i);
			assert(fault_info.cluster_size == copy_size - i);

			prot = VM_PROT_WRITE|VM_PROT_READ;
			page = VM_PAGE_NULL;
			result = vm_fault_page(copy_object, i,
					       VM_PROT_WRITE|VM_PROT_READ,
					       FALSE,
					       FALSE, /* page not looked up */
					       &prot,
					       &page,
					       &top_page,
					       (int *)0,
					       &error,
					       FALSE,
					       FALSE, &fault_info);

			switch (result) {
			case VM_FAULT_SUCCESS:
				if (top_page) {
					vm_fault_cleanup(
						VM_PAGE_OBJECT(page), top_page);
					vm_object_lock(copy_object);
					vm_object_paging_begin(copy_object);
				}
				if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {

					vm_page_lockspin_queues();

					if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
						vm_page_deactivate(page);
					}
					vm_page_unlock_queues();
				}
				PAGE_WAKEUP_DONE(page);
				break;
			case VM_FAULT_RETRY:
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_INTERRUPTED:
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_MEMORY_SHORTAGE:
				VM_PAGE_WAIT();
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_SUCCESS_NO_VM_PAGE:
				/* success but no VM page: fail */
				vm_object_paging_end(copy_object);
				vm_object_unlock(copy_object);
				/*FALLTHROUGH*/
			case VM_FAULT_MEMORY_ERROR:
				if (object != copy_object)
					vm_object_deallocate(copy_object);
				vm_object_lock(object);
				goto BYPASS_COW_COPYIN;
			default:
				panic("vm_object_update: unexpected error 0x%x"
				      " from vm_fault_page()\n", result);
			}

		}
		vm_object_paging_end(copy_object);
	}
	if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
		if (copy_object != VM_OBJECT_NULL && copy_object != object) {
			vm_object_unlock(copy_object);
			vm_object_deallocate(copy_object);
			vm_object_lock(object);
		}
		return KERN_SUCCESS;
	}
	if (copy_object != VM_OBJECT_NULL && copy_object != object) {
		if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
			vm_object_lock_assert_exclusive(copy_object);
			copy_object->shadow_severed = TRUE;
			copy_object->shadowed = FALSE;
			copy_object->shadow = NULL;
			/*
			 * delete the ref the COW was holding on the target object
			 */
			vm_object_deallocate(object);
		}
		vm_object_unlock(copy_object);
		vm_object_deallocate(copy_object);
		vm_object_lock(object);
	}
BYPASS_COW_COPYIN:

	/*
	 * when we have a really large range to check relative
	 * to the number of actual resident pages, we'd like
	 * to use the resident page list to drive our checks
	 * however, the object lock will get dropped while processing
	 * the page which means the resident queue can change which
	 * means we can't walk the queue as we process the pages
	 * we also want to do the processing in offset order to allow
	 * 'runs' of pages to be collected if we're being told to
	 * flush to disk... the resident page queue is NOT ordered.
	 *
	 * a temporary solution (until we figure out how to deal with
	 * large address spaces more generically) is to pre-flight
	 * the resident page queue (if it's small enough) and develop
	 * a collection of extents (that encompass actual resident pages)
	 * to visit.  This will at least allow us to deal with some of the
	 * more pathological cases in a more efficient manner.  The current
	 * worst case (a single resident page at the end of an extremely large
	 * range) can take minutes to complete for ranges in the terabyte
	 * category... since this routine is called when truncating a file,
	 * and we currently support files up to 16 Tbytes in size, this
	 * is not a theoretical problem
	 */

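	/*
	 * Worked example of the extent arithmetic used below (added for
	 * illustration, not from the original source): EXTENT_SIZE is
	 * 256MB, so e_mask = ~(EXTENT_SIZE - 1) clears the low 28 bits
	 * of an offset.  A resident page at offset 0x12345000 therefore
	 * lands in the extent whose e_base is 0x12345000 & e_mask ==
	 * 0x10000000, and every other resident page sharing that
	 * 256MB-aligned base just widens that extent's [e_min, e_max]
	 * range instead of consuming one of the MAX_EXTENTS (8) slots.
	 */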
	if ((object->resident_page_count < RESIDENT_LIMIT) &&
	    (atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) {
		vm_page_t		next;
		vm_object_offset_t	start;
		vm_object_offset_t	end;
		vm_object_size_t	e_mask;
		vm_page_t		m;

		start = offset;
		end   = offset + size;
		num_of_extents = 0;
		e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));

		m = (vm_page_t) vm_page_queue_first(&object->memq);

		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) m)) {
			next = (vm_page_t) vm_page_queue_next(&m->listq);

			if ((m->offset >= start) && (m->offset < end)) {
				/*
				 * this is a page we're interested in
				 * try to fit it into a current extent
				 */
				for (n = 0; n < num_of_extents; n++) {
					if ((m->offset & e_mask) == extents[n].e_base) {
						/*
						 * use (PAGE_SIZE - 1) to determine the
						 * max offset so that we don't wrap if
						 * we're at the last page of the space
						 */
						if (m->offset < extents[n].e_min)
							extents[n].e_min = m->offset;
						else if ((m->offset + (PAGE_SIZE - 1)) > extents[n].e_max)
							extents[n].e_max = m->offset + (PAGE_SIZE - 1);
						break;
					}
				}
				if (n == num_of_extents) {
					/*
					 * didn't find a current extent that can encompass
					 * this page
					 */
					if (n < MAX_EXTENTS) {
						/*
						 * if we still have room,
						 * create a new extent
						 */
						extents[n].e_base = m->offset & e_mask;
						extents[n].e_min  = m->offset;
						extents[n].e_max  = m->offset + (PAGE_SIZE - 1);

						num_of_extents++;
					} else {
						/*
						 * no room to create a new extent...
						 * fall back to a single extent based
						 * on the min and max page offsets
						 * we find in the range we're interested in...
						 * first, look through the extent list and
						 * develop the overall min and max for the
						 * pages we've looked at up to this point
						 */
						for (n = 1; n < num_of_extents; n++) {
							if (extents[n].e_min < extents[0].e_min)
								extents[0].e_min = extents[n].e_min;
							if (extents[n].e_max > extents[0].e_max)
								extents[0].e_max = extents[n].e_max;
						}
						/*
						 * now setup to run through the remaining pages
						 * to determine the overall min and max
						 * offset for the specified range
						 */
						extents[0].e_base = 0;
						e_mask = 0;
						num_of_extents = 1;

						/*
						 * by continuing, we'll reprocess the
						 * page that forced us to abandon trying
						 * to develop multiple extents
						 */
						continue;
					}
				}
			}
			m = next;
		}
	} else {
		extents[0].e_min = offset;
		extents[0].e_max = offset + (size - 1);

		num_of_extents = 1;
	}
	for (n = 0; n < num_of_extents; n++) {
		if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
					    should_flush, should_return, should_iosync, protection))
			data_returned = TRUE;
	}
	return (data_returned);
}


/*
 *	Routine:	memory_object_synchronize_completed [user interface]
 *
 *	Tell the kernel that previously synchronized data
 *	(memory_object_synchronize) has been queued or placed on the
 *	backing storage.
 *
 *	Note: there may be multiple synchronize requests for a given
 *	memory object outstanding, but they will not overlap.
 */

kern_return_t
memory_object_synchronize_completed(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	memory_object_size_t	length)
{
	vm_object_t	object;
	msync_req_t	msr;

	object = memory_object_control_to_vm_object(control);

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
	    object, offset, length, 0, 0);

	/*
	 *	Look for bogus arguments
	 */

	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	/*
	 *	search for sync request structure
	 */
	queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
		if (msr->offset == offset && msr->length == length) {
			queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
			break;
		}
	}/* queue_iterate */

	if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
		vm_object_unlock(object);
		return KERN_INVALID_ARGUMENT;
	}

	msr_lock(msr);
	vm_object_unlock(object);
	msr->flag = VM_MSYNC_DONE;
	msr_unlock(msr);
	thread_wakeup((event_t) msr);

	return KERN_SUCCESS;
}/* memory_object_synchronize_completed */
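
/*
 * Illustrative sketch (not part of the original file; the actual waiter
 * lives elsewhere in the VM code, so this is an assumption about its
 * shape): the completion handler above pairs with a requester that
 * queued the msync_req_t on the object and then sleeps until the flag
 * becomes VM_MSYNC_DONE, roughly:
 */
#if 0	/* example only */
	msr_lock(msr);
	while (msr->flag != VM_MSYNC_DONE) {
		assert_wait((event_t) msr, THREAD_INTERRUPTIBLE);
		msr_unlock(msr);
		thread_block(THREAD_CONTINUE_NULL);
		msr_lock(msr);
	}
	msr_unlock(msr);
#endif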

static kern_return_t
vm_object_set_attributes_common(
	vm_object_t	object,
	boolean_t	may_cache,
	memory_object_copy_strategy_t copy_strategy,
	boolean_t	temporary,
	__unused boolean_t	silent_overwrite,
	boolean_t	advisory_pageout)
{
	boolean_t	object_became_ready;

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
	    object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);

	if (object == VM_OBJECT_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 *	Verify the attributes of importance
	 */

	switch(copy_strategy) {
	case MEMORY_OBJECT_COPY_NONE:
	case MEMORY_OBJECT_COPY_DELAY:
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	if (may_cache)
		may_cache = TRUE;
	if (temporary)
		temporary = TRUE;

	vm_object_lock(object);

	/*
	 *	Copy the attributes
	 */
	assert(!object->internal);
	object_became_ready = !object->pager_ready;
	object->copy_strategy = copy_strategy;
	object->can_persist = may_cache;
	object->temporary = temporary;
//	object->silent_overwrite = silent_overwrite;
	object->advisory_pageout = advisory_pageout;

	/*
	 *	Wake up anyone waiting for the ready attribute
	 *	to become asserted.
	 */

	if (object_became_ready) {
		object->pager_ready = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
	}

	vm_object_unlock(object);

	return(KERN_SUCCESS);
}

/*
 *	Set the memory object attribute as provided.
 *
 *	XXX This routine cannot be completed until the vm_msync, clean
 *	    in place, and cluster work is completed.  See ifdef notyet
 *	    below and note that vm_object_set_attributes_common()
 *	    may have to be expanded.
 */
kern_return_t
memory_object_change_attributes(
	memory_object_control_t	control,
	memory_object_flavor_t	flavor,
	memory_object_info_t	attributes,
	mach_msg_type_number_t	count)
{
	vm_object_t			object;
	kern_return_t			result = KERN_SUCCESS;
	boolean_t			temporary;
	boolean_t			may_cache;
	boolean_t			invalidate;
	memory_object_copy_strategy_t	copy_strategy;
	boolean_t			silent_overwrite;
	boolean_t			advisory_pageout;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	temporary = object->temporary;
	may_cache = object->can_persist;
	copy_strategy = object->copy_strategy;
//	silent_overwrite = object->silent_overwrite;
	silent_overwrite = FALSE;
	advisory_pageout = object->advisory_pageout;
#if notyet
	invalidate = object->invalidate;
#endif
	vm_object_unlock(object);

	switch (flavor) {
	    case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		old_memory_object_behave_info_t	behave;

		if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (old_memory_object_behave_info_t) attributes;

		temporary = behave->temporary;
		invalidate = behave->invalidate;
		copy_strategy = behave->copy_strategy;

		break;
	    }

	    case MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		memory_object_behave_info_t	behave;

		if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (memory_object_behave_info_t) attributes;

		temporary = behave->temporary;
		invalidate = behave->invalidate;
		copy_strategy = behave->copy_strategy;
		silent_overwrite = behave->silent_overwrite;
		advisory_pageout = behave->advisory_pageout;
		break;
	    }

	    case MEMORY_OBJECT_PERFORMANCE_INFO:
	    {
		memory_object_perf_info_t	perf;

		if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		perf = (memory_object_perf_info_t) attributes;

		may_cache = perf->may_cache;

		break;
	    }

	    case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		old_memory_object_attr_info_t	attr;

		if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (old_memory_object_attr_info_t) attributes;

		may_cache = attr->may_cache;
		copy_strategy = attr->copy_strategy;

		break;
	    }

	    case MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		memory_object_attr_info_t	attr;

		if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (memory_object_attr_info_t) attributes;

		copy_strategy = attr->copy_strategy;
		may_cache = attr->may_cache_object;
		temporary = attr->temporary;

		break;
	    }

	    default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	if (result != KERN_SUCCESS)
		return(result);

	if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
		copy_strategy = MEMORY_OBJECT_COPY_DELAY;
		temporary = TRUE;
	} else {
		temporary = FALSE;
	}

	/*
	 * XXX	may_cache may become a tri-valued variable to handle
	 * XXX	uncache if not in use.
	 */
	return (vm_object_set_attributes_common(object,
						may_cache,
						copy_strategy,
						temporary,
						silent_overwrite,
						advisory_pageout));
}

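/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller using the MEMORY_OBJECT_PERFORMANCE_INFO flavor to mark an
 * object's pages cacheable.  "ctl" is assumed to be a valid control
 * reference held by the caller.
 */
#if 0	/* example only */
static kern_return_t
example_enable_caching(memory_object_control_t ctl)
{
	memory_object_perf_info_data_t	perf;

	perf.cluster_size = PAGE_SIZE;
	perf.may_cache = TRUE;

	return memory_object_change_attributes(ctl,
					MEMORY_OBJECT_PERFORMANCE_INFO,
					(memory_object_info_t) &perf,
					MEMORY_OBJECT_PERF_INFO_COUNT);
}
#endif
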
kern_return_t
memory_object_get_attributes(
	memory_object_control_t	control,
	memory_object_flavor_t	flavor,
	memory_object_info_t	attributes,	/* pointer to OUT array */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	kern_return_t	ret = KERN_SUCCESS;
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	switch (flavor) {
	    case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		old_memory_object_behave_info_t	behave;

		if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (old_memory_object_behave_info_t) attributes;
		behave->copy_strategy = object->copy_strategy;
		behave->temporary = object->temporary;
#if notyet	/* remove when vm_msync complies and clean in place fini */
		behave->invalidate = object->invalidate;
#else
		behave->invalidate = FALSE;
#endif

		*count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
		break;
	    }

	    case MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		memory_object_behave_info_t	behave;

		if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (memory_object_behave_info_t) attributes;
		behave->copy_strategy = object->copy_strategy;
		behave->temporary = object->temporary;
#if notyet	/* remove when vm_msync complies and clean in place fini */
		behave->invalidate = object->invalidate;
#else
		behave->invalidate = FALSE;
#endif
		behave->advisory_pageout = object->advisory_pageout;
//		behave->silent_overwrite = object->silent_overwrite;
		behave->silent_overwrite = FALSE;
		*count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
		break;
	    }

	    case MEMORY_OBJECT_PERFORMANCE_INFO:
	    {
		memory_object_perf_info_t	perf;

		if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		perf = (memory_object_perf_info_t) attributes;
		perf->cluster_size = PAGE_SIZE;
		perf->may_cache = object->can_persist;

		*count = MEMORY_OBJECT_PERF_INFO_COUNT;
		break;
	    }

	    case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		old_memory_object_attr_info_t	attr;

		if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (old_memory_object_attr_info_t) attributes;
		attr->may_cache = object->can_persist;
		attr->copy_strategy = object->copy_strategy;

		*count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
		break;
	    }

	    case MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		memory_object_attr_info_t	attr;

		if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (memory_object_attr_info_t) attributes;
		attr->copy_strategy = object->copy_strategy;
		attr->cluster_size = PAGE_SIZE;
		attr->may_cache_object = object->can_persist;
		attr->temporary = object->temporary;

		*count = MEMORY_OBJECT_ATTR_INFO_COUNT;
		break;
	    }

	    default:
		ret = KERN_INVALID_ARGUMENT;
		break;
	}

	vm_object_unlock(object);

	return(ret);
}


55e303ae A |
1454 | kern_return_t |
1455 | memory_object_iopl_request( | |
1456 | ipc_port_t port, | |
1457 | memory_object_offset_t offset, | |
91447636 | 1458 | upl_size_t *upl_size, |
55e303ae A |
1459 | upl_t *upl_ptr, |
1460 | upl_page_info_array_t user_page_list, | |
1461 | unsigned int *page_list_count, | |
3e170ce0 | 1462 | upl_control_flags_t *flags) |
55e303ae A |
1463 | { |
1464 | vm_object_t object; | |
1465 | kern_return_t ret; | |
3e170ce0 | 1466 | upl_control_flags_t caller_flags; |
55e303ae A |
1467 | |
1468 | caller_flags = *flags; | |
1469 | ||
91447636 A |
1470 | if (caller_flags & ~UPL_VALID_FLAGS) { |
1471 | /* | |
1472 | * For forward compatibility's sake, | |
1473 | * reject any unknown flag. | |
1474 | */ | |
1475 | return KERN_INVALID_VALUE; | |
1476 | } | |
1477 | ||
55e303ae A |
1478 | if (ip_kotype(port) == IKOT_NAMED_ENTRY) { |
1479 | vm_named_entry_t named_entry; | |
1480 | ||
1481 | named_entry = (vm_named_entry_t)port->ip_kobject; | |
1482 | /* a few checks to make sure user is obeying rules */ | |
1483 | if(*upl_size == 0) { | |
1484 | if(offset >= named_entry->size) | |
1485 | return(KERN_INVALID_RIGHT); | |
b0d623f7 A |
1486 | *upl_size = (upl_size_t)(named_entry->size - offset); |
1487 | if (*upl_size != named_entry->size - offset) | |
1488 | return KERN_INVALID_ARGUMENT; | |
55e303ae A |
1489 | } |
1490 | if(caller_flags & UPL_COPYOUT_FROM) { | |
1491 | if((named_entry->protection & VM_PROT_READ) | |
1492 | != VM_PROT_READ) { | |
1493 | return(KERN_INVALID_RIGHT); | |
1494 | } | |
1495 | } else { | |
1496 | if((named_entry->protection & | |
1497 | (VM_PROT_READ | VM_PROT_WRITE)) | |
1498 | != (VM_PROT_READ | VM_PROT_WRITE)) { | |
1499 | return(KERN_INVALID_RIGHT); | |
1500 | } | |
1501 | } | |
1502 | if(named_entry->size < (offset + *upl_size)) | |
1503 | return(KERN_INVALID_ARGUMENT); | |
1504 | ||
1505 | /* the callers parameter offset is defined to be the */ | |
1506 | /* offset from beginning of named entry offset in object */ | |
1507 | offset = offset + named_entry->offset; | |
1508 | ||
39236c6e A |
1509 | if (named_entry->is_sub_map || |
1510 | named_entry->is_copy) | |
1511 | return KERN_INVALID_ARGUMENT; | |
55e303ae A |
1512 | |
1513 | named_entry_lock(named_entry); | |
1514 | ||
91447636 | 1515 | if (named_entry->is_pager) { |
55e303ae A |
1516 | object = vm_object_enter(named_entry->backing.pager, |
1517 | named_entry->offset + named_entry->size, | |
1518 | named_entry->internal, | |
1519 | FALSE, | |
1520 | FALSE); | |
1521 | if (object == VM_OBJECT_NULL) { | |
1522 | named_entry_unlock(named_entry); | |
1523 | return(KERN_INVALID_OBJECT); | |
1524 | } | |
91447636 A |
1525 | |
1526 | /* JMM - drop reference on pager here? */ | |
55e303ae A |
1527 | |
1528 | /* create an extra reference for the named entry */ | |
91447636 | 1529 | vm_object_lock(object); |
55e303ae | 1530 | vm_object_reference_locked(object); |
91447636 A |
1531 | named_entry->backing.object = object; |
1532 | named_entry->is_pager = FALSE; | |
55e303ae A |
1533 | named_entry_unlock(named_entry); |
1534 | ||
1535 | /* wait for object to be ready */ | |
1536 | while (!object->pager_ready) { | |
1537 | vm_object_wait(object, | |
1538 | VM_OBJECT_EVENT_PAGER_READY, | |
1539 | THREAD_UNINT); | |
1540 | vm_object_lock(object); | |
1541 | } | |
1542 | vm_object_unlock(object); | |
91447636 A |
1543 | } else { |
1544 | /* This is the case where we are going to map */ | |
1545 | /* an already mapped object. If the object is */ | |
1546 | /* not ready it is internal. An external */ | |
1547 | /* object cannot be mapped until it is ready, */ | |
1548 | /* so we can avoid the ready check */ | |
1549 | /* in this case. */ | |
1550 | object = named_entry->backing.object; | |
1551 | vm_object_reference(object); | |
1552 | named_entry_unlock(named_entry); | |
55e303ae | 1553 | } |
0c530ab8 | 1554 | } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) { |
55e303ae | 1555 | memory_object_control_t control; |
0c530ab8 | 1556 | control = (memory_object_control_t) port; |
55e303ae A |
1557 | if (control == NULL) |
1558 | return (KERN_INVALID_ARGUMENT); | |
1559 | object = memory_object_control_to_vm_object(control); | |
1560 | if (object == VM_OBJECT_NULL) | |
1561 | return (KERN_INVALID_ARGUMENT); | |
1562 | vm_object_reference(object); | |
0c530ab8 A |
1563 | } else { |
1564 | return KERN_INVALID_ARGUMENT; | |
55e303ae A |
1565 | } |
1566 | if (object == VM_OBJECT_NULL) | |
1567 | return (KERN_INVALID_ARGUMENT); | |
1568 | ||
1569 | if (!object->private) { | |
55e303ae A |
1570 | if (object->phys_contiguous) { |
1571 | *flags = UPL_PHYS_CONTIG; | |
1572 | } else { | |
1573 | *flags = 0; | |
1574 | } | |
1575 | } else { | |
1576 | *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG; | |
1577 | } | |
1578 | ||
1579 | ret = vm_object_iopl_request(object, | |
1580 | offset, | |
1581 | *upl_size, | |
1582 | upl_ptr, | |
1583 | user_page_list, | |
1584 | page_list_count, | |
1585 | caller_flags); | |
1586 | vm_object_deallocate(object); | |
1587 | return ret; | |
1588 | } | |
1589 | ||
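/*
 * Editorial sketch (not in the original source): a hypothetical in-kernel
 * caller that wires down one page of a named entry for device I/O.
 * "entry_port" is an assumed send right whose kobject is a vm_named_entry
 * created elsewhere with VM_PROT_READ | VM_PROT_WRITE protection.
 *
 *	upl_t			upl;
 *	upl_page_info_t		page_info[1];
 *	unsigned int		count = 1;
 *	upl_size_t		size = PAGE_SIZE;
 *	upl_control_flags_t	flags = UPL_SET_IO_WIRE | UPL_SET_LITE;
 *	kern_return_t		kr;
 *
 *	kr = memory_object_iopl_request(entry_port, 0, &size, &upl,
 *					page_info, &count, &flags);
 *	if (kr == KERN_SUCCESS) {
 *		... perform I/O against page_info[0].phys_addr ...
 *		upl_abort(upl, 0);	-- unwire without committing
 *		upl_deallocate(upl);
 *	}
 */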
0b4e3aa0 A |
1590 | /* |
1591 | * Routine: memory_object_upl_request [interface] | |
1592 | * Purpose: | |
1593 | * Cause the population of a portion of a vm_object. | |
1594 | * Depending on the nature of the request, the pages | |
1595 | * returned may contain valid data or be uninitialized. | |
1596 | * | |
1597 | */ | |
1c79356b | 1598 | |
0b4e3aa0 A |
1599 | kern_return_t |
1600 | memory_object_upl_request( | |
1601 | memory_object_control_t control, | |
1602 | memory_object_offset_t offset, | |
91447636 | 1603 | upl_size_t size, |
0b4e3aa0 A |
1604 | upl_t *upl_ptr, |
1605 | upl_page_info_array_t user_page_list, | |
1606 | unsigned int *page_list_count, | |
1607 | int cntrl_flags) | |
1608 | { | |
1609 | vm_object_t object; | |
1610 | ||
1611 | object = memory_object_control_to_vm_object(control); | |
1612 | if (object == VM_OBJECT_NULL) | |
b0d623f7 | 1613 | return (KERN_TERMINATED); |
0b4e3aa0 A |
1614 | |
1615 | return vm_object_upl_request(object, | |
1616 | offset, | |
1617 | size, | |
1618 | upl_ptr, | |
1619 | user_page_list, | |
1620 | page_list_count, | |
3e170ce0 | 1621 | (upl_control_flags_t)(unsigned int) cntrl_flags); |
0b4e3aa0 A |
1622 | } |
1623 | ||
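/*
 * Editorial sketch (assumptions flagged, not taken from this file): a
 * pageout-style caller asking for a UPL that covers only the dirty pages
 * of a single-page range. The NULL page-list arguments are a deliberate
 * simplification.
 *
 *	upl_t		upl;
 *	kern_return_t	kr;
 *
 *	kr = memory_object_upl_request(control, offset, PAGE_SIZE,
 *				       &upl, NULL, NULL,
 *				       UPL_COPYOUT_FROM | UPL_RET_ONLY_DIRTY);
 *
 * On KERN_SUCCESS the covered pages stay busy until the UPL is committed
 * (upl_commit) or aborted (upl_abort) and then deallocated.
 */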
1624 | /* | |
1625 | * Routine: memory_object_super_upl_request [interface] | |
1626 | * Purpose: | |
1627 | * Cause the population of a portion of a vm_object | |
1628 | * in much the same way as memory_object_upl_request. | |
1629 | * Depending on the nature of the request, the pages | |
1630 | * returned may contain valid data or be uninitialized. | |
1631 | * However, the region may be expanded up to the super | |
1632 | * cluster size provided. | |
1c79356b | 1633 | */ |
0b4e3aa0 | 1634 | |
1c79356b | 1635 | kern_return_t |
0b4e3aa0 A |
1636 | memory_object_super_upl_request( |
1637 | memory_object_control_t control, | |
1638 | memory_object_offset_t offset, | |
91447636 A |
1639 | upl_size_t size, |
1640 | upl_size_t super_cluster, | |
0b4e3aa0 A |
1641 | upl_t *upl, |
1642 | upl_page_info_t *user_page_list, | |
1643 | unsigned int *page_list_count, | |
1644 | int cntrl_flags) | |
1c79356b | 1645 | { |
0b4e3aa0 A |
1646 | vm_object_t object; |
1647 | ||
1648 | object = memory_object_control_to_vm_object(control); | |
1649 | if (object == VM_OBJECT_NULL) | |
1650 | return (KERN_INVALID_ARGUMENT); | |
1651 | ||
1652 | return vm_object_super_upl_request(object, | |
1653 | offset, | |
1654 | size, | |
1655 | super_cluster, | |
1656 | upl, | |
1657 | user_page_list, | |
1658 | page_list_count, | |
3e170ce0 | 1659 | (upl_control_flags_t)(unsigned int) cntrl_flags); |
1c79356b A |
1660 | } |
1661 | ||
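/*
 * Editorial sketch: same call shape as memory_object_upl_request, but the
 * kernel may grow the request up to the supplied super-cluster size to
 * batch neighboring pages. The 32-page cluster below is an arbitrary,
 * illustrative choice.
 *
 *	kr = memory_object_super_upl_request(control, offset, PAGE_SIZE,
 *					     32 * PAGE_SIZE, &upl,
 *					     NULL, NULL, UPL_COPYOUT_FROM);
 */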
2d21ac55 A |
1662 | kern_return_t |
1663 | memory_object_cluster_size(memory_object_control_t control, memory_object_offset_t *start, | |
b0d623f7 | 1664 | vm_size_t *length, uint32_t *io_streaming, memory_object_fault_info_t fault_info) |
2d21ac55 A |
1665 | { |
1666 | vm_object_t object; | |
1667 | ||
1668 | object = memory_object_control_to_vm_object(control); | |
1669 | ||
1670 | if (object == VM_OBJECT_NULL || object->paging_offset > *start) | |
1671 | return (KERN_INVALID_ARGUMENT); | |
1672 | ||
1673 | *start -= object->paging_offset; | |
1674 | ||
b0d623f7 | 1675 | vm_object_cluster_size(object, (vm_object_offset_t *)start, length, (vm_object_fault_info_t)fault_info, io_streaming); |
2d21ac55 A |
1676 | |
1677 | *start += object->paging_offset; | |
1678 | ||
1679 | return (KERN_SUCCESS); | |
1680 | } | |
1681 | ||
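/*
 * Editorial note on the conversions above: callers of
 * memory_object_cluster_size() speak in memory-object offsets, while
 * vm_object_cluster_size() works in VM-object offsets; the two spaces
 * differ by object->paging_offset, hence:
 *
 *	vm_off = mo_off - object->paging_offset;	-- on entry
 *	mo_off = vm_off + object->paging_offset;	-- on return
 */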
1682 | ||
0b4e3aa0 A |
1683 | int vm_stat_discard_cleared_reply = 0; |
1684 | int vm_stat_discard_cleared_unset = 0; | |
1685 | int vm_stat_discard_cleared_too_late = 0; | |
1686 | ||
1687 | ||
1688 | ||
1c79356b | 1689 | /* |
0b4e3aa0 | 1690 | * Routine: host_default_memory_manager [interface] |
1c79356b A |
1691 | * Purpose: |
1692 | * Set/get the default memory manager port and default cluster | |
1693 | * size. | |
1694 | * | |
1695 | * If successful, consumes the supplied naked send right. | |
1696 | */ | |
1697 | kern_return_t | |
1698 | host_default_memory_manager( | |
0b4e3aa0 A |
1699 | host_priv_t host_priv, |
1700 | memory_object_default_t *default_manager, | |
2d21ac55 | 1701 | __unused memory_object_cluster_size_t cluster_size) |
1c79356b | 1702 | { |
0b4e3aa0 A |
1703 | memory_object_default_t current_manager; |
1704 | memory_object_default_t new_manager; | |
1705 | memory_object_default_t returned_manager; | |
2d21ac55 | 1706 | kern_return_t result = KERN_SUCCESS; |
1c79356b A |
1707 | |
1708 | if (host_priv == HOST_PRIV_NULL) | |
1709 | return(KERN_INVALID_HOST); | |
1710 | ||
1711 | assert(host_priv == &realhost); | |
1712 | ||
1713 | new_manager = *default_manager; | |
b0d623f7 | 1714 | lck_mtx_lock(&memory_manager_default_lock); |
1c79356b | 1715 | current_manager = memory_manager_default; |
2d21ac55 | 1716 | returned_manager = MEMORY_OBJECT_DEFAULT_NULL; |
1c79356b | 1717 | |
0b4e3aa0 | 1718 | if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) { |
1c79356b A |
1719 | /* |
1720 | * Retrieve the current value. | |
1721 | */ | |
0b4e3aa0 | 1722 | returned_manager = current_manager; |
2d21ac55 | 1723 | memory_object_default_reference(returned_manager); |
1c79356b | 1724 | } else { |
3e170ce0 A |
1725 | /* |
1726 | * Only allow the kernel to change the value. | |
1727 | */ | |
1728 | extern task_t kernel_task; | |
1729 | if (current_task() != kernel_task) { | |
1730 | result = KERN_NO_ACCESS; | |
1731 | goto out; | |
1732 | } | |
2d21ac55 A |
1733 | |
1734 | /* | |
1735 | * If this is the first non-null manager, start | |
1736 | * up the internal pager support. | |
1737 | */ | |
1738 | if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) { | |
1739 | result = vm_pageout_internal_start(); | |
1740 | if (result != KERN_SUCCESS) | |
1741 | goto out; | |
1742 | } | |
1743 | ||
1c79356b A |
1744 | /* |
1745 | * Retrieve the current value, | |
1746 | * and replace it with the supplied value. | |
0b4e3aa0 A |
1747 | * We return the old reference to the caller |
1748 | * but we have to take a reference on the new | |
1749 | * one. | |
1c79356b | 1750 | */ |
1c79356b A |
1751 | returned_manager = current_manager; |
1752 | memory_manager_default = new_manager; | |
0b4e3aa0 A |
1753 | memory_object_default_reference(new_manager); |
1754 | ||
1c79356b A |
1755 | /* |
1756 | * In case anyone's been waiting for a memory | |
1757 | * manager to be established, wake them up. | |
1758 | */ | |
1759 | ||
1760 | thread_wakeup((event_t) &memory_manager_default); | |
b0d623f7 A |
1761 | |
1762 | /* | |
1763 | * Now that we have a default pager for anonymous memory, | |
1764 | * reactivate all the throttled pages (i.e. dirty pages with | |
1765 | * no pager). | |
1766 | */ | |
6d2010ae A |
1767 | if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) |
1768 | { | |
b0d623f7 A |
1769 | vm_page_reactivate_all_throttled(); |
1770 | } | |
1c79356b | 1771 | } |
2d21ac55 | 1772 | out: |
b0d623f7 | 1773 | lck_mtx_unlock(&memory_manager_default_lock); |
1c79356b A |
1774 | |
1775 | *default_manager = returned_manager; | |
2d21ac55 | 1776 | return(result); |
1c79356b A |
1777 | } |
1778 | ||
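/*
 * Editorial sketch (hypothetical caller, not in the original source):
 * passing MEMORY_OBJECT_DEFAULT_NULL queries the current manager without
 * replacing it; the right handed back must eventually be released.
 *
 *	memory_object_default_t	dmm = MEMORY_OBJECT_DEFAULT_NULL;
 *	kern_return_t		kr;
 *
 *	kr = host_default_memory_manager(host_priv_self(), &dmm, 0);
 *	if (kr == KERN_SUCCESS && dmm != MEMORY_OBJECT_DEFAULT_NULL) {
 *		... use dmm ...
 *		memory_object_default_deallocate(dmm);
 *	}
 */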
1779 | /* | |
1780 | * Routine: memory_manager_default_reference | |
1781 | * Purpose: | |
1782 | * Returns a naked send right for the default | |
1783 | * memory manager. The returned right is always | |
1784 | * valid (not IP_NULL or IP_DEAD). | |
1785 | */ | |
1786 | ||
0b4e3aa0 | 1787 | __private_extern__ memory_object_default_t |
2d21ac55 | 1788 | memory_manager_default_reference(void) |
1c79356b | 1789 | { |
0b4e3aa0 | 1790 | memory_object_default_t current_manager; |
1c79356b | 1791 | |
b0d623f7 | 1792 | lck_mtx_lock(&memory_manager_default_lock); |
0b4e3aa0 A |
1793 | current_manager = memory_manager_default; |
1794 | while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) { | |
9bccf70c A |
1795 | wait_result_t res; |
1796 | ||
b0d623f7 A |
1797 | res = lck_mtx_sleep(&memory_manager_default_lock, |
1798 | LCK_SLEEP_DEFAULT, | |
1799 | (event_t) &memory_manager_default, | |
1800 | THREAD_UNINT); | |
9bccf70c | 1801 | assert(res == THREAD_AWAKENED); |
0b4e3aa0 | 1802 | current_manager = memory_manager_default; |
1c79356b | 1803 | } |
0b4e3aa0 | 1804 | memory_object_default_reference(current_manager); |
b0d623f7 | 1805 | lck_mtx_unlock(&memory_manager_default_lock); |
1c79356b A |
1806 | |
1807 | return current_manager; | |
1808 | } | |
1809 | ||
1c79356b A |
1810 | /* |
1811 | * Routine: memory_manager_default_check | |
1812 | * | |
1813 | * Purpose: | |
1814 | * Check whether a default memory manager has been set | |
1815 | * up yet. Returns KERN_SUCCESS if a dmm exists, | |
1816 | * and KERN_FAILURE if it does not. | |
1817 | * | |
1818 | * If there is no default memory manager, log an error, | |
1819 | * but only the first time. | |
1820 | * | |
1821 | */ | |
0b4e3aa0 | 1822 | __private_extern__ kern_return_t |
1c79356b A |
1823 | memory_manager_default_check(void) |
1824 | { | |
0b4e3aa0 | 1825 | memory_object_default_t current; |
1c79356b | 1826 | |
b0d623f7 | 1827 | lck_mtx_lock(&memory_manager_default_lock); |
1c79356b | 1828 | current = memory_manager_default; |
0b4e3aa0 | 1829 | if (current == MEMORY_OBJECT_DEFAULT_NULL) { |
1c79356b A |
1830 | static boolean_t logged; /* initialized to 0 */ |
1831 | boolean_t complain = !logged; | |
1832 | logged = TRUE; | |
b0d623f7 | 1833 | lck_mtx_unlock(&memory_manager_default_lock); |
1c79356b A |
1834 | if (complain) |
1835 | printf("Warning: No default memory manager\n"); | |
1836 | return(KERN_FAILURE); | |
1837 | } else { | |
b0d623f7 | 1838 | lck_mtx_unlock(&memory_manager_default_lock); |
1c79356b A |
1839 | return(KERN_SUCCESS); |
1840 | } | |
1841 | } | |
1842 | ||
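/*
 * Editorial sketch: a typical guard in code that needs backing store for
 * anonymous memory, bailing out while no default pager is registered.
 *
 *	if (memory_manager_default_check() != KERN_SUCCESS) {
 *		... no default pager yet; defer or fail the operation ...
 *		return KERN_FAILURE;
 *	}
 */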
0b4e3aa0 | 1843 | __private_extern__ void |
1c79356b A |
1844 | memory_manager_default_init(void) |
1845 | { | |
0b4e3aa0 | 1846 | memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL; |
b0d623f7 | 1847 | lck_mtx_init(&memory_manager_default_lock, &vm_object_lck_grp, &vm_object_lck_attr); |
1c79356b A |
1848 | } |
1849 | ||
1850 | ||
1c79356b A |
1851 | |
1852 | /* Allow manipulation of individual page state. This is actually part of */ | |
1853 | /* the UPL regimen but takes place on the object rather than on a UPL */ | |
1854 | ||
1855 | kern_return_t | |
1856 | memory_object_page_op( | |
0b4e3aa0 A |
1857 | memory_object_control_t control, |
1858 | memory_object_offset_t offset, | |
1859 | int ops, | |
55e303ae | 1860 | ppnum_t *phys_entry, |
0b4e3aa0 | 1861 | int *flags) |
1c79356b | 1862 | { |
0b4e3aa0 | 1863 | vm_object_t object; |
0b4e3aa0 A |
1864 | |
1865 | object = memory_object_control_to_vm_object(control); | |
1866 | if (object == VM_OBJECT_NULL) | |
1867 | return (KERN_INVALID_ARGUMENT); | |
1c79356b | 1868 | |
0c530ab8 | 1869 | return vm_object_page_op(object, offset, ops, phys_entry, flags); |
1c79356b A |
1870 | } |
1871 | ||
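/*
 * Editorial sketch (flag semantics assumed from mach/memory_object_types.h):
 * with neither UPL_POP_SET nor UPL_POP_CLR requested, the call is a query
 * and *flags reports the page's current state.
 *
 *	ppnum_t		phys;
 *	int		pflags;
 *	kern_return_t	kr;
 *
 *	kr = memory_object_page_op(control, trunc_page_64(offset),
 *				   0, &phys, &pflags);
 *	if (kr == KERN_SUCCESS && (pflags & UPL_POP_DIRTY)) {
 *		... the page is resident and dirty ...
 *	}
 */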
55e303ae A |
1872 | /* |
1873 | * memory_object_range_op offers performance enhancement over | |
1874 | * memory_object_page_op for page_op functions which do not require page | |
1875 | * level state to be returned from the call. Page_op was created to provide | |
1876 | * a low-cost alternative to page manipulation via UPLs when only a single | |
1877 | * page was involved. The range_op call establishes the ability in the _op | |
1878 | * family of functions to work on multiple pages where the lack of page level | |
1879 | * state handling allows the caller to avoid the overhead of the upl structures. | |
1880 | */ | |
1881 | ||
1882 | kern_return_t | |
1883 | memory_object_range_op( | |
1884 | memory_object_control_t control, | |
1885 | memory_object_offset_t offset_beg, | |
1886 | memory_object_offset_t offset_end, | |
1887 | int ops, | |
1888 | int *range) | |
1889 | { | |
55e303ae | 1890 | vm_object_t object; |
55e303ae A |
1891 | |
1892 | object = memory_object_control_to_vm_object(control); | |
1893 | if (object == VM_OBJECT_NULL) | |
1894 | return (KERN_INVALID_ARGUMENT); | |
1895 | ||
0c530ab8 A |
1896 | return vm_object_range_op(object, |
1897 | offset_beg, | |
1898 | offset_end, | |
1899 | ops, | |
b0d623f7 | 1900 | (uint32_t *) range); |
55e303ae A |
1901 | } |
1902 | ||
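/*
 * Editorial sketch: discarding every resident page in a range with one
 * call, the batching that the comment above motivates. UPL_ROP_DUMP
 * semantics are assumed from mach/memory_object_types.h.
 *
 *	int		range = 0;
 *	kern_return_t	kr;
 *
 *	kr = memory_object_range_op(control, offset_beg, offset_end,
 *				    UPL_ROP_DUMP, &range);
 */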
91447636 | 1903 | |
6d2010ae A |
1904 | void |
1905 | memory_object_mark_used( | |
1906 | memory_object_control_t control) | |
1907 | { | |
1908 | vm_object_t object; | |
1909 | ||
1910 | if (control == NULL) | |
1911 | return; | |
1912 | ||
1913 | object = memory_object_control_to_vm_object(control); | |
1914 | ||
1915 | if (object != VM_OBJECT_NULL) | |
1916 | vm_object_cache_remove(object); | |
1917 | } | |
1918 | ||
1919 | ||
1920 | void | |
1921 | memory_object_mark_unused( | |
1922 | memory_object_control_t control, | |
1923 | __unused boolean_t rage) | |
1924 | { | |
1925 | vm_object_t object; | |
1926 | ||
1927 | if (control == NULL) | |
1928 | return; | |
1929 | ||
1930 | object = memory_object_control_to_vm_object(control); | |
1931 | ||
1932 | if (object != VM_OBJECT_NULL) | |
1933 | vm_object_cache_add(object); | |
1934 | } | |
1935 | ||
fe8ab488 A |
1936 | void |
1937 | memory_object_mark_io_tracking( | |
1938 | memory_object_control_t control) | |
1939 | { | |
1940 | vm_object_t object; | |
1941 | ||
1942 | if (control == NULL) | |
1943 | return; | |
1944 | object = memory_object_control_to_vm_object(control); | |
1945 | ||
1946 | if (object != VM_OBJECT_NULL) { | |
1947 | vm_object_lock(object); | |
1948 | object->io_tracking = TRUE; | |
1949 | vm_object_unlock(object); | |
1950 | } | |
1951 | } | |
6d2010ae | 1952 | |
39037602 A |
1953 | #if CONFIG_SECLUDED_MEMORY |
1954 | void | |
1955 | memory_object_mark_eligible_for_secluded( | |
1956 | memory_object_control_t control, | |
1957 | boolean_t eligible_for_secluded) | |
1958 | { | |
1959 | vm_object_t object; | |
1960 | ||
1961 | if (control == NULL) | |
1962 | return; | |
1963 | object = memory_object_control_to_vm_object(control); | |
1964 | ||
1965 | if (object == VM_OBJECT_NULL) { | |
1966 | return; | |
1967 | } | |
1968 | ||
1969 | vm_object_lock(object); | |
1970 | if (eligible_for_secluded && | |
1971 | secluded_for_filecache && /* global boot-arg */ | |
1972 | !object->eligible_for_secluded) { | |
1973 | object->eligible_for_secluded = TRUE; | |
1974 | vm_page_secluded.eligible_for_secluded += object->resident_page_count; | |
1975 | } else if (!eligible_for_secluded && | |
1976 | object->eligible_for_secluded) { | |
1977 | object->eligible_for_secluded = FALSE; | |
1978 | vm_page_secluded.eligible_for_secluded -= object->resident_page_count; | |
1979 | if (object->resident_page_count) { | |
1980 | /* XXX FBDP TODO: flush pages from secluded queue? */ | |
1981 | // printf("FBDP TODO: flush %d pages from %p from secluded queue\n", object->resident_page_count, object); | |
1982 | } | |
1983 | } | |
1984 | vm_object_unlock(object); | |
1985 | } | |
1986 | #endif /* CONFIG_SECLUDED_MEMORY */ | |
1987 | ||
91447636 A |
1988 | kern_return_t |
1989 | memory_object_pages_resident( | |
1990 | memory_object_control_t control, | |
1991 | boolean_t * has_pages_resident) | |
1992 | { | |
1993 | vm_object_t object; | |
1994 | ||
1995 | *has_pages_resident = FALSE; | |
1996 | ||
1997 | object = memory_object_control_to_vm_object(control); | |
1998 | if (object == VM_OBJECT_NULL) | |
1999 | return (KERN_INVALID_ARGUMENT); | |
2000 | ||
2001 | if (object->resident_page_count) | |
2002 | *has_pages_resident = TRUE; | |
2003 | ||
2004 | return (KERN_SUCCESS); | |
2005 | } | |
2006 | ||
2d21ac55 A |
2007 | kern_return_t |
2008 | memory_object_signed( | |
2009 | memory_object_control_t control, | |
2010 | boolean_t is_signed) | |
2011 | { | |
2012 | vm_object_t object; | |
2013 | ||
2014 | object = memory_object_control_to_vm_object(control); | |
2015 | if (object == VM_OBJECT_NULL) | |
2016 | return KERN_INVALID_ARGUMENT; | |
2017 | ||
2018 | vm_object_lock(object); | |
2019 | object->code_signed = is_signed; | |
2020 | vm_object_unlock(object); | |
2021 | ||
2022 | return KERN_SUCCESS; | |
2023 | } | |
91447636 | 2024 | |
39236c6e A |
2025 | boolean_t |
2026 | memory_object_is_signed( | |
2027 | memory_object_control_t control) | |
2028 | { | |
2029 | boolean_t is_signed; | |
2030 | vm_object_t object; | |
2031 | ||
2032 | object = memory_object_control_to_vm_object(control); | |
2033 | if (object == VM_OBJECT_NULL) | |
2034 | return FALSE; | |
2035 | ||
2036 | vm_object_lock_shared(object); | |
2037 | is_signed = object->code_signed; | |
2038 | vm_object_unlock(object); | |
2039 | ||
2040 | return is_signed; | |
2041 | } | |
2042 | ||
6d2010ae A |
2043 | boolean_t |
2044 | memory_object_is_slid( | |
2045 | memory_object_control_t control) | |
2046 | { | |
2047 | vm_object_t object = VM_OBJECT_NULL; | |
6d2010ae A |
2048 | |
2049 | object = memory_object_control_to_vm_object(control); | |
2050 | if (object == VM_OBJECT_NULL) | |
2051 | return FALSE; | |
2052 | ||
39236c6e | 2053 | return object->object_slid; |
6d2010ae A |
2054 | } |
2055 | ||
0b4e3aa0 A |
2056 | static zone_t mem_obj_control_zone; |
2057 | ||
2058 | __private_extern__ void | |
2059 | memory_object_control_bootstrap(void) | |
2060 | { | |
2061 | int i; | |
2062 | ||
2063 | i = (vm_size_t) sizeof (struct memory_object_control); | |
2064 | mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control"); | |
6d2010ae | 2065 | zone_change(mem_obj_control_zone, Z_CALLERACCT, FALSE); |
0b4c1975 | 2066 | zone_change(mem_obj_control_zone, Z_NOENCRYPT, TRUE); |
0b4e3aa0 A |
2067 | return; |
2068 | } | |
2069 | ||
2070 | __private_extern__ memory_object_control_t | |
2071 | memory_object_control_allocate( | |
2072 | vm_object_t object) | |
2073 | { | |
2074 | memory_object_control_t control; | |
2075 | ||
2076 | control = (memory_object_control_t)zalloc(mem_obj_control_zone); | |
0c530ab8 A |
2077 | if (control != MEMORY_OBJECT_CONTROL_NULL) { |
2078 | control->moc_object = object; | |
2079 | control->moc_ikot = IKOT_MEM_OBJ_CONTROL; /* fake ip_kotype */ | |
2080 | } | |
0b4e3aa0 A |
2081 | return (control); |
2082 | } | |
2083 | ||
2084 | __private_extern__ void | |
2085 | memory_object_control_collapse( | |
2086 | memory_object_control_t control, | |
2087 | vm_object_t object) | |
2088 | { | |
0c530ab8 A |
2089 | assert((control->moc_object != VM_OBJECT_NULL) && |
2090 | (control->moc_object != object)); | |
2091 | control->moc_object = object; | |
0b4e3aa0 A |
2092 | } |
2093 | ||
2094 | __private_extern__ vm_object_t | |
2095 | memory_object_control_to_vm_object( | |
2096 | memory_object_control_t control) | |
2097 | { | |
0c530ab8 A |
2098 | if (control == MEMORY_OBJECT_CONTROL_NULL || |
2099 | control->moc_ikot != IKOT_MEM_OBJ_CONTROL) | |
0b4e3aa0 A |
2100 | return VM_OBJECT_NULL; |
2101 | ||
0c530ab8 | 2102 | return (control->moc_object); |
0b4e3aa0 A |
2103 | } |
2104 | ||
2105 | memory_object_control_t | |
2106 | convert_port_to_mo_control( | |
91447636 | 2107 | __unused mach_port_t port) |
0b4e3aa0 A |
2108 | { |
2109 | return MEMORY_OBJECT_CONTROL_NULL; | |
2110 | } | |
2111 | ||
2112 | ||
2113 | mach_port_t | |
2114 | convert_mo_control_to_port( | |
91447636 | 2115 | __unused memory_object_control_t control) |
0b4e3aa0 A |
2116 | { |
2117 | return MACH_PORT_NULL; | |
2118 | } | |
2119 | ||
2120 | void | |
2121 | memory_object_control_reference( | |
91447636 | 2122 | __unused memory_object_control_t control) |
0b4e3aa0 A |
2123 | { |
2124 | return; | |
2125 | } | |
2126 | ||
2127 | /* | |
2128 | * We only ever issue one of these references, so kill it | |
2129 | * when that gets released (we should switch to real reference | |
2130 | * counting in true port-less EMMI). | |
2131 | */ | |
2132 | void | |
2133 | memory_object_control_deallocate( | |
2134 | memory_object_control_t control) | |
2135 | { | |
91447636 | 2136 | zfree(mem_obj_control_zone, control); |
0b4e3aa0 A |
2137 | } |
2138 | ||
2139 | void | |
2140 | memory_object_control_disable( | |
2141 | memory_object_control_t control) | |
2142 | { | |
0c530ab8 A |
2143 | assert(control->moc_object != VM_OBJECT_NULL); |
2144 | control->moc_object = VM_OBJECT_NULL; | |
0b4e3aa0 A |
2145 | } |
2146 | ||
2147 | void | |
2148 | memory_object_default_reference( | |
2149 | memory_object_default_t dmm) | |
2150 | { | |
2151 | ipc_port_make_send(dmm); | |
2152 | } | |
2153 | ||
2154 | void | |
2155 | memory_object_default_deallocate( | |
2156 | memory_object_default_t dmm) | |
2157 | { | |
2158 | ipc_port_release_send(dmm); | |
2159 | } | |
2160 | ||
2161 | memory_object_t | |
2162 | convert_port_to_memory_object( | |
91447636 | 2163 | __unused mach_port_t port) |
0b4e3aa0 A |
2164 | { |
2165 | return (MEMORY_OBJECT_NULL); | |
2166 | } | |
2167 | ||
2168 | ||
2169 | mach_port_t | |
2170 | convert_memory_object_to_port( | |
91447636 | 2171 | __unused memory_object_t object) |
0b4e3aa0 A |
2172 | { |
2173 | return (MACH_PORT_NULL); | |
2174 | } | |
2175 | ||
0b4e3aa0 A |
2176 | |
2177 | /* Routine memory_object_reference */ | |
2178 | void memory_object_reference( | |
2179 | memory_object_t memory_object) | |
2180 | { | |
0c530ab8 A |
2181 | (memory_object->mo_pager_ops->memory_object_reference)( |
2182 | memory_object); | |
0b4e3aa0 A |
2183 | } |
2184 | ||
2185 | /* Routine memory_object_deallocate */ | |
2186 | void memory_object_deallocate( | |
2187 | memory_object_t memory_object) | |
2188 | { | |
0c530ab8 A |
2189 | (memory_object->mo_pager_ops->memory_object_deallocate)( |
2190 | memory_object); | |
0b4e3aa0 A |
2191 | } |
2192 | ||
2193 | ||
2194 | /* Routine memory_object_init */ | |
2195 | kern_return_t memory_object_init | |
2196 | ( | |
2197 | memory_object_t memory_object, | |
2198 | memory_object_control_t memory_control, | |
91447636 | 2199 | memory_object_cluster_size_t memory_object_page_size |
0b4e3aa0 A |
2200 | ) |
2201 | { | |
0c530ab8 A |
2202 | return (memory_object->mo_pager_ops->memory_object_init)( |
2203 | memory_object, | |
2204 | memory_control, | |
2205 | memory_object_page_size); | |
0b4e3aa0 A |
2206 | } |
2207 | ||
2208 | /* Routine memory_object_terminate */ | |
2209 | kern_return_t memory_object_terminate | |
2210 | ( | |
2211 | memory_object_t memory_object | |
2212 | ) | |
2213 | { | |
0c530ab8 A |
2214 | return (memory_object->mo_pager_ops->memory_object_terminate)( |
2215 | memory_object); | |
0b4e3aa0 A |
2216 | } |
2217 | ||
2218 | /* Routine memory_object_data_request */ | |
2219 | kern_return_t memory_object_data_request | |
2220 | ( | |
2221 | memory_object_t memory_object, | |
2222 | memory_object_offset_t offset, | |
91447636 | 2223 | memory_object_cluster_size_t length, |
2d21ac55 A |
2224 | vm_prot_t desired_access, |
2225 | memory_object_fault_info_t fault_info | |
0b4e3aa0 A |
2226 | ) |
2227 | { | |
0c530ab8 A |
2228 | return (memory_object->mo_pager_ops->memory_object_data_request)( |
2229 | memory_object, | |
2230 | offset, | |
2231 | length, | |
2d21ac55 A |
2232 | desired_access, |
2233 | fault_info); | |
0b4e3aa0 A |
2234 | } |
2235 | ||
2236 | /* Routine memory_object_data_return */ | |
2237 | kern_return_t memory_object_data_return | |
2238 | ( | |
2239 | memory_object_t memory_object, | |
2240 | memory_object_offset_t offset, | |
b0d623f7 | 2241 | memory_object_cluster_size_t size, |
91447636 A |
2242 | memory_object_offset_t *resid_offset, |
2243 | int *io_error, | |
0b4e3aa0 | 2244 | boolean_t dirty, |
91447636 A |
2245 | boolean_t kernel_copy, |
2246 | int upl_flags | |
0b4e3aa0 A |
2247 | ) |
2248 | { | |
0c530ab8 A |
2249 | return (memory_object->mo_pager_ops->memory_object_data_return)( |
2250 | memory_object, | |
2251 | offset, | |
2252 | size, | |
2253 | resid_offset, | |
2254 | io_error, | |
2255 | dirty, | |
2256 | kernel_copy, | |
2257 | upl_flags); | |
0b4e3aa0 A |
2258 | } |
2259 | ||
2260 | /* Routine memory_object_data_initialize */ | |
2261 | kern_return_t memory_object_data_initialize | |
2262 | ( | |
2263 | memory_object_t memory_object, | |
2264 | memory_object_offset_t offset, | |
b0d623f7 | 2265 | memory_object_cluster_size_t size |
0b4e3aa0 A |
2266 | ) |
2267 | { | |
0c530ab8 A |
2268 | return (memory_object->mo_pager_ops->memory_object_data_initialize)( |
2269 | memory_object, | |
2270 | offset, | |
2271 | size); | |
0b4e3aa0 A |
2272 | } |
2273 | ||
2274 | /* Routine memory_object_data_unlock */ | |
2275 | kern_return_t memory_object_data_unlock | |
2276 | ( | |
2277 | memory_object_t memory_object, | |
2278 | memory_object_offset_t offset, | |
b0d623f7 | 2279 | memory_object_size_t size, |
0b4e3aa0 A |
2280 | vm_prot_t desired_access |
2281 | ) | |
2282 | { | |
0c530ab8 A |
2283 | return (memory_object->mo_pager_ops->memory_object_data_unlock)( |
2284 | memory_object, | |
2285 | offset, | |
2286 | size, | |
2287 | desired_access); | |
0b4e3aa0 A |
2288 | } |
2289 | ||
2290 | /* Routine memory_object_synchronize */ | |
2291 | kern_return_t memory_object_synchronize | |
2292 | ( | |
2293 | memory_object_t memory_object, | |
2294 | memory_object_offset_t offset, | |
b0d623f7 | 2295 | memory_object_size_t size, |
0b4e3aa0 A |
2296 | vm_sync_t sync_flags |
2297 | ) | |
2298 | { | |
0c530ab8 A |
2299 | return (memory_object->mo_pager_ops->memory_object_synchronize)( |
2300 | memory_object, | |
2301 | offset, | |
2302 | size, | |
2303 | sync_flags); | |
0b4e3aa0 A |
2304 | } |
2305 | ||
593a1d5f A |
2306 | |
2307 | /* | |
2308 | * memory_object_map() is called by VM (in vm_map_enter() and its variants) | |
2309 | * each time a "named" VM object gets mapped directly or indirectly | |
2310 | * (copy-on-write mapping). A "named" VM object has an extra reference held | |
2311 | * by the pager to keep it alive until the pager decides that the | |
2312 | * memory object (and its VM object) can be reclaimed. | |
2313 | * VM calls memory_object_last_unmap() (in vm_object_deallocate()) when all | |
2314 | * the mappings of that memory object have been removed. | |
2315 | * | |
2316 | * For a given VM object, calls to memory_object_map() and memory_object_last_unmap() | |
2317 | * are serialized (through object->mapping_in_progress), to ensure that the | |
2318 | * pager gets a consistent view of the mapping status of the memory object. | |
2319 | * | |
2320 | * This allows the pager to keep track of how many times a memory object | |
2321 | * has been mapped and with which protections, to decide when it can be | |
2322 | * reclaimed. | |
2323 | */ | |
2324 | ||
2325 | /* Routine memory_object_map */ | |
2326 | kern_return_t memory_object_map | |
2327 | ( | |
2328 | memory_object_t memory_object, | |
2329 | vm_prot_t prot | |
2330 | ) | |
2331 | { | |
2332 | return (memory_object->mo_pager_ops->memory_object_map)( | |
2333 | memory_object, | |
2334 | prot); | |
2335 | } | |
2336 | ||
2337 | /* Routine memory_object_last_unmap */ | |
2338 | kern_return_t memory_object_last_unmap | |
0b4e3aa0 A |
2339 | ( |
2340 | memory_object_t memory_object | |
2341 | ) | |
2342 | { | |
593a1d5f | 2343 | return (memory_object->mo_pager_ops->memory_object_last_unmap)( |
0c530ab8 | 2344 | memory_object); |
0b4e3aa0 A |
2345 | } |
2346 | ||
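/*
 * Editorial sketch (all "my_pager" names are invented for illustration):
 * a pager can pair these two mo_pager_ops callbacks to track whether its
 * memory object is mapped, relying on the serialization through
 * object->mapping_in_progress described above.
 *
 *	kern_return_t
 *	my_pager_map(memory_object_t mem_obj, __unused vm_prot_t prot)
 *	{
 *		my_pager_t	pager = my_pager_lookup(mem_obj);
 *
 *		lck_mtx_lock(&pager->lock);
 *		pager->mapped = TRUE;
 *		lck_mtx_unlock(&pager->lock);
 *		return KERN_SUCCESS;
 *	}
 *
 *	kern_return_t
 *	my_pager_last_unmap(memory_object_t mem_obj)
 *	{
 *		my_pager_t	pager = my_pager_lookup(mem_obj);
 *
 *		lck_mtx_lock(&pager->lock);
 *		pager->mapped = FALSE;
 *		... the pager may now decide to reclaim the object ...
 *		lck_mtx_unlock(&pager->lock);
 *		return KERN_SUCCESS;
 *	}
 */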
6d2010ae A |
2347 | /* Routine memory_object_data_reclaim */ |
2348 | kern_return_t memory_object_data_reclaim | |
2349 | ( | |
2350 | memory_object_t memory_object, | |
2351 | boolean_t reclaim_backing_store | |
2352 | ) | |
2353 | { | |
2354 | if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL) | |
2355 | return KERN_NOT_SUPPORTED; | |
2356 | return (memory_object->mo_pager_ops->memory_object_data_reclaim)( | |
2357 | memory_object, | |
2358 | reclaim_backing_store); | |
2359 | } | |
2360 | ||
91447636 A |
2361 | upl_t |
2362 | convert_port_to_upl( | |
2363 | ipc_port_t port) | |
2364 | { | |
2365 | upl_t upl; | |
2366 | ||
2367 | ip_lock(port); | |
2368 | if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) { | |
2369 | ip_unlock(port); | |
2370 | return (upl_t)NULL; | |
2371 | } | |
2372 | upl = (upl_t) port->ip_kobject; | |
2373 | ip_unlock(port); | |
2374 | upl_lock(upl); | |
2375 | upl->ref_count+=1; | |
2376 | upl_unlock(upl); | |
2377 | return upl; | |
2378 | } | |
2379 | ||
2380 | mach_port_t | |
2381 | convert_upl_to_port( | |
2382 | __unused upl_t upl) | |
2383 | { | |
2384 | return MACH_PORT_NULL; | |
2385 | } | |
2386 | ||
2387 | __private_extern__ void | |
2388 | upl_no_senders( | |
2389 | __unused ipc_port_t port, | |
2390 | __unused mach_port_mscount_t mscount) | |
2391 | { | |
2392 | return; | |
2393 | } |