/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *   Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *   School of Computer Science
 *   Carnegie Mellon University
 *   Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *  File:   vm/memory_object.c
 *  Author: Michael Wayne Young
 *
 *  External memory management interface control functions.
 */

#ifdef MACH_BSD
/* THIS code should be removed when the component merge is completed */
extern int vnode_pager_workaround;
#endif

#include <advisory_pageout.h>

/*
 *  Interface dependencies:
 */

#include <mach/std_types.h>     /* For pointer_t */
#include <mach/mach_types.h>

#include <mach/kern_return.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/message.h>

#include <vm/vm_object.h>
#include <vm/vm_fault.h>

/*
 *  Implementation dependencies:
 */
#include <string.h>             /* For memcpy() */

#include <vm/memory_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>            /* For pmap_clear_modify */
#include <kern/xpr.h>
#include <kern/thread.h>        /* For current_thread() */
#include <kern/host.h>
#include <vm/vm_kern.h>         /* For kernel_map, vm_move */
#include <vm/vm_map.h>          /* For vm_map_pageable */
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/misc_protos.h>

#if MACH_PAGEMAP
#include <vm/vm_external.h>
#endif  /* MACH_PAGEMAP */


ipc_port_t  memory_manager_default = IP_NULL;
vm_size_t   memory_manager_default_cluster = 0;
decl_mutex_data(,memory_manager_default_lock)

/*
 *  Forward ref to file-local function:
 */
boolean_t
memory_object_update(vm_object_t, vm_object_offset_t,
        vm_size_t, memory_object_return_t, int, vm_prot_t);


/*
 *  Routine:    memory_object_should_return_page
 *
 *  Description:
 *      Determine whether the given page should be returned,
 *      based on the page's state and on the given return policy.
 *
 *      We should return the page if one of the following is true:
 *
 *      1. Page is dirty and should_return is not RETURN_NONE.
 *      2. Page is precious and should_return is RETURN_ALL.
 *      3. Should_return is RETURN_ANYTHING.
 *
 *      As a side effect, m->dirty will be made consistent
 *      with pmap_is_modified(m), if should_return is not
 *      MEMORY_OBJECT_RETURN_NONE.
 */

#define memory_object_should_return_page(m, should_return) \
    (should_return != MEMORY_OBJECT_RETURN_NONE && \
     (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_addr))) || \
      ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
      (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))

typedef int memory_object_lock_result_t;

#define MEMORY_OBJECT_LOCK_RESULT_DONE          0
#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK    1
#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN    2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN   3

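/*
 * How memory_object_update() reacts to each result:
 *   DONE        - nothing more to do for this page;
 *   MUST_BLOCK  - flush any pending run of pages, wait on the page,
 *                 then rescan;
 *   MUST_CLEAN  - the page is dirty and must be returned to the pager;
 *   MUST_RETURN - the page is precious (but clean) and must likewise
 *                 be returned to the pager.
 */
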
memory_object_lock_result_t memory_object_lock_page(
    vm_page_t               m,
    memory_object_return_t  should_return,
    boolean_t               should_flush,
    vm_prot_t               prot);

/*
 *  Routine:    memory_object_lock_page
 *
 *  Description:
 *      Perform the appropriate lock operations on the
 *      given page.  See the description of
 *      "memory_object_lock_request" for the meanings
 *      of the arguments.
 *
 *      Returns an indication that the operation
 *      completed, blocked, or that the page must
 *      be cleaned.
 */
memory_object_lock_result_t
memory_object_lock_page(
    vm_page_t               m,
    memory_object_return_t  should_return,
    boolean_t               should_flush,
    vm_prot_t               prot)
{
    XPR(XPR_MEMORY_OBJECT,
        "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
        (integer_t)m, should_return, should_flush, prot, 0);

    /*
     *  If we cannot change access to the page,
     *  either because a mapping is in progress
     *  (busy page) or because a mapping has been
     *  wired, then give up.
     */

    if (m->busy || m->cleaning)
        return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);

    /*
     *  Don't worry about pages for which the kernel
     *  does not have any data.
     */

    if (m->absent || m->error || m->restart)
        return(MEMORY_OBJECT_LOCK_RESULT_DONE);

    assert(!m->fictitious);

    if (m->wire_count != 0) {
        /*
         *  If no change would take place
         *  anyway, return successfully.
         *
         *  No change means:
         *      Not flushing AND
         *      No change to page lock [2 checks] AND
         *      Should not return page
         *
         * XXX  This doesn't handle sending a copy of a wired
         * XXX  page to the pager, but that will require some
         * XXX  significant surgery.
         */
        if (!should_flush &&
            (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
            ! memory_object_should_return_page(m, should_return)) {

            /*
             *  Restart page unlock requests,
             *  even though no change took place.
             *  [Memory managers may be expecting
             *  to see new requests.]
             */
            m->unlock_request = VM_PROT_NONE;
            PAGE_WAKEUP(m);

            return(MEMORY_OBJECT_LOCK_RESULT_DONE);
        }

        return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
    }

    /*
     *  If the page is to be flushed, allow
     *  that to be done as part of the protection.
     */

    if (should_flush)
        prot = VM_PROT_ALL;

    /*
     *  Set the page lock.
     *
     *  If we are decreasing permission, do it now;
     *  let the fault handler take care of increases
     *  (pmap_page_protect may not increase protection).
     */

    if (prot != VM_PROT_NO_CHANGE) {
#if 0
        /* code associated with the vestigial
         * memory_object_data_unlock
         */
        if ((m->page_lock ^ prot) & prot) {
            pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot);
        }
        m->page_lock = prot;
        m->lock_supplied = TRUE;
        if (prot != VM_PROT_NONE)
            m->unusual = TRUE;
        else
            m->unusual = FALSE;

        /*
         *  Restart any past unlock requests, even if no
         *  change resulted.  If the manager explicitly
         *  requested no protection change, then it is assumed
         *  to be remembering past requests.
         */

        m->unlock_request = VM_PROT_NONE;
#endif  /* 0 */
        PAGE_WAKEUP(m);
    }

    /*
     *  Handle page returning.
     */

    if (memory_object_should_return_page(m, should_return)) {

        /*
         *  If we weren't planning
         *  to flush the page anyway,
         *  we may need to remove the
         *  page from the pageout
         *  system and from physical
         *  maps now.
         */

        vm_page_lock_queues();
        VM_PAGE_QUEUES_REMOVE(m);
        vm_page_unlock_queues();

        if (!should_flush)
            pmap_page_protect(m->phys_addr, VM_PROT_NONE);

        if (m->dirty)
            return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
        else
            return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
    }

    /*
     *  Handle flushing
     */

    if (should_flush) {
        VM_PAGE_FREE(m);
    } else {
        extern boolean_t vm_page_deactivate_hint;

        /*
         *  XXX Make clean but not flush a paging hint,
         *  and deactivate the pages.  This is a hack
         *  because it overloads flush/clean with
         *  implementation-dependent meaning.  This only
         *  happens to pages that are already clean.
         */

        if (vm_page_deactivate_hint &&
            (should_return != MEMORY_OBJECT_RETURN_NONE)) {
            vm_page_lock_queues();
            vm_page_deactivate(m);
            vm_page_unlock_queues();
        }
    }

    return(MEMORY_OBJECT_LOCK_RESULT_DONE);
}
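
/*
 * LIST_REQ_PAGEOUT_PAGES
 *
 * Flush the run of pages accumulated by memory_object_update() back to
 * the pager.  The object lock is dropped across the data_return call
 * (which may block) and retaken afterward.  Note that the macro reads
 * "should_flush" from the caller's scope, and that the vnode pager is
 * special-cased until the component merge removes the
 * vnode_pager_workaround hack above.
 */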
#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po) \
MACRO_BEGIN \
 \
    register int        i; \
    register vm_page_t  hp; \
 \
    vm_object_unlock(object); \
 \
    if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) == \
       ((rpc_subsystem_t) &vnode_pager_workaround)) { \
        (void) vnode_pager_data_return(object->pager, \
            object->pager_request, \
            po, \
            POINTER_T(0), \
            data_cnt, \
            (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
            !should_flush); \
    } else { \
        (void) memory_object_data_return(object->pager, \
            object->pager_request, \
            po, \
            POINTER_T(0), \
            data_cnt, \
            (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
            !should_flush); \
    } \
 \
    vm_object_lock(object); \
 \
MACRO_END

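/*
 * PAGEOUT_PAGES
 *
 * vm_map_copy-based variant used by the NOT_LIST_REQ configuration:
 * wrap the pages collected in "new_object" in a vm_map_copy, hand them
 * to the pager in a single data_return call, then free the holding
 * pages.  Like LIST_REQ_PAGEOUT_PAGES, it picks up "should_flush" (and
 * "holding_pages") from the caller's scope.
 */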
#ifdef MACH_BSD
#define PAGEOUT_PAGES(object, new_object, new_offset, action, po) \
MACRO_BEGIN \
 \
    vm_map_copy_t       copy; \
    register int        i; \
    register vm_page_t  hp; \
 \
    vm_object_unlock(object); \
 \
    (void) vm_map_copyin_object(new_object, 0, new_offset, &copy); \
 \
    if(((rpc_subsystem_t)pager_mux_hash_lookup(object->pager)) == \
       ((rpc_subsystem_t) &vnode_pager_workaround)) { \
        (void) vnode_pager_data_return(object->pager, \
            object->pager_request, \
            po, \
            POINTER_T(copy), \
            new_offset, \
            (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
            !should_flush); \
    } else { \
        (void) memory_object_data_return(object->pager, \
            object->pager_request, \
            po, \
            POINTER_T(copy), \
            new_offset, \
            (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
            !should_flush); \
    } \
 \
    vm_object_lock(object); \
 \
    for (i = 0; i < atop(new_offset); i++) { \
        hp = holding_pages[i]; \
        if (hp != VM_PAGE_NULL) { \
            vm_object_paging_end(object); \
            VM_PAGE_FREE(hp); \
        } \
    } \
 \
    new_object = VM_OBJECT_NULL; \
MACRO_END
#else
#define PAGEOUT_PAGES(object, new_object, new_offset, action, po) \
MACRO_BEGIN \
 \
    vm_map_copy_t       copy; \
    register int        i; \
    register vm_page_t  hp; \
 \
    vm_object_unlock(object); \
 \
    (void) vm_map_copyin_object(new_object, 0, new_offset, &copy); \
 \
    (void) memory_object_data_return( \
        object->pager, \
        object->pager_request, \
        po, \
        POINTER_T(copy), \
        new_offset, \
        (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
        !should_flush); \
 \
    vm_object_lock(object); \
 \
    for (i = 0; i < atop(new_offset); i++) { \
        hp = holding_pages[i]; \
        if (hp != VM_PAGE_NULL) { \
            vm_object_paging_end(object); \
            VM_PAGE_FREE(hp); \
        } \
    } \
 \
    new_object = VM_OBJECT_NULL; \
MACRO_END
#endif

/*
 *  Routine:    memory_object_lock_request [user interface]
 *
 *  Description:
 *      Control use of the data associated with the given
 *      memory object.  For each page in the given range,
 *      perform the following operations, in order:
 *          1)  restrict access to the page (disallow
 *              forms specified by "prot");
 *          2)  return data to the manager (if "should_return"
 *              is RETURN_DIRTY and the page is dirty, or
 *              "should_return" is RETURN_ALL and the page
 *              is either dirty or precious); and,
 *          3)  flush the cached copy (if "should_flush"
 *              is asserted).
 *      The set of pages is defined by a starting offset
 *      ("offset") and size ("size").  Only pages with the
 *      same page alignment as the starting offset are
 *      considered.
 *
 *      A single acknowledgement is sent (to the "reply_to"
 *      port) when these actions are complete.  If successful,
 *      the naked send right for reply_to is consumed.
 */

kern_return_t
memory_object_lock_request(
    register vm_object_t        object,
    register vm_object_offset_t offset,
    register vm_object_size_t   size,
    memory_object_return_t      should_return,
    int                         flags,
    vm_prot_t                   prot,
    ipc_port_t                  reply_to,
    mach_msg_type_name_t        reply_to_type)
{
    vm_object_offset_t  original_offset = offset;
    boolean_t           should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

    XPR(XPR_MEMORY_OBJECT,
        "m_o_lock_request, obj 0x%X off 0x%X size 0x%X flags %X prot %X\n",
        (integer_t)object, offset, size,
        (((should_return&1)<<1)|should_flush), prot);

    /*
     *  Check for bogus arguments.
     */
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
        vm_object_deallocate(object);
        return (KERN_INVALID_ARGUMENT);
    }

    size = round_page(size);

    /*
     *  Lock the object, and acquire a paging reference to
     *  prevent the memory_object and control ports from
     *  being destroyed.
     */

    vm_object_lock(object);
    vm_object_paging_begin(object);
    offset -= object->paging_offset;

    (void)memory_object_update(object,
        offset, size, should_return, flags, prot);

    if (IP_VALID(reply_to)) {
        vm_object_unlock(object);

        /* consumes our naked send-once/send right for reply_to */
        (void) memory_object_lock_completed(reply_to, reply_to_type,
                object->pager_request, original_offset, size);

        vm_object_lock(object);
    }

    vm_object_paging_end(object);
    vm_object_unlock(object);
    vm_object_deallocate(object);

    return (KERN_SUCCESS);
}

/*
 *  Routine:    memory_object_sync
 *
 *  Kernel internal function to synch out pages in a given
 *  range within an object to its memory manager.  Much the
 *  same as memory_object_lock_request but page protection
 *  is not changed.
 *
 *  If the should_flush and should_return flags are true, pages
 *  are flushed: dirty and precious pages are written to the
 *  memory manager and then discarded.  If should_return is
 *  false, only precious pages are returned to the memory
 *  manager.
 *
 *  If should_flush is false and should_return is true, the
 *  memory manager's copy of the pages is updated.  If
 *  should_return is also false, only the precious pages are
 *  updated.  This last option is of limited utility.
 *
 *  Returns:
 *      FALSE   if no pages were returned to the pager
 *      TRUE    otherwise.
 */

boolean_t
memory_object_sync(
    vm_object_t         object,
    vm_object_offset_t  offset,
    vm_object_size_t    size,
    boolean_t           should_flush,
    boolean_t           should_return)
{
    boolean_t   rv;

    XPR(XPR_MEMORY_OBJECT,
        "m_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
        (integer_t)object, offset, size, should_flush, should_return);

    /*
     *  Lock the object, and acquire a paging reference to
     *  prevent the memory_object and control ports from
     *  being destroyed.
     */
    vm_object_lock(object);
    vm_object_paging_begin(object);

    rv = memory_object_update(object, offset, size,
            (should_return) ?
                MEMORY_OBJECT_RETURN_ALL :
                MEMORY_OBJECT_RETURN_NONE,
            (should_flush) ?
                MEMORY_OBJECT_DATA_FLUSH : 0,
            VM_PROT_NO_CHANGE);

    vm_object_paging_end(object);
    vm_object_unlock(object);
    return rv;
}
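
/*
 * Example (hypothetical caller, for illustration only): a vm_msync()-style
 * path that wants dirty and precious pages in a range pushed to the pager
 * but kept resident might call
 *
 *     wrote_something = memory_object_sync(object, offset, size,
 *                                          FALSE,   <- should_flush
 *                                          TRUE);   <- should_return
 *
 * The object must be unlocked on entry; memory_object_sync() takes the
 * object lock and a paging reference itself.
 */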

/*
 *  Routine:    memory_object_update
 *  Description:
 *      Work function for m_o_lock_request(), m_o_sync().
 *
 *      Called with object locked and paging ref taken.
 */
boolean_t
memory_object_update(
    register vm_object_t        object,
    register vm_object_offset_t offset,
    register vm_size_t          size,
    memory_object_return_t      should_return,
    int                         flags,
    vm_prot_t                   prot)
{
    register vm_page_t  m;
    vm_page_t           holding_page;
    vm_size_t           original_size = size;
    vm_object_offset_t  paging_offset = 0;
    vm_object_t         copy_object;
    vm_size_t           data_cnt = 0;
    vm_object_offset_t  last_offset = offset;
    memory_object_lock_result_t page_lock_result;
    memory_object_lock_result_t pageout_action;
    boolean_t           data_returned = FALSE;
    boolean_t           update_cow;
    boolean_t           should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
#ifndef NOT_LIST_REQ
    boolean_t           pending_pageout = FALSE;
#endif

    /*
     *  To avoid blocking while scanning for pages, save
     *  dirty pages to be cleaned all at once.
     *
     *  XXXO A similar strategy could be used to limit the
     *  number of times that a scan must be restarted for
     *  other reasons.  Those pages that would require blocking
     *  could be temporarily collected in another list, or
     *  their offsets could be recorded in a small array.
     */

    /*
     *  XXX NOTE: May want to consider converting this to a page list
     *  XXX vm_map_copy interface.  Need to understand object
     *  XXX coalescing implications before doing so.
     */

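    /*
     * update_cow: push the range into any copy object (the COW
     * pre-pass below) before operating on the pages themselves.
     * This is wanted when flushing, unless the caller asked for
     * DATA_NO_CHANGE or DATA_PURGE; a COPY_SYNC request asks for
     * the pre-pass on its own.
     */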
    update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
            && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
                !(flags & MEMORY_OBJECT_DATA_PURGE)))
        || (flags & MEMORY_OBJECT_COPY_SYNC);


    if((((copy_object = object->copy) != NULL) && update_cow) ||
            (flags & MEMORY_OBJECT_DATA_SYNC)) {
        vm_size_t           i;
        vm_size_t           copy_size;
        vm_object_offset_t  copy_offset;
        vm_prot_t           prot;
        vm_page_t           page;
        vm_page_t           top_page;
        kern_return_t       error = 0;

        if(copy_object != NULL) {
            /* translate offset with respect to shadow's offset */
            copy_offset = (offset >= copy_object->shadow_offset) ?
                    offset - copy_object->shadow_offset :
                    (vm_object_offset_t) 0;
            if(copy_offset > copy_object->size)
                copy_offset = copy_object->size;

            /* clip size with respect to shadow offset */
            copy_size = (offset >= copy_object->shadow_offset) ?
                size : size - (copy_object->shadow_offset - offset);

            if(copy_size <= 0) {
                copy_size = 0;
            } else {
                copy_size = ((copy_offset + copy_size)
                        <= copy_object->size) ?
                    copy_size : copy_object->size - copy_offset;
            }
            /* check for a copy_offset which is beyond the end of */
            /* the copy_object */
            if(copy_size < 0)
                copy_size = 0;

            copy_size += offset;

            vm_object_unlock(object);
            vm_object_lock(copy_object);
        } else {
            copy_object = object;

            copy_size = offset + size;
            copy_offset = offset;
        }

        vm_object_paging_begin(copy_object);
        for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
    RETRY_COW_OF_LOCK_REQUEST:
            prot = VM_PROT_WRITE|VM_PROT_READ;
            switch (vm_fault_page(copy_object, i,
                VM_PROT_WRITE|VM_PROT_READ,
                FALSE,
                THREAD_UNINT,
                copy_offset,
                copy_offset + copy_size,
                VM_BEHAVIOR_SEQUENTIAL,
                &prot,
                &page,
                &top_page,
                (int *)0,
                &error,
                FALSE,
                FALSE)) {

            case VM_FAULT_SUCCESS:
                if(top_page) {
                    vm_fault_cleanup(
                        page->object, top_page);
                    PAGE_WAKEUP_DONE(page);
                    vm_page_lock_queues();
                    if (!page->active && !page->inactive)
                        vm_page_activate(page);
                    vm_page_unlock_queues();
                    vm_object_lock(copy_object);
                    vm_object_paging_begin(copy_object);
                } else {
                    PAGE_WAKEUP_DONE(page);
                    vm_page_lock_queues();
                    if (!page->active && !page->inactive)
                        vm_page_activate(page);
                    vm_page_unlock_queues();
                }
                break;
            case VM_FAULT_RETRY:
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_INTERRUPTED:
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_MEMORY_SHORTAGE:
                VM_PAGE_WAIT();
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_FICTITIOUS_SHORTAGE:
                vm_page_more_fictitious();
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_MEMORY_ERROR:
                vm_object_lock(object);
                goto BYPASS_COW_COPYIN;
            }

        }
        vm_object_paging_end(copy_object);
        if(copy_object != object) {
            vm_object_unlock(copy_object);
            vm_object_lock(object);
        }
    }
    if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
        return FALSE;
    }
    if(((copy_object = object->copy) != NULL) &&
            (flags & MEMORY_OBJECT_DATA_PURGE)) {
        copy_object->shadow_severed = TRUE;
        copy_object->shadowed = FALSE;
        copy_object->shadow = NULL;
        /* delete the ref the COW was holding on the target object */
        vm_object_deallocate(object);
    }
BYPASS_COW_COPYIN:

    for (;
         size != 0;
         size -= PAGE_SIZE, offset += PAGE_SIZE_64)
    {
        /*
         *  Limit the number of pages to be cleaned at once.
         */
        if (pending_pageout &&
            data_cnt >= PAGE_SIZE * DATA_WRITE_MAX)
        {
            LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
                pageout_action, paging_offset);
            data_cnt = 0;
            pending_pageout = FALSE;
        }

        while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
            page_lock_result = memory_object_lock_page(m,
                    should_return, should_flush, prot);

            XPR(XPR_MEMORY_OBJECT,
                "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
                (integer_t)object, offset, page_lock_result, 0, 0);

            switch (page_lock_result)
            {
            case MEMORY_OBJECT_LOCK_RESULT_DONE:
                /*
                 *  End of a cluster of dirty pages.
                 */
                if(pending_pageout) {
                    LIST_REQ_PAGEOUT_PAGES(object,
                        data_cnt, pageout_action,
                        paging_offset);
                    data_cnt = 0;
                    pending_pageout = FALSE;
                    continue;
                }
                break;

            case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
                /*
                 *  Since it is necessary to block,
                 *  clean any dirty pages now.
                 */
                if(pending_pageout) {
                    LIST_REQ_PAGEOUT_PAGES(object,
                        data_cnt, pageout_action,
                        paging_offset);
                    pending_pageout = FALSE;
                    data_cnt = 0;
                    continue;
                }

                PAGE_ASSERT_WAIT(m, THREAD_UNINT);
                vm_object_unlock(object);
                thread_block((void (*)(void))0);
                vm_object_lock(object);
                continue;

            case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
            case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
                /*
                 *  The clean and return cases are similar.
                 */

                /*
                 *  If this would form a discontiguous block,
                 *  clean the old pages and start anew.
                 */

                /*
                 *  Mark the page busy since we unlock the
                 *  object below.
                 */
                m->busy = TRUE;
                if (pending_pageout &&
                    (last_offset != offset ||
                     pageout_action != page_lock_result)) {
                    LIST_REQ_PAGEOUT_PAGES(object,
                        data_cnt, pageout_action,
                        paging_offset);
                    pending_pageout = FALSE;
                    data_cnt = 0;
                }
                m->busy = FALSE;
                holding_page = VM_PAGE_NULL;
                if(m->cleaning) {
                    PAGE_ASSERT_WAIT(m, THREAD_UNINT);
                    vm_object_unlock(object);
                    thread_block((void (*)(void))0);
                    vm_object_lock(object);
                    continue;
                }
                if(!pending_pageout) {
                    pending_pageout = TRUE;
                    pageout_action = page_lock_result;
                    paging_offset = offset;
                }
                if (should_flush) {
                    vm_page_lock_queues();
                    m->list_req_pending = TRUE;
                    m->cleaning = TRUE;
                    m->busy = TRUE;
                    m->pageout = TRUE;
                    vm_page_wire(m);
                    vm_page_unlock_queues();
                } else {
                    /*
                     *  Clean but do not flush
                     */
                    vm_page_lock_queues();
                    m->list_req_pending = TRUE;
                    m->cleaning = TRUE;
                    vm_page_unlock_queues();
                }
                vm_object_unlock(object);


                data_cnt += PAGE_SIZE;
                last_offset = offset + PAGE_SIZE_64;
                data_returned = TRUE;

                vm_object_lock(object);
                break;
            }
            break;
        }
    }

    /*
     *  We have completed the scan for applicable pages.
     *  Clean any pages that have been saved.
     */
#ifdef NOT_LIST_REQ
    if (new_object != VM_OBJECT_NULL) {
        PAGEOUT_PAGES(object, new_object, new_offset, pageout_action,
            paging_offset);
    }
#else
    if (pending_pageout) {
        LIST_REQ_PAGEOUT_PAGES(object,
            data_cnt, pageout_action, paging_offset);
    }
#endif
    return (data_returned);
}

/*
 *  Routine:    memory_object_synchronize_completed [user interface]
 *
 *  Tell the kernel that previously synchronized data
 *  (memory_object_synchronize) has been queued or placed on the
 *  backing storage.
 *
 *  Note: there may be multiple synchronize requests for a given
 *  memory object outstanding, but they will not overlap.
 */

kern_return_t
memory_object_synchronize_completed(
    vm_object_t         object,
    vm_object_offset_t  offset,
    vm_offset_t         length)
{
    msync_req_t msr;

    XPR(XPR_MEMORY_OBJECT,
        "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
        (integer_t)object, offset, length, 0, 0);

    /*
     *  Look for bogus arguments
     */

    if (object == VM_OBJECT_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);

    /*
     *  search for sync request structure
     */
    queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
        if (msr->offset == offset && msr->length == length) {
            queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
            break;
        }
    }/* queue_iterate */

    if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
        vm_object_unlock(object);
        vm_object_deallocate(object);
        return KERN_INVALID_ARGUMENT;
    }

    msr_lock(msr);
    vm_object_unlock(object);
    msr->flag = VM_MSYNC_DONE;
    msr_unlock(msr);
    thread_wakeup((event_t) msr);
    vm_object_deallocate(object);

    return KERN_SUCCESS;
}/* memory_object_synchronize_completed */

kern_return_t
memory_object_set_attributes_common(
    vm_object_t object,
    boolean_t   may_cache,
    memory_object_copy_strategy_t copy_strategy,
    boolean_t   temporary,
    vm_size_t   cluster_size,
    boolean_t   silent_overwrite,
    boolean_t   advisory_pageout)
{
    boolean_t   object_became_ready;

    XPR(XPR_MEMORY_OBJECT,
        "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
        (integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);

    if (object == VM_OBJECT_NULL)
        return(KERN_INVALID_ARGUMENT);

    /*
     *  Verify the attributes of importance
     */

    switch(copy_strategy) {
    case MEMORY_OBJECT_COPY_NONE:
    case MEMORY_OBJECT_COPY_DELAY:
        break;
    default:
        vm_object_deallocate(object);
        return(KERN_INVALID_ARGUMENT);
    }

#if !ADVISORY_PAGEOUT
    if (silent_overwrite || advisory_pageout) {
        vm_object_deallocate(object);
        return(KERN_INVALID_ARGUMENT);
    }
#endif  /* !ADVISORY_PAGEOUT */

    if (may_cache)
        may_cache = TRUE;
    if (temporary)
        temporary = TRUE;
    if (cluster_size != 0) {
        int pages_per_cluster;
        pages_per_cluster = atop(cluster_size);
        /*
         *  Cluster size must be integral multiple of page size,
         *  and be a power of 2 number of pages.
         */
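        /*
         *  (cluster_size & (PAGE_SIZE-1)) != 0 rejects sizes that
         *  are not page multiples; ((n-1) & n) != 0 rejects page
         *  counts that are not powers of 2.  For example, n == 3
         *  gives (3-1) & 3 == 2 (rejected), while n == 4 gives
         *  (4-1) & 4 == 0 (accepted).
         */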
        if ((cluster_size & (PAGE_SIZE-1)) ||
            ((pages_per_cluster-1) & pages_per_cluster)) {
            vm_object_deallocate(object);
            return KERN_INVALID_ARGUMENT;
        }
    }

    vm_object_lock(object);

    /*
     *  Copy the attributes
     */
    assert(!object->internal);
    object_became_ready = !object->pager_ready;
    object->copy_strategy = copy_strategy;
    object->can_persist = may_cache;
    object->temporary = temporary;
    object->silent_overwrite = silent_overwrite;
    object->advisory_pageout = advisory_pageout;
    if (cluster_size == 0)
        cluster_size = PAGE_SIZE;
    object->cluster_size = cluster_size;

    assert(cluster_size >= PAGE_SIZE &&
           cluster_size % PAGE_SIZE == 0);

    /*
     *  Wake up anyone waiting for the ready attribute
     *  to become asserted.
     */

    if (object_became_ready) {
        object->pager_ready = TRUE;
        vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
    }

    vm_object_unlock(object);

    vm_object_deallocate(object);

    return(KERN_SUCCESS);
}

/*
 *  Set the memory object attribute as provided.
 *
 *  XXX This routine cannot be completed until the vm_msync, clean
 *      in place, and cluster work is completed.  See ifdef notyet
 *      below and note that memory_object_set_attributes_common()
 *      may have to be expanded.
 */
kern_return_t
memory_object_change_attributes(
    vm_object_t             object,
    memory_object_flavor_t  flavor,
    memory_object_info_t    attributes,
    mach_msg_type_number_t  count,
    ipc_port_t              reply_to,
    mach_msg_type_name_t    reply_to_type)
{
    kern_return_t   result = KERN_SUCCESS;
    boolean_t       temporary;
    boolean_t       may_cache;
    boolean_t       invalidate;
    vm_size_t       cluster_size;
    memory_object_copy_strategy_t copy_strategy;
    boolean_t       silent_overwrite;
    boolean_t       advisory_pageout;

    if (object == VM_OBJECT_NULL)
        return(KERN_INVALID_ARGUMENT);

    vm_object_lock(object);
    temporary = object->temporary;
    may_cache = object->can_persist;
    copy_strategy = object->copy_strategy;
    silent_overwrite = object->silent_overwrite;
    advisory_pageout = object->advisory_pageout;
#if notyet
    invalidate = object->invalidate;
#endif
    cluster_size = object->cluster_size;
    vm_object_unlock(object);

    switch (flavor) {
    case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
    {
        old_memory_object_behave_info_t behave;

        if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        behave = (old_memory_object_behave_info_t) attributes;

        temporary = behave->temporary;
        invalidate = behave->invalidate;
        copy_strategy = behave->copy_strategy;

        break;
    }

    case MEMORY_OBJECT_BEHAVIOR_INFO:
    {
        memory_object_behave_info_t behave;

        if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        behave = (memory_object_behave_info_t) attributes;

        temporary = behave->temporary;
        invalidate = behave->invalidate;
        copy_strategy = behave->copy_strategy;
        silent_overwrite = behave->silent_overwrite;
        advisory_pageout = behave->advisory_pageout;
        break;
    }

    case MEMORY_OBJECT_PERFORMANCE_INFO:
    {
        memory_object_perf_info_t perf;

        if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        perf = (memory_object_perf_info_t) attributes;

        may_cache = perf->may_cache;
        cluster_size = round_page(perf->cluster_size);

        break;
    }

    case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
    {
        old_memory_object_attr_info_t attr;

        if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        attr = (old_memory_object_attr_info_t) attributes;

        may_cache = attr->may_cache;
        copy_strategy = attr->copy_strategy;
        cluster_size = page_size;

        break;
    }

    case MEMORY_OBJECT_ATTRIBUTE_INFO:
    {
        memory_object_attr_info_t attr;

        if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        attr = (memory_object_attr_info_t) attributes;

        copy_strategy = attr->copy_strategy;
        may_cache = attr->may_cache_object;
        cluster_size = attr->cluster_size;
        temporary = attr->temporary;

        break;
    }

    default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }

    if (result != KERN_SUCCESS) {
        vm_object_deallocate(object);
        return(result);
    }

    if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
        copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        temporary = TRUE;
    } else {
        temporary = FALSE;
    }

    /*
     *  Do the work and throw away our object reference.  It
     *  is important that the object reference be deallocated
     *  BEFORE sending the reply.  The whole point of the reply
     *  is that it shows up after the terminate message that
     *  may be generated by setting the object uncacheable.
     *
     * XXX  may_cache may become a tri-valued variable to handle
     * XXX  uncache if not in use.
     */
    result = memory_object_set_attributes_common(object,
                                                 may_cache,
                                                 copy_strategy,
                                                 temporary,
                                                 cluster_size,
                                                 silent_overwrite,
                                                 advisory_pageout);

    if (IP_VALID(reply_to)) {
        /* consumes our naked send-once/send right for reply_to */
        (void) memory_object_change_completed(reply_to, reply_to_type,
                object->alive ?
                object->pager_request : PAGER_REQUEST_NULL,
                flavor);
    }

    return(result);
}

kern_return_t
memory_object_get_attributes(
    vm_object_t             object,
    memory_object_flavor_t  flavor,
    memory_object_info_t    attributes,     /* pointer to OUT array */
    mach_msg_type_number_t  *count)         /* IN/OUT */
{
    kern_return_t   ret = KERN_SUCCESS;

    if (object == VM_OBJECT_NULL)
        return(KERN_INVALID_ARGUMENT);

    vm_object_lock(object);

    switch (flavor) {
    case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
    {
        old_memory_object_behave_info_t behave;

        if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        behave = (old_memory_object_behave_info_t) attributes;
        behave->copy_strategy = object->copy_strategy;
        behave->temporary = object->temporary;
#if notyet  /* remove when vm_msync complies and clean in place fini */
        behave->invalidate = object->invalidate;
#else
        behave->invalidate = FALSE;
#endif

        *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
        break;
    }

    case MEMORY_OBJECT_BEHAVIOR_INFO:
    {
        memory_object_behave_info_t behave;

        if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        behave = (memory_object_behave_info_t) attributes;
        behave->copy_strategy = object->copy_strategy;
        behave->temporary = object->temporary;
#if notyet  /* remove when vm_msync complies and clean in place fini */
        behave->invalidate = object->invalidate;
#else
        behave->invalidate = FALSE;
#endif
        behave->advisory_pageout = object->advisory_pageout;
        behave->silent_overwrite = object->silent_overwrite;
        *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
        break;
    }

    case MEMORY_OBJECT_PERFORMANCE_INFO:
    {
        memory_object_perf_info_t perf;

        if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        perf = (memory_object_perf_info_t) attributes;
        perf->cluster_size = object->cluster_size;
        perf->may_cache = object->can_persist;

        *count = MEMORY_OBJECT_PERF_INFO_COUNT;
        break;
    }

    case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
    {
        old_memory_object_attr_info_t attr;

        if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        attr = (old_memory_object_attr_info_t) attributes;
        attr->may_cache = object->can_persist;
        attr->copy_strategy = object->copy_strategy;

        *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
        break;
    }

    case MEMORY_OBJECT_ATTRIBUTE_INFO:
    {
        memory_object_attr_info_t attr;

        if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        attr = (memory_object_attr_info_t) attributes;
        attr->copy_strategy = object->copy_strategy;
        attr->cluster_size = object->cluster_size;
        attr->may_cache_object = object->can_persist;
        attr->temporary = object->temporary;

        *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
        break;
    }

    default:
        ret = KERN_INVALID_ARGUMENT;
        break;
    }

    vm_object_unlock(object);

    vm_object_deallocate(object);

    return(ret);
}

int vm_stat_discard_cleared_reply = 0;
int vm_stat_discard_cleared_unset = 0;
int vm_stat_discard_cleared_too_late = 0;


/*
 *  vm_set_default_memory_manager():
 *  [Obsolete]
 */
kern_return_t
vm_set_default_memory_manager(
    host_t      host,
    ipc_port_t  *default_manager)
{
    return(host_default_memory_manager(host_priv_self(),
                                       default_manager, 4*PAGE_SIZE));
}

/*
 *  Routine:    host_default_memory_manager
 *  Purpose:
 *      set/get the default memory manager port and default cluster
 *      size.
 *
 *      If successful, consumes the supplied naked send right.
 */
kern_return_t
host_default_memory_manager(
    host_priv_t host_priv,
    ipc_port_t  *default_manager,
    vm_size_t   cluster_size)
{
    ipc_port_t  current_manager;
    ipc_port_t  new_manager;
    ipc_port_t  returned_manager;

    if (host_priv == HOST_PRIV_NULL)
        return(KERN_INVALID_HOST);

    assert(host_priv == &realhost);

    new_manager = *default_manager;
    mutex_lock(&memory_manager_default_lock);
    current_manager = memory_manager_default;

    if (new_manager == IP_NULL) {
        /*
         *  Retrieve the current value.
         */

        returned_manager = ipc_port_copy_send(current_manager);
    } else {
        /*
         *  Retrieve the current value,
         *  and replace it with the supplied value.
         *  We consume the supplied naked send right.
         */

        returned_manager = current_manager;
        memory_manager_default = new_manager;
        if (cluster_size % PAGE_SIZE != 0) {
#if 0
            mutex_unlock(&memory_manager_default_lock);
            return KERN_INVALID_ARGUMENT;
#else
            cluster_size = round_page(cluster_size);
#endif
        }
        memory_manager_default_cluster = cluster_size;

        /*
         *  In case anyone's been waiting for a memory
         *  manager to be established, wake them up.
         */

        thread_wakeup((event_t) &memory_manager_default);
    }

    mutex_unlock(&memory_manager_default_lock);

    *default_manager = returned_manager;
    return(KERN_SUCCESS);
}

/*
 *  Routine:    memory_manager_default_reference
 *  Purpose:
 *      Returns a naked send right for the default
 *      memory manager.  The returned right is always
 *      valid (not IP_NULL or IP_DEAD).
 */

ipc_port_t
memory_manager_default_reference(
    vm_size_t *cluster_size)
{
    ipc_port_t current_manager;

    mutex_lock(&memory_manager_default_lock);

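    /*
     *  The comma expression re-copies a send right on each iteration;
     *  if no default memory manager has been installed yet, sleep on
     *  the lock until host_default_memory_manager() does a wakeup.
     */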
    while (current_manager = ipc_port_copy_send(memory_manager_default),
           !IP_VALID(current_manager)) {
        thread_sleep_mutex((event_t) &memory_manager_default,
            &memory_manager_default_lock, THREAD_UNINT);
        mutex_lock(&memory_manager_default_lock);
    }
    *cluster_size = memory_manager_default_cluster;

    mutex_unlock(&memory_manager_default_lock);

    return current_manager;
}

/*
 *  Routine:    memory_manager_default_port
 *  Purpose:
 *      Returns true if the receiver for the port
 *      is the default memory manager.
 *
 *      This is a hack to let ds_read_done
 *      know when it should keep memory wired.
 */

boolean_t
memory_manager_default_port(
    ipc_port_t port)
{
    ipc_port_t current;
    boolean_t  result;

    mutex_lock(&memory_manager_default_lock);
    current = memory_manager_default;
    if (IP_VALID(current)) {
        /*
         *  There is no point in bothering to lock
         *  both ports, which would be painful to do.
         *  If the receive rights are moving around,
         *  we might be inaccurate.
         */

        result = port->ip_receiver == current->ip_receiver;
    } else
        result = FALSE;
    mutex_unlock(&memory_manager_default_lock);

    return result;
}

/*
 *  Routine:    memory_manager_default_check
 *
 *  Purpose:
 *      Check whether a default memory manager has been set
 *      up yet.  Returns KERN_SUCCESS if the dmm exists,
 *      and KERN_FAILURE if it does not.
 *
 *      If there is no default memory manager, log an error,
 *      but only the first time.
 */
kern_return_t
memory_manager_default_check(void)
{
    ipc_port_t current;

    mutex_lock(&memory_manager_default_lock);
    current = memory_manager_default;
    if (!IP_VALID(current)) {
        static boolean_t logged;        /* initialized to 0 */
        boolean_t complain = !logged;
        logged = TRUE;
        mutex_unlock(&memory_manager_default_lock);
        if (complain)
            printf("Warning: No default memory manager\n");
        return(KERN_FAILURE);
    } else {
        mutex_unlock(&memory_manager_default_lock);
        return(KERN_SUCCESS);
    }
}

void
memory_manager_default_init(void)
{
    memory_manager_default = IP_NULL;
    mutex_init(&memory_manager_default_lock, ETAP_VM_MEMMAN);
}

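/*
 *  Routine:    memory_object_deactivate_pages
 *  Purpose:
 *      Move the resident pages in the given range to the head of
 *      the inactive queue, clearing their reference bits (and, if
 *      "kill_page" is set on an internal object, their dirty and
 *      precious state as well), then repeat down the shadow chain.
 *      Entered with the object locked; only the original object's
 *      lock is left held on return.
 */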
void
memory_object_deactivate_pages(
    vm_object_t         object,
    vm_object_offset_t  offset,
    vm_object_size_t    size,
    boolean_t           kill_page)
{
    vm_object_t orig_object;
    int         pages_moved = 0;
    int         pages_found = 0;

    /*
     *  entered with object lock held, acquire a paging reference to
     *  prevent the memory_object and control ports from
     *  being destroyed.
     */
    orig_object = object;

    for (;;) {
        register vm_page_t  m;
        vm_object_offset_t  toffset;
        vm_object_size_t    tsize;

        vm_object_paging_begin(object);
        vm_page_lock_queues();

        for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {

            if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {

                pages_found++;

                if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {

                    m->reference = FALSE;
                    pmap_clear_reference(m->phys_addr);

                    if ((kill_page) && (object->internal)) {
                        m->precious = FALSE;
                        m->dirty = FALSE;
                        pmap_clear_modify(m->phys_addr);
                        vm_external_state_clr(object->existence_map, toffset);
                    }
                    VM_PAGE_QUEUES_REMOVE(m);

                    queue_enter_first(&vm_page_queue_inactive, m, vm_page_t, pageq);

                    m->inactive = TRUE;
                    if (!m->fictitious)
                        vm_page_inactive_count++;

                    pages_moved++;
                }
            }
        }
        vm_page_unlock_queues();
        vm_object_paging_end(object);

        if (object->shadow) {
            vm_object_t tmp_object;

            kill_page = 0;

            offset += object->shadow_offset;

            tmp_object = object->shadow;
            vm_object_lock(tmp_object);

            if (object != orig_object)
                vm_object_unlock(object);
            object = tmp_object;
        } else
            break;
    }
    if (object != orig_object)
        vm_object_unlock(object);
}

/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the object rather than on a UPL */
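/* A typical caller (hypothetical sequence) first claims the page with */
/* ops = UPL_POP_SET | UPL_POP_BUSY, inspects the returned flags and */
/* physical address, and finishes with UPL_POP_CLR | UPL_POP_BUSY, which */
/* also wakes any threads waiting on the page. */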

kern_return_t
memory_object_page_op(
    vm_object_t         object,
    vm_object_offset_t  offset,
    int                 ops,
    vm_offset_t         *phys_entry,
    int                 *flags)
{
    vm_page_t   dst_page;

    vm_object_lock(object);

    while (TRUE) {
        if((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
            vm_object_unlock(object);
            return KERN_FAILURE;
        }

        /* Sync up on getting the busy bit */
        if((dst_page->busy || dst_page->cleaning) &&
           (((ops & UPL_POP_SET) && (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
            /* someone else is playing with the page, we will */
            /* have to wait */
            PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
            vm_object_unlock(object);
            thread_block((void(*)(void))0);
            vm_object_lock(object);
            continue;
        }

        if (ops & UPL_POP_DUMP) {
            vm_page_lock_queues();
            vm_page_free(dst_page);
            vm_page_unlock_queues();
            break;
        }

        if (flags) {
            *flags = 0;

            /* Get the condition of flags before requested ops */
            /* are undertaken */

            if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
            if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
            if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
            if(dst_page->absent) *flags |= UPL_POP_ABSENT;
            if(dst_page->busy) *flags |= UPL_POP_BUSY;
        }
        if (phys_entry)
            *phys_entry = dst_page->phys_addr;

        /* The caller should have made a call either contingent with */
        /* or prior to this call to set UPL_POP_BUSY */
        if(ops & UPL_POP_SET) {
            /* The protection granted with this assert will */
            /* not be complete.  If the caller violates the */
            /* convention and attempts to change page state */
            /* without first setting busy we may not see it */
            /* because the page may already be busy.  However */
            /* if such violations occur we will assert sooner */
            /* or later. */
            assert(dst_page->busy || (ops & UPL_POP_BUSY));
            if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
            if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
            if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
            if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
            if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
        }

        if(ops & UPL_POP_CLR) {
            assert(dst_page->busy);
            if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
            if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
            if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
            if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
            if (ops & UPL_POP_BUSY) {
                dst_page->busy = FALSE;
                PAGE_WAKEUP(dst_page);
            }
        }
        break;
    }

    vm_object_unlock(object);
    return KERN_SUCCESS;
}