/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/memory_object.c
 *	Author:	Michael Wayne Young
 *
 *	External memory management interface control functions.
 */

#include <advisory_pageout.h>

/*
 *	Interface dependencies:
 */

#include <mach/std_types.h>	/* For pointer_t */
#include <mach/mach_types.h>

#include <mach/mig.h>
#include <mach/kern_return.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/host_priv_server.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/message.h>

/*
 *	Implementation dependencies:
 */
#include <string.h>		/* For memcpy() */

#include <kern/xpr.h>
#include <kern/host.h>
#include <kern/thread.h>	/* For current_thread() */
#include <kern/ipc_mig.h>
#include <kern/misc_protos.h>

#include <vm/vm_object.h>
#include <vm/vm_fault.h>
#include <vm/memory_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>		/* For pmap_clear_modify */
#include <vm/vm_kern.h>		/* For kernel_map, vm_move */
#include <vm/vm_map.h>		/* For vm_map_pageable */

#if	MACH_PAGEMAP
#include <vm/vm_external.h>
#endif	/* MACH_PAGEMAP */

#include <vm/vm_protos.h>


memory_object_default_t	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
vm_size_t		memory_manager_default_cluster = 0;
decl_mutex_data(,	memory_manager_default_lock)

/*
 *	Routine:	memory_object_should_return_page
 *
 *	Description:
 *		Determine whether the given page should be returned,
 *		based on the page's state and on the given return policy.
 *
 *		We should return the page if one of the following is true:
 *
 *		1. Page is dirty and should_return is not RETURN_NONE.
 *		2. Page is precious and should_return is RETURN_ALL.
 *		3. Should_return is RETURN_ANYTHING.
 *
 *		As a side effect, m->dirty will be made consistent
 *		with pmap_is_modified(m), if should_return is not
 *		MEMORY_OBJECT_RETURN_NONE.
 */

#define	memory_object_should_return_page(m, should_return) \
    (should_return != MEMORY_OBJECT_RETURN_NONE && \
     (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
      ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
      (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
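
/*
 * Illustrative note (added commentary, not in the original source):
 * with should_return == MEMORY_OBJECT_RETURN_DIRTY, a page whose pmap
 * modify bit is set is returned (and m->dirty is refreshed as a side
 * effect of the macro), while a clean-but-precious page is not; that
 * precious page is returned only under RETURN_ALL or RETURN_ANYTHING.
 */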

typedef	int	memory_object_lock_result_t;

#define MEMORY_OBJECT_LOCK_RESULT_DONE		0
#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK	1
#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN	2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN	3

memory_object_lock_result_t memory_object_lock_page(
				vm_page_t		m,
				memory_object_return_t	should_return,
				boolean_t		should_flush,
				vm_prot_t		prot);

/*
 *	Routine:	memory_object_lock_page
 *
 *	Description:
 *		Perform the appropriate lock operations on the
 *		given page.  See the description of
 *		"memory_object_lock_request" for the meanings
 *		of the arguments.
 *
 *		Returns an indication that the operation
 *		completed, blocked, or that the page must
 *		be cleaned.
 */
memory_object_lock_result_t
memory_object_lock_page(
	vm_page_t		m,
	memory_object_return_t	should_return,
	boolean_t		should_flush,
	vm_prot_t		prot)
{
	XPR(XPR_MEMORY_OBJECT,
	    "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
	    (integer_t)m, should_return, should_flush, prot, 0);

	/*
	 *	If we cannot change access to the page,
	 *	either because a mapping is in progress
	 *	(busy page) or because a mapping has been
	 *	wired, then give up.
	 */

	if (m->busy || m->cleaning)
		return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);

	/*
	 *	Don't worry about pages for which the kernel
	 *	does not have any data.
	 */

	if (m->absent || m->error || m->restart) {
		if (m->error && should_flush) {
			/* dump the page, pager wants us to */
			/* clean it up and there is no */
			/* relevant data to return */
			if (m->wire_count == 0) {
				VM_PAGE_FREE(m);
				return(MEMORY_OBJECT_LOCK_RESULT_DONE);
			}
		} else {
			return(MEMORY_OBJECT_LOCK_RESULT_DONE);
		}
	}

	assert(!m->fictitious);

	if (m->wire_count != 0) {
		/*
		 *	If no change would take place
		 *	anyway, return successfully.
		 *
		 *	No change means:
		 *	Not flushing AND
		 *	No change to page lock [2 checks] AND
		 *	Should not return page
		 *
		 * XXX	This doesn't handle sending a copy of a wired
		 * XXX	page to the pager, but that will require some
		 * XXX	significant surgery.
		 */
		if (!should_flush &&
		    (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
		    ! memory_object_should_return_page(m, should_return)) {

			/*
			 *	Restart page unlock requests,
			 *	even though no change took place.
			 *	[Memory managers may be expecting
			 *	to see new requests.]
			 */
			m->unlock_request = VM_PROT_NONE;
			PAGE_WAKEUP(m);

			return(MEMORY_OBJECT_LOCK_RESULT_DONE);
		}

		return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
	}

	/*
	 *	If the page is to be flushed, allow
	 *	that to be done as part of the protection.
	 */

	if (should_flush)
		prot = VM_PROT_ALL;

	/*
	 *	Set the page lock.
	 *
	 *	If we are decreasing permission, do it now;
	 *	let the fault handler take care of increases
	 *	(pmap_page_protect may not increase protection).
	 */

	if (prot != VM_PROT_NO_CHANGE) {
		if ((m->page_lock ^ prot) & prot) {
			pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
		}
#if 0
		/* code associated with the vestigial
		 * memory_object_data_unlock
		 */
		m->page_lock = prot;
		m->lock_supplied = TRUE;
		if (prot != VM_PROT_NONE)
			m->unusual = TRUE;
		else
			m->unusual = FALSE;

		/*
		 *	Restart any past unlock requests, even if no
		 *	change resulted.  If the manager explicitly
		 *	requested no protection change, then it is assumed
		 *	to be remembering past requests.
		 */

		m->unlock_request = VM_PROT_NONE;
#endif	/* 0 */
		PAGE_WAKEUP(m);
	}

	/*
	 *	Handle page returning.
	 */

	if (memory_object_should_return_page(m, should_return)) {

		/*
		 *	If we weren't planning
		 *	to flush the page anyway,
		 *	we may need to remove the
		 *	page from the pageout
		 *	system and from physical
		 *	maps now.
		 */

		vm_page_lock_queues();
		VM_PAGE_QUEUES_REMOVE(m);
		vm_page_unlock_queues();

		if (!should_flush)
			pmap_disconnect(m->phys_page);

		if (m->dirty)
			return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
		else
			return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
	}

	/*
	 *	Handle flushing
	 */

	if (should_flush) {
		VM_PAGE_FREE(m);
	} else {
		/*
		 *	XXX Make clean but not flush a paging hint,
		 *	and deactivate the pages.  This is a hack
		 *	because it overloads flush/clean with
		 *	implementation-dependent meaning.  This only
		 *	happens to pages that are already clean.
		 */

		if (vm_page_deactivate_hint &&
		    (should_return != MEMORY_OBJECT_RETURN_NONE)) {
			vm_page_lock_queues();
			vm_page_deactivate(m);
			vm_page_unlock_queues();
		}
	}

	return(MEMORY_OBJECT_LOCK_RESULT_DONE);
}

#define	LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po, ro, ioerr, iosync) \
MACRO_BEGIN								\
									\
	register int		upl_flags;				\
									\
	vm_object_unlock(object);					\
									\
	if (iosync)							\
		upl_flags = UPL_MSYNC | UPL_IOSYNC;			\
	else								\
		upl_flags = UPL_MSYNC;					\
									\
	(void) memory_object_data_return(object->pager,		\
		po,							\
		data_cnt,						\
		ro,							\
		ioerr,							\
		(action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN),	\
		!should_flush,						\
		upl_flags);						\
									\
	vm_object_lock(object);						\
MACRO_END
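
/*
 * Note (added commentary, not in the original source): the macro above
 * hands a run of "data_cnt" bytes starting at object offset "po" back
 * to the pager via memory_object_data_return(), dropping the object
 * lock for the duration of the call.  It also references "should_flush"
 * unhygienically, so it may only be expanded in a scope where a
 * variable of that name is visible.
 */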

/*
 *	Routine:	memory_object_lock_request [user interface]
 *
 *	Description:
 *		Control use of the data associated with the given
 *		memory object.  For each page in the given range,
 *		perform the following operations, in order:
 *			1)  restrict access to the page (disallow
 *			    forms specified by "prot");
 *			2)  return data to the manager (if "should_return"
 *			    is RETURN_DIRTY and the page is dirty, or
 *			    "should_return" is RETURN_ALL and the page
 *			    is either dirty or precious); and,
 *			3)  flush the cached copy (if "should_flush"
 *			    is asserted).
 *		The set of pages is defined by a starting offset
 *		("offset") and size ("size").  Only pages with the
 *		same page alignment as the starting offset are
 *		considered.
 *
 *		A single acknowledgement is sent (to the "reply_to"
 *		port) when these actions are complete.  If successful,
 *		the naked send right for reply_to is consumed.
 */

kern_return_t
memory_object_lock_request(
	memory_object_control_t		control,
	memory_object_offset_t		offset,
	memory_object_size_t		size,
	memory_object_offset_t		*resid_offset,
	int				*io_errno,
	memory_object_return_t		should_return,
	int				flags,
	vm_prot_t			prot)
{
	vm_object_t		object;
	__unused boolean_t	should_flush;

	should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
	    (integer_t)control, offset, size,
	    (((should_return&1)<<1)|should_flush), prot);

	/*
	 *	Check for bogus arguments.
	 */
	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
		return (KERN_INVALID_ARGUMENT);

	size = round_page_64(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);
	offset -= object->paging_offset;

	(void)vm_object_update(object,
		offset, size, resid_offset, io_errno, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return (KERN_SUCCESS);
}
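
/*
 * Illustrative sketch (added commentary, not in the original source):
 * a caller holding a valid control reference could write back and
 * evict the first page of an object with something like
 *
 *	kr = memory_object_lock_request(control,
 *		0, PAGE_SIZE_64,
 *		NULL, NULL,
 *		MEMORY_OBJECT_RETURN_DIRTY,
 *		MEMORY_OBJECT_DATA_FLUSH,
 *		VM_PROT_NO_CHANGE);
 *
 * Dirty pages in the range are pushed to the pager and the cached
 * copies discarded; page protections are left unchanged.
 */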

/*
 *	memory_object_release_name:  [interface]
 *
 *	Enforces the name semantic on memory object reference count
 *	decrement.  This routine should not be called unless the caller
 *	holds a name reference gained through the memory_object_named_create
 *	or the memory_object_rename call.
 *	If the TERMINATE_IDLE flag is set, the call will return if the
 *	reference count is not 1, i.e. idle, with the only remaining
 *	reference being the name.
 *	If the decision is made to proceed, the name field flag is set to
 *	false and the reference count is decremented.  If the RESPECT_CACHE
 *	flag is set and the reference count has gone to zero, the
 *	memory_object is checked to see if it is cacheable; otherwise, when
 *	the reference count is zero, it is simply terminated.
 */

kern_return_t
memory_object_release_name(
	memory_object_control_t	control,
	int			flags)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_release_name(object, flags);
}



/*
 *	Routine:	memory_object_destroy [user interface]
 *	Purpose:
 *		Shut down a memory object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
memory_object_destroy(
	memory_object_control_t	control,
	kern_return_t		reason)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (vm_object_destroy(object, reason));
}

/*
 *	Routine:	vm_object_sync
 *
 *	Kernel internal function to synch out pages in a given
 *	range within an object to its memory manager.  Much the
 *	same as memory_object_lock_request but page protection
 *	is not changed.
 *
 *	If the should_flush and should_return flags are true, pages
 *	are flushed, that is, dirty and precious pages are written to
 *	the memory manager and then discarded.  If should_return
 *	is false, only precious pages are returned to the memory
 *	manager.
 *
 *	If should_flush is false and should_return is true, the memory
 *	manager's copy of the pages is updated.  If should_return
 *	is also false, only the precious pages are updated.  This
 *	last option is of limited utility.
 *
 *	Returns:
 *	FALSE		if no pages were returned to the pager
 *	TRUE		otherwise.
 */

boolean_t
vm_object_sync(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		should_flush,
	boolean_t		should_return,
	boolean_t		should_iosync)
{
	boolean_t	rv;
	int		flags;

	XPR(XPR_VM_OBJECT,
	    "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
	    (integer_t)object, offset, size, should_flush, should_return);

	/*
	 * Lock the object, and acquire a paging reference to
	 * prevent the memory_object and control ports from
	 * being destroyed.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	if (should_flush)
		flags = MEMORY_OBJECT_DATA_FLUSH;
	else
		flags = 0;

	if (should_iosync)
		flags |= MEMORY_OBJECT_IO_SYNC;

	rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL,
			      (should_return) ?
			      MEMORY_OBJECT_RETURN_ALL :
			      MEMORY_OBJECT_RETURN_NONE,
			      flags,
			      VM_PROT_NO_CHANGE);


	vm_object_paging_end(object);
	vm_object_unlock(object);
	return rv;
}
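
/*
 * Illustrative mapping (added commentary, not in the original source):
 * an msync(2)-style synchronous writeback that keeps the pages cached
 * would correspond to
 *
 *	vm_object_sync(object, offset, size,
 *		FALSE,	// should_flush: keep the pages resident
 *		TRUE,	// should_return: push dirty data to the pager
 *		TRUE);	// should_iosync: wait for the I/O
 *
 * which reaches vm_object_update() with MEMORY_OBJECT_RETURN_ALL and
 * MEMORY_OBJECT_IO_SYNC set.
 */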




static int
vm_object_update_extent(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_offset_t	offset_end,
	vm_object_offset_t	*offset_resid,
	int			*io_errno,
	boolean_t		should_flush,
	memory_object_return_t	should_return,
	boolean_t		should_iosync,
	vm_prot_t		prot)
{
	vm_page_t	m;
	int		retval = 0;
	vm_size_t	data_cnt = 0;
	vm_object_offset_t	paging_offset = 0;
	vm_object_offset_t	last_offset = offset;
	memory_object_lock_result_t	page_lock_result;
	memory_object_lock_result_t	pageout_action;

	pageout_action = MEMORY_OBJECT_LOCK_RESULT_DONE;

	for (;
	     offset < offset_end && object->resident_page_count;
	     offset += PAGE_SIZE_64) {

		/*
		 * Limit the number of pages to be cleaned at once.
		 */
		if (data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) {
			LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
				pageout_action, paging_offset, offset_resid, io_errno, should_iosync);
			data_cnt = 0;
		}

		while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);

			XPR(XPR_MEMORY_OBJECT,
			    "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
			    (integer_t)object, offset, page_lock_result, 0, 0);

			switch (page_lock_result)
			{
			case MEMORY_OBJECT_LOCK_RESULT_DONE:
				/*
				 *	End of a cluster of dirty pages.
				 */
				if (data_cnt) {
					LIST_REQ_PAGEOUT_PAGES(object,
						data_cnt, pageout_action,
						paging_offset, offset_resid, io_errno, should_iosync);
					data_cnt = 0;
					continue;
				}
				break;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
				/*
				 *	Since it is necessary to block,
				 *	clean any dirty pages now.
				 */
				if (data_cnt) {
					LIST_REQ_PAGEOUT_PAGES(object,
						data_cnt, pageout_action,
						paging_offset, offset_resid, io_errno, should_iosync);
					data_cnt = 0;
					continue;
				}
				PAGE_SLEEP(object, m, THREAD_UNINT);
				continue;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
			case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
				/*
				 * The clean and return cases are similar.
				 *
				 * if this would form a discontiguous block,
				 * clean the old pages and start anew.
				 *
				 * Mark the page busy since we will unlock the
				 * object if we issue the LIST_REQ_PAGEOUT
				 */
				m->busy = TRUE;
				if (data_cnt &&
				    ((last_offset != offset) || (pageout_action != page_lock_result))) {
					LIST_REQ_PAGEOUT_PAGES(object,
						data_cnt, pageout_action,
						paging_offset, offset_resid, io_errno, should_iosync);
					data_cnt = 0;
				}
				m->busy = FALSE;

				if (m->cleaning) {
					PAGE_SLEEP(object, m, THREAD_UNINT);
					continue;
				}
				if (data_cnt == 0) {
					pageout_action = page_lock_result;
					paging_offset = offset;
				}
				data_cnt += PAGE_SIZE;
				last_offset = offset + PAGE_SIZE_64;

				vm_page_lock_queues();
				/*
				 * Clean
				 */
				m->list_req_pending = TRUE;
				m->cleaning = TRUE;

				if (should_flush) {
					/*
					 * and add additional state
					 * for the flush
					 */
					m->busy = TRUE;
					m->pageout = TRUE;
					vm_page_wire(m);
				}
				vm_page_unlock_queues();

				retval = 1;
				break;
			}
			break;
		}
	}
	/*
	 *	We have completed the scan for applicable pages.
	 *	Clean any pages that have been saved.
	 */
	if (data_cnt) {
		LIST_REQ_PAGEOUT_PAGES(object,
			data_cnt, pageout_action, paging_offset, offset_resid, io_errno, should_iosync);
	}
	return (retval);
}



/*
 *	Routine:	vm_object_update
 *	Description:
 *		Work function for m_o_lock_request(), vm_o_sync().
 *
 *		Called with object locked and paging ref taken.
 */
kern_return_t
vm_object_update(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_object_size_t	size,
	register vm_object_offset_t	*resid_offset,
	int				*io_errno,
	memory_object_return_t		should_return,
	int				flags,
	vm_prot_t			protection)
{
	vm_object_t		copy_object;
	boolean_t		data_returned = FALSE;
	boolean_t		update_cow;
	boolean_t		should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE;
	boolean_t		should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE;
	int			num_of_extents;
	int			n;
#define MAX_EXTENTS	8
#define EXTENT_SIZE	(1024 * 1024 * 256)
#define RESIDENT_LIMIT	(1024 * 32)
	struct extent {
		vm_object_offset_t e_base;
		vm_object_offset_t e_min;
		vm_object_offset_t e_max;
	} extents[MAX_EXTENTS];

	/*
	 *	To avoid blocking while scanning for pages, save
	 *	dirty pages to be cleaned all at once.
	 *
	 *	XXXO A similar strategy could be used to limit the
	 *	number of times that a scan must be restarted for
	 *	other reasons.  Those pages that would require blocking
	 *	could be temporarily collected in another list, or
	 *	their offsets could be recorded in a small array.
	 */

	/*
	 *	XXX NOTE: May want to consider converting this to a page list
	 *	XXX vm_map_copy interface.  Need to understand object
	 *	XXX coalescing implications before doing so.
	 */

	update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
			&& (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
			    !(flags & MEMORY_OBJECT_DATA_PURGE)))
			|| (flags & MEMORY_OBJECT_COPY_SYNC);


	if ((((copy_object = object->copy) != NULL) && update_cow) ||
	    (flags & MEMORY_OBJECT_DATA_SYNC)) {
		vm_map_size_t		i;
		vm_map_size_t		copy_size;
		vm_map_offset_t		copy_offset;
		vm_prot_t		prot;
		vm_page_t		page;
		vm_page_t		top_page;
		kern_return_t		error = 0;

		if (copy_object != NULL) {
			/* translate offset with respect to shadow's offset */
			copy_offset = (offset >= copy_object->shadow_offset) ?
				(vm_map_offset_t)(offset - copy_object->shadow_offset) :
				(vm_map_offset_t) 0;
			if (copy_offset > copy_object->size)
				copy_offset = copy_object->size;

			/* clip size with respect to shadow offset */
			if (offset >= copy_object->shadow_offset) {
				copy_size = size;
			} else if (size >= copy_object->shadow_offset - offset) {
				copy_size = size -
					(copy_object->shadow_offset - offset);
			} else {
				copy_size = 0;
			}

			if (copy_offset + copy_size > copy_object->size) {
				if (copy_object->size >= copy_offset) {
					copy_size = copy_object->size - copy_offset;
				} else {
					copy_size = 0;
				}
			}

			copy_size += copy_offset;

			vm_object_unlock(object);
			vm_object_lock(copy_object);
		} else {
			copy_object = object;

			copy_size = offset + size;
			copy_offset = offset;
		}

		vm_object_paging_begin(copy_object);
		for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
	RETRY_COW_OF_LOCK_REQUEST:
			prot = VM_PROT_WRITE|VM_PROT_READ;
			switch (vm_fault_page(copy_object, i,
				VM_PROT_WRITE|VM_PROT_READ,
				FALSE,
				THREAD_UNINT,
				copy_offset,
				copy_offset+copy_size,
				VM_BEHAVIOR_SEQUENTIAL,
				&prot,
				&page,
				&top_page,
				(int *)0,
				&error,
				FALSE,
				FALSE, NULL, 0)) {

			case VM_FAULT_SUCCESS:
				if (top_page) {
					vm_fault_cleanup(
						page->object, top_page);
					PAGE_WAKEUP_DONE(page);
					vm_page_lock_queues();
					if (!page->active && !page->inactive)
						vm_page_activate(page);
					vm_page_unlock_queues();
					vm_object_lock(copy_object);
					vm_object_paging_begin(copy_object);
				} else {
					PAGE_WAKEUP_DONE(page);
					vm_page_lock_queues();
					if (!page->active && !page->inactive)
						vm_page_activate(page);
					vm_page_unlock_queues();
				}
				break;
			case VM_FAULT_RETRY:
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_INTERRUPTED:
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_MEMORY_SHORTAGE:
				VM_PAGE_WAIT();
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_FICTITIOUS_SHORTAGE:
				vm_page_more_fictitious();
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_MEMORY_ERROR:
				vm_object_lock(object);
				goto BYPASS_COW_COPYIN;
			}

		}
		vm_object_paging_end(copy_object);
		if (copy_object != object) {
			vm_object_unlock(copy_object);
			vm_object_lock(object);
		}
	}
	if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
		return KERN_SUCCESS;
	}
	if (((copy_object = object->copy) != NULL) &&
	    (flags & MEMORY_OBJECT_DATA_PURGE)) {
		copy_object->shadow_severed = TRUE;
		copy_object->shadowed = FALSE;
		copy_object->shadow = NULL;
		/* delete the ref the COW was holding on the target object */
		vm_object_deallocate(object);
	}
BYPASS_COW_COPYIN:

	/*
	 * when we have a really large range to check relative
	 * to the number of actual resident pages, we'd like
	 * to use the resident page list to drive our checks.
	 * however, the object lock will get dropped while processing
	 * the page, which means the resident queue can change, which
	 * means we can't walk the queue as we process the pages.
	 * we also want to do the processing in offset order to allow
	 * 'runs' of pages to be collected if we're being told to
	 * flush to disk... the resident page queue is NOT ordered.
	 *
	 * a temporary solution (until we figure out how to deal with
	 * large address spaces more generically) is to pre-flight
	 * the resident page queue (if it's small enough) and develop
	 * a collection of extents (that encompass actual resident pages)
	 * to visit.  This will at least allow us to deal with some of the
	 * more pathological cases in a more efficient manner.  The current
	 * worst case (a single resident page at the end of an extremely large
	 * range) can take minutes to complete for ranges in the terabyte
	 * category... since this routine is called when truncating a file,
	 * and we currently support files up to 16 Tbytes in size, this
	 * is not a theoretical problem
	 */
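
	/*
	 * Worked example (added commentary, not in the original source):
	 * with EXTENT_SIZE of 256 MB, an object holding a single resident
	 * page at the end of a 1 TB range yields one extent whose e_min
	 * and e_max both land on that page, so the extent walk below
	 * visits one page instead of stepping through the hundreds of
	 * millions of page offsets in the full range.
	 */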

	if ((object->resident_page_count < RESIDENT_LIMIT) &&
	    (atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) {
		vm_page_t		next;
		vm_object_offset_t	start;
		vm_object_offset_t	end;
		vm_object_size_t	e_mask;
		vm_page_t		m;

		start = offset;
		end   = offset + size;
		num_of_extents = 0;
		e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));

		m = (vm_page_t) queue_first(&object->memq);

		while (!queue_end(&object->memq, (queue_entry_t) m)) {
			next = (vm_page_t) queue_next(&m->listq);

			if ((m->offset >= start) && (m->offset < end)) {
				/*
				 * this is a page we're interested in
				 * try to fit it into a current extent
				 */
				for (n = 0; n < num_of_extents; n++) {
					if ((m->offset & e_mask) == extents[n].e_base) {
						/*
						 * use (PAGE_SIZE - 1) to determine the
						 * max offset so that we don't wrap if
						 * we're at the last page of the space
						 */
						if (m->offset < extents[n].e_min)
							extents[n].e_min = m->offset;
						else if ((m->offset + (PAGE_SIZE - 1)) > extents[n].e_max)
							extents[n].e_max = m->offset + (PAGE_SIZE - 1);
						break;
					}
				}
				if (n == num_of_extents) {
					/*
					 * didn't find a current extent that can encompass
					 * this page
					 */
					if (n < MAX_EXTENTS) {
						/*
						 * if we still have room,
						 * create a new extent
						 */
						extents[n].e_base = m->offset & e_mask;
						extents[n].e_min  = m->offset;
						extents[n].e_max  = m->offset + (PAGE_SIZE - 1);

						num_of_extents++;
					} else {
						/*
						 * no room to create a new extent...
						 * fall back to a single extent based
						 * on the min and max page offsets
						 * we find in the range we're interested in...
						 * first, look through the extent list and
						 * develop the overall min and max for the
						 * pages we've looked at up to this point
						 */
						for (n = 1; n < num_of_extents; n++) {
							if (extents[n].e_min < extents[0].e_min)
								extents[0].e_min = extents[n].e_min;
							if (extents[n].e_max > extents[0].e_max)
								extents[0].e_max = extents[n].e_max;
						}
						/*
						 * now setup to run through the remaining pages
						 * to determine the overall min and max
						 * offset for the specified range
						 */
						extents[0].e_base = 0;
						e_mask = 0;
						num_of_extents = 1;

						/*
						 * by continuing, we'll reprocess the
						 * page that forced us to abandon trying
						 * to develop multiple extents
						 */
						continue;
					}
				}
			}
			m = next;
		}
	} else {
		extents[0].e_min = offset;
		extents[0].e_max = offset + (size - 1);

		num_of_extents = 1;
	}
	for (n = 0; n < num_of_extents; n++) {
		if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
					    should_flush, should_return, should_iosync, protection))
			data_returned = TRUE;
	}
	return (data_returned);
}

/*
 *	Routine:	memory_object_synchronize_completed [user interface]
 *
 *	Tell the kernel that previously synchronized data
 *	(memory_object_synchronize) has been queued or placed on the
 *	backing storage.
 *
 *	Note: there may be multiple synchronize requests for a given
 *	memory object outstanding, but they will not overlap.
 */

kern_return_t
memory_object_synchronize_completed(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	vm_offset_t		length)
{
	vm_object_t	object;
	msync_req_t	msr;

	object = memory_object_control_to_vm_object(control);

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
	    (integer_t)object, offset, length, 0, 0);

	/*
	 *	Look for bogus arguments
	 */

	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	/*
	 *	search for sync request structure
	 */
	queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
		if (msr->offset == offset && msr->length == length) {
			queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
			break;
		}
	}/* queue_iterate */

	if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
		vm_object_unlock(object);
		return KERN_INVALID_ARGUMENT;
	}

	msr_lock(msr);
	vm_object_unlock(object);
	msr->flag = VM_MSYNC_DONE;
	msr_unlock(msr);
	thread_wakeup((event_t) msr);

	return KERN_SUCCESS;
}/* memory_object_synchronize_completed */

static kern_return_t
vm_object_set_attributes_common(
	vm_object_t	object,
	boolean_t	may_cache,
	memory_object_copy_strategy_t copy_strategy,
	boolean_t	temporary,
	memory_object_cluster_size_t	cluster_size,
	boolean_t	silent_overwrite,
	boolean_t	advisory_pageout)
{
	boolean_t	object_became_ready;

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
	    (integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);

	if (object == VM_OBJECT_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 *	Verify the attributes of importance
	 */

	switch(copy_strategy) {
		case MEMORY_OBJECT_COPY_NONE:
		case MEMORY_OBJECT_COPY_DELAY:
			break;
		default:
			return(KERN_INVALID_ARGUMENT);
	}

#if	!ADVISORY_PAGEOUT
	if (silent_overwrite || advisory_pageout)
		return(KERN_INVALID_ARGUMENT);

#endif	/* !ADVISORY_PAGEOUT */
	if (may_cache)
		may_cache = TRUE;
	if (temporary)
		temporary = TRUE;
	if (cluster_size != 0) {
		int	pages_per_cluster;
		pages_per_cluster = atop_32(cluster_size);
		/*
		 * Cluster size must be integral multiple of page size,
		 * and be a power of 2 number of pages.
		 */
		if ((cluster_size & (PAGE_SIZE-1)) ||
		    ((pages_per_cluster-1) & pages_per_cluster))
			return KERN_INVALID_ARGUMENT;
	}
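
	/*
	 * For example (added commentary, not in the original source): on a
	 * 4 KB page system, a cluster_size of 32768 gives a pages_per_cluster
	 * of 8, and (8-1) & 8 == 0, so it is accepted; 12288 (3 pages) gives
	 * (3-1) & 3 == 2, so it is rejected as not a power-of-2 page count.
	 */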

	vm_object_lock(object);

	/*
	 *	Copy the attributes
	 */
	assert(!object->internal);
	object_became_ready = !object->pager_ready;
	object->copy_strategy = copy_strategy;
	object->can_persist = may_cache;
	object->temporary = temporary;
	object->silent_overwrite = silent_overwrite;
	object->advisory_pageout = advisory_pageout;
	if (cluster_size == 0)
		cluster_size = PAGE_SIZE;
	object->cluster_size = cluster_size;

	assert(cluster_size >= PAGE_SIZE &&
	       cluster_size % PAGE_SIZE == 0);

	/*
	 *	Wake up anyone waiting for the ready attribute
	 *	to become asserted.
	 */

	if (object_became_ready) {
		object->pager_ready = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
	}

	vm_object_unlock(object);

	return(KERN_SUCCESS);
}

/*
 *	Set the memory object attribute as provided.
 *
 *	XXX This routine cannot be completed until the vm_msync, clean
 *	    in place, and cluster work is completed.  See ifdef notyet
 *	    below and note that vm_object_set_attributes_common()
 *	    may have to be expanded.
 */
kern_return_t
memory_object_change_attributes(
	memory_object_control_t		control,
	memory_object_flavor_t		flavor,
	memory_object_info_t		attributes,
	mach_msg_type_number_t		count)
{
	vm_object_t			object;
	kern_return_t			result = KERN_SUCCESS;
	boolean_t			temporary;
	boolean_t			may_cache;
	boolean_t			invalidate;
	memory_object_cluster_size_t	cluster_size;
	memory_object_copy_strategy_t	copy_strategy;
	boolean_t			silent_overwrite;
	boolean_t			advisory_pageout;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	temporary = object->temporary;
	may_cache = object->can_persist;
	copy_strategy = object->copy_strategy;
	silent_overwrite = object->silent_overwrite;
	advisory_pageout = object->advisory_pageout;
#if notyet
	invalidate = object->invalidate;
#endif
	cluster_size = object->cluster_size;
	vm_object_unlock(object);

	switch (flavor) {
	    case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		old_memory_object_behave_info_t	behave;

		if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (old_memory_object_behave_info_t) attributes;

		temporary = behave->temporary;
		invalidate = behave->invalidate;
		copy_strategy = behave->copy_strategy;

		break;
	    }

	    case MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		memory_object_behave_info_t	behave;

		if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (memory_object_behave_info_t) attributes;

		temporary = behave->temporary;
		invalidate = behave->invalidate;
		copy_strategy = behave->copy_strategy;
		silent_overwrite = behave->silent_overwrite;
		advisory_pageout = behave->advisory_pageout;
		break;
	    }

	    case MEMORY_OBJECT_PERFORMANCE_INFO:
	    {
		memory_object_perf_info_t	perf;

		if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		perf = (memory_object_perf_info_t) attributes;

		may_cache = perf->may_cache;
		cluster_size = round_page_32(perf->cluster_size);

		break;
	    }

	    case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		old_memory_object_attr_info_t	attr;

		if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (old_memory_object_attr_info_t) attributes;

		may_cache = attr->may_cache;
		copy_strategy = attr->copy_strategy;
		cluster_size = page_size;

		break;
	    }

	    case MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		memory_object_attr_info_t	attr;

		if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (memory_object_attr_info_t) attributes;

		copy_strategy = attr->copy_strategy;
		may_cache = attr->may_cache_object;
		cluster_size = attr->cluster_size;
		temporary = attr->temporary;

		break;
	    }

	    default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	if (result != KERN_SUCCESS)
		return(result);

	if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
		copy_strategy = MEMORY_OBJECT_COPY_DELAY;
		temporary = TRUE;
	} else {
		temporary = FALSE;
	}

	/*
	 * XXX	may_cache may become a tri-valued variable to handle
	 * XXX	uncache if not in use.
	 */
	return (vm_object_set_attributes_common(object,
						may_cache,
						copy_strategy,
						temporary,
						cluster_size,
						silent_overwrite,
						advisory_pageout));
}

kern_return_t
memory_object_get_attributes(
	memory_object_control_t	control,
	memory_object_flavor_t	flavor,
	memory_object_info_t	attributes,	/* pointer to OUT array */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	kern_return_t	ret = KERN_SUCCESS;
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	switch (flavor) {
	    case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		old_memory_object_behave_info_t	behave;

		if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (old_memory_object_behave_info_t) attributes;
		behave->copy_strategy = object->copy_strategy;
		behave->temporary = object->temporary;
#if notyet	/* remove when vm_msync complies and clean in place fini */
		behave->invalidate = object->invalidate;
#else
		behave->invalidate = FALSE;
#endif

		*count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
		break;
	    }

	    case MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		memory_object_behave_info_t	behave;

		if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (memory_object_behave_info_t) attributes;
		behave->copy_strategy = object->copy_strategy;
		behave->temporary = object->temporary;
#if notyet	/* remove when vm_msync complies and clean in place fini */
		behave->invalidate = object->invalidate;
#else
		behave->invalidate = FALSE;
#endif
		behave->advisory_pageout = object->advisory_pageout;
		behave->silent_overwrite = object->silent_overwrite;
		*count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
		break;
	    }

	    case MEMORY_OBJECT_PERFORMANCE_INFO:
	    {
		memory_object_perf_info_t	perf;

		if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		perf = (memory_object_perf_info_t) attributes;
		perf->cluster_size = object->cluster_size;
		perf->may_cache = object->can_persist;

		*count = MEMORY_OBJECT_PERF_INFO_COUNT;
		break;
	    }

	    case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		old_memory_object_attr_info_t	attr;

		if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (old_memory_object_attr_info_t) attributes;
		attr->may_cache = object->can_persist;
		attr->copy_strategy = object->copy_strategy;

		*count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
		break;
	    }

	    case MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		memory_object_attr_info_t	attr;

		if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (memory_object_attr_info_t) attributes;
		attr->copy_strategy = object->copy_strategy;
		attr->cluster_size = object->cluster_size;
		attr->may_cache_object = object->can_persist;
		attr->temporary = object->temporary;

		*count = MEMORY_OBJECT_ATTR_INFO_COUNT;
		break;
	    }

	    default:
		ret = KERN_INVALID_ARGUMENT;
		break;
	}

	vm_object_unlock(object);

	return(ret);
}


kern_return_t
memory_object_iopl_request(
	ipc_port_t		port,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;
	int			caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
		vm_named_entry_t	named_entry;

		named_entry = (vm_named_entry_t)port->ip_kobject;
		/* a few checks to make sure user is obeying rules */
		if (*upl_size == 0) {
			if (offset >= named_entry->size)
				return(KERN_INVALID_RIGHT);
			*upl_size = named_entry->size - offset;
		}
		if (caller_flags & UPL_COPYOUT_FROM) {
			if ((named_entry->protection & VM_PROT_READ)
						!= VM_PROT_READ) {
				return(KERN_INVALID_RIGHT);
			}
		} else {
			if ((named_entry->protection &
				(VM_PROT_READ | VM_PROT_WRITE))
				!= (VM_PROT_READ | VM_PROT_WRITE)) {
				return(KERN_INVALID_RIGHT);
			}
		}
		if (named_entry->size < (offset + *upl_size))
			return(KERN_INVALID_ARGUMENT);

		/* the callers parameter offset is defined to be the */
		/* offset from beginning of named entry offset in object */
		offset = offset + named_entry->offset;

		if (named_entry->is_sub_map)
			return (KERN_INVALID_ARGUMENT);

		named_entry_lock(named_entry);

		if (named_entry->is_pager) {
			object = vm_object_enter(named_entry->backing.pager,
					named_entry->offset + named_entry->size,
					named_entry->internal,
					FALSE,
					FALSE);
			if (object == VM_OBJECT_NULL) {
				named_entry_unlock(named_entry);
				return(KERN_INVALID_OBJECT);
			}

			/* JMM - drop reference on pager here? */

			/* create an extra reference for the named entry */
			vm_object_lock(object);
			vm_object_reference_locked(object);
			named_entry->backing.object = object;
			named_entry->is_pager = FALSE;
			named_entry_unlock(named_entry);

			/* wait for object to be ready */
			while (!object->pager_ready) {
				vm_object_wait(object,
					VM_OBJECT_EVENT_PAGER_READY,
					THREAD_UNINT);
				vm_object_lock(object);
			}
			vm_object_unlock(object);
		} else {
			/* This is the case where we are going to map */
			/* an already mapped object.  If the object is */
			/* not ready it is internal.  An external */
			/* object cannot be mapped until it is ready; */
			/* we can therefore avoid the ready check */
			/* in this case.  */
			object = named_entry->backing.object;
			vm_object_reference(object);
			named_entry_unlock(named_entry);
		}
	} else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
		memory_object_control_t	control;
		control = (memory_object_control_t) port;
		if (control == NULL)
			return (KERN_INVALID_ARGUMENT);
		object = memory_object_control_to_vm_object(control);
		if (object == VM_OBJECT_NULL)
			return (KERN_INVALID_ARGUMENT);
		vm_object_reference(object);
	} else {
		return KERN_INVALID_ARGUMENT;
	}
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (!object->private) {
		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     caller_flags);
	vm_object_deallocate(object);
	return ret;
}

/*
 *	Routine:	memory_object_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *
 */

kern_return_t
memory_object_upl_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_upl_request(object,
				     offset,
				     size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     cntrl_flags);
}
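
/*
 * Illustrative sketch (not in the original source): a typical caller
 * builds a UPL over part of the object it controls, works on the
 * pages described by the page list, and finally aborts (or commits)
 * the UPL.  The function name and the zero cntrl_flags are
 * assumptions made for the example; real callers pass UPL_* control
 * flags and usually size the page list to their request.
 */
static kern_return_t
example_touch_range(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	upl_size_t		size)
{
	upl_t		upl;
	upl_page_info_t	page_list[MAX_UPL_TRANSFER];
	unsigned int	count = MAX_UPL_TRANSFER;
	kern_return_t	kr;

	kr = memory_object_upl_request(control, offset, size,
				       &upl, page_list, &count, 0);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... inspect or fill the "count" pages in page_list ... */

	upl_abort(upl, 0);	/* or commit, if the pages were modified */
	upl_deallocate(upl);
	return KERN_SUCCESS;
}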

/*
 *	Routine:	memory_object_super_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object
 *		in much the same way as memory_object_upl_request.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		However, the region may be expanded up to the super
 *		cluster size provided.
 */

kern_return_t
memory_object_super_upl_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_super_upl_request(object,
					   offset,
					   size,
					   super_cluster,
					   upl,
					   user_page_list,
					   page_list_count,
					   cntrl_flags);
}
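
/*
 * Illustrative sketch (not in the original source): the caller asks
 * for a single page but permits the request to be expanded up to a
 * 32-page super cluster for read-ahead.  The function name and the
 * multiple of 32 are assumptions chosen for the example.
 */
static kern_return_t
example_super_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	upl_t			*upl)
{
	unsigned int	count = 0;	/* no page list wanted */

	return memory_object_super_upl_request(control, offset,
					       PAGE_SIZE, 32 * PAGE_SIZE,
					       upl, NULL, &count, 0);
}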

int vm_stat_discard_cleared_reply = 0;
int vm_stat_discard_cleared_unset = 0;
int vm_stat_discard_cleared_too_late = 0;


/*
 *	Routine:	host_default_memory_manager [interface]
 *	Purpose:
 *		set/get the default memory manager port and default cluster
 *		size.
 *
 *		If successful, consumes the supplied naked send right.
 */
kern_return_t
host_default_memory_manager(
	host_priv_t		host_priv,
	memory_object_default_t	*default_manager,
	memory_object_cluster_size_t cluster_size)
{
	memory_object_default_t current_manager;
	memory_object_default_t new_manager;
	memory_object_default_t returned_manager;

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	assert(host_priv == &realhost);

	new_manager = *default_manager;
	mutex_lock(&memory_manager_default_lock);
	current_manager = memory_manager_default;

	if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
		/*
		 *	Retrieve the current value.
		 */
		memory_object_default_reference(current_manager);
		returned_manager = current_manager;
	} else {
		/*
		 *	Retrieve the current value,
		 *	and replace it with the supplied value.
		 *	We return the old reference to the caller
		 *	but we have to take a reference on the new
		 *	one.
		 */

		returned_manager = current_manager;
		memory_manager_default = new_manager;
		memory_object_default_reference(new_manager);

		if (cluster_size % PAGE_SIZE != 0) {
#if 0
			mutex_unlock(&memory_manager_default_lock);
			return KERN_INVALID_ARGUMENT;
#else
			cluster_size = round_page_32(cluster_size);
#endif
		}
		memory_manager_default_cluster = cluster_size;

		/*
		 *	In case anyone's been waiting for a memory
		 *	manager to be established, wake them up.
		 */

		thread_wakeup((event_t) &memory_manager_default);
	}

	mutex_unlock(&memory_manager_default_lock);

	*default_manager = returned_manager;
	return (KERN_SUCCESS);
}
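
/*
 * Illustrative sketch (not in the original source): a privileged
 * caller installs a new default manager and releases the send right
 * it gets back for the previous one.  The function name and the
 * PAGE_SIZE cluster choice are assumptions made for the example.
 */
static kern_return_t
example_install_default_manager(
	host_priv_t		host_priv,
	memory_object_default_t	new_manager)
{
	memory_object_default_t	manager = new_manager;
	kern_return_t		kr;

	kr = host_default_memory_manager(host_priv, &manager, PAGE_SIZE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* on success, "manager" now holds the previous manager */
	if (manager != MEMORY_OBJECT_DEFAULT_NULL)
		memory_object_default_deallocate(manager);
	return KERN_SUCCESS;
}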

/*
 *	Routine:	memory_manager_default_reference
 *	Purpose:
 *		Returns a naked send right for the default
 *		memory manager.  The returned right is always
 *		valid (not IP_NULL or IP_DEAD).
 */

__private_extern__ memory_object_default_t
memory_manager_default_reference(
	memory_object_cluster_size_t *cluster_size)
{
	memory_object_default_t current_manager;

	mutex_lock(&memory_manager_default_lock);
	current_manager = memory_manager_default;
	while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
		wait_result_t res;

		res = thread_sleep_mutex((event_t) &memory_manager_default,
					 &memory_manager_default_lock,
					 THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
		current_manager = memory_manager_default;
	}
	memory_object_default_reference(current_manager);
	*cluster_size = memory_manager_default_cluster;
	mutex_unlock(&memory_manager_default_lock);

	return current_manager;
}
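
/*
 * Illustrative sketch (not in the original source): internal VM code
 * blocks until a default manager exists, uses it, and then drops the
 * reference it was handed.  Both calls are defined in this file.
 */
static void
example_use_default_manager(void)
{
	memory_object_cluster_size_t	cluster_size;
	memory_object_default_t		dmm;

	/* sleeps until host_default_memory_manager() installs one */
	dmm = memory_manager_default_reference(&cluster_size);

	/* ... e.g. pass dmm to memory_object_create() ... */

	memory_object_default_deallocate(dmm);
}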

/*
 *	Routine:	memory_manager_default_check
 *
 *	Purpose:
 *		Check whether a default memory manager has been set
 *		up yet.  Returns KERN_SUCCESS if the default memory
 *		manager exists, and KERN_FAILURE if it does not.
 *
 *		If there is no default memory manager, log an error,
 *		but only the first time.
 *
 */
__private_extern__ kern_return_t
memory_manager_default_check(void)
{
	memory_object_default_t current;

	mutex_lock(&memory_manager_default_lock);
	current = memory_manager_default;
	if (current == MEMORY_OBJECT_DEFAULT_NULL) {
		static boolean_t logged;	/* initialized to 0 */
		boolean_t	complain = !logged;
		logged = TRUE;
		mutex_unlock(&memory_manager_default_lock);
		if (complain)
			printf("Warning: No default memory manager\n");
		return (KERN_FAILURE);
	} else {
		mutex_unlock(&memory_manager_default_lock);
		return (KERN_SUCCESS);
	}
}

__private_extern__ void
memory_manager_default_init(void)
{
	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
	mutex_init(&memory_manager_default_lock, 0);
}


/*
 *	Allow manipulation of individual page state.  This is actually
 *	part of the UPL regimen but takes place on the object rather
 *	than on a UPL.
 */

kern_return_t
memory_object_page_op(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_page_op(object, offset, ops, phys_entry, flags);
}
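
/*
 * Illustrative sketch (not in the original source): probe one page's
 * state without constructing a UPL.  The UPL_POP_* bits come from
 * <mach/memory_object_types.h>; treating a zero "ops" argument as a
 * pure query is an assumption made for the example.
 */
static boolean_t
example_page_is_dirty(
	memory_object_control_t	control,
	memory_object_offset_t	offset)
{
	ppnum_t	phys;
	int	flags;

	if (memory_object_page_op(control, offset, 0,
				  &phys, &flags) != KERN_SUCCESS)
		return FALSE;	/* page not resident */

	return (flags & UPL_POP_DIRTY) ? TRUE : FALSE;
}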

/*
 *	memory_object_range_op offers a performance enhancement over
 *	memory_object_page_op for page_op functions which do not require
 *	page level state to be returned from the call.  Page_op was created
 *	to provide a low-cost alternative to page manipulation via UPLs
 *	when only a single page was involved.  The range_op call extends
 *	the _op family of functions to work on multiple pages, where the
 *	lack of page level state handling allows the caller to avoid the
 *	overhead of the upl structures.
 */

kern_return_t
memory_object_range_op(
	memory_object_control_t	control,
	memory_object_offset_t	offset_beg,
	memory_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_range_op(object,
				  offset_beg,
				  offset_end,
				  ops,
				  range);
}
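
/*
 * Illustrative sketch (not in the original source): discard every
 * page in [start, end) with one call instead of a page_op per page.
 * UPL_ROP_DUMP is declared in <mach/memory_object_types.h>; the
 * interpretation of "range" as the extent actually processed is an
 * assumption made for the example.
 */
static kern_return_t
example_discard_range(
	memory_object_control_t	control,
	memory_object_offset_t	start,
	memory_object_offset_t	end)
{
	int	range;

	return memory_object_range_op(control, start, end,
				      UPL_ROP_DUMP, &range);
}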


kern_return_t
memory_object_pages_resident(
	memory_object_control_t	control,
	boolean_t		*has_pages_resident)
{
	vm_object_t		object;

	*has_pages_resident = FALSE;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (object->resident_page_count)
		*has_pages_resident = TRUE;

	return (KERN_SUCCESS);
}


static zone_t mem_obj_control_zone;

__private_extern__ void
memory_object_control_bootstrap(void)
{
	int	i;

	i = (vm_size_t) sizeof (struct memory_object_control);
	mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
	return;
}

__private_extern__ memory_object_control_t
memory_object_control_allocate(
	vm_object_t		object)
{
	memory_object_control_t control;

	control = (memory_object_control_t)zalloc(mem_obj_control_zone);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {
		control->moc_object = object;
		control->moc_ikot = IKOT_MEM_OBJ_CONTROL; /* fake ip_kotype */
	}
	return (control);
}

__private_extern__ void
memory_object_control_collapse(
	memory_object_control_t	control,
	vm_object_t		object)
{
	assert((control->moc_object != VM_OBJECT_NULL) &&
	       (control->moc_object != object));
	control->moc_object = object;
}

__private_extern__ vm_object_t
memory_object_control_to_vm_object(
	memory_object_control_t	control)
{
	if (control == MEMORY_OBJECT_CONTROL_NULL ||
	    control->moc_ikot != IKOT_MEM_OBJ_CONTROL)
		return VM_OBJECT_NULL;

	return (control->moc_object);
}
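
/*
 * Illustrative sketch (not in the original source): because moc_ikot
 * is initialized to IKOT_MEM_OBJ_CONTROL above ("fake ip_kotype"),
 * code such as the IOPL path earlier in this file can apply
 * ip_kotype() to a pointer that may be either a real port or a
 * control structure and discriminate between the two.
 */
static boolean_t
example_is_mem_obj_control(
	ipc_port_t	port)
{
	/* true when the "port" is a control structure in disguise */
	return (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL);
}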

memory_object_control_t
convert_port_to_mo_control(
	__unused mach_port_t	port)
{
	return MEMORY_OBJECT_CONTROL_NULL;
}


mach_port_t
convert_mo_control_to_port(
	__unused memory_object_control_t	control)
{
	return MACH_PORT_NULL;
}

void
memory_object_control_reference(
	__unused memory_object_control_t	control)
{
	return;
}

/*
 * We only ever issue one of these references, so kill it
 * when that gets released (should switch to real reference
 * counting in true port-less EMMI).
 */
void
memory_object_control_deallocate(
	memory_object_control_t	control)
{
	zfree(mem_obj_control_zone, control);
}

void
memory_object_control_disable(
	memory_object_control_t	control)
{
	assert(control->moc_object != VM_OBJECT_NULL);
	control->moc_object = VM_OBJECT_NULL;
}

void
memory_object_default_reference(
	memory_object_default_t	dmm)
{
	ipc_port_make_send(dmm);
}

void
memory_object_default_deallocate(
	memory_object_default_t	dmm)
{
	ipc_port_release_send(dmm);
}

memory_object_t
convert_port_to_memory_object(
	__unused mach_port_t	port)
{
	return (MEMORY_OBJECT_NULL);
}


mach_port_t
convert_memory_object_to_port(
	__unused memory_object_t	object)
{
	return (MACH_PORT_NULL);
}


/* Routine memory_object_reference */
void memory_object_reference(
	memory_object_t memory_object)
{
	(memory_object->mo_pager_ops->memory_object_reference)(
		memory_object);
}

/* Routine memory_object_deallocate */
void memory_object_deallocate(
	memory_object_t memory_object)
{
	(memory_object->mo_pager_ops->memory_object_deallocate)(
		memory_object);
}


/* Routine memory_object_init */
kern_return_t memory_object_init
(
	memory_object_t memory_object,
	memory_object_control_t memory_control,
	memory_object_cluster_size_t memory_object_page_size
)
{
	return (memory_object->mo_pager_ops->memory_object_init)(
		memory_object,
		memory_control,
		memory_object_page_size);
}

/* Routine memory_object_terminate */
kern_return_t memory_object_terminate
(
	memory_object_t memory_object
)
{
	return (memory_object->mo_pager_ops->memory_object_terminate)(
		memory_object);
}

/* Routine memory_object_data_request */
kern_return_t memory_object_data_request
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	vm_prot_t desired_access
)
{
	return (memory_object->mo_pager_ops->memory_object_data_request)(
		memory_object,
		offset,
		length,
		desired_access);
}

/* Routine memory_object_data_return */
kern_return_t memory_object_data_return
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	vm_size_t size,
	memory_object_offset_t *resid_offset,
	int *io_error,
	boolean_t dirty,
	boolean_t kernel_copy,
	int upl_flags
)
{
	return (memory_object->mo_pager_ops->memory_object_data_return)(
		memory_object,
		offset,
		size,
		resid_offset,
		io_error,
		dirty,
		kernel_copy,
		upl_flags);
}

/* Routine memory_object_data_initialize */
kern_return_t memory_object_data_initialize
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	vm_size_t size
)
{
	return (memory_object->mo_pager_ops->memory_object_data_initialize)(
		memory_object,
		offset,
		size);
}

/* Routine memory_object_data_unlock */
kern_return_t memory_object_data_unlock
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	vm_size_t size,
	vm_prot_t desired_access
)
{
	return (memory_object->mo_pager_ops->memory_object_data_unlock)(
		memory_object,
		offset,
		size,
		desired_access);
}

/* Routine memory_object_synchronize */
kern_return_t memory_object_synchronize
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	vm_size_t size,
	vm_sync_t sync_flags
)
{
	return (memory_object->mo_pager_ops->memory_object_synchronize)(
		memory_object,
		offset,
		size,
		sync_flags);
}

/* Routine memory_object_unmap */
kern_return_t memory_object_unmap
(
	memory_object_t memory_object
)
{
	return (memory_object->mo_pager_ops->memory_object_unmap)(
		memory_object);
}
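
/*
 * Illustrative sketch (not in the original source): a pager supplies
 * the mo_pager_ops table that the wrappers above dispatch through.
 * The struct tag and the example_pager_* methods are assumptions;
 * the member names are taken from the dispatch sites above, and the
 * methods themselves would be defined by the pager elsewhere.
 */
static const struct memory_object_pager_ops example_pager_ops = {
	.memory_object_reference	= example_pager_reference,
	.memory_object_deallocate	= example_pager_deallocate,
	.memory_object_init		= example_pager_init,
	.memory_object_terminate	= example_pager_terminate,
	.memory_object_data_request	= example_pager_data_request,
	.memory_object_data_return	= example_pager_data_return,
	.memory_object_data_initialize	= example_pager_data_initialize,
	.memory_object_data_unlock	= example_pager_data_unlock,
	.memory_object_synchronize	= example_pager_synchronize,
	.memory_object_unmap		= example_pager_unmap,
};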

/* Routine memory_object_create */
kern_return_t memory_object_create
(
	memory_object_default_t default_memory_manager,
	vm_size_t new_memory_object_size,
	memory_object_t *new_memory_object
)
{
	return default_pager_memory_object_create(default_memory_manager,
						  new_memory_object_size,
						  new_memory_object);
}

upl_t
convert_port_to_upl(
	ipc_port_t	port)
{
	upl_t upl;

	ip_lock(port);
	if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
		ip_unlock(port);
		return (upl_t)NULL;
	}
	upl = (upl_t) port->ip_kobject;
	ip_unlock(port);
	upl_lock(upl);
	upl->ref_count += 1;
	upl_unlock(upl);
	return upl;
}

mach_port_t
convert_upl_to_port(
	__unused upl_t	upl)
{
	return MACH_PORT_NULL;
}

__private_extern__ void
upl_no_senders(
	__unused ipc_port_t		port,
	__unused mach_port_mscount_t	mscount)
{
	return;
}