osfmk/vm/memory_object.c (apple/xnu, release xnu-201.42.3)
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52/*
53 * File: vm/memory_object.c
54 * Author: Michael Wayne Young
55 *
56 * External memory management interface control functions.
57 */
58
59#include <advisory_pageout.h>
60
61/*
62 * Interface dependencies:
63 */
64
65#include <mach/std_types.h> /* For pointer_t */
66#include <mach/mach_types.h>
67
 68#include <mach/mig.h>
69#include <mach/kern_return.h>
70#include <mach/memory_object.h>
71#include <mach/memory_object_default.h>
72#include <mach/memory_object_control_server.h>
 73#include <mach/host_priv_server.h>
74#include <mach/boolean.h>
75#include <mach/vm_prot.h>
76#include <mach/message.h>
77
78/*
79 * Implementation dependencies:
80 */
81#include <string.h> /* For memcpy() */
82
83#include <kern/xpr.h>
84#include <kern/host.h>
85#include <kern/thread.h> /* For current_thread() */
86#include <kern/ipc_mig.h>
87#include <kern/misc_protos.h>
88
89#include <vm/vm_object.h>
90#include <vm/vm_fault.h>
91#include <vm/memory_object.h>
92#include <vm/vm_page.h>
93#include <vm/vm_pageout.h>
94#include <vm/pmap.h> /* For pmap_clear_modify */
95#include <vm/vm_kern.h> /* For kernel_map, vm_move */
96#include <vm/vm_map.h> /* For vm_map_pageable */
97
98#if MACH_PAGEMAP
99#include <vm/vm_external.h>
100#endif /* MACH_PAGEMAP */
101
102
103memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
104vm_size_t memory_manager_default_cluster = 0;
105decl_mutex_data(, memory_manager_default_lock)
106
107/*
108 * Forward ref to file-local function:
109 */
110boolean_t
 111vm_object_update(vm_object_t, vm_object_offset_t,
112 vm_size_t, memory_object_return_t, int, vm_prot_t);
113
114
115/*
116 * Routine: memory_object_should_return_page
117 *
118 * Description:
119 * Determine whether the given page should be returned,
120 * based on the page's state and on the given return policy.
121 *
122 * We should return the page if one of the following is true:
123 *
124 * 1. Page is dirty and should_return is not RETURN_NONE.
125 * 2. Page is precious and should_return is RETURN_ALL.
126 * 3. Should_return is RETURN_ANYTHING.
127 *
128 * As a side effect, m->dirty will be made consistent
129 * with pmap_is_modified(m), if should_return is not
130 * MEMORY_OBJECT_RETURN_NONE.
131 */
132
133#define memory_object_should_return_page(m, should_return) \
134 (should_return != MEMORY_OBJECT_RETURN_NONE && \
135 (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_addr))) || \
136 ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
137 (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
138
139typedef int memory_object_lock_result_t;
140
141#define MEMORY_OBJECT_LOCK_RESULT_DONE 0
142#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
143#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
144#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
145
146memory_object_lock_result_t memory_object_lock_page(
147 vm_page_t m,
148 memory_object_return_t should_return,
149 boolean_t should_flush,
150 vm_prot_t prot);
151
152/*
153 * Routine: memory_object_lock_page
154 *
155 * Description:
156 * Perform the appropriate lock operations on the
157 * given page. See the description of
158 * "memory_object_lock_request" for the meanings
159 * of the arguments.
160 *
161 * Returns an indication that the operation
162 * completed, blocked, or that the page must
163 * be cleaned.
164 */
165memory_object_lock_result_t
166memory_object_lock_page(
167 vm_page_t m,
168 memory_object_return_t should_return,
169 boolean_t should_flush,
170 vm_prot_t prot)
171{
172 XPR(XPR_MEMORY_OBJECT,
173 "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
174 (integer_t)m, should_return, should_flush, prot, 0);
175
176 /*
177 * If we cannot change access to the page,
178 * either because a mapping is in progress
179 * (busy page) or because a mapping has been
180 * wired, then give up.
181 */
182
183 if (m->busy || m->cleaning)
184 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
185
186 /*
187 * Don't worry about pages for which the kernel
188 * does not have any data.
189 */
190
191 if (m->absent || m->error || m->restart) {
192 if(m->error && should_flush) {
193 /* dump the page, pager wants us to */
194 /* clean it up and there is no */
195 /* relevant data to return */
196 if(m->wire_count == 0) {
197 VM_PAGE_FREE(m);
198 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
199 }
200 } else {
201 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
202 }
203 }
204
205 assert(!m->fictitious);
206
207 if (m->wire_count != 0) {
208 /*
209 * If no change would take place
210 * anyway, return successfully.
211 *
212 * No change means:
213 * Not flushing AND
214 * No change to page lock [2 checks] AND
215 * Should not return page
216 *
217 * XXX This doesn't handle sending a copy of a wired
218 * XXX page to the pager, but that will require some
219 * XXX significant surgery.
220 */
221 if (!should_flush &&
222 (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
223 ! memory_object_should_return_page(m, should_return)) {
224
225 /*
226 * Restart page unlock requests,
227 * even though no change took place.
228 * [Memory managers may be expecting
229 * to see new requests.]
230 */
231 m->unlock_request = VM_PROT_NONE;
232 PAGE_WAKEUP(m);
233
234 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
235 }
236
237 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
238 }
239
240 /*
241 * If the page is to be flushed, allow
242 * that to be done as part of the protection.
243 */
244
245 if (should_flush)
246 prot = VM_PROT_ALL;
247
248 /*
249 * Set the page lock.
250 *
251 * If we are decreasing permission, do it now;
252 * let the fault handler take care of increases
253 * (pmap_page_protect may not increase protection).
254 */
255
256 if (prot != VM_PROT_NO_CHANGE) {
257 if ((m->page_lock ^ prot) & prot) {
258 pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot);
259 }
260#if 0
261 /* code associated with the vestigial
262 * memory_object_data_unlock
263 */
264 m->page_lock = prot;
265 m->lock_supplied = TRUE;
266 if (prot != VM_PROT_NONE)
267 m->unusual = TRUE;
268 else
269 m->unusual = FALSE;
270
271 /*
272 * Restart any past unlock requests, even if no
273 * change resulted. If the manager explicitly
274 * requested no protection change, then it is assumed
275 * to be remembering past requests.
276 */
277
278 m->unlock_request = VM_PROT_NONE;
279#endif /* 0 */
280 PAGE_WAKEUP(m);
281 }
282
283 /*
284 * Handle page returning.
285 */
286
287 if (memory_object_should_return_page(m, should_return)) {
288
289 /*
290 * If we weren't planning
291 * to flush the page anyway,
292 * we may need to remove the
293 * page from the pageout
294 * system and from physical
295 * maps now.
296 */
297
298 vm_page_lock_queues();
299 VM_PAGE_QUEUES_REMOVE(m);
300 vm_page_unlock_queues();
301
302 if (!should_flush)
303 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
304
305 if (m->dirty)
306 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
307 else
308 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
309 }
310
311 /*
312 * Handle flushing
313 */
314
315 if (should_flush) {
316 VM_PAGE_FREE(m);
317 } else {
318 extern boolean_t vm_page_deactivate_hint;
319
320 /*
321 * XXX Make clean but not flush a paging hint,
322 * and deactivate the pages. This is a hack
323 * because it overloads flush/clean with
324 * implementation-dependent meaning. This only
325 * happens to pages that are already clean.
326 */
327
328 if (vm_page_deactivate_hint &&
329 (should_return != MEMORY_OBJECT_RETURN_NONE)) {
330 vm_page_lock_queues();
331 vm_page_deactivate(m);
332 vm_page_unlock_queues();
333 }
334 }
335
336 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
337}
 338
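/*
 * Push a run of pages that must be cleaned or returned out to the
 * pager.  The object lock is dropped around the (possibly blocking)
 * memory_object_data_return() call and reacquired afterwards.  Note
 * that "should_flush" is picked up from the enclosing scope, and that
 * the pages are reported as dirty only for the MUST_CLEAN case.
 */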
339#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po) \
340MACRO_BEGIN \
341 \
342 register int i; \
343 register vm_page_t hp; \
344 \
345 vm_object_unlock(object); \
346 \
 347	(void) memory_object_data_return(object->pager,		\
 348		po,							\
349 data_cnt, \
350 (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
351 !should_flush); \
352 \
353 vm_object_lock(object); \
354MACRO_END
355
356/*
357 * Routine: memory_object_lock_request [user interface]
358 *
359 * Description:
360 * Control use of the data associated with the given
361 * memory object. For each page in the given range,
362 * perform the following operations, in order:
363 * 1) restrict access to the page (disallow
364 * forms specified by "prot");
365 * 2) return data to the manager (if "should_return"
366 * is RETURN_DIRTY and the page is dirty, or
367 * "should_return" is RETURN_ALL and the page
368 * is either dirty or precious); and,
369 * 3) flush the cached copy (if "should_flush"
370 * is asserted).
371 * The set of pages is defined by a starting offset
372 * ("offset") and size ("size"). Only pages with the
373 * same page alignment as the starting offset are
374 * considered.
375 *
 376 *		The operation is synchronous: this call does not return
 377 *		until the requested actions have been completed.  (The
 378 *		old "reply_to" acknowledgement port is no longer used.)
379 */
380
381kern_return_t
382memory_object_lock_request(
383 memory_object_control_t control,
384 memory_object_offset_t offset,
385 memory_object_size_t size,
386 memory_object_return_t should_return,
387 int flags,
 388	vm_prot_t		prot)
 389{
 390	vm_object_t		object;
391 vm_object_offset_t original_offset = offset;
392 boolean_t should_flush=flags & MEMORY_OBJECT_DATA_FLUSH;
393
394 XPR(XPR_MEMORY_OBJECT,
395 "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
396 (integer_t)control, offset, size,
397 (((should_return&1)<<1)|should_flush), prot);
398
399 /*
400 * Check for bogus arguments.
401 */
 402	object = memory_object_control_to_vm_object(control);
403 if (object == VM_OBJECT_NULL)
404 return (KERN_INVALID_ARGUMENT);
405
 406	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
 407		return (KERN_INVALID_ARGUMENT);
408
409 size = round_page(size);
410
411 /*
412 * Lock the object, and acquire a paging reference to
 413	 *	prevent the memory_object reference from being released.
 414	 */
415 vm_object_lock(object);
416 vm_object_paging_begin(object);
417 offset -= object->paging_offset;
418
 419	(void)vm_object_update(object,
420 offset, size, should_return, flags, prot);
421
422 vm_object_paging_end(object);
423 vm_object_unlock(object);
424
425 return (KERN_SUCCESS);
426}
427
428/*
429 * memory_object_release_name: [interface]
430 *
 431 *	Enforces the name semantics on a memory_object reference count decrement.
 432 *	This routine should not be called unless the caller holds a name
 433 *	reference gained through the memory_object_named_create or the
 434 *	memory_object_rename call.
 435 *	If the TERMINATE_IDLE flag is set, the call returns immediately if the
 436 *	reference count is not 1, i.e. unless the object is idle with the name
 437 *	as its only remaining reference.
 438 *	If the decision is made to proceed, the named flag is cleared and the
 439 *	reference count is decremented.  If the RESPECT_CACHE flag is set and
 440 *	the reference count has gone to zero, the memory_object is checked to
 441 *	see if it is cacheable; otherwise, when the reference count reaches
 442 *	zero, it is simply terminated.
443 */
444
445kern_return_t
446memory_object_release_name(
447 memory_object_control_t control,
448 int flags)
449{
450 vm_object_t object;
451
452 object = memory_object_control_to_vm_object(control);
453 if (object == VM_OBJECT_NULL)
454 return (KERN_INVALID_ARGUMENT);
455
456 return vm_object_release_name(object, flags);
457}
458
459
460
461/*
462 * Routine: memory_object_destroy [user interface]
463 * Purpose:
464 * Shut down a memory object, despite the
465 * presence of address map (or other) references
466 * to the vm_object.
467 */
468kern_return_t
469memory_object_destroy(
470 memory_object_control_t control,
471 kern_return_t reason)
472{
473 vm_object_t object;
474
475 object = memory_object_control_to_vm_object(control);
476 if (object == VM_OBJECT_NULL)
477 return (KERN_INVALID_ARGUMENT);
478
479 return (vm_object_destroy(object, reason));
480}
481
482/*
483 * Routine: vm_object_sync
484 *
485 * Kernel internal function to synch out pages in a given
486 * range within an object to its memory manager. Much the
487 * same as memory_object_lock_request but page protection
488 * is not changed.
489 *
 490 *	If the should_flush and should_return flags are true, pages
 491 *	are flushed; that is, dirty and precious pages are written to
 492 *	the memory manager and then discarded.  If should_return
493 * is false, only precious pages are returned to the memory
494 * manager.
495 *
 496 *	If should_flush is false and should_return is true, the memory
497 * manager's copy of the pages is updated. If should_return
498 * is also false, only the precious pages are updated. This
499 * last option is of limited utility.
500 *
501 * Returns:
502 * FALSE if no pages were returned to the pager
503 * TRUE otherwise.
504 */
505
506boolean_t
 507vm_object_sync(
508 vm_object_t object,
509 vm_object_offset_t offset,
 510	vm_size_t		size,
511 boolean_t should_flush,
512 boolean_t should_return)
513{
514 boolean_t rv;
515
516 XPR(XPR_VM_OBJECT,
517 "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
518 (integer_t)object, offset, size, should_flush, should_return);
519
520 /*
521 * Lock the object, and acquire a paging reference to
522 * prevent the memory_object and control ports from
523 * being destroyed.
524 */
525 vm_object_lock(object);
526 vm_object_paging_begin(object);
527
 528	rv = vm_object_update(object, offset, size,
529 (should_return) ?
530 MEMORY_OBJECT_RETURN_ALL :
531 MEMORY_OBJECT_RETURN_NONE,
532 (should_flush) ?
533 MEMORY_OBJECT_DATA_FLUSH : 0,
534 VM_PROT_NO_CHANGE);
535
536
537 vm_object_paging_end(object);
538 vm_object_unlock(object);
539 return rv;
540}
541
542/*
 543 *	Routine:	vm_object_update
 544 *	Description:
 545 *		Work function for m_o_lock_request(), vm_o_sync().
546 *
547 * Called with object locked and paging ref taken.
548 */
549kern_return_t
 550vm_object_update(
551 register vm_object_t object,
552 register vm_object_offset_t offset,
553 register vm_size_t size,
554 memory_object_return_t should_return,
555 int flags,
556 vm_prot_t prot)
557{
558 register vm_page_t m;
559 vm_page_t holding_page;
560 vm_size_t original_size = size;
561 vm_object_offset_t paging_offset = 0;
562 vm_object_t copy_object;
563 vm_size_t data_cnt = 0;
564 vm_object_offset_t last_offset = offset;
565 memory_object_lock_result_t page_lock_result;
566 memory_object_lock_result_t pageout_action;
567 boolean_t data_returned = FALSE;
568 boolean_t update_cow;
569 boolean_t should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
 570	boolean_t		pending_pageout = FALSE;
571
572 /*
573 * To avoid blocking while scanning for pages, save
574 * dirty pages to be cleaned all at once.
575 *
576 * XXXO A similar strategy could be used to limit the
577 * number of times that a scan must be restarted for
578 * other reasons. Those pages that would require blocking
579 * could be temporarily collected in another list, or
580 * their offsets could be recorded in a small array.
581 */
582
583 /*
584 * XXX NOTE: May want to consider converting this to a page list
585 * XXX vm_map_copy interface. Need to understand object
586 * XXX coalescing implications before doing so.
587 */
588
589 update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
590 && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
591 !(flags & MEMORY_OBJECT_DATA_PURGE)))
592 || (flags & MEMORY_OBJECT_COPY_SYNC);
593
594
595 if((((copy_object = object->copy) != NULL) && update_cow) ||
596 (flags & MEMORY_OBJECT_DATA_SYNC)) {
597 vm_size_t i;
598 vm_size_t copy_size;
599 vm_object_offset_t copy_offset;
600 vm_prot_t prot;
601 vm_page_t page;
602 vm_page_t top_page;
603 kern_return_t error = 0;
604
605 if(copy_object != NULL) {
606 /* translate offset with respect to shadow's offset */
607 copy_offset = (offset >= copy_object->shadow_offset)?
608 offset - copy_object->shadow_offset :
609 (vm_object_offset_t) 0;
610 if(copy_offset > copy_object->size)
611 copy_offset = copy_object->size;
612
613 /* clip size with respect to shadow offset */
614 copy_size = (offset >= copy_object->shadow_offset) ?
615 size : size - (copy_object->shadow_offset - offset);
616
617 if(copy_size <= 0) {
618 copy_size = 0;
619 } else {
620 copy_size = ((copy_offset + copy_size)
621 <= copy_object->size) ?
622 copy_size : copy_object->size - copy_offset;
623 }
624 /* check for a copy_offset which is beyond the end of */
625 /* the copy_object */
626 if(copy_size < 0)
627 copy_size = 0;
628
629 copy_size+=offset;
630
631 vm_object_unlock(object);
632 vm_object_lock(copy_object);
633 } else {
634 copy_object = object;
635
636 copy_size = offset + size;
637 copy_offset = offset;
638 }
639
640 vm_object_paging_begin(copy_object);
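		/*
		 * Fault each page of the affected range in through the
		 * copy object.  This forces the copy-on-write copy to be
		 * populated before the source pages are flushed or
		 * returned below, so the copy does not lose data.
		 */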
641 for (i=copy_offset; i<copy_size; i+=PAGE_SIZE) {
642 RETRY_COW_OF_LOCK_REQUEST:
643 prot = VM_PROT_WRITE|VM_PROT_READ;
644 switch (vm_fault_page(copy_object, i,
645 VM_PROT_WRITE|VM_PROT_READ,
646 FALSE,
647 THREAD_UNINT,
648 copy_offset,
649 copy_offset+copy_size,
650 VM_BEHAVIOR_SEQUENTIAL,
651 &prot,
652 &page,
653 &top_page,
654 (int *)0,
655 &error,
656 FALSE,
 657				FALSE, NULL, 0)) {
658
659 case VM_FAULT_SUCCESS:
660 if(top_page) {
661 vm_fault_cleanup(
662 page->object, top_page);
663 PAGE_WAKEUP_DONE(page);
664 vm_page_lock_queues();
665 if (!page->active && !page->inactive)
666 vm_page_activate(page);
667 vm_page_unlock_queues();
668 vm_object_lock(copy_object);
669 vm_object_paging_begin(copy_object);
670 } else {
671 PAGE_WAKEUP_DONE(page);
672 vm_page_lock_queues();
673 if (!page->active && !page->inactive)
674 vm_page_activate(page);
675 vm_page_unlock_queues();
676 }
677 break;
678 case VM_FAULT_RETRY:
679 prot = VM_PROT_WRITE|VM_PROT_READ;
680 vm_object_lock(copy_object);
681 vm_object_paging_begin(copy_object);
682 goto RETRY_COW_OF_LOCK_REQUEST;
683 case VM_FAULT_INTERRUPTED:
684 prot = VM_PROT_WRITE|VM_PROT_READ;
685 vm_object_lock(copy_object);
686 vm_object_paging_begin(copy_object);
687 goto RETRY_COW_OF_LOCK_REQUEST;
688 case VM_FAULT_MEMORY_SHORTAGE:
689 VM_PAGE_WAIT();
690 prot = VM_PROT_WRITE|VM_PROT_READ;
691 vm_object_lock(copy_object);
692 vm_object_paging_begin(copy_object);
693 goto RETRY_COW_OF_LOCK_REQUEST;
694 case VM_FAULT_FICTITIOUS_SHORTAGE:
695 vm_page_more_fictitious();
696 prot = VM_PROT_WRITE|VM_PROT_READ;
697 vm_object_lock(copy_object);
698 vm_object_paging_begin(copy_object);
699 goto RETRY_COW_OF_LOCK_REQUEST;
700 case VM_FAULT_MEMORY_ERROR:
701 vm_object_lock(object);
702 goto BYPASS_COW_COPYIN;
703 }
704
705 }
706 vm_object_paging_end(copy_object);
707 if(copy_object != object) {
708 vm_object_unlock(copy_object);
709 vm_object_lock(object);
710 }
711 }
712 if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
713 return KERN_SUCCESS;
714 }
715 if(((copy_object = object->copy) != NULL) &&
716 (flags & MEMORY_OBJECT_DATA_PURGE)) {
717 copy_object->shadow_severed = TRUE;
718 copy_object->shadowed = FALSE;
719 copy_object->shadow = NULL;
720 /* delete the ref the COW was holding on the target object */
721 vm_object_deallocate(object);
722 }
723BYPASS_COW_COPYIN:
724
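	/*
	 * Scan the range one page at a time.  Each resident page is
	 * examined by memory_object_lock_page(); pages that must be
	 * cleaned or returned are accumulated into a contiguous run
	 * (data_cnt bytes starting at paging_offset) and pushed to the
	 * pager with LIST_REQ_PAGEOUT_PAGES whenever the run is broken,
	 * reaches DATA_WRITE_MAX pages, or the scan completes.
	 */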
725 for (;
726 size != 0;
727 size -= PAGE_SIZE, offset += PAGE_SIZE_64)
728 {
729 /*
730 * Limit the number of pages to be cleaned at once.
731 */
732 if (pending_pageout &&
733 data_cnt >= PAGE_SIZE * DATA_WRITE_MAX)
734 {
735 LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
736 pageout_action, paging_offset);
737 data_cnt = 0;
738 pending_pageout = FALSE;
739 }
740
741 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
742 page_lock_result = memory_object_lock_page(m, should_return,
743 should_flush, prot);
744
745 XPR(XPR_MEMORY_OBJECT,
746 "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
747 (integer_t)object, offset, page_lock_result, 0, 0);
748
749 switch (page_lock_result)
750 {
751 case MEMORY_OBJECT_LOCK_RESULT_DONE:
752 /*
753 * End of a cluster of dirty pages.
754 */
755 if(pending_pageout) {
756 LIST_REQ_PAGEOUT_PAGES(object,
757 data_cnt, pageout_action,
758 paging_offset);
759 data_cnt = 0;
760 pending_pageout = FALSE;
761 continue;
762 }
763 break;
764
765 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
766 /*
767 * Since it is necessary to block,
768 * clean any dirty pages now.
769 */
770 if(pending_pageout) {
771 LIST_REQ_PAGEOUT_PAGES(object,
772 data_cnt, pageout_action,
773 paging_offset);
774 pending_pageout = FALSE;
775 data_cnt = 0;
776 continue;
777 }
778
779 PAGE_ASSERT_WAIT(m, THREAD_UNINT);
780 vm_object_unlock(object);
781 thread_block((void (*)(void))0);
782 vm_object_lock(object);
783 continue;
784
785 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
786 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
787 /*
788 * The clean and return cases are similar.
789 *
790 */
791
792 /*
793 * if this would form a discontiguous block,
794 * clean the old pages and start anew.
795 *
796 */
797
798 /*
799 * Mark the page busy since we unlock the
800 * object below.
801 */
802 m->busy = TRUE;
803 if (pending_pageout &&
804 (last_offset != offset ||
805 pageout_action != page_lock_result)) {
806 LIST_REQ_PAGEOUT_PAGES(object,
807 data_cnt, pageout_action,
808 paging_offset);
809 pending_pageout = FALSE;
810 data_cnt = 0;
811 }
812 m->busy = FALSE;
813 holding_page = VM_PAGE_NULL;
814 if(m->cleaning) {
815 PAGE_ASSERT_WAIT(m, THREAD_UNINT);
816 vm_object_unlock(object);
817 thread_block((void (*)(void))0);
818 continue;
819 }
820 if(!pending_pageout) {
821 pending_pageout = TRUE;
822 pageout_action = page_lock_result;
823 paging_offset = offset;
824 }
825 if (should_flush) {
826 vm_page_lock_queues();
827 m->list_req_pending = TRUE;
828 m->cleaning = TRUE;
829 m->busy = TRUE;
830 m->pageout = TRUE;
831 vm_page_wire(m);
832 vm_page_unlock_queues();
833 } else {
834 /*
835 * Clean but do not flush
836 */
837 vm_page_lock_queues();
838 m->list_req_pending = TRUE;
839 m->cleaning = TRUE;
840 vm_page_unlock_queues();
841
842 }
843 vm_object_unlock(object);
844
845
846 data_cnt += PAGE_SIZE;
847 last_offset = offset + PAGE_SIZE_64;
848 data_returned = TRUE;
849
850 vm_object_lock(object);
851 break;
852 }
853 break;
854 }
855 }
856
857 /*
858 * We have completed the scan for applicable pages.
859 * Clean any pages that have been saved.
860 */
861 if (pending_pageout) {
862 LIST_REQ_PAGEOUT_PAGES(object,
863 data_cnt, pageout_action, paging_offset);
864 }
865 return (data_returned);
866}
867
868/*
869 * Routine: memory_object_synchronize_completed [user interface]
870 *
 871 *	Tells the kernel that data previously synchronized
 872 *	(via memory_object_synchronize) has been queued or placed on the
 873 *	backing storage.
874 *
875 * Note: there may be multiple synchronize requests for a given
876 * memory object outstanding but they will not overlap.
877 */
878
879kern_return_t
880memory_object_synchronize_completed(
881 memory_object_control_t control,
882 memory_object_offset_t offset,
883 vm_offset_t length)
 884{
885 vm_object_t object;
886 msync_req_t msr;
887
888 XPR(XPR_MEMORY_OBJECT,
889 "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
890 (integer_t)object, offset, length, 0, 0);
891
892 /*
893 * Look for bogus arguments
894 */
895
896 object = memory_object_control_to_vm_object(control);
897 if (object == VM_OBJECT_NULL)
898 return (KERN_INVALID_ARGUMENT);
899
900 vm_object_lock(object);
901
902/*
903 * search for sync request structure
904 */
905 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
906 if (msr->offset == offset && msr->length == length) {
907 queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
908 break;
909 }
910 }/* queue_iterate */
911
912 if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
913 vm_object_unlock(object);
914 return KERN_INVALID_ARGUMENT;
915 }
916
917 msr_lock(msr);
918 vm_object_unlock(object);
919 msr->flag = VM_MSYNC_DONE;
920 msr_unlock(msr);
921 thread_wakeup((event_t) msr);
922
923 return KERN_SUCCESS;
924}/* memory_object_synchronize_completed */
925
926static kern_return_t
927vm_object_set_attributes_common(
928 vm_object_t object,
929 boolean_t may_cache,
930 memory_object_copy_strategy_t copy_strategy,
931 boolean_t temporary,
932 vm_size_t cluster_size,
933 boolean_t silent_overwrite,
934 boolean_t advisory_pageout)
935{
936 boolean_t object_became_ready;
937
938 XPR(XPR_MEMORY_OBJECT,
939 "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
 940		(integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
941
942 if (object == VM_OBJECT_NULL)
943 return(KERN_INVALID_ARGUMENT);
944
945 /*
946 * Verify the attributes of importance
947 */
948
949 switch(copy_strategy) {
950 case MEMORY_OBJECT_COPY_NONE:
951 case MEMORY_OBJECT_COPY_DELAY:
952 break;
953 default:
954 return(KERN_INVALID_ARGUMENT);
955 }
956
957#if !ADVISORY_PAGEOUT
 958	if (silent_overwrite || advisory_pageout)
 959		return(KERN_INVALID_ARGUMENT);
 960
961#endif /* !ADVISORY_PAGEOUT */
962 if (may_cache)
963 may_cache = TRUE;
964 if (temporary)
965 temporary = TRUE;
966 if (cluster_size != 0) {
967 int pages_per_cluster;
968 pages_per_cluster = atop(cluster_size);
969 /*
970 * Cluster size must be integral multiple of page size,
971 * and be a power of 2 number of pages.
972 */
973 if ((cluster_size & (PAGE_SIZE-1)) ||
 974		    ((pages_per_cluster-1) & pages_per_cluster))
 975			return KERN_INVALID_ARGUMENT;
976 }
977
978 vm_object_lock(object);
979
980 /*
981 * Copy the attributes
982 */
983 assert(!object->internal);
984 object_became_ready = !object->pager_ready;
985 object->copy_strategy = copy_strategy;
986 object->can_persist = may_cache;
987 object->temporary = temporary;
988 object->silent_overwrite = silent_overwrite;
989 object->advisory_pageout = advisory_pageout;
990 if (cluster_size == 0)
991 cluster_size = PAGE_SIZE;
992 object->cluster_size = cluster_size;
993
994 assert(cluster_size >= PAGE_SIZE &&
995 cluster_size % PAGE_SIZE == 0);
996
997 /*
998 * Wake up anyone waiting for the ready attribute
999 * to become asserted.
1000 */
1001
1002 if (object_became_ready) {
1003 object->pager_ready = TRUE;
1004 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
1005 }
1006
1007 vm_object_unlock(object);
1008
1009 return(KERN_SUCCESS);
1010}
1011
1012/*
1013 * Set the memory object attribute as provided.
1014 *
1015 * XXX This routine cannot be completed until the vm_msync, clean
1016 * in place, and cluster work is completed. See ifdef notyet
 1017 *	below and note that vm_object_set_attributes_common()
1018 * may have to be expanded.
1019 */
1020kern_return_t
1021memory_object_change_attributes(
1022 memory_object_control_t control,
1023 memory_object_flavor_t flavor,
1024 memory_object_info_t attributes,
1025 mach_msg_type_number_t count)
 1026{
1027 vm_object_t object;
1028 kern_return_t result = KERN_SUCCESS;
1029 boolean_t temporary;
1030 boolean_t may_cache;
1031 boolean_t invalidate;
1032 vm_size_t cluster_size;
1033 memory_object_copy_strategy_t copy_strategy;
 1034	boolean_t			silent_overwrite;
1035 boolean_t advisory_pageout;
1036
 1037	object = memory_object_control_to_vm_object(control);
 1038	if (object == VM_OBJECT_NULL)
 1039		return (KERN_INVALID_ARGUMENT);
1040
1041 vm_object_lock(object);
 1042
1043 temporary = object->temporary;
1044 may_cache = object->can_persist;
1045 copy_strategy = object->copy_strategy;
1046 silent_overwrite = object->silent_overwrite;
1047 advisory_pageout = object->advisory_pageout;
1048#if notyet
1049 invalidate = object->invalidate;
1050#endif
1051 cluster_size = object->cluster_size;
1052 vm_object_unlock(object);
1053
1054 switch (flavor) {
1055 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1056 {
1057 old_memory_object_behave_info_t behave;
1058
1059 if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1060 result = KERN_INVALID_ARGUMENT;
1061 break;
1062 }
1063
1064 behave = (old_memory_object_behave_info_t) attributes;
1065
1066 temporary = behave->temporary;
1067 invalidate = behave->invalidate;
1068 copy_strategy = behave->copy_strategy;
1069
1070 break;
1071 }
1072
1073 case MEMORY_OBJECT_BEHAVIOR_INFO:
1074 {
1075 memory_object_behave_info_t behave;
1076
1077 if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1078 result = KERN_INVALID_ARGUMENT;
1079 break;
1080 }
1081
1082 behave = (memory_object_behave_info_t) attributes;
1083
1084 temporary = behave->temporary;
1085 invalidate = behave->invalidate;
1086 copy_strategy = behave->copy_strategy;
1087 silent_overwrite = behave->silent_overwrite;
1088 advisory_pageout = behave->advisory_pageout;
1089 break;
1090 }
1091
1092 case MEMORY_OBJECT_PERFORMANCE_INFO:
1093 {
1094 memory_object_perf_info_t perf;
1095
1096 if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
1097 result = KERN_INVALID_ARGUMENT;
1098 break;
1099 }
1100
1101 perf = (memory_object_perf_info_t) attributes;
1102
1103 may_cache = perf->may_cache;
1104 cluster_size = round_page(perf->cluster_size);
1105
1106 break;
1107 }
1108
1109 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1110 {
1111 old_memory_object_attr_info_t attr;
1112
1113 if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1114 result = KERN_INVALID_ARGUMENT;
1115 break;
1116 }
1117
1118 attr = (old_memory_object_attr_info_t) attributes;
1119
1120 may_cache = attr->may_cache;
1121 copy_strategy = attr->copy_strategy;
1122 cluster_size = page_size;
1123
1124 break;
1125 }
1126
1127 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1128 {
1129 memory_object_attr_info_t attr;
1130
1131 if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
1132 result = KERN_INVALID_ARGUMENT;
1133 break;
1134 }
1135
1136 attr = (memory_object_attr_info_t) attributes;
1137
1138 copy_strategy = attr->copy_strategy;
1139 may_cache = attr->may_cache_object;
1140 cluster_size = attr->cluster_size;
1141 temporary = attr->temporary;
1142
1143 break;
1144 }
1145
1146 default:
1147 result = KERN_INVALID_ARGUMENT;
1148 break;
1149 }
1150
 1151	if (result != KERN_SUCCESS)
 1152		return(result);
1153
1154 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
1155 copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1156 temporary = TRUE;
1157 } else {
1158 temporary = FALSE;
1159 }
1160
1161 /*
1162 * XXX may_cache may become a tri-valued variable to handle
1163 * XXX uncache if not in use.
1164 */
 1165	return (vm_object_set_attributes_common(object,
1166 may_cache,
1167 copy_strategy,
1168 temporary,
1169 cluster_size,
1170 silent_overwrite,
 1171						 advisory_pageout));
1172}
1173
1174kern_return_t
1175memory_object_get_attributes(
 1176	memory_object_control_t	control,
1177 memory_object_flavor_t flavor,
1178 memory_object_info_t attributes, /* pointer to OUT array */
1179 mach_msg_type_number_t *count) /* IN/OUT */
1180{
1181 kern_return_t ret = KERN_SUCCESS;
1182 vm_object_t object;
 1183
1184 object = memory_object_control_to_vm_object(control);
1185 if (object == VM_OBJECT_NULL)
1186 return (KERN_INVALID_ARGUMENT);
1187
1188 vm_object_lock(object);
1189
1190 switch (flavor) {
1191 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1192 {
1193 old_memory_object_behave_info_t behave;
1194
1195 if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1196 ret = KERN_INVALID_ARGUMENT;
1197 break;
1198 }
1199
1200 behave = (old_memory_object_behave_info_t) attributes;
1201 behave->copy_strategy = object->copy_strategy;
1202 behave->temporary = object->temporary;
1203#if notyet /* remove when vm_msync complies and clean in place fini */
1204 behave->invalidate = object->invalidate;
1205#else
1206 behave->invalidate = FALSE;
1207#endif
1208
1209 *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1210 break;
1211 }
1212
1213 case MEMORY_OBJECT_BEHAVIOR_INFO:
1214 {
1215 memory_object_behave_info_t behave;
1216
1217 if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1218 ret = KERN_INVALID_ARGUMENT;
1219 break;
1220 }
1221
1222 behave = (memory_object_behave_info_t) attributes;
1223 behave->copy_strategy = object->copy_strategy;
1224 behave->temporary = object->temporary;
1225#if notyet /* remove when vm_msync complies and clean in place fini */
1226 behave->invalidate = object->invalidate;
1227#else
1228 behave->invalidate = FALSE;
1229#endif
1230 behave->advisory_pageout = object->advisory_pageout;
1231 behave->silent_overwrite = object->silent_overwrite;
1232 *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1233 break;
1234 }
1235
1236 case MEMORY_OBJECT_PERFORMANCE_INFO:
1237 {
1238 memory_object_perf_info_t perf;
1239
1240 if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
1241 ret = KERN_INVALID_ARGUMENT;
1242 break;
1243 }
1244
1245 perf = (memory_object_perf_info_t) attributes;
1246 perf->cluster_size = object->cluster_size;
1247 perf->may_cache = object->can_persist;
1248
1249 *count = MEMORY_OBJECT_PERF_INFO_COUNT;
1250 break;
1251 }
1252
1253 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1254 {
1255 old_memory_object_attr_info_t attr;
1256
1257 if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1258 ret = KERN_INVALID_ARGUMENT;
1259 break;
1260 }
1261
1262 attr = (old_memory_object_attr_info_t) attributes;
1263 attr->may_cache = object->can_persist;
1264 attr->copy_strategy = object->copy_strategy;
1265
1266 *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
1267 break;
1268 }
1269
1270 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1271 {
1272 memory_object_attr_info_t attr;
1273
1274 if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
1275 ret = KERN_INVALID_ARGUMENT;
1276 break;
1277 }
1278
1279 attr = (memory_object_attr_info_t) attributes;
1280 attr->copy_strategy = object->copy_strategy;
1281 attr->cluster_size = object->cluster_size;
1282 attr->may_cache_object = object->can_persist;
1283 attr->temporary = object->temporary;
1284
1285 *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
1286 break;
1287 }
1288
1289 default:
1290 ret = KERN_INVALID_ARGUMENT;
1291 break;
1292 }
1293
1294 vm_object_unlock(object);
1295
1296 return(ret);
1297}
1298
 1299
1300/*
1301 * Routine: memory_object_upl_request [interface]
1302 * Purpose:
1303 * Cause the population of a portion of a vm_object.
1304 * Depending on the nature of the request, the pages
 1305 *		returned may contain valid data or be uninitialized.
1306 *
1307 */
 1308
1309kern_return_t
1310memory_object_upl_request(
1311 memory_object_control_t control,
1312 memory_object_offset_t offset,
1313 vm_size_t size,
1314 upl_t *upl_ptr,
1315 upl_page_info_array_t user_page_list,
1316 unsigned int *page_list_count,
1317 int cntrl_flags)
1318{
1319 vm_object_t object;
1320
1321 object = memory_object_control_to_vm_object(control);
1322 if (object == VM_OBJECT_NULL)
1323 return (KERN_INVALID_ARGUMENT);
1324
1325 return vm_object_upl_request(object,
1326 offset,
1327 size,
1328 upl_ptr,
1329 user_page_list,
1330 page_list_count,
1331 cntrl_flags);
1332}
1333
1334/*
1335 * Routine: memory_object_super_upl_request [interface]
1336 * Purpose:
1337 * Cause the population of a portion of a vm_object
1338 * in much the same way as memory_object_upl_request.
1339 * Depending on the nature of the request, the pages
 1340 *		returned may contain valid data or be uninitialized.
1341 * However, the region may be expanded up to the super
1342 * cluster size provided.
 1343 */
 1344
 1345kern_return_t
1346memory_object_super_upl_request(
1347 memory_object_control_t control,
1348 memory_object_offset_t offset,
1349 vm_size_t size,
1350 vm_size_t super_cluster,
1351 upl_t *upl,
1352 upl_page_info_t *user_page_list,
1353 unsigned int *page_list_count,
1354 int cntrl_flags)
 1355{
1356 vm_object_t object;
1357
1358 object = memory_object_control_to_vm_object(control);
1359 if (object == VM_OBJECT_NULL)
1360 return (KERN_INVALID_ARGUMENT);
1361
1362 return vm_object_super_upl_request(object,
1363 offset,
1364 size,
1365 super_cluster,
1366 upl,
1367 user_page_list,
1368 page_list_count,
1369 cntrl_flags);
1370}
1371
1372int vm_stat_discard_cleared_reply = 0;
1373int vm_stat_discard_cleared_unset = 0;
1374int vm_stat_discard_cleared_too_late = 0;
1375
1376
1377
 1378/*
 1379 *	Routine:	host_default_memory_manager [interface]
1380 * Purpose:
1381 * set/get the default memory manager port and default cluster
1382 * size.
1383 *
1384 * If successful, consumes the supplied naked send right.
1385 */
1386kern_return_t
1387host_default_memory_manager(
1388 host_priv_t host_priv,
1389 memory_object_default_t *default_manager,
1390 vm_size_t cluster_size)
 1391{
1392 memory_object_default_t current_manager;
1393 memory_object_default_t new_manager;
1394 memory_object_default_t returned_manager;
1395
1396 if (host_priv == HOST_PRIV_NULL)
1397 return(KERN_INVALID_HOST);
1398
1399 assert(host_priv == &realhost);
1400
1401 new_manager = *default_manager;
1402 mutex_lock(&memory_manager_default_lock);
1403 current_manager = memory_manager_default;
1404
 1405	if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1406 /*
1407 * Retrieve the current value.
1408 */
1409 memory_object_default_reference(current_manager);
1410 returned_manager = current_manager;
1411 } else {
1412 /*
1413 * Retrieve the current value,
1414 * and replace it with the supplied value.
1415 * We return the old reference to the caller
1416 * but we have to take a reference on the new
1417 * one.
1418 */
1419
1420 returned_manager = current_manager;
1421 memory_manager_default = new_manager;
1422 memory_object_default_reference(new_manager);
1423
1424 if (cluster_size % PAGE_SIZE != 0) {
1425#if 0
1426 mutex_unlock(&memory_manager_default_lock);
1427 return KERN_INVALID_ARGUMENT;
1428#else
1429 cluster_size = round_page(cluster_size);
1430#endif
1431 }
1432 memory_manager_default_cluster = cluster_size;
1433
1434 /*
1435 * In case anyone's been waiting for a memory
1436 * manager to be established, wake them up.
1437 */
1438
1439 thread_wakeup((event_t) &memory_manager_default);
1440 }
1441
1442 mutex_unlock(&memory_manager_default_lock);
1443
1444 *default_manager = returned_manager;
1445 return(KERN_SUCCESS);
1446}
1447
1448/*
1449 * Routine: memory_manager_default_reference
1450 * Purpose:
1451 * Returns a naked send right for the default
1452 * memory manager. The returned right is always
1453 * valid (not IP_NULL or IP_DEAD).
1454 */
1455
 1456__private_extern__ memory_object_default_t
1457memory_manager_default_reference(
1458 vm_size_t *cluster_size)
1459{
 1460	memory_object_default_t current_manager;
1461
1462 mutex_lock(&memory_manager_default_lock);
1463 current_manager = memory_manager_default;
1464 while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1465 thread_sleep_mutex((event_t) &memory_manager_default,
1466 &memory_manager_default_lock, THREAD_UNINT);
1467 mutex_lock(&memory_manager_default_lock);
 1468		current_manager = memory_manager_default;
 1469	}
 1470	memory_object_default_reference(current_manager);
 1471	*cluster_size = memory_manager_default_cluster;
1472 mutex_unlock(&memory_manager_default_lock);
1473
1474 return current_manager;
1475}
1476
1477/*
1478 * Routine: memory_manager_default_check
1479 *
1480 * Purpose:
1481 * Check whether a default memory manager has been set
1482 * up yet, or not. Returns KERN_SUCCESS if dmm exists,
1483 * and KERN_FAILURE if dmm does not exist.
1484 *
1485 * If there is no default memory manager, log an error,
1486 * but only the first time.
1487 *
1488 */
 1489__private_extern__ kern_return_t
1490memory_manager_default_check(void)
1491{
 1492	memory_object_default_t current;
1493
1494 mutex_lock(&memory_manager_default_lock);
1495 current = memory_manager_default;
 1496	if (current == MEMORY_OBJECT_DEFAULT_NULL) {
1497 static boolean_t logged; /* initialized to 0 */
1498 boolean_t complain = !logged;
1499 logged = TRUE;
1500 mutex_unlock(&memory_manager_default_lock);
1501 if (complain)
1502 printf("Warning: No default memory manager\n");
1503 return(KERN_FAILURE);
1504 } else {
1505 mutex_unlock(&memory_manager_default_lock);
1506 return(KERN_SUCCESS);
1507 }
1508}
1509
 1510__private_extern__ void
1511memory_manager_default_init(void)
1512{
 1513	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
1514 mutex_init(&memory_manager_default_lock, ETAP_VM_MEMMAN);
1515}
1516
1517
1518void
1519memory_object_deactivate_pages(
1520 vm_object_t object,
1521 vm_object_offset_t offset,
1522 vm_object_size_t size,
1523 boolean_t kill_page)
1524{
1525 vm_object_t orig_object;
1526 int pages_moved = 0;
1527 int pages_found = 0;
1528
1529 /*
1530 * entered with object lock held, acquire a paging reference to
1531 * prevent the memory_object and control ports from
1532 * being destroyed.
1533 */
1534 orig_object = object;
1535
1536 for (;;) {
1537 register vm_page_t m;
1538 vm_object_offset_t toffset;
1539 vm_object_size_t tsize;
1540
1541 vm_object_paging_begin(object);
1542 vm_page_lock_queues();
1543
1544 for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {
1545
1546 if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {
1547
1548 pages_found++;
1549
1550 if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {
1551
1552 m->reference = FALSE;
1553 pmap_clear_reference(m->phys_addr);
1554
1555 if ((kill_page) && (object->internal)) {
1556 m->precious = FALSE;
1557 m->dirty = FALSE;
1558 pmap_clear_modify(m->phys_addr);
1559 vm_external_state_clr(object->existence_map, offset);
1560 }
1561 VM_PAGE_QUEUES_REMOVE(m);
1562
1563 queue_enter_first(&vm_page_queue_inactive, m, vm_page_t, pageq);
1564
1565 m->inactive = TRUE;
1566 if (!m->fictitious)
1567 vm_page_inactive_count++;
1568
1569 pages_moved++;
1570 }
1571 }
1572 }
1573 vm_page_unlock_queues();
1574 vm_object_paging_end(object);
1575
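		/*
		 * Walk down the shadow chain so that resident pages
		 * backing this range in shadow objects are deactivated
		 * as well.  kill_page is cleared first, so pages are
		 * only "killed" in the original (top-level) object.
		 */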
1576 if (object->shadow) {
1577 vm_object_t tmp_object;
1578
1579 kill_page = 0;
1580
1581 offset += object->shadow_offset;
1582
1583 tmp_object = object->shadow;
1584 vm_object_lock(tmp_object);
1585
1586 if (object != orig_object)
1587 vm_object_unlock(object);
1588 object = tmp_object;
1589 } else
1590 break;
1591 }
1592 if (object != orig_object)
1593 vm_object_unlock(object);
1594}
1595
1596/* Allow manipulation of individual page state. This is actually part of */
1597/* the UPL regimen but takes place on the object rather than on a UPL */
1598
1599kern_return_t
1600memory_object_page_op(
1601 memory_object_control_t control,
1602 memory_object_offset_t offset,
1603 int ops,
1604 vm_offset_t *phys_entry,
1605 int *flags)
 1606{
1607 vm_object_t object;
1608 vm_page_t dst_page;
1609
1610
1611 object = memory_object_control_to_vm_object(control);
1612 if (object == VM_OBJECT_NULL)
1613 return (KERN_INVALID_ARGUMENT);
1614
1615 vm_object_lock(object);
1616
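	/*
	 * UPL_POP_PHYSICAL is only meaningful for physically contiguous
	 * objects: report the base physical address (kept in
	 * shadow_offset) and return without touching any vm_page.
	 */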
1617 if(ops & UPL_POP_PHYSICAL) {
1618 if(object->phys_contiguous) {
1619 if (phys_entry) {
1620 *phys_entry = (vm_offset_t)
1621 object->shadow_offset;
1622 }
1623 vm_object_unlock(object);
1624 return KERN_SUCCESS;
1625 } else {
1626 vm_object_unlock(object);
1627 return KERN_INVALID_OBJECT;
1628 }
1629 }
1630
 1631	while(TRUE) {
1632 if(object->phys_contiguous) {
1633 vm_object_unlock(object);
1634 return KERN_INVALID_OBJECT;
1635 }
1636
1637 if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
1638 vm_object_unlock(object);
1639 return KERN_FAILURE;
1640 }
1641
1642 /* Sync up on getting the busy bit */
1643 if((dst_page->busy || dst_page->cleaning) &&
1644 (((ops & UPL_POP_SET) &&
1645 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
1646 /* someone else is playing with the page, we will */
1647 /* have to wait */
1648 PAGE_ASSERT_WAIT(dst_page, THREAD_UNINT);
1649 vm_object_unlock(object);
1650 thread_block((void(*)(void))0);
1651 vm_object_lock(object);
1652 continue;
1653 }
1654
1655 if (ops & UPL_POP_DUMP) {
1656 vm_page_lock_queues();
1657 vm_page_free(dst_page);
1658 vm_page_unlock_queues();
1659 break;
1660 }
1661
1662 if (flags) {
1663 *flags = 0;
1664
1665 /* Get the condition of flags before requested ops */
1666 /* are undertaken */
1667
1668 if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
1669 if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
1670 if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
1671 if(dst_page->absent) *flags |= UPL_POP_ABSENT;
1672 if(dst_page->busy) *flags |= UPL_POP_BUSY;
1673 }
1674 if (phys_entry)
1675 *phys_entry = dst_page->phys_addr;
1676
1677 /* The caller should have made a call either contingent with */
1678 /* or prior to this call to set UPL_POP_BUSY */
1679 if(ops & UPL_POP_SET) {
1680 /* The protection granted with this assert will */
1681 /* not be complete. If the caller violates the */
1682 /* convention and attempts to change page state */
1683 /* without first setting busy we may not see it */
1684 /* because the page may already be busy. However */
1685 /* if such violations occur we will assert sooner */
1686 /* or later. */
1687 assert(dst_page->busy || (ops & UPL_POP_BUSY));
1688 if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
1689 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
1690 if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
1691 if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
1692 if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
1693 }
1694
1695 if(ops & UPL_POP_CLR) {
1696 assert(dst_page->busy);
1697 if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
1698 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
1699 if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
1700 if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
1701 if (ops & UPL_POP_BUSY) {
1702 dst_page->busy = FALSE;
1703 PAGE_WAKEUP(dst_page);
1704 }
1705 }
1706 break;
1707 }
1708
1709 vm_object_unlock(object);
1710 return KERN_SUCCESS;
1711
1712}
1713
1714static zone_t mem_obj_control_zone;
1715
1716__private_extern__ void
1717memory_object_control_bootstrap(void)
1718{
1719 int i;
1720
1721 i = (vm_size_t) sizeof (struct memory_object_control);
1722 mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
1723 return;
1724}
1725
1726__private_extern__ memory_object_control_t
1727memory_object_control_allocate(
1728 vm_object_t object)
1729{
1730 memory_object_control_t control;
1731
1732 control = (memory_object_control_t)zalloc(mem_obj_control_zone);
1733 if (control != MEMORY_OBJECT_CONTROL_NULL)
1734 control->object = object;
1735 return (control);
1736}
1737
1738__private_extern__ void
1739memory_object_control_collapse(
1740 memory_object_control_t control,
1741 vm_object_t object)
1742{
1743 assert((control->object != VM_OBJECT_NULL) &&
1744 (control->object != object));
1745 control->object = object;
1746}
1747
1748__private_extern__ vm_object_t
1749memory_object_control_to_vm_object(
1750 memory_object_control_t control)
1751{
1752 if (control == MEMORY_OBJECT_CONTROL_NULL)
1753 return VM_OBJECT_NULL;
1754
1755 return (control->object);
1756}
1757
1758memory_object_control_t
1759convert_port_to_mo_control(
1760 mach_port_t port)
1761{
1762 return MEMORY_OBJECT_CONTROL_NULL;
1763}
1764
1765
1766mach_port_t
1767convert_mo_control_to_port(
1768 memory_object_control_t control)
1769{
1770 return MACH_PORT_NULL;
1771}
1772
1773void
1774memory_object_control_reference(
1775 memory_object_control_t control)
1776{
1777 return;
1778}
1779
1780/*
 1781 * We only ever issue one of these references, so kill it
1782 * when that gets released (should switch the real reference
1783 * counting in true port-less EMMI).
1784 */
1785void
1786memory_object_control_deallocate(
1787 memory_object_control_t control)
1788{
1789 zfree(mem_obj_control_zone, (vm_offset_t)control);
1790}
1791
1792void
1793memory_object_control_disable(
1794 memory_object_control_t control)
1795{
1796 assert(control->object != VM_OBJECT_NULL);
1797 control->object = VM_OBJECT_NULL;
1798}
1799
1800void
1801memory_object_default_reference(
1802 memory_object_default_t dmm)
1803{
1804 ipc_port_make_send(dmm);
1805}
1806
1807void
1808memory_object_default_deallocate(
1809 memory_object_default_t dmm)
1810{
1811 ipc_port_release_send(dmm);
1812}
1813
1814memory_object_t
1815convert_port_to_memory_object(
1816 mach_port_t port)
1817{
1818 return (MEMORY_OBJECT_NULL);
1819}
1820
1821
1822mach_port_t
1823convert_memory_object_to_port(
1824 memory_object_t object)
1825{
1826 return (MACH_PORT_NULL);
1827}
1828
1829#ifdef MACH_BSD
1830/* remove after component interface available */
1831extern int vnode_pager_workaround;
1832extern int device_pager_workaround;
1833#endif
1834
1835
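/*
 * The wrappers below dispatch each external memory management interface
 * (EMMI) call to the vnode pager, the device pager, or the default
 * (dp_) pager by comparing the memory object's pager field against the
 * workaround symbols declared above.  This is a stopgap until a proper
 * component interface is available.
 */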
1836/* Routine memory_object_reference */
1837void memory_object_reference(
1838 memory_object_t memory_object)
1839{
1840extern void dp_memory_object_reference(memory_object_t);
1841
1842#ifdef MACH_BSD
1843 extern void vnode_pager_reference(memory_object_t);
1844 extern void device_pager_reference(memory_object_t);
1845
1846 if(memory_object->pager == &vnode_pager_workaround) {
1847 vnode_pager_reference(memory_object);
1848 } else if(memory_object->pager == &device_pager_workaround) {
1849 device_pager_reference(memory_object);
1850 } else
1851#endif
1852 dp_memory_object_reference(memory_object);
1853}
1854
1855/* Routine memory_object_deallocate */
1856void memory_object_deallocate(
1857 memory_object_t memory_object)
1858{
1859extern void dp_memory_object_deallocate(memory_object_t);
1860
1861#ifdef MACH_BSD
1862 extern void vnode_pager_deallocate(memory_object_t);
1863 extern void device_pager_deallocate(memory_object_t);
1864
1865 if(memory_object->pager == &vnode_pager_workaround) {
1866 vnode_pager_deallocate(memory_object);
1867 } else if(memory_object->pager == &device_pager_workaround) {
1868 device_pager_deallocate(memory_object);
1869 } else
1870#endif
1871 dp_memory_object_deallocate(memory_object);
1872}
1873
1874
1875/* Routine memory_object_init */
1876kern_return_t memory_object_init
1877(
1878 memory_object_t memory_object,
1879 memory_object_control_t memory_control,
1880 vm_size_t memory_object_page_size
1881)
1882{
1883extern kern_return_t dp_memory_object_init(memory_object_t,
1884 memory_object_control_t,
1885 vm_size_t);
1886#ifdef MACH_BSD
1887extern kern_return_t vnode_pager_init(memory_object_t,
1888 memory_object_control_t,
1889 vm_size_t);
1890extern kern_return_t device_pager_init(memory_object_t,
1891 memory_object_control_t,
1892 vm_size_t);
1893
1894 if(memory_object->pager == &vnode_pager_workaround) {
1895 return vnode_pager_init(memory_object,
1896 memory_control,
1897 memory_object_page_size);
1898 } else if(memory_object->pager == &device_pager_workaround) {
1899 return device_pager_init(memory_object,
1900 memory_control,
1901 memory_object_page_size);
1902 } else
1903#endif
1904 return dp_memory_object_init(memory_object,
1905 memory_control,
1906 memory_object_page_size);
1907}
1908
1909/* Routine memory_object_terminate */
1910kern_return_t memory_object_terminate
1911(
1912 memory_object_t memory_object
1913)
1914{
1915extern kern_return_t dp_memory_object_terminate(memory_object_t);
1916
1917#ifdef MACH_BSD
1918extern kern_return_t vnode_pager_terminate(memory_object_t);
1919extern kern_return_t device_pager_terminate(memory_object_t);
1920
1921 if(memory_object->pager == &vnode_pager_workaround) {
1922 return vnode_pager_terminate(memory_object);
1923 } else if(memory_object->pager == &device_pager_workaround) {
1924 return device_pager_terminate(memory_object);
1925 } else
1926#endif
1927 return dp_memory_object_terminate(memory_object);
1928}
1929
1930/* Routine memory_object_data_request */
1931kern_return_t memory_object_data_request
1932(
1933 memory_object_t memory_object,
1934 memory_object_offset_t offset,
1935 vm_size_t length,
1936 vm_prot_t desired_access
1937)
1938{
1939extern kern_return_t dp_memory_object_data_request(memory_object_t,
1940 memory_object_offset_t, vm_size_t, vm_prot_t);
1941
1942#ifdef MACH_BSD
1943extern kern_return_t vnode_pager_data_request(memory_object_t,
1944 memory_object_offset_t, vm_size_t, vm_prot_t);
1945extern kern_return_t device_pager_data_request(memory_object_t,
1946 memory_object_offset_t, vm_size_t, vm_prot_t);
1947
1948 if (memory_object->pager == &vnode_pager_workaround) {
1949 return vnode_pager_data_request(memory_object,
1950 offset,
1951 length,
1952 desired_access);
1953 } else if (memory_object->pager == &device_pager_workaround) {
1954 return device_pager_data_request(memory_object,
1955 offset,
1956 length,
1957 desired_access);
1958 } else
1959#endif
1960 return dp_memory_object_data_request(memory_object,
1961 offset,
1962 length,
1963 desired_access);
1964}
1965
1966/* Routine memory_object_data_return */
1967kern_return_t memory_object_data_return
1968(
1969 memory_object_t memory_object,
1970 memory_object_offset_t offset,
1971 vm_size_t size,
1972 boolean_t dirty,
1973 boolean_t kernel_copy
1974)
1975{
1976 extern kern_return_t dp_memory_object_data_return(memory_object_t,
1977 memory_object_offset_t,
1978 vm_size_t,
1979 boolean_t,
1980 boolean_t);
1981#ifdef MACH_BSD
1982 extern kern_return_t vnode_pager_data_return(memory_object_t,
1983 memory_object_offset_t,
1984 vm_size_t,
1985 boolean_t,
1986 boolean_t);
1987 extern kern_return_t device_pager_data_return(memory_object_t,
1988 memory_object_offset_t,
1989 vm_size_t,
1990 boolean_t,
1991 boolean_t);
1992
1993 if (memory_object->pager == &vnode_pager_workaround) {
1994 return vnode_pager_data_return(memory_object,
1995 offset,
1996 size,
1997 dirty,
1998 kernel_copy);
1999 } else if (memory_object->pager == &device_pager_workaround) {
2000 return device_pager_data_return(memory_object,
2001 offset,
2002 size,
2003 dirty,
2004 kernel_copy);
2005 } else
2006#endif
2007 return dp_memory_object_data_return(memory_object,
2008 offset,
2009 size,
2010 dirty,
2011 kernel_copy);
2012}
2013
2014/* Routine memory_object_data_initialize */
2015kern_return_t memory_object_data_initialize
2016(
2017 memory_object_t memory_object,
2018 memory_object_offset_t offset,
2019 vm_size_t size
2020)
2021{
2022
2023 extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
2024 memory_object_offset_t,
2025 vm_size_t);
2026#ifdef MACH_BSD
2027 extern kern_return_t vnode_pager_data_initialize(memory_object_t,
2028 memory_object_offset_t,
2029 vm_size_t);
2030 extern kern_return_t device_pager_data_initialize(memory_object_t,
2031 memory_object_offset_t,
2032 vm_size_t);
2033
2034 if (memory_object->pager == &vnode_pager_workaround) {
2035 return vnode_pager_data_initialize(memory_object,
2036 offset,
2037 size);
2038 } else if (memory_object->pager == &device_pager_workaround) {
2039 return device_pager_data_initialize(memory_object,
2040 offset,
2041 size);
2042 } else
2043#endif
2044 return dp_memory_object_data_initialize(memory_object,
2045 offset,
2046 size);
2047}
2048
2049/* Routine memory_object_data_unlock */
2050kern_return_t memory_object_data_unlock
2051(
2052 memory_object_t memory_object,
2053 memory_object_offset_t offset,
2054 vm_size_t size,
2055 vm_prot_t desired_access
2056)
2057{
2058 extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
2059 memory_object_offset_t,
2060 vm_size_t,
2061 vm_prot_t);
2062#ifdef MACH_BSD
2063 extern kern_return_t vnode_pager_data_unlock(memory_object_t,
2064 memory_object_offset_t,
2065 vm_size_t,
2066 vm_prot_t);
2067 extern kern_return_t device_pager_data_unlock(memory_object_t,
2068 memory_object_offset_t,
2069 vm_size_t,
2070 vm_prot_t);
2071
2072 if (memory_object->pager == &vnode_pager_workaround) {
2073 return vnode_pager_data_unlock(memory_object,
2074 offset,
2075 size,
2076 desired_access);
2077 } else if (memory_object->pager == &device_pager_workaround) {
2078 return device_pager_data_unlock(memory_object,
2079 offset,
2080 size,
2081 desired_access);
2082 } else
2083#endif
2084 return dp_memory_object_data_unlock(memory_object,
2085 offset,
2086 size,
2087 desired_access);
2088
2089}
2090
2091/* Routine memory_object_synchronize */
2092kern_return_t memory_object_synchronize
2093(
2094 memory_object_t memory_object,
2095 memory_object_offset_t offset,
2096 vm_size_t size,
2097 vm_sync_t sync_flags
2098)
2099{
2100 extern kern_return_t dp_memory_object_data_synchronize(memory_object_t,
2101 memory_object_offset_t,
2102 vm_size_t,
2103 vm_sync_t);
2104#ifdef MACH_BSD
2105 extern kern_return_t vnode_pager_data_synchronize(memory_object_t,
2106 memory_object_offset_t,
2107 vm_size_t,
2108 vm_sync_t);
2109 extern kern_return_t device_pager_data_synchronize(memory_object_t,
2110 memory_object_offset_t,
2111 vm_size_t,
2112 vm_sync_t);
2113
2114 if (memory_object->pager == &vnode_pager_workaround) {
2115 return vnode_pager_synchronize(
2116 memory_object,
2117 offset,
2118 size,
2119 sync_flags);
2120 } else if (memory_object->pager == &device_pager_workaround) {
2121 return device_pager_synchronize(
2122 memory_object,
2123 offset,
2124 size,
2125 sync_flags);
2126 } else
2127#endif
2128 return dp_memory_object_synchronize(
2129 memory_object,
2130 offset,
2131 size,
2132 sync_flags);
2133}
2134
2135/* Routine memory_object_unmap */
2136kern_return_t memory_object_unmap
2137(
2138 memory_object_t memory_object
2139)
2140{
2141 extern kern_return_t dp_memory_object_unmap(memory_object_t);
2142#ifdef MACH_BSD
2143 extern kern_return_t vnode_pager_unmap(memory_object_t);
2144 extern kern_return_t device_pager_unmap(memory_object_t);
2145
2146 if (memory_object->pager == &vnode_pager_workaround) {
2147 return vnode_pager_unmap(memory_object);
2148 } else if (memory_object->pager == &device_pager_workaround) {
2149 return device_pager_unmap(memory_object);
2150 } else
2151#endif
2152 return dp_memory_object_unmap(memory_object);
2153}
2154
2155/* Routine memory_object_create */
2156kern_return_t memory_object_create
2157(
2158 memory_object_default_t default_memory_manager,
2159 vm_size_t new_memory_object_size,
2160 memory_object_t *new_memory_object
2161)
2162{
2163extern kern_return_t default_pager_memory_object_create(memory_object_default_t,
2164 vm_size_t,
2165 memory_object_t *);
2166
2167 return default_pager_memory_object_create(default_memory_manager,
2168 new_memory_object_size,
2169 new_memory_object);
2170}
 2171