1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28/*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53/*
54 */
55/*
56 * File: vm/memory_object.c
57 * Author: Michael Wayne Young
58 *
59 * External memory management interface control functions.
60 */
61
62#include <advisory_pageout.h>
63
64/*
65 * Interface dependencies:
66 */
67
68#include <mach/std_types.h> /* For pointer_t */
69#include <mach/mach_types.h>
70
71#include <mach/mig.h>
72#include <mach/kern_return.h>
73#include <mach/memory_object.h>
74#include <mach/memory_object_default.h>
75#include <mach/memory_object_control_server.h>
76#include <mach/host_priv_server.h>
77#include <mach/boolean.h>
78#include <mach/vm_prot.h>
79#include <mach/message.h>
80
81/*
82 * Implementation dependencies:
83 */
84#include <string.h> /* For memcpy() */
85
86#include <kern/xpr.h>
87#include <kern/host.h>
88#include <kern/thread.h> /* For current_thread() */
89#include <kern/ipc_mig.h>
90#include <kern/misc_protos.h>
91
92#include <vm/vm_object.h>
93#include <vm/vm_fault.h>
94#include <vm/memory_object.h>
95#include <vm/vm_page.h>
96#include <vm/vm_pageout.h>
97#include <vm/pmap.h> /* For pmap_clear_modify */
98#include <vm/vm_kern.h> /* For kernel_map, vm_move */
99#include <vm/vm_map.h> /* For vm_map_pageable */
100
101#if MACH_PAGEMAP
102#include <vm/vm_external.h>
103#endif /* MACH_PAGEMAP */
104
105
106memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
107vm_size_t memory_manager_default_cluster = 0;
108decl_mutex_data(, memory_manager_default_lock)
109
110/*
111 * Forward ref to file-local function:
112 */
113boolean_t
114vm_object_update(vm_object_t, vm_object_offset_t,
115 vm_size_t, memory_object_return_t, int, vm_prot_t);
116
117
118/*
119 * Routine: memory_object_should_return_page
120 *
121 * Description:
122 * Determine whether the given page should be returned,
123 * based on the page's state and on the given return policy.
124 *
125 * We should return the page if one of the following is true:
126 *
127 * 1. Page is dirty and should_return is not RETURN_NONE.
128 * 2. Page is precious and should_return is RETURN_ALL.
129 * 3. Should_return is RETURN_ANYTHING.
130 *
131 * As a side effect, m->dirty will be made consistent
132 * with pmap_is_modified(m), if should_return is not
133 * MEMORY_OBJECT_RETURN_NONE.
134 */
135
136#define memory_object_should_return_page(m, should_return) \
137 (should_return != MEMORY_OBJECT_RETURN_NONE && \
138 (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_addr))) || \
139 ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
140 (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
141
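/*
 * Worked example of the policy above (illustrative only): how a page's
 * dirty/precious state combines with the caller's should_return value.
 * Note that evaluating the macro may refresh m->dirty from
 * pmap_is_modified(), as described in the comment before it.
 *
 *	should_return			dirty	precious	page returned?
 *	MEMORY_OBJECT_RETURN_NONE	  -	   -		no
 *	MEMORY_OBJECT_RETURN_DIRTY	 yes	   -		yes
 *	MEMORY_OBJECT_RETURN_DIRTY	  no	  yes		no
 *	MEMORY_OBJECT_RETURN_ALL	  no	  yes		yes
 *	MEMORY_OBJECT_RETURN_ANYTHING	  no	   no		yes
 */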
142typedef int memory_object_lock_result_t;
143
144#define MEMORY_OBJECT_LOCK_RESULT_DONE 0
145#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
146#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
147#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
148
149memory_object_lock_result_t memory_object_lock_page(
150 vm_page_t m,
151 memory_object_return_t should_return,
152 boolean_t should_flush,
153 vm_prot_t prot);
154
155/*
156 * Routine: memory_object_lock_page
157 *
158 * Description:
159 * Perform the appropriate lock operations on the
160 * given page. See the description of
161 * "memory_object_lock_request" for the meanings
162 * of the arguments.
163 *
164 * Returns an indication that the operation
165 * completed, blocked, or that the page must
166 * be cleaned.
167 */
168memory_object_lock_result_t
169memory_object_lock_page(
170 vm_page_t m,
171 memory_object_return_t should_return,
172 boolean_t should_flush,
173 vm_prot_t prot)
174{
175 XPR(XPR_MEMORY_OBJECT,
176 "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
177 (integer_t)m, should_return, should_flush, prot, 0);
178
179 /*
180 * If we cannot change access to the page,
181 * either because a mapping is in progress
182 * (busy page) or because a mapping has been
183 * wired, then give up.
184 */
185
186 if (m->busy || m->cleaning)
187 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
188
189 /*
190 * Don't worry about pages for which the kernel
191 * does not have any data.
192 */
193
194 if (m->absent || m->error || m->restart) {
195 if(m->error && should_flush) {
196 /* dump the page, pager wants us to */
197 /* clean it up and there is no */
198 /* relevant data to return */
199 if(m->wire_count == 0) {
200 VM_PAGE_FREE(m);
201 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
202 }
203 } else {
204 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
205 }
206 }
207
208 assert(!m->fictitious);
209
210 if (m->wire_count != 0) {
211 /*
212 * If no change would take place
213 * anyway, return successfully.
214 *
215 * No change means:
216 * Not flushing AND
217 * No change to page lock [2 checks] AND
218 * Should not return page
219 *
220 * XXX This doesn't handle sending a copy of a wired
221 * XXX page to the pager, but that will require some
222 * XXX significant surgery.
223 */
224 if (!should_flush &&
225 (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
226 ! memory_object_should_return_page(m, should_return)) {
227
228 /*
229 * Restart page unlock requests,
230 * even though no change took place.
231 * [Memory managers may be expecting
232 * to see new requests.]
233 */
234 m->unlock_request = VM_PROT_NONE;
235 PAGE_WAKEUP(m);
236
237 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
238 }
239
240 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
241 }
242
243 /*
244 * If the page is to be flushed, allow
245 * that to be done as part of the protection.
246 */
247
248 if (should_flush)
249 prot = VM_PROT_ALL;
250
251 /*
252 * Set the page lock.
253 *
254 * If we are decreasing permission, do it now;
255 * let the fault handler take care of increases
256 * (pmap_page_protect may not increase protection).
257 */
258
259 if (prot != VM_PROT_NO_CHANGE) {
260 if ((m->page_lock ^ prot) & prot) {
261 pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot);
262 }
263#if 0
264 /* code associated with the vestigial
265 * memory_object_data_unlock
266 */
267 m->page_lock = prot;
268 m->lock_supplied = TRUE;
269 if (prot != VM_PROT_NONE)
270 m->unusual = TRUE;
271 else
272 m->unusual = FALSE;
273
274 /*
275 * Restart any past unlock requests, even if no
276 * change resulted. If the manager explicitly
277 * requested no protection change, then it is assumed
278 * to be remembering past requests.
279 */
280
281 m->unlock_request = VM_PROT_NONE;
282#endif /* 0 */
283 PAGE_WAKEUP(m);
284 }
285
286 /*
287 * Handle page returning.
288 */
289
290 if (memory_object_should_return_page(m, should_return)) {
291
292 /*
293 * If we weren't planning
294 * to flush the page anyway,
295 * we may need to remove the
296 * page from the pageout
297 * system and from physical
298 * maps now.
299 */
300
301 vm_page_lock_queues();
302 VM_PAGE_QUEUES_REMOVE(m);
303 vm_page_unlock_queues();
304
305 if (!should_flush)
306 pmap_page_protect(m->phys_addr, VM_PROT_NONE);
307
308 if (m->dirty)
309 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
310 else
311 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
312 }
313
314 /*
315 * Handle flushing
316 */
317
318 if (should_flush) {
319 VM_PAGE_FREE(m);
320 } else {
321 extern boolean_t vm_page_deactivate_hint;
322
323 /*
324 * XXX Make clean but not flush a paging hint,
325 * and deactivate the pages. This is a hack
326 * because it overloads flush/clean with
327 * implementation-dependent meaning. This only
328 * happens to pages that are already clean.
329 */
330
331 if (vm_page_deactivate_hint &&
332 (should_return != MEMORY_OBJECT_RETURN_NONE)) {
333 vm_page_lock_queues();
334 vm_page_deactivate(m);
335 vm_page_unlock_queues();
336 }
337 }
338
339 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
340}
341
342#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po) \
343MACRO_BEGIN \
344 \
345 register int i; \
346 register vm_page_t hp; \
347 \
348 vm_object_unlock(object); \
349 \
350 (void) memory_object_data_return(object->pager, \
351 po, \
352 data_cnt, \
353 (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
354 !should_flush); \
355 \
356 vm_object_lock(object); \
357MACRO_END
358
359/*
360 * Routine: memory_object_lock_request [user interface]
361 *
362 * Description:
363 * Control use of the data associated with the given
364 * memory object. For each page in the given range,
365 * perform the following operations, in order:
366 * 1) restrict access to the page (disallow
367 * forms specified by "prot");
368 * 2) return data to the manager (if "should_return"
369 * is RETURN_DIRTY and the page is dirty, or
370 * "should_return" is RETURN_ALL and the page
371 * is either dirty or precious); and,
372 * 3) flush the cached copy (if "should_flush"
373 * is asserted).
374 * The set of pages is defined by a starting offset
375 * ("offset") and size ("size"). Only pages with the
376 * same page alignment as the starting offset are
377 * considered.
378 *
379 * A single acknowledgement is sent (to the "reply_to"
380 * port) when these actions are complete. If successful,
381 * the naked send right for reply_to is consumed.
382 */
383
384kern_return_t
385memory_object_lock_request(
386 memory_object_control_t control,
387 memory_object_offset_t offset,
388 memory_object_size_t size,
389 memory_object_return_t should_return,
390 int flags,
391 vm_prot_t prot)
392{
393 vm_object_t object;
394 vm_object_offset_t original_offset = offset;
395 boolean_t should_flush=flags & MEMORY_OBJECT_DATA_FLUSH;
396
397 XPR(XPR_MEMORY_OBJECT,
398 "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
399 (integer_t)control, offset, size,
400 (((should_return&1)<<1)|should_flush), prot);
401
402 /*
403 * Check for bogus arguments.
404 */
405 object = memory_object_control_to_vm_object(control);
406 if (object == VM_OBJECT_NULL)
407 return (KERN_INVALID_ARGUMENT);
408
409 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
410 return (KERN_INVALID_ARGUMENT);
411
412 size = round_page(size);
413
414 /*
415 * Lock the object, and acquire a paging reference to
416 * prevent the memory_object reference from being released.
417 */
418 vm_object_lock(object);
419 vm_object_paging_begin(object);
420 offset -= object->paging_offset;
421
422 (void)vm_object_update(object,
423 offset, size, should_return, flags, prot);
424
425 vm_object_paging_end(object);
426 vm_object_unlock(object);
427
428 return (KERN_SUCCESS);
429}
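/*
 * Usage sketch (illustrative, not compiled): a kernel caller holding a
 * pager's control handle could push dirty pages in the first 64KB of the
 * object back to the pager and discard the cached copies as follows.
 * The "control" variable and the 64KB range are assumptions for the
 * example only.
 */
#if 0
	kern_return_t	kr;

	kr = memory_object_lock_request(control,
			(memory_object_offset_t) 0,
			(memory_object_size_t) (64 * 1024),
			MEMORY_OBJECT_RETURN_DIRTY,	/* return dirty pages */
			MEMORY_OBJECT_DATA_FLUSH,	/* then flush them */
			VM_PROT_NO_CHANGE);		/* leave protection alone */
	if (kr != KERN_SUCCESS)
		printf("memory_object_lock_request: %d\n", kr);
#endif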
430
431/*
432 * memory_object_release_name: [interface]
433 *
434 * Enforces name semantics on a memory_object reference count decrement.
435 * This routine should not be called unless the caller holds a name
436 * reference gained through memory_object_named_create or
437 * memory_object_rename.
438 * If the TERMINATE_IDLE flag is set, the call returns unless the
439 * reference count is 1, i.e. the object is idle with the name being
440 * the only remaining reference.
441 * If the decision is made to proceed, the named flag is cleared and
442 * the reference count is decremented. If the RESPECT_CACHE flag is
443 * set and the reference count has gone to zero, the memory_object is
444 * checked to see whether it is cacheable; otherwise, when the
445 * reference count reaches zero, it is simply terminated.
446 */
447
448kern_return_t
449memory_object_release_name(
450 memory_object_control_t control,
451 int flags)
452{
453 vm_object_t object;
454
455 object = memory_object_control_to_vm_object(control);
456 if (object == VM_OBJECT_NULL)
457 return (KERN_INVALID_ARGUMENT);
458
459 return vm_object_release_name(object, flags);
460}
461
462
463
464/*
465 * Routine: memory_object_destroy [user interface]
466 * Purpose:
467 * Shut down a memory object, despite the
468 * presence of address map (or other) references
469 * to the vm_object.
470 */
471kern_return_t
472memory_object_destroy(
473 memory_object_control_t control,
474 kern_return_t reason)
475{
476 vm_object_t object;
477
478 object = memory_object_control_to_vm_object(control);
479 if (object == VM_OBJECT_NULL)
480 return (KERN_INVALID_ARGUMENT);
481
482 return (vm_object_destroy(object, reason));
483}
484
485/*
486 * Routine: vm_object_sync
487 *
488 * Kernel internal function to synch out pages in a given
489 * range within an object to its memory manager. Much the
490 * same as memory_object_lock_request but page protection
491 * is not changed.
492 *
493 * If the should_flush and should_return flags are true pages
494 * are flushed, that is dirty & precious pages are written to
495 * the memory manager and then discarded. If should_return
496 * is false, only precious pages are returned to the memory
497 * manager.
498 *
499 * If should flush is false and should_return true, the memory
500 * manager's copy of the pages is updated. If should_return
501 * is also false, only the precious pages are updated. This
502 * last option is of limited utility.
503 *
504 * Returns:
505 * FALSE if no pages were returned to the pager
506 * TRUE otherwise.
507 */
508
509boolean_t
510vm_object_sync(
511 vm_object_t object,
512 vm_object_offset_t offset,
513 vm_size_t size,
514 boolean_t should_flush,
515 boolean_t should_return)
516{
517 boolean_t rv;
518
519 XPR(XPR_VM_OBJECT,
520 "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
521 (integer_t)object, offset, size, should_flush, should_return);
522
523 /*
524 * Lock the object, and acquire a paging reference to
525 * prevent the memory_object and control ports from
526 * being destroyed.
527 */
528 vm_object_lock(object);
529 vm_object_paging_begin(object);
530
531 rv = vm_object_update(object, offset, size,
532 (should_return) ?
533 MEMORY_OBJECT_RETURN_ALL :
534 MEMORY_OBJECT_RETURN_NONE,
535 (should_flush) ?
536 MEMORY_OBJECT_DATA_FLUSH : 0,
537 VM_PROT_NO_CHANGE);
538
539
540 vm_object_paging_end(object);
541 vm_object_unlock(object);
542 return rv;
543}
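/*
 * Usage sketch (illustrative, not compiled): an msync()-style path that
 * wants dirty pages written back to the pager but kept resident would
 * call vm_object_sync() with should_flush FALSE and should_return TRUE.
 * The object/offset/size variables are assumed to come from the caller.
 */
#if 0
	boolean_t	wrote_pages;

	wrote_pages = vm_object_sync(object, offset, size,
				     FALSE,	/* should_flush: keep cached copies */
				     TRUE);	/* should_return: write dirty pages */
#endif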
544
545/*
546 * Routine: vm_object_update
547 * Description:
548 * Work function for m_o_lock_request(), vm_o_sync().
549 *
550 * Called with object locked and paging ref taken.
551 */
552boolean_t
553vm_object_update(
554 register vm_object_t object,
555 register vm_object_offset_t offset,
556 register vm_size_t size,
557 memory_object_return_t should_return,
558 int flags,
559 vm_prot_t prot)
560{
561 register vm_page_t m;
562 vm_page_t holding_page;
563 vm_size_t original_size = size;
564 vm_object_offset_t paging_offset = 0;
565 vm_object_t copy_object;
566 vm_size_t data_cnt = 0;
567 vm_object_offset_t last_offset = offset;
568 memory_object_lock_result_t page_lock_result;
569 memory_object_lock_result_t pageout_action;
570 boolean_t data_returned = FALSE;
571 boolean_t update_cow;
572 boolean_t should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
573 boolean_t pending_pageout = FALSE;
574
575 /*
576 * To avoid blocking while scanning for pages, save
577 * dirty pages to be cleaned all at once.
578 *
579 * XXXO A similar strategy could be used to limit the
580 * number of times that a scan must be restarted for
581 * other reasons. Those pages that would require blocking
582 * could be temporarily collected in another list, or
583 * their offsets could be recorded in a small array.
584 */
585
586 /*
587 * XXX NOTE: May want to consider converting this to a page list
588 * XXX vm_map_copy interface. Need to understand object
589 * XXX coalescing implications before doing so.
590 */
591
592 update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
593 && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
594 !(flags & MEMORY_OBJECT_DATA_PURGE)))
595 || (flags & MEMORY_OBJECT_COPY_SYNC);
596
597
598 if((((copy_object = object->copy) != NULL) && update_cow) ||
599 (flags & MEMORY_OBJECT_DATA_SYNC)) {
600 vm_size_t i;
601 vm_size_t copy_size;
602 vm_object_offset_t copy_offset;
603 vm_prot_t prot;
604 vm_page_t page;
605 vm_page_t top_page;
606 kern_return_t error = 0;
607
608 if(copy_object != NULL) {
609 /* translate offset with respect to shadow's offset */
610 copy_offset = (offset >= copy_object->shadow_offset)?
611 offset - copy_object->shadow_offset :
612 (vm_object_offset_t) 0;
613 if(copy_offset > copy_object->size)
614 copy_offset = copy_object->size;
615
616 /* clip size with respect to shadow offset */
617 copy_size = (offset >= copy_object->shadow_offset) ?
618 size : size - (copy_object->shadow_offset - offset);
619
620 if(copy_size <= 0) {
621 copy_size = 0;
622 } else {
623 copy_size = ((copy_offset + copy_size)
624 <= copy_object->size) ?
625 copy_size : copy_object->size - copy_offset;
626 }
627 /* check for a copy_offset which is beyond the end of */
628 /* the copy_object */
629 if(copy_size < 0)
630 copy_size = 0;
631
632 copy_size+=offset;
633
634 vm_object_unlock(object);
635 vm_object_lock(copy_object);
636 } else {
637 copy_object = object;
638
639 copy_size = offset + size;
640 copy_offset = offset;
641 }
642
643 vm_object_paging_begin(copy_object);
644 for (i=copy_offset; i<copy_size; i+=PAGE_SIZE) {
645 RETRY_COW_OF_LOCK_REQUEST:
646 prot = VM_PROT_WRITE|VM_PROT_READ;
647 switch (vm_fault_page(copy_object, i,
648 VM_PROT_WRITE|VM_PROT_READ,
649 FALSE,
650 THREAD_UNINT,
651 copy_offset,
652 copy_offset+copy_size,
653 VM_BEHAVIOR_SEQUENTIAL,
654 &prot,
655 &page,
656 &top_page,
657 (int *)0,
658 &error,
659 FALSE,
660 FALSE, NULL, 0)) {
661
662 case VM_FAULT_SUCCESS:
663 if(top_page) {
664 vm_fault_cleanup(
665 page->object, top_page);
666 PAGE_WAKEUP_DONE(page);
667 vm_page_lock_queues();
668 if (!page->active && !page->inactive)
669 vm_page_activate(page);
670 vm_page_unlock_queues();
671 vm_object_lock(copy_object);
672 vm_object_paging_begin(copy_object);
673 } else {
674 PAGE_WAKEUP_DONE(page);
675 vm_page_lock_queues();
676 if (!page->active && !page->inactive)
677 vm_page_activate(page);
678 vm_page_unlock_queues();
679 }
680 break;
681 case VM_FAULT_RETRY:
682 prot = VM_PROT_WRITE|VM_PROT_READ;
683 vm_object_lock(copy_object);
684 vm_object_paging_begin(copy_object);
685 goto RETRY_COW_OF_LOCK_REQUEST;
686 case VM_FAULT_INTERRUPTED:
687 prot = VM_PROT_WRITE|VM_PROT_READ;
688 vm_object_lock(copy_object);
689 vm_object_paging_begin(copy_object);
690 goto RETRY_COW_OF_LOCK_REQUEST;
691 case VM_FAULT_MEMORY_SHORTAGE:
692 VM_PAGE_WAIT();
693 prot = VM_PROT_WRITE|VM_PROT_READ;
694 vm_object_lock(copy_object);
695 vm_object_paging_begin(copy_object);
696 goto RETRY_COW_OF_LOCK_REQUEST;
697 case VM_FAULT_FICTITIOUS_SHORTAGE:
698 vm_page_more_fictitious();
699 prot = VM_PROT_WRITE|VM_PROT_READ;
700 vm_object_lock(copy_object);
701 vm_object_paging_begin(copy_object);
702 goto RETRY_COW_OF_LOCK_REQUEST;
703 case VM_FAULT_MEMORY_ERROR:
704 vm_object_lock(object);
705 goto BYPASS_COW_COPYIN;
706 }
707
708 }
709 vm_object_paging_end(copy_object);
710 if(copy_object != object) {
711 vm_object_unlock(copy_object);
712 vm_object_lock(object);
713 }
714 }
715 if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
716 return KERN_SUCCESS;
717 }
718 if(((copy_object = object->copy) != NULL) &&
719 (flags & MEMORY_OBJECT_DATA_PURGE)) {
720 copy_object->shadow_severed = TRUE;
721 copy_object->shadowed = FALSE;
722 copy_object->shadow = NULL;
723 /* delete the ref the COW was holding on the target object */
724 vm_object_deallocate(object);
725 }
726BYPASS_COW_COPYIN:
727
728 for (;
729 size != 0;
730 size -= PAGE_SIZE, offset += PAGE_SIZE_64)
731 {
732 /*
733 * Limit the number of pages to be cleaned at once.
734 */
735 if (pending_pageout &&
736 data_cnt >= PAGE_SIZE * DATA_WRITE_MAX)
737 {
738 LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
739 pageout_action, paging_offset);
740 data_cnt = 0;
741 pending_pageout = FALSE;
742 }
743
744 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
745 page_lock_result = memory_object_lock_page(m, should_return,
746 should_flush, prot);
747
748 XPR(XPR_MEMORY_OBJECT,
749 "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
750 (integer_t)object, offset, page_lock_result, 0, 0);
751
752 switch (page_lock_result)
753 {
754 case MEMORY_OBJECT_LOCK_RESULT_DONE:
755 /*
756 * End of a cluster of dirty pages.
757 */
758 if(pending_pageout) {
759 LIST_REQ_PAGEOUT_PAGES(object,
760 data_cnt, pageout_action,
761 paging_offset);
762 data_cnt = 0;
763 pending_pageout = FALSE;
764 continue;
765 }
766 break;
767
768 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
769 /*
770 * Since it is necessary to block,
771 * clean any dirty pages now.
772 */
773 if(pending_pageout) {
774 LIST_REQ_PAGEOUT_PAGES(object,
775 data_cnt, pageout_action,
776 paging_offset);
777 pending_pageout = FALSE;
778 data_cnt = 0;
779 continue;
780 }
781
782 PAGE_SLEEP(object, m, THREAD_UNINT);
783 continue;
784
785 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
786 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
787 /*
788 * The clean and return cases are similar.
789 *
790 */
791
792 /*
793 * if this would form a discontiguous block,
794 * clean the old pages and start anew.
795 *
796 */
797
798 /*
799 * Mark the page busy since we unlock the
800 * object below.
801 */
802 m->busy = TRUE;
803 if (pending_pageout &&
804 (last_offset != offset ||
805 pageout_action != page_lock_result)) {
806 LIST_REQ_PAGEOUT_PAGES(object,
807 data_cnt, pageout_action,
808 paging_offset);
809 pending_pageout = FALSE;
810 data_cnt = 0;
811 }
812 m->busy = FALSE;
813 holding_page = VM_PAGE_NULL;
814 if(m->cleaning) {
815 PAGE_SLEEP(object, m, THREAD_UNINT);
816 continue;
817 }
818 if(!pending_pageout) {
819 pending_pageout = TRUE;
820 pageout_action = page_lock_result;
821 paging_offset = offset;
822 }
823 if (should_flush) {
824 vm_page_lock_queues();
825 m->list_req_pending = TRUE;
826 m->cleaning = TRUE;
827 m->busy = TRUE;
828 m->pageout = TRUE;
829 vm_page_wire(m);
830 vm_page_unlock_queues();
831 } else {
832 /*
833 * Clean but do not flush
834 */
835 vm_page_lock_queues();
836 m->list_req_pending = TRUE;
837 m->cleaning = TRUE;
838 vm_page_unlock_queues();
839
840 }
841 vm_object_unlock(object);
842
843
844 data_cnt += PAGE_SIZE;
845 last_offset = offset + PAGE_SIZE_64;
846 data_returned = TRUE;
847
848 vm_object_lock(object);
849 break;
850 }
851 break;
852 }
853 }
854
855 /*
856 * We have completed the scan for applicable pages.
857 * Clean any pages that have been saved.
858 */
859 if (pending_pageout) {
860 LIST_REQ_PAGEOUT_PAGES(object,
861 data_cnt, pageout_action, paging_offset);
862 }
863 return (data_returned);
864}
865
866/*
867 * Routine: memory_object_synchronize_completed [user interface]
868 *
869 * Tell the kernel that previously synchronized data
870 * (memory_object_synchronize) has been queued or placed on the
871 * backing storage.
872 *
873 * Note: there may be multiple synchronize requests for a given
874 * memory object outstanding but they will not overlap.
875 */
876
877kern_return_t
878memory_object_synchronize_completed(
879 memory_object_control_t control,
880 memory_object_offset_t offset,
881 vm_offset_t length)
882{
883 vm_object_t object;
884 msync_req_t msr;
885
886 XPR(XPR_MEMORY_OBJECT,
887 "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
888 (integer_t)object, offset, length, 0, 0);
889
890 /*
891 * Look for bogus arguments
892 */
893
894 object = memory_object_control_to_vm_object(control);
895 if (object == VM_OBJECT_NULL)
896 return (KERN_INVALID_ARGUMENT);
897
898 vm_object_lock(object);
899
900/*
901 * search for sync request structure
902 */
903 queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
904 if (msr->offset == offset && msr->length == length) {
905 queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
906 break;
907 }
908 }/* queue_iterate */
909
910 if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
911 vm_object_unlock(object);
912 return KERN_INVALID_ARGUMENT;
913 }
914
915 msr_lock(msr);
916 vm_object_unlock(object);
917 msr->flag = VM_MSYNC_DONE;
918 msr_unlock(msr);
919 thread_wakeup((event_t) msr);
920
921 return KERN_SUCCESS;
922}/* memory_object_synchronize_completed */
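/*
 * Usage sketch (illustrative, not compiled): a pager that has finished
 * committing the range handed to it by memory_object_synchronize()
 * acknowledges completion with the same offset and length.
 */
#if 0
	(void) memory_object_synchronize_completed(control, offset, length);
#endif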
923
924static kern_return_t
925vm_object_set_attributes_common(
926 vm_object_t object,
927 boolean_t may_cache,
928 memory_object_copy_strategy_t copy_strategy,
929 boolean_t temporary,
930 vm_size_t cluster_size,
931 boolean_t silent_overwrite,
932 boolean_t advisory_pageout)
933{
934 boolean_t object_became_ready;
935
936 XPR(XPR_MEMORY_OBJECT,
937 "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
938 (integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
939
940 if (object == VM_OBJECT_NULL)
941 return(KERN_INVALID_ARGUMENT);
942
943 /*
944 * Verify the attributes of importance
945 */
946
947 switch(copy_strategy) {
948 case MEMORY_OBJECT_COPY_NONE:
949 case MEMORY_OBJECT_COPY_DELAY:
950 break;
951 default:
952 return(KERN_INVALID_ARGUMENT);
953 }
954
955#if !ADVISORY_PAGEOUT
956 if (silent_overwrite || advisory_pageout)
957 return(KERN_INVALID_ARGUMENT);
958
959#endif /* !ADVISORY_PAGEOUT */
960 if (may_cache)
961 may_cache = TRUE;
962 if (temporary)
963 temporary = TRUE;
964 if (cluster_size != 0) {
965 int pages_per_cluster;
966 pages_per_cluster = atop(cluster_size);
967 /*
968 * Cluster size must be integral multiple of page size,
969 * and be a power of 2 number of pages.
970 */
971 if ((cluster_size & (PAGE_SIZE-1)) ||
972 ((pages_per_cluster-1) & pages_per_cluster))
973 return KERN_INVALID_ARGUMENT;
974 }
975
976 vm_object_lock(object);
977
978 /*
979 * Copy the attributes
980 */
981 assert(!object->internal);
982 object_became_ready = !object->pager_ready;
983 object->copy_strategy = copy_strategy;
984 object->can_persist = may_cache;
985 object->temporary = temporary;
986 object->silent_overwrite = silent_overwrite;
987 object->advisory_pageout = advisory_pageout;
988 if (cluster_size == 0)
989 cluster_size = PAGE_SIZE;
990 object->cluster_size = cluster_size;
991
992 assert(cluster_size >= PAGE_SIZE &&
993 cluster_size % PAGE_SIZE == 0);
994
995 /*
996 * Wake up anyone waiting for the ready attribute
997 * to become asserted.
998 */
999
1000 if (object_became_ready) {
1001 object->pager_ready = TRUE;
1002 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
1003 }
1004
1005 vm_object_unlock(object);
1006
1007 return(KERN_SUCCESS);
1008}
1009
1010/*
1011 * Set the memory object attribute as provided.
1012 *
1013 * XXX This routine cannot be completed until the vm_msync, clean
1014 * in place, and cluster work is completed. See ifdef notyet
1015 * below and note that vm_object_set_attributes_common()
1016 * may have to be expanded.
1017 */
1018kern_return_t
1019memory_object_change_attributes(
1020 memory_object_control_t control,
1021 memory_object_flavor_t flavor,
1022 memory_object_info_t attributes,
1023 mach_msg_type_number_t count)
1024{
1025 vm_object_t object;
1026 kern_return_t result = KERN_SUCCESS;
1027 boolean_t temporary;
1028 boolean_t may_cache;
1029 boolean_t invalidate;
1030 vm_size_t cluster_size;
1031 memory_object_copy_strategy_t copy_strategy;
1032 boolean_t silent_overwrite;
1033 boolean_t advisory_pageout;
1034
1035 object = memory_object_control_to_vm_object(control);
1036 if (object == VM_OBJECT_NULL)
1037 return (KERN_INVALID_ARGUMENT);
1038
1039 vm_object_lock(object);
1040
1041 temporary = object->temporary;
1042 may_cache = object->can_persist;
1043 copy_strategy = object->copy_strategy;
1044 silent_overwrite = object->silent_overwrite;
1045 advisory_pageout = object->advisory_pageout;
1046#if notyet
1047 invalidate = object->invalidate;
1048#endif
1049 cluster_size = object->cluster_size;
1050 vm_object_unlock(object);
1051
1052 switch (flavor) {
1053 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1054 {
1055 old_memory_object_behave_info_t behave;
1056
1057 if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1058 result = KERN_INVALID_ARGUMENT;
1059 break;
1060 }
1061
1062 behave = (old_memory_object_behave_info_t) attributes;
1063
1064 temporary = behave->temporary;
1065 invalidate = behave->invalidate;
1066 copy_strategy = behave->copy_strategy;
1067
1068 break;
1069 }
1070
1071 case MEMORY_OBJECT_BEHAVIOR_INFO:
1072 {
1073 memory_object_behave_info_t behave;
1074
1075 if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1076 result = KERN_INVALID_ARGUMENT;
1077 break;
1078 }
1079
1080 behave = (memory_object_behave_info_t) attributes;
1081
1082 temporary = behave->temporary;
1083 invalidate = behave->invalidate;
1084 copy_strategy = behave->copy_strategy;
1085 silent_overwrite = behave->silent_overwrite;
1086 advisory_pageout = behave->advisory_pageout;
1087 break;
1088 }
1089
1090 case MEMORY_OBJECT_PERFORMANCE_INFO:
1091 {
1092 memory_object_perf_info_t perf;
1093
1094 if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
1095 result = KERN_INVALID_ARGUMENT;
1096 break;
1097 }
1098
1099 perf = (memory_object_perf_info_t) attributes;
1100
1101 may_cache = perf->may_cache;
1102 cluster_size = round_page(perf->cluster_size);
1103
1104 break;
1105 }
1106
1107 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1108 {
1109 old_memory_object_attr_info_t attr;
1110
1111 if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1112 result = KERN_INVALID_ARGUMENT;
1113 break;
1114 }
1115
1116 attr = (old_memory_object_attr_info_t) attributes;
1117
1118 may_cache = attr->may_cache;
1119 copy_strategy = attr->copy_strategy;
1120 cluster_size = page_size;
1121
1122 break;
1123 }
1124
1125 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1126 {
1127 memory_object_attr_info_t attr;
1128
1129 if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
1130 result = KERN_INVALID_ARGUMENT;
1131 break;
1132 }
1133
1134 attr = (memory_object_attr_info_t) attributes;
1135
1136 copy_strategy = attr->copy_strategy;
1137 may_cache = attr->may_cache_object;
1138 cluster_size = attr->cluster_size;
1139 temporary = attr->temporary;
1140
1141 break;
1142 }
1143
1144 default:
1145 result = KERN_INVALID_ARGUMENT;
1146 break;
1147 }
1148
1149 if (result != KERN_SUCCESS)
1150 return(result);
1151
1152 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
1153 copy_strategy = MEMORY_OBJECT_COPY_DELAY;
1154 temporary = TRUE;
1155 } else {
1156 temporary = FALSE;
1157 }
1158
1159 /*
1160 * XXX may_cache may become a tri-valued variable to handle
1161 * XXX uncache if not in use.
1162 */
1163 return (vm_object_set_attributes_common(object,
1164 may_cache,
1165 copy_strategy,
1166 temporary,
1167 cluster_size,
1168 silent_overwrite,
1169 advisory_pageout));
1170}
1171
1172kern_return_t
1173memory_object_get_attributes(
1174 memory_object_control_t control,
1175 memory_object_flavor_t flavor,
1176 memory_object_info_t attributes, /* pointer to OUT array */
1177 mach_msg_type_number_t *count) /* IN/OUT */
1178{
1179 kern_return_t ret = KERN_SUCCESS;
1180 vm_object_t object;
1181
1182 object = memory_object_control_to_vm_object(control);
1183 if (object == VM_OBJECT_NULL)
1184 return (KERN_INVALID_ARGUMENT);
1185
1186 vm_object_lock(object);
1187
1188 switch (flavor) {
1189 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
1190 {
1191 old_memory_object_behave_info_t behave;
1192
1193 if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1194 ret = KERN_INVALID_ARGUMENT;
1195 break;
1196 }
1197
1198 behave = (old_memory_object_behave_info_t) attributes;
1199 behave->copy_strategy = object->copy_strategy;
1200 behave->temporary = object->temporary;
1201#if notyet /* remove when vm_msync complies and clean in place fini */
1202 behave->invalidate = object->invalidate;
1203#else
1204 behave->invalidate = FALSE;
1205#endif
1206
1207 *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1208 break;
1209 }
1210
1211 case MEMORY_OBJECT_BEHAVIOR_INFO:
1212 {
1213 memory_object_behave_info_t behave;
1214
1215 if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
1216 ret = KERN_INVALID_ARGUMENT;
1217 break;
1218 }
1219
1220 behave = (memory_object_behave_info_t) attributes;
1221 behave->copy_strategy = object->copy_strategy;
1222 behave->temporary = object->temporary;
1223#if notyet /* remove when vm_msync complies and clean in place fini */
1224 behave->invalidate = object->invalidate;
1225#else
1226 behave->invalidate = FALSE;
1227#endif
1228 behave->advisory_pageout = object->advisory_pageout;
1229 behave->silent_overwrite = object->silent_overwrite;
1230 *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
1231 break;
1232 }
1233
1234 case MEMORY_OBJECT_PERFORMANCE_INFO:
1235 {
1236 memory_object_perf_info_t perf;
1237
1238 if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
1239 ret = KERN_INVALID_ARGUMENT;
1240 break;
1241 }
1242
1243 perf = (memory_object_perf_info_t) attributes;
1244 perf->cluster_size = object->cluster_size;
1245 perf->may_cache = object->can_persist;
1246
1247 *count = MEMORY_OBJECT_PERF_INFO_COUNT;
1248 break;
1249 }
1250
1251 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
1252 {
1253 old_memory_object_attr_info_t attr;
1254
1255 if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
1256 ret = KERN_INVALID_ARGUMENT;
1257 break;
1258 }
1259
1260 attr = (old_memory_object_attr_info_t) attributes;
1261 attr->may_cache = object->can_persist;
1262 attr->copy_strategy = object->copy_strategy;
1263
1264 *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
1265 break;
1266 }
1267
1268 case MEMORY_OBJECT_ATTRIBUTE_INFO:
1269 {
1270 memory_object_attr_info_t attr;
1271
1272 if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
1273 ret = KERN_INVALID_ARGUMENT;
1274 break;
1275 }
1276
1277 attr = (memory_object_attr_info_t) attributes;
1278 attr->copy_strategy = object->copy_strategy;
1279 attr->cluster_size = object->cluster_size;
1280 attr->may_cache_object = object->can_persist;
1281 attr->temporary = object->temporary;
1282
1283 *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
1284 break;
1285 }
1286
1287 default:
1288 ret = KERN_INVALID_ARGUMENT;
1289 break;
1290 }
1291
1292 vm_object_unlock(object);
1293
1294 return(ret);
1295}
1296
1297
1298/*
1299 * Routine: memory_object_upl_request [interface]
1300 * Purpose:
1301 * Cause the population of a portion of a vm_object.
1302 * Depending on the nature of the request, the pages
1303 * returned may contain valid data or be uninitialized.
1304 *
1305 */
1306
1307kern_return_t
1308memory_object_upl_request(
1309 memory_object_control_t control,
1310 memory_object_offset_t offset,
1311 vm_size_t size,
1312 upl_t *upl_ptr,
1313 upl_page_info_array_t user_page_list,
1314 unsigned int *page_list_count,
1315 int cntrl_flags)
1316{
1317 vm_object_t object;
1318
1319 object = memory_object_control_to_vm_object(control);
1320 if (object == VM_OBJECT_NULL)
1321 return (KERN_INVALID_ARGUMENT);
1322
1323 return vm_object_upl_request(object,
1324 offset,
1325 size,
1326 upl_ptr,
1327 user_page_list,
1328 page_list_count,
1329 cntrl_flags);
1330}
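/*
 * Usage sketch (illustrative, not compiled): populate one page of the
 * object into a UPL; the caller must later commit or abort the UPL
 * through the upl interfaces.  The control/offset variables and the
 * cntrl_flags value used here are assumptions for the example.
 */
#if 0
	upl_t		upl;
	upl_page_info_t	page_list[1];
	unsigned int	count = 1;
	kern_return_t	kr;

	kr = memory_object_upl_request(control, offset, PAGE_SIZE,
				       &upl, page_list, &count,
				       UPL_COPYOUT_FROM);
#endif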
1331
1332/*
1333 * Routine: memory_object_super_upl_request [interface]
1334 * Purpose:
1335 * Cause the population of a portion of a vm_object
1336 * in much the same way as memory_object_upl_request.
1337 * Depending on the nature of the request, the pages
1338 * returned may contain valid data or be uninitialized.
1339 * However, the region may be expanded up to the super
1340 * cluster size provided.
1341 */
1342
1343kern_return_t
1344memory_object_super_upl_request(
1345 memory_object_control_t control,
1346 memory_object_offset_t offset,
1347 vm_size_t size,
1348 vm_size_t super_cluster,
1349 upl_t *upl,
1350 upl_page_info_t *user_page_list,
1351 unsigned int *page_list_count,
1352 int cntrl_flags)
1353{
1354 vm_object_t object;
1355
1356 object = memory_object_control_to_vm_object(control);
1357 if (object == VM_OBJECT_NULL)
1358 return (KERN_INVALID_ARGUMENT);
1359
1360 return vm_object_super_upl_request(object,
1361 offset,
1362 size,
1363 super_cluster,
1364 upl,
1365 user_page_list,
1366 page_list_count,
1367 cntrl_flags);
1368}
1369
1370int vm_stat_discard_cleared_reply = 0;
1371int vm_stat_discard_cleared_unset = 0;
1372int vm_stat_discard_cleared_too_late = 0;
1373
1374
1375
1376/*
1377 * Routine: host_default_memory_manager [interface]
1378 * Purpose:
1379 * set/get the default memory manager port and default cluster
1380 * size.
1381 *
1382 * If successful, consumes the supplied naked send right.
1383 */
1384kern_return_t
1385host_default_memory_manager(
1386 host_priv_t host_priv,
1387 memory_object_default_t *default_manager,
1388 vm_size_t cluster_size)
1389{
1390 memory_object_default_t current_manager;
1391 memory_object_default_t new_manager;
1392 memory_object_default_t returned_manager;
1393
1394 if (host_priv == HOST_PRIV_NULL)
1395 return(KERN_INVALID_HOST);
1396
1397 assert(host_priv == &realhost);
1398
1399 new_manager = *default_manager;
1400 mutex_lock(&memory_manager_default_lock);
1401 current_manager = memory_manager_default;
1402
1403 if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1404 /*
1405 * Retrieve the current value.
1406 */
1407 memory_object_default_reference(current_manager);
1408 returned_manager = current_manager;
1409 } else {
1410 /*
1411 * Retrieve the current value,
1412 * and replace it with the supplied value.
1413 * We return the old reference to the caller
1414 * but we have to take a reference on the new
1415 * one.
1416 */
1417
1418 returned_manager = current_manager;
1419 memory_manager_default = new_manager;
1420 memory_object_default_reference(new_manager);
1421
1422 if (cluster_size % PAGE_SIZE != 0) {
1423#if 0
1424 mutex_unlock(&memory_manager_default_lock);
1425 return KERN_INVALID_ARGUMENT;
1426#else
1427 cluster_size = round_page(cluster_size);
1428#endif
1429 }
1430 memory_manager_default_cluster = cluster_size;
1431
1432 /*
1433 * In case anyone's been waiting for a memory
1434 * manager to be established, wake them up.
1435 */
1436
1437 thread_wakeup((event_t) &memory_manager_default);
1438 }
1439
1440 mutex_unlock(&memory_manager_default_lock);
1441
1442 *default_manager = returned_manager;
1443 return(KERN_SUCCESS);
1444}
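/*
 * Usage sketch (illustrative, not compiled): a default pager registering
 * itself from a context that holds the host-privileged port.  On return,
 * "dmm" holds the previous manager; passing MEMORY_OBJECT_DEFAULT_NULL
 * instead would just return a reference to the current one.  The
 * "my_pager_port" name and the 16-page cluster size are assumptions.
 */
#if 0
	memory_object_default_t	dmm = my_pager_port;
	kern_return_t		kr;

	kr = host_default_memory_manager(host_priv_self(), &dmm,
					 16 * PAGE_SIZE);
#endif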
1445
1446/*
1447 * Routine: memory_manager_default_reference
1448 * Purpose:
1449 * Returns a naked send right for the default
1450 * memory manager. The returned right is always
1451 * valid (not IP_NULL or IP_DEAD).
1452 */
1453
1454__private_extern__ memory_object_default_t
1455memory_manager_default_reference(
1456 vm_size_t *cluster_size)
1457{
1458 memory_object_default_t current_manager;
1459
1460 mutex_lock(&memory_manager_default_lock);
1461 current_manager = memory_manager_default;
1462 while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
1463 wait_result_t res;
1464
1465 res = thread_sleep_mutex((event_t) &memory_manager_default,
1466 &memory_manager_default_lock,
1467 THREAD_UNINT);
1468 assert(res == THREAD_AWAKENED);
1469 current_manager = memory_manager_default;
1470 }
1471 memory_object_default_reference(current_manager);
1472 *cluster_size = memory_manager_default_cluster;
1473 mutex_unlock(&memory_manager_default_lock);
1474
1475 return current_manager;
1476}
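/*
 * Usage sketch (illustrative, not compiled): take a reference on the
 * default manager, use it (e.g. with memory_object_create()), and drop
 * the reference when done.  This call blocks until a default manager
 * has been registered.
 */
#if 0
	vm_size_t		cluster;
	memory_object_default_t	dmm;

	dmm = memory_manager_default_reference(&cluster);
	/* ... use dmm ... */
	memory_object_default_deallocate(dmm);
#endif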
1477
1478/*
1479 * Routine: memory_manager_default_check
1480 *
1481 * Purpose:
1482 * Check whether a default memory manager has been set
1483 * up yet, or not. Returns KERN_SUCCESS if dmm exists,
1484 * and KERN_FAILURE if dmm does not exist.
1485 *
1486 * If there is no default memory manager, log an error,
1487 * but only the first time.
1488 *
1489 */
1490__private_extern__ kern_return_t
1491memory_manager_default_check(void)
1492{
1493 memory_object_default_t current;
1494
1495 mutex_lock(&memory_manager_default_lock);
1496 current = memory_manager_default;
1497 if (current == MEMORY_OBJECT_DEFAULT_NULL) {
1498 static boolean_t logged; /* initialized to 0 */
1499 boolean_t complain = !logged;
1500 logged = TRUE;
1501 mutex_unlock(&memory_manager_default_lock);
1502 if (complain)
1503 printf("Warning: No default memory manager\n");
1504 return(KERN_FAILURE);
1505 } else {
1506 mutex_unlock(&memory_manager_default_lock);
1507 return(KERN_SUCCESS);
1508 }
1509}
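/*
 * Usage sketch (illustrative, not compiled): callers about to create
 * pageable anonymous memory can probe for a default pager first and
 * back out gracefully instead of blocking.
 */
#if 0
	if (memory_manager_default_check() != KERN_SUCCESS)
		return KERN_FAILURE;	/* no default pager registered yet */
#endif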
1510
1511__private_extern__ void
1512memory_manager_default_init(void)
1513{
1514 memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
1515 mutex_init(&memory_manager_default_lock, ETAP_VM_MEMMAN);
1516}
1517
1518
1519void
1520memory_object_deactivate_pages(
1521 vm_object_t object,
1522 vm_object_offset_t offset,
1523 vm_object_size_t size,
1524 boolean_t kill_page)
1525{
1526 vm_object_t orig_object;
1527 int pages_moved = 0;
1528 int pages_found = 0;
1529
1530 /*
1531 * entered with object lock held, acquire a paging reference to
1532 * prevent the memory_object and control ports from
1533 * being destroyed.
1534 */
1535 orig_object = object;
1536
1537 for (;;) {
1538 register vm_page_t m;
1539 vm_object_offset_t toffset;
1540 vm_object_size_t tsize;
1541
1542 vm_object_paging_begin(object);
1543 vm_page_lock_queues();
1544
1545 for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {
1546
1547 if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {
1548
1549 pages_found++;
1550
1551 if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {
1552
1553 m->reference = FALSE;
1554 pmap_clear_reference(m->phys_addr);
1555
1556 if ((kill_page) && (object->internal)) {
1557 m->precious = FALSE;
1558 m->dirty = FALSE;
1559 pmap_clear_modify(m->phys_addr);
1560 vm_external_state_clr(object->existence_map, offset);
1561 }
1562 VM_PAGE_QUEUES_REMOVE(m);
1563
1564 if(m->zero_fill) {
1565 queue_enter_first(
1566 &vm_page_queue_zf,
1567 m, vm_page_t, pageq);
1568 } else {
1569 queue_enter_first(
1570 &vm_page_queue_inactive,
1571 m, vm_page_t, pageq);
1572 }
1573
1574 m->inactive = TRUE;
1575 if (!m->fictitious)
1576 vm_page_inactive_count++;
1577
1578 pages_moved++;
1579 }
1580 }
1581 }
1582 vm_page_unlock_queues();
1583 vm_object_paging_end(object);
1584
1585 if (object->shadow) {
1586 vm_object_t tmp_object;
1587
1588 kill_page = 0;
1589
1590 offset += object->shadow_offset;
1591
1592 tmp_object = object->shadow;
1593 vm_object_lock(tmp_object);
1594
1595 if (object != orig_object)
1596 vm_object_unlock(object);
1597 object = tmp_object;
1598 } else
1599 break;
1600 }
1601 if (object != orig_object)
1602 vm_object_unlock(object);
1603}
1604
1605/* Allow manipulation of individual page state. This is actually part of */
1606/* the UPL regimen but takes place on the object rather than on a UPL */
1607
1608kern_return_t
1609memory_object_page_op(
1610 memory_object_control_t control,
1611 memory_object_offset_t offset,
1612 int ops,
1613 vm_offset_t *phys_entry,
1614 int *flags)
1615{
1616 vm_object_t object;
1617 vm_page_t dst_page;
1618
1619
1620 object = memory_object_control_to_vm_object(control);
1621 if (object == VM_OBJECT_NULL)
1622 return (KERN_INVALID_ARGUMENT);
1623
1624 vm_object_lock(object);
1625
1626 if(ops & UPL_POP_PHYSICAL) {
1627 if(object->phys_contiguous) {
1628 if (phys_entry) {
1629 *phys_entry = (vm_offset_t)
1630 object->shadow_offset;
1631 }
1632 vm_object_unlock(object);
1633 return KERN_SUCCESS;
1634 } else {
1635 vm_object_unlock(object);
1636 return KERN_INVALID_OBJECT;
1637 }
1638 }
1639
1640 while(TRUE) {
1641 if(object->phys_contiguous) {
1642 vm_object_unlock(object);
1643 return KERN_INVALID_OBJECT;
1644 }
1645
1646 if((dst_page = vm_page_lookup(object,offset)) == VM_PAGE_NULL) {
1647 vm_object_unlock(object);
1648 return KERN_FAILURE;
1649 }
1650
1651 /* Sync up on getting the busy bit */
1652 if((dst_page->busy || dst_page->cleaning) &&
1653 (((ops & UPL_POP_SET) &&
1654 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
1655 /* someone else is playing with the page, we will */
1656 /* have to wait */
1657 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
1658 continue;
1659 }
1660
1661 if (ops & UPL_POP_DUMP) {
1662 vm_page_lock_queues();
1663 vm_page_free(dst_page);
1664 vm_page_unlock_queues();
1665 break;
1666 }
1667
1668 if (flags) {
1669 *flags = 0;
1670
1671 /* Get the condition of flags before requested ops */
1672 /* are undertaken */
1673
1674 if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
1675 if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
1676 if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
1677 if(dst_page->absent) *flags |= UPL_POP_ABSENT;
1678 if(dst_page->busy) *flags |= UPL_POP_BUSY;
1679 }
1680 if (phys_entry)
1681 *phys_entry = dst_page->phys_addr;
1682
1683 /* The caller should have made a call either contingent with */
1684 /* or prior to this call to set UPL_POP_BUSY */
1685 if(ops & UPL_POP_SET) {
1686 /* The protection granted with this assert will */
1687 /* not be complete. If the caller violates the */
1688 /* convention and attempts to change page state */
1689 /* without first setting busy we may not see it */
1690 /* because the page may already be busy. However */
1691 /* if such violations occur we will assert sooner */
1692 /* or later. */
1693 assert(dst_page->busy || (ops & UPL_POP_BUSY));
1694 if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
1695 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
1696 if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
1697 if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
1698 if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
1699 }
1700
1701 if(ops & UPL_POP_CLR) {
1702 assert(dst_page->busy);
1703 if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
1704 if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
1705 if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
1706 if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
1707 if (ops & UPL_POP_BUSY) {
1708 dst_page->busy = FALSE;
1709 PAGE_WAKEUP(dst_page);
1710 }
1711 }
1712 break;
1713 }
1714
1715 vm_object_unlock(object);
1716 return KERN_SUCCESS;
1717
1718}
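/*
 * Usage sketch (illustrative, not compiled) of the busy-bit convention
 * described above: look up the physical address while setting the busy
 * bit, operate on the page, then clear busy to wake any waiters.  The
 * control/offset variables are assumptions for the example.
 */
#if 0
	vm_offset_t	phys;
	int		page_flags;
	kern_return_t	kr;

	kr = memory_object_page_op(control, offset,
				   UPL_POP_SET | UPL_POP_BUSY,
				   &phys, &page_flags);
	if (kr == KERN_SUCCESS) {
		/* ... operate on the physical page ... */
		(void) memory_object_page_op(control, offset,
					     UPL_POP_CLR | UPL_POP_BUSY,
					     (vm_offset_t *) 0, (int *) 0);
	}
#endif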
1719
1720static zone_t mem_obj_control_zone;
1721
1722__private_extern__ void
1723memory_object_control_bootstrap(void)
1724{
1725 int i;
1726
1727 i = (vm_size_t) sizeof (struct memory_object_control);
1728 mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
1729 return;
1730}
1731
1732__private_extern__ memory_object_control_t
1733memory_object_control_allocate(
1734 vm_object_t object)
1735{
1736 memory_object_control_t control;
1737
1738 control = (memory_object_control_t)zalloc(mem_obj_control_zone);
1739 if (control != MEMORY_OBJECT_CONTROL_NULL)
1740 control->object = object;
1741 return (control);
1742}
1743
1744__private_extern__ void
1745memory_object_control_collapse(
1746 memory_object_control_t control,
1747 vm_object_t object)
1748{
1749 assert((control->object != VM_OBJECT_NULL) &&
1750 (control->object != object));
1751 control->object = object;
1752}
1753
1754__private_extern__ vm_object_t
1755memory_object_control_to_vm_object(
1756 memory_object_control_t control)
1757{
1758 if (control == MEMORY_OBJECT_CONTROL_NULL)
1759 return VM_OBJECT_NULL;
1760
1761 return (control->object);
1762}
1763
1764memory_object_control_t
1765convert_port_to_mo_control(
1766 mach_port_t port)
1767{
1768 return MEMORY_OBJECT_CONTROL_NULL;
1769}
1770
1771
1772mach_port_t
1773convert_mo_control_to_port(
1774 memory_object_control_t control)
1775{
1776 return MACH_PORT_NULL;
1777}
1778
1779void
1780memory_object_control_reference(
1781 memory_object_control_t control)
1782{
1783 return;
1784}
1785
1786/*
1787 * We only ever issue one of these references, so kill it
1788 * when that gets released (should switch to real reference
1789 * counting in true port-less EMMI).
1790 */
1791void
1792memory_object_control_deallocate(
1793 memory_object_control_t control)
1794{
1795 zfree(mem_obj_control_zone, (vm_offset_t)control);
1796}
1797
1798void
1799memory_object_control_disable(
1800 memory_object_control_t control)
1801{
1802 assert(control->object != VM_OBJECT_NULL);
1803 control->object = VM_OBJECT_NULL;
1804}
1805
1806void
1807memory_object_default_reference(
1808 memory_object_default_t dmm)
1809{
1810 ipc_port_make_send(dmm);
1811}
1812
1813void
1814memory_object_default_deallocate(
1815 memory_object_default_t dmm)
1816{
1817 ipc_port_release_send(dmm);
1818}
1819
1820memory_object_t
1821convert_port_to_memory_object(
1822 mach_port_t port)
1823{
1824 return (MEMORY_OBJECT_NULL);
1825}
1826
1827
1828mach_port_t
1829convert_memory_object_to_port(
1830 memory_object_t object)
1831{
1832 return (MACH_PORT_NULL);
1833}
1834
1835#ifdef MACH_BSD
1836/* remove after component interface available */
1837extern int vnode_pager_workaround;
1838extern int device_pager_workaround;
1839#endif
1840
1841
1842/* Routine memory_object_reference */
1843void memory_object_reference(
1844 memory_object_t memory_object)
1845{
1846extern void dp_memory_object_reference(memory_object_t);
1847
1848#ifdef MACH_BSD
1849 extern void vnode_pager_reference(memory_object_t);
1850 extern void device_pager_reference(memory_object_t);
1851
1852 if(memory_object->pager == &vnode_pager_workaround) {
1853 vnode_pager_reference(memory_object);
1854 } else if(memory_object->pager == &device_pager_workaround) {
1855 device_pager_reference(memory_object);
1856 } else
1857#endif
1858 dp_memory_object_reference(memory_object);
1859}
1860
1861/* Routine memory_object_deallocate */
1862void memory_object_deallocate(
1863 memory_object_t memory_object)
1864{
1865extern void dp_memory_object_deallocate(memory_object_t);
1866
1867#ifdef MACH_BSD
1868 extern void vnode_pager_deallocate(memory_object_t);
1869 extern void device_pager_deallocate(memory_object_t);
1870
1871 if(memory_object->pager == &vnode_pager_workaround) {
1872 vnode_pager_deallocate(memory_object);
1873 } else if(memory_object->pager == &device_pager_workaround) {
1874 device_pager_deallocate(memory_object);
1875 } else
1876#endif
1877 dp_memory_object_deallocate(memory_object);
1878}
1879
1880
1881/* Routine memory_object_init */
1882kern_return_t memory_object_init
1883(
1884 memory_object_t memory_object,
1885 memory_object_control_t memory_control,
1886 vm_size_t memory_object_page_size
1887)
1888{
1889extern kern_return_t dp_memory_object_init(memory_object_t,
1890 memory_object_control_t,
1891 vm_size_t);
1892#ifdef MACH_BSD
1893extern kern_return_t vnode_pager_init(memory_object_t,
1894 memory_object_control_t,
1895 vm_size_t);
1896extern kern_return_t device_pager_init(memory_object_t,
1897 memory_object_control_t,
1898 vm_size_t);
1899
1900 if(memory_object->pager == &vnode_pager_workaround) {
1901 return vnode_pager_init(memory_object,
1902 memory_control,
1903 memory_object_page_size);
1904 } else if(memory_object->pager == &device_pager_workaround) {
1905 return device_pager_init(memory_object,
1906 memory_control,
1907 memory_object_page_size);
1908 } else
1909#endif
1910 return dp_memory_object_init(memory_object,
1911 memory_control,
1912 memory_object_page_size);
1913}
1914
1915/* Routine memory_object_terminate */
1916kern_return_t memory_object_terminate
1917(
1918 memory_object_t memory_object
1919)
1920{
1921extern kern_return_t dp_memory_object_terminate(memory_object_t);
1922
1923#ifdef MACH_BSD
1924extern kern_return_t vnode_pager_terminate(memory_object_t);
1925extern kern_return_t device_pager_terminate(memory_object_t);
1926
1927 if(memory_object->pager == &vnode_pager_workaround) {
1928 return vnode_pager_terminate(memory_object);
1929 } else if(memory_object->pager == &device_pager_workaround) {
1930 return device_pager_terminate(memory_object);
1931 } else
1932#endif
1933 return dp_memory_object_terminate(memory_object);
1934}
1935
1936/* Routine memory_object_data_request */
1937kern_return_t memory_object_data_request
1938(
1939 memory_object_t memory_object,
1940 memory_object_offset_t offset,
1941 vm_size_t length,
1942 vm_prot_t desired_access
1943)
1944{
1945extern kern_return_t dp_memory_object_data_request(memory_object_t,
1946 memory_object_offset_t, vm_size_t, vm_prot_t);
1947
1948#ifdef MACH_BSD
1949extern kern_return_t vnode_pager_data_request(memory_object_t,
1950 memory_object_offset_t, vm_size_t, vm_prot_t);
1951extern kern_return_t device_pager_data_request(memory_object_t,
1952 memory_object_offset_t, vm_size_t, vm_prot_t);
1953
1954 if (memory_object->pager == &vnode_pager_workaround) {
1955 return vnode_pager_data_request(memory_object,
1956 offset,
1957 length,
1958 desired_access);
1959 } else if (memory_object->pager == &device_pager_workaround) {
1960 return device_pager_data_request(memory_object,
1961 offset,
1962 length,
1963 desired_access);
1964 } else
1965#endif
1966 return dp_memory_object_data_request(memory_object,
1967 offset,
1968 length,
1969 desired_access);
1970}
1971
1972/* Routine memory_object_data_return */
1973kern_return_t memory_object_data_return
1974(
1975 memory_object_t memory_object,
1976 memory_object_offset_t offset,
1977 vm_size_t size,
1978 boolean_t dirty,
1979 boolean_t kernel_copy
1980)
1981{
1982 extern kern_return_t dp_memory_object_data_return(memory_object_t,
1983 memory_object_offset_t,
1984 vm_size_t,
1985 boolean_t,
1986 boolean_t);
1987#ifdef MACH_BSD
1988 extern kern_return_t vnode_pager_data_return(memory_object_t,
1989 memory_object_offset_t,
1990 vm_size_t,
1991 boolean_t,
1992 boolean_t);
1993 extern kern_return_t device_pager_data_return(memory_object_t,
1994 memory_object_offset_t,
1995 vm_size_t,
1996 boolean_t,
1997 boolean_t);
1998
1999 if (memory_object->pager == &vnode_pager_workaround) {
2000 return vnode_pager_data_return(memory_object,
2001 offset,
2002 size,
2003 dirty,
2004 kernel_copy);
2005 } else if (memory_object->pager == &device_pager_workaround) {
2006 return device_pager_data_return(memory_object,
2007 offset,
2008 size,
2009 dirty,
2010 kernel_copy);
2011 } else
2012#endif
2013 return dp_memory_object_data_return(memory_object,
2014 offset,
2015 size,
2016 dirty,
2017 kernel_copy);
2018}
2019
2020/* Routine memory_object_data_initialize */
2021kern_return_t memory_object_data_initialize
2022(
2023 memory_object_t memory_object,
2024 memory_object_offset_t offset,
2025 vm_size_t size
2026)
2027{
2028
2029 extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
2030 memory_object_offset_t,
2031 vm_size_t);
2032#ifdef MACH_BSD
2033 extern kern_return_t vnode_pager_data_initialize(memory_object_t,
2034 memory_object_offset_t,
2035 vm_size_t);
2036 extern kern_return_t device_pager_data_initialize(memory_object_t,
2037 memory_object_offset_t,
2038 vm_size_t);
2039
2040 if (memory_object->pager == &vnode_pager_workaround) {
2041 return vnode_pager_data_initialize(memory_object,
2042 offset,
2043 size);
2044 } else if (memory_object->pager == &device_pager_workaround) {
2045 return device_pager_data_initialize(memory_object,
2046 offset,
2047 size);
2048 } else
2049#endif
2050 return dp_memory_object_data_initialize(memory_object,
2051 offset,
2052 size);
2053}
2054
2055/* Routine memory_object_data_unlock */
2056kern_return_t memory_object_data_unlock
2057(
2058 memory_object_t memory_object,
2059 memory_object_offset_t offset,
2060 vm_size_t size,
2061 vm_prot_t desired_access
2062)
2063{
2064 extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
2065 memory_object_offset_t,
2066 vm_size_t,
2067 vm_prot_t);
2068#ifdef MACH_BSD
2069 extern kern_return_t vnode_pager_data_unlock(memory_object_t,
2070 memory_object_offset_t,
2071 vm_size_t,
2072 vm_prot_t);
2073 extern kern_return_t device_pager_data_unlock(memory_object_t,
2074 memory_object_offset_t,
2075 vm_size_t,
2076 vm_prot_t);
2077
2078 if (memory_object->pager == &vnode_pager_workaround) {
2079 return vnode_pager_data_unlock(memory_object,
2080 offset,
2081 size,
2082 desired_access);
2083 } else if (memory_object->pager == &device_pager_workaround) {
2084 return device_pager_data_unlock(memory_object,
2085 offset,
2086 size,
2087 desired_access);
2088 } else
2089#endif
2090 return dp_memory_object_data_unlock(memory_object,
2091 offset,
2092 size,
2093 desired_access);
2094
2095}
2096
2097/* Routine memory_object_synchronize */
2098kern_return_t memory_object_synchronize
2099(
2100 memory_object_t memory_object,
2101 memory_object_offset_t offset,
2102 vm_size_t size,
2103 vm_sync_t sync_flags
2104)
2105{
2106 extern kern_return_t dp_memory_object_synchronize(memory_object_t,
2107 memory_object_offset_t,
2108 vm_size_t,
2109 vm_sync_t);
2110#ifdef MACH_BSD
2111 extern kern_return_t vnode_pager_synchronize(memory_object_t,
2112 memory_object_offset_t,
2113 vm_size_t,
2114 vm_sync_t);
2115 extern kern_return_t device_pager_synchronize(memory_object_t,
2116 memory_object_offset_t,
2117 vm_size_t,
2118 vm_sync_t);
2119
2120 if (memory_object->pager == &vnode_pager_workaround) {
2121 return vnode_pager_synchronize(
2122 memory_object,
2123 offset,
2124 size,
2125 sync_flags);
2126 } else if (memory_object->pager == &device_pager_workaround) {
2127 return device_pager_synchronize(
2128 memory_object,
2129 offset,
2130 size,
2131 sync_flags);
2132 } else
2133#endif
2134 return dp_memory_object_synchronize(
2135 memory_object,
2136 offset,
2137 size,
2138 sync_flags);
2139}
2140
2141/* Routine memory_object_unmap */
2142kern_return_t memory_object_unmap
2143(
2144 memory_object_t memory_object
2145)
2146{
2147 extern kern_return_t dp_memory_object_unmap(memory_object_t);
2148#ifdef MACH_BSD
2149 extern kern_return_t vnode_pager_unmap(memory_object_t);
2150 extern kern_return_t device_pager_unmap(memory_object_t);
2151
2152 if (memory_object->pager == &vnode_pager_workaround) {
2153 return vnode_pager_unmap(memory_object);
2154 } else if (memory_object->pager == &device_pager_workaround) {
2155 return device_pager_unmap(memory_object);
2156 } else
2157#endif
2158 return dp_memory_object_unmap(memory_object);
2159}
2160
2161/* Routine memory_object_create */
2162kern_return_t memory_object_create
2163(
2164 memory_object_default_t default_memory_manager,
2165 vm_size_t new_memory_object_size,
2166 memory_object_t *new_memory_object
2167)
2168{
2169extern kern_return_t default_pager_memory_object_create(memory_object_default_t,
2170 vm_size_t,
2171 memory_object_t *);
2172
2173 return default_pager_memory_object_create(default_memory_manager,
2174 new_memory_object_size,
2175 new_memory_object);
2176}
2177