2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 * File: vm/memory_object.c
54 * Author: Michael Wayne Young
56 * External memory management interface control functions.
59 #include <advisory_pageout.h>
62 * Interface dependencies:
65 #include <mach/std_types.h> /* For pointer_t */
66 #include <mach/mach_types.h>
69 #include <mach/kern_return.h>
70 #include <mach/memory_object.h>
71 #include <mach/memory_object_default.h>
72 #include <mach/memory_object_control_server.h>
73 #include <mach/host_priv_server.h>
74 #include <mach/boolean.h>
75 #include <mach/vm_prot.h>
76 #include <mach/message.h>
79 * Implementation dependencies:
81 #include <string.h> /* For memcpy() */
84 #include <kern/host.h>
85 #include <kern/thread.h> /* For current_thread() */
86 #include <kern/ipc_mig.h>
87 #include <kern/misc_protos.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_fault.h>
91 #include <vm/memory_object.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_pageout.h>
94 #include <vm/pmap.h> /* For pmap_clear_modify */
95 #include <vm/vm_kern.h> /* For kernel_map, vm_move */
96 #include <vm/vm_map.h> /* For vm_map_pageable */
99 #include <vm/vm_external.h>
100 #endif /* MACH_PAGEMAP */
103 memory_object_default_t memory_manager_default
= MEMORY_OBJECT_DEFAULT_NULL
;
104 vm_size_t memory_manager_default_cluster
= 0;
105 decl_mutex_data(, memory_manager_default_lock
)
108 * Forward ref to file-local function:
111 vm_object_update(vm_object_t
, vm_object_offset_t
,
112 vm_size_t
, memory_object_return_t
, int, vm_prot_t
);
116 * Routine: memory_object_should_return_page
119 * Determine whether the given page should be returned,
120 * based on the page's state and on the given return policy.
122 * We should return the page if one of the following is true:
124 * 1. Page is dirty and should_return is not RETURN_NONE.
125 * 2. Page is precious and should_return is RETURN_ALL.
126 * 3. Should_return is RETURN_ANYTHING.
128 * As a side effect, m->dirty will be made consistent
129 * with pmap_is_modified(m), if should_return is not
130 * MEMORY_OBJECT_RETURN_NONE.
133 #define memory_object_should_return_page(m, should_return) \
134 (should_return != MEMORY_OBJECT_RETURN_NONE && \
135 (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_addr))) || \
136 ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
137 (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
139 typedef int memory_object_lock_result_t
;
141 #define MEMORY_OBJECT_LOCK_RESULT_DONE 0
142 #define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
143 #define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
144 #define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
146 memory_object_lock_result_t
memory_object_lock_page(
148 memory_object_return_t should_return
,
149 boolean_t should_flush
,
153 * Routine: memory_object_lock_page
156 * Perform the appropriate lock operations on the
157 * given page. See the description of
158 * "memory_object_lock_request" for the meanings
161 * Returns an indication that the operation
162 * completed, blocked, or that the page must
165 memory_object_lock_result_t
166 memory_object_lock_page(
168 memory_object_return_t should_return
,
169 boolean_t should_flush
,
172 XPR(XPR_MEMORY_OBJECT
,
173 "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
174 (integer_t
)m
, should_return
, should_flush
, prot
, 0);
177 * If we cannot change access to the page,
178 * either because a mapping is in progress
179 * (busy page) or because a mapping has been
180 * wired, then give up.
183 if (m
->busy
|| m
->cleaning
)
184 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK
);
187 * Don't worry about pages for which the kernel
188 * does not have any data.
191 if (m
->absent
|| m
->error
|| m
->restart
) {
192 if(m
->error
&& should_flush
) {
193 /* dump the page, pager wants us to */
194 /* clean it up and there is no */
195 /* relevant data to return */
196 if(m
->wire_count
== 0) {
198 return(MEMORY_OBJECT_LOCK_RESULT_DONE
);
201 return(MEMORY_OBJECT_LOCK_RESULT_DONE
);
205 assert(!m
->fictitious
);
207 if (m
->wire_count
!= 0) {
209 * If no change would take place
210 * anyway, return successfully.
214 * No change to page lock [2 checks] AND
215 * Should not return page
217 * XXX This doesn't handle sending a copy of a wired
218 * XXX page to the pager, but that will require some
219 * XXX significant surgery.
222 (m
->page_lock
== prot
|| prot
== VM_PROT_NO_CHANGE
) &&
223 ! memory_object_should_return_page(m
, should_return
)) {
226 * Restart page unlock requests,
227 * even though no change took place.
228 * [Memory managers may be expecting
229 * to see new requests.]
231 m
->unlock_request
= VM_PROT_NONE
;
234 return(MEMORY_OBJECT_LOCK_RESULT_DONE
);
237 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK
);
241 * If the page is to be flushed, allow
242 * that to be done as part of the protection.
251 * If we are decreasing permission, do it now;
252 * let the fault handler take care of increases
253 * (pmap_page_protect may not increase protection).
256 if (prot
!= VM_PROT_NO_CHANGE
) {
257 if ((m
->page_lock
^ prot
) & prot
) {
258 pmap_page_protect(m
->phys_addr
, VM_PROT_ALL
& ~prot
);
261 /* code associated with the vestigial
262 * memory_object_data_unlock
265 m
->lock_supplied
= TRUE
;
266 if (prot
!= VM_PROT_NONE
)
272 * Restart any past unlock requests, even if no
273 * change resulted. If the manager explicitly
274 * requested no protection change, then it is assumed
275 * to be remembering past requests.
278 m
->unlock_request
= VM_PROT_NONE
;
284 * Handle page returning.
287 if (memory_object_should_return_page(m
, should_return
)) {
290 * If we weren't planning
291 * to flush the page anyway,
292 * we may need to remove the
293 * page from the pageout
294 * system and from physical
298 vm_page_lock_queues();
299 VM_PAGE_QUEUES_REMOVE(m
);
300 vm_page_unlock_queues();
303 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
306 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN
);
308 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN
);
318 extern boolean_t vm_page_deactivate_hint
;
321 * XXX Make clean but not flush a paging hint,
322 * and deactivate the pages. This is a hack
323 * because it overloads flush/clean with
324 * implementation-dependent meaning. This only
325 * happens to pages that are already clean.
328 if (vm_page_deactivate_hint
&&
329 (should_return
!= MEMORY_OBJECT_RETURN_NONE
)) {
330 vm_page_lock_queues();
331 vm_page_deactivate(m
);
332 vm_page_unlock_queues();
336 return(MEMORY_OBJECT_LOCK_RESULT_DONE
);
339 #define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po) \
343 register vm_page_t hp; \
345 vm_object_unlock(object); \
347 (void) memory_object_data_return(object->pager, \
350 (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
353 vm_object_lock(object); \
357 * Routine: memory_object_lock_request [user interface]
360 * Control use of the data associated with the given
361 * memory object. For each page in the given range,
362 * perform the following operations, in order:
363 * 1) restrict access to the page (disallow
364 * forms specified by "prot");
365 * 2) return data to the manager (if "should_return"
366 * is RETURN_DIRTY and the page is dirty, or
367 * "should_return" is RETURN_ALL and the page
368 * is either dirty or precious); and,
369 * 3) flush the cached copy (if "should_flush"
371 * The set of pages is defined by a starting offset
372 * ("offset") and size ("size"). Only pages with the
373 * same page alignment as the starting offset are
376 * A single acknowledgement is sent (to the "reply_to"
377 * port) when these actions are complete. If successful,
378 * the naked send right for reply_to is consumed.
382 memory_object_lock_request(
383 memory_object_control_t control
,
384 memory_object_offset_t offset
,
385 memory_object_size_t size
,
386 memory_object_return_t should_return
,
391 vm_object_offset_t original_offset
= offset
;
392 boolean_t should_flush
=flags
& MEMORY_OBJECT_DATA_FLUSH
;
394 XPR(XPR_MEMORY_OBJECT
,
395 "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
396 (integer_t
)control
, offset
, size
,
397 (((should_return
&1)<<1)|should_flush
), prot
);
400 * Check for bogus arguments.
402 object
= memory_object_control_to_vm_object(control
);
403 if (object
== VM_OBJECT_NULL
)
404 return (KERN_INVALID_ARGUMENT
);
406 if ((prot
& ~VM_PROT_ALL
) != 0 && prot
!= VM_PROT_NO_CHANGE
)
407 return (KERN_INVALID_ARGUMENT
);
409 size
= round_page(size
);
412 * Lock the object, and acquire a paging reference to
413 * prevent the memory_object reference from being released.
415 vm_object_lock(object
);
416 vm_object_paging_begin(object
);
417 offset
-= object
->paging_offset
;
419 (void)vm_object_update(object
,
420 offset
, size
, should_return
, flags
, prot
);
422 vm_object_paging_end(object
);
423 vm_object_unlock(object
);
425 return (KERN_SUCCESS
);
429 * memory_object_release_name: [interface]
431 * Enforces name semantic on memory_object reference count decrement
432 * This routine should not be called unless the caller holds a name
433 * reference gained through the memory_object_named_create or the
434 * memory_object_rename call.
435 * If the TERMINATE_IDLE flag is set, the call will return if the
436 * reference count is not 1. i.e. idle with the only remaining reference
438 * If the decision is made to proceed the name field flag is set to
439 * false and the reference count is decremented. If the RESPECT_CACHE
440 * flag is set and the reference count has gone to zero, the
441 * memory_object is checked to see if it is cacheable otherwise when
442 * the reference count is zero, it is simply terminated.
446 memory_object_release_name(
447 memory_object_control_t control
,
452 object
= memory_object_control_to_vm_object(control
);
453 if (object
== VM_OBJECT_NULL
)
454 return (KERN_INVALID_ARGUMENT
);
456 return vm_object_release_name(object
, flags
);
462 * Routine: memory_object_destroy [user interface]
464 * Shut down a memory object, despite the
465 * presence of address map (or other) references
469 memory_object_destroy(
470 memory_object_control_t control
,
471 kern_return_t reason
)
475 object
= memory_object_control_to_vm_object(control
);
476 if (object
== VM_OBJECT_NULL
)
477 return (KERN_INVALID_ARGUMENT
);
479 return (vm_object_destroy(object
, reason
));
483 * Routine: vm_object_sync
485 * Kernel internal function to synch out pages in a given
486 * range within an object to its memory manager. Much the
487 * same as memory_object_lock_request but page protection
490 * If the should_flush and should_return flags are true pages
491 * are flushed, that is dirty & precious pages are written to
492 * the memory manager and then discarded. If should_return
493 * is false, only precious pages are returned to the memory
496 * If should flush is false and should_return true, the memory
497 * manager's copy of the pages is updated. If should_return
498 * is also false, only the precious pages are updated. This
499 * last option is of limited utility.
502 * FALSE if no pages were returned to the pager
509 vm_object_offset_t offset
,
511 boolean_t should_flush
,
512 boolean_t should_return
)
517 "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
518 (integer_t
)object
, offset
, size
, should_flush
, should_return
);
521 * Lock the object, and acquire a paging reference to
522 * prevent the memory_object and control ports from
525 vm_object_lock(object
);
526 vm_object_paging_begin(object
);
528 rv
= vm_object_update(object
, offset
, size
,
530 MEMORY_OBJECT_RETURN_ALL
:
531 MEMORY_OBJECT_RETURN_NONE
,
533 MEMORY_OBJECT_DATA_FLUSH
: 0,
537 vm_object_paging_end(object
);
538 vm_object_unlock(object
);
543 * Routine: vm_object_update
545 * Work function for m_o_lock_request(), vm_o_sync().
547 * Called with object locked and paging ref taken.
551 register vm_object_t object
,
552 register vm_object_offset_t offset
,
553 register vm_size_t size
,
554 memory_object_return_t should_return
,
558 register vm_page_t m
;
559 vm_page_t holding_page
;
560 vm_size_t original_size
= size
;
561 vm_object_offset_t paging_offset
= 0;
562 vm_object_t copy_object
;
563 vm_size_t data_cnt
= 0;
564 vm_object_offset_t last_offset
= offset
;
565 memory_object_lock_result_t page_lock_result
;
566 memory_object_lock_result_t pageout_action
;
567 boolean_t data_returned
= FALSE
;
568 boolean_t update_cow
;
569 boolean_t should_flush
= flags
& MEMORY_OBJECT_DATA_FLUSH
;
570 boolean_t pending_pageout
= FALSE
;
573 * To avoid blocking while scanning for pages, save
574 * dirty pages to be cleaned all at once.
576 * XXXO A similar strategy could be used to limit the
577 * number of times that a scan must be restarted for
578 * other reasons. Those pages that would require blocking
579 * could be temporarily collected in another list, or
580 * their offsets could be recorded in a small array.
584 * XXX NOTE: May want to consider converting this to a page list
585 * XXX vm_map_copy interface. Need to understand object
586 * XXX coalescing implications before doing so.
589 update_cow
= ((flags
& MEMORY_OBJECT_DATA_FLUSH
)
590 && (!(flags
& MEMORY_OBJECT_DATA_NO_CHANGE
) &&
591 !(flags
& MEMORY_OBJECT_DATA_PURGE
)))
592 || (flags
& MEMORY_OBJECT_COPY_SYNC
);
595 if((((copy_object
= object
->copy
) != NULL
) && update_cow
) ||
596 (flags
& MEMORY_OBJECT_DATA_SYNC
)) {
599 vm_object_offset_t copy_offset
;
603 kern_return_t error
= 0;
605 if(copy_object
!= NULL
) {
606 /* translate offset with respect to shadow's offset */
607 copy_offset
= (offset
>= copy_object
->shadow_offset
)?
608 offset
- copy_object
->shadow_offset
:
609 (vm_object_offset_t
) 0;
610 if(copy_offset
> copy_object
->size
)
611 copy_offset
= copy_object
->size
;
613 /* clip size with respect to shadow offset */
614 copy_size
= (offset
>= copy_object
->shadow_offset
) ?
615 size
: size
- (copy_object
->shadow_offset
- offset
);
620 copy_size
= ((copy_offset
+ copy_size
)
621 <= copy_object
->size
) ?
622 copy_size
: copy_object
->size
- copy_offset
;
624 /* check for a copy_offset which is beyond the end of */
625 /* the copy_object */
631 vm_object_unlock(object
);
632 vm_object_lock(copy_object
);
634 copy_object
= object
;
636 copy_size
= offset
+ size
;
637 copy_offset
= offset
;
640 vm_object_paging_begin(copy_object
);
641 for (i
=copy_offset
; i
<copy_size
; i
+=PAGE_SIZE
) {
642 RETRY_COW_OF_LOCK_REQUEST
:
643 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
644 switch (vm_fault_page(copy_object
, i
,
645 VM_PROT_WRITE
|VM_PROT_READ
,
649 copy_offset
+copy_size
,
650 VM_BEHAVIOR_SEQUENTIAL
,
659 case VM_FAULT_SUCCESS
:
662 page
->object
, top_page
);
663 PAGE_WAKEUP_DONE(page
);
664 vm_page_lock_queues();
665 if (!page
->active
&& !page
->inactive
)
666 vm_page_activate(page
);
667 vm_page_unlock_queues();
668 vm_object_lock(copy_object
);
669 vm_object_paging_begin(copy_object
);
671 PAGE_WAKEUP_DONE(page
);
672 vm_page_lock_queues();
673 if (!page
->active
&& !page
->inactive
)
674 vm_page_activate(page
);
675 vm_page_unlock_queues();
679 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
680 vm_object_lock(copy_object
);
681 vm_object_paging_begin(copy_object
);
682 goto RETRY_COW_OF_LOCK_REQUEST
;
683 case VM_FAULT_INTERRUPTED
:
684 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
685 vm_object_lock(copy_object
);
686 vm_object_paging_begin(copy_object
);
687 goto RETRY_COW_OF_LOCK_REQUEST
;
688 case VM_FAULT_MEMORY_SHORTAGE
:
690 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
691 vm_object_lock(copy_object
);
692 vm_object_paging_begin(copy_object
);
693 goto RETRY_COW_OF_LOCK_REQUEST
;
694 case VM_FAULT_FICTITIOUS_SHORTAGE
:
695 vm_page_more_fictitious();
696 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
697 vm_object_lock(copy_object
);
698 vm_object_paging_begin(copy_object
);
699 goto RETRY_COW_OF_LOCK_REQUEST
;
700 case VM_FAULT_MEMORY_ERROR
:
701 vm_object_lock(object
);
702 goto BYPASS_COW_COPYIN
;
706 vm_object_paging_end(copy_object
);
707 if(copy_object
!= object
) {
708 vm_object_unlock(copy_object
);
709 vm_object_lock(object
);
712 if((flags
& (MEMORY_OBJECT_DATA_SYNC
| MEMORY_OBJECT_COPY_SYNC
))) {
715 if(((copy_object
= object
->copy
) != NULL
) &&
716 (flags
& MEMORY_OBJECT_DATA_PURGE
)) {
717 copy_object
->shadow_severed
= TRUE
;
718 copy_object
->shadowed
= FALSE
;
719 copy_object
->shadow
= NULL
;
720 /* delete the ref the COW was holding on the target object */
721 vm_object_deallocate(object
);
727 size
-= PAGE_SIZE
, offset
+= PAGE_SIZE_64
)
730 * Limit the number of pages to be cleaned at once.
732 if (pending_pageout
&&
733 data_cnt
>= PAGE_SIZE
* DATA_WRITE_MAX
)
735 LIST_REQ_PAGEOUT_PAGES(object
, data_cnt
,
736 pageout_action
, paging_offset
);
738 pending_pageout
= FALSE
;
741 while ((m
= vm_page_lookup(object
, offset
)) != VM_PAGE_NULL
) {
742 page_lock_result
= memory_object_lock_page(m
, should_return
,
745 XPR(XPR_MEMORY_OBJECT
,
746 "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
747 (integer_t
)object
, offset
, page_lock_result
, 0, 0);
749 switch (page_lock_result
)
751 case MEMORY_OBJECT_LOCK_RESULT_DONE
:
753 * End of a cluster of dirty pages.
755 if(pending_pageout
) {
756 LIST_REQ_PAGEOUT_PAGES(object
,
757 data_cnt
, pageout_action
,
760 pending_pageout
= FALSE
;
765 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK
:
767 * Since it is necessary to block,
768 * clean any dirty pages now.
770 if(pending_pageout
) {
771 LIST_REQ_PAGEOUT_PAGES(object
,
772 data_cnt
, pageout_action
,
774 pending_pageout
= FALSE
;
779 PAGE_ASSERT_WAIT(m
, THREAD_UNINT
);
780 vm_object_unlock(object
);
781 thread_block((void (*)(void))0);
782 vm_object_lock(object
);
785 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN
:
786 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN
:
788 * The clean and return cases are similar.
793 * if this would form a discontiguous block,
794 * clean the old pages and start anew.
799 * Mark the page busy since we unlock the
803 if (pending_pageout
&&
804 (last_offset
!= offset
||
805 pageout_action
!= page_lock_result
)) {
806 LIST_REQ_PAGEOUT_PAGES(object
,
807 data_cnt
, pageout_action
,
809 pending_pageout
= FALSE
;
813 holding_page
= VM_PAGE_NULL
;
815 PAGE_ASSERT_WAIT(m
, THREAD_UNINT
);
816 vm_object_unlock(object
);
817 thread_block((void (*)(void))0);
820 if(!pending_pageout
) {
821 pending_pageout
= TRUE
;
822 pageout_action
= page_lock_result
;
823 paging_offset
= offset
;
826 vm_page_lock_queues();
827 m
->list_req_pending
= TRUE
;
832 vm_page_unlock_queues();
835 * Clean but do not flush
837 vm_page_lock_queues();
838 m
->list_req_pending
= TRUE
;
840 vm_page_unlock_queues();
843 vm_object_unlock(object
);
846 data_cnt
+= PAGE_SIZE
;
847 last_offset
= offset
+ PAGE_SIZE_64
;
848 data_returned
= TRUE
;
850 vm_object_lock(object
);
858 * We have completed the scan for applicable pages.
859 * Clean any pages that have been saved.
861 if (pending_pageout
) {
862 LIST_REQ_PAGEOUT_PAGES(object
,
863 data_cnt
, pageout_action
, paging_offset
);
865 return (data_returned
);
869 * Routine: memory_object_synchronize_completed [user interface]
871 * Tell kernel that previously synchronized data
872 * (memory_object_synchronize) has been queue or placed on the
875 * Note: there may be multiple synchronize requests for a given
876 * memory object outstanding but they will not overlap.
880 memory_object_synchronize_completed(
881 memory_object_control_t control
,
882 memory_object_offset_t offset
,
888 XPR(XPR_MEMORY_OBJECT
,
889 "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
890 (integer_t
)object
, offset
, length
, 0, 0);
893 * Look for bogus arguments
896 object
= memory_object_control_to_vm_object(control
);
897 if (object
== VM_OBJECT_NULL
)
898 return (KERN_INVALID_ARGUMENT
);
900 vm_object_lock(object
);
903 * search for sync request structure
905 queue_iterate(&object
->msr_q
, msr
, msync_req_t
, msr_q
) {
906 if (msr
->offset
== offset
&& msr
->length
== length
) {
907 queue_remove(&object
->msr_q
, msr
, msync_req_t
, msr_q
);
912 if (queue_end(&object
->msr_q
, (queue_entry_t
)msr
)) {
913 vm_object_unlock(object
);
914 return KERN_INVALID_ARGUMENT
;
918 vm_object_unlock(object
);
919 msr
->flag
= VM_MSYNC_DONE
;
921 thread_wakeup((event_t
) msr
);
924 }/* memory_object_synchronize_completed */
927 vm_object_set_attributes_common(
930 memory_object_copy_strategy_t copy_strategy
,
932 vm_size_t cluster_size
,
933 boolean_t silent_overwrite
,
934 boolean_t advisory_pageout
)
936 boolean_t object_became_ready
;
938 XPR(XPR_MEMORY_OBJECT
,
939 "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
940 (integer_t
)object
, (may_cache
&1)|((temporary
&1)<1), copy_strategy
, 0, 0);
942 if (object
== VM_OBJECT_NULL
)
943 return(KERN_INVALID_ARGUMENT
);
946 * Verify the attributes of importance
949 switch(copy_strategy
) {
950 case MEMORY_OBJECT_COPY_NONE
:
951 case MEMORY_OBJECT_COPY_DELAY
:
954 return(KERN_INVALID_ARGUMENT
);
957 #if !ADVISORY_PAGEOUT
958 if (silent_overwrite
|| advisory_pageout
)
959 return(KERN_INVALID_ARGUMENT
);
961 #endif /* !ADVISORY_PAGEOUT */
966 if (cluster_size
!= 0) {
967 int pages_per_cluster
;
968 pages_per_cluster
= atop(cluster_size
);
970 * Cluster size must be integral multiple of page size,
971 * and be a power of 2 number of pages.
973 if ((cluster_size
& (PAGE_SIZE
-1)) ||
974 ((pages_per_cluster
-1) & pages_per_cluster
))
975 return KERN_INVALID_ARGUMENT
;
978 vm_object_lock(object
);
981 * Copy the attributes
983 assert(!object
->internal
);
984 object_became_ready
= !object
->pager_ready
;
985 object
->copy_strategy
= copy_strategy
;
986 object
->can_persist
= may_cache
;
987 object
->temporary
= temporary
;
988 object
->silent_overwrite
= silent_overwrite
;
989 object
->advisory_pageout
= advisory_pageout
;
990 if (cluster_size
== 0)
991 cluster_size
= PAGE_SIZE
;
992 object
->cluster_size
= cluster_size
;
994 assert(cluster_size
>= PAGE_SIZE
&&
995 cluster_size
% PAGE_SIZE
== 0);
998 * Wake up anyone waiting for the ready attribute
999 * to become asserted.
1002 if (object_became_ready
) {
1003 object
->pager_ready
= TRUE
;
1004 vm_object_wakeup(object
, VM_OBJECT_EVENT_PAGER_READY
);
1007 vm_object_unlock(object
);
1009 return(KERN_SUCCESS
);
1013 * Set the memory object attribute as provided.
1015 * XXX This routine cannot be completed until the vm_msync, clean
1016 * in place, and cluster work is completed. See ifdef notyet
1017 * below and note that vm_object_set_attributes_common()
1018 * may have to be expanded.
1021 memory_object_change_attributes(
1022 memory_object_control_t control
,
1023 memory_object_flavor_t flavor
,
1024 memory_object_info_t attributes
,
1025 mach_msg_type_number_t count
)
1028 kern_return_t result
= KERN_SUCCESS
;
1029 boolean_t temporary
;
1030 boolean_t may_cache
;
1031 boolean_t invalidate
;
1032 vm_size_t cluster_size
;
1033 memory_object_copy_strategy_t copy_strategy
;
1034 boolean_t silent_overwrite
;
1035 boolean_t advisory_pageout
;
1037 object
= memory_object_control_to_vm_object(control
);
1038 if (object
== VM_OBJECT_NULL
)
1039 return (KERN_INVALID_ARGUMENT
);
1041 vm_object_lock(object
);
1043 temporary
= object
->temporary
;
1044 may_cache
= object
->can_persist
;
1045 copy_strategy
= object
->copy_strategy
;
1046 silent_overwrite
= object
->silent_overwrite
;
1047 advisory_pageout
= object
->advisory_pageout
;
1049 invalidate
= object
->invalidate
;
1051 cluster_size
= object
->cluster_size
;
1052 vm_object_unlock(object
);
1055 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO
:
1057 old_memory_object_behave_info_t behave
;
1059 if (count
!= OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT
) {
1060 result
= KERN_INVALID_ARGUMENT
;
1064 behave
= (old_memory_object_behave_info_t
) attributes
;
1066 temporary
= behave
->temporary
;
1067 invalidate
= behave
->invalidate
;
1068 copy_strategy
= behave
->copy_strategy
;
1073 case MEMORY_OBJECT_BEHAVIOR_INFO
:
1075 memory_object_behave_info_t behave
;
1077 if (count
!= MEMORY_OBJECT_BEHAVE_INFO_COUNT
) {
1078 result
= KERN_INVALID_ARGUMENT
;
1082 behave
= (memory_object_behave_info_t
) attributes
;
1084 temporary
= behave
->temporary
;
1085 invalidate
= behave
->invalidate
;
1086 copy_strategy
= behave
->copy_strategy
;
1087 silent_overwrite
= behave
->silent_overwrite
;
1088 advisory_pageout
= behave
->advisory_pageout
;
1092 case MEMORY_OBJECT_PERFORMANCE_INFO
:
1094 memory_object_perf_info_t perf
;
1096 if (count
!= MEMORY_OBJECT_PERF_INFO_COUNT
) {
1097 result
= KERN_INVALID_ARGUMENT
;
1101 perf
= (memory_object_perf_info_t
) attributes
;
1103 may_cache
= perf
->may_cache
;
1104 cluster_size
= round_page(perf
->cluster_size
);
1109 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO
:
1111 old_memory_object_attr_info_t attr
;
1113 if (count
!= OLD_MEMORY_OBJECT_ATTR_INFO_COUNT
) {
1114 result
= KERN_INVALID_ARGUMENT
;
1118 attr
= (old_memory_object_attr_info_t
) attributes
;
1120 may_cache
= attr
->may_cache
;
1121 copy_strategy
= attr
->copy_strategy
;
1122 cluster_size
= page_size
;
1127 case MEMORY_OBJECT_ATTRIBUTE_INFO
:
1129 memory_object_attr_info_t attr
;
1131 if (count
!= MEMORY_OBJECT_ATTR_INFO_COUNT
) {
1132 result
= KERN_INVALID_ARGUMENT
;
1136 attr
= (memory_object_attr_info_t
) attributes
;
1138 copy_strategy
= attr
->copy_strategy
;
1139 may_cache
= attr
->may_cache_object
;
1140 cluster_size
= attr
->cluster_size
;
1141 temporary
= attr
->temporary
;
1147 result
= KERN_INVALID_ARGUMENT
;
1151 if (result
!= KERN_SUCCESS
)
1154 if (copy_strategy
== MEMORY_OBJECT_COPY_TEMPORARY
) {
1155 copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
1162 * XXX may_cache may become a tri-valued variable to handle
1163 * XXX uncache if not in use.
1165 return (vm_object_set_attributes_common(object
,
1175 memory_object_get_attributes(
1176 memory_object_control_t control
,
1177 memory_object_flavor_t flavor
,
1178 memory_object_info_t attributes
, /* pointer to OUT array */
1179 mach_msg_type_number_t
*count
) /* IN/OUT */
1181 kern_return_t ret
= KERN_SUCCESS
;
1184 object
= memory_object_control_to_vm_object(control
);
1185 if (object
== VM_OBJECT_NULL
)
1186 return (KERN_INVALID_ARGUMENT
);
1188 vm_object_lock(object
);
1191 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO
:
1193 old_memory_object_behave_info_t behave
;
1195 if (*count
< OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT
) {
1196 ret
= KERN_INVALID_ARGUMENT
;
1200 behave
= (old_memory_object_behave_info_t
) attributes
;
1201 behave
->copy_strategy
= object
->copy_strategy
;
1202 behave
->temporary
= object
->temporary
;
1203 #if notyet /* remove when vm_msync complies and clean in place fini */
1204 behave
->invalidate
= object
->invalidate
;
1206 behave
->invalidate
= FALSE
;
1209 *count
= OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT
;
1213 case MEMORY_OBJECT_BEHAVIOR_INFO
:
1215 memory_object_behave_info_t behave
;
1217 if (*count
< MEMORY_OBJECT_BEHAVE_INFO_COUNT
) {
1218 ret
= KERN_INVALID_ARGUMENT
;
1222 behave
= (memory_object_behave_info_t
) attributes
;
1223 behave
->copy_strategy
= object
->copy_strategy
;
1224 behave
->temporary
= object
->temporary
;
1225 #if notyet /* remove when vm_msync complies and clean in place fini */
1226 behave
->invalidate
= object
->invalidate
;
1228 behave
->invalidate
= FALSE
;
1230 behave
->advisory_pageout
= object
->advisory_pageout
;
1231 behave
->silent_overwrite
= object
->silent_overwrite
;
1232 *count
= MEMORY_OBJECT_BEHAVE_INFO_COUNT
;
1236 case MEMORY_OBJECT_PERFORMANCE_INFO
:
1238 memory_object_perf_info_t perf
;
1240 if (*count
< MEMORY_OBJECT_PERF_INFO_COUNT
) {
1241 ret
= KERN_INVALID_ARGUMENT
;
1245 perf
= (memory_object_perf_info_t
) attributes
;
1246 perf
->cluster_size
= object
->cluster_size
;
1247 perf
->may_cache
= object
->can_persist
;
1249 *count
= MEMORY_OBJECT_PERF_INFO_COUNT
;
1253 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO
:
1255 old_memory_object_attr_info_t attr
;
1257 if (*count
< OLD_MEMORY_OBJECT_ATTR_INFO_COUNT
) {
1258 ret
= KERN_INVALID_ARGUMENT
;
1262 attr
= (old_memory_object_attr_info_t
) attributes
;
1263 attr
->may_cache
= object
->can_persist
;
1264 attr
->copy_strategy
= object
->copy_strategy
;
1266 *count
= OLD_MEMORY_OBJECT_ATTR_INFO_COUNT
;
1270 case MEMORY_OBJECT_ATTRIBUTE_INFO
:
1272 memory_object_attr_info_t attr
;
1274 if (*count
< MEMORY_OBJECT_ATTR_INFO_COUNT
) {
1275 ret
= KERN_INVALID_ARGUMENT
;
1279 attr
= (memory_object_attr_info_t
) attributes
;
1280 attr
->copy_strategy
= object
->copy_strategy
;
1281 attr
->cluster_size
= object
->cluster_size
;
1282 attr
->may_cache_object
= object
->can_persist
;
1283 attr
->temporary
= object
->temporary
;
1285 *count
= MEMORY_OBJECT_ATTR_INFO_COUNT
;
1290 ret
= KERN_INVALID_ARGUMENT
;
1294 vm_object_unlock(object
);
1301 * Routine: memory_object_upl_request [interface]
1303 * Cause the population of a portion of a vm_object.
1304 * Depending on the nature of the request, the pages
1305 * returned may be contain valid data or be uninitialized.
1310 memory_object_upl_request(
1311 memory_object_control_t control
,
1312 memory_object_offset_t offset
,
1315 upl_page_info_array_t user_page_list
,
1316 unsigned int *page_list_count
,
1321 object
= memory_object_control_to_vm_object(control
);
1322 if (object
== VM_OBJECT_NULL
)
1323 return (KERN_INVALID_ARGUMENT
);
1325 return vm_object_upl_request(object
,
1335 * Routine: memory_object_super_upl_request [interface]
1337 * Cause the population of a portion of a vm_object
1338 * in much the same way as memory_object_upl_request.
1339 * Depending on the nature of the request, the pages
1340 * returned may be contain valid data or be uninitialized.
1341 * However, the region may be expanded up to the super
1342 * cluster size provided.
1346 memory_object_super_upl_request(
1347 memory_object_control_t control
,
1348 memory_object_offset_t offset
,
1350 vm_size_t super_cluster
,
1352 upl_page_info_t
*user_page_list
,
1353 unsigned int *page_list_count
,
1358 object
= memory_object_control_to_vm_object(control
);
1359 if (object
== VM_OBJECT_NULL
)
1360 return (KERN_INVALID_ARGUMENT
);
1362 return vm_object_super_upl_request(object
,
1372 int vm_stat_discard_cleared_reply
= 0;
1373 int vm_stat_discard_cleared_unset
= 0;
1374 int vm_stat_discard_cleared_too_late
= 0;
1379 * Routine: host_default_memory_manager [interface]
1381 * set/get the default memory manager port and default cluster
1384 * If successful, consumes the supplied naked send right.
1387 host_default_memory_manager(
1388 host_priv_t host_priv
,
1389 memory_object_default_t
*default_manager
,
1390 vm_size_t cluster_size
)
1392 memory_object_default_t current_manager
;
1393 memory_object_default_t new_manager
;
1394 memory_object_default_t returned_manager
;
1396 if (host_priv
== HOST_PRIV_NULL
)
1397 return(KERN_INVALID_HOST
);
1399 assert(host_priv
== &realhost
);
1401 new_manager
= *default_manager
;
1402 mutex_lock(&memory_manager_default_lock
);
1403 current_manager
= memory_manager_default
;
1405 if (new_manager
== MEMORY_OBJECT_DEFAULT_NULL
) {
1407 * Retrieve the current value.
1409 memory_object_default_reference(current_manager
);
1410 returned_manager
= current_manager
;
1413 * Retrieve the current value,
1414 * and replace it with the supplied value.
1415 * We return the old reference to the caller
1416 * but we have to take a reference on the new
1420 returned_manager
= current_manager
;
1421 memory_manager_default
= new_manager
;
1422 memory_object_default_reference(new_manager
);
1424 if (cluster_size
% PAGE_SIZE
!= 0) {
1426 mutex_unlock(&memory_manager_default_lock
);
1427 return KERN_INVALID_ARGUMENT
;
1429 cluster_size
= round_page(cluster_size
);
1432 memory_manager_default_cluster
= cluster_size
;
1435 * In case anyone's been waiting for a memory
1436 * manager to be established, wake them up.
1439 thread_wakeup((event_t
) &memory_manager_default
);
1442 mutex_unlock(&memory_manager_default_lock
);
1444 *default_manager
= returned_manager
;
1445 return(KERN_SUCCESS
);
1449 * Routine: memory_manager_default_reference
1451 * Returns a naked send right for the default
1452 * memory manager. The returned right is always
1453 * valid (not IP_NULL or IP_DEAD).
1456 __private_extern__ memory_object_default_t
1457 memory_manager_default_reference(
1458 vm_size_t
*cluster_size
)
1460 memory_object_default_t current_manager
;
1462 mutex_lock(&memory_manager_default_lock
);
1463 current_manager
= memory_manager_default
;
1464 while (current_manager
== MEMORY_OBJECT_DEFAULT_NULL
) {
1465 thread_sleep_mutex((event_t
) &memory_manager_default
,
1466 &memory_manager_default_lock
, THREAD_UNINT
);
1467 mutex_lock(&memory_manager_default_lock
);
1468 current_manager
= memory_manager_default
;
1470 memory_object_default_reference(current_manager
);
1471 *cluster_size
= memory_manager_default_cluster
;
1472 mutex_unlock(&memory_manager_default_lock
);
1474 return current_manager
;
1478 * Routine: memory_manager_default_check
1481 * Check whether a default memory manager has been set
1482 * up yet, or not. Returns KERN_SUCCESS if dmm exists,
1483 * and KERN_FAILURE if dmm does not exist.
1485 * If there is no default memory manager, log an error,
1486 * but only the first time.
1489 __private_extern__ kern_return_t
1490 memory_manager_default_check(void)
1492 memory_object_default_t current
;
1494 mutex_lock(&memory_manager_default_lock
);
1495 current
= memory_manager_default
;
1496 if (current
== MEMORY_OBJECT_DEFAULT_NULL
) {
1497 static boolean_t logged
; /* initialized to 0 */
1498 boolean_t complain
= !logged
;
1500 mutex_unlock(&memory_manager_default_lock
);
1502 printf("Warning: No default memory manager\n");
1503 return(KERN_FAILURE
);
1505 mutex_unlock(&memory_manager_default_lock
);
1506 return(KERN_SUCCESS
);
1510 __private_extern__
void
1511 memory_manager_default_init(void)
1513 memory_manager_default
= MEMORY_OBJECT_DEFAULT_NULL
;
1514 mutex_init(&memory_manager_default_lock
, ETAP_VM_MEMMAN
);
1519 memory_object_deactivate_pages(
1521 vm_object_offset_t offset
,
1522 vm_object_size_t size
,
1523 boolean_t kill_page
)
1525 vm_object_t orig_object
;
1526 int pages_moved
= 0;
1527 int pages_found
= 0;
1530 * entered with object lock held, acquire a paging reference to
1531 * prevent the memory_object and control ports from
1534 orig_object
= object
;
1537 register vm_page_t m
;
1538 vm_object_offset_t toffset
;
1539 vm_object_size_t tsize
;
1541 vm_object_paging_begin(object
);
1542 vm_page_lock_queues();
1544 for (tsize
= size
, toffset
= offset
; tsize
; tsize
-= PAGE_SIZE
, toffset
+= PAGE_SIZE
) {
1546 if ((m
= vm_page_lookup(object
, toffset
)) != VM_PAGE_NULL
) {
1550 if ((m
->wire_count
== 0) && (!m
->private) && (!m
->gobbled
) && (!m
->busy
)) {
1552 m
->reference
= FALSE
;
1553 pmap_clear_reference(m
->phys_addr
);
1555 if ((kill_page
) && (object
->internal
)) {
1556 m
->precious
= FALSE
;
1558 pmap_clear_modify(m
->phys_addr
);
1559 vm_external_state_clr(object
->existence_map
, offset
);
1561 VM_PAGE_QUEUES_REMOVE(m
);
1563 queue_enter_first(&vm_page_queue_inactive
, m
, vm_page_t
, pageq
);
1567 vm_page_inactive_count
++;
1573 vm_page_unlock_queues();
1574 vm_object_paging_end(object
);
1576 if (object
->shadow
) {
1577 vm_object_t tmp_object
;
1581 offset
+= object
->shadow_offset
;
1583 tmp_object
= object
->shadow
;
1584 vm_object_lock(tmp_object
);
1586 if (object
!= orig_object
)
1587 vm_object_unlock(object
);
1588 object
= tmp_object
;
1592 if (object
!= orig_object
)
1593 vm_object_unlock(object
);
1596 /* Allow manipulation of individual page state. This is actually part of */
1597 /* the UPL regimen but takes place on the object rather than on a UPL */
1600 memory_object_page_op(
1601 memory_object_control_t control
,
1602 memory_object_offset_t offset
,
1604 vm_offset_t
*phys_entry
,
1611 object
= memory_object_control_to_vm_object(control
);
1612 if (object
== VM_OBJECT_NULL
)
1613 return (KERN_INVALID_ARGUMENT
);
1615 vm_object_lock(object
);
1617 if(ops
& UPL_POP_PHYSICAL
) {
1618 if(object
->phys_contiguous
) {
1620 *phys_entry
= (vm_offset_t
)
1621 object
->shadow_offset
;
1623 vm_object_unlock(object
);
1624 return KERN_SUCCESS
;
1626 vm_object_unlock(object
);
1627 return KERN_INVALID_OBJECT
;
1632 if(object
->phys_contiguous
) {
1633 vm_object_unlock(object
);
1634 return KERN_INVALID_OBJECT
;
1637 if((dst_page
= vm_page_lookup(object
,offset
)) == VM_PAGE_NULL
) {
1638 vm_object_unlock(object
);
1639 return KERN_FAILURE
;
1642 /* Sync up on getting the busy bit */
1643 if((dst_page
->busy
|| dst_page
->cleaning
) &&
1644 (((ops
& UPL_POP_SET
) &&
1645 (ops
& UPL_POP_BUSY
)) || (ops
& UPL_POP_DUMP
))) {
1646 /* someone else is playing with the page, we will */
1648 PAGE_ASSERT_WAIT(dst_page
, THREAD_UNINT
);
1649 vm_object_unlock(object
);
1650 thread_block((void(*)(void))0);
1651 vm_object_lock(object
);
1655 if (ops
& UPL_POP_DUMP
) {
1656 vm_page_lock_queues();
1657 vm_page_free(dst_page
);
1658 vm_page_unlock_queues();
1665 /* Get the condition of flags before requested ops */
1666 /* are undertaken */
1668 if(dst_page
->dirty
) *flags
|= UPL_POP_DIRTY
;
1669 if(dst_page
->pageout
) *flags
|= UPL_POP_PAGEOUT
;
1670 if(dst_page
->precious
) *flags
|= UPL_POP_PRECIOUS
;
1671 if(dst_page
->absent
) *flags
|= UPL_POP_ABSENT
;
1672 if(dst_page
->busy
) *flags
|= UPL_POP_BUSY
;
1675 *phys_entry
= dst_page
->phys_addr
;
1677 /* The caller should have made a call either contingent with */
1678 /* or prior to this call to set UPL_POP_BUSY */
1679 if(ops
& UPL_POP_SET
) {
1680 /* The protection granted with this assert will */
1681 /* not be complete. If the caller violates the */
1682 /* convention and attempts to change page state */
1683 /* without first setting busy we may not see it */
1684 /* because the page may already be busy. However */
1685 /* if such violations occur we will assert sooner */
1687 assert(dst_page
->busy
|| (ops
& UPL_POP_BUSY
));
1688 if (ops
& UPL_POP_DIRTY
) dst_page
->dirty
= TRUE
;
1689 if (ops
& UPL_POP_PAGEOUT
) dst_page
->pageout
= TRUE
;
1690 if (ops
& UPL_POP_PRECIOUS
) dst_page
->precious
= TRUE
;
1691 if (ops
& UPL_POP_ABSENT
) dst_page
->absent
= TRUE
;
1692 if (ops
& UPL_POP_BUSY
) dst_page
->busy
= TRUE
;
1695 if(ops
& UPL_POP_CLR
) {
1696 assert(dst_page
->busy
);
1697 if (ops
& UPL_POP_DIRTY
) dst_page
->dirty
= FALSE
;
1698 if (ops
& UPL_POP_PAGEOUT
) dst_page
->pageout
= FALSE
;
1699 if (ops
& UPL_POP_PRECIOUS
) dst_page
->precious
= FALSE
;
1700 if (ops
& UPL_POP_ABSENT
) dst_page
->absent
= FALSE
;
1701 if (ops
& UPL_POP_BUSY
) {
1702 dst_page
->busy
= FALSE
;
1703 PAGE_WAKEUP(dst_page
);
1709 vm_object_unlock(object
);
1710 return KERN_SUCCESS
;
1714 static zone_t mem_obj_control_zone
;
1716 __private_extern__
void
1717 memory_object_control_bootstrap(void)
1721 i
= (vm_size_t
) sizeof (struct memory_object_control
);
1722 mem_obj_control_zone
= zinit (i
, 8192*i
, 4096, "mem_obj_control");
1726 __private_extern__ memory_object_control_t
1727 memory_object_control_allocate(
1730 memory_object_control_t control
;
1732 control
= (memory_object_control_t
)zalloc(mem_obj_control_zone
);
1733 if (control
!= MEMORY_OBJECT_CONTROL_NULL
)
1734 control
->object
= object
;
1738 __private_extern__
void
1739 memory_object_control_collapse(
1740 memory_object_control_t control
,
1743 assert((control
->object
!= VM_OBJECT_NULL
) &&
1744 (control
->object
!= object
));
1745 control
->object
= object
;
1748 __private_extern__ vm_object_t
1749 memory_object_control_to_vm_object(
1750 memory_object_control_t control
)
1752 if (control
== MEMORY_OBJECT_CONTROL_NULL
)
1753 return VM_OBJECT_NULL
;
1755 return (control
->object
);
1758 memory_object_control_t
1759 convert_port_to_mo_control(
1762 return MEMORY_OBJECT_CONTROL_NULL
;
1767 convert_mo_control_to_port(
1768 memory_object_control_t control
)
1770 return MACH_PORT_NULL
;
1774 memory_object_control_reference(
1775 memory_object_control_t control
)
1781 * We only every issue one of these references, so kill it
1782 * when that gets released (should switch the real reference
1783 * counting in true port-less EMMI).
1786 memory_object_control_deallocate(
1787 memory_object_control_t control
)
1789 zfree(mem_obj_control_zone
, (vm_offset_t
)control
);
1793 memory_object_control_disable(
1794 memory_object_control_t control
)
1796 assert(control
->object
!= VM_OBJECT_NULL
);
1797 control
->object
= VM_OBJECT_NULL
;
1801 memory_object_default_reference(
1802 memory_object_default_t dmm
)
1804 ipc_port_make_send(dmm
);
1808 memory_object_default_deallocate(
1809 memory_object_default_t dmm
)
1811 ipc_port_release_send(dmm
);
1815 convert_port_to_memory_object(
1818 return (MEMORY_OBJECT_NULL
);
1823 convert_memory_object_to_port(
1824 memory_object_t object
)
1826 return (MACH_PORT_NULL
);
/* remove after component interface available */
/*
 * Sentinel symbols used to identify the BSD vnode pager and the
 * device pager by address (see the pager-dispatch wrappers below).
 */
extern int vnode_pager_workaround;
extern int device_pager_workaround;
1836 /* Routine memory_object_reference */
1837 void memory_object_reference(
1838 memory_object_t memory_object
)
1840 extern void dp_memory_object_reference(memory_object_t
);
1843 extern void vnode_pager_reference(memory_object_t
);
1844 extern void device_pager_reference(memory_object_t
);
1846 if(memory_object
->pager
== &vnode_pager_workaround
) {
1847 vnode_pager_reference(memory_object
);
1848 } else if(memory_object
->pager
== &device_pager_workaround
) {
1849 device_pager_reference(memory_object
);
1852 dp_memory_object_reference(memory_object
);
1855 /* Routine memory_object_deallocate */
1856 void memory_object_deallocate(
1857 memory_object_t memory_object
)
1859 extern void dp_memory_object_deallocate(memory_object_t
);
1862 extern void vnode_pager_deallocate(memory_object_t
);
1863 extern void device_pager_deallocate(memory_object_t
);
1865 if(memory_object
->pager
== &vnode_pager_workaround
) {
1866 vnode_pager_deallocate(memory_object
);
1867 } else if(memory_object
->pager
== &device_pager_workaround
) {
1868 device_pager_deallocate(memory_object
);
1871 dp_memory_object_deallocate(memory_object
);
1875 /* Routine memory_object_init */
1876 kern_return_t memory_object_init
1878 memory_object_t memory_object
,
1879 memory_object_control_t memory_control
,
1880 vm_size_t memory_object_page_size
1883 extern kern_return_t
dp_memory_object_init(memory_object_t
,
1884 memory_object_control_t
,
1887 extern kern_return_t
vnode_pager_init(memory_object_t
,
1888 memory_object_control_t
,
1890 extern kern_return_t
device_pager_init(memory_object_t
,
1891 memory_object_control_t
,
1894 if(memory_object
->pager
== &vnode_pager_workaround
) {
1895 return vnode_pager_init(memory_object
,
1897 memory_object_page_size
);
1898 } else if(memory_object
->pager
== &device_pager_workaround
) {
1899 return device_pager_init(memory_object
,
1901 memory_object_page_size
);
1904 return dp_memory_object_init(memory_object
,
1906 memory_object_page_size
);
1909 /* Routine memory_object_terminate */
1910 kern_return_t memory_object_terminate
1912 memory_object_t memory_object
1915 extern kern_return_t
dp_memory_object_terminate(memory_object_t
);
1918 extern kern_return_t
vnode_pager_terminate(memory_object_t
);
1919 extern kern_return_t
device_pager_terminate(memory_object_t
);
1921 if(memory_object
->pager
== &vnode_pager_workaround
) {
1922 return vnode_pager_terminate(memory_object
);
1923 } else if(memory_object
->pager
== &device_pager_workaround
) {
1924 return device_pager_terminate(memory_object
);
1927 return dp_memory_object_terminate(memory_object
);
1930 /* Routine memory_object_data_request */
1931 kern_return_t memory_object_data_request
1933 memory_object_t memory_object
,
1934 memory_object_offset_t offset
,
1936 vm_prot_t desired_access
1939 extern kern_return_t
dp_memory_object_data_request(memory_object_t
,
1940 memory_object_offset_t
, vm_size_t
, vm_prot_t
);
1943 extern kern_return_t
vnode_pager_data_request(memory_object_t
,
1944 memory_object_offset_t
, vm_size_t
, vm_prot_t
);
1945 extern kern_return_t
device_pager_data_request(memory_object_t
,
1946 memory_object_offset_t
, vm_size_t
, vm_prot_t
);
1948 if (memory_object
->pager
== &vnode_pager_workaround
) {
1949 return vnode_pager_data_request(memory_object
,
1953 } else if (memory_object
->pager
== &device_pager_workaround
) {
1954 return device_pager_data_request(memory_object
,
1960 return dp_memory_object_data_request(memory_object
,
1966 /* Routine memory_object_data_return */
1967 kern_return_t memory_object_data_return
1969 memory_object_t memory_object
,
1970 memory_object_offset_t offset
,
1973 boolean_t kernel_copy
1976 extern kern_return_t
dp_memory_object_data_return(memory_object_t
,
1977 memory_object_offset_t
,
1982 extern kern_return_t
vnode_pager_data_return(memory_object_t
,
1983 memory_object_offset_t
,
1987 extern kern_return_t
device_pager_data_return(memory_object_t
,
1988 memory_object_offset_t
,
1993 if (memory_object
->pager
== &vnode_pager_workaround
) {
1994 return vnode_pager_data_return(memory_object
,
1999 } else if (memory_object
->pager
== &device_pager_workaround
) {
2000 return device_pager_data_return(memory_object
,
2007 return dp_memory_object_data_return(memory_object
,
2014 /* Routine memory_object_data_initialize */
2015 kern_return_t memory_object_data_initialize
2017 memory_object_t memory_object
,
2018 memory_object_offset_t offset
,
2023 extern kern_return_t
dp_memory_object_data_initialize(memory_object_t
,
2024 memory_object_offset_t
,
2027 extern kern_return_t
vnode_pager_data_initialize(memory_object_t
,
2028 memory_object_offset_t
,
2030 extern kern_return_t
device_pager_data_initialize(memory_object_t
,
2031 memory_object_offset_t
,
2034 if (memory_object
->pager
== &vnode_pager_workaround
) {
2035 return vnode_pager_data_initialize(memory_object
,
2038 } else if (memory_object
->pager
== &device_pager_workaround
) {
2039 return device_pager_data_initialize(memory_object
,
2044 return dp_memory_object_data_initialize(memory_object
,
2049 /* Routine memory_object_data_unlock */
2050 kern_return_t memory_object_data_unlock
2052 memory_object_t memory_object
,
2053 memory_object_offset_t offset
,
2055 vm_prot_t desired_access
2058 extern kern_return_t
dp_memory_object_data_unlock(memory_object_t
,
2059 memory_object_offset_t
,
2063 extern kern_return_t
vnode_pager_data_unlock(memory_object_t
,
2064 memory_object_offset_t
,
2067 extern kern_return_t
device_pager_data_unlock(memory_object_t
,
2068 memory_object_offset_t
,
2072 if (memory_object
->pager
== &vnode_pager_workaround
) {
2073 return vnode_pager_data_unlock(memory_object
,
2077 } else if (memory_object
->pager
== &device_pager_workaround
) {
2078 return device_pager_data_unlock(memory_object
,
2084 return dp_memory_object_data_unlock(memory_object
,
2091 /* Routine memory_object_synchronize */
2092 kern_return_t memory_object_synchronize
2094 memory_object_t memory_object
,
2095 memory_object_offset_t offset
,
2097 vm_sync_t sync_flags
2100 extern kern_return_t
dp_memory_object_data_synchronize(memory_object_t
,
2101 memory_object_offset_t
,
2105 extern kern_return_t
vnode_pager_data_synchronize(memory_object_t
,
2106 memory_object_offset_t
,
2109 extern kern_return_t
device_pager_data_synchronize(memory_object_t
,
2110 memory_object_offset_t
,
2114 if (memory_object
->pager
== &vnode_pager_workaround
) {
2115 return vnode_pager_synchronize(
2120 } else if (memory_object
->pager
== &device_pager_workaround
) {
2121 return device_pager_synchronize(
2128 return dp_memory_object_synchronize(
2135 /* Routine memory_object_unmap */
2136 kern_return_t memory_object_unmap
2138 memory_object_t memory_object
2141 extern kern_return_t
dp_memory_object_unmap(memory_object_t
);
2143 extern kern_return_t
vnode_pager_unmap(memory_object_t
);
2144 extern kern_return_t
device_pager_unmap(memory_object_t
);
2146 if (memory_object
->pager
== &vnode_pager_workaround
) {
2147 return vnode_pager_unmap(memory_object
);
2148 } else if (memory_object
->pager
== &device_pager_workaround
) {
2149 return device_pager_unmap(memory_object
);
2152 return dp_memory_object_unmap(memory_object
);
2155 /* Routine memory_object_create */
2156 kern_return_t memory_object_create
2158 memory_object_default_t default_memory_manager
,
2159 vm_size_t new_memory_object_size
,
2160 memory_object_t
*new_memory_object
2163 extern kern_return_t
default_pager_memory_object_create(memory_object_default_t
,
2167 return default_pager_memory_object_create(default_memory_manager
,
2168 new_memory_object_size
,