/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *    File:    vm/memory_object.c
 *    Author:  Michael Wayne Young
 *
 *    External memory management interface control functions.
 */
#include <advisory_pageout.h>

/*
 *    Interface dependencies:
 */

#include <mach/std_types.h>    /* For pointer_t */
#include <mach/mach_types.h>

#include <mach/kern_return.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/host_priv_server.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/message.h>
/*
 *    Implementation dependencies:
 */
#include <string.h>        /* For memcpy() */

#include <kern/host.h>
#include <kern/thread.h>   /* For current_thread() */
#include <kern/ipc_mig.h>
#include <kern/misc_protos.h>

#include <vm/vm_object.h>
#include <vm/vm_fault.h>
#include <vm/memory_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>       /* For pmap_clear_modify */
#include <vm/vm_kern.h>    /* For kernel_map, vm_move */
#include <vm/vm_map.h>     /* For vm_map_pageable */

#if MACH_PAGEMAP
#include <vm/vm_external.h>
#endif  /* MACH_PAGEMAP */
memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
vm_size_t               memory_manager_default_cluster = 0;
decl_mutex_data(,       memory_manager_default_lock)
/*
 *    Forward ref to file-local function:
 */
boolean_t    vm_object_update(vm_object_t, vm_object_offset_t,
                vm_size_t, memory_object_return_t, int, vm_prot_t);
/*
 *    Routine:    memory_object_should_return_page
 *
 *    Description:
 *        Determine whether the given page should be returned,
 *        based on the page's state and on the given return policy.
 *
 *        We should return the page if one of the following is true:
 *
 *        1. Page is dirty and should_return is not RETURN_NONE.
 *        2. Page is precious and should_return is RETURN_ALL.
 *        3. Should_return is RETURN_ANYTHING.
 *
 *        As a side effect, m->dirty will be made consistent
 *        with pmap_is_modified(m), if should_return is not
 *        MEMORY_OBJECT_RETURN_NONE.
 */

#define memory_object_should_return_page(m, should_return) \
    (should_return != MEMORY_OBJECT_RETURN_NONE && \
     (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
      ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
      (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
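/*
 * Illustrative sketch (not part of the original source): how the policy
 * above evaluates for a caller that only wants dirty data back.  The helper
 * name is hypothetical; MEMORY_OBJECT_RETURN_DIRTY is the standard Mach
 * return-policy constant.
 */
#if 0   /* example only -- not compiled */
static boolean_t
example_page_needs_return(vm_page_t m)
{
    /*
     * A dirty page is returned for RETURN_DIRTY; a merely precious page
     * would need RETURN_ALL (or RETURN_ANYTHING) to be returned.
     */
    return memory_object_should_return_page(m, MEMORY_OBJECT_RETURN_DIRTY);
}
#endif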
typedef int    memory_object_lock_result_t;

#define MEMORY_OBJECT_LOCK_RESULT_DONE          0
#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK    1
#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN    2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN   3

memory_object_lock_result_t memory_object_lock_page(
    vm_page_t               m,
    memory_object_return_t  should_return,
    boolean_t               should_flush,
    vm_prot_t               prot);
/*
 *    Routine:    memory_object_lock_page
 *
 *    Description:
 *        Perform the appropriate lock operations on the
 *        given page.  See the description of
 *        "memory_object_lock_request" for the meanings
 *        of the arguments.
 *
 *        Returns an indication that the operation
 *        completed, blocked, or that the page must
 *        be cleaned.
 */
memory_object_lock_result_t
memory_object_lock_page(
    vm_page_t               m,
    memory_object_return_t  should_return,
    boolean_t               should_flush,
    vm_prot_t               prot)
{
    XPR(XPR_MEMORY_OBJECT,
        "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
        (integer_t)m, should_return, should_flush, prot, 0);
    /*
     *    If we cannot change access to the page,
     *    either because a mapping is in progress
     *    (busy page) or because a mapping has been
     *    wired, then give up.
     */

    if (m->busy || m->cleaning)
        return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
    /*
     *    Don't worry about pages for which the kernel
     *    does not have any data.
     */

    if (m->absent || m->error || m->restart) {
        if(m->error && should_flush) {
            /* dump the page, pager wants us to */
            /* clean it up and there is no */
            /* relevant data to return */
            if(m->wire_count == 0) {
                VM_PAGE_FREE(m);
                return(MEMORY_OBJECT_LOCK_RESULT_DONE);
            }
        } else {
            return(MEMORY_OBJECT_LOCK_RESULT_DONE);
        }
    }
    assert(!m->fictitious);

    if (m->wire_count != 0) {
        /*
         *    If no change would take place
         *    anyway, return successfully.
         *
         *    No change means:
         *        Not flushing AND
         *        No change to page lock [2 checks]  AND
         *        Should not return page
         *
         * XXX  This doesn't handle sending a copy of a wired
         * XXX  page to the pager, but that will require some
         * XXX  significant surgery.
         */
        if (!should_flush &&
            (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
            ! memory_object_should_return_page(m, should_return)) {

            /*
             *    Restart page unlock requests,
             *    even though no change took place.
             *    [Memory managers may be expecting
             *    to see new requests.]
             */
            m->unlock_request = VM_PROT_NONE;

            return(MEMORY_OBJECT_LOCK_RESULT_DONE);
        }

        return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
    }
    /*
     *    If the page is to be flushed, allow
     *    that to be done as part of the protection.
     */

    if (should_flush)
        prot = VM_PROT_ALL;

    /*
     *    If we are decreasing permission, do it now;
     *    let the fault handler take care of increases
     *    (pmap_page_protect may not increase protection).
     */

    if (prot != VM_PROT_NO_CHANGE) {
        if ((m->page_lock ^ prot) & prot) {
            pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
        }

        /* code associated with the vestigial
         * memory_object_data_unlock
         */
        m->page_lock = prot;
        m->lock_supplied = TRUE;
        if (prot != VM_PROT_NONE)
            m->unusual = TRUE;
        else
            m->unusual = FALSE;

        /*
         *    Restart any past unlock requests, even if no
         *    change resulted.  If the manager explicitly
         *    requested no protection change, then it is assumed
         *    to be remembering past requests.
         */

        m->unlock_request = VM_PROT_NONE;
        PAGE_WAKEUP(m);
    }
    /*
     *    Handle page returning.
     */

    if (memory_object_should_return_page(m, should_return)) {

        /*
         *    If we weren't planning
         *    to flush the page anyway,
         *    we may need to remove the
         *    page from the pageout
         *    system and from physical
         *    memory.
         */

        vm_page_lock_queues();
        VM_PAGE_QUEUES_REMOVE(m);
        vm_page_unlock_queues();

        if (!should_flush)
            pmap_page_protect(m->phys_page, VM_PROT_NONE);

        if (m->dirty)
            return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
        else
            return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
    }
    /*
     *    Handle flushing
     */

    if (should_flush) {
        VM_PAGE_FREE(m);
    } else {
        extern boolean_t vm_page_deactivate_hint;

        /*
         *    XXX Make clean but not flush a paging hint,
         *    and deactivate the pages.  This is a hack
         *    because it overloads flush/clean with
         *    implementation-dependent meaning. This only
         *    happens to pages that are already clean.
         */

        if (vm_page_deactivate_hint &&
            (should_return != MEMORY_OBJECT_RETURN_NONE)) {
            vm_page_lock_queues();
            vm_page_deactivate(m);
            vm_page_unlock_queues();
        }
    }

    return(MEMORY_OBJECT_LOCK_RESULT_DONE);
}
#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po)        \
MACRO_BEGIN                                                         \
                                                                    \
    register vm_page_t    hp;                                       \
                                                                    \
    vm_object_unlock(object);                                       \
                                                                    \
    (void) memory_object_data_return(object->pager,                 \
        po,                                                         \
        data_cnt,                                                   \
        (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN),           \
        !should_flush);                                             \
                                                                    \
    vm_object_lock(object);                                         \
MACRO_END
/*
 *    Routine:    memory_object_lock_request [user interface]
 *
 *    Description:
 *        Control use of the data associated with the given
 *        memory object.  For each page in the given range,
 *        perform the following operations, in order:
 *            1)  restrict access to the page (disallow
 *                forms specified by "prot");
 *            2)  return data to the manager (if "should_return"
 *                is RETURN_DIRTY and the page is dirty, or
 *                "should_return" is RETURN_ALL and the page
 *                is either dirty or precious); and,
 *            3)  flush the cached copy (if "should_flush"
 *                is asserted).
 *        The set of pages is defined by a starting offset
 *        ("offset") and size ("size").  Only pages with the
 *        same page alignment as the starting offset are
 *        considered.
 *
 *        A single acknowledgement is sent (to the "reply_to"
 *        port) when these actions are complete.  If successful,
 *        the naked send right for reply_to is consumed.
 */
kern_return_t
memory_object_lock_request(
    memory_object_control_t    control,
    memory_object_offset_t     offset,
    memory_object_size_t       size,
    memory_object_return_t     should_return,
    int                        flags,
    vm_prot_t                  prot)
{
    vm_object_t                object;
    vm_object_offset_t         original_offset = offset;
    boolean_t                  should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

    XPR(XPR_MEMORY_OBJECT,
        "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
        (integer_t)control, offset, size,
        (((should_return&1)<<1)|should_flush), prot);
    /*
     *    Check for bogus arguments.
     */
    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
        return (KERN_INVALID_ARGUMENT);

    size = round_page_64(size);

    /*
     *    Lock the object, and acquire a paging reference to
     *    prevent the memory_object reference from being released.
     */
    vm_object_lock(object);
    vm_object_paging_begin(object);
    offset -= object->paging_offset;

    (void)vm_object_update(object,
        offset, size, should_return, flags, prot);

    vm_object_paging_end(object);
    vm_object_unlock(object);

    return (KERN_SUCCESS);
}
/*
 *    memory_object_release_name:  [interface]
 *
 *    Enforces name semantic on memory_object reference count decrement
 *    This routine should not be called unless the caller holds a name
 *    reference gained through the memory_object_named_create or the
 *    memory_object_rename call.
 *    If the TERMINATE_IDLE flag is set, the call will return if the
 *    reference count is not 1. i.e. idle with the only remaining reference
 *    being the name.
 *    If the decision is made to proceed the name field flag is set to
 *    false and the reference count is decremented.  If the RESPECT_CACHE
 *    flag is set and the reference count has gone to zero, the
 *    memory_object is checked to see if it is cacheable otherwise when
 *    the reference count is zero, it is simply terminated.
 */
kern_return_t
memory_object_release_name(
    memory_object_control_t    control,
    int                        flags)
{
    vm_object_t                object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    return vm_object_release_name(object, flags);
}
/*
 *    Routine:    memory_object_destroy [user interface]
 *
 *        Shut down a memory object, despite the
 *        presence of address map (or other) references
 *        to the vm_object.
 */
kern_return_t
memory_object_destroy(
    memory_object_control_t    control,
    kern_return_t              reason)
{
    vm_object_t                object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    return (vm_object_destroy(object, reason));
}
/*
 *    Routine:    vm_object_sync
 *
 *        Kernel internal function to synch out pages in a given
 *        range within an object to its memory manager.  Much the
 *        same as memory_object_lock_request but page protection
 *        is not changed.
 *
 *        If the should_flush and should_return flags are true pages
 *        are flushed, that is dirty & precious pages are written to
 *        the memory manager and then discarded.  If should_return
 *        is false, only precious pages are returned to the memory
 *        manager.
 *
 *        If should flush is false and should_return true, the memory
 *        manager's copy of the pages is updated.  If should_return
 *        is also false, only the precious pages are updated.  This
 *        last option is of limited utility.
 *
 *    Returns:
 *        FALSE    if no pages were returned to the pager
 *        TRUE     otherwise.
 */

boolean_t
vm_object_sync(
    vm_object_t           object,
    vm_object_offset_t    offset,
    vm_size_t             size,
    boolean_t             should_flush,
    boolean_t             should_return)
{
    boolean_t    rv;

    XPR(XPR_MEMORY_OBJECT,
        "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
        (integer_t)object, offset, size, should_flush, should_return);
    /*
     * Lock the object, and acquire a paging reference to
     * prevent the memory_object and control ports from
     * being destroyed.
     */
    vm_object_lock(object);
    vm_object_paging_begin(object);

    rv = vm_object_update(object, offset, size,
            (should_return) ?
                MEMORY_OBJECT_RETURN_ALL :
                MEMORY_OBJECT_RETURN_NONE,
            (should_flush) ?
                MEMORY_OBJECT_DATA_FLUSH : 0,
            VM_PROT_NO_CHANGE);

    vm_object_paging_end(object);
    vm_object_unlock(object);
    return rv;
}
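/*
 * Illustrative sketch (not part of the original source): a kernel-internal
 * caller pushing a dirty range out to the pager while keeping the pages
 * resident, i.e. should_return without should_flush.  The wrapper name is
 * hypothetical.
 */
#if 0   /* example only -- not compiled */
static boolean_t
example_sync_range(
    vm_object_t           object,
    vm_object_offset_t    offset,
    vm_size_t             size)
{
    /* TRUE means at least one page was handed back to the pager */
    return vm_object_sync(object, offset, size,
            FALSE,    /* should_flush: keep the pages resident */
            TRUE);    /* should_return: write dirty & precious pages */
}
#endif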
/*
 *    Routine:    vm_object_update
 *    Description:
 *        Work function for m_o_lock_request(), vm_o_sync().
 *
 *        Called with object locked and paging ref taken.
 */
boolean_t
vm_object_update(
    register vm_object_t           object,
    register vm_object_offset_t    offset,
    register vm_size_t             size,
    memory_object_return_t         should_return,
    int                            flags,
    vm_prot_t                      prot)
{
    register vm_page_t    m;
    vm_page_t             holding_page;
    vm_size_t             original_size = size;
    vm_object_offset_t    paging_offset = 0;
    vm_object_t           copy_object;
    vm_size_t             data_cnt = 0;
    vm_object_offset_t    last_offset = offset;
    memory_object_lock_result_t    page_lock_result;
    memory_object_lock_result_t    pageout_action;
    boolean_t             data_returned = FALSE;
    boolean_t             update_cow;
    boolean_t             should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
    boolean_t             pending_pageout = FALSE;
    /*
     *    To avoid blocking while scanning for pages, save
     *    dirty pages to be cleaned all at once.
     *
     *    XXXO A similar strategy could be used to limit the
     *    number of times that a scan must be restarted for
     *    other reasons.  Those pages that would require blocking
     *    could be temporarily collected in another list, or
     *    their offsets could be recorded in a small array.
     */

    /*
     * XXX  NOTE: May want to consider converting this to a page list
     * XXX  vm_map_copy interface.  Need to understand object
     * XXX  coalescing implications before doing so.
     */

    update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
            && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
            !(flags & MEMORY_OBJECT_DATA_PURGE)))
            || (flags & MEMORY_OBJECT_COPY_SYNC);
    if((((copy_object = object->copy) != NULL) && update_cow) ||
            (flags & MEMORY_OBJECT_DATA_SYNC)) {
        vm_size_t             i;
        vm_size_t             copy_size;
        vm_object_offset_t    copy_offset;
        vm_prot_t             prot;
        vm_page_t             page;
        vm_page_t             top_page;
        kern_return_t         error = 0;

        if(copy_object != NULL) {
            /* translate offset with respect to shadow's offset */
            copy_offset = (offset >= copy_object->shadow_offset)?
                offset - copy_object->shadow_offset :
                (vm_object_offset_t) 0;
            if(copy_offset > copy_object->size)
                copy_offset = copy_object->size;

            /* clip size with respect to shadow offset */
            copy_size = (offset >= copy_object->shadow_offset) ?
                size : size - (copy_object->shadow_offset - offset);

            if(copy_size <= 0) {
                copy_size = 0;
            } else {
                copy_size = ((copy_offset + copy_size)
                    <= copy_object->size) ?
                    copy_size : copy_object->size - copy_offset;
            }
            /* check for a copy_offset which is beyond the end of */
            /* the copy_object */
            if(copy_size < 0)
                copy_size = 0;

            copy_size += copy_offset;

            vm_object_unlock(object);
            vm_object_lock(copy_object);
        } else {
            copy_object = object;

            copy_size = offset + size;
            copy_offset = offset;
        }
        vm_object_paging_begin(copy_object);
        for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
    RETRY_COW_OF_LOCK_REQUEST:
            prot = VM_PROT_WRITE|VM_PROT_READ;
            switch (vm_fault_page(copy_object, i,
                VM_PROT_WRITE|VM_PROT_READ,
                FALSE,
                THREAD_UNINT,
                copy_offset,
                copy_offset + copy_size,
                VM_BEHAVIOR_SEQUENTIAL,
                &prot,
                &page,
                &top_page,
                (int *)0,
                &error,
                FALSE,
                FALSE, NULL, 0)) {
            case VM_FAULT_SUCCESS:
                if(top_page) {
                    vm_fault_cleanup(
                        page->object, top_page);
                    PAGE_WAKEUP_DONE(page);
                    vm_page_lock_queues();
                    if (!page->active && !page->inactive)
                        vm_page_activate(page);
                    vm_page_unlock_queues();
                    vm_object_lock(copy_object);
                    vm_object_paging_begin(copy_object);
                } else {
                    PAGE_WAKEUP_DONE(page);
                    vm_page_lock_queues();
                    if (!page->active && !page->inactive)
                        vm_page_activate(page);
                    vm_page_unlock_queues();
                }
                break;
            case VM_FAULT_RETRY:
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_INTERRUPTED:
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_MEMORY_SHORTAGE:
                VM_PAGE_WAIT();
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_FICTITIOUS_SHORTAGE:
                vm_page_more_fictitious();
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_MEMORY_ERROR:
                vm_object_lock(object);
                goto BYPASS_COW_COPYIN;
            }
        }
        vm_object_paging_end(copy_object);
        if(copy_object != object) {
            vm_object_unlock(copy_object);
            vm_object_lock(object);
        }
    }
    if((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
        return KERN_SUCCESS;
    }

    if(((copy_object = object->copy) != NULL) &&
            (flags & MEMORY_OBJECT_DATA_PURGE)) {
        copy_object->shadow_severed = TRUE;
        copy_object->shadowed = FALSE;
        copy_object->shadow = NULL;
        /* delete the ref the COW was holding on the target object */
        vm_object_deallocate(object);
    }
BYPASS_COW_COPYIN:
    for (;
         size != 0;
         size -= PAGE_SIZE, offset += PAGE_SIZE_64)
    {
        /*
         * Limit the number of pages to be cleaned at once.
         */
        if (pending_pageout &&
            data_cnt >= PAGE_SIZE * DATA_WRITE_MAX) {

            LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
                pageout_action, paging_offset);

            data_cnt = 0;
            pending_pageout = FALSE;
        }
        while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
            page_lock_result = memory_object_lock_page(m, should_return,
                        should_flush, prot);

            XPR(XPR_MEMORY_OBJECT,
                "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
                (integer_t)object, offset, page_lock_result, 0, 0);

            switch (page_lock_result)
            {
              case MEMORY_OBJECT_LOCK_RESULT_DONE:
                /*
                 *    End of a cluster of dirty pages.
                 */
                if(pending_pageout) {
                    LIST_REQ_PAGEOUT_PAGES(object,
                        data_cnt, pageout_action,
                        paging_offset);
                    data_cnt = 0;
                    pending_pageout = FALSE;
                    continue;
                }
                break;
:
770 * Since it is necessary to block,
771 * clean any dirty pages now.
773 if(pending_pageout
) {
774 LIST_REQ_PAGEOUT_PAGES(object
,
775 data_cnt
, pageout_action
,
777 pending_pageout
= FALSE
;
782 PAGE_SLEEP(object
, m
, THREAD_UNINT
);
              case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
              case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
                /*
                 * The clean and return cases are similar.
                 */

                /*
                 * if this would form a discontiguous block,
                 * clean the old pages and start anew.
                 */

                /*
                 * Mark the page busy since we unlock the
                 * object below.
                 */
                m->busy = TRUE;
                if (pending_pageout &&
                    (last_offset != offset ||
                     pageout_action != page_lock_result)) {
                    LIST_REQ_PAGEOUT_PAGES(object,
                        data_cnt, pageout_action,
                        paging_offset);
                    pending_pageout = FALSE;
                    data_cnt = 0;
                }
                m->busy = FALSE;
                holding_page = VM_PAGE_NULL;
                if (m->cleaning) {
                    PAGE_SLEEP(object, m, THREAD_UNINT);
                    continue;
                }
                if(!pending_pageout) {
                    pending_pageout = TRUE;
                    pageout_action = page_lock_result;
                    paging_offset = offset;
                }
                if (should_flush) {
                    vm_page_lock_queues();
                    m->list_req_pending = TRUE;
                    m->cleaning = TRUE;
                    m->busy = TRUE;
                    m->pageout = TRUE;
                    vm_page_wire(m);
                    vm_page_unlock_queues();
                } else {
                    /*
                     * Clean but do not flush
                     */
                    vm_page_lock_queues();
                    m->list_req_pending = TRUE;
                    m->cleaning = TRUE;
                    vm_page_unlock_queues();
                }
                vm_object_unlock(object);

                data_cnt += PAGE_SIZE;
                last_offset = offset + PAGE_SIZE_64;
                data_returned = TRUE;

                vm_object_lock(object);
                break;
            }
            break;
        }
    }
    /*
     *    We have completed the scan for applicable pages.
     *    Clean any pages that have been saved.
     */
    if (pending_pageout) {
        LIST_REQ_PAGEOUT_PAGES(object,
            data_cnt, pageout_action, paging_offset);
    }
    return (data_returned);
}
/*
 *    Routine:    memory_object_synchronize_completed [user interface]
 *
 *        Tell kernel that previously synchronized data
 *        (memory_object_synchronize) has been queued or placed on the
 *        backing storage.
 *
 *    Note: there may be multiple synchronize requests for a given
 *    memory object outstanding but they will not overlap.
 */

kern_return_t
memory_object_synchronize_completed(
    memory_object_control_t    control,
    memory_object_offset_t     offset,
    vm_offset_t                length)
{
    vm_object_t    object;
    msync_req_t    msr;

    XPR(XPR_MEMORY_OBJECT,
        "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
        (integer_t)object, offset, length, 0, 0);
    /*
     *    Look for bogus arguments
     */

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    vm_object_lock(object);

    /*
     * search for sync request structure
     */
    queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
        if (msr->offset == offset && msr->length == length) {
            queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
            break;
        }
    }/* queue_iterate */

    if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
        vm_object_unlock(object);
        return KERN_INVALID_ARGUMENT;
    }

    msr_lock(msr);
    vm_object_unlock(object);
    msr->flag = VM_MSYNC_DONE;
    msr_unlock(msr);
    thread_wakeup((event_t) msr);

    return KERN_SUCCESS;
}/* memory_object_synchronize_completed */
kern_return_t
vm_object_set_attributes_common(
    vm_object_t    object,
    boolean_t      may_cache,
    memory_object_copy_strategy_t copy_strategy,
    boolean_t      temporary,
    vm_size_t      cluster_size,
    boolean_t      silent_overwrite,
    boolean_t      advisory_pageout)
{
    boolean_t      object_became_ready;

    XPR(XPR_MEMORY_OBJECT,
        "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
        (integer_t)object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);
    if (object == VM_OBJECT_NULL)
        return(KERN_INVALID_ARGUMENT);

    /*
     *    Verify the attributes of importance
     */

    switch(copy_strategy) {
    case MEMORY_OBJECT_COPY_NONE:
    case MEMORY_OBJECT_COPY_DELAY:
        break;
    default:
        return(KERN_INVALID_ARGUMENT);
    }

#if !ADVISORY_PAGEOUT
    if (silent_overwrite || advisory_pageout)
        return(KERN_INVALID_ARGUMENT);
#endif  /* !ADVISORY_PAGEOUT */
    if (cluster_size != 0) {
        int    pages_per_cluster;
        pages_per_cluster = atop_32(cluster_size);
        /*
         * Cluster size must be integral multiple of page size,
         * and be a power of 2 number of pages.
         */
        if ((cluster_size & (PAGE_SIZE-1)) ||
            ((pages_per_cluster-1) & pages_per_cluster))
            return KERN_INVALID_ARGUMENT;
    }

    vm_object_lock(object);
    /*
     *    Copy the attributes
     */
    assert(!object->internal);
    object_became_ready = !object->pager_ready;
    object->copy_strategy = copy_strategy;
    object->can_persist = may_cache;
    object->temporary = temporary;
    object->silent_overwrite = silent_overwrite;
    object->advisory_pageout = advisory_pageout;
    if (cluster_size == 0)
        cluster_size = PAGE_SIZE;
    object->cluster_size = cluster_size;

    assert(cluster_size >= PAGE_SIZE &&
           cluster_size % PAGE_SIZE == 0);

    /*
     *    Wake up anyone waiting for the ready attribute
     *    to become asserted.
     */

    if (object_became_ready) {
        object->pager_ready = TRUE;
        vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
    }

    vm_object_unlock(object);

    return(KERN_SUCCESS);
}
/*
 *    Set the memory object attribute as provided.
 *
 *    XXX This routine cannot be completed until the vm_msync, clean
 *        in place, and cluster work is completed. See ifdef notyet
 *        below and note that vm_object_set_attributes_common()
 *        may have to be expanded.
 */
kern_return_t
memory_object_change_attributes(
    memory_object_control_t       control,
    memory_object_flavor_t        flavor,
    memory_object_info_t          attributes,
    mach_msg_type_number_t        count)
{
    vm_object_t                   object;
    kern_return_t                 result = KERN_SUCCESS;
    boolean_t                     temporary;
    boolean_t                     may_cache;
    boolean_t                     invalidate;
    vm_size_t                     cluster_size;
    memory_object_copy_strategy_t copy_strategy;
    boolean_t                     silent_overwrite;
    boolean_t                     advisory_pageout;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    vm_object_lock(object);
    temporary = object->temporary;
    may_cache = object->can_persist;
    copy_strategy = object->copy_strategy;
    silent_overwrite = object->silent_overwrite;
    advisory_pageout = object->advisory_pageout;
#if notyet
    invalidate = object->invalidate;
#endif
    cluster_size = object->cluster_size;
    vm_object_unlock(object);
:
1055 old_memory_object_behave_info_t behave
;
1057 if (count
!= OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT
) {
1058 result
= KERN_INVALID_ARGUMENT
;
1062 behave
= (old_memory_object_behave_info_t
) attributes
;
1064 temporary
= behave
->temporary
;
1065 invalidate
= behave
->invalidate
;
1066 copy_strategy
= behave
->copy_strategy
;
        case MEMORY_OBJECT_BEHAVIOR_INFO:
        {
        memory_object_behave_info_t    behave;

        if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        behave = (memory_object_behave_info_t) attributes;

        temporary = behave->temporary;
        invalidate = behave->invalidate;
        copy_strategy = behave->copy_strategy;
        silent_overwrite = behave->silent_overwrite;
        advisory_pageout = behave->advisory_pageout;
        break;
        }
        case MEMORY_OBJECT_PERFORMANCE_INFO:
        {
        memory_object_perf_info_t    perf;

        if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        perf = (memory_object_perf_info_t) attributes;

        may_cache = perf->may_cache;
        cluster_size = round_page_32(perf->cluster_size);
        break;
        }
        case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
        {
        old_memory_object_attr_info_t    attr;

        if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        attr = (old_memory_object_attr_info_t) attributes;

        may_cache = attr->may_cache;
        copy_strategy = attr->copy_strategy;
        cluster_size = page_size;
        break;
        }
        case MEMORY_OBJECT_ATTRIBUTE_INFO:
        {
        memory_object_attr_info_t    attr;

        if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
            result = KERN_INVALID_ARGUMENT;
            break;
        }

        attr = (memory_object_attr_info_t) attributes;

        copy_strategy = attr->copy_strategy;
        may_cache = attr->may_cache_object;
        cluster_size = attr->cluster_size;
        temporary = attr->temporary;
        break;
        }

        default:
        result = KERN_INVALID_ARGUMENT;
        break;
    }
    if (result != KERN_SUCCESS)
        return(result);

    if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
        copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        temporary = TRUE;
    } else {
        temporary = FALSE;
    }

    /*
     * XXX  may_cache may become a tri-valued variable to handle
     * XXX  uncache if not in use.
     */
    return (vm_object_set_attributes_common(object,
                        may_cache,
                        copy_strategy,
                        temporary,
                        cluster_size,
                        silent_overwrite,
                        advisory_pageout));
}
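/*
 * Illustrative sketch (not part of the original source): a pager enabling
 * caching and requesting an 8-page cluster through the
 * MEMORY_OBJECT_PERFORMANCE_INFO flavor.  The *_data_t typedef follows the
 * usual Mach convention and is assumed here; the control argument and the
 * wrapper name are hypothetical.
 */
#if 0   /* example only -- not compiled */
static kern_return_t
example_set_perf_attributes(memory_object_control_t control)
{
    memory_object_perf_info_data_t    perf;

    perf.cluster_size = 8 * PAGE_SIZE;    /* power-of-2 number of pages */
    perf.may_cache = TRUE;

    return memory_object_change_attributes(control,
            MEMORY_OBJECT_PERFORMANCE_INFO,
            (memory_object_info_t) &perf,
            MEMORY_OBJECT_PERF_INFO_COUNT);
}
#endif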
kern_return_t
memory_object_get_attributes(
    memory_object_control_t    control,
    memory_object_flavor_t     flavor,
    memory_object_info_t       attributes,    /* pointer to OUT array */
    mach_msg_type_number_t     *count)        /* IN/OUT */
{
    kern_return_t              ret = KERN_SUCCESS;
    vm_object_t                object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    vm_object_lock(object);
    switch (flavor) {
        case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
        {
        old_memory_object_behave_info_t    behave;

        if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        behave = (old_memory_object_behave_info_t) attributes;
        behave->copy_strategy = object->copy_strategy;
        behave->temporary = object->temporary;
#if notyet  /* remove when vm_msync complies and clean in place fini */
        behave->invalidate = object->invalidate;
#else
        behave->invalidate = FALSE;
#endif

        *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
        break;
        }
        case MEMORY_OBJECT_BEHAVIOR_INFO:
        {
        memory_object_behave_info_t    behave;

        if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        behave = (memory_object_behave_info_t) attributes;
        behave->copy_strategy = object->copy_strategy;
        behave->temporary = object->temporary;
#if notyet  /* remove when vm_msync complies and clean in place fini */
        behave->invalidate = object->invalidate;
#else
        behave->invalidate = FALSE;
#endif
        behave->advisory_pageout = object->advisory_pageout;
        behave->silent_overwrite = object->silent_overwrite;
        *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
        break;
        }
        case MEMORY_OBJECT_PERFORMANCE_INFO:
        {
        memory_object_perf_info_t    perf;

        if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        perf = (memory_object_perf_info_t) attributes;
        perf->cluster_size = object->cluster_size;
        perf->may_cache = object->can_persist;

        *count = MEMORY_OBJECT_PERF_INFO_COUNT;
        break;
        }
        case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
        {
        old_memory_object_attr_info_t    attr;

        if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        attr = (old_memory_object_attr_info_t) attributes;
        attr->may_cache = object->can_persist;
        attr->copy_strategy = object->copy_strategy;

        *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
        break;
        }
        case MEMORY_OBJECT_ATTRIBUTE_INFO:
        {
        memory_object_attr_info_t    attr;

        if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
            ret = KERN_INVALID_ARGUMENT;
            break;
        }

        attr = (memory_object_attr_info_t) attributes;
        attr->copy_strategy = object->copy_strategy;
        attr->cluster_size = object->cluster_size;
        attr->may_cache_object = object->can_persist;
        attr->temporary = object->temporary;

        *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
        break;
        }

        default:
        ret = KERN_INVALID_ARGUMENT;
        break;
    }

    vm_object_unlock(object);

    return(ret);
}
/*
 *    Routine:    memory_object_upl_request [interface]
 *
 *        Cause the population of a portion of a vm_object.
 *        Depending on the nature of the request, the pages
 *        returned may contain valid data or be uninitialized.
 */

kern_return_t
memory_object_upl_request(
    memory_object_control_t    control,
    memory_object_offset_t     offset,
    vm_size_t                  size,
    upl_t                      *upl_ptr,
    upl_page_info_array_t      user_page_list,
    unsigned int               *page_list_count,
    int                        cntrl_flags)
{
    vm_object_t                object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    return vm_object_upl_request(object,
            offset,
            size,
            upl_ptr,
            user_page_list,
            page_list_count,
            cntrl_flags);
}
/*
 *    Routine:    memory_object_super_upl_request [interface]
 *
 *        Cause the population of a portion of a vm_object
 *        in much the same way as memory_object_upl_request.
 *        Depending on the nature of the request, the pages
 *        returned may contain valid data or be uninitialized.
 *        However, the region may be expanded up to the super
 *        cluster size provided.
 */

kern_return_t
memory_object_super_upl_request(
    memory_object_control_t    control,
    memory_object_offset_t     offset,
    vm_size_t                  size,
    vm_size_t                  super_cluster,
    upl_t                      *upl,
    upl_page_info_t            *user_page_list,
    unsigned int               *page_list_count,
    int                        cntrl_flags)
{
    vm_object_t                object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    return vm_object_super_upl_request(object,
            offset,
            size,
            super_cluster,
            upl,
            user_page_list,
            page_list_count,
            cntrl_flags);
}
int vm_stat_discard_cleared_reply = 0;
int vm_stat_discard_cleared_unset = 0;
int vm_stat_discard_cleared_too_late = 0;
/*
 *    Routine:    host_default_memory_manager [interface]
 *
 *        set/get the default memory manager port and default cluster
 *        size.
 *
 *        If successful, consumes the supplied naked send right.
 */
kern_return_t
host_default_memory_manager(
    host_priv_t                host_priv,
    memory_object_default_t    *default_manager,
    vm_size_t                  cluster_size)
{
    memory_object_default_t    current_manager;
    memory_object_default_t    new_manager;
    memory_object_default_t    returned_manager;

    if (host_priv == HOST_PRIV_NULL)
        return(KERN_INVALID_HOST);

    assert(host_priv == &realhost);

    new_manager = *default_manager;
    mutex_lock(&memory_manager_default_lock);
    current_manager = memory_manager_default;
1401 current_manager
= memory_manager_default
;
1403 if (new_manager
== MEMORY_OBJECT_DEFAULT_NULL
) {
1405 * Retrieve the current value.
1407 memory_object_default_reference(current_manager
);
1408 returned_manager
= current_manager
;
1411 * Retrieve the current value,
1412 * and replace it with the supplied value.
1413 * We return the old reference to the caller
1414 * but we have to take a reference on the new
1418 returned_manager
= current_manager
;
1419 memory_manager_default
= new_manager
;
1420 memory_object_default_reference(new_manager
);
1422 if (cluster_size
% PAGE_SIZE
!= 0) {
1424 mutex_unlock(&memory_manager_default_lock
);
1425 return KERN_INVALID_ARGUMENT
;
1427 cluster_size
= round_page_32(cluster_size
);
1430 memory_manager_default_cluster
= cluster_size
;
1433 * In case anyone's been waiting for a memory
1434 * manager to be established, wake them up.
1437 thread_wakeup((event_t
) &memory_manager_default
);
1440 mutex_unlock(&memory_manager_default_lock
);
1442 *default_manager
= returned_manager
;
1443 return(KERN_SUCCESS
);
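/*
 * Illustrative sketch (not part of the original source): querying the
 * current default memory manager without replacing it, by passing
 * MEMORY_OBJECT_DEFAULT_NULL as the new value.  The wrapper name is
 * hypothetical.
 */
#if 0   /* example only -- not compiled */
static memory_object_default_t
example_query_default_manager(host_priv_t host_priv)
{
    memory_object_default_t manager = MEMORY_OBJECT_DEFAULT_NULL;

    /* on success, *default_manager carries a reference to the current manager */
    if (host_default_memory_manager(host_priv, &manager, 0) != KERN_SUCCESS)
        return MEMORY_OBJECT_DEFAULT_NULL;
    return manager;
}
#endif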
/*
 *    Routine:    memory_manager_default_reference
 *
 *        Returns a naked send right for the default
 *        memory manager.  The returned right is always
 *        valid (not IP_NULL or IP_DEAD).
 */
__private_extern__ memory_object_default_t
memory_manager_default_reference(
    vm_size_t    *cluster_size)
{
    memory_object_default_t    current_manager;

    mutex_lock(&memory_manager_default_lock);
    current_manager = memory_manager_default;
    while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
        wait_result_t    res;

        res = thread_sleep_mutex((event_t) &memory_manager_default,
                    &memory_manager_default_lock,
                    THREAD_UNINT);
        assert(res == THREAD_AWAKENED);
        current_manager = memory_manager_default;
    }
    memory_object_default_reference(current_manager);
    *cluster_size = memory_manager_default_cluster;
    mutex_unlock(&memory_manager_default_lock);

    return current_manager;
}
/*
 *    Routine:    memory_manager_default_check
 *
 *        Check whether a default memory manager has been set
 *        up yet, or not. Returns KERN_SUCCESS if dmm exists,
 *        and KERN_FAILURE if dmm does not exist.
 *
 *        If there is no default memory manager, log an error,
 *        but only the first time.
 */
__private_extern__ kern_return_t
memory_manager_default_check(void)
{
    memory_object_default_t    current;

    mutex_lock(&memory_manager_default_lock);
    current = memory_manager_default;
    if (current == MEMORY_OBJECT_DEFAULT_NULL) {
        static boolean_t logged;    /* initialized to 0 */
        boolean_t        complain = !logged;

        logged = TRUE;
        mutex_unlock(&memory_manager_default_lock);
        if (complain)
            printf("Warning: No default memory manager\n");
        return(KERN_FAILURE);
    } else {
        mutex_unlock(&memory_manager_default_lock);
        return(KERN_SUCCESS);
    }
}
__private_extern__ void
memory_manager_default_init(void)
{
    memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
    mutex_init(&memory_manager_default_lock, ETAP_VM_MEMMAN);
}
void
memory_object_deactivate_pages(
    vm_object_t           object,
    vm_object_offset_t    offset,
    vm_object_size_t      size,
    boolean_t             kill_page)
{
    vm_object_t    orig_object;
    int            pages_moved = 0;
    int            pages_found = 0;

    /*
     * entered with object lock held, acquire a paging reference to
     * prevent the memory_object and control ports from
     * being destroyed.
     */
    orig_object = object;

    for (;;) {
        register vm_page_t    m;
        vm_object_offset_t    toffset;
        vm_object_size_t      tsize;

        vm_object_paging_begin(object);
        vm_page_lock_queues();
        for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {

            if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {

                if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {

                    m->reference = FALSE;
                    pmap_clear_reference(m->phys_page);

                    if ((kill_page) && (object->internal)) {
                        m->precious = FALSE;
                        m->dirty = FALSE;
                        pmap_clear_modify(m->phys_page);
                        vm_external_state_clr(object->existence_map, offset);
                    }
                    VM_PAGE_QUEUES_REMOVE(m);

                    if (m->zero_fill) {
                        queue_enter_first(
                            &vm_page_queue_zf,
                            m, vm_page_t, pageq);
                    } else {
                        queue_enter_first(
                            &vm_page_queue_inactive,
                            m, vm_page_t, pageq);
                    }

                    m->inactive = TRUE;
                    if (!m->fictitious)
                        vm_page_inactive_count++;

                    pages_moved++;
                }
            }
        }
        vm_page_unlock_queues();
        vm_object_paging_end(object);
1583 vm_object_paging_end(object
);
1585 if (object
->shadow
) {
1586 vm_object_t tmp_object
;
1590 offset
+= object
->shadow_offset
;
1592 tmp_object
= object
->shadow
;
1593 vm_object_lock(tmp_object
);
1595 if (object
!= orig_object
)
1596 vm_object_unlock(object
);
1597 object
= tmp_object
;
1601 if (object
!= orig_object
)
1602 vm_object_unlock(object
);
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the object rather than on a UPL */

kern_return_t
memory_object_page_op(
    memory_object_control_t    control,
    memory_object_offset_t     offset,
    int                        ops,
    ppnum_t                    *phys_entry,
    int                        *flags)
{
    vm_object_t    object;
    vm_page_t      dst_page;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    vm_object_lock(object);
& UPL_POP_PHYSICAL
) {
1627 if(object
->phys_contiguous
) {
1629 *phys_entry
= (ppnum_t
)
1630 (object
->shadow_offset
>> 12);
1632 vm_object_unlock(object
);
1633 return KERN_SUCCESS
;
1635 vm_object_unlock(object
);
1636 return KERN_INVALID_OBJECT
;
    if(object->phys_contiguous) {
        vm_object_unlock(object);
        return KERN_INVALID_OBJECT;
    }

    while(TRUE) {
        if((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
            vm_object_unlock(object);
            return KERN_FAILURE;
        }

        /* Sync up on getting the busy bit */
        if((dst_page->busy || dst_page->cleaning) &&
               (((ops & UPL_POP_SET) &&
               (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
            /* someone else is playing with the page, we will */
            /* have to wait */
            PAGE_SLEEP(object, dst_page, THREAD_UNINT);
            continue;
        }

        if (ops & UPL_POP_DUMP) {
            vm_page_lock_queues();
            vm_page_free(dst_page);
            vm_page_unlock_queues();
            break;
        }
        /* Get the condition of flags before requested ops */
        /* are undertaken */

        if(dst_page->dirty) *flags |= UPL_POP_DIRTY;
        if(dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
        if(dst_page->precious) *flags |= UPL_POP_PRECIOUS;
        if(dst_page->absent) *flags |= UPL_POP_ABSENT;
        if(dst_page->busy) *flags |= UPL_POP_BUSY;

        *phys_entry = dst_page->phys_page;
        /* The caller should have made a call either contingent with */
        /* or prior to this call to set UPL_POP_BUSY */
        if(ops & UPL_POP_SET) {
            /* The protection granted with this assert will */
            /* not be complete. If the caller violates the */
            /* convention and attempts to change page state */
            /* without first setting busy we may not see it */
            /* because the page may already be busy.  However */
            /* if such violations occur we will assert sooner */
            /* or later. */
            assert(dst_page->busy || (ops & UPL_POP_BUSY));
            if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
            if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
            if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
            if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
            if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
        }
        if(ops & UPL_POP_CLR) {
            assert(dst_page->busy);
            if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
            if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
            if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
            if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
            if (ops & UPL_POP_BUSY) {
                dst_page->busy = FALSE;
                PAGE_WAKEUP(dst_page);
            }
        }
        break;
    }

    vm_object_unlock(object);
    return KERN_SUCCESS;
}
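/*
 * Illustrative sketch (not part of the original source): claiming a page
 * busy while reading back its state flags and physical page number via
 * memory_object_page_op().  The wrapper name is hypothetical.
 */
#if 0   /* example only -- not compiled */
static kern_return_t
example_query_page(
    memory_object_control_t    control,
    memory_object_offset_t     offset,
    ppnum_t                    *phys,
    int                        *state)
{
    /* set UPL_POP_BUSY per the convention documented above */
    return memory_object_page_op(control, offset,
            UPL_POP_SET | UPL_POP_BUSY, phys, state);
}
#endif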
static zone_t mem_obj_control_zone;

__private_extern__ void
memory_object_control_bootstrap(void)
{
    int    i;

    i = (vm_size_t) sizeof (struct memory_object_control);
    mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
    return;
}
__private_extern__ memory_object_control_t
memory_object_control_allocate(
    vm_object_t    object)
{
    memory_object_control_t    control;

    control = (memory_object_control_t)zalloc(mem_obj_control_zone);
    if (control != MEMORY_OBJECT_CONTROL_NULL)
        control->object = object;
    return (control);
}
__private_extern__ void
memory_object_control_collapse(
    memory_object_control_t    control,
    vm_object_t                object)
{
    assert((control->object != VM_OBJECT_NULL) &&
           (control->object != object));
    control->object = object;
}
__private_extern__ vm_object_t
memory_object_control_to_vm_object(
    memory_object_control_t    control)
{
    if (control == MEMORY_OBJECT_CONTROL_NULL)
        return VM_OBJECT_NULL;

    return (control->object);
}
memory_object_control_t
convert_port_to_mo_control(
    mach_port_t    port)
{
    return MEMORY_OBJECT_CONTROL_NULL;
}

mach_port_t
convert_mo_control_to_port(
    memory_object_control_t    control)
{
    return MACH_PORT_NULL;
}
void
memory_object_control_reference(
    memory_object_control_t    control)
{
    return;
}

/*
 * We only ever issue one of these references, so kill it
 * when that gets released (should switch the real reference
 * counting in true port-less EMMI).
 */
void
memory_object_control_deallocate(
    memory_object_control_t    control)
{
    zfree(mem_obj_control_zone, (vm_offset_t)control);
}
void
memory_object_control_disable(
    memory_object_control_t    control)
{
    assert(control->object != VM_OBJECT_NULL);
    control->object = VM_OBJECT_NULL;
}
void
memory_object_default_reference(
    memory_object_default_t    dmm)
{
    ipc_port_make_send(dmm);
}

void
memory_object_default_deallocate(
    memory_object_default_t    dmm)
{
    ipc_port_release_send(dmm);
}
memory_object_t
convert_port_to_memory_object(
    mach_port_t    port)
{
    return (MEMORY_OBJECT_NULL);
}

mach_port_t
convert_memory_object_to_port(
    memory_object_t    object)
{
    return (MACH_PORT_NULL);
}

#ifdef MACH_BSD
/* remove after component interface available */
extern int    vnode_pager_workaround;
extern int    device_pager_workaround;
#endif
/* Routine memory_object_reference */
void memory_object_reference(
    memory_object_t memory_object)
{
extern void dp_memory_object_reference(memory_object_t);

#ifdef MACH_BSD
    extern void vnode_pager_reference(memory_object_t);
    extern void device_pager_reference(memory_object_t);

    if(memory_object->pager == &vnode_pager_workaround) {
        vnode_pager_reference(memory_object);
    } else if(memory_object->pager == &device_pager_workaround) {
        device_pager_reference(memory_object);
    } else
#endif
        dp_memory_object_reference(memory_object);
}
1862 void memory_object_deallocate(
1863 memory_object_t memory_object
)
1865 extern void dp_memory_object_deallocate(memory_object_t
);
1868 extern void vnode_pager_deallocate(memory_object_t
);
1869 extern void device_pager_deallocate(memory_object_t
);
1871 if(memory_object
->pager
== &vnode_pager_workaround
) {
1872 vnode_pager_deallocate(memory_object
);
1873 } else if(memory_object
->pager
== &device_pager_workaround
) {
1874 device_pager_deallocate(memory_object
);
1877 dp_memory_object_deallocate(memory_object
);
1881 /* Routine memory_object_init */
1882 kern_return_t memory_object_init
1884 memory_object_t memory_object
,
1885 memory_object_control_t memory_control
,
1886 vm_size_t memory_object_page_size
1889 extern kern_return_t
dp_memory_object_init(memory_object_t
,
1890 memory_object_control_t
,
1893 extern kern_return_t
vnode_pager_init(memory_object_t
,
1894 memory_object_control_t
,
1896 extern kern_return_t
device_pager_init(memory_object_t
,
1897 memory_object_control_t
,
1900 if(memory_object
->pager
== &vnode_pager_workaround
) {
1901 return vnode_pager_init(memory_object
,
1903 memory_object_page_size
);
1904 } else if(memory_object
->pager
== &device_pager_workaround
) {
1905 return device_pager_init(memory_object
,
1907 memory_object_page_size
);
1910 return dp_memory_object_init(memory_object
,
1912 memory_object_page_size
);
/* Routine memory_object_terminate */
kern_return_t memory_object_terminate
(
    memory_object_t memory_object
)
{
extern kern_return_t dp_memory_object_terminate(memory_object_t);

#ifdef MACH_BSD
extern kern_return_t vnode_pager_terminate(memory_object_t);
extern kern_return_t device_pager_terminate(memory_object_t);

    if(memory_object->pager == &vnode_pager_workaround) {
        return vnode_pager_terminate(memory_object);
    } else if(memory_object->pager == &device_pager_workaround) {
        return device_pager_terminate(memory_object);
    } else
#endif
        return dp_memory_object_terminate(memory_object);
}
/* Routine memory_object_data_request */
kern_return_t memory_object_data_request
(
    memory_object_t memory_object,
    memory_object_offset_t offset,
    vm_size_t length,
    vm_prot_t desired_access
)
{
extern kern_return_t dp_memory_object_data_request(memory_object_t,
        memory_object_offset_t, vm_size_t, vm_prot_t);
#ifdef MACH_BSD
extern kern_return_t vnode_pager_data_request(memory_object_t,
        memory_object_offset_t, vm_size_t, vm_prot_t);
extern kern_return_t device_pager_data_request(memory_object_t,
        memory_object_offset_t, vm_size_t, vm_prot_t);

    if (memory_object->pager == &vnode_pager_workaround) {
        return vnode_pager_data_request(memory_object,
                offset,
                length,
                desired_access);
    } else if (memory_object->pager == &device_pager_workaround) {
        return device_pager_data_request(memory_object,
                offset,
                length,
                desired_access);
    } else
#endif
        return dp_memory_object_data_request(memory_object,
                offset,
                length,
                desired_access);
}
1973 kern_return_t memory_object_data_return
1975 memory_object_t memory_object
,
1976 memory_object_offset_t offset
,
1979 boolean_t kernel_copy
1982 extern kern_return_t
dp_memory_object_data_return(memory_object_t
,
1983 memory_object_offset_t
,
1988 extern kern_return_t
vnode_pager_data_return(memory_object_t
,
1989 memory_object_offset_t
,
1993 extern kern_return_t
device_pager_data_return(memory_object_t
,
1994 memory_object_offset_t
,
1999 if (memory_object
->pager
== &vnode_pager_workaround
) {
2000 return vnode_pager_data_return(memory_object
,
2005 } else if (memory_object
->pager
== &device_pager_workaround
) {
2006 return device_pager_data_return(memory_object
,
2013 return dp_memory_object_data_return(memory_object
,
2020 /* Routine memory_object_data_initialize */
2021 kern_return_t memory_object_data_initialize
2023 memory_object_t memory_object
,
2024 memory_object_offset_t offset
,
2029 extern kern_return_t
dp_memory_object_data_initialize(memory_object_t
,
2030 memory_object_offset_t
,
2033 extern kern_return_t
vnode_pager_data_initialize(memory_object_t
,
2034 memory_object_offset_t
,
2036 extern kern_return_t
device_pager_data_initialize(memory_object_t
,
2037 memory_object_offset_t
,
2040 if (memory_object
->pager
== &vnode_pager_workaround
) {
2041 return vnode_pager_data_initialize(memory_object
,
2044 } else if (memory_object
->pager
== &device_pager_workaround
) {
2045 return device_pager_data_initialize(memory_object
,
2050 return dp_memory_object_data_initialize(memory_object
,
2055 /* Routine memory_object_data_unlock */
2056 kern_return_t memory_object_data_unlock
2058 memory_object_t memory_object
,
2059 memory_object_offset_t offset
,
2061 vm_prot_t desired_access
2064 extern kern_return_t
dp_memory_object_data_unlock(memory_object_t
,
2065 memory_object_offset_t
,
2069 extern kern_return_t
vnode_pager_data_unlock(memory_object_t
,
2070 memory_object_offset_t
,
2073 extern kern_return_t
device_pager_data_unlock(memory_object_t
,
2074 memory_object_offset_t
,
2078 if (memory_object
->pager
== &vnode_pager_workaround
) {
2079 return vnode_pager_data_unlock(memory_object
,
2083 } else if (memory_object
->pager
== &device_pager_workaround
) {
2084 return device_pager_data_unlock(memory_object
,
2090 return dp_memory_object_data_unlock(memory_object
,
2097 /* Routine memory_object_synchronize */
2098 kern_return_t memory_object_synchronize
2100 memory_object_t memory_object
,
2101 memory_object_offset_t offset
,
2103 vm_sync_t sync_flags
2106 extern kern_return_t
dp_memory_object_data_synchronize(memory_object_t
,
2107 memory_object_offset_t
,
2111 extern kern_return_t
vnode_pager_data_synchronize(memory_object_t
,
2112 memory_object_offset_t
,
2115 extern kern_return_t
device_pager_data_synchronize(memory_object_t
,
2116 memory_object_offset_t
,
2120 if (memory_object
->pager
== &vnode_pager_workaround
) {
2121 return vnode_pager_synchronize(
2126 } else if (memory_object
->pager
== &device_pager_workaround
) {
2127 return device_pager_synchronize(
2134 return dp_memory_object_synchronize(
2141 /* Routine memory_object_unmap */
2142 kern_return_t memory_object_unmap
2144 memory_object_t memory_object
2147 extern kern_return_t
dp_memory_object_unmap(memory_object_t
);
2149 extern kern_return_t
vnode_pager_unmap(memory_object_t
);
2150 extern kern_return_t
device_pager_unmap(memory_object_t
);
2152 if (memory_object
->pager
== &vnode_pager_workaround
) {
2153 return vnode_pager_unmap(memory_object
);
2154 } else if (memory_object
->pager
== &device_pager_workaround
) {
2155 return device_pager_unmap(memory_object
);
2158 return dp_memory_object_unmap(memory_object
);
2161 /* Routine memory_object_create */
2162 kern_return_t memory_object_create
2164 memory_object_default_t default_memory_manager
,
2165 vm_size_t new_memory_object_size
,
2166 memory_object_t
*new_memory_object
2169 extern kern_return_t
default_pager_memory_object_create(memory_object_default_t
,
2173 return default_pager_memory_object_create(default_memory_manager
,
2174 new_memory_object_size
,