/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/memory_object.c
 *	Author:	Michael Wayne Young
 *
 *	External memory management interface control functions.
 */
#include <advisory_pageout.h>

/*
 *	Interface dependencies:
 */

#include <mach/std_types.h>	/* For pointer_t */
#include <mach/mach_types.h>

#include <mach/kern_return.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/host_priv_server.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/message.h>

/*
 *	Implementation dependencies:
 */
#include <string.h>		/* For memcpy() */

#include <kern/host.h>
#include <kern/thread.h>	/* For current_thread() */
#include <kern/ipc_mig.h>
#include <kern/misc_protos.h>

#include <vm/vm_object.h>
#include <vm/vm_fault.h>
#include <vm/memory_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>		/* For pmap_clear_modify */
#include <vm/vm_kern.h>		/* For kernel_map, vm_move */
#include <vm/vm_map.h>		/* For vm_map_pageable */

#if MACH_PAGEMAP
#include <vm/vm_external.h>
#endif	/* MACH_PAGEMAP */
memory_object_default_t	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
vm_size_t		memory_manager_default_cluster = 0;
decl_mutex_data(,	memory_manager_default_lock)

/*
 *	Forward ref to file-local function:
 */
boolean_t	vm_object_update(vm_object_t, vm_object_offset_t,
			vm_size_t, memory_object_return_t, int, vm_prot_t);
/*
 *	Routine:	memory_object_should_return_page
 *
 *	Description:
 *		Determine whether the given page should be returned,
 *		based on the page's state and on the given return policy.
 *
 *		We should return the page if one of the following is true:
 *
 *		1. Page is dirty and should_return is not RETURN_NONE.
 *		2. Page is precious and should_return is RETURN_ALL.
 *		3. Should_return is RETURN_ANYTHING.
 *
 *		As a side effect, m->dirty will be made consistent
 *		with pmap_is_modified(m), if should_return is not
 *		MEMORY_OBJECT_RETURN_NONE.
 */

#define	memory_object_should_return_page(m, should_return) \
    (should_return != MEMORY_OBJECT_RETURN_NONE && \
     (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
      ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
      (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))

typedef	int	memory_object_lock_result_t;

#define MEMORY_OBJECT_LOCK_RESULT_DONE		0
#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK	1
#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN	2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN	3
memory_object_lock_result_t memory_object_lock_page(
			vm_page_t		m,
			memory_object_return_t	should_return,
			boolean_t		should_flush,
			vm_prot_t		prot);
/*
 *	Routine:	memory_object_lock_page
 *
 *	Description:
 *		Perform the appropriate lock operations on the
 *		given page.  See the description of
 *		"memory_object_lock_request" for the meanings
 *		of the arguments.
 *
 *		Returns an indication that the operation
 *		completed, blocked, or that the page must
 *		be cleaned.
 */
memory_object_lock_result_t
memory_object_lock_page(
	vm_page_t		m,
	memory_object_return_t	should_return,
	boolean_t		should_flush,
	vm_prot_t		prot)
{
	XPR(XPR_MEMORY_OBJECT,
	    "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
	    (integer_t)m, should_return, should_flush, prot, 0);

	/*
	 *	If we cannot change access to the page,
	 *	either because a mapping is in progress
	 *	(busy page) or because a mapping has been
	 *	wired, then give up.
	 */

	if (m->busy || m->cleaning)
		return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);

	/*
	 *	Don't worry about pages for which the kernel
	 *	does not have any data.
	 */

	if (m->absent || m->error || m->restart) {
		if (m->error && should_flush) {
			/* dump the page, pager wants us to */
			/* clean it up and there is no */
			/* relevant data to return */
			if (m->wire_count == 0) {
				return(MEMORY_OBJECT_LOCK_RESULT_DONE);
			}
		} else {
			return(MEMORY_OBJECT_LOCK_RESULT_DONE);
		}
	}

	assert(!m->fictitious);

	if (m->wire_count != 0) {
		/*
		 *	If no change would take place
		 *	anyway, return successfully.
		 *
		 *	No change means:
		 *		Not flushing AND
		 *		No change to page lock [2 checks]  AND
		 *		Should not return page
		 *
		 * XXX	This doesn't handle sending a copy of a wired
		 * XXX	page to the pager, but that will require some
		 * XXX	significant surgery.
		 */
		if (!should_flush &&
		    (m->page_lock == prot || prot == VM_PROT_NO_CHANGE) &&
		    ! memory_object_should_return_page(m, should_return)) {

			/*
			 *	Restart page unlock requests,
			 *	even though no change took place.
			 *	[Memory managers may be expecting
			 *	to see new requests.]
			 */
			m->unlock_request = VM_PROT_NONE;

			return(MEMORY_OBJECT_LOCK_RESULT_DONE);
		}

		return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
	}

	/*
	 *	If the page is to be flushed, allow
	 *	that to be done as part of the protection.
	 */

	if (should_flush)
		prot = VM_PROT_ALL;

	/*
	 *	If we are decreasing permission, do it now;
	 *	let the fault handler take care of increases
	 *	(pmap_page_protect may not increase protection).
	 */

	if (prot != VM_PROT_NO_CHANGE) {
		if ((m->page_lock ^ prot) & prot) {
			pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
		}
#if 0
		/* code associated with the vestigial
		 * memory_object_data_unlock
		 */
		m->lock_supplied = TRUE;
		if (prot != VM_PROT_NONE)
			m->unusual = TRUE;
		else
			m->unusual = FALSE;

		/*
		 *	Restart any past unlock requests, even if no
		 *	change resulted.  If the manager explicitly
		 *	requested no protection change, then it is assumed
		 *	to be remembering past requests.
		 */

		m->unlock_request = VM_PROT_NONE;
#endif	/* 0 */
	}

	/*
	 *	Handle page returning.
	 */

	if (memory_object_should_return_page(m, should_return)) {

		/*
		 *	If we weren't planning
		 *	to flush the page anyway,
		 *	we may need to remove the
		 *	page from the pageout
		 *	system and from physical
		 *	maps now.
		 */

		vm_page_lock_queues();
		VM_PAGE_QUEUES_REMOVE(m);
		vm_page_unlock_queues();

		if (!should_flush)
			pmap_page_protect(m->phys_page, VM_PROT_NONE);

		if (m->dirty)
			return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
		else
			return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
	}

	/*
	 *	Handle flushing and deactivation.
	 */

	if (!should_flush) {
		extern boolean_t vm_page_deactivate_hint;

		/*
		 *	XXX Make clean but not flush a paging hint,
		 *	and deactivate the pages.  This is a hack
		 *	because it overloads flush/clean with
		 *	implementation-dependent meaning. This only
		 *	happens to pages that are already clean.
		 */

		if (vm_page_deactivate_hint &&
		    (should_return != MEMORY_OBJECT_RETURN_NONE)) {
			vm_page_lock_queues();
			vm_page_deactivate(m);
			vm_page_unlock_queues();
		}
	}

	return(MEMORY_OBJECT_LOCK_RESULT_DONE);
}
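
/*
 * Illustrative sketch (not part of the original source): how a caller
 * typically consumes the lock-result codes returned above.  The page and
 * the policy values used here are hypothetical; vm_object_update() below
 * is the real consumer in this file.
 */
#if 0
static void
example_handle_lock_result(vm_page_t m)
{
	switch (memory_object_lock_page(m, MEMORY_OBJECT_RETURN_DIRTY,
					FALSE, VM_PROT_NO_CHANGE)) {
	case MEMORY_OBJECT_LOCK_RESULT_DONE:
		/* nothing more to do for this page */
		break;
	case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
		/* a caller would sleep on the page and retry */
		break;
	case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
	case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
		/* a caller would queue the page for memory_object_data_return() */
		break;
	}
}
#endif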
#define	LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po)		\
MACRO_BEGIN								\
	register vm_page_t	hp;					\
									\
	vm_object_unlock(object);					\
									\
	(void) memory_object_data_return(object->pager,		\
		po,							\
		data_cnt,						\
		(action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN),	\
		!should_flush);						\
									\
	vm_object_lock(object);						\
MACRO_END
/*
 *	Routine:	memory_object_lock_request [user interface]
 *
 *	Description:
 *		Control use of the data associated with the given
 *		memory object.  For each page in the given range,
 *		perform the following operations, in order:
 *			1)  restrict access to the page (disallow
 *			    forms specified by "prot");
 *			2)  return data to the manager (if "should_return"
 *			    is RETURN_DIRTY and the page is dirty, or
 *			    "should_return" is RETURN_ALL and the page
 *			    is either dirty or precious); and,
 *			3)  flush the cached copy (if "should_flush"
 *			    is asserted).
 *		The set of pages is defined by a starting offset
 *		("offset") and size ("size").  Only pages with the
 *		same page alignment as the starting offset are
 *		considered.
 *
 *		A single acknowledgement is sent (to the "reply_to"
 *		port) when these actions are complete.  If successful,
 *		the naked send right for reply_to is consumed.
 */

kern_return_t
memory_object_lock_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	memory_object_size_t	size,
	memory_object_return_t	should_return,
	int			flags,
	vm_prot_t		prot)
{
	vm_object_t		object;
	vm_object_offset_t	original_offset = offset;
	boolean_t		should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
	    (integer_t)control, offset, size,
	    (((should_return&1)<<1)|should_flush), prot);

	/*
	 *	Check for bogus arguments.
	 */
	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
		return (KERN_INVALID_ARGUMENT);

	size = round_page_64(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);
	offset -= object->paging_offset;

	(void)vm_object_update(object,
		offset, size, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return (KERN_SUCCESS);
}
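
/*
 * Illustrative sketch (not part of the original source): a kernel client
 * that holds a memory_object_control_t could ask for all dirty pages in
 * the first megabyte to be returned to the pager and flushed from the
 * cache.  The offset and size values are hypothetical.
 */
#if 0
static void
example_flush_first_megabyte(memory_object_control_t control)
{
	kern_return_t kr;

	kr = memory_object_lock_request(control,
		(memory_object_offset_t) 0,		/* start of the object */
		(memory_object_size_t) (1024 * 1024),	/* hypothetical length */
		MEMORY_OBJECT_RETURN_DIRTY,		/* push dirty pages to the pager */
		MEMORY_OBJECT_DATA_FLUSH,		/* then discard the cached copies */
		VM_PROT_NO_CHANGE);			/* leave protection alone */
	if (kr != KERN_SUCCESS)
		printf("example: memory_object_lock_request failed %d\n", kr);
}
#endif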
/*
 *	memory_object_release_name:  [interface]
 *
 *	Enforces name semantic on memory_object reference count decrement
 *	This routine should not be called unless the caller holds a name
 *	reference gained through the memory_object_named_create or the
 *	memory_object_rename call.
 *	If the TERMINATE_IDLE flag is set, the call will return if the
 *	reference count is not 1, i.e. idle with the only remaining reference
 *	being the name.
 *	If the decision is made to proceed the name field flag is set to
 *	false and the reference count is decremented.  If the RESPECT_CACHE
 *	flag is set and the reference count has gone to zero, the
 *	memory_object is checked to see if it is cacheable; otherwise when
 *	the reference count is zero, it is simply terminated.
 */

kern_return_t
memory_object_release_name(
	memory_object_control_t	control,
	int			flags)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_release_name(object, flags);
}
/*
 *	Routine:	memory_object_destroy [user interface]
 *	Purpose:
 *		Shut down a memory object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
memory_object_destroy(
	memory_object_control_t	control,
	kern_return_t		reason)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (vm_object_destroy(object, reason));
}
/*
 *	Routine:	vm_object_sync
 *
 *	Kernel internal function to synch out pages in a given
 *	range within an object to its memory manager.  Much the
 *	same as memory_object_lock_request but page protection
 *	is not changed.
 *
 *	If the should_flush and should_return flags are true pages
 *	are flushed, that is dirty & precious pages are written to
 *	the memory manager and then discarded.  If should_return
 *	is false, only precious pages are returned to the memory
 *	manager.
 *
 *	If should flush is false and should_return true, the memory
 *	manager's copy of the pages is updated.  If should_return
 *	is also false, only the precious pages are updated.  This
 *	last option is of limited utility.
 *
 *	Returns:
 *		FALSE	if no pages were returned to the pager
 *		TRUE	otherwise.
 */

boolean_t
vm_object_sync(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_size_t		size,
	boolean_t		should_flush,
	boolean_t		should_return)
{
	boolean_t	rv;

	XPR(XPR_VM_OBJECT,
	    "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
	    (integer_t)object, offset, size, should_flush, should_return);

	/*
	 * Lock the object, and acquire a paging reference to
	 * prevent the memory_object and control ports from
	 * being destroyed.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	rv = vm_object_update(object, offset, size,
		 (should_return) ?
			MEMORY_OBJECT_RETURN_ALL :
			MEMORY_OBJECT_RETURN_NONE,
		 (should_flush) ?
			MEMORY_OBJECT_DATA_FLUSH : 0,
		 VM_PROT_NO_CHANGE);

	vm_object_paging_end(object);
	vm_object_unlock(object);
	return rv;
}
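
/*
 * Illustrative sketch (not part of the original source): a kernel path
 * such as msync() support could push every dirty page in a hypothetical
 * range back to the pager without discarding the cached copies.
 */
#if 0
static void
example_sync_range(vm_object_t object)
{
	boolean_t returned;

	returned = vm_object_sync(object,
			(vm_object_offset_t) 0,		/* hypothetical start */
			(vm_size_t) (16 * PAGE_SIZE),	/* hypothetical length */
			FALSE,				/* do not flush */
			TRUE);				/* return dirty pages */
	if (!returned) {
		/* nothing dirty or precious was found in the range */
	}
}
#endif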
/*
 *	Routine:	vm_object_update
 *	Description:
 *		Work function for m_o_lock_request(), vm_o_sync().
 *
 *		Called with object locked and paging ref taken.
 */
boolean_t
vm_object_update(
	register vm_object_t		object,
	register vm_object_offset_t	offset,
	register vm_size_t		size,
	memory_object_return_t		should_return,
	int				flags,
	vm_prot_t			prot)
{
	register vm_page_t		m;
	vm_page_t			holding_page;
	vm_size_t			original_size = size;
	vm_object_offset_t		paging_offset = 0;
	vm_object_t			copy_object;
	vm_size_t			data_cnt = 0;
	vm_object_offset_t		last_offset = offset;
	memory_object_lock_result_t	page_lock_result;
	memory_object_lock_result_t	pageout_action;
	boolean_t			data_returned = FALSE;
	boolean_t			update_cow;
	boolean_t			should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
	boolean_t			pending_pageout = FALSE;

	/*
	 *	To avoid blocking while scanning for pages, save
	 *	dirty pages to be cleaned all at once.
	 *
	 *	XXXO A similar strategy could be used to limit the
	 *	number of times that a scan must be restarted for
	 *	other reasons.  Those pages that would require blocking
	 *	could be temporarily collected in another list, or
	 *	their offsets could be recorded in a small array.
	 */

	/*
	 * XXX	NOTE: May want to consider converting this to a page list
	 * XXX	vm_map_copy interface.  Need to understand object
	 * XXX	coalescing implications before doing so.
	 */

	update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
			&& (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
			!(flags & MEMORY_OBJECT_DATA_PURGE)))
			|| (flags & MEMORY_OBJECT_COPY_SYNC);

	if ((((copy_object = object->copy) != NULL) && update_cow) ||
			(flags & MEMORY_OBJECT_DATA_SYNC)) {
		vm_size_t		i;
		vm_size_t		copy_size;
		vm_object_offset_t	copy_offset;
		vm_prot_t		prot;
		vm_page_t		page;
		vm_page_t		top_page;
		kern_return_t		error = 0;

		if (copy_object != NULL) {
			/* translate offset with respect to shadow's offset */
			copy_offset = (offset >= copy_object->shadow_offset) ?
					offset - copy_object->shadow_offset :
					(vm_object_offset_t) 0;
			if (copy_offset > copy_object->size)
				copy_offset = copy_object->size;

			/* clip size with respect to shadow offset */
			copy_size = (offset >= copy_object->shadow_offset) ?
					size : size - (copy_object->shadow_offset - offset);

			copy_size = ((copy_offset + copy_size)
					<= copy_object->size) ?
					copy_size : copy_object->size - copy_offset;

			/* check for a copy_offset which is beyond the end of */
			/* the copy_object */
			copy_size += copy_offset;

			vm_object_unlock(object);
			vm_object_lock(copy_object);
		} else {
			copy_object = object;

			copy_size = offset + size;
			copy_offset = offset;
		}

		vm_object_paging_begin(copy_object);
		for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
	RETRY_COW_OF_LOCK_REQUEST:
			prot = VM_PROT_WRITE|VM_PROT_READ;
			switch (vm_fault_page(copy_object, i,
				VM_PROT_WRITE|VM_PROT_READ,
				FALSE,
				THREAD_UNINT,
				copy_offset,
				copy_offset + copy_size,
				VM_BEHAVIOR_SEQUENTIAL,
				&prot,
				&page,
				&top_page,
				(int *)0,
				&error,
				FALSE,
				FALSE, NULL, 0)) {

			case VM_FAULT_SUCCESS:
				if (top_page) {
					vm_fault_cleanup(
						page->object, top_page);
					PAGE_WAKEUP_DONE(page);
					vm_page_lock_queues();
					if (!page->active && !page->inactive)
						vm_page_activate(page);
					vm_page_unlock_queues();
					vm_object_lock(copy_object);
					vm_object_paging_begin(copy_object);
				} else {
					PAGE_WAKEUP_DONE(page);
					vm_page_lock_queues();
					if (!page->active && !page->inactive)
						vm_page_activate(page);
					vm_page_unlock_queues();
				}
				break;
			case VM_FAULT_RETRY:
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_INTERRUPTED:
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_MEMORY_SHORTAGE:
				VM_PAGE_WAIT();
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_FICTITIOUS_SHORTAGE:
				vm_page_more_fictitious();
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_MEMORY_ERROR:
				vm_object_lock(object);
				goto BYPASS_COW_COPYIN;
			}
		}
		vm_object_paging_end(copy_object);
		if (copy_object != object) {
			vm_object_unlock(copy_object);
			vm_object_lock(object);
		}
	}
	if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
		return KERN_SUCCESS;
	}
	if (((copy_object = object->copy) != NULL) &&
			(flags & MEMORY_OBJECT_DATA_PURGE)) {
		copy_object->shadow_severed = TRUE;
		copy_object->shadowed = FALSE;
		copy_object->shadow = NULL;
		/* delete the ref the COW was holding on the target object */
		vm_object_deallocate(object);
	}
BYPASS_COW_COPYIN:
	for (;
	     size != 0;
	     size -= PAGE_SIZE, offset += PAGE_SIZE_64)
	{
		/*
		 * Limit the number of pages to be cleaned at once.
		 */
		if (pending_pageout &&
		    data_cnt >= PAGE_SIZE * DATA_WRITE_MAX)
		{
			LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
					       pageout_action, paging_offset);
			pending_pageout = FALSE;
		}

		while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
			page_lock_result = memory_object_lock_page(m, should_return,
							should_flush, prot);

			XPR(XPR_MEMORY_OBJECT,
			    "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
			    (integer_t)object, offset, page_lock_result, 0, 0);

			switch (page_lock_result)
			{
			case MEMORY_OBJECT_LOCK_RESULT_DONE:
				/*
				 *	End of a cluster of dirty pages.
				 */
				if (pending_pageout) {
					LIST_REQ_PAGEOUT_PAGES(object,
						data_cnt, pageout_action,
						paging_offset);
					pending_pageout = FALSE;
					continue;
				}
				break;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
				/*
				 *	Since it is necessary to block,
				 *	clean any dirty pages now.
				 */
				if (pending_pageout) {
					LIST_REQ_PAGEOUT_PAGES(object,
						data_cnt, pageout_action,
						paging_offset);
					pending_pageout = FALSE;
					continue;
				}

				PAGE_SLEEP(object, m, THREAD_UNINT);
				continue;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
			case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
				/*
				 *	The clean and return cases are similar.
				 */

				/*
				 * if this would form a discontiguous block,
				 * clean the old pages and start anew.
				 */

				/*
				 * Mark the page busy since we unlock the
				 * object below.
				 */
				if (pending_pageout &&
				    (last_offset != offset ||
				     pageout_action != page_lock_result)) {
					LIST_REQ_PAGEOUT_PAGES(object,
						data_cnt, pageout_action,
						paging_offset);
					pending_pageout = FALSE;
				}

				holding_page = VM_PAGE_NULL;
				if (m->cleaning) {
					PAGE_SLEEP(object, m, THREAD_UNINT);
					continue;
				}
				if (!pending_pageout) {
					pending_pageout = TRUE;
					pageout_action = page_lock_result;
					paging_offset = offset;
				}
				if (should_flush) {
					vm_page_lock_queues();
					m->list_req_pending = TRUE;
					vm_page_unlock_queues();
				} else {
					/*
					 * Clean but do not flush
					 */
					vm_page_lock_queues();
					m->list_req_pending = TRUE;
					vm_page_unlock_queues();
				}
				vm_object_unlock(object);

				data_cnt += PAGE_SIZE;
				last_offset = offset + PAGE_SIZE_64;
				data_returned = TRUE;

				vm_object_lock(object);
				break;
			}
			break;
		}
	}

	/*
	 *	We have completed the scan for applicable pages.
	 *	Clean any pages that have been saved.
	 */
	if (pending_pageout) {
		LIST_REQ_PAGEOUT_PAGES(object,
				data_cnt, pageout_action, paging_offset);
	}
	return (data_returned);
}
/*
 *	Routine:	memory_object_synchronize_completed [user interface]
 *
 *	Tell kernel that previously synchronized data
 *	(memory_object_synchronize) has been queued or placed on the
 *	backing storage.
 *
 *	Note: there may be multiple synchronize requests for a given
 *	memory object outstanding but they will not overlap.
 */

kern_return_t
memory_object_synchronize_completed(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	vm_offset_t		length)
{
	vm_object_t	object;
	msync_req_t	msr;

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
	    (integer_t)object, offset, length, 0, 0);

	/*
	 *	Look for bogus arguments
	 */

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	/*
	 *	search for sync request structure
	 */
	queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
		if (msr->offset == offset && msr->length == length) {
			queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
			break;
		}
	}

	if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
		vm_object_unlock(object);
		return KERN_INVALID_ARGUMENT;
	}

	vm_object_unlock(object);
	msr->flag = VM_MSYNC_DONE;
	thread_wakeup((event_t) msr);

	return KERN_SUCCESS;
}/* memory_object_synchronize_completed */
kern_return_t
vm_object_set_attributes_common(
	vm_object_t	object,
	boolean_t	may_cache,
	memory_object_copy_strategy_t copy_strategy,
	boolean_t	temporary,
	vm_size_t	cluster_size,
	boolean_t	silent_overwrite,
	boolean_t	advisory_pageout)
{
	boolean_t	object_became_ready;

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
	    (integer_t)object, ((may_cache&1)|((temporary&1)<<1)), copy_strategy, 0, 0);

	if (object == VM_OBJECT_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 *	Verify the attributes of importance
	 */

	switch(copy_strategy) {
	case MEMORY_OBJECT_COPY_NONE:
	case MEMORY_OBJECT_COPY_DELAY:
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

#if	!ADVISORY_PAGEOUT
	if (silent_overwrite || advisory_pageout)
		return(KERN_INVALID_ARGUMENT);
#endif	/* !ADVISORY_PAGEOUT */

	if (cluster_size != 0) {
		int	pages_per_cluster;

		pages_per_cluster = atop_32(cluster_size);
		/*
		 * Cluster size must be integral multiple of page size,
		 * and be a power of 2 number of pages.
		 */
		if ((cluster_size & (PAGE_SIZE-1)) ||
		    ((pages_per_cluster-1) & pages_per_cluster))
			return KERN_INVALID_ARGUMENT;
	}

	vm_object_lock(object);

	/*
	 *	Copy the attributes
	 */
	assert(!object->internal);
	object_became_ready = !object->pager_ready;
	object->copy_strategy = copy_strategy;
	object->can_persist = may_cache;
	object->temporary = temporary;
	object->silent_overwrite = silent_overwrite;
	object->advisory_pageout = advisory_pageout;
	if (cluster_size == 0)
		cluster_size = PAGE_SIZE;
	object->cluster_size = cluster_size;

	assert(cluster_size >= PAGE_SIZE &&
	       cluster_size % PAGE_SIZE == 0);

	/*
	 *	Wake up anyone waiting for the ready attribute
	 *	to become asserted.
	 */

	if (object_became_ready) {
		object->pager_ready = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
	}

	vm_object_unlock(object);

	return(KERN_SUCCESS);
}
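
/*
 * Illustrative sketch (not part of the original source): the cluster-size
 * rule enforced above requires a page-aligned, power-of-two number of
 * pages.  With 4K pages, 32768 (8 pages) passes, while 12288 (3 pages) is
 * rejected.  The helper name is hypothetical.
 */
#if 0
static boolean_t
example_cluster_size_ok(vm_size_t cluster_size)
{
	int pages_per_cluster = atop_32(cluster_size);

	/* same two checks as vm_object_set_attributes_common() */
	return !((cluster_size & (PAGE_SIZE - 1)) ||
		 ((pages_per_cluster - 1) & pages_per_cluster));
}
#endif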
/*
 *	Set the memory object attribute as provided.
 *
 *	XXX This routine cannot be completed until the vm_msync, clean
 *	    in place, and cluster work is completed. See ifdef notyet
 *	    below and note that vm_object_set_attributes_common()
 *	    may have to be expanded.
 */
kern_return_t
memory_object_change_attributes(
	memory_object_control_t		control,
	memory_object_flavor_t		flavor,
	memory_object_info_t		attributes,
	mach_msg_type_number_t		count)
{
	vm_object_t			object;
	kern_return_t			result = KERN_SUCCESS;
	boolean_t			temporary;
	boolean_t			may_cache;
	boolean_t			invalidate;
	vm_size_t			cluster_size;
	memory_object_copy_strategy_t	copy_strategy;
	boolean_t			silent_overwrite;
	boolean_t			advisory_pageout;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	temporary = object->temporary;
	may_cache = object->can_persist;
	copy_strategy = object->copy_strategy;
	silent_overwrite = object->silent_overwrite;
	advisory_pageout = object->advisory_pageout;
	invalidate = object->invalidate;
	cluster_size = object->cluster_size;
	vm_object_unlock(object);

	switch (flavor) {
	case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
	{
		old_memory_object_behave_info_t	behave;

		if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (old_memory_object_behave_info_t) attributes;

		temporary = behave->temporary;
		invalidate = behave->invalidate;
		copy_strategy = behave->copy_strategy;

		break;
	}

	case MEMORY_OBJECT_BEHAVIOR_INFO:
	{
		memory_object_behave_info_t	behave;

		if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (memory_object_behave_info_t) attributes;

		temporary = behave->temporary;
		invalidate = behave->invalidate;
		copy_strategy = behave->copy_strategy;
		silent_overwrite = behave->silent_overwrite;
		advisory_pageout = behave->advisory_pageout;

		break;
	}

	case MEMORY_OBJECT_PERFORMANCE_INFO:
	{
		memory_object_perf_info_t	perf;

		if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		perf = (memory_object_perf_info_t) attributes;

		may_cache = perf->may_cache;
		cluster_size = round_page_32(perf->cluster_size);

		break;
	}

	case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
	{
		old_memory_object_attr_info_t	attr;

		if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (old_memory_object_attr_info_t) attributes;

		may_cache = attr->may_cache;
		copy_strategy = attr->copy_strategy;
		cluster_size = page_size;

		break;
	}

	case MEMORY_OBJECT_ATTRIBUTE_INFO:
	{
		memory_object_attr_info_t	attr;

		if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (memory_object_attr_info_t) attributes;

		copy_strategy = attr->copy_strategy;
		may_cache = attr->may_cache_object;
		cluster_size = attr->cluster_size;
		temporary = attr->temporary;

		break;
	}

	default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	if (result != KERN_SUCCESS)
		return(result);

	if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
		copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}

	/*
	 * XXX	may_cache may become a tri-valued variable to handle
	 * XXX	uncache if not in use.
	 */
	return (vm_object_set_attributes_common(object,
						may_cache,
						copy_strategy,
						temporary,
						cluster_size,
						silent_overwrite,
						advisory_pageout));
}
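
/*
 * Illustrative sketch (not part of the original source): a pager could
 * advertise its caching preference and cluster size through the
 * PERFORMANCE_INFO flavor.  The memory_object_perf_info_data_t typedef is
 * assumed to come from <mach/memory_object_types.h>; the values are
 * hypothetical.
 */
#if 0
static kern_return_t
example_set_perf_info(memory_object_control_t control)
{
	memory_object_perf_info_data_t perf;

	perf.cluster_size = 8 * PAGE_SIZE;	/* hypothetical cluster */
	perf.may_cache = TRUE;

	return memory_object_change_attributes(control,
			MEMORY_OBJECT_PERFORMANCE_INFO,
			(memory_object_info_t) &perf,
			MEMORY_OBJECT_PERF_INFO_COUNT);
}
#endif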
kern_return_t
memory_object_get_attributes(
	memory_object_control_t	control,
	memory_object_flavor_t	flavor,
	memory_object_info_t	attributes,	/* pointer to OUT array */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	kern_return_t	ret = KERN_SUCCESS;
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	switch (flavor) {
	case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
	{
		old_memory_object_behave_info_t	behave;

		if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (old_memory_object_behave_info_t) attributes;
		behave->copy_strategy = object->copy_strategy;
		behave->temporary = object->temporary;
#if notyet	/* remove when vm_msync complies and clean in place fini */
		behave->invalidate = object->invalidate;
#else
		behave->invalidate = FALSE;
#endif

		*count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
		break;
	}

	case MEMORY_OBJECT_BEHAVIOR_INFO:
	{
		memory_object_behave_info_t	behave;

		if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (memory_object_behave_info_t) attributes;
		behave->copy_strategy = object->copy_strategy;
		behave->temporary = object->temporary;
#if notyet	/* remove when vm_msync complies and clean in place fini */
		behave->invalidate = object->invalidate;
#else
		behave->invalidate = FALSE;
#endif
		behave->advisory_pageout = object->advisory_pageout;
		behave->silent_overwrite = object->silent_overwrite;
		*count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
		break;
	}

	case MEMORY_OBJECT_PERFORMANCE_INFO:
	{
		memory_object_perf_info_t	perf;

		if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		perf = (memory_object_perf_info_t) attributes;
		perf->cluster_size = object->cluster_size;
		perf->may_cache = object->can_persist;

		*count = MEMORY_OBJECT_PERF_INFO_COUNT;
		break;
	}

	case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
	{
		old_memory_object_attr_info_t	attr;

		if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (old_memory_object_attr_info_t) attributes;
		attr->may_cache = object->can_persist;
		attr->copy_strategy = object->copy_strategy;

		*count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
		break;
	}

	case MEMORY_OBJECT_ATTRIBUTE_INFO:
	{
		memory_object_attr_info_t	attr;

		if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (memory_object_attr_info_t) attributes;
		attr->copy_strategy = object->copy_strategy;
		attr->cluster_size = object->cluster_size;
		attr->may_cache_object = object->can_persist;
		attr->temporary = object->temporary;

		*count = MEMORY_OBJECT_ATTR_INFO_COUNT;
		break;
	}

	default:
		ret = KERN_INVALID_ARGUMENT;
		break;
	}

	vm_object_unlock(object);

	return(ret);
}
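
/*
 * Illustrative sketch (not part of the original source): reading the
 * attributes back with the ATTRIBUTE_INFO flavor.  The
 * memory_object_attr_info_data_t typedef is assumed to come from
 * <mach/memory_object_types.h>; the control value is hypothetical.
 */
#if 0
static void
example_query_attributes(memory_object_control_t control)
{
	memory_object_attr_info_data_t	info;
	mach_msg_type_number_t		count = MEMORY_OBJECT_ATTR_INFO_COUNT;

	if (memory_object_get_attributes(control,
			MEMORY_OBJECT_ATTRIBUTE_INFO,
			(memory_object_info_t) &info,
			&count) == KERN_SUCCESS) {
		/* info.copy_strategy, info.cluster_size, etc. are now valid */
	}
}
#endif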
kern_return_t
memory_object_iopl_request(
	ipc_port_t		port,
	memory_object_offset_t	offset,
	vm_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags)
{
	vm_object_t		object;
	kern_return_t		ret;
	int			caller_flags;

	caller_flags = *flags;

	if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
		vm_named_entry_t	named_entry;

		named_entry = (vm_named_entry_t)port->ip_kobject;
		/* a few checks to make sure user is obeying rules */
		if (*upl_size == 0) {
			if (offset >= named_entry->size)
				return(KERN_INVALID_RIGHT);
			*upl_size = named_entry->size - offset;
		}
		if (caller_flags & UPL_COPYOUT_FROM) {
			if ((named_entry->protection & VM_PROT_READ)
					!= VM_PROT_READ) {
				return(KERN_INVALID_RIGHT);
			}
		} else {
			if ((named_entry->protection &
				(VM_PROT_READ | VM_PROT_WRITE))
				!= (VM_PROT_READ | VM_PROT_WRITE)) {
				return(KERN_INVALID_RIGHT);
			}
		}
		if (named_entry->size < (offset + *upl_size))
			return(KERN_INVALID_ARGUMENT);

		/* the callers parameter offset is defined to be the */
		/* offset from beginning of named entry offset in object */
		offset = offset + named_entry->offset;

		if (named_entry->is_sub_map)
			return (KERN_INVALID_ARGUMENT);

		named_entry_lock(named_entry);

		if (named_entry->object) {
			/* This is the case where we are going to map */
			/* an already mapped object.  If the object is */
			/* not ready it is internal.  An external      */
			/* object cannot be mapped until it is ready,  */
			/* we can therefore avoid the ready check.     */
			vm_object_reference(named_entry->object);
			object = named_entry->object;
			named_entry_unlock(named_entry);
		} else {
			object = vm_object_enter(named_entry->backing.pager,
					named_entry->offset + named_entry->size,
					named_entry->internal,
					FALSE,
					FALSE);
			if (object == VM_OBJECT_NULL) {
				named_entry_unlock(named_entry);
				return(KERN_INVALID_OBJECT);
			}
			vm_object_lock(object);

			/* create an extra reference for the named entry */
			vm_object_reference_locked(object);
			named_entry->object = object;
			named_entry_unlock(named_entry);

			/* wait for object to be ready */
			while (!object->pager_ready) {
				vm_object_wait(object,
						VM_OBJECT_EVENT_PAGER_READY,
						THREAD_UNINT);
				vm_object_lock(object);
			}
			vm_object_unlock(object);
		}
	} else {
		memory_object_control_t	control;

		control = (memory_object_control_t)port->ip_kobject;
		if (control == NULL)
			return (KERN_INVALID_ARGUMENT);
		object = memory_object_control_to_vm_object(control);
		if (object == VM_OBJECT_NULL)
			return (KERN_INVALID_ARGUMENT);
		vm_object_reference(object);
	}
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (!object->private) {
		if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
			*upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     caller_flags);

	vm_object_deallocate(object);

	return ret;
}
/*
 *	Routine:	memory_object_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 */

kern_return_t
memory_object_upl_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	vm_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_upl_request(object,
				     offset,
				     size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     cntrl_flags);
}
/*
 *	Routine:	memory_object_super_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object
 *		in much the same way as memory_object_upl_request.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		However, the region may be expanded up to the super
 *		cluster size provided.
 */

kern_return_t
memory_object_super_upl_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	vm_size_t		size,
	vm_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_super_upl_request(object,
					   offset,
					   size,
					   super_cluster,
					   upl,
					   user_page_list,
					   page_list_count,
					   cntrl_flags);
}
int vm_stat_discard_cleared_reply = 0;
int vm_stat_discard_cleared_unset = 0;
int vm_stat_discard_cleared_too_late = 0;
/*
 *	Routine:	host_default_memory_manager [interface]
 *	Purpose:
 *		set/get the default memory manager port and default cluster
 *		size.
 *
 *		If successful, consumes the supplied naked send right.
 */
kern_return_t
host_default_memory_manager(
	host_priv_t		host_priv,
	memory_object_default_t	*default_manager,
	vm_size_t		cluster_size)
{
	memory_object_default_t current_manager;
	memory_object_default_t new_manager;
	memory_object_default_t returned_manager;

	if (host_priv == HOST_PRIV_NULL)
		return(KERN_INVALID_HOST);

	assert(host_priv == &realhost);

	new_manager = *default_manager;
	mutex_lock(&memory_manager_default_lock);
	current_manager = memory_manager_default;

	if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
		/*
		 *	Retrieve the current value.
		 */
		memory_object_default_reference(current_manager);
		returned_manager = current_manager;
	} else {
		/*
		 *	Retrieve the current value,
		 *	and replace it with the supplied value.
		 *	We return the old reference to the caller
		 *	but we have to take a reference on the new
		 *	manager.
		 */

		returned_manager = current_manager;
		memory_manager_default = new_manager;
		memory_object_default_reference(new_manager);

		if (cluster_size % PAGE_SIZE != 0) {
#if 0
			mutex_unlock(&memory_manager_default_lock);
			return KERN_INVALID_ARGUMENT;
#else
			cluster_size = round_page_32(cluster_size);
#endif
		}
		memory_manager_default_cluster = cluster_size;

		/*
		 *	In case anyone's been waiting for a memory
		 *	manager to be established, wake them up.
		 */

		thread_wakeup((event_t) &memory_manager_default);
	}

	mutex_unlock(&memory_manager_default_lock);

	*default_manager = returned_manager;
	return(KERN_SUCCESS);
}
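
/*
 * Illustrative sketch (not part of the original source): the default pager
 * registers itself roughly like this at startup; passing
 * MEMORY_OBJECT_DEFAULT_NULL instead simply returns the current manager.
 * The helper name is hypothetical; &realhost is the privileged host port
 * asserted above.
 */
#if 0
static kern_return_t
example_register_default_pager(memory_object_default_t new_pager)
{
	memory_object_default_t old_pager = new_pager;

	return host_default_memory_manager(&realhost,
			&old_pager,	/* in: new manager, out: previous one */
			0);		/* keep the existing cluster size */
}
#endif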
/*
 *	Routine:	memory_manager_default_reference
 *	Purpose:
 *		Returns a naked send right for the default
 *		memory manager.  The returned right is always
 *		valid (not IP_NULL or IP_DEAD).
 */

__private_extern__ memory_object_default_t
memory_manager_default_reference(
	vm_size_t	*cluster_size)
{
	memory_object_default_t current_manager;

	mutex_lock(&memory_manager_default_lock);
	current_manager = memory_manager_default;
	while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
		wait_result_t	res;

		res = thread_sleep_mutex((event_t) &memory_manager_default,
					 &memory_manager_default_lock,
					 THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
		current_manager = memory_manager_default;
	}
	memory_object_default_reference(current_manager);
	*cluster_size = memory_manager_default_cluster;
	mutex_unlock(&memory_manager_default_lock);

	return current_manager;
}
/*
 *	Routine:	memory_manager_default_check
 *
 *	Purpose:
 *		Check whether a default memory manager has been set
 *		up yet, or not. Returns KERN_SUCCESS if dmm exists,
 *		and KERN_FAILURE if dmm does not exist.
 *
 *		If there is no default memory manager, log an error,
 *		but only the first time.
 */
__private_extern__ kern_return_t
memory_manager_default_check(void)
{
	memory_object_default_t current;

	mutex_lock(&memory_manager_default_lock);
	current = memory_manager_default;
	if (current == MEMORY_OBJECT_DEFAULT_NULL) {
		static boolean_t logged;	/* initialized to 0 */
		boolean_t	complain = !logged;

		logged = TRUE;
		mutex_unlock(&memory_manager_default_lock);
		if (complain)
			printf("Warning: No default memory manager\n");
		return(KERN_FAILURE);
	} else {
		mutex_unlock(&memory_manager_default_lock);
		return(KERN_SUCCESS);
	}
}
__private_extern__ void
memory_manager_default_init(void)
{
	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
	mutex_init(&memory_manager_default_lock, ETAP_VM_MEMMAN);
}
void
memory_object_deactivate_pages(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		kill_page)
{
	vm_object_t	orig_object;
	int		pages_moved = 0;
	int		pages_found = 0;

	/*
	 * entered with object lock held, acquire a paging reference to
	 * prevent the memory_object and control ports from
	 * being destroyed.
	 */
	orig_object = object;

	for (;;) {
		register vm_page_t	m;
		vm_object_offset_t	toffset;
		vm_object_size_t	tsize;

		vm_object_paging_begin(object);
		vm_page_lock_queues();

		for (tsize = size, toffset = offset; tsize; tsize -= PAGE_SIZE, toffset += PAGE_SIZE) {

			if ((m = vm_page_lookup(object, toffset)) != VM_PAGE_NULL) {

				pages_found++;

				if ((m->wire_count == 0) && (!m->private) && (!m->gobbled) && (!m->busy)) {

					m->reference = FALSE;
					pmap_clear_reference(m->phys_page);

					if ((kill_page) && (object->internal)) {
						m->precious = FALSE;
						pmap_clear_modify(m->phys_page);
						vm_external_state_clr(object->existence_map, offset);
					}
					VM_PAGE_QUEUES_REMOVE(m);

					if (m->zero_fill) {
						queue_enter_first(
							&vm_page_queue_zf,
							m, vm_page_t, pageq);
					} else {
						queue_enter_first(
							&vm_page_queue_inactive,
							m, vm_page_t, pageq);
					}

					m->inactive = TRUE;
					if (!m->fictitious)
						vm_page_inactive_count++;

					pages_moved++;
				}
			}
		}
		vm_page_unlock_queues();
		vm_object_paging_end(object);

		if (object->shadow) {
			vm_object_t	tmp_object;

			kill_page = 0;

			offset += object->shadow_offset;

			tmp_object = object->shadow;
			vm_object_lock(tmp_object);

			if (object != orig_object)
				vm_object_unlock(object);
			object = tmp_object;
		} else
			break;
	}
	if (object != orig_object)
		vm_object_unlock(object);
}
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the object rather than on a UPL. */

kern_return_t
memory_object_page_op(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_object_t	object;
	vm_page_t	dst_page;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	if (ops & UPL_POP_PHYSICAL) {
		if (object->phys_contiguous) {
			if (phys_entry) {
				*phys_entry = (ppnum_t)
					(object->shadow_offset >> 12);
			}
			vm_object_unlock(object);
			return KERN_SUCCESS;
		} else {
			vm_object_unlock(object);
			return KERN_INVALID_OBJECT;
		}
	}
	if (object->phys_contiguous) {
		vm_object_unlock(object);
		return KERN_INVALID_OBJECT;
	}

	while (TRUE) {
		if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
			vm_object_unlock(object);
			return KERN_FAILURE;
		}

		/* Sync up on getting the busy bit */
		if ((dst_page->busy || dst_page->cleaning) &&
		    (((ops & UPL_POP_SET) &&
		    (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
			/* someone else is playing with the page, we will */
			/* have to wait */
			PAGE_SLEEP(object, dst_page, THREAD_UNINT);
			continue;
		}

		if (ops & UPL_POP_DUMP) {
			vm_page_lock_queues();

			if (dst_page->no_isync == FALSE)
				pmap_page_protect(dst_page->phys_page, VM_PROT_NONE);
			vm_page_free(dst_page);

			vm_page_unlock_queues();
			break;
		}

		if (flags) {
			*flags = 0;

			/* Get the condition of flags before requested ops */
			/* are undertaken */

			if (dst_page->dirty) *flags |= UPL_POP_DIRTY;
			if (dst_page->pageout) *flags |= UPL_POP_PAGEOUT;
			if (dst_page->precious) *flags |= UPL_POP_PRECIOUS;
			if (dst_page->absent) *flags |= UPL_POP_ABSENT;
			if (dst_page->busy) *flags |= UPL_POP_BUSY;
		}
		if (phys_entry)
			*phys_entry = dst_page->phys_page;

		/* The caller should have made a call either contingent with */
		/* or prior to this call to set UPL_POP_BUSY */
		if (ops & UPL_POP_SET) {
			/* The protection granted with this assert will */
			/* not be complete. If the caller violates the */
			/* convention and attempts to change page state */
			/* without first setting busy we may not see it */
			/* because the page may already be busy.  However */
			/* if such violations occur we will assert sooner */
			/* or later. */
			assert(dst_page->busy || (ops & UPL_POP_BUSY));
			if (ops & UPL_POP_DIRTY) dst_page->dirty = TRUE;
			if (ops & UPL_POP_PAGEOUT) dst_page->pageout = TRUE;
			if (ops & UPL_POP_PRECIOUS) dst_page->precious = TRUE;
			if (ops & UPL_POP_ABSENT) dst_page->absent = TRUE;
			if (ops & UPL_POP_BUSY) dst_page->busy = TRUE;
		}

		if (ops & UPL_POP_CLR) {
			assert(dst_page->busy);
			if (ops & UPL_POP_DIRTY) dst_page->dirty = FALSE;
			if (ops & UPL_POP_PAGEOUT) dst_page->pageout = FALSE;
			if (ops & UPL_POP_PRECIOUS) dst_page->precious = FALSE;
			if (ops & UPL_POP_ABSENT) dst_page->absent = FALSE;
			if (ops & UPL_POP_BUSY) {
				dst_page->busy = FALSE;
				PAGE_WAKEUP(dst_page);
			}
		}
		break;
	}

	vm_object_unlock(object);
	return KERN_SUCCESS;
}
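
/*
 * Illustrative sketch (not part of the original source): marking a single
 * page busy and dirty through the page_op interface, then releasing it.
 * The control and offset values are hypothetical.
 */
#if 0
static void
example_dirty_one_page(memory_object_control_t control,
		       memory_object_offset_t offset)
{
	ppnum_t	phys;
	int	flags;

	/* take the busy bit and mark the page dirty in one call */
	(void) memory_object_page_op(control, offset,
			UPL_POP_SET | UPL_POP_BUSY | UPL_POP_DIRTY,
			&phys, &flags);

	/* ... operate on the physical page `phys' ... */

	/* clear busy and wake any waiters */
	(void) memory_object_page_op(control, offset,
			UPL_POP_CLR | UPL_POP_BUSY,
			&phys, &flags);
}
#endif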
/*
 * memory_object_range_op offers performance enhancement over
 * memory_object_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
memory_object_range_op(
	memory_object_control_t	control,
	memory_object_offset_t	offset_beg,
	memory_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	memory_object_offset_t	offset;
	vm_object_t		object;
	vm_page_t		dst_page;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (object->resident_page_count == 0) {
		if (range) {
			if (ops & UPL_ROP_PRESENT)
				*range = 0;
			else
				*range = offset_end - offset_beg;
		}
		return KERN_SUCCESS;
	}
	vm_object_lock(object);

	if (object->phys_contiguous)
		return KERN_INVALID_OBJECT;

	offset = offset_beg;

	while (offset < offset_end) {
		if (dst_page = vm_page_lookup(object, offset)) {
			if (ops & UPL_ROP_DUMP) {
				if (dst_page->busy || dst_page->cleaning) {
					/*
					 * someone else is playing with the
					 * page, we will have to wait
					 */
					PAGE_SLEEP(object,
						dst_page, THREAD_UNINT);
					/*
					 * need to relook the page up since it's
					 * state may have changed while we slept
					 * it might even belong to a different object
					 * at this point
					 */
					continue;
				}
				vm_page_lock_queues();

				if (dst_page->no_isync == FALSE)
					pmap_page_protect(dst_page->phys_page, VM_PROT_NONE);
				vm_page_free(dst_page);

				vm_page_unlock_queues();
			} else if (ops & UPL_ROP_ABSENT)
				break;
		} else if (ops & UPL_ROP_PRESENT)
			break;

		offset += PAGE_SIZE;
	}
	vm_object_unlock(object);

	if (range)
		*range = offset - offset_beg;

	return KERN_SUCCESS;
}
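
/*
 * Illustrative sketch (not part of the original source): discarding every
 * resident page in a hypothetical range and learning how far the scan got.
 */
#if 0
static void
example_dump_range(memory_object_control_t control)
{
	int range = 0;

	(void) memory_object_range_op(control,
			(memory_object_offset_t) 0,
			(memory_object_offset_t) (64 * PAGE_SIZE),
			UPL_ROP_DUMP,		/* free pages as they are found */
			&range);
	/* range now holds the number of bytes covered by the scan */
}
#endif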
static zone_t mem_obj_control_zone;

__private_extern__ void
memory_object_control_bootstrap(void)
{
	int	i;

	i = (vm_size_t) sizeof (struct memory_object_control);
	mem_obj_control_zone = zinit(i, 8192*i, 4096, "mem_obj_control");
	return;
}

__private_extern__ memory_object_control_t
memory_object_control_allocate(
	vm_object_t	object)
{
	memory_object_control_t control;

	control = (memory_object_control_t)zalloc(mem_obj_control_zone);
	if (control != MEMORY_OBJECT_CONTROL_NULL)
		control->object = object;
	return (control);
}
__private_extern__ void
memory_object_control_collapse(
	memory_object_control_t	control,
	vm_object_t		object)
{
	assert((control->object != VM_OBJECT_NULL) &&
	       (control->object != object));
	control->object = object;
}

__private_extern__ vm_object_t
memory_object_control_to_vm_object(
	memory_object_control_t	control)
{
	if (control == MEMORY_OBJECT_CONTROL_NULL)
		return VM_OBJECT_NULL;

	return (control->object);
}

memory_object_control_t
convert_port_to_mo_control(
	mach_port_t	port)
{
	return MEMORY_OBJECT_CONTROL_NULL;
}

mach_port_t
convert_mo_control_to_port(
	memory_object_control_t	control)
{
	return MACH_PORT_NULL;
}

void
memory_object_control_reference(
	memory_object_control_t	control)
{
	return;
}

/*
 * We only ever issue one of these references, so kill it
 * when that gets released (should switch the real reference
 * counting in true port-less EMMI).
 */
void
memory_object_control_deallocate(
	memory_object_control_t	control)
{
	zfree(mem_obj_control_zone, (vm_offset_t)control);
}

void
memory_object_control_disable(
	memory_object_control_t	control)
{
	assert(control->object != VM_OBJECT_NULL);
	control->object = VM_OBJECT_NULL;
}
void
memory_object_default_reference(
	memory_object_default_t	dmm)
{
	ipc_port_make_send(dmm);
}

void
memory_object_default_deallocate(
	memory_object_default_t	dmm)
{
	ipc_port_release_send(dmm);
}

memory_object_t
convert_port_to_memory_object(
	mach_port_t	port)
{
	return (MEMORY_OBJECT_NULL);
}

mach_port_t
convert_memory_object_to_port(
	memory_object_t	object)
{
	return (MACH_PORT_NULL);
}
/* remove after component interface available */
extern int vnode_pager_workaround;
extern int device_pager_workaround;


/* Routine memory_object_reference */
void memory_object_reference(
	memory_object_t memory_object)
{
	extern void dp_memory_object_reference(memory_object_t);
	extern void vnode_pager_reference(memory_object_t);
	extern void device_pager_reference(memory_object_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		vnode_pager_reference(memory_object);
	} else if (memory_object->pager == &device_pager_workaround) {
		device_pager_reference(memory_object);
	} else
		dp_memory_object_reference(memory_object);
}

/* Routine memory_object_deallocate */
void memory_object_deallocate(
	memory_object_t memory_object)
{
	extern void dp_memory_object_deallocate(memory_object_t);
	extern void vnode_pager_deallocate(memory_object_t);
	extern void device_pager_deallocate(memory_object_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		vnode_pager_deallocate(memory_object);
	} else if (memory_object->pager == &device_pager_workaround) {
		device_pager_deallocate(memory_object);
	} else
		dp_memory_object_deallocate(memory_object);
}


/* Routine memory_object_init */
kern_return_t memory_object_init
(
	memory_object_t memory_object,
	memory_object_control_t memory_control,
	vm_size_t memory_object_page_size
)
{
	extern kern_return_t dp_memory_object_init(memory_object_t,
						memory_object_control_t,
						vm_size_t);
	extern kern_return_t vnode_pager_init(memory_object_t,
						memory_object_control_t,
						vm_size_t);
	extern kern_return_t device_pager_init(memory_object_t,
						memory_object_control_t,
						vm_size_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		return vnode_pager_init(memory_object,
					memory_control,
					memory_object_page_size);
	} else if (memory_object->pager == &device_pager_workaround) {
		return device_pager_init(memory_object,
					memory_control,
					memory_object_page_size);
	} else
		return dp_memory_object_init(memory_object,
					memory_control,
					memory_object_page_size);
}

/* Routine memory_object_terminate */
kern_return_t memory_object_terminate
(
	memory_object_t memory_object
)
{
	extern kern_return_t dp_memory_object_terminate(memory_object_t);
	extern kern_return_t vnode_pager_terminate(memory_object_t);
	extern kern_return_t device_pager_terminate(memory_object_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		return vnode_pager_terminate(memory_object);
	} else if (memory_object->pager == &device_pager_workaround) {
		return device_pager_terminate(memory_object);
	} else
		return dp_memory_object_terminate(memory_object);
}

/* Routine memory_object_data_request */
kern_return_t memory_object_data_request
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	vm_size_t length,
	vm_prot_t desired_access
)
{
	extern kern_return_t dp_memory_object_data_request(memory_object_t,
				memory_object_offset_t, vm_size_t, vm_prot_t);
	extern kern_return_t vnode_pager_data_request(memory_object_t,
				memory_object_offset_t, vm_size_t, vm_prot_t);
	extern kern_return_t device_pager_data_request(memory_object_t,
				memory_object_offset_t, vm_size_t, vm_prot_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		return vnode_pager_data_request(memory_object,
					offset,
					length,
					desired_access);
	} else if (memory_object->pager == &device_pager_workaround) {
		return device_pager_data_request(memory_object,
					offset,
					length,
					desired_access);
	} else
		return dp_memory_object_data_request(memory_object,
					offset,
					length,
					desired_access);
}

/* Routine memory_object_data_return */
kern_return_t memory_object_data_return
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	vm_size_t size,
	boolean_t dirty,
	boolean_t kernel_copy
)
{
	extern kern_return_t dp_memory_object_data_return(memory_object_t,
					memory_object_offset_t,
					vm_size_t,
					boolean_t,
					boolean_t);
	extern kern_return_t vnode_pager_data_return(memory_object_t,
					memory_object_offset_t,
					vm_size_t,
					boolean_t,
					boolean_t);
	extern kern_return_t device_pager_data_return(memory_object_t,
					memory_object_offset_t,
					vm_size_t,
					boolean_t,
					boolean_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		return vnode_pager_data_return(memory_object,
					offset,
					size,
					dirty,
					kernel_copy);
	} else if (memory_object->pager == &device_pager_workaround) {
		return device_pager_data_return(memory_object,
					offset,
					size,
					dirty,
					kernel_copy);
	} else
		return dp_memory_object_data_return(memory_object,
					offset,
					size,
					dirty,
					kernel_copy);
}

/* Routine memory_object_data_initialize */
kern_return_t memory_object_data_initialize
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	vm_size_t size
)
{
	extern kern_return_t dp_memory_object_data_initialize(memory_object_t,
					memory_object_offset_t,
					vm_size_t);
	extern kern_return_t vnode_pager_data_initialize(memory_object_t,
					memory_object_offset_t,
					vm_size_t);
	extern kern_return_t device_pager_data_initialize(memory_object_t,
					memory_object_offset_t,
					vm_size_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		return vnode_pager_data_initialize(memory_object,
					offset,
					size);
	} else if (memory_object->pager == &device_pager_workaround) {
		return device_pager_data_initialize(memory_object,
					offset,
					size);
	} else
		return dp_memory_object_data_initialize(memory_object,
					offset,
					size);
}

/* Routine memory_object_data_unlock */
kern_return_t memory_object_data_unlock
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	vm_size_t size,
	vm_prot_t desired_access
)
{
	extern kern_return_t dp_memory_object_data_unlock(memory_object_t,
					memory_object_offset_t,
					vm_size_t,
					vm_prot_t);
	extern kern_return_t vnode_pager_data_unlock(memory_object_t,
					memory_object_offset_t,
					vm_size_t,
					vm_prot_t);
	extern kern_return_t device_pager_data_unlock(memory_object_t,
					memory_object_offset_t,
					vm_size_t,
					vm_prot_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		return vnode_pager_data_unlock(memory_object,
					offset,
					size,
					desired_access);
	} else if (memory_object->pager == &device_pager_workaround) {
		return device_pager_data_unlock(memory_object,
					offset,
					size,
					desired_access);
	} else
		return dp_memory_object_data_unlock(memory_object,
					offset,
					size,
					desired_access);
}

/* Routine memory_object_synchronize */
kern_return_t memory_object_synchronize
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	vm_size_t size,
	vm_sync_t sync_flags
)
{
	extern kern_return_t dp_memory_object_data_synchronize(memory_object_t,
					memory_object_offset_t,
					vm_size_t,
					vm_sync_t);
	extern kern_return_t vnode_pager_data_synchronize(memory_object_t,
					memory_object_offset_t,
					vm_size_t,
					vm_sync_t);
	extern kern_return_t device_pager_data_synchronize(memory_object_t,
					memory_object_offset_t,
					vm_size_t,
					vm_sync_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		return vnode_pager_synchronize(
					memory_object,
					offset,
					size,
					sync_flags);
	} else if (memory_object->pager == &device_pager_workaround) {
		return device_pager_synchronize(
					memory_object,
					offset,
					size,
					sync_flags);
	} else
		return dp_memory_object_synchronize(
					memory_object,
					offset,
					size,
					sync_flags);
}

/* Routine memory_object_unmap */
kern_return_t memory_object_unmap
(
	memory_object_t memory_object
)
{
	extern kern_return_t dp_memory_object_unmap(memory_object_t);
	extern kern_return_t vnode_pager_unmap(memory_object_t);
	extern kern_return_t device_pager_unmap(memory_object_t);

	if (memory_object->pager == &vnode_pager_workaround) {
		return vnode_pager_unmap(memory_object);
	} else if (memory_object->pager == &device_pager_workaround) {
		return device_pager_unmap(memory_object);
	} else
		return dp_memory_object_unmap(memory_object);
}

/* Routine memory_object_create */
kern_return_t memory_object_create
(
	memory_object_default_t default_memory_manager,
	vm_size_t new_memory_object_size,
	memory_object_t *new_memory_object
)
{
	extern kern_return_t default_pager_memory_object_create(
					memory_object_default_t,
					vm_size_t,
					memory_object_t *);

	return default_pager_memory_object_create(default_memory_manager,
					new_memory_object_size,
					new_memory_object);
}