2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 * File: vm/memory_object.c
54 * Author: Michael Wayne Young
56 * External memory management interface control functions.
59 #include <advisory_pageout.h>
62 * Interface dependencies:
65 #include <mach/std_types.h> /* For pointer_t */
66 #include <mach/mach_types.h>
69 #include <mach/kern_return.h>
70 #include <mach/memory_object.h>
71 #include <mach/memory_object_default.h>
72 #include <mach/memory_object_control_server.h>
73 #include <mach/host_priv_server.h>
74 #include <mach/boolean.h>
75 #include <mach/vm_prot.h>
76 #include <mach/message.h>
79 * Implementation dependencies:
81 #include <string.h> /* For memcpy() */
84 #include <kern/host.h>
85 #include <kern/thread.h> /* For current_thread() */
86 #include <kern/ipc_mig.h>
87 #include <kern/misc_protos.h>
89 #include <vm/vm_object.h>
90 #include <vm/vm_fault.h>
91 #include <vm/memory_object.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_pageout.h>
94 #include <vm/pmap.h> /* For pmap_clear_modify */
95 #include <vm/vm_kern.h> /* For kernel_map, vm_move */
96 #include <vm/vm_map.h> /* For vm_map_pageable */
99 #include <vm/vm_external.h>
100 #endif /* MACH_PAGEMAP */
/*
 * The host's default memory manager (default pager) and the cluster
 * size it prefers.  Both start unset; guarded by the mutex declared
 * below.  NOTE(review): the embedded numerals and split lines are
 * extraction artifacts, left byte-identical.
 */
102 memory_object_default_t memory_manager_default
= MEMORY_OBJECT_DEFAULT_NULL
;
/* Preferred default-pager cluster size in bytes; 0 until configured. */
103 vm_size_t memory_manager_default_cluster
= 0;
/* Mutex guarding the two variables above. */
104 decl_mutex_data(, memory_manager_default_lock
)
107 * Forward ref to file-local function:
/*
 * vm_object_update: work function shared by memory_object_lock_request()
 * and vm_object_sync() (see definition below).
 * NOTE(review): the return-type line is missing from this extract --
 * confirm against the full source before relying on it.
 */
110 vm_object_update(vm_object_t
, vm_object_offset_t
,
111 vm_size_t
, memory_object_return_t
, int, vm_prot_t
);
115 * Routine: memory_object_should_return_page
118 * Determine whether the given page should be returned,
119 * based on the page's state and on the given return policy.
121 * We should return the page if one of the following is true:
123 * 1. Page is dirty and should_return is not RETURN_NONE.
124 * 2. Page is precious and should_return is RETURN_ALL.
125 * 3. Should_return is RETURN_ANYTHING.
127 * As a side effect, m->dirty will be made consistent
128 * with pmap_is_modified(m), if should_return is not
129 * MEMORY_OBJECT_RETURN_NONE.
/*
 * True when page (m) must be returned to its pager under the policy
 * `should_return`:
 *   - dirty page and policy != RETURN_NONE, or
 *   - precious page and policy == RETURN_ALL, or
 *   - policy == RETURN_ANYTHING.
 * Side effect: (m)->dirty is refreshed from pmap_is_modified() whenever
 * the policy is not RETURN_NONE (first clause of the inner ||).
 * Both `m` and `should_return` are evaluated more than once -- pass
 * only side-effect-free expressions.
 */
132 #define memory_object_should_return_page(m, should_return) \
133 (should_return != MEMORY_OBJECT_RETURN_NONE && \
134 (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
135 ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
136 (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
/* Per-page result of memory_object_lock_page(). */
138 typedef int memory_object_lock_result_t
;
/* Page fully handled; nothing further to do. */
140 #define MEMORY_OBJECT_LOCK_RESULT_DONE 0
/* Page busy or being cleaned; caller must block and retry. */
141 #define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
/* Dirty page: its data must be cleaned (written) to the pager. */
142 #define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
/* Page must be returned to the pager (e.g. precious data). */
143 #define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
145 memory_object_lock_result_t
memory_object_lock_page(
147 memory_object_return_t should_return
,
148 boolean_t should_flush
,
152 * Routine: memory_object_lock_page
155 * Perform the appropriate lock operations on the
156 * given page. See the description of
157 * "memory_object_lock_request" for the meanings
160 * Returns an indication that the operation
161 * completed, blocked, or that the page must
164 memory_object_lock_result_t
165 memory_object_lock_page(
167 memory_object_return_t should_return
,
168 boolean_t should_flush
,
171 XPR(XPR_MEMORY_OBJECT
,
172 "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
173 (integer_t
)m
, should_return
, should_flush
, prot
, 0);
176 * If we cannot change access to the page,
177 * either because a mapping is in progress
178 * (busy page) or because a mapping has been
179 * wired, then give up.
182 if (m
->busy
|| m
->cleaning
)
183 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK
);
186 * Don't worry about pages for which the kernel
187 * does not have any data.
190 if (m
->absent
|| m
->error
|| m
->restart
) {
191 if(m
->error
&& should_flush
) {
192 /* dump the page, pager wants us to */
193 /* clean it up and there is no */
194 /* relevant data to return */
195 if(m
->wire_count
== 0) {
197 return(MEMORY_OBJECT_LOCK_RESULT_DONE
);
200 return(MEMORY_OBJECT_LOCK_RESULT_DONE
);
204 assert(!m
->fictitious
);
206 if (m
->wire_count
!= 0) {
208 * If no change would take place
209 * anyway, return successfully.
213 * No change to page lock [2 checks] AND
214 * Should not return page
216 * XXX This doesn't handle sending a copy of a wired
217 * XXX page to the pager, but that will require some
218 * XXX significant surgery.
221 (m
->page_lock
== prot
|| prot
== VM_PROT_NO_CHANGE
) &&
222 ! memory_object_should_return_page(m
, should_return
)) {
225 * Restart page unlock requests,
226 * even though no change took place.
227 * [Memory managers may be expecting
228 * to see new requests.]
230 m
->unlock_request
= VM_PROT_NONE
;
233 return(MEMORY_OBJECT_LOCK_RESULT_DONE
);
236 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK
);
240 * If the page is to be flushed, allow
241 * that to be done as part of the protection.
250 * If we are decreasing permission, do it now;
251 * let the fault handler take care of increases
252 * (pmap_page_protect may not increase protection).
255 if (prot
!= VM_PROT_NO_CHANGE
) {
256 if ((m
->page_lock
^ prot
) & prot
) {
257 pmap_page_protect(m
->phys_page
, VM_PROT_ALL
& ~prot
);
260 /* code associated with the vestigial
261 * memory_object_data_unlock
264 m
->lock_supplied
= TRUE
;
265 if (prot
!= VM_PROT_NONE
)
271 * Restart any past unlock requests, even if no
272 * change resulted. If the manager explicitly
273 * requested no protection change, then it is assumed
274 * to be remembering past requests.
277 m
->unlock_request
= VM_PROT_NONE
;
283 * Handle page returning.
286 if (memory_object_should_return_page(m
, should_return
)) {
289 * If we weren't planning
290 * to flush the page anyway,
291 * we may need to remove the
292 * page from the pageout
293 * system and from physical
297 vm_page_lock_queues();
298 VM_PAGE_QUEUES_REMOVE(m
);
299 vm_page_unlock_queues();
302 pmap_page_protect(m
->phys_page
, VM_PROT_NONE
);
305 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN
);
307 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN
);
317 extern boolean_t vm_page_deactivate_hint
;
320 * XXX Make clean but not flush a paging hint,
321 * and deactivate the pages. This is a hack
322 * because it overloads flush/clean with
323 * implementation-dependent meaning. This only
324 * happens to pages that are already clean.
327 if (vm_page_deactivate_hint
&&
328 (should_return
!= MEMORY_OBJECT_RETURN_NONE
)) {
329 vm_page_lock_queues();
330 vm_page_deactivate(m
);
331 vm_page_unlock_queues();
335 return(MEMORY_OBJECT_LOCK_RESULT_DONE
);
338 #define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, action, po) \
342 register vm_page_t hp; \
344 vm_object_unlock(object); \
346 (void) memory_object_data_return(object->pager, \
349 (action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
352 vm_object_lock(object); \
356 * Routine: memory_object_lock_request [user interface]
359 * Control use of the data associated with the given
360 * memory object. For each page in the given range,
361 * perform the following operations, in order:
362 * 1) restrict access to the page (disallow
363 * forms specified by "prot");
364 * 2) return data to the manager (if "should_return"
365 * is RETURN_DIRTY and the page is dirty, or
366 * "should_return" is RETURN_ALL and the page
367 * is either dirty or precious); and,
368 * 3) flush the cached copy (if "should_flush"
370 * The set of pages is defined by a starting offset
371 * ("offset") and size ("size"). Only pages with the
372 * same page alignment as the starting offset are
375 * A single acknowledgement is sent (to the "reply_to"
376 * port) when these actions are complete. If successful,
377 * the naked send right for reply_to is consumed.
/*
 * Restrict access to / return / flush the pages of the object named by
 * `control` over [offset, offset+size), per should_return/flags/prot.
 * Returns KERN_INVALID_ARGUMENT for a bad control port or protection,
 * KERN_SUCCESS otherwise.
 * NOTE(review): this extract is garbled -- the return-type line, the
 * `flags` and `prot` parameter lines, the `vm_object_t object;`
 * declaration and the braces are missing from the visible text even
 * though the code below uses them; code left byte-identical.
 */
381 memory_object_lock_request(
382 memory_object_control_t control
,
383 memory_object_offset_t offset
,
384 memory_object_size_t size
,
385 memory_object_return_t should_return
,
/* `original_offset` is unused in the visible text -- presumably kept
 * for debugging; confirm against the full source. */
390 vm_object_offset_t original_offset
= offset
;
391 boolean_t should_flush
=flags
& MEMORY_OBJECT_DATA_FLUSH
;
/* Trace the request (XPR kernel tracing). */
393 XPR(XPR_MEMORY_OBJECT
,
394 "m_o_lock_request, control 0x%X off 0x%X size 0x%X flags %X prot %X\n",
395 (integer_t
)control
, offset
, size
,
396 (((should_return
&1)<<1)|should_flush
), prot
);
399 * Check for bogus arguments.
401 object
= memory_object_control_to_vm_object(control
);
402 if (object
== VM_OBJECT_NULL
)
403 return (KERN_INVALID_ARGUMENT
);
/* prot must be a subset of VM_PROT_ALL, or the NO_CHANGE sentinel. */
405 if ((prot
& ~VM_PROT_ALL
) != 0 && prot
!= VM_PROT_NO_CHANGE
)
406 return (KERN_INVALID_ARGUMENT
);
/* Round the request up to whole pages. */
408 size
= round_page_64(size
);
411 * Lock the object, and acquire a paging reference to
412 * prevent the memory_object reference from being released.
414 vm_object_lock(object
);
415 vm_object_paging_begin(object
);
/* Convert from pager space to object space before scanning pages. */
416 offset
-= object
->paging_offset
;
418 (void)vm_object_update(object
,
419 offset
, size
, should_return
, flags
, prot
);
/* Drop the paging reference and the object lock taken above. */
421 vm_object_paging_end(object
);
422 vm_object_unlock(object
);
424 return (KERN_SUCCESS
);
428 * memory_object_release_name: [interface]
430 * Enforces name semantic on memory_object reference count decrement
431 * This routine should not be called unless the caller holds a name
432 * reference gained through the memory_object_named_create or the
433 * memory_object_rename call.
434 * If the TERMINATE_IDLE flag is set, the call will return if the
435 * reference count is not 1. i.e. idle with the only remaining reference
437 * If the decision is made to proceed the name field flag is set to
438 * false and the reference count is decremented. If the RESPECT_CACHE
439 * flag is set and the reference count has gone to zero, the
440 * memory_object is checked to see if it is cacheable otherwise when
441 * the reference count is zero, it is simply terminated.
445 memory_object_release_name(
446 memory_object_control_t control
,
451 object
= memory_object_control_to_vm_object(control
);
452 if (object
== VM_OBJECT_NULL
)
453 return (KERN_INVALID_ARGUMENT
);
455 return vm_object_release_name(object
, flags
);
461 * Routine: memory_object_destroy [user interface]
463 * Shut down a memory object, despite the
464 * presence of address map (or other) references
468 memory_object_destroy(
469 memory_object_control_t control
,
470 kern_return_t reason
)
474 object
= memory_object_control_to_vm_object(control
);
475 if (object
== VM_OBJECT_NULL
)
476 return (KERN_INVALID_ARGUMENT
);
478 return (vm_object_destroy(object
, reason
));
482 * Routine: vm_object_sync
484 * Kernel internal function to synch out pages in a given
485 * range within an object to its memory manager. Much the
486 * same as memory_object_lock_request but page protection
489 * If the should_flush and should_return flags are true pages
490 * are flushed, that is dirty & precious pages are written to
491 * the memory manager and then discarded. If should_return
492 * is false, only precious pages are returned to the memory
495 * If should flush is false and should_return true, the memory
496 * manager's copy of the pages is updated. If should_return
497 * is also false, only the precious pages are updated. This
498 * last option is of limited utility.
501 * FALSE if no pages were returned to the pager
508 vm_object_offset_t offset
,
510 boolean_t should_flush
,
511 boolean_t should_return
)
516 "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
517 (integer_t
)object
, offset
, size
, should_flush
, should_return
);
520 * Lock the object, and acquire a paging reference to
521 * prevent the memory_object and control ports from
524 vm_object_lock(object
);
525 vm_object_paging_begin(object
);
527 rv
= vm_object_update(object
, offset
, size
,
529 MEMORY_OBJECT_RETURN_ALL
:
530 MEMORY_OBJECT_RETURN_NONE
,
532 MEMORY_OBJECT_DATA_FLUSH
: 0,
536 vm_object_paging_end(object
);
537 vm_object_unlock(object
);
542 * Routine: vm_object_update
544 * Work function for m_o_lock_request(), vm_o_sync().
546 * Called with object locked and paging ref taken.
550 register vm_object_t object
,
551 register vm_object_offset_t offset
,
552 register vm_size_t size
,
553 memory_object_return_t should_return
,
557 register vm_page_t m
;
558 vm_page_t holding_page
;
559 vm_size_t original_size
= size
;
560 vm_object_offset_t paging_offset
= 0;
561 vm_object_t copy_object
;
562 vm_size_t data_cnt
= 0;
563 vm_object_offset_t last_offset
= offset
;
564 memory_object_lock_result_t page_lock_result
;
565 memory_object_lock_result_t pageout_action
;
566 boolean_t data_returned
= FALSE
;
567 boolean_t update_cow
;
568 boolean_t should_flush
= flags
& MEMORY_OBJECT_DATA_FLUSH
;
569 boolean_t pending_pageout
= FALSE
;
572 * To avoid blocking while scanning for pages, save
573 * dirty pages to be cleaned all at once.
575 * XXXO A similar strategy could be used to limit the
576 * number of times that a scan must be restarted for
577 * other reasons. Those pages that would require blocking
578 * could be temporarily collected in another list, or
579 * their offsets could be recorded in a small array.
583 * XXX NOTE: May want to consider converting this to a page list
584 * XXX vm_map_copy interface. Need to understand object
585 * XXX coalescing implications before doing so.
588 update_cow
= ((flags
& MEMORY_OBJECT_DATA_FLUSH
)
589 && (!(flags
& MEMORY_OBJECT_DATA_NO_CHANGE
) &&
590 !(flags
& MEMORY_OBJECT_DATA_PURGE
)))
591 || (flags
& MEMORY_OBJECT_COPY_SYNC
);
594 if((((copy_object
= object
->copy
) != NULL
) && update_cow
) ||
595 (flags
& MEMORY_OBJECT_DATA_SYNC
)) {
598 vm_object_offset_t copy_offset
;
602 kern_return_t error
= 0;
604 if(copy_object
!= NULL
) {
605 /* translate offset with respect to shadow's offset */
606 copy_offset
= (offset
>= copy_object
->shadow_offset
)?
607 offset
- copy_object
->shadow_offset
:
608 (vm_object_offset_t
) 0;
609 if(copy_offset
> copy_object
->size
)
610 copy_offset
= copy_object
->size
;
612 /* clip size with respect to shadow offset */
613 copy_size
= (offset
>= copy_object
->shadow_offset
) ?
614 size
: size
- (copy_object
->shadow_offset
- offset
);
619 copy_size
= ((copy_offset
+ copy_size
)
620 <= copy_object
->size
) ?
621 copy_size
: copy_object
->size
- copy_offset
;
623 /* check for a copy_offset which is beyond the end of */
624 /* the copy_object */
628 copy_size
+=copy_offset
;
630 vm_object_unlock(object
);
631 vm_object_lock(copy_object
);
633 copy_object
= object
;
635 copy_size
= offset
+ size
;
636 copy_offset
= offset
;
639 vm_object_paging_begin(copy_object
);
640 for (i
=copy_offset
; i
<copy_size
; i
+=PAGE_SIZE
) {
641 RETRY_COW_OF_LOCK_REQUEST
:
642 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
643 switch (vm_fault_page(copy_object
, i
,
644 VM_PROT_WRITE
|VM_PROT_READ
,
648 copy_offset
+copy_size
,
649 VM_BEHAVIOR_SEQUENTIAL
,
658 case VM_FAULT_SUCCESS
:
661 page
->object
, top_page
);
662 PAGE_WAKEUP_DONE(page
);
663 vm_page_lock_queues();
664 if (!page
->active
&& !page
->inactive
)
665 vm_page_activate(page
);
666 vm_page_unlock_queues();
667 vm_object_lock(copy_object
);
668 vm_object_paging_begin(copy_object
);
670 PAGE_WAKEUP_DONE(page
);
671 vm_page_lock_queues();
672 if (!page
->active
&& !page
->inactive
)
673 vm_page_activate(page
);
674 vm_page_unlock_queues();
678 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
679 vm_object_lock(copy_object
);
680 vm_object_paging_begin(copy_object
);
681 goto RETRY_COW_OF_LOCK_REQUEST
;
682 case VM_FAULT_INTERRUPTED
:
683 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
684 vm_object_lock(copy_object
);
685 vm_object_paging_begin(copy_object
);
686 goto RETRY_COW_OF_LOCK_REQUEST
;
687 case VM_FAULT_MEMORY_SHORTAGE
:
689 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
690 vm_object_lock(copy_object
);
691 vm_object_paging_begin(copy_object
);
692 goto RETRY_COW_OF_LOCK_REQUEST
;
693 case VM_FAULT_FICTITIOUS_SHORTAGE
:
694 vm_page_more_fictitious();
695 prot
= VM_PROT_WRITE
|VM_PROT_READ
;
696 vm_object_lock(copy_object
);
697 vm_object_paging_begin(copy_object
);
698 goto RETRY_COW_OF_LOCK_REQUEST
;
699 case VM_FAULT_MEMORY_ERROR
:
700 vm_object_lock(object
);
701 goto BYPASS_COW_COPYIN
;
705 vm_object_paging_end(copy_object
);
706 if(copy_object
!= object
) {
707 vm_object_unlock(copy_object
);
708 vm_object_lock(object
);
711 if((flags
& (MEMORY_OBJECT_DATA_SYNC
| MEMORY_OBJECT_COPY_SYNC
))) {
714 if(((copy_object
= object
->copy
) != NULL
) &&
715 (flags
& MEMORY_OBJECT_DATA_PURGE
)) {
716 copy_object
->shadow_severed
= TRUE
;
717 copy_object
->shadowed
= FALSE
;
718 copy_object
->shadow
= NULL
;
719 /* delete the ref the COW was holding on the target object */
720 vm_object_deallocate(object
);
726 size
-= PAGE_SIZE
, offset
+= PAGE_SIZE_64
)
729 * Limit the number of pages to be cleaned at once.
731 if (pending_pageout
&&
732 data_cnt
>= PAGE_SIZE
* DATA_WRITE_MAX
)
734 LIST_REQ_PAGEOUT_PAGES(object
, data_cnt
,
735 pageout_action
, paging_offset
);
737 pending_pageout
= FALSE
;
740 while ((m
= vm_page_lookup(object
, offset
)) != VM_PAGE_NULL
) {
741 page_lock_result
= memory_object_lock_page(m
, should_return
,
744 XPR(XPR_MEMORY_OBJECT
,
745 "m_o_update: lock_page, obj 0x%X offset 0x%X result %d\n",
746 (integer_t
)object
, offset
, page_lock_result
, 0, 0);
748 switch (page_lock_result
)
750 case MEMORY_OBJECT_LOCK_RESULT_DONE
:
752 * End of a cluster of dirty pages.
754 if(pending_pageout
) {
755 LIST_REQ_PAGEOUT_PAGES(object
,
756 data_cnt
, pageout_action
,
759 pending_pageout
= FALSE
;
764 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK
:
766 * Since it is necessary to block,
767 * clean any dirty pages now.
769 if(pending_pageout
) {
770 LIST_REQ_PAGEOUT_PAGES(object
,
771 data_cnt
, pageout_action
,
773 pending_pageout
= FALSE
;
778 PAGE_SLEEP(object
, m
, THREAD_UNINT
);
781 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN
:
782 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN
:
784 * The clean and return cases are similar.
789 * if this would form a discontiguous block,
790 * clean the old pages and start anew.
795 * Mark the page busy since we unlock the
799 if (pending_pageout
&&
800 (last_offset
!= offset
||
801 pageout_action
!= page_lock_result
)) {
802 LIST_REQ_PAGEOUT_PAGES(object
,
803 data_cnt
, pageout_action
,
805 pending_pageout
= FALSE
;
809 holding_page
= VM_PAGE_NULL
;
811 PAGE_SLEEP(object
, m
, THREAD_UNINT
);
814 if(!pending_pageout
) {
815 pending_pageout
= TRUE
;
816 pageout_action
= page_lock_result
;
817 paging_offset
= offset
;
820 vm_page_lock_queues();
821 m
->list_req_pending
= TRUE
;
826 vm_page_unlock_queues();
829 * Clean but do not flush
831 vm_page_lock_queues();
832 m
->list_req_pending
= TRUE
;
834 vm_page_unlock_queues();
837 vm_object_unlock(object
);
840 data_cnt
+= PAGE_SIZE
;
841 last_offset
= offset
+ PAGE_SIZE_64
;
842 data_returned
= TRUE
;
844 vm_object_lock(object
);
852 * We have completed the scan for applicable pages.
853 * Clean any pages that have been saved.
855 if (pending_pageout
) {
856 LIST_REQ_PAGEOUT_PAGES(object
,
857 data_cnt
, pageout_action
, paging_offset
);
859 return (data_returned
);
863 * Routine: memory_object_synchronize_completed [user interface]
865 * Tell kernel that previously synchronized data
866 * (memory_object_synchronize) has been queue or placed on the
869 * Note: there may be multiple synchronize requests for a given
870 * memory object outstanding but they will not overlap.
874 memory_object_synchronize_completed(
875 memory_object_control_t control
,
876 memory_object_offset_t offset
,
882 XPR(XPR_MEMORY_OBJECT
,
883 "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
884 (integer_t
)object
, offset
, length
, 0, 0);
887 * Look for bogus arguments
890 object
= memory_object_control_to_vm_object(control
);
891 if (object
== VM_OBJECT_NULL
)
892 return (KERN_INVALID_ARGUMENT
);
894 vm_object_lock(object
);
897 * search for sync request structure
899 queue_iterate(&object
->msr_q
, msr
, msync_req_t
, msr_q
) {
900 if (msr
->offset
== offset
&& msr
->length
== length
) {
901 queue_remove(&object
->msr_q
, msr
, msync_req_t
, msr_q
);
906 if (queue_end(&object
->msr_q
, (queue_entry_t
)msr
)) {
907 vm_object_unlock(object
);
908 return KERN_INVALID_ARGUMENT
;
912 vm_object_unlock(object
);
913 msr
->flag
= VM_MSYNC_DONE
;
915 thread_wakeup((event_t
) msr
);
918 }/* memory_object_synchronize_completed */
921 vm_object_set_attributes_common(
924 memory_object_copy_strategy_t copy_strategy
,
926 vm_size_t cluster_size
,
927 boolean_t silent_overwrite
,
928 boolean_t advisory_pageout
)
930 boolean_t object_became_ready
;
932 XPR(XPR_MEMORY_OBJECT
,
933 "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
934 (integer_t
)object
, (may_cache
&1)|((temporary
&1)<1), copy_strategy
, 0, 0);
936 if (object
== VM_OBJECT_NULL
)
937 return(KERN_INVALID_ARGUMENT
);
940 * Verify the attributes of importance
943 switch(copy_strategy
) {
944 case MEMORY_OBJECT_COPY_NONE
:
945 case MEMORY_OBJECT_COPY_DELAY
:
948 return(KERN_INVALID_ARGUMENT
);
951 #if !ADVISORY_PAGEOUT
952 if (silent_overwrite
|| advisory_pageout
)
953 return(KERN_INVALID_ARGUMENT
);
955 #endif /* !ADVISORY_PAGEOUT */
960 if (cluster_size
!= 0) {
961 int pages_per_cluster
;
962 pages_per_cluster
= atop_32(cluster_size
);
964 * Cluster size must be integral multiple of page size,
965 * and be a power of 2 number of pages.
967 if ((cluster_size
& (PAGE_SIZE
-1)) ||
968 ((pages_per_cluster
-1) & pages_per_cluster
))
969 return KERN_INVALID_ARGUMENT
;
972 vm_object_lock(object
);
975 * Copy the attributes
977 assert(!object
->internal
);
978 object_became_ready
= !object
->pager_ready
;
979 object
->copy_strategy
= copy_strategy
;
980 object
->can_persist
= may_cache
;
981 object
->temporary
= temporary
;
982 object
->silent_overwrite
= silent_overwrite
;
983 object
->advisory_pageout
= advisory_pageout
;
984 if (cluster_size
== 0)
985 cluster_size
= PAGE_SIZE
;
986 object
->cluster_size
= cluster_size
;
988 assert(cluster_size
>= PAGE_SIZE
&&
989 cluster_size
% PAGE_SIZE
== 0);
992 * Wake up anyone waiting for the ready attribute
993 * to become asserted.
996 if (object_became_ready
) {
997 object
->pager_ready
= TRUE
;
998 vm_object_wakeup(object
, VM_OBJECT_EVENT_PAGER_READY
);
1001 vm_object_unlock(object
);
1003 return(KERN_SUCCESS
);
1007 * Set the memory object attribute as provided.
1009 * XXX This routine cannot be completed until the vm_msync, clean
1010 * in place, and cluster work is completed. See ifdef notyet
1011 * below and note that vm_object_set_attributes_common()
1012 * may have to be expanded.
1015 memory_object_change_attributes(
1016 memory_object_control_t control
,
1017 memory_object_flavor_t flavor
,
1018 memory_object_info_t attributes
,
1019 mach_msg_type_number_t count
)
1022 kern_return_t result
= KERN_SUCCESS
;
1023 boolean_t temporary
;
1024 boolean_t may_cache
;
1025 boolean_t invalidate
;
1026 vm_size_t cluster_size
;
1027 memory_object_copy_strategy_t copy_strategy
;
1028 boolean_t silent_overwrite
;
1029 boolean_t advisory_pageout
;
1031 object
= memory_object_control_to_vm_object(control
);
1032 if (object
== VM_OBJECT_NULL
)
1033 return (KERN_INVALID_ARGUMENT
);
1035 vm_object_lock(object
);
1037 temporary
= object
->temporary
;
1038 may_cache
= object
->can_persist
;
1039 copy_strategy
= object
->copy_strategy
;
1040 silent_overwrite
= object
->silent_overwrite
;
1041 advisory_pageout
= object
->advisory_pageout
;
1043 invalidate
= object
->invalidate
;
1045 cluster_size
= object
->cluster_size
;
1046 vm_object_unlock(object
);
1049 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO
:
1051 old_memory_object_behave_info_t behave
;
1053 if (count
!= OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT
) {
1054 result
= KERN_INVALID_ARGUMENT
;
1058 behave
= (old_memory_object_behave_info_t
) attributes
;
1060 temporary
= behave
->temporary
;
1061 invalidate
= behave
->invalidate
;
1062 copy_strategy
= behave
->copy_strategy
;
1067 case MEMORY_OBJECT_BEHAVIOR_INFO
:
1069 memory_object_behave_info_t behave
;
1071 if (count
!= MEMORY_OBJECT_BEHAVE_INFO_COUNT
) {
1072 result
= KERN_INVALID_ARGUMENT
;
1076 behave
= (memory_object_behave_info_t
) attributes
;
1078 temporary
= behave
->temporary
;
1079 invalidate
= behave
->invalidate
;
1080 copy_strategy
= behave
->copy_strategy
;
1081 silent_overwrite
= behave
->silent_overwrite
;
1082 advisory_pageout
= behave
->advisory_pageout
;
1086 case MEMORY_OBJECT_PERFORMANCE_INFO
:
1088 memory_object_perf_info_t perf
;
1090 if (count
!= MEMORY_OBJECT_PERF_INFO_COUNT
) {
1091 result
= KERN_INVALID_ARGUMENT
;
1095 perf
= (memory_object_perf_info_t
) attributes
;
1097 may_cache
= perf
->may_cache
;
1098 cluster_size
= round_page_32(perf
->cluster_size
);
1103 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO
:
1105 old_memory_object_attr_info_t attr
;
1107 if (count
!= OLD_MEMORY_OBJECT_ATTR_INFO_COUNT
) {
1108 result
= KERN_INVALID_ARGUMENT
;
1112 attr
= (old_memory_object_attr_info_t
) attributes
;
1114 may_cache
= attr
->may_cache
;
1115 copy_strategy
= attr
->copy_strategy
;
1116 cluster_size
= page_size
;
1121 case MEMORY_OBJECT_ATTRIBUTE_INFO
:
1123 memory_object_attr_info_t attr
;
1125 if (count
!= MEMORY_OBJECT_ATTR_INFO_COUNT
) {
1126 result
= KERN_INVALID_ARGUMENT
;
1130 attr
= (memory_object_attr_info_t
) attributes
;
1132 copy_strategy
= attr
->copy_strategy
;
1133 may_cache
= attr
->may_cache_object
;
1134 cluster_size
= attr
->cluster_size
;
1135 temporary
= attr
->temporary
;
1141 result
= KERN_INVALID_ARGUMENT
;
1145 if (result
!= KERN_SUCCESS
)
1148 if (copy_strategy
== MEMORY_OBJECT_COPY_TEMPORARY
) {
1149 copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
1156 * XXX may_cache may become a tri-valued variable to handle
1157 * XXX uncache if not in use.
1159 return (vm_object_set_attributes_common(object
,
1169 memory_object_get_attributes(
1170 memory_object_control_t control
,
1171 memory_object_flavor_t flavor
,
1172 memory_object_info_t attributes
, /* pointer to OUT array */
1173 mach_msg_type_number_t
*count
) /* IN/OUT */
1175 kern_return_t ret
= KERN_SUCCESS
;
1178 object
= memory_object_control_to_vm_object(control
);
1179 if (object
== VM_OBJECT_NULL
)
1180 return (KERN_INVALID_ARGUMENT
);
1182 vm_object_lock(object
);
1185 case OLD_MEMORY_OBJECT_BEHAVIOR_INFO
:
1187 old_memory_object_behave_info_t behave
;
1189 if (*count
< OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT
) {
1190 ret
= KERN_INVALID_ARGUMENT
;
1194 behave
= (old_memory_object_behave_info_t
) attributes
;
1195 behave
->copy_strategy
= object
->copy_strategy
;
1196 behave
->temporary
= object
->temporary
;
1197 #if notyet /* remove when vm_msync complies and clean in place fini */
1198 behave
->invalidate
= object
->invalidate
;
1200 behave
->invalidate
= FALSE
;
1203 *count
= OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT
;
1207 case MEMORY_OBJECT_BEHAVIOR_INFO
:
1209 memory_object_behave_info_t behave
;
1211 if (*count
< MEMORY_OBJECT_BEHAVE_INFO_COUNT
) {
1212 ret
= KERN_INVALID_ARGUMENT
;
1216 behave
= (memory_object_behave_info_t
) attributes
;
1217 behave
->copy_strategy
= object
->copy_strategy
;
1218 behave
->temporary
= object
->temporary
;
1219 #if notyet /* remove when vm_msync complies and clean in place fini */
1220 behave
->invalidate
= object
->invalidate
;
1222 behave
->invalidate
= FALSE
;
1224 behave
->advisory_pageout
= object
->advisory_pageout
;
1225 behave
->silent_overwrite
= object
->silent_overwrite
;
1226 *count
= MEMORY_OBJECT_BEHAVE_INFO_COUNT
;
1230 case MEMORY_OBJECT_PERFORMANCE_INFO
:
1232 memory_object_perf_info_t perf
;
1234 if (*count
< MEMORY_OBJECT_PERF_INFO_COUNT
) {
1235 ret
= KERN_INVALID_ARGUMENT
;
1239 perf
= (memory_object_perf_info_t
) attributes
;
1240 perf
->cluster_size
= object
->cluster_size
;
1241 perf
->may_cache
= object
->can_persist
;
1243 *count
= MEMORY_OBJECT_PERF_INFO_COUNT
;
1247 case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO
:
1249 old_memory_object_attr_info_t attr
;
1251 if (*count
< OLD_MEMORY_OBJECT_ATTR_INFO_COUNT
) {
1252 ret
= KERN_INVALID_ARGUMENT
;
1256 attr
= (old_memory_object_attr_info_t
) attributes
;
1257 attr
->may_cache
= object
->can_persist
;
1258 attr
->copy_strategy
= object
->copy_strategy
;
1260 *count
= OLD_MEMORY_OBJECT_ATTR_INFO_COUNT
;
1264 case MEMORY_OBJECT_ATTRIBUTE_INFO
:
1266 memory_object_attr_info_t attr
;
1268 if (*count
< MEMORY_OBJECT_ATTR_INFO_COUNT
) {
1269 ret
= KERN_INVALID_ARGUMENT
;
1273 attr
= (memory_object_attr_info_t
) attributes
;
1274 attr
->copy_strategy
= object
->copy_strategy
;
1275 attr
->cluster_size
= object
->cluster_size
;
1276 attr
->may_cache_object
= object
->can_persist
;
1277 attr
->temporary
= object
->temporary
;
1279 *count
= MEMORY_OBJECT_ATTR_INFO_COUNT
;
1284 ret
= KERN_INVALID_ARGUMENT
;
1288 vm_object_unlock(object
);
1295 memory_object_iopl_request(
1297 memory_object_offset_t offset
,
1298 vm_size_t
*upl_size
,
1300 upl_page_info_array_t user_page_list
,
1301 unsigned int *page_list_count
,
1308 caller_flags
= *flags
;
1310 if (ip_kotype(port
) == IKOT_NAMED_ENTRY
) {
1311 vm_named_entry_t named_entry
;
1313 named_entry
= (vm_named_entry_t
)port
->ip_kobject
;
1314 /* a few checks to make sure user is obeying rules */
1315 if(*upl_size
== 0) {
1316 if(offset
>= named_entry
->size
)
1317 return(KERN_INVALID_RIGHT
);
1318 *upl_size
= named_entry
->size
- offset
;
1320 if(caller_flags
& UPL_COPYOUT_FROM
) {
1321 if((named_entry
->protection
& VM_PROT_READ
)
1323 return(KERN_INVALID_RIGHT
);
1326 if((named_entry
->protection
&
1327 (VM_PROT_READ
| VM_PROT_WRITE
))
1328 != (VM_PROT_READ
| VM_PROT_WRITE
)) {
1329 return(KERN_INVALID_RIGHT
);
1332 if(named_entry
->size
< (offset
+ *upl_size
))
1333 return(KERN_INVALID_ARGUMENT
);
1335 /* the callers parameter offset is defined to be the */
1336 /* offset from beginning of named entry offset in object */
1337 offset
= offset
+ named_entry
->offset
;
1339 if(named_entry
->is_sub_map
)
1340 return (KERN_INVALID_ARGUMENT
);
1342 named_entry_lock(named_entry
);
1344 if(named_entry
->object
) {
1345 /* This is the case where we are going to map */
1346 /* an already mapped object. If the object is */
1347 /* not ready it is internal. An external */
1348 /* object cannot be mapped until it is ready */
1349 /* we can therefore avoid the ready check */
1351 vm_object_reference(named_entry
->object
);
1352 object
= named_entry
->object
;
1353 named_entry_unlock(named_entry
);
1355 object
= vm_object_enter(named_entry
->backing
.pager
,
1356 named_entry
->offset
+ named_entry
->size
,
1357 named_entry
->internal
,
1360 if (object
== VM_OBJECT_NULL
) {
1361 named_entry_unlock(named_entry
);
1362 return(KERN_INVALID_OBJECT
);
1364 vm_object_lock(object
);
1366 /* create an extra reference for the named entry */
1367 vm_object_reference_locked(object
);
1368 named_entry
->object
= object
;
1369 named_entry_unlock(named_entry
);
1371 /* wait for object to be ready */
1372 while (!object
->pager_ready
) {
1373 vm_object_wait(object
,
1374 VM_OBJECT_EVENT_PAGER_READY
,
1376 vm_object_lock(object
);
1378 vm_object_unlock(object
);
1381 memory_object_control_t control
;
1382 control
= (memory_object_control_t
)port
->ip_kobject
;
1383 if (control
== NULL
)
1384 return (KERN_INVALID_ARGUMENT
);
1385 object
= memory_object_control_to_vm_object(control
);
1386 if (object
== VM_OBJECT_NULL
)
1387 return (KERN_INVALID_ARGUMENT
);
1388 vm_object_reference(object
);
1390 if (object
== VM_OBJECT_NULL
)
1391 return (KERN_INVALID_ARGUMENT
);
1393 if (!object
->private) {
1394 if (*upl_size
> (MAX_UPL_TRANSFER
*PAGE_SIZE
))
1395 *upl_size
= (MAX_UPL_TRANSFER
*PAGE_SIZE
);
1396 if (object
->phys_contiguous
) {
1397 *flags
= UPL_PHYS_CONTIG
;
1402 *flags
= UPL_DEV_MEMORY
| UPL_PHYS_CONTIG
;
1405 ret
= vm_object_iopl_request(object
,
1412 vm_object_deallocate(object
);
 *	Routine:	memory_object_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
1426 memory_object_upl_request(
1427 memory_object_control_t control
,
1428 memory_object_offset_t offset
,
1431 upl_page_info_array_t user_page_list
,
1432 unsigned int *page_list_count
,
1437 object
= memory_object_control_to_vm_object(control
);
1438 if (object
== VM_OBJECT_NULL
)
1439 return (KERN_INVALID_ARGUMENT
);
1441 return vm_object_upl_request(object
,
 *	Routine:	memory_object_super_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object
 *		in much the same way as memory_object_upl_request.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		However, the region may be expanded up to the super
 *		cluster size provided.
1462 memory_object_super_upl_request(
1463 memory_object_control_t control
,
1464 memory_object_offset_t offset
,
1466 vm_size_t super_cluster
,
1468 upl_page_info_t
*user_page_list
,
1469 unsigned int *page_list_count
,
1474 object
= memory_object_control_to_vm_object(control
);
1475 if (object
== VM_OBJECT_NULL
)
1476 return (KERN_INVALID_ARGUMENT
);
1478 return vm_object_super_upl_request(object
,
/*
 * Statistics for vm_stat_discard reply handling (updated elsewhere;
 * exported for debugging).  NOTE(review): the individual meanings are
 * inferred from the names only -- confirm against the discard-reply path.
 */
int vm_stat_discard_cleared_reply = 0;
int vm_stat_discard_cleared_unset = 0;
int vm_stat_discard_cleared_too_late = 0;
1495 * Routine: host_default_memory_manager [interface]
1497 * set/get the default memory manager port and default cluster
1500 * If successful, consumes the supplied naked send right.
1503 host_default_memory_manager(
1504 host_priv_t host_priv
,
1505 memory_object_default_t
*default_manager
,
1506 vm_size_t cluster_size
)
1508 memory_object_default_t current_manager
;
1509 memory_object_default_t new_manager
;
1510 memory_object_default_t returned_manager
;
1512 if (host_priv
== HOST_PRIV_NULL
)
1513 return(KERN_INVALID_HOST
);
1515 assert(host_priv
== &realhost
);
1517 new_manager
= *default_manager
;
1518 mutex_lock(&memory_manager_default_lock
);
1519 current_manager
= memory_manager_default
;
1521 if (new_manager
== MEMORY_OBJECT_DEFAULT_NULL
) {
1523 * Retrieve the current value.
1525 memory_object_default_reference(current_manager
);
1526 returned_manager
= current_manager
;
1529 * Retrieve the current value,
1530 * and replace it with the supplied value.
1531 * We return the old reference to the caller
1532 * but we have to take a reference on the new
1536 returned_manager
= current_manager
;
1537 memory_manager_default
= new_manager
;
1538 memory_object_default_reference(new_manager
);
1540 if (cluster_size
% PAGE_SIZE
!= 0) {
1542 mutex_unlock(&memory_manager_default_lock
);
1543 return KERN_INVALID_ARGUMENT
;
1545 cluster_size
= round_page_32(cluster_size
);
1548 memory_manager_default_cluster
= cluster_size
;
1551 * In case anyone's been waiting for a memory
1552 * manager to be established, wake them up.
1555 thread_wakeup((event_t
) &memory_manager_default
);
1558 mutex_unlock(&memory_manager_default_lock
);
1560 *default_manager
= returned_manager
;
1561 return(KERN_SUCCESS
);
1565 * Routine: memory_manager_default_reference
1567 * Returns a naked send right for the default
1568 * memory manager. The returned right is always
1569 * valid (not IP_NULL or IP_DEAD).
1572 __private_extern__ memory_object_default_t
1573 memory_manager_default_reference(
1574 vm_size_t
*cluster_size
)
1576 memory_object_default_t current_manager
;
1578 mutex_lock(&memory_manager_default_lock
);
1579 current_manager
= memory_manager_default
;
1580 while (current_manager
== MEMORY_OBJECT_DEFAULT_NULL
) {
1583 res
= thread_sleep_mutex((event_t
) &memory_manager_default
,
1584 &memory_manager_default_lock
,
1586 assert(res
== THREAD_AWAKENED
);
1587 current_manager
= memory_manager_default
;
1589 memory_object_default_reference(current_manager
);
1590 *cluster_size
= memory_manager_default_cluster
;
1591 mutex_unlock(&memory_manager_default_lock
);
1593 return current_manager
;
1597 * Routine: memory_manager_default_check
1600 * Check whether a default memory manager has been set
1601 * up yet, or not. Returns KERN_SUCCESS if dmm exists,
1602 * and KERN_FAILURE if dmm does not exist.
1604 * If there is no default memory manager, log an error,
1605 * but only the first time.
1608 __private_extern__ kern_return_t
1609 memory_manager_default_check(void)
1611 memory_object_default_t current
;
1613 mutex_lock(&memory_manager_default_lock
);
1614 current
= memory_manager_default
;
1615 if (current
== MEMORY_OBJECT_DEFAULT_NULL
) {
1616 static boolean_t logged
; /* initialized to 0 */
1617 boolean_t complain
= !logged
;
1619 mutex_unlock(&memory_manager_default_lock
);
1621 printf("Warning: No default memory manager\n");
1622 return(KERN_FAILURE
);
1624 mutex_unlock(&memory_manager_default_lock
);
1625 return(KERN_SUCCESS
);
1629 __private_extern__
void
1630 memory_manager_default_init(void)
1632 memory_manager_default
= MEMORY_OBJECT_DEFAULT_NULL
;
1633 mutex_init(&memory_manager_default_lock
, ETAP_VM_MEMMAN
);
1638 memory_object_deactivate_pages(
1640 vm_object_offset_t offset
,
1641 vm_object_size_t size
,
1642 boolean_t kill_page
)
1644 vm_object_t orig_object
;
1645 int pages_moved
= 0;
1646 int pages_found
= 0;
1649 * entered with object lock held, acquire a paging reference to
1650 * prevent the memory_object and control ports from
1653 orig_object
= object
;
1656 register vm_page_t m
;
1657 vm_object_offset_t toffset
;
1658 vm_object_size_t tsize
;
1660 vm_object_paging_begin(object
);
1661 vm_page_lock_queues();
1663 for (tsize
= size
, toffset
= offset
; tsize
; tsize
-= PAGE_SIZE
, toffset
+= PAGE_SIZE
) {
1665 if ((m
= vm_page_lookup(object
, toffset
)) != VM_PAGE_NULL
) {
1669 if ((m
->wire_count
== 0) && (!m
->private) && (!m
->gobbled
) && (!m
->busy
)) {
1671 m
->reference
= FALSE
;
1672 pmap_clear_reference(m
->phys_page
);
1674 if ((kill_page
) && (object
->internal
)) {
1675 m
->precious
= FALSE
;
1677 pmap_clear_modify(m
->phys_page
);
1678 vm_external_state_clr(object
->existence_map
, offset
);
1680 VM_PAGE_QUEUES_REMOVE(m
);
1685 m
, vm_page_t
, pageq
);
1688 &vm_page_queue_inactive
,
1689 m
, vm_page_t
, pageq
);
1694 vm_page_inactive_count
++;
1700 vm_page_unlock_queues();
1701 vm_object_paging_end(object
);
1703 if (object
->shadow
) {
1704 vm_object_t tmp_object
;
1708 offset
+= object
->shadow_offset
;
1710 tmp_object
= object
->shadow
;
1711 vm_object_lock(tmp_object
);
1713 if (object
!= orig_object
)
1714 vm_object_unlock(object
);
1715 object
= tmp_object
;
1719 if (object
!= orig_object
)
1720 vm_object_unlock(object
);
1723 /* Allow manipulation of individual page state. This is actually part of */
1724 /* the UPL regimen but takes place on the object rather than on a UPL */
1727 memory_object_page_op(
1728 memory_object_control_t control
,
1729 memory_object_offset_t offset
,
1731 ppnum_t
*phys_entry
,
1738 object
= memory_object_control_to_vm_object(control
);
1739 if (object
== VM_OBJECT_NULL
)
1740 return (KERN_INVALID_ARGUMENT
);
1742 vm_object_lock(object
);
1744 if(ops
& UPL_POP_PHYSICAL
) {
1745 if(object
->phys_contiguous
) {
1747 *phys_entry
= (ppnum_t
)
1748 (object
->shadow_offset
>> 12);
1750 vm_object_unlock(object
);
1751 return KERN_SUCCESS
;
1753 vm_object_unlock(object
);
1754 return KERN_INVALID_OBJECT
;
1757 if(object
->phys_contiguous
) {
1758 vm_object_unlock(object
);
1759 return KERN_INVALID_OBJECT
;
1763 if((dst_page
= vm_page_lookup(object
,offset
)) == VM_PAGE_NULL
) {
1764 vm_object_unlock(object
);
1765 return KERN_FAILURE
;
1768 /* Sync up on getting the busy bit */
1769 if((dst_page
->busy
|| dst_page
->cleaning
) &&
1770 (((ops
& UPL_POP_SET
) &&
1771 (ops
& UPL_POP_BUSY
)) || (ops
& UPL_POP_DUMP
))) {
1772 /* someone else is playing with the page, we will */
1774 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
1778 if (ops
& UPL_POP_DUMP
) {
1779 vm_page_lock_queues();
1781 if (dst_page
->no_isync
== FALSE
)
1782 pmap_page_protect(dst_page
->phys_page
, VM_PROT_NONE
);
1783 vm_page_free(dst_page
);
1785 vm_page_unlock_queues();
1792 /* Get the condition of flags before requested ops */
1793 /* are undertaken */
1795 if(dst_page
->dirty
) *flags
|= UPL_POP_DIRTY
;
1796 if(dst_page
->pageout
) *flags
|= UPL_POP_PAGEOUT
;
1797 if(dst_page
->precious
) *flags
|= UPL_POP_PRECIOUS
;
1798 if(dst_page
->absent
) *flags
|= UPL_POP_ABSENT
;
1799 if(dst_page
->busy
) *flags
|= UPL_POP_BUSY
;
1802 *phys_entry
= dst_page
->phys_page
;
1804 /* The caller should have made a call either contingent with */
1805 /* or prior to this call to set UPL_POP_BUSY */
1806 if(ops
& UPL_POP_SET
) {
1807 /* The protection granted with this assert will */
1808 /* not be complete. If the caller violates the */
1809 /* convention and attempts to change page state */
1810 /* without first setting busy we may not see it */
1811 /* because the page may already be busy. However */
1812 /* if such violations occur we will assert sooner */
1814 assert(dst_page
->busy
|| (ops
& UPL_POP_BUSY
));
1815 if (ops
& UPL_POP_DIRTY
) dst_page
->dirty
= TRUE
;
1816 if (ops
& UPL_POP_PAGEOUT
) dst_page
->pageout
= TRUE
;
1817 if (ops
& UPL_POP_PRECIOUS
) dst_page
->precious
= TRUE
;
1818 if (ops
& UPL_POP_ABSENT
) dst_page
->absent
= TRUE
;
1819 if (ops
& UPL_POP_BUSY
) dst_page
->busy
= TRUE
;
1822 if(ops
& UPL_POP_CLR
) {
1823 assert(dst_page
->busy
);
1824 if (ops
& UPL_POP_DIRTY
) dst_page
->dirty
= FALSE
;
1825 if (ops
& UPL_POP_PAGEOUT
) dst_page
->pageout
= FALSE
;
1826 if (ops
& UPL_POP_PRECIOUS
) dst_page
->precious
= FALSE
;
1827 if (ops
& UPL_POP_ABSENT
) dst_page
->absent
= FALSE
;
1828 if (ops
& UPL_POP_BUSY
) {
1829 dst_page
->busy
= FALSE
;
1830 PAGE_WAKEUP(dst_page
);
1836 vm_object_unlock(object
);
1837 return KERN_SUCCESS
;
1842 * memory_object_range_op offers performance enhancement over
1843 * memory_object_page_op for page_op functions which do not require page
1844 * level state to be returned from the call. Page_op was created to provide
1845 * a low-cost alternative to page manipulation via UPLs when only a single
1846 * page was involved. The range_op call establishes the ability in the _op
1847 * family of functions to work on multiple pages where the lack of page level
1848 * state handling allows the caller to avoid the overhead of the upl structures.
1852 memory_object_range_op(
1853 memory_object_control_t control
,
1854 memory_object_offset_t offset_beg
,
1855 memory_object_offset_t offset_end
,
1859 memory_object_offset_t offset
;
1863 object
= memory_object_control_to_vm_object(control
);
1864 if (object
== VM_OBJECT_NULL
)
1865 return (KERN_INVALID_ARGUMENT
);
1867 if (object
->resident_page_count
== 0) {
1869 if (ops
& UPL_ROP_PRESENT
)
1872 *range
= offset_end
- offset_beg
;
1874 return KERN_SUCCESS
;
1876 vm_object_lock(object
);
1878 if (object
->phys_contiguous
)
1879 return KERN_INVALID_OBJECT
;
1881 offset
= offset_beg
;
1883 while (offset
< offset_end
) {
1884 if (dst_page
= vm_page_lookup(object
, offset
)) {
1885 if (ops
& UPL_ROP_DUMP
) {
1886 if (dst_page
->busy
|| dst_page
->cleaning
) {
1888 * someone else is playing with the
1889 * page, we will have to wait
1892 dst_page
, THREAD_UNINT
);
1894 * need to relook the page up since it's
1895 * state may have changed while we slept
1896 * it might even belong to a different object
1901 vm_page_lock_queues();
1903 if (dst_page
->no_isync
== FALSE
)
1904 pmap_page_protect(dst_page
->phys_page
, VM_PROT_NONE
);
1905 vm_page_free(dst_page
);
1907 vm_page_unlock_queues();
1908 } else if (ops
& UPL_ROP_ABSENT
)
1910 } else if (ops
& UPL_ROP_PRESENT
)
1913 offset
+= PAGE_SIZE
;
1915 vm_object_unlock(object
);
1918 *range
= offset
- offset_beg
;
1920 return KERN_SUCCESS
;
1923 static zone_t mem_obj_control_zone
;
1925 __private_extern__
void
1926 memory_object_control_bootstrap(void)
1930 i
= (vm_size_t
) sizeof (struct memory_object_control
);
1931 mem_obj_control_zone
= zinit (i
, 8192*i
, 4096, "mem_obj_control");
1935 __private_extern__ memory_object_control_t
1936 memory_object_control_allocate(
1939 memory_object_control_t control
;
1941 control
= (memory_object_control_t
)zalloc(mem_obj_control_zone
);
1942 if (control
!= MEMORY_OBJECT_CONTROL_NULL
)
1943 control
->object
= object
;
1947 __private_extern__
void
1948 memory_object_control_collapse(
1949 memory_object_control_t control
,
1952 assert((control
->object
!= VM_OBJECT_NULL
) &&
1953 (control
->object
!= object
));
1954 control
->object
= object
;
1957 __private_extern__ vm_object_t
1958 memory_object_control_to_vm_object(
1959 memory_object_control_t control
)
1961 if (control
== MEMORY_OBJECT_CONTROL_NULL
)
1962 return VM_OBJECT_NULL
;
1964 return (control
->object
);
1967 memory_object_control_t
1968 convert_port_to_mo_control(
1971 return MEMORY_OBJECT_CONTROL_NULL
;
1976 convert_mo_control_to_port(
1977 memory_object_control_t control
)
1979 return MACH_PORT_NULL
;
1983 memory_object_control_reference(
1984 memory_object_control_t control
)
 * We only ever issue one of these references, so kill it
 * when that gets released (should switch the real reference
 * counting in true port-less EMMI).
1995 memory_object_control_deallocate(
1996 memory_object_control_t control
)
1998 zfree(mem_obj_control_zone
, (vm_offset_t
)control
);
2002 memory_object_control_disable(
2003 memory_object_control_t control
)
2005 assert(control
->object
!= VM_OBJECT_NULL
);
2006 control
->object
= VM_OBJECT_NULL
;
2010 memory_object_default_reference(
2011 memory_object_default_t dmm
)
2013 ipc_port_make_send(dmm
);
2017 memory_object_default_deallocate(
2018 memory_object_default_t dmm
)
2020 ipc_port_release_send(dmm
);
2024 convert_port_to_memory_object(
2027 return (MEMORY_OBJECT_NULL
);
2032 convert_memory_object_to_port(
2033 memory_object_t object
)
2035 return (MACH_PORT_NULL
);
2039 /* remove after component interface available */
2040 extern int vnode_pager_workaround
;
2041 extern int device_pager_workaround
;
2045 /* Routine memory_object_reference */
2046 void memory_object_reference(
2047 memory_object_t memory_object
)
2049 extern void dp_memory_object_reference(memory_object_t
);
2052 extern void vnode_pager_reference(memory_object_t
);
2053 extern void device_pager_reference(memory_object_t
);
2055 if(memory_object
->pager
== &vnode_pager_workaround
) {
2056 vnode_pager_reference(memory_object
);
2057 } else if(memory_object
->pager
== &device_pager_workaround
) {
2058 device_pager_reference(memory_object
);
2061 dp_memory_object_reference(memory_object
);
2064 /* Routine memory_object_deallocate */
2065 void memory_object_deallocate(
2066 memory_object_t memory_object
)
2068 extern void dp_memory_object_deallocate(memory_object_t
);
2071 extern void vnode_pager_deallocate(memory_object_t
);
2072 extern void device_pager_deallocate(memory_object_t
);
2074 if(memory_object
->pager
== &vnode_pager_workaround
) {
2075 vnode_pager_deallocate(memory_object
);
2076 } else if(memory_object
->pager
== &device_pager_workaround
) {
2077 device_pager_deallocate(memory_object
);
2080 dp_memory_object_deallocate(memory_object
);
2084 /* Routine memory_object_init */
2085 kern_return_t memory_object_init
2087 memory_object_t memory_object
,
2088 memory_object_control_t memory_control
,
2089 vm_size_t memory_object_page_size
2092 extern kern_return_t
dp_memory_object_init(memory_object_t
,
2093 memory_object_control_t
,
2096 extern kern_return_t
vnode_pager_init(memory_object_t
,
2097 memory_object_control_t
,
2099 extern kern_return_t
device_pager_init(memory_object_t
,
2100 memory_object_control_t
,
2103 if(memory_object
->pager
== &vnode_pager_workaround
) {
2104 return vnode_pager_init(memory_object
,
2106 memory_object_page_size
);
2107 } else if(memory_object
->pager
== &device_pager_workaround
) {
2108 return device_pager_init(memory_object
,
2110 memory_object_page_size
);
2113 return dp_memory_object_init(memory_object
,
2115 memory_object_page_size
);
2118 /* Routine memory_object_terminate */
2119 kern_return_t memory_object_terminate
2121 memory_object_t memory_object
2124 extern kern_return_t
dp_memory_object_terminate(memory_object_t
);
2127 extern kern_return_t
vnode_pager_terminate(memory_object_t
);
2128 extern kern_return_t
device_pager_terminate(memory_object_t
);
2130 if(memory_object
->pager
== &vnode_pager_workaround
) {
2131 return vnode_pager_terminate(memory_object
);
2132 } else if(memory_object
->pager
== &device_pager_workaround
) {
2133 return device_pager_terminate(memory_object
);
2136 return dp_memory_object_terminate(memory_object
);
2139 /* Routine memory_object_data_request */
2140 kern_return_t memory_object_data_request
2142 memory_object_t memory_object
,
2143 memory_object_offset_t offset
,
2145 vm_prot_t desired_access
2148 extern kern_return_t
dp_memory_object_data_request(memory_object_t
,
2149 memory_object_offset_t
, vm_size_t
, vm_prot_t
);
2152 extern kern_return_t
vnode_pager_data_request(memory_object_t
,
2153 memory_object_offset_t
, vm_size_t
, vm_prot_t
);
2154 extern kern_return_t
device_pager_data_request(memory_object_t
,
2155 memory_object_offset_t
, vm_size_t
, vm_prot_t
);
2157 if (memory_object
->pager
== &vnode_pager_workaround
) {
2158 return vnode_pager_data_request(memory_object
,
2162 } else if (memory_object
->pager
== &device_pager_workaround
) {
2163 return device_pager_data_request(memory_object
,
2169 return dp_memory_object_data_request(memory_object
,
2175 /* Routine memory_object_data_return */
2176 kern_return_t memory_object_data_return
2178 memory_object_t memory_object
,
2179 memory_object_offset_t offset
,
2182 boolean_t kernel_copy
2185 extern kern_return_t
dp_memory_object_data_return(memory_object_t
,
2186 memory_object_offset_t
,
2191 extern kern_return_t
vnode_pager_data_return(memory_object_t
,
2192 memory_object_offset_t
,
2196 extern kern_return_t
device_pager_data_return(memory_object_t
,
2197 memory_object_offset_t
,
2202 if (memory_object
->pager
== &vnode_pager_workaround
) {
2203 return vnode_pager_data_return(memory_object
,
2208 } else if (memory_object
->pager
== &device_pager_workaround
) {
2209 return device_pager_data_return(memory_object
,
2216 return dp_memory_object_data_return(memory_object
,
2223 /* Routine memory_object_data_initialize */
2224 kern_return_t memory_object_data_initialize
2226 memory_object_t memory_object
,
2227 memory_object_offset_t offset
,
2232 extern kern_return_t
dp_memory_object_data_initialize(memory_object_t
,
2233 memory_object_offset_t
,
2236 extern kern_return_t
vnode_pager_data_initialize(memory_object_t
,
2237 memory_object_offset_t
,
2239 extern kern_return_t
device_pager_data_initialize(memory_object_t
,
2240 memory_object_offset_t
,
2243 if (memory_object
->pager
== &vnode_pager_workaround
) {
2244 return vnode_pager_data_initialize(memory_object
,
2247 } else if (memory_object
->pager
== &device_pager_workaround
) {
2248 return device_pager_data_initialize(memory_object
,
2253 return dp_memory_object_data_initialize(memory_object
,
2258 /* Routine memory_object_data_unlock */
2259 kern_return_t memory_object_data_unlock
2261 memory_object_t memory_object
,
2262 memory_object_offset_t offset
,
2264 vm_prot_t desired_access
2267 extern kern_return_t
dp_memory_object_data_unlock(memory_object_t
,
2268 memory_object_offset_t
,
2272 extern kern_return_t
vnode_pager_data_unlock(memory_object_t
,
2273 memory_object_offset_t
,
2276 extern kern_return_t
device_pager_data_unlock(memory_object_t
,
2277 memory_object_offset_t
,
2281 if (memory_object
->pager
== &vnode_pager_workaround
) {
2282 return vnode_pager_data_unlock(memory_object
,
2286 } else if (memory_object
->pager
== &device_pager_workaround
) {
2287 return device_pager_data_unlock(memory_object
,
2293 return dp_memory_object_data_unlock(memory_object
,
2300 /* Routine memory_object_synchronize */
2301 kern_return_t memory_object_synchronize
2303 memory_object_t memory_object
,
2304 memory_object_offset_t offset
,
2306 vm_sync_t sync_flags
2309 extern kern_return_t
dp_memory_object_data_synchronize(memory_object_t
,
2310 memory_object_offset_t
,
2314 extern kern_return_t
vnode_pager_data_synchronize(memory_object_t
,
2315 memory_object_offset_t
,
2318 extern kern_return_t
device_pager_data_synchronize(memory_object_t
,
2319 memory_object_offset_t
,
2323 if (memory_object
->pager
== &vnode_pager_workaround
) {
2324 return vnode_pager_synchronize(
2329 } else if (memory_object
->pager
== &device_pager_workaround
) {
2330 return device_pager_synchronize(
2337 return dp_memory_object_synchronize(
2344 /* Routine memory_object_unmap */
2345 kern_return_t memory_object_unmap
2347 memory_object_t memory_object
2350 extern kern_return_t
dp_memory_object_unmap(memory_object_t
);
2352 extern kern_return_t
vnode_pager_unmap(memory_object_t
);
2353 extern kern_return_t
device_pager_unmap(memory_object_t
);
2355 if (memory_object
->pager
== &vnode_pager_workaround
) {
2356 return vnode_pager_unmap(memory_object
);
2357 } else if (memory_object
->pager
== &device_pager_workaround
) {
2358 return device_pager_unmap(memory_object
);
2361 return dp_memory_object_unmap(memory_object
);
2364 /* Routine memory_object_create */
2365 kern_return_t memory_object_create
2367 memory_object_default_t default_memory_manager
,
2368 vm_size_t new_memory_object_size
,
2369 memory_object_t
*new_memory_object
2372 extern kern_return_t
default_pager_memory_object_create(memory_object_default_t
,
2376 return default_pager_memory_object_create(default_memory_manager
,
2377 new_memory_object_size
,