/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  File:   vm/memory_object.c
 *  Author: Michael Wayne Young
 *
 *  External memory management interface control functions.
 */
#include <advisory_pageout.h>

/*
 *  Interface dependencies:
 */

#include <mach/std_types.h>     /* For pointer_t */
#include <mach/mach_types.h>

#include <mach/kern_return.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/host_priv_server.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/message.h>

/*
 *  Implementation dependencies:
 */
#include <string.h>             /* For memcpy() */

#include <kern/host.h>
#include <kern/thread.h>        /* For current_thread() */
#include <kern/ipc_mig.h>
#include <kern/misc_protos.h>

#include <vm/vm_object.h>
#include <vm/vm_fault.h>
#include <vm/memory_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>            /* For pmap_clear_modify */
#include <vm/vm_kern.h>         /* For kernel_map, vm_move */
#include <vm/vm_map.h>          /* For vm_map_pageable */
#include <vm/vm_purgeable_internal.h>   /* Needed by some vm_page.h macros */
#include <vm/vm_shared_region.h>

#include <vm/vm_external.h>

#include <vm/vm_protos.h>
memory_object_default_t memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
decl_lck_mtx_data(,     memory_manager_default_lock)
/*
 *  Routine:    memory_object_should_return_page
 *
 *  Description:
 *      Determine whether the given page should be returned,
 *      based on the page's state and on the given return policy.
 *
 *      We should return the page if one of the following is true:
 *
 *      1. Page is dirty and should_return is not RETURN_NONE.
 *      2. Page is precious and should_return is RETURN_ALL.
 *      3. Should_return is RETURN_ANYTHING.
 *
 *      As a side effect, m->dirty will be made consistent
 *      with pmap_is_modified(m), if should_return is not
 *      MEMORY_OBJECT_RETURN_NONE.
 */

#define memory_object_should_return_page(m, should_return) \
    (should_return != MEMORY_OBJECT_RETURN_NONE && \
     (((m)->dirty || ((m)->dirty = pmap_is_modified((m)->phys_page))) || \
      ((m)->precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
      (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))
typedef int memory_object_lock_result_t;

#define MEMORY_OBJECT_LOCK_RESULT_DONE          0
#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK    1
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN   2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_FREE     3
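/*
 * How these results are consumed (see the switch on page_lock_result in
 * vm_object_update_extent() below):
 *
 *  DONE        - nothing further to do for this page
 *  MUST_BLOCK  - the page is busy/cleaning; sleep on it and re-examine it
 *  MUST_RETURN - the page joins the current run being pushed to the pager
 *  MUST_FREE   - the page is queued on the delayed-work list to be freed
 */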
memory_object_lock_result_t memory_object_lock_page(
                vm_page_t               m,
                memory_object_return_t  should_return,
                boolean_t               should_flush,
                vm_prot_t               prot);
/*
 *  Routine:    memory_object_lock_page
 *
 *  Description:
 *      Perform the appropriate lock operations on the
 *      given page.  See the description of
 *      "memory_object_lock_request" for the meanings
 *      of the arguments.
 *
 *      Returns an indication that the operation
 *      completed, blocked, or that the page must
 *      be cleaned.
 */
memory_object_lock_result_t
memory_object_lock_page(
    vm_page_t               m,
    memory_object_return_t  should_return,
    boolean_t               should_flush,
    vm_prot_t               prot)
{
    XPR(XPR_MEMORY_OBJECT,
        "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
        m, should_return, should_flush, prot, 0);

    if (m->busy || m->cleaning)
        return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);

    if (m->laundry)
        vm_pageout_steal_laundry(m, FALSE);

    /*
     *  Don't worry about pages for which the kernel
     *  does not have any data.
     */
    if (m->absent || m->error || m->restart) {
        if (m->error && should_flush && !VM_PAGE_WIRED(m)) {
            /*
             * dump the page, pager wants us to
             * clean it up and there is no
             * relevant data to return
             */
            return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
        }
        return (MEMORY_OBJECT_LOCK_RESULT_DONE);
    }
    assert(!m->fictitious);

    if (VM_PAGE_WIRED(m)) {
        /*
         * The page is wired... just clean or return the page if needed.
         * Wired pages don't get flushed or disconnected from the pmap.
         */
        if (memory_object_should_return_page(m, should_return))
            return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);

        return (MEMORY_OBJECT_LOCK_RESULT_DONE);
    }

    if (should_flush) {
        /*
         * must do the pmap_disconnect before determining the
         * need to return the page... otherwise it's possible
         * for the page to go from the clean to the dirty state
         * after we've made our decision
         */
        if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED) {
            SET_PAGE_DIRTY(m, FALSE);
        }
    } else {
        /*
         * If we are decreasing permission, do it now;
         * let the fault handler take care of increases
         * (pmap_page_protect may not increase protection).
         */
        if (prot != VM_PROT_NO_CHANGE)
            pmap_page_protect(m->phys_page, VM_PROT_ALL & ~prot);
    }

    /*
     *  Handle returning dirty or precious pages
     */
    if (memory_object_should_return_page(m, should_return)) {
        /*
         * we used to do a pmap_disconnect here in support
         * of memory_object_lock_request, but that routine
         * no longer requires this...  in any event, in
         * our world, it would turn into a big noop since
         * we don't lock the page in any way and as soon
         * as we drop the object lock, the page can be
         * faulted back into an address space
         *
         *  pmap_disconnect(m->phys_page);
         */
        return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
    }

    /*
     *  Handle flushing clean pages
     */
    if (should_flush)
        return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);

    /*
     * we used to deactivate clean pages at this point,
     * but we do not believe that an msync should change
     * the 'age' of a page in the cache... here is the
     * original comment and code concerning this...
     *
     *  XXX Make clean but not flush a paging hint,
     *  and deactivate the pages.  This is a hack
     *  because it overloads flush/clean with
     *  implementation-dependent meaning.  This only
     *  happens to pages that are already clean.
     *
     *  if (vm_page_deactivate_hint && (should_return != MEMORY_OBJECT_RETURN_NONE))
     *      return (MEMORY_OBJECT_LOCK_RESULT_MUST_DEACTIVATE);
     */

    return (MEMORY_OBJECT_LOCK_RESULT_DONE);
}
/*
 *  Routine:    memory_object_lock_request [user interface]
 *
 *  Description:
 *      Control use of the data associated with the given
 *      memory object.  For each page in the given range,
 *      perform the following operations, in order:
 *          1)  restrict access to the page (disallow
 *              forms specified by "prot");
 *          2)  return data to the manager (if "should_return"
 *              is RETURN_DIRTY and the page is dirty, or
 *              "should_return" is RETURN_ALL and the page
 *              is either dirty or precious); and,
 *          3)  flush the cached copy (if "should_flush"
 *              is asserted).
 *      The set of pages is defined by a starting offset
 *      ("offset") and size ("size").  Only pages with the
 *      same page alignment as the starting offset are
 *      considered.
 *
 *      A single acknowledgement is sent (to the "reply_to"
 *      port) when these actions are complete.  If successful,
 *      the naked send right for reply_to is consumed.
 */
kern_return_t
memory_object_lock_request(
    memory_object_control_t control,
    memory_object_offset_t  offset,
    memory_object_size_t    size,
    memory_object_offset_t  *resid_offset,
    int                     *io_errno,
    memory_object_return_t  should_return,
    int                     flags,
    vm_prot_t               prot)
{
    vm_object_t object;

    /*
     *  Check for bogus arguments.
     */
    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
        return (KERN_INVALID_ARGUMENT);

    size = round_page_64(size);

    /*
     *  Lock the object, and acquire a paging reference to
     *  prevent the memory_object reference from being released.
     */
    vm_object_lock(object);
    vm_object_paging_begin(object);

    if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
        if ((should_return != MEMORY_OBJECT_RETURN_NONE) || offset || object->copy) {
            flags &= ~MEMORY_OBJECT_DATA_FLUSH_ALL;
            flags |= MEMORY_OBJECT_DATA_FLUSH;
        }
    }
    offset -= object->paging_offset;

    if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL)
        vm_object_reap_pages(object, REAP_DATA_FLUSH);
    else
        (void)vm_object_update(object, offset, size, resid_offset,
                               io_errno, should_return, flags, prot);

    vm_object_paging_end(object);
    vm_object_unlock(object);

    return (KERN_SUCCESS);
}
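/*
 * Usage sketch (not part of the original source): a caller that wants every
 * dirty page in the first megabyte of a memory object written back to its
 * pager and then evicted could issue something like:
 *
 *  kr = memory_object_lock_request(control,
 *          (memory_object_offset_t) 0,
 *          (memory_object_size_t) (1024 * 1024),
 *          NULL, NULL,
 *          MEMORY_OBJECT_RETURN_DIRTY,
 *          MEMORY_OBJECT_DATA_FLUSH,
 *          VM_PROT_NO_CHANGE);
 *
 * The flag and return-policy constants are the same ones used elsewhere in
 * this file; the surrounding call site itself is hypothetical.
 */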
/*
 *  memory_object_release_name:  [interface]
 *
 *  Enforces name semantic on memory_object reference count decrement
 *  This routine should not be called unless the caller holds a name
 *  reference gained through the memory_object_named_create or the
 *  memory_object_rename call.
 *  If the TERMINATE_IDLE flag is set, the call will return if the
 *  reference count is not 1. i.e. idle with the only remaining reference
 *  being the name.
 *  If the decision is made to proceed the name field flag is set to
 *  false and the reference count is decremented.  If the RESPECT_CACHE
 *  flag is set and the reference count has gone to zero, the
 *  memory_object is checked to see if it is cacheable otherwise when
 *  the reference count is zero, it is simply terminated.
 */
kern_return_t
memory_object_release_name(
    memory_object_control_t control,
    int                     flags)
{
    vm_object_t object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    return vm_object_release_name(object, flags);
}
/*
 *  Routine:    memory_object_destroy [user interface]
 *  Purpose:
 *      Shut down a memory object, despite the
 *      presence of address map (or other) references
 *      to the vm_object.
 */
kern_return_t
memory_object_destroy(
    memory_object_control_t control,
    kern_return_t           reason)
{
    vm_object_t object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    return (vm_object_destroy(object, reason));
}
/*
 *  Routine:    vm_object_sync
 *
 *  Kernel internal function to synch out pages in a given
 *  range within an object to its memory manager.  Much the
 *  same as memory_object_lock_request but page protection
 *  is not changed.
 *
 *  If the should_flush and should_return flags are true pages
 *  are flushed, that is dirty & precious pages are written to
 *  the memory manager and then discarded.  If should_return
 *  is false, only precious pages are returned to the memory
 *  manager.
 *
 *  If should flush is false and should_return true, the memory
 *  manager's copy of the pages is updated.  If should_return
 *  is also false, only the precious pages are updated.  This
 *  last option is of limited utility.
 *
 *  Returns:
 *      FALSE   if no pages were returned to the pager
 *      TRUE    otherwise.
 */
boolean_t
vm_object_sync(
    vm_object_t             object,
    vm_object_offset_t      offset,
    vm_object_size_t        size,
    boolean_t               should_flush,
    boolean_t               should_return,
    boolean_t               should_iosync)
{
    boolean_t   rv;
    int         flags;

    XPR(XPR_VM_OBJECT,
        "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
        object, offset, size, should_flush, should_return);

    /*
     * Lock the object, and acquire a paging reference to
     * prevent the memory_object and control ports from
     * being destroyed.
     */
    vm_object_lock(object);
    vm_object_paging_begin(object);

    if (should_flush) {
        flags = MEMORY_OBJECT_DATA_FLUSH;
        /*
         * This flush is from an msync(), not a truncate(), so the
         * contents of the file are not affected.
         * MEMORY_OBJECT_DATA_NO_CHANGE lets vm_object_update() know
         * that the data is not changed and that there's no need to
         * push the old contents to a copy object.
         */
        flags |= MEMORY_OBJECT_DATA_NO_CHANGE;
    } else
        flags = 0;

    if (should_iosync)
        flags |= MEMORY_OBJECT_IO_SYNC;

    rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL,
                          (should_return) ?
                           MEMORY_OBJECT_RETURN_ALL :
                           MEMORY_OBJECT_RETURN_NONE,
                          flags,
                          VM_PROT_NO_CHANGE);

    vm_object_paging_end(object);
    vm_object_unlock(object);
    return rv;
}
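/*
 * Usage sketch (not from the original source): an msync()-style caller that
 * wants dirty pages written back synchronously and then flushed would invoke
 *
 *  (void) vm_object_sync(object, offset, len,
 *                        TRUE, TRUE, TRUE);
 *
 * i.e. should_flush, should_return and should_iosync all asserted, which
 * becomes a vm_object_update() call with MEMORY_OBJECT_DATA_FLUSH,
 * MEMORY_OBJECT_DATA_NO_CHANGE and MEMORY_OBJECT_IO_SYNC set, as computed
 * above.  The call site and range are hypothetical.
 */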
#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, po, ro, ioerr, iosync)     \
MACRO_BEGIN                                                                 \
                                                                            \
    int                 upl_flags;                                          \
    memory_object_t     pager;                                              \
                                                                            \
    if (object->object_slid) {                                              \
        panic("Objects with slid pages not allowed\n");                     \
    }                                                                       \
                                                                            \
    if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) {                  \
        vm_object_paging_begin(object);                                     \
        vm_object_unlock(object);                                           \
                                                                            \
        if (iosync)                                                         \
            upl_flags = UPL_MSYNC | UPL_IOSYNC;                             \
        else                                                                \
            upl_flags = UPL_MSYNC;                                          \
                                                                            \
        (void) memory_object_data_return(pager,                             \
            po,                                                             \
            (memory_object_cluster_size_t)data_cnt,                         \
            ro,                                                             \
            ioerr,                                                          \
            FALSE,                                                          \
            FALSE,                                                          \
            upl_flags);                                                     \
                                                                            \
        vm_object_lock(object);                                             \
        vm_object_paging_end(object);                                       \
    }                                                                       \
MACRO_END
static int
vm_object_update_extent(
    vm_object_t             object,
    vm_object_offset_t      offset,
    vm_object_offset_t      offset_end,
    vm_object_offset_t      *offset_resid,
    int                     *io_errno,
    boolean_t               should_flush,
    memory_object_return_t  should_return,
    boolean_t               should_iosync,
    vm_prot_t               prot)
{
    vm_page_t   m;
    int         retval = 0;
    vm_object_offset_t  paging_offset = 0;
    vm_object_offset_t  next_offset = offset;
    memory_object_lock_result_t     page_lock_result;
    memory_object_cluster_size_t    data_cnt = 0;
    struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
    struct vm_page_delayed_work     *dwp;
    int         dw_count;
    int         dw_limit;

    dwp = &dw_array[0];
    dw_count = 0;
    dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

    for (;
         offset < offset_end && object->resident_page_count;
         offset += PAGE_SIZE_64) {

        /*
         * Limit the number of pages to be cleaned at once to a contiguous
         * run, or at most MAX_UPL_TRANSFER size
         */
        if (data_cnt) {
            if ((data_cnt >= PAGE_SIZE * MAX_UPL_TRANSFER) || (next_offset != offset)) {

                if (dw_count) {
                    vm_page_do_delayed_work(object, &dw_array[0], dw_count);
                    dwp = &dw_array[0];
                    dw_count = 0;
                }
                LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
                                       paging_offset, offset_resid, io_errno, should_iosync);
                data_cnt = 0;
            }
        }
        while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {

            dwp->dw_mask = 0;

            page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);

            if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) {
                /*
                 *  End of a run of dirty/precious pages.
                 */
                if (dw_count) {
                    vm_page_do_delayed_work(object, &dw_array[0], dw_count);
                    dwp = &dw_array[0];
                    dw_count = 0;
                }
                LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
                                       paging_offset, offset_resid, io_errno, should_iosync);
                /*
                 * LIST_REQ_PAGEOUT_PAGES will drop the object lock which will
                 * allow the state of page 'm' to change... we need to re-lookup
                 * the current offset
                 */
                data_cnt = 0;
                continue;
            }

            switch (page_lock_result) {

            case MEMORY_OBJECT_LOCK_RESULT_DONE:
                break;

            case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
                dwp->dw_mask |= DW_vm_page_free;
                break;

            case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
                PAGE_SLEEP(object, m, THREAD_UNINT);
                continue;

            case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
                if (data_cnt == 0)
                    paging_offset = offset;

                data_cnt += PAGE_SIZE;
                next_offset = offset + PAGE_SIZE_64;

                /*
                 * wired pages shouldn't be flushed and
                 * since they aren't on any queue,
                 * no need to remove them
                 */
                if (!VM_PAGE_WIRED(m)) {

                    if (should_flush) {
                        /*
                         * add additional state for the flush
                         */
                        m->pageout = TRUE;
                    }
                    /*
                     * we used to remove the page from the queues at this
                     * point, but we do not believe that an msync
                     * should cause the 'age' of a page to be changed
                     *
                     *  dwp->dw_mask |= DW_VM_PAGE_QUEUES_REMOVE;
                     */
                }
                retval = 1;
                break;
            }
            if (dwp->dw_mask) {
                VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

                if (dw_count >= dw_limit) {
                    vm_page_do_delayed_work(object, &dw_array[0], dw_count);
                    dwp = &dw_array[0];
                    dw_count = 0;
                }
            }
            break;
        }
    }
    /*
     *  We have completed the scan for applicable pages.
     *  Clean any pages that have been saved.
     */
    if (dw_count)
        vm_page_do_delayed_work(object, &dw_array[0], dw_count);

    if (data_cnt) {
        LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
                               paging_offset, offset_resid, io_errno, should_iosync);
    }
    return (retval);
}
/*
 *  Routine:    vm_object_update
 *  Description:
 *      Work function for m_o_lock_request(), vm_o_sync().
 *
 *      Called with object locked and paging ref taken.
 */
kern_return_t
vm_object_update(
    vm_object_t             object,
    vm_object_offset_t      offset,
    vm_object_size_t        size,
    vm_object_offset_t      *resid_offset,
    int                     *io_errno,
    memory_object_return_t  should_return,
    int                     flags,
    vm_prot_t               protection)
{
    vm_object_t         copy_object = VM_OBJECT_NULL;
    boolean_t           data_returned = FALSE;
    boolean_t           update_cow;
    boolean_t           should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE;
    boolean_t           should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE;
    vm_fault_return_t   result;
    int                 num_of_extents;
    int                 n;
#define MAX_EXTENTS     8
#define EXTENT_SIZE     (1024 * 1024 * 256)
#define RESIDENT_LIMIT  (1024 * 32)
    struct extent {
        vm_object_offset_t e_base;
        vm_object_offset_t e_min;
        vm_object_offset_t e_max;
    } extents[MAX_EXTENTS];
    /*
     * To avoid blocking while scanning for pages, save
     * dirty pages to be cleaned all at once.
     *
     * XXXO A similar strategy could be used to limit the
     * number of times that a scan must be restarted for
     * other reasons.  Those pages that would require blocking
     * could be temporarily collected in another list, or
     * their offsets could be recorded in a small array.
     */

    /*
     * XXX  NOTE: May want to consider converting this to a page list
     * XXX  vm_map_copy interface.  Need to understand object
     * XXX  coalescing implications before doing so.
     */

    update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
                  && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
                      !(flags & MEMORY_OBJECT_DATA_PURGE)))
                 || (flags & MEMORY_OBJECT_COPY_SYNC);
    if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) {
        int collisions = 0;

        while ((copy_object = object->copy) != VM_OBJECT_NULL) {
            /*
             * need to do a try here since we're swimming upstream
             * against the normal lock ordering... however, we need
             * to hold the object stable until we gain control of the
             * copy object so we have to be careful how we approach this
             */
            if (vm_object_lock_try(copy_object)) {
                /*
                 * we 'won' the lock on the copy object...
                 * no need to hold the object lock any longer...
                 * take a real reference on the copy object because
                 * we're going to call vm_fault_page on it which may
                 * under certain conditions drop the lock and the paging
                 * reference we're about to take... the reference
                 * will keep the copy object from going away if that happens
                 */
                vm_object_unlock(object);
                vm_object_reference_locked(copy_object);
                break;
            }
            vm_object_unlock(object);

            collisions++;
            mutex_pause(collisions);

            vm_object_lock(object);
        }
    }
    if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) {
        vm_map_size_t       i;
        vm_map_size_t       copy_size;
        vm_map_offset_t     copy_offset;
        vm_prot_t           prot;
        vm_page_t           page;
        vm_page_t           top_page;
        kern_return_t       error = 0;
        struct vm_object_fault_info fault_info;

        if (copy_object != VM_OBJECT_NULL) {
            /*
             * translate offset with respect to shadow's offset
             */
            copy_offset = (offset >= copy_object->vo_shadow_offset) ?
                (vm_map_offset_t)(offset - copy_object->vo_shadow_offset) :
                (vm_map_offset_t) 0;

            if (copy_offset > copy_object->vo_size)
                copy_offset = copy_object->vo_size;

            /*
             * clip size with respect to shadow offset
             */
            if (offset >= copy_object->vo_shadow_offset) {
                copy_size = size;
            } else if (size >= copy_object->vo_shadow_offset - offset) {
                copy_size = size - (copy_object->vo_shadow_offset - offset);
            } else {
                copy_size = 0;
            }

            if (copy_offset + copy_size > copy_object->vo_size) {
                if (copy_object->vo_size >= copy_offset) {
                    copy_size = copy_object->vo_size - copy_offset;
                } else {
                    copy_size = 0;
                }
            }
            copy_size += copy_offset;

        } else {
            copy_object = object;

            copy_size   = offset + size;
            copy_offset = offset;
        }
        fault_info.interruptible = THREAD_UNINT;
        fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
        fault_info.user_tag = 0;
        fault_info.lo_offset = copy_offset;
        fault_info.hi_offset = copy_size;
        fault_info.no_cache = FALSE;
        fault_info.stealth = TRUE;
        fault_info.io_sync = FALSE;
        fault_info.cs_bypass = FALSE;
        fault_info.mark_zf_absent = FALSE;
        fault_info.batch_pmap_op = FALSE;
        vm_object_paging_begin(copy_object);

        for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
    RETRY_COW_OF_LOCK_REQUEST:
            fault_info.cluster_size = (vm_size_t) (copy_size - i);
            assert(fault_info.cluster_size == copy_size - i);

            prot = VM_PROT_WRITE|VM_PROT_READ;
            page = VM_PAGE_NULL;
            result = vm_fault_page(copy_object, i,
                                   VM_PROT_WRITE|VM_PROT_READ,
                                   FALSE,
                                   FALSE, /* page not looked up */
                                   &prot,
                                   &page,
                                   &top_page,
                                   (int *)0,
                                   &error,
                                   FALSE,
                                   FALSE, &fault_info);

            switch (result) {
            case VM_FAULT_SUCCESS:
                if (top_page) {
                    vm_fault_cleanup(
                        page->object, top_page);
                    vm_object_lock(copy_object);
                    vm_object_paging_begin(copy_object);
                }
                if (!page->active &&
                    !page->inactive &&
                    !page->throttled) {
                    vm_page_lockspin_queues();
                    if (!page->active &&
                        !page->inactive &&
                        !page->throttled)
                        vm_page_deactivate(page);
                    vm_page_unlock_queues();
                }
                PAGE_WAKEUP_DONE(page);
                break;
            case VM_FAULT_RETRY:
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_INTERRUPTED:
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_MEMORY_SHORTAGE:
                VM_PAGE_WAIT();
                prot = VM_PROT_WRITE|VM_PROT_READ;
                vm_object_lock(copy_object);
                vm_object_paging_begin(copy_object);
                goto RETRY_COW_OF_LOCK_REQUEST;
            case VM_FAULT_SUCCESS_NO_VM_PAGE:
                /* success but no VM page: fail */
                vm_object_paging_end(copy_object);
                vm_object_unlock(copy_object);
                /*FALLTHROUGH*/
            case VM_FAULT_MEMORY_ERROR:
                if (object != copy_object)
                    vm_object_deallocate(copy_object);
                vm_object_lock(object);
                goto BYPASS_COW_COPYIN;
            default:
                panic("vm_object_update: unexpected error 0x%x"
                      " from vm_fault_page()\n", result);
            }
        }
        vm_object_paging_end(copy_object);
    }
    if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
        if (copy_object != VM_OBJECT_NULL && copy_object != object) {
            vm_object_unlock(copy_object);
            vm_object_deallocate(copy_object);
            vm_object_lock(object);
        }
        return KERN_SUCCESS;
    }
    if (copy_object != VM_OBJECT_NULL && copy_object != object) {
        if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
            copy_object->shadow_severed = TRUE;
            copy_object->shadowed = FALSE;
            copy_object->shadow = NULL;
            /*
             * delete the ref the COW was holding on the target object
             */
            vm_object_deallocate(object);
        }
        vm_object_unlock(copy_object);
        vm_object_deallocate(copy_object);
        vm_object_lock(object);
    }
BYPASS_COW_COPYIN:
    /*
     * when we have a really large range to check relative
     * to the number of actual resident pages, we'd like
     * to use the resident page list to drive our checks
     * however, the object lock will get dropped while processing
     * the page which means the resident queue can change which
     * means we can't walk the queue as we process the pages
     * we also want to do the processing in offset order to allow
     * 'runs' of pages to be collected if we're being told to
     * flush to disk... the resident page queue is NOT ordered.
     *
     * a temporary solution (until we figure out how to deal with
     * large address spaces more generically) is to pre-flight
     * the resident page queue (if it's small enough) and develop
     * a collection of extents (that encompass actual resident pages)
     * to visit.  This will at least allow us to deal with some of the
     * more pathological cases in a more efficient manner.  The current
     * worst case (a single resident page at the end of an extremely large
     * range) can take minutes to complete for ranges in the terabyte
     * category... since this routine is called when truncating a file,
     * and we currently support files up to 16 Tbytes in size, this
     * is not a theoretical problem
     */
    if ((object->resident_page_count < RESIDENT_LIMIT) &&
        (atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) {
        vm_page_t           next;
        vm_object_offset_t  start;
        vm_object_offset_t  end;
        vm_object_size_t    e_mask;
        vm_page_t           m;

        start = offset;
        end   = offset + size;
        num_of_extents = 0;
        e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));

        m = (vm_page_t) queue_first(&object->memq);

        while (!queue_end(&object->memq, (queue_entry_t) m)) {
            next = (vm_page_t) queue_next(&m->listq);

            if ((m->offset >= start) && (m->offset < end)) {
                /*
                 * this is a page we're interested in
                 * try to fit it into a current extent
                 */
                for (n = 0; n < num_of_extents; n++) {
                    if ((m->offset & e_mask) == extents[n].e_base) {
                        /*
                         * use (PAGE_SIZE - 1) to determine the
                         * max offset so that we don't wrap if
                         * we're at the last page of the space
                         */
                        if (m->offset < extents[n].e_min)
                            extents[n].e_min = m->offset;
                        else if ((m->offset + (PAGE_SIZE - 1)) > extents[n].e_max)
                            extents[n].e_max = m->offset + (PAGE_SIZE - 1);
                        break;
                    }
                }
                if (n == num_of_extents) {
                    /*
                     * didn't find a current extent that can encompass
                     * this page
                     */
                    if (n < MAX_EXTENTS) {
                        /*
                         * if we still have room,
                         * create a new extent
                         */
                        extents[n].e_base = m->offset & e_mask;
                        extents[n].e_min  = m->offset;
                        extents[n].e_max  = m->offset + (PAGE_SIZE - 1);

                        num_of_extents++;
                    } else {
                        /*
                         * no room to create a new extent...
                         * fall back to a single extent based
                         * on the min and max page offsets
                         * we find in the range we're interested in...
                         * first, look through the extent list and
                         * develop the overall min and max for the
                         * pages we've looked at up to this point
                         */
                        for (n = 1; n < num_of_extents; n++) {
                            if (extents[n].e_min < extents[0].e_min)
                                extents[0].e_min = extents[n].e_min;
                            if (extents[n].e_max > extents[0].e_max)
                                extents[0].e_max = extents[n].e_max;
                        }
                        /*
                         * now setup to run through the remaining pages
                         * to determine the overall min and max
                         * offset for the specified range
                         */
                        extents[0].e_base = 0;
                        e_mask = 0;
                        num_of_extents = 1;

                        /*
                         * by continuing, we'll reprocess the
                         * page that forced us to abandon trying
                         * to develop multiple extents
                         */
                        continue;
                    }
                }
            }
            m = next;
        }
    } else {
        extents[0].e_min = offset;
        extents[0].e_max = offset + (size - 1);

        num_of_extents = 1;
    }
    for (n = 0; n < num_of_extents; n++) {
        if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
                                    should_flush, should_return, should_iosync, protection))
            data_returned = TRUE;
    }
    return (data_returned);
}
/*
 *  Routine:    memory_object_synchronize_completed [user interface]
 *
 *  Tell kernel that previously synchronized data
 *  (memory_object_synchronize) has been queued or placed on the
 *  backing storage.
 *
 *  Note: there may be multiple synchronize requests for a given
 *  memory object outstanding but they will not overlap.
 */
kern_return_t
memory_object_synchronize_completed(
    memory_object_control_t control,
    memory_object_offset_t  offset,
    memory_object_size_t    length)
{
    vm_object_t object;
    msync_req_t msr;

    object = memory_object_control_to_vm_object(control);

    XPR(XPR_MEMORY_OBJECT,
        "m_o_sync_completed, object 0x%X, offset 0x%X length 0x%X\n",
        object, offset, length, 0, 0);

    /*
     *  Look for bogus arguments
     */

    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    vm_object_lock(object);

    /*
     *  search for sync request structure
     */
    queue_iterate(&object->msr_q, msr, msync_req_t, msr_q) {
        if (msr->offset == offset && msr->length == length) {
            queue_remove(&object->msr_q, msr, msync_req_t, msr_q);
            break;
        }
    }/* queue_iterate */

    if (queue_end(&object->msr_q, (queue_entry_t)msr)) {
        vm_object_unlock(object);
        return KERN_INVALID_ARGUMENT;
    }

    msr_lock(msr);
    vm_object_unlock(object);
    msr->flag = VM_MSYNC_DONE;
    msr_unlock(msr);
    thread_wakeup((event_t) msr);

    return KERN_SUCCESS;
}/* memory_object_synchronize_completed */
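/*
 * Usage sketch (not from the original source): an external pager that has
 * finished writing out the range handed to it by memory_object_synchronize()
 * acknowledges completion back to the kernel with the same offset/length
 * pair, e.g.
 *
 *  (void) memory_object_synchronize_completed(control, offset, length);
 *
 * The (offset, length) pair must match an outstanding msync_req queued on
 * the object, otherwise the call fails with KERN_INVALID_ARGUMENT as above.
 */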
static kern_return_t
vm_object_set_attributes_common(
    vm_object_t                     object,
    boolean_t                       may_cache,
    memory_object_copy_strategy_t   copy_strategy,
    boolean_t                       temporary,
    __unused boolean_t              silent_overwrite,
    boolean_t                       advisory_pageout)
{
    boolean_t   object_became_ready;

    XPR(XPR_MEMORY_OBJECT,
        "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
        object, (may_cache&1)|((temporary&1)<<1), copy_strategy, 0, 0);

    if (object == VM_OBJECT_NULL)
        return(KERN_INVALID_ARGUMENT);

    /*
     *  Verify the attributes of importance
     */

    switch(copy_strategy) {
    case MEMORY_OBJECT_COPY_NONE:
    case MEMORY_OBJECT_COPY_DELAY:
        break;
    default:
        return(KERN_INVALID_ARGUMENT);
    }

#if !ADVISORY_PAGEOUT
    if (silent_overwrite || advisory_pageout)
        return(KERN_INVALID_ARGUMENT);

#endif  /* !ADVISORY_PAGEOUT */

    if (may_cache)
        may_cache = TRUE;
    if (temporary)
        temporary = TRUE;

    vm_object_lock(object);

    /*
     *  Copy the attributes
     */
    assert(!object->internal);
    object_became_ready = !object->pager_ready;
    object->copy_strategy = copy_strategy;
    object->can_persist = may_cache;
    object->temporary = temporary;
//  object->silent_overwrite = silent_overwrite;
    object->advisory_pageout = advisory_pageout;

    /*
     *  Wake up anyone waiting for the ready attribute
     *  to become asserted.
     */

    if (object_became_ready) {
        object->pager_ready = TRUE;
        vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
    }

    vm_object_unlock(object);

    return(KERN_SUCCESS);
}
/*
 *  Set the memory object attribute as provided.
 *
 *  XXX This routine cannot be completed until the vm_msync, clean
 *      in place, and cluster work is completed. See ifdef notyet
 *      below and note that vm_object_set_attributes_common()
 *      may have to be expanded.
 */
kern_return_t
memory_object_change_attributes(
    memory_object_control_t control,
    memory_object_flavor_t  flavor,
    memory_object_info_t    attributes,
    mach_msg_type_number_t  count)
{
    vm_object_t                     object;
    kern_return_t                   result = KERN_SUCCESS;
    boolean_t                       temporary;
    boolean_t                       may_cache;
    boolean_t                       invalidate;
    memory_object_copy_strategy_t   copy_strategy;
    boolean_t                       silent_overwrite;
    boolean_t                       advisory_pageout;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    vm_object_lock(object);

    temporary = object->temporary;
    may_cache = object->can_persist;
    copy_strategy = object->copy_strategy;
//  silent_overwrite = object->silent_overwrite;
    silent_overwrite = FALSE;
    advisory_pageout = object->advisory_pageout;
#if notyet
    invalidate = object->invalidate;
#endif
    vm_object_unlock(object);

    switch (flavor) {
        case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
        {
            old_memory_object_behave_info_t behave;

            if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            behave = (old_memory_object_behave_info_t) attributes;

            temporary = behave->temporary;
            invalidate = behave->invalidate;
            copy_strategy = behave->copy_strategy;

            break;
        }

        case MEMORY_OBJECT_BEHAVIOR_INFO:
        {
            memory_object_behave_info_t behave;

            if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            behave = (memory_object_behave_info_t) attributes;

            temporary = behave->temporary;
            invalidate = behave->invalidate;
            copy_strategy = behave->copy_strategy;
            silent_overwrite = behave->silent_overwrite;
            advisory_pageout = behave->advisory_pageout;
            break;
        }

        case MEMORY_OBJECT_PERFORMANCE_INFO:
        {
            memory_object_perf_info_t   perf;

            if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            perf = (memory_object_perf_info_t) attributes;

            may_cache = perf->may_cache;

            break;
        }

        case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
        {
            old_memory_object_attr_info_t   attr;

            if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            attr = (old_memory_object_attr_info_t) attributes;

            may_cache = attr->may_cache;
            copy_strategy = attr->copy_strategy;

            break;
        }

        case MEMORY_OBJECT_ATTRIBUTE_INFO:
        {
            memory_object_attr_info_t   attr;

            if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
                result = KERN_INVALID_ARGUMENT;
                break;
            }

            attr = (memory_object_attr_info_t) attributes;

            copy_strategy = attr->copy_strategy;
            may_cache = attr->may_cache_object;
            temporary = attr->temporary;

            break;
        }

        default:
            result = KERN_INVALID_ARGUMENT;
            break;
    }

    if (result != KERN_SUCCESS)
        return(result);

    if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
        copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        temporary = TRUE;
    } else {
        temporary = FALSE;
    }

    /*
     * XXX  may_cache may become a tri-valued variable to handle
     * XXX  uncache if not in use.
     */
    return (vm_object_set_attributes_common(object,
                                            may_cache,
                                            copy_strategy,
                                            temporary,
                                            silent_overwrite,
                                            advisory_pageout));
}
kern_return_t
memory_object_get_attributes(
    memory_object_control_t control,
    memory_object_flavor_t  flavor,
    memory_object_info_t    attributes,     /* pointer to OUT array */
    mach_msg_type_number_t  *count)         /* IN/OUT */
{
    kern_return_t   ret = KERN_SUCCESS;
    vm_object_t     object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    vm_object_lock(object);

    switch (flavor) {
        case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
        {
            old_memory_object_behave_info_t behave;

            if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
                ret = KERN_INVALID_ARGUMENT;
                break;
            }

            behave = (old_memory_object_behave_info_t) attributes;
            behave->copy_strategy = object->copy_strategy;
            behave->temporary = object->temporary;
#if notyet  /* remove when vm_msync complies and clean in place fini */
            behave->invalidate = object->invalidate;
#else
            behave->invalidate = FALSE;
#endif

            *count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
            break;
        }

        case MEMORY_OBJECT_BEHAVIOR_INFO:
        {
            memory_object_behave_info_t behave;

            if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
                ret = KERN_INVALID_ARGUMENT;
                break;
            }

            behave = (memory_object_behave_info_t) attributes;
            behave->copy_strategy = object->copy_strategy;
            behave->temporary = object->temporary;
#if notyet  /* remove when vm_msync complies and clean in place fini */
            behave->invalidate = object->invalidate;
#else
            behave->invalidate = FALSE;
#endif
            behave->advisory_pageout = object->advisory_pageout;
//          behave->silent_overwrite = object->silent_overwrite;
            behave->silent_overwrite = FALSE;
            *count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
            break;
        }

        case MEMORY_OBJECT_PERFORMANCE_INFO:
        {
            memory_object_perf_info_t   perf;

            if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
                ret = KERN_INVALID_ARGUMENT;
                break;
            }

            perf = (memory_object_perf_info_t) attributes;
            perf->cluster_size = PAGE_SIZE;
            perf->may_cache = object->can_persist;

            *count = MEMORY_OBJECT_PERF_INFO_COUNT;
            break;
        }

        case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
        {
            old_memory_object_attr_info_t   attr;

            if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
                ret = KERN_INVALID_ARGUMENT;
                break;
            }

            attr = (old_memory_object_attr_info_t) attributes;
            attr->may_cache = object->can_persist;
            attr->copy_strategy = object->copy_strategy;

            *count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
            break;
        }

        case MEMORY_OBJECT_ATTRIBUTE_INFO:
        {
            memory_object_attr_info_t   attr;

            if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
                ret = KERN_INVALID_ARGUMENT;
                break;
            }

            attr = (memory_object_attr_info_t) attributes;
            attr->copy_strategy = object->copy_strategy;
            attr->cluster_size = PAGE_SIZE;
            attr->may_cache_object = object->can_persist;
            attr->temporary = object->temporary;

            *count = MEMORY_OBJECT_ATTR_INFO_COUNT;
            break;
        }

        default:
            ret = KERN_INVALID_ARGUMENT;
            break;
    }

    vm_object_unlock(object);

    return (ret);
}
kern_return_t
memory_object_iopl_request(
    ipc_port_t              port,
    memory_object_offset_t  offset,
    upl_size_t              *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags)
{
    vm_object_t     object;
    kern_return_t   ret;
    int             caller_flags;

    caller_flags = *flags;

    if (caller_flags & ~UPL_VALID_FLAGS) {
        /*
         * For forward compatibility's sake,
         * reject any unknown flag.
         */
        return KERN_INVALID_VALUE;
    }

    if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
        vm_named_entry_t    named_entry;

        named_entry = (vm_named_entry_t)port->ip_kobject;
        /* a few checks to make sure user is obeying rules */
        if(*upl_size == 0) {
            if(offset >= named_entry->size)
                return(KERN_INVALID_RIGHT);
            *upl_size = (upl_size_t)(named_entry->size - offset);
            if (*upl_size != named_entry->size - offset)
                return KERN_INVALID_ARGUMENT;
        }
        if(caller_flags & UPL_COPYOUT_FROM) {
            if((named_entry->protection & VM_PROT_READ)
                        != VM_PROT_READ) {
                return(KERN_INVALID_RIGHT);
            }
        } else {
            if((named_entry->protection &
                (VM_PROT_READ | VM_PROT_WRITE))
                != (VM_PROT_READ | VM_PROT_WRITE)) {
                return(KERN_INVALID_RIGHT);
            }
        }
        if(named_entry->size < (offset + *upl_size))
            return(KERN_INVALID_ARGUMENT);

        /* the callers parameter offset is defined to be the */
        /* offset from beginning of named entry offset in object */
        offset = offset + named_entry->offset;

        if (named_entry->is_sub_map ||
            named_entry->is_copy)
            return KERN_INVALID_ARGUMENT;

        named_entry_lock(named_entry);

        if (named_entry->is_pager) {
            object = vm_object_enter(named_entry->backing.pager,
                    named_entry->offset + named_entry->size,
                    named_entry->internal,
                    FALSE,
                    FALSE);
            if (object == VM_OBJECT_NULL) {
                named_entry_unlock(named_entry);
                return(KERN_INVALID_OBJECT);
            }

            /* JMM - drop reference on pager here? */

            /* create an extra reference for the named entry */
            vm_object_lock(object);
            vm_object_reference_locked(object);
            named_entry->backing.object = object;
            named_entry->is_pager = FALSE;
            named_entry_unlock(named_entry);

            /* wait for object to be ready */
            while (!object->pager_ready) {
                vm_object_wait(object,
                               VM_OBJECT_EVENT_PAGER_READY,
                               THREAD_UNINT);
                vm_object_lock(object);
            }
            vm_object_unlock(object);
        } else {
            /* This is the case where we are going to map */
            /* an already mapped object.  If the object is */
            /* not ready it is internal.  An external     */
            /* object cannot be mapped until it is ready  */
            /* we can therefore avoid the ready check     */
            /* in this case.  */
            object = named_entry->backing.object;
            vm_object_reference(object);
            named_entry_unlock(named_entry);
        }
    } else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
        memory_object_control_t control;
        control = (memory_object_control_t) port;
        if (control == NULL)
            return (KERN_INVALID_ARGUMENT);
        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL)
            return (KERN_INVALID_ARGUMENT);
        vm_object_reference(object);
    } else {
        return KERN_INVALID_ARGUMENT;
    }
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (!object->private) {
        if (*upl_size > (MAX_UPL_TRANSFER*PAGE_SIZE))
            *upl_size = (MAX_UPL_TRANSFER*PAGE_SIZE);
        if (object->phys_contiguous) {
            *flags = UPL_PHYS_CONTIG;
        } else {
            *flags = 0;
        }
    } else {
        *flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
    }

    ret = vm_object_iopl_request(object,
                                 offset,
                                 *upl_size,
                                 upl_ptr,
                                 user_page_list,
                                 page_list_count,
                                 caller_flags);
    vm_object_deallocate(object);
    return ret;
}
/*
 *  Routine:    memory_object_upl_request [interface]
 *  Purpose:
 *      Cause the population of a portion of a vm_object.
 *      Depending on the nature of the request, the pages
 *      returned may contain valid data or be uninitialized.
 */
kern_return_t
memory_object_upl_request(
    memory_object_control_t control,
    memory_object_offset_t  offset,
    upl_size_t              size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     cntrl_flags)
{
    vm_object_t object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_TERMINATED);

    return vm_object_upl_request(object,
                                 offset,
                                 size,
                                 upl_ptr,
                                 user_page_list,
                                 page_list_count,
                                 cntrl_flags);
}
/*
 *  Routine:    memory_object_super_upl_request [interface]
 *  Purpose:
 *      Cause the population of a portion of a vm_object
 *      in much the same way as memory_object_upl_request.
 *      Depending on the nature of the request, the pages
 *      returned may contain valid data or be uninitialized.
 *      However, the region may be expanded up to the super
 *      cluster size provided.
 */
kern_return_t
memory_object_super_upl_request(
    memory_object_control_t control,
    memory_object_offset_t  offset,
    upl_size_t              size,
    upl_size_t              super_cluster,
    upl_t                   *upl,
    upl_page_info_t         *user_page_list,
    unsigned int            *page_list_count,
    int                     cntrl_flags)
{
    vm_object_t object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    return vm_object_super_upl_request(object,
                                       offset,
                                       size,
                                       super_cluster,
                                       upl,
                                       user_page_list,
                                       page_list_count,
                                       cntrl_flags);
}
kern_return_t
memory_object_cluster_size(memory_object_control_t control, memory_object_offset_t *start,
                           vm_size_t *length, uint32_t *io_streaming, memory_object_fault_info_t fault_info)
{
    vm_object_t object;

    object = memory_object_control_to_vm_object(control);

    if (object == VM_OBJECT_NULL || object->paging_offset > *start)
        return (KERN_INVALID_ARGUMENT);

    *start -= object->paging_offset;

    vm_object_cluster_size(object, (vm_object_offset_t *)start, length, (vm_object_fault_info_t)fault_info, io_streaming);

    *start += object->paging_offset;

    return (KERN_SUCCESS);
}
int vm_stat_discard_cleared_reply = 0;
int vm_stat_discard_cleared_unset = 0;
int vm_stat_discard_cleared_too_late = 0;
/*
 *  Routine:    host_default_memory_manager [interface]
 *  Purpose:
 *      set/get the default memory manager port and default cluster
 *      size.
 *
 *      If successful, consumes the supplied naked send right.
 */
kern_return_t
host_default_memory_manager(
    host_priv_t             host_priv,
    memory_object_default_t *default_manager,
    __unused memory_object_cluster_size_t cluster_size)
{
    memory_object_default_t current_manager;
    memory_object_default_t new_manager;
    memory_object_default_t returned_manager;
    kern_return_t result = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL)
        return(KERN_INVALID_HOST);

    assert(host_priv == &realhost);

    new_manager = *default_manager;
    lck_mtx_lock(&memory_manager_default_lock);
    current_manager = memory_manager_default;
    returned_manager = MEMORY_OBJECT_DEFAULT_NULL;

    if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
        /*
         *  Retrieve the current value.
         */
        returned_manager = current_manager;
        memory_object_default_reference(returned_manager);
    } else {

        /*
         *  If this is the first non-null manager, start
         *  up the internal pager support.
         */
        if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
            result = vm_pageout_internal_start();
            if (result != KERN_SUCCESS)
                goto out;
        }

        /*
         *  Retrieve the current value,
         *  and replace it with the supplied value.
         *  We return the old reference to the caller
         *  but we have to take a reference on the new
         *  manager.
         */
        returned_manager = current_manager;
        memory_manager_default = new_manager;
        memory_object_default_reference(new_manager);

        /*
         *  In case anyone's been waiting for a memory
         *  manager to be established, wake them up.
         */

        thread_wakeup((event_t) &memory_manager_default);

        /*
         * Now that we have a default pager for anonymous memory,
         * reactivate all the throttled pages (i.e. dirty pages with
         * no pager).
         */
        if (current_manager == MEMORY_OBJECT_DEFAULT_NULL)
        {
            vm_page_reactivate_all_throttled();
        }
    }
 out:
    lck_mtx_unlock(&memory_manager_default_lock);

    *default_manager = returned_manager;
    return (result);
}

/*
 *  Routine:    memory_manager_default_reference
 *  Purpose:
 *      Returns a naked send right for the default
 *      memory manager.  The returned right is always
 *      valid (not IP_NULL or IP_DEAD).
 */
__private_extern__ memory_object_default_t
memory_manager_default_reference(void)
{
    memory_object_default_t current_manager;

    lck_mtx_lock(&memory_manager_default_lock);
    current_manager = memory_manager_default;
    while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
        wait_result_t res;

        res = lck_mtx_sleep(&memory_manager_default_lock,
                            LCK_SLEEP_DEFAULT,
                            (event_t) &memory_manager_default,
                            THREAD_UNINT);
        assert(res == THREAD_AWAKENED);
        current_manager = memory_manager_default;
    }
    memory_object_default_reference(current_manager);
    lck_mtx_unlock(&memory_manager_default_lock);

    return current_manager;
}
/*
 *  Routine:    memory_manager_default_check
 *
 *  Purpose:
 *      Check whether a default memory manager has been set
 *      up yet, or not. Returns KERN_SUCCESS if dmm exists,
 *      and KERN_FAILURE if dmm does not exist.
 *
 *      If there is no default memory manager, log an error,
 *      but only the first time.
 *
 */
__private_extern__ kern_return_t
memory_manager_default_check(void)
{
    memory_object_default_t current;

    lck_mtx_lock(&memory_manager_default_lock);
    current = memory_manager_default;
    if (current == MEMORY_OBJECT_DEFAULT_NULL) {
        static boolean_t logged;    /* initialized to 0 */
        boolean_t   complain = !logged;
        logged = TRUE;
        lck_mtx_unlock(&memory_manager_default_lock);
        if (complain)
            printf("Warning: No default memory manager\n");
        return(KERN_FAILURE);
    } else {
        lck_mtx_unlock(&memory_manager_default_lock);
        return(KERN_SUCCESS);
    }
}
__private_extern__ void
memory_manager_default_init(void)
{
    memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
    lck_mtx_init(&memory_manager_default_lock, &vm_object_lck_grp, &vm_object_lck_attr);
}
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the object rather than on a UPL */
kern_return_t
memory_object_page_op(
    memory_object_control_t control,
    memory_object_offset_t  offset,
    int                     ops,
    ppnum_t                 *phys_entry,
    int                     *flags)
{
    vm_object_t object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    return vm_object_page_op(object, offset, ops, phys_entry, flags);
}
/*
 * memory_object_range_op offers performance enhancement over
 * memory_object_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */
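/*
 * Usage sketch (not from the original source): applying a single page-op to
 * a whole range with one call instead of one memory_object_page_op() per
 * page, e.g.
 *
 *  (void) memory_object_range_op(control,
 *          (memory_object_offset_t) 0,
 *          (memory_object_offset_t) (16 * PAGE_SIZE),
 *          UPL_POP_DUMP,
 *          &range);
 *
 * UPL_POP_DUMP is one of the existing page-op flags; the call site and the
 * particular range shown are hypothetical.
 */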
kern_return_t
memory_object_range_op(
    memory_object_control_t control,
    memory_object_offset_t  offset_beg,
    memory_object_offset_t  offset_end,
    int                     ops,
    int                     *range)
{
    vm_object_t object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    return vm_object_range_op(object,
                              offset_beg,
                              offset_end,
                              ops,
                              (uint32_t *) range);
}
void
memory_object_mark_used(
    memory_object_control_t control)
{
    vm_object_t object;

    if (control == NULL)
        return;

    object = memory_object_control_to_vm_object(control);

    if (object != VM_OBJECT_NULL)
        vm_object_cache_remove(object);
}
void
memory_object_mark_unused(
    memory_object_control_t control,
    __unused boolean_t      rage)
{
    vm_object_t object;

    if (control == NULL)
        return;

    object = memory_object_control_to_vm_object(control);

    if (object != VM_OBJECT_NULL)
        vm_object_cache_add(object);
}
kern_return_t
memory_object_pages_resident(
    memory_object_control_t control,
    boolean_t               *has_pages_resident)
{
    vm_object_t object;

    *has_pages_resident = FALSE;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return (KERN_INVALID_ARGUMENT);

    if (object->resident_page_count)
        *has_pages_resident = TRUE;

    return (KERN_SUCCESS);
}
kern_return_t
memory_object_signed(
    memory_object_control_t control,
    boolean_t               is_signed)
{
    vm_object_t object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return KERN_INVALID_ARGUMENT;

    vm_object_lock(object);
    object->code_signed = is_signed;
    vm_object_unlock(object);

    return KERN_SUCCESS;
}
boolean_t
memory_object_is_signed(
    memory_object_control_t control)
{
    boolean_t   is_signed;
    vm_object_t object;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return FALSE;

    vm_object_lock_shared(object);
    is_signed = object->code_signed;
    vm_object_unlock(object);

    return is_signed;
}
boolean_t
memory_object_is_slid(
    memory_object_control_t control)
{
    vm_object_t object = VM_OBJECT_NULL;

    object = memory_object_control_to_vm_object(control);
    if (object == VM_OBJECT_NULL)
        return FALSE;

    return object->object_slid;
}
static zone_t mem_obj_control_zone;

__private_extern__ void
memory_object_control_bootstrap(void)
{
    int i;

    i = (vm_size_t) sizeof (struct memory_object_control);
    mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
    zone_change(mem_obj_control_zone, Z_CALLERACCT, FALSE);
    zone_change(mem_obj_control_zone, Z_NOENCRYPT, TRUE);
    return;
}
__private_extern__ memory_object_control_t
memory_object_control_allocate(
    vm_object_t object)
{
    memory_object_control_t control;

    control = (memory_object_control_t)zalloc(mem_obj_control_zone);
    if (control != MEMORY_OBJECT_CONTROL_NULL) {
        control->moc_object = object;
        control->moc_ikot = IKOT_MEM_OBJ_CONTROL; /* fake ip_kotype */
    }
    return (control);
}
__private_extern__ void
memory_object_control_collapse(
    memory_object_control_t control,
    vm_object_t             object)
{
    assert((control->moc_object != VM_OBJECT_NULL) &&
           (control->moc_object != object));
    control->moc_object = object;
}
__private_extern__ vm_object_t
memory_object_control_to_vm_object(
    memory_object_control_t control)
{
    if (control == MEMORY_OBJECT_CONTROL_NULL ||
        control->moc_ikot != IKOT_MEM_OBJ_CONTROL)
        return VM_OBJECT_NULL;

    return (control->moc_object);
}
memory_object_control_t
convert_port_to_mo_control(
    __unused mach_port_t port)
{
    return MEMORY_OBJECT_CONTROL_NULL;
}

mach_port_t
convert_mo_control_to_port(
    __unused memory_object_control_t control)
{
    return MACH_PORT_NULL;
}
void
memory_object_control_reference(
    __unused memory_object_control_t control)
{
    return;
}

/*
 * We only ever issue one of these references, so kill it
 * when that gets released (should switch the real reference
 * counting in true port-less EMMI).
 */
void
memory_object_control_deallocate(
    memory_object_control_t control)
{
    zfree(mem_obj_control_zone, control);
}
void
memory_object_control_disable(
    memory_object_control_t control)
{
    assert(control->moc_object != VM_OBJECT_NULL);
    control->moc_object = VM_OBJECT_NULL;
}
void
memory_object_default_reference(
    memory_object_default_t dmm)
{
    ipc_port_make_send(dmm);
}

void
memory_object_default_deallocate(
    memory_object_default_t dmm)
{
    ipc_port_release_send(dmm);
}
memory_object_t
convert_port_to_memory_object(
    __unused mach_port_t port)
{
    return (MEMORY_OBJECT_NULL);
}

mach_port_t
convert_memory_object_to_port(
    __unused memory_object_t object)
{
    return (MACH_PORT_NULL);
}
/* Routine memory_object_reference */
void memory_object_reference(
    memory_object_t memory_object)
{
    (memory_object->mo_pager_ops->memory_object_reference)(
        memory_object);
}

/* Routine memory_object_deallocate */
void memory_object_deallocate(
    memory_object_t memory_object)
{
    (memory_object->mo_pager_ops->memory_object_deallocate)(
        memory_object);
}

/* Routine memory_object_init */
kern_return_t memory_object_init
(
    memory_object_t memory_object,
    memory_object_control_t memory_control,
    memory_object_cluster_size_t memory_object_page_size
)
{
    return (memory_object->mo_pager_ops->memory_object_init)(
        memory_object,
        memory_control,
        memory_object_page_size);
}

/* Routine memory_object_terminate */
kern_return_t memory_object_terminate
(
    memory_object_t memory_object
)
{
    return (memory_object->mo_pager_ops->memory_object_terminate)(
        memory_object);
}

/* Routine memory_object_data_request */
kern_return_t memory_object_data_request
(
    memory_object_t memory_object,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t desired_access,
    memory_object_fault_info_t fault_info
)
{
    return (memory_object->mo_pager_ops->memory_object_data_request)(
        memory_object,
        offset,
        length,
        desired_access,
        fault_info);
}

/* Routine memory_object_data_return */
kern_return_t memory_object_data_return
(
    memory_object_t memory_object,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags
)
{
    return (memory_object->mo_pager_ops->memory_object_data_return)(
        memory_object,
        offset,
        size,
        resid_offset,
        io_error,
        dirty,
        kernel_copy,
        upl_flags);
}

/* Routine memory_object_data_initialize */
kern_return_t memory_object_data_initialize
(
    memory_object_t memory_object,
    memory_object_offset_t offset,
    memory_object_cluster_size_t size
)
{
    return (memory_object->mo_pager_ops->memory_object_data_initialize)(
        memory_object,
        offset,
        size);
}

/* Routine memory_object_data_unlock */
kern_return_t memory_object_data_unlock
(
    memory_object_t memory_object,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_prot_t desired_access
)
{
    return (memory_object->mo_pager_ops->memory_object_data_unlock)(
        memory_object,
        offset,
        size,
        desired_access);
}

/* Routine memory_object_synchronize */
kern_return_t memory_object_synchronize
(
    memory_object_t memory_object,
    memory_object_offset_t offset,
    memory_object_size_t size,
    vm_sync_t sync_flags
)
{
    return (memory_object->mo_pager_ops->memory_object_synchronize)(
        memory_object,
        offset,
        size,
        sync_flags);
}
/*
 * memory_object_map() is called by VM (in vm_map_enter() and its variants)
 * each time a "named" VM object gets mapped directly or indirectly
 * (copy-on-write mapping).  A "named" VM object has an extra reference held
 * by the pager to keep it alive until the pager decides that the
 * memory object (and its VM object) can be reclaimed.
 * VM calls memory_object_last_unmap() (in vm_object_deallocate()) when all
 * the mappings of that memory object have been removed.
 *
 * For a given VM object, calls to memory_object_map() and memory_object_unmap()
 * are serialized (through object->mapping_in_progress), to ensure that the
 * pager gets a consistent view of the mapping status of the memory object.
 *
 * This allows the pager to keep track of how many times a memory object
 * has been mapped and with which protections, to decide when it can be
 * reclaimed.
 */
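/*
 * Sketch of the pairing described above (not from the original source):
 *
 *  vm_map_enter(...)                       vm_object_deallocate(...)
 *      -> memory_object_map(pager, prot)       -> memory_object_last_unmap(pager)
 *
 * A pager can, for example, keep a "currently mapped with VM_PROT_EXECUTE"
 * bit that it sets in its memory_object_map handler and clears in its
 * memory_object_last_unmap handler; the serialization through
 * object->mapping_in_progress is what makes that bookkeeping safe.
 */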
/* Routine memory_object_map */
kern_return_t memory_object_map
(
    memory_object_t memory_object,
    vm_prot_t prot
)
{
    return (memory_object->mo_pager_ops->memory_object_map)(
        memory_object,
        prot);
}

/* Routine memory_object_last_unmap */
kern_return_t memory_object_last_unmap
(
    memory_object_t memory_object
)
{
    return (memory_object->mo_pager_ops->memory_object_last_unmap)(
        memory_object);
}

/* Routine memory_object_data_reclaim */
kern_return_t memory_object_data_reclaim
(
    memory_object_t memory_object,
    boolean_t reclaim_backing_store
)
{
    if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL)
        return KERN_NOT_SUPPORTED;
    return (memory_object->mo_pager_ops->memory_object_data_reclaim)(
        memory_object,
        reclaim_backing_store);
}
/* Routine memory_object_create */
kern_return_t memory_object_create
(
    memory_object_default_t default_memory_manager,
    vm_size_t new_memory_object_size,
    memory_object_t *new_memory_object
)
{
    return default_pager_memory_object_create(default_memory_manager,
                                              new_memory_object_size,
                                              new_memory_object);
}
upl_t
convert_port_to_upl(
    ipc_port_t port)
{
    upl_t upl;

    ip_lock(port);
    if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
        ip_unlock(port);
        return (upl_t)NULL;
    }
    upl = (upl_t) port->ip_kobject;
    ip_unlock(port);
    upl_lock(upl);
    upl->ref_count += 1;
    upl_unlock(upl);

    return upl;
}

mach_port_t
convert_upl_to_port(
    __unused upl_t upl)
{
    return MACH_PORT_NULL;
}
__private_extern__ void
upl_no_senders(
    __unused ipc_port_t             port,
    __unused mach_port_mscount_t    mscount)
{
    return;
}