/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 *	File:	vm/memory_object.c
 *	Author:	Michael Wayne Young
 *
 *	External memory management interface control functions.
 */

/*
 *	Interface dependencies:
 */
#include <mach/std_types.h>	/* For pointer_t */
#include <mach/mach_types.h>

#include <mach/kern_return.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/host_priv_server.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <mach/message.h>

/*
 *	Implementation dependencies:
 */
#include <string.h>		/* For memcpy() */

#include <kern/host.h>
#include <kern/thread.h>	/* For current_thread() */
#include <kern/ipc_mig.h>
#include <kern/misc_protos.h>

#include <vm/vm_object.h>
#include <vm/vm_fault.h>
#include <vm/memory_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/pmap.h>		/* For pmap_clear_modify */
#include <vm/vm_kern.h>		/* For kernel_map, vm_move */
#include <vm/vm_map.h>		/* For vm_map_pageable */
#include <vm/vm_purgeable_internal.h>	/* Needed by some vm_page.h macros */
#include <vm/vm_shared_region.h>

#include <vm/vm_external.h>

#include <vm/vm_protos.h>
memory_object_default_t	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
decl_lck_mtx_data(, memory_manager_default_lock)
/*
 *	Routine:	memory_object_should_return_page
 *
 *	Description:
 *		Determine whether the given page should be returned,
 *		based on the page's state and on the given return policy.
 *
 *		We should return the page if one of the following is true:
 *
 *		1. Page is dirty and should_return is not RETURN_NONE.
 *		2. Page is precious and should_return is RETURN_ALL.
 *		3. Should_return is RETURN_ANYTHING.
 *
 *		As a side effect, m->vmp_dirty will be made consistent
 *		with pmap_is_modified(m), if should_return is not
 *		MEMORY_OBJECT_RETURN_NONE.
 */

#define	memory_object_should_return_page(m, should_return) \
	(should_return != MEMORY_OBJECT_RETURN_NONE && \
	 (((m)->vmp_dirty || ((m)->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(m)))) || \
	  ((m)->vmp_precious && (should_return) == MEMORY_OBJECT_RETURN_ALL) || \
	  (should_return) == MEMORY_OBJECT_RETURN_ANYTHING))

typedef	int	memory_object_lock_result_t;

#define MEMORY_OBJECT_LOCK_RESULT_DONE			0
#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK		1
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN		2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_FREE		3
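/*
 * Illustrative sketch (not compiled): how the policy macro and the
 * MEMORY_OBJECT_LOCK_RESULT_* codes above are meant to be consumed.
 * The page/policy values are hypothetical; the real consumer is
 * vm_object_update_extent() further down in this file.
 */
#if 0
static void
example_lock_result_usage(vm_page_t m, memory_object_return_t policy)
{
	/* does this page need to be pushed back to the pager? */
	if (memory_object_should_return_page(m, policy)) {
		/* dirty or precious under this policy: schedule a data return */
	}

	switch (memory_object_lock_page(m, policy, FALSE, VM_PROT_NO_CHANGE)) {
	case MEMORY_OBJECT_LOCK_RESULT_DONE:		/* nothing to do */
		break;
	case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:	/* page busy: sleep and retry */
		break;
	case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:	/* push contents to the pager */
		break;
	case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:	/* discard the cached page */
		break;
	}
}
#endif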
memory_object_lock_result_t memory_object_lock_page(
				vm_page_t		m,
				memory_object_return_t	should_return,
				boolean_t		should_flush,
				vm_prot_t		prot);

/*
 *	Routine:	memory_object_lock_page
 *
 *	Description:
 *		Perform the appropriate lock operations on the
 *		given page.  See the description of
 *		"memory_object_lock_request" for the meanings
 *		of the arguments.
 *
 *		Returns an indication that the operation
 *		completed, blocked, or that the page must
 *		be cleaned.
 */
memory_object_lock_result_t
memory_object_lock_page(
	vm_page_t		m,
	memory_object_return_t	should_return,
	boolean_t		should_flush,
	vm_prot_t		prot)
{
	XPR(XPR_MEMORY_OBJECT,
	    "m_o_lock_page, page 0x%X rtn %d flush %d prot %d\n",
	    m, should_return, should_flush, prot, 0);

	if (m->vmp_busy || m->vmp_cleaning)
		return (MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);

	if (m->vmp_laundry)
		vm_pageout_steal_laundry(m, FALSE);

	/*
	 *	Don't worry about pages for which the kernel
	 *	does not have any data.
	 */
	if (m->vmp_absent || m->vmp_error || m->vmp_restart) {
		if (m->vmp_error && should_flush && !VM_PAGE_WIRED(m)) {
			/*
			 * dump the page, pager wants us to
			 * clean it up and there is no
			 * relevant data to return
			 */
			return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);
		}
		return (MEMORY_OBJECT_LOCK_RESULT_DONE);
	}
	assert(!m->vmp_fictitious);

	if (VM_PAGE_WIRED(m)) {
		/*
		 * The page is wired... just clean or return the page if needed.
		 * Wired pages don't get flushed or disconnected from the pmap.
		 */
		if (memory_object_should_return_page(m, should_return))
			return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);

		return (MEMORY_OBJECT_LOCK_RESULT_DONE);
	}

	if (should_flush) {
		/*
		 * must do the pmap_disconnect before determining the
		 * need to return the page... otherwise it's possible
		 * for the page to go from the clean to the dirty state
		 * after we've made our decision
		 */
		if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
			SET_PAGE_DIRTY(m, FALSE);
		}
	} else {
		/*
		 * If we are decreasing permission, do it now;
		 * let the fault handler take care of increases
		 * (pmap_page_protect may not increase protection).
		 */
		if (prot != VM_PROT_NO_CHANGE)
			pmap_page_protect(VM_PAGE_GET_PHYS_PAGE(m), VM_PROT_ALL & ~prot);
	}
	/*
	 *	Handle returning dirty or precious pages
	 */
	if (memory_object_should_return_page(m, should_return)) {
		/*
		 * we used to do a pmap_disconnect here in support
		 * of memory_object_lock_request, but that routine
		 * no longer requires this... in any event, in
		 * our world, it would turn into a big noop since
		 * we don't lock the page in any way and as soon
		 * as we drop the object lock, the page can be
		 * faulted back into an address space
		 *
		 *	pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
		 */
		return (MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
	}

	/*
	 *	Handle flushing clean pages
	 */
	if (should_flush)
		return (MEMORY_OBJECT_LOCK_RESULT_MUST_FREE);

	/*
	 * we used to deactivate clean pages at this point,
	 * but we do not believe that an msync should change
	 * the 'age' of a page in the cache... here is the
	 * original comment and code concerning this...
	 *
	 *	XXX Make clean but not flush a paging hint,
	 *	and deactivate the pages.  This is a hack
	 *	because it overloads flush/clean with
	 *	implementation-dependent meaning.  This only
	 *	happens to pages that are already clean.
	 *
	 *   if (vm_page_deactivate_hint && (should_return != MEMORY_OBJECT_RETURN_NONE))
	 *	return (MEMORY_OBJECT_LOCK_RESULT_MUST_DEACTIVATE);
	 */

	return (MEMORY_OBJECT_LOCK_RESULT_DONE);
}
/*
 *	Routine:	memory_object_lock_request [user interface]
 *
 *	Description:
 *		Control use of the data associated with the given
 *		memory object.  For each page in the given range,
 *		perform the following operations, in order:
 *			1)  restrict access to the page (disallow
 *			    forms specified by "prot");
 *			2)  return data to the manager (if "should_return"
 *			    is RETURN_DIRTY and the page is dirty, or
 *			    "should_return" is RETURN_ALL and the page
 *			    is either dirty or precious); and,
 *			3)  flush the cached copy (if "should_flush"
 *			    is asserted).
 *		The set of pages is defined by a starting offset
 *		("offset") and size ("size").  Only pages with the
 *		same page alignment as the starting offset are
 *		considered.
 *
 *		A single acknowledgement is sent (to the "reply_to"
 *		port) when these actions are complete.  If successful,
 *		the naked send right for reply_to is consumed.
 */

kern_return_t
memory_object_lock_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	memory_object_size_t	size,
	memory_object_offset_t	*resid_offset,
	int			*io_errno,
	memory_object_return_t	should_return,
	int			flags,
	vm_prot_t		prot)
{
	vm_object_t		object;

	/*
	 *	Check for bogus arguments.
	 */
	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE)
		return (KERN_INVALID_ARGUMENT);

	size = round_page_64(size);

	/*
	 *	Lock the object, and acquire a paging reference to
	 *	prevent the memory_object reference from being released.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL) {
		if ((should_return != MEMORY_OBJECT_RETURN_NONE) || offset || object->copy) {
			flags &= ~MEMORY_OBJECT_DATA_FLUSH_ALL;
			flags |= MEMORY_OBJECT_DATA_FLUSH;
		}
	}
	offset -= object->paging_offset;

	if (flags & MEMORY_OBJECT_DATA_FLUSH_ALL)
		vm_object_reap_pages(object, REAP_DATA_FLUSH);
	else
		(void)vm_object_update(object, offset, size, resid_offset,
				       io_errno, should_return, flags, prot);

	vm_object_paging_end(object);
	vm_object_unlock(object);

	return (KERN_SUCCESS);
}
/*
 *	memory_object_release_name:  [interface]
 *
 *	Enforces name semantic on memory_object reference count decrement
 *	This routine should not be called unless the caller holds a name
 *	reference gained through the memory_object_named_create or the
 *	memory_object_rename call.
 *	If the TERMINATE_IDLE flag is set, the call will return if the
 *	reference count is not 1. i.e. idle with the only remaining reference
 *	being the name.
 *	If the decision is made to proceed the name field flag is set to
 *	false and the reference count is decremented.  If the RESPECT_CACHE
 *	flag is set and the reference count has gone to zero, the
 *	memory_object is checked to see if it is cacheable otherwise when
 *	the reference count is zero, it is simply terminated.
 */

kern_return_t
memory_object_release_name(
	memory_object_control_t	control,
	int			flags)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_release_name(object, flags);
}

/*
 *	Routine:	memory_object_destroy [user interface]
 *	Purpose:
 *		Shut down a memory object, despite the
 *		presence of address map (or other) references
 *		to the vm_object.
 */
kern_return_t
memory_object_destroy(
	memory_object_control_t	control,
	kern_return_t		reason)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return (vm_object_destroy(object, reason));
}
/*
 *	Routine:	vm_object_sync
 *
 *	Kernel internal function to synch out pages in a given
 *	range within an object to its memory manager.  Much the
 *	same as memory_object_lock_request but page protection
 *	is not changed.
 *
 *	If the should_flush and should_return flags are true pages
 *	are flushed, that is dirty & precious pages are written to
 *	the memory manager and then discarded.  If should_return
 *	is false, only precious pages are returned to the memory
 *	manager.
 *
 *	If should flush is false and should_return true, the memory
 *	manager's copy of the pages is updated.  If should_return
 *	is also false, only the precious pages are updated.  This
 *	last option is of limited utility.
 *
 *	Returns:
 *	FALSE		if no pages were returned to the pager
 *	TRUE		otherwise.
 */

boolean_t
vm_object_sync(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	boolean_t		should_flush,
	boolean_t		should_return,
	boolean_t		should_iosync)
{
	boolean_t	rv;
	int		flags;

	XPR(XPR_VM_OBJECT,
	    "vm_o_sync, object 0x%X, offset 0x%X size 0x%x flush %d rtn %d\n",
	    object, offset, size, should_flush, should_return);

	/*
	 * Lock the object, and acquire a paging reference to
	 * prevent the memory_object and control ports from
	 * being destroyed.
	 */
	vm_object_lock(object);
	vm_object_paging_begin(object);

	if (should_flush) {
		flags = MEMORY_OBJECT_DATA_FLUSH;
		/*
		 * This flush is from an msync(), not a truncate(), so the
		 * contents of the file are not affected.
		 * MEMORY_OBJECT_DATA_NO_CHANGE lets vm_object_update() know
		 * that the data is not changed and that there's no need to
		 * push the old contents to a copy object.
		 */
		flags |= MEMORY_OBJECT_DATA_NO_CHANGE;
	} else
		flags = 0;

	if (should_iosync)
		flags |= MEMORY_OBJECT_IO_SYNC;

	rv = vm_object_update(object, offset, (vm_object_size_t)size, NULL, NULL,
			      (should_return) ?
			      MEMORY_OBJECT_RETURN_ALL :
			      MEMORY_OBJECT_RETURN_NONE,
			      flags,
			      VM_PROT_NO_CHANGE);

	vm_object_paging_end(object);
	vm_object_unlock(object);
	return rv;
}
#define LIST_REQ_PAGEOUT_PAGES(object, data_cnt, po, ro, ioerr, iosync)	\
MACRO_BEGIN								\
									\
	int			upl_flags;				\
	memory_object_t		pager;					\
									\
	if ((pager = (object)->pager) != MEMORY_OBJECT_NULL) {		\
		vm_object_paging_begin(object);				\
		vm_object_unlock(object);				\
									\
		if (iosync)						\
			upl_flags = UPL_MSYNC | UPL_IOSYNC;		\
		else							\
			upl_flags = UPL_MSYNC;				\
									\
		(void) memory_object_data_return(pager,			\
			po,						\
			(memory_object_cluster_size_t)data_cnt,		\
			ro,						\
			ioerr,						\
			FALSE,						\
			FALSE,						\
			upl_flags);					\
									\
		vm_object_lock(object);					\
		vm_object_paging_end(object);				\
	}								\
MACRO_END

extern struct vnode *
vnode_pager_lookup_vnode(memory_object_t);
static int
vm_object_update_extent(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_offset_t	offset_end,
	vm_object_offset_t	*offset_resid,
	int			*io_errno,
	boolean_t		should_flush,
	memory_object_return_t	should_return,
	boolean_t		should_iosync,
	vm_prot_t		prot)
{
	vm_page_t	m;
	int		retval = 0;
	vm_object_offset_t	paging_offset = 0;
	vm_object_offset_t	next_offset = offset;
	memory_object_lock_result_t	page_lock_result;
	memory_object_cluster_size_t	data_cnt = 0;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	int		dw_count;
	int		dw_limit;
	int		dirty_count;

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	dirty_count = 0;

	for (;
	     offset < offset_end && object->resident_page_count;
	     offset += PAGE_SIZE_64) {

		/*
		 * Limit the number of pages to be cleaned at once to a contiguous
		 * run, or at most MAX_UPL_TRANSFER_BYTES
		 */
		if (data_cnt) {
			if ((data_cnt >= MAX_UPL_TRANSFER_BYTES) || (next_offset != offset)) {

				if (dw_count) {
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
					dwp = &dw_array[0];
					dw_count = 0;
				}
				LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
						       paging_offset, offset_resid, io_errno, should_iosync);
				data_cnt = 0;
			}
		}
		while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {

			dwp->dw_mask = 0;

			page_lock_result = memory_object_lock_page(m, should_return, should_flush, prot);

			if (data_cnt && page_lock_result != MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN) {
				/*
				 *	End of a run of dirty/precious pages.
				 */
				if (dw_count) {
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
					dwp = &dw_array[0];
					dw_count = 0;
				}
				LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
						       paging_offset, offset_resid, io_errno, should_iosync);
				/*
				 * LIST_REQ_PAGEOUT_PAGES will drop the object lock which will
				 * allow the state of page 'm' to change... we need to re-lookup
				 * the current offset
				 */
				data_cnt = 0;
				continue;
			}

			switch (page_lock_result) {

			case MEMORY_OBJECT_LOCK_RESULT_DONE:
				break;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_FREE:
				if (m->vmp_dirty == TRUE)
					dirty_count++;
				dwp->dw_mask |= DW_vm_page_free;
				break;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
				PAGE_SLEEP(object, m, THREAD_UNINT);
				continue;

			case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
				if (data_cnt == 0)
					paging_offset = offset;

				data_cnt += PAGE_SIZE;
				next_offset = offset + PAGE_SIZE_64;

				/*
				 * wired pages shouldn't be flushed and
				 * since they aren't on any queue,
				 * no need to remove them
				 */
				if (!VM_PAGE_WIRED(m)) {

					if (should_flush) {
						/*
						 * add additional state for the flush
						 */
						m->vmp_free_when_done = TRUE;
					}
					/*
					 * we used to remove the page from the queues at this
					 * point, but we do not believe that an msync
					 * should cause the 'age' of a page to be changed
					 *
					 *    dwp->dw_mask |= DW_VM_PAGE_QUEUES_REMOVE;
					 */
				}
				retval = 1;
				break;
			}
			if (dwp->dw_mask) {
				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

				if (dw_count >= dw_limit) {
					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
					dwp = &dw_array[0];
					dw_count = 0;
				}
			}
			break;
		}
	}

	if (object->pager)
		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_INVALIDATED, vnode_pager_lookup_vnode(object->pager));
	/*
	 *	We have completed the scan for applicable pages.
	 *	Clean any pages that have been saved.
	 */
	if (dw_count)
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);

	if (data_cnt) {
		LIST_REQ_PAGEOUT_PAGES(object, data_cnt,
				       paging_offset, offset_resid, io_errno, should_iosync);
	}
	return (retval);
}
/*
 *	Routine:	vm_object_update
 *	Description:
 *		Work function for m_o_lock_request(), vm_o_sync().
 *
 *		Called with object locked and paging ref taken.
 */
kern_return_t
vm_object_update(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_object_size_t	size,
	vm_object_offset_t	*resid_offset,
	int			*io_errno,
	memory_object_return_t	should_return,
	int			flags,
	vm_prot_t		protection)
{
	vm_object_t		copy_object = VM_OBJECT_NULL;
	boolean_t		data_returned = FALSE;
	boolean_t		update_cow;
	boolean_t		should_flush = (flags & MEMORY_OBJECT_DATA_FLUSH) ? TRUE : FALSE;
	boolean_t		should_iosync = (flags & MEMORY_OBJECT_IO_SYNC) ? TRUE : FALSE;
	vm_fault_return_t	result;
	int			num_of_extents;
	int			n;
#define MAX_EXTENTS	8
#define EXTENT_SIZE	(1024 * 1024 * 256)
#define RESIDENT_LIMIT	(1024 * 32)
	struct extent {
		vm_object_offset_t e_base;
		vm_object_offset_t e_min;
		vm_object_offset_t e_max;
	} extents[MAX_EXTENTS];

	/*
	 *	To avoid blocking while scanning for pages, save
	 *	dirty pages to be cleaned all at once.
	 *
	 *	XXXO A similar strategy could be used to limit the
	 *	number of times that a scan must be restarted for
	 *	other reasons.  Those pages that would require blocking
	 *	could be temporarily collected in another list, or
	 *	their offsets could be recorded in a small array.
	 */

	/*
	 * XXX	NOTE: May want to consider converting this to a page list
	 * XXX	vm_map_copy interface.  Need to understand object
	 * XXX	coalescing implications before doing so.
	 */

	update_cow = ((flags & MEMORY_OBJECT_DATA_FLUSH)
		      && (!(flags & MEMORY_OBJECT_DATA_NO_CHANGE) &&
			  !(flags & MEMORY_OBJECT_DATA_PURGE)))
		     || (flags & MEMORY_OBJECT_COPY_SYNC);

	if (update_cow || (flags & (MEMORY_OBJECT_DATA_PURGE | MEMORY_OBJECT_DATA_SYNC))) {
		int collisions = 0;

		while ((copy_object = object->copy) != VM_OBJECT_NULL) {
			/*
			 * need to do a try here since we're swimming upstream
			 * against the normal lock ordering... however, we need
			 * to hold the object stable until we gain control of the
			 * copy object so we have to be careful how we approach this
			 */
			if (vm_object_lock_try(copy_object)) {
				/*
				 * we 'won' the lock on the copy object...
				 * no need to hold the object lock any longer...
				 * take a real reference on the copy object because
				 * we're going to call vm_fault_page on it which may
				 * under certain conditions drop the lock and the paging
				 * reference we're about to take... the reference
				 * will keep the copy object from going away if that happens
				 */
				vm_object_unlock(object);
				vm_object_reference_locked(copy_object);

				break;
			}
			vm_object_unlock(object);

			collisions++;
			mutex_pause(collisions);

			vm_object_lock(object);
		}
	}
	if ((copy_object != VM_OBJECT_NULL && update_cow) || (flags & MEMORY_OBJECT_DATA_SYNC)) {
		vm_map_size_t		i;
		vm_map_size_t		copy_size;
		vm_map_offset_t		copy_offset;
		vm_prot_t		prot;
		vm_page_t		page;
		vm_page_t		top_page;
		kern_return_t		error = 0;
		struct vm_object_fault_info fault_info = {};

		if (copy_object != VM_OBJECT_NULL) {
			/*
			 * translate offset with respect to shadow's offset
			 */
			copy_offset = (offset >= copy_object->vo_shadow_offset) ?
			    (vm_map_offset_t)(offset - copy_object->vo_shadow_offset) :
			    (vm_map_offset_t) 0;

			if (copy_offset > copy_object->vo_size)
				copy_offset = copy_object->vo_size;

			/*
			 * clip size with respect to shadow offset
			 */
			if (offset >= copy_object->vo_shadow_offset) {
				copy_size = size;
			} else if (size >= copy_object->vo_shadow_offset - offset) {
				copy_size = size - (copy_object->vo_shadow_offset - offset);
			} else {
				copy_size = 0;
			}

			if (copy_offset + copy_size > copy_object->vo_size) {
				if (copy_object->vo_size >= copy_offset) {
					copy_size = copy_object->vo_size - copy_offset;
				} else {
					copy_size = 0;
				}
			}
			copy_size += copy_offset;

		} else {
			copy_object = object;

			copy_size   = offset + size;
			copy_offset = offset;
		}
		fault_info.interruptible = THREAD_UNINT;
		fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
		fault_info.lo_offset = copy_offset;
		fault_info.hi_offset = copy_size;
		fault_info.stealth = TRUE;
		assert(fault_info.cs_bypass == FALSE);
		assert(fault_info.pmap_cs_associated == FALSE);

		vm_object_paging_begin(copy_object);

		for (i = copy_offset; i < copy_size; i += PAGE_SIZE) {
	RETRY_COW_OF_LOCK_REQUEST:
			fault_info.cluster_size = (vm_size_t) (copy_size - i);
			assert(fault_info.cluster_size == copy_size - i);

			prot = VM_PROT_WRITE|VM_PROT_READ;
			page = VM_PAGE_NULL;
			result = vm_fault_page(copy_object, i,
					       VM_PROT_WRITE|VM_PROT_READ,
					       FALSE,
					       FALSE, /* page not looked up */
					       &prot,
					       &page,
					       &top_page,
					       (int *)0,
					       &error,
					       FALSE,
					       FALSE, &fault_info);

			switch (result) {
			case VM_FAULT_SUCCESS:
				if (top_page) {
					vm_fault_cleanup(
						VM_PAGE_OBJECT(page), top_page);
					vm_object_lock(copy_object);
					vm_object_paging_begin(copy_object);
				}
				if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {

					vm_page_lockspin_queues();

					if ((!VM_PAGE_NON_SPECULATIVE_PAGEABLE(page))) {
						vm_page_deactivate(page);
					}
					vm_page_unlock_queues();
				}
				PAGE_WAKEUP_DONE(page);
				break;
			case VM_FAULT_RETRY:
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_INTERRUPTED:
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_MEMORY_SHORTAGE:
				VM_PAGE_WAIT();
				prot = VM_PROT_WRITE|VM_PROT_READ;
				vm_object_lock(copy_object);
				vm_object_paging_begin(copy_object);
				goto RETRY_COW_OF_LOCK_REQUEST;
			case VM_FAULT_SUCCESS_NO_VM_PAGE:
				/* success but no VM page: fail */
				vm_object_paging_end(copy_object);
				vm_object_unlock(copy_object);
				/*FALLTHROUGH*/
			case VM_FAULT_MEMORY_ERROR:
				if (object != copy_object)
					vm_object_deallocate(copy_object);
				vm_object_lock(object);
				goto BYPASS_COW_COPYIN;
			default:
				panic("vm_object_update: unexpected error 0x%x"
				      " from vm_fault_page()\n", result);
			}
		}
		vm_object_paging_end(copy_object);
	}
	if ((flags & (MEMORY_OBJECT_DATA_SYNC | MEMORY_OBJECT_COPY_SYNC))) {
		if (copy_object != VM_OBJECT_NULL && copy_object != object) {
			vm_object_unlock(copy_object);
			vm_object_deallocate(copy_object);
			vm_object_lock(object);
		}
		return KERN_SUCCESS;
	}
	if (copy_object != VM_OBJECT_NULL && copy_object != object) {
		if ((flags & MEMORY_OBJECT_DATA_PURGE)) {
			vm_object_lock_assert_exclusive(copy_object);
			copy_object->shadow_severed = TRUE;
			copy_object->shadowed = FALSE;
			copy_object->shadow = NULL;
			/*
			 * delete the ref the COW was holding on the target object
			 */
			vm_object_deallocate(object);
		}
		vm_object_unlock(copy_object);
		vm_object_deallocate(copy_object);
		vm_object_lock(object);
	}
BYPASS_COW_COPYIN:

	/*
	 * when we have a really large range to check relative
	 * to the number of actual resident pages, we'd like
	 * to use the resident page list to drive our checks
	 * however, the object lock will get dropped while processing
	 * the page which means the resident queue can change which
	 * means we can't walk the queue as we process the pages
	 * we also want to do the processing in offset order to allow
	 * 'runs' of pages to be collected if we're being told to
	 * flush to disk... the resident page queue is NOT ordered.
	 *
	 * a temporary solution (until we figure out how to deal with
	 * large address spaces more generically) is to pre-flight
	 * the resident page queue (if it's small enough) and develop
	 * a collection of extents (that encompass actual resident pages)
	 * to visit.  This will at least allow us to deal with some of the
	 * more pathological cases in a more efficient manner.  The current
	 * worst case (a single resident page at the end of an extremely large
	 * range) can take minutes to complete for ranges in the terabyte
	 * category... since this routine is called when truncating a file,
	 * and we currently support files up to 16 Tbytes in size, this
	 * is not a theoretical problem
	 */

	if ((object->resident_page_count < RESIDENT_LIMIT) &&
	    (atop_64(size) > (unsigned)(object->resident_page_count/(8 * MAX_EXTENTS)))) {
		vm_page_t		next;
		vm_object_offset_t	start;
		vm_object_offset_t	end;
		vm_object_size_t	e_mask;
		vm_page_t		m;

		start = offset;
		end   = offset + size;
		num_of_extents = 0;
		e_mask = ~((vm_object_size_t)(EXTENT_SIZE - 1));

		m = (vm_page_t) vm_page_queue_first(&object->memq);

		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) m)) {
			next = (vm_page_t) vm_page_queue_next(&m->vmp_listq);

			if ((m->vmp_offset >= start) && (m->vmp_offset < end)) {
				/*
				 * this is a page we're interested in
				 * try to fit it into a current extent
				 */
				for (n = 0; n < num_of_extents; n++) {
					if ((m->vmp_offset & e_mask) == extents[n].e_base) {
						/*
						 * use (PAGE_SIZE - 1) to determine the
						 * max offset so that we don't wrap if
						 * we're at the last page of the space
						 */
						if (m->vmp_offset < extents[n].e_min)
							extents[n].e_min = m->vmp_offset;
						else if ((m->vmp_offset + (PAGE_SIZE - 1)) > extents[n].e_max)
							extents[n].e_max = m->vmp_offset + (PAGE_SIZE - 1);
						break;
					}
				}
				if (n == num_of_extents) {
					/*
					 * didn't find a current extent that can encompass
					 * this page
					 */
					if (n < MAX_EXTENTS) {
						/*
						 * if we still have room,
						 * create a new extent
						 */
						extents[n].e_base = m->vmp_offset & e_mask;
						extents[n].e_min  = m->vmp_offset;
						extents[n].e_max  = m->vmp_offset + (PAGE_SIZE - 1);

						num_of_extents++;
					} else {
						/*
						 * no room to create a new extent...
						 * fall back to a single extent based
						 * on the min and max page offsets
						 * we find in the range we're interested in...
						 * first, look through the extent list and
						 * develop the overall min and max for the
						 * pages we've looked at up to this point
						 */
						for (n = 1; n < num_of_extents; n++) {
							if (extents[n].e_min < extents[0].e_min)
								extents[0].e_min = extents[n].e_min;
							if (extents[n].e_max > extents[0].e_max)
								extents[0].e_max = extents[n].e_max;
						}
						/*
						 * now setup to run through the remaining pages
						 * to determine the overall min and max
						 * offset for the specified range
						 */
						extents[0].e_base = 0;
						e_mask = 0;
						num_of_extents = 1;

						/*
						 * by continuing, we'll reprocess the
						 * page that forced us to abandon trying
						 * to develop multiple extents
						 */
						continue;
					}
				}
			}
			m = next;
		}
	} else {
		extents[0].e_min = offset;
		extents[0].e_max = offset + (size - 1);

		num_of_extents = 1;
	}
	for (n = 0; n < num_of_extents; n++) {
		if (vm_object_update_extent(object, extents[n].e_min, extents[n].e_max, resid_offset, io_errno,
					    should_flush, should_return, should_iosync, protection))
			data_returned = TRUE;
	}
	return (data_returned);
}
static kern_return_t
vm_object_set_attributes_common(
	vm_object_t	object,
	boolean_t	may_cache,
	memory_object_copy_strategy_t copy_strategy)
{
	boolean_t	object_became_ready;

	XPR(XPR_MEMORY_OBJECT,
	    "m_o_set_attr_com, object 0x%X flg %x strat %d\n",
	    object, (may_cache&1), copy_strategy, 0, 0);

	if (object == VM_OBJECT_NULL)
		return(KERN_INVALID_ARGUMENT);

	/*
	 *	Verify the attributes of importance
	 */

	switch(copy_strategy) {
		case MEMORY_OBJECT_COPY_NONE:
		case MEMORY_OBJECT_COPY_DELAY:
			break;
		default:
			return(KERN_INVALID_ARGUMENT);
	}

	vm_object_lock(object);

	/*
	 *	Copy the attributes
	 */
	assert(!object->internal);
	object_became_ready = !object->pager_ready;
	object->copy_strategy = copy_strategy;
	object->can_persist = may_cache;

	/*
	 *	Wake up anyone waiting for the ready attribute
	 *	to become asserted.
	 */

	if (object_became_ready) {
		object->pager_ready = TRUE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
	}

	vm_object_unlock(object);

	return(KERN_SUCCESS);
}

kern_return_t
memory_object_synchronize_completed(
	__unused memory_object_control_t	control,
	__unused memory_object_offset_t		offset,
	__unused memory_object_size_t		length)
{
	panic("memory_object_synchronize_completed no longer supported\n");
	return(KERN_FAILURE);
}
/*
 *	Set the memory object attribute as provided.
 *
 *	XXX This routine cannot be completed until the vm_msync, clean
 *	    in place, and cluster work is completed. See ifdef notyet
 *	    below and note that vm_object_set_attributes_common()
 *	    may have to be expanded.
 */
kern_return_t
memory_object_change_attributes(
	memory_object_control_t		control,
	memory_object_flavor_t		flavor,
	memory_object_info_t		attributes,
	mach_msg_type_number_t		count)
{
	vm_object_t			object;
	kern_return_t			result = KERN_SUCCESS;
	boolean_t			may_cache;
	boolean_t			invalidate;
	memory_object_copy_strategy_t	copy_strategy;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	may_cache = object->can_persist;
	copy_strategy = object->copy_strategy;
#if notyet
	invalidate = object->invalidate;
#endif
	vm_object_unlock(object);

	switch (flavor) {
	    case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		old_memory_object_behave_info_t	behave;

		if (count != OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (old_memory_object_behave_info_t) attributes;

#if notyet
		invalidate = behave->invalidate;
#endif
		copy_strategy = behave->copy_strategy;

		break;
	    }

	    case MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		memory_object_behave_info_t	behave;

		if (count != MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (memory_object_behave_info_t) attributes;

#if notyet
		invalidate = behave->invalidate;
#endif
		copy_strategy = behave->copy_strategy;
		break;
	    }

	    case MEMORY_OBJECT_PERFORMANCE_INFO:
	    {
		memory_object_perf_info_t	perf;

		if (count != MEMORY_OBJECT_PERF_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		perf = (memory_object_perf_info_t) attributes;

		may_cache = perf->may_cache;

		break;
	    }

	    case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		old_memory_object_attr_info_t	attr;

		if (count != OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (old_memory_object_attr_info_t) attributes;

		may_cache = attr->may_cache;
		copy_strategy = attr->copy_strategy;

		break;
	    }

	    case MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		memory_object_attr_info_t	attr;

		if (count != MEMORY_OBJECT_ATTR_INFO_COUNT) {
			result = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (memory_object_attr_info_t) attributes;

		copy_strategy = attr->copy_strategy;
		may_cache = attr->may_cache_object;

		break;
	    }

	    default:
		result = KERN_INVALID_ARGUMENT;
		break;
	}

	if (result != KERN_SUCCESS)
		return(result);

	if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
		copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}

	/*
	 * XXX	may_cache may become a tri-valued variable to handle
	 * XXX	uncache if not in use.
	 */
	return (vm_object_set_attributes_common(object,
						may_cache,
						copy_strategy));
}
kern_return_t
memory_object_get_attributes(
	memory_object_control_t	control,
	memory_object_flavor_t	flavor,
	memory_object_info_t	attributes,	/* pointer to OUT array */
	mach_msg_type_number_t	*count)		/* IN/OUT */
{
	kern_return_t		ret = KERN_SUCCESS;
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	vm_object_lock(object);

	switch (flavor) {
	    case OLD_MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		old_memory_object_behave_info_t	behave;

		if (*count < OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (old_memory_object_behave_info_t) attributes;
		behave->copy_strategy = object->copy_strategy;
		behave->temporary = FALSE;
#if notyet	/* remove when vm_msync complies and clean in place fini */
		behave->invalidate = object->invalidate;
#else
		behave->invalidate = FALSE;
#endif

		*count = OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT;
		break;
	    }

	    case MEMORY_OBJECT_BEHAVIOR_INFO:
	    {
		memory_object_behave_info_t	behave;

		if (*count < MEMORY_OBJECT_BEHAVE_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		behave = (memory_object_behave_info_t) attributes;
		behave->copy_strategy = object->copy_strategy;
		behave->temporary = FALSE;
#if notyet	/* remove when vm_msync complies and clean in place fini */
		behave->invalidate = object->invalidate;
#else
		behave->invalidate = FALSE;
#endif
		behave->advisory_pageout = FALSE;
		behave->silent_overwrite = FALSE;
		*count = MEMORY_OBJECT_BEHAVE_INFO_COUNT;
		break;
	    }

	    case MEMORY_OBJECT_PERFORMANCE_INFO:
	    {
		memory_object_perf_info_t	perf;

		if (*count < MEMORY_OBJECT_PERF_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		perf = (memory_object_perf_info_t) attributes;
		perf->cluster_size = PAGE_SIZE;
		perf->may_cache = object->can_persist;

		*count = MEMORY_OBJECT_PERF_INFO_COUNT;
		break;
	    }

	    case OLD_MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		old_memory_object_attr_info_t	attr;

		if (*count < OLD_MEMORY_OBJECT_ATTR_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (old_memory_object_attr_info_t) attributes;
		attr->may_cache = object->can_persist;
		attr->copy_strategy = object->copy_strategy;

		*count = OLD_MEMORY_OBJECT_ATTR_INFO_COUNT;
		break;
	    }

	    case MEMORY_OBJECT_ATTRIBUTE_INFO:
	    {
		memory_object_attr_info_t	attr;

		if (*count < MEMORY_OBJECT_ATTR_INFO_COUNT) {
			ret = KERN_INVALID_ARGUMENT;
			break;
		}

		attr = (memory_object_attr_info_t) attributes;
		attr->copy_strategy = object->copy_strategy;
		attr->cluster_size = PAGE_SIZE;
		attr->may_cache_object = object->can_persist;
		attr->temporary = FALSE;

		*count = MEMORY_OBJECT_ATTR_INFO_COUNT;
		break;
	    }

	    default:
		ret = KERN_INVALID_ARGUMENT;
		break;
	}

	vm_object_unlock(object);

	return (ret);
}
kern_return_t
memory_object_iopl_request(
	ipc_port_t		port,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	upl_control_flags_t	*flags,
	vm_tag_t		tag)
{
	vm_object_t		object;
	kern_return_t		ret;
	upl_control_flags_t	caller_flags;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}

	if (ip_kotype(port) == IKOT_NAMED_ENTRY) {
		vm_named_entry_t	named_entry;

		named_entry = (vm_named_entry_t)port->ip_kobject;
		/* a few checks to make sure user is obeying rules */
		if(*upl_size == 0) {
			if(offset >= named_entry->size)
				return(KERN_INVALID_RIGHT);
			*upl_size = (upl_size_t)(named_entry->size - offset);
			if (*upl_size != named_entry->size - offset)
				return KERN_INVALID_ARGUMENT;
		}
		if(caller_flags & UPL_COPYOUT_FROM) {
			if((named_entry->protection & VM_PROT_READ)
						!= VM_PROT_READ) {
				return(KERN_INVALID_RIGHT);
			}
		} else {
			if((named_entry->protection &
				(VM_PROT_READ | VM_PROT_WRITE))
				!= (VM_PROT_READ | VM_PROT_WRITE)) {
				return(KERN_INVALID_RIGHT);
			}
		}
		if(named_entry->size < (offset + *upl_size))
			return(KERN_INVALID_ARGUMENT);

		/* the callers parameter offset is defined to be the */
		/* offset from beginning of named entry offset in object */
		offset = offset + named_entry->offset;

		if (named_entry->is_sub_map ||
		    named_entry->is_copy)
			return KERN_INVALID_ARGUMENT;

		named_entry_lock(named_entry);

		object = named_entry->backing.object;
		vm_object_reference(object);
		named_entry_unlock(named_entry);
	} else if (ip_kotype(port) == IKOT_MEM_OBJ_CONTROL) {
		memory_object_control_t	control;
		control = (memory_object_control_t) port;
		if (control == NULL)
			return (KERN_INVALID_ARGUMENT);
		object = memory_object_control_to_vm_object(control);
		if (object == VM_OBJECT_NULL)
			return (KERN_INVALID_ARGUMENT);
		vm_object_reference(object);
	} else {
		return KERN_INVALID_ARGUMENT;
	}
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (!object->private) {
		if (object->phys_contiguous) {
			*flags = UPL_PHYS_CONTIG;
		} else {
			*flags = 0;
		}
	} else {
		*flags = UPL_DEV_MEMORY | UPL_PHYS_CONTIG;
	}

	ret = vm_object_iopl_request(object,
				     offset,
				     *upl_size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     caller_flags,
				     tag);
	vm_object_deallocate(object);
	return ret;
}
/*
 *	Routine:	memory_object_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *
 */

kern_return_t
memory_object_upl_request(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags,
	int			tag)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_TERMINATED);

	return vm_object_upl_request(object,
				     offset,
				     size,
				     upl_ptr,
				     user_page_list,
				     page_list_count,
				     (upl_control_flags_t)(unsigned int) cntrl_flags,
				     tag);
}

/*
 *	Routine:	memory_object_super_upl_request [interface]
 *	Purpose:
 *		Cause the population of a portion of a vm_object
 *		in much the same way as memory_object_upl_request.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		However, the region may be expanded up to the super
 *		cluster size provided.
 */

kern_return_t
memory_object_super_upl_request(
	memory_object_control_t control,
	memory_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags,
	int			tag)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_super_upl_request(object,
					   offset,
					   size,
					   super_cluster,
					   upl,
					   user_page_list,
					   page_list_count,
					   (upl_control_flags_t)(unsigned int) cntrl_flags,
					   tag);
}
kern_return_t
memory_object_cluster_size(
	memory_object_control_t	control,
	memory_object_offset_t	*start,
	vm_size_t		*length,
	uint32_t		*io_streaming,
	memory_object_fault_info_t mo_fault_info)
{
	vm_object_t		object;
	vm_object_fault_info_t	fault_info;

	object = memory_object_control_to_vm_object(control);

	if (object == VM_OBJECT_NULL || object->paging_offset > *start)
		return KERN_INVALID_ARGUMENT;

	*start -= object->paging_offset;

	fault_info = (vm_object_fault_info_t)(uintptr_t) mo_fault_info;
	vm_object_cluster_size(object,
			       (vm_object_offset_t *)start,
			       length,
			       fault_info,
			       io_streaming);

	*start += object->paging_offset;

	return KERN_SUCCESS;
}
/*
 *	Routine:	host_default_memory_manager [interface]
 *	Purpose:
 *		set/get the default memory manager port and default cluster
 *		size.
 *
 *		If successful, consumes the supplied naked send right.
 */
kern_return_t
host_default_memory_manager(
	host_priv_t		host_priv,
	memory_object_default_t	*default_manager,
	__unused memory_object_cluster_size_t cluster_size)
{
	memory_object_default_t current_manager;
	memory_object_default_t new_manager;
	memory_object_default_t returned_manager;
	kern_return_t result = KERN_SUCCESS;

	if (host_priv == HOST_PRIV_NULL)
		return(KERN_INVALID_HOST);

	assert(host_priv == &realhost);

	new_manager = *default_manager;
	lck_mtx_lock(&memory_manager_default_lock);
	current_manager = memory_manager_default;
	returned_manager = MEMORY_OBJECT_DEFAULT_NULL;

	if (new_manager == MEMORY_OBJECT_DEFAULT_NULL) {
		/*
		 *	Retrieve the current value.
		 */
		returned_manager = current_manager;
		memory_object_default_reference(returned_manager);
	} else {
		/*
		 *	Only allow the kernel to change the value.
		 */
		extern task_t kernel_task;
		if (current_task() != kernel_task) {
			result = KERN_NO_ACCESS;
			goto out;
		}

		/*
		 *	If this is the first non-null manager, start
		 *	up the internal pager support.
		 */
		if (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
			result = vm_pageout_internal_start();
			if (result != KERN_SUCCESS)
				goto out;
		}

		/*
		 *	Retrieve the current value,
		 *	and replace it with the supplied value.
		 *	We return the old reference to the caller
		 *	but we have to take a reference on the new
		 *	manager.
		 */
		returned_manager = current_manager;
		memory_manager_default = new_manager;
		memory_object_default_reference(new_manager);

		/*
		 *	In case anyone's been waiting for a memory
		 *	manager to be established, wake them up.
		 */

		thread_wakeup((event_t) &memory_manager_default);

		/*
		 * Now that we have a default pager for anonymous memory,
		 * reactivate all the throttled pages (i.e. dirty pages with
		 * no pager).
		 */
		if (current_manager == MEMORY_OBJECT_DEFAULT_NULL)
		{
			vm_page_reactivate_all_throttled();
		}
	}
 out:
	lck_mtx_unlock(&memory_manager_default_lock);

	*default_manager = returned_manager;
	return(result);
}
/*
 *	Routine:	memory_manager_default_reference
 *	Purpose:
 *		Returns a naked send right for the default
 *		memory manager.  The returned right is always
 *		valid (not IP_NULL or IP_DEAD).
 */

__private_extern__ memory_object_default_t
memory_manager_default_reference(void)
{
	memory_object_default_t current_manager;

	lck_mtx_lock(&memory_manager_default_lock);
	current_manager = memory_manager_default;
	while (current_manager == MEMORY_OBJECT_DEFAULT_NULL) {
		wait_result_t res;

		res = lck_mtx_sleep(&memory_manager_default_lock,
				    LCK_SLEEP_DEFAULT,
				    (event_t) &memory_manager_default,
				    THREAD_UNINT);
		assert(res == THREAD_AWAKENED);
		current_manager = memory_manager_default;
	}
	memory_object_default_reference(current_manager);
	lck_mtx_unlock(&memory_manager_default_lock);

	return current_manager;
}

/*
 *	Routine:	memory_manager_default_check
 *
 *	Purpose:
 *		Check whether a default memory manager has been set
 *		up yet, or not. Returns KERN_SUCCESS if dmm exists,
 *		and KERN_FAILURE if dmm does not exist.
 *
 *		If there is no default memory manager, log an error,
 *		but only the first time.
 *
 */
__private_extern__ kern_return_t
memory_manager_default_check(void)
{
	memory_object_default_t current;

	lck_mtx_lock(&memory_manager_default_lock);
	current = memory_manager_default;
	if (current == MEMORY_OBJECT_DEFAULT_NULL) {
		static boolean_t logged;	/* initialized to 0 */
		boolean_t	complain = !logged;
		logged = TRUE;
		lck_mtx_unlock(&memory_manager_default_lock);
		if (complain)
			printf("Warning: No default memory manager\n");
		return(KERN_FAILURE);
	} else {
		lck_mtx_unlock(&memory_manager_default_lock);
		return(KERN_SUCCESS);
	}
}
__private_extern__ void
memory_manager_default_init(void)
{
	memory_manager_default = MEMORY_OBJECT_DEFAULT_NULL;
	lck_mtx_init(&memory_manager_default_lock, &vm_object_lck_grp, &vm_object_lck_attr);
}
/* Allow manipulation of individual page state.  This is actually part of */
/* the UPL regimen but takes place on the object rather than on a UPL */

kern_return_t
memory_object_page_op(
	memory_object_control_t	control,
	memory_object_offset_t	offset,
	int			ops,
	ppnum_t			*phys_entry,
	int			*flags)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_page_op(object, offset, ops, phys_entry, flags);
}

/*
 * memory_object_range_op offers performance enhancement over
 * memory_object_page_op for page_op functions which do not require page
 * level state to be returned from the call.  Page_op was created to provide
 * a low-cost alternative to page manipulation via UPLs when only a single
 * page was involved.  The range_op call establishes the ability in the _op
 * family of functions to work on multiple pages where the lack of page level
 * state handling allows the caller to avoid the overhead of the upl structures.
 */

kern_return_t
memory_object_range_op(
	memory_object_control_t	control,
	memory_object_offset_t	offset_beg,
	memory_object_offset_t	offset_end,
	int			ops,
	int			*range)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	return vm_object_range_op(object,
				  offset_beg,
				  offset_end,
				  ops,
				  (uint32_t *) range);
}
void
memory_object_mark_used(
	memory_object_control_t	control)
{
	vm_object_t		object;

	if (control == NULL)
		return;

	object = memory_object_control_to_vm_object(control);

	if (object != VM_OBJECT_NULL)
		vm_object_cache_remove(object);
}

void
memory_object_mark_unused(
	memory_object_control_t	control,
	__unused boolean_t	rage)
{
	vm_object_t		object;

	if (control == NULL)
		return;

	object = memory_object_control_to_vm_object(control);

	if (object != VM_OBJECT_NULL)
		vm_object_cache_add(object);
}

void
memory_object_mark_io_tracking(
	memory_object_control_t control)
{
	vm_object_t		object;

	if (control == NULL)
		return;
	object = memory_object_control_to_vm_object(control);

	if (object != VM_OBJECT_NULL) {
		vm_object_lock(object);
		object->io_tracking = TRUE;
		vm_object_unlock(object);
	}
}
#if CONFIG_SECLUDED_MEMORY
void
memory_object_mark_eligible_for_secluded(
	memory_object_control_t	control,
	boolean_t		eligible_for_secluded)
{
	vm_object_t		object;

	if (control == NULL)
		return;
	object = memory_object_control_to_vm_object(control);

	if (object == VM_OBJECT_NULL) {
		return;
	}

	vm_object_lock(object);
	if (eligible_for_secluded &&
	    secluded_for_filecache && /* global boot-arg */
	    !object->eligible_for_secluded) {
		object->eligible_for_secluded = TRUE;
		vm_page_secluded.eligible_for_secluded += object->resident_page_count;
	} else if (!eligible_for_secluded &&
		   object->eligible_for_secluded) {
		object->eligible_for_secluded = FALSE;
		vm_page_secluded.eligible_for_secluded -= object->resident_page_count;
		if (object->resident_page_count) {
			/* XXX FBDP TODO: flush pages from secluded queue? */
			// printf("FBDP TODO: flush %d pages from %p from secluded queue\n", object->resident_page_count, object);
		}
	}
	vm_object_unlock(object);
}
#endif /* CONFIG_SECLUDED_MEMORY */
kern_return_t
memory_object_pages_resident(
	memory_object_control_t	control,
	boolean_t		*has_pages_resident)
{
	vm_object_t		object;

	*has_pages_resident = FALSE;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (object->resident_page_count)
		*has_pages_resident = TRUE;

	return (KERN_SUCCESS);
}

kern_return_t
memory_object_signed(
	memory_object_control_t	control,
	boolean_t		is_signed)
{
	vm_object_t		object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	vm_object_lock(object);
	object->code_signed = is_signed;
	vm_object_unlock(object);

	return KERN_SUCCESS;
}

boolean_t
memory_object_is_signed(
	memory_object_control_t	control)
{
	boolean_t	is_signed;
	vm_object_t	object;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return FALSE;

	vm_object_lock_shared(object);
	is_signed = object->code_signed;
	vm_object_unlock(object);

	return is_signed;
}

boolean_t
memory_object_is_shared_cache(
	memory_object_control_t	control)
{
	vm_object_t	object = VM_OBJECT_NULL;

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL)
		return FALSE;

	return object->object_is_shared_cache;
}
static zone_t mem_obj_control_zone;

__private_extern__ void
memory_object_control_bootstrap(void)
{
	int	i;

	i = (vm_size_t) sizeof (struct memory_object_control);
	mem_obj_control_zone = zinit (i, 8192*i, 4096, "mem_obj_control");
	zone_change(mem_obj_control_zone, Z_CALLERACCT, FALSE);
	zone_change(mem_obj_control_zone, Z_NOENCRYPT, TRUE);
	return;
}

__private_extern__ memory_object_control_t
memory_object_control_allocate(
	vm_object_t		object)
{
	memory_object_control_t control;

	control = (memory_object_control_t)zalloc(mem_obj_control_zone);
	if (control != MEMORY_OBJECT_CONTROL_NULL) {
		control->moc_object = object;
		control->moc_ikot = IKOT_MEM_OBJ_CONTROL; /* fake ip_kotype */
	}
	return (control);
}

__private_extern__ void
memory_object_control_collapse(
	memory_object_control_t control,
	vm_object_t		object)
{
	assert((control->moc_object != VM_OBJECT_NULL) &&
	       (control->moc_object != object));
	control->moc_object = object;
}

__private_extern__ vm_object_t
memory_object_control_to_vm_object(
	memory_object_control_t	control)
{
	if (control == MEMORY_OBJECT_CONTROL_NULL ||
	    control->moc_ikot != IKOT_MEM_OBJ_CONTROL)
		return VM_OBJECT_NULL;

	return (control->moc_object);
}
__private_extern__ vm_object_t
memory_object_to_vm_object(
	memory_object_t	mem_obj)
{
	memory_object_control_t	mo_control;

	if (mem_obj == MEMORY_OBJECT_NULL) {
		return VM_OBJECT_NULL;
	}
	mo_control = mem_obj->mo_control;
	if (mo_control == NULL) {
		return VM_OBJECT_NULL;
	}
	return memory_object_control_to_vm_object(mo_control);
}

memory_object_control_t
convert_port_to_mo_control(
	__unused mach_port_t	port)
{
	return MEMORY_OBJECT_CONTROL_NULL;
}

mach_port_t
convert_mo_control_to_port(
	__unused memory_object_control_t	control)
{
	return MACH_PORT_NULL;
}

void
memory_object_control_reference(
	__unused memory_object_control_t	control)
{
	return;
}

/*
 * We only ever issue one of these references, so kill it
 * when that gets released (should switch the real reference
 * counting in true port-less EMMI).
 */
void
memory_object_control_deallocate(
	memory_object_control_t	control)
{
	zfree(mem_obj_control_zone, control);
}

void
memory_object_control_disable(
	memory_object_control_t	control)
{
	assert(control->moc_object != VM_OBJECT_NULL);
	control->moc_object = VM_OBJECT_NULL;
}

void
memory_object_default_reference(
	memory_object_default_t	dmm)
{
	ipc_port_make_send(dmm);
}

void
memory_object_default_deallocate(
	memory_object_default_t	dmm)
{
	ipc_port_release_send(dmm);
}

memory_object_t
convert_port_to_memory_object(
	__unused mach_port_t	port)
{
	return (MEMORY_OBJECT_NULL);
}

mach_port_t
convert_memory_object_to_port(
	__unused memory_object_t	object)
{
	return (MACH_PORT_NULL);
}
/* Routine memory_object_reference */
void memory_object_reference(
	memory_object_t memory_object)
{
	(memory_object->mo_pager_ops->memory_object_reference)(
		memory_object);
}

/* Routine memory_object_deallocate */
void memory_object_deallocate(
	memory_object_t memory_object)
{
	(memory_object->mo_pager_ops->memory_object_deallocate)(
		memory_object);
}

/* Routine memory_object_init */
kern_return_t memory_object_init
(
	memory_object_t memory_object,
	memory_object_control_t memory_control,
	memory_object_cluster_size_t memory_object_page_size
)
{
	return (memory_object->mo_pager_ops->memory_object_init)(
		memory_object,
		memory_control,
		memory_object_page_size);
}

/* Routine memory_object_terminate */
kern_return_t memory_object_terminate
(
	memory_object_t memory_object
)
{
	return (memory_object->mo_pager_ops->memory_object_terminate)(
		memory_object);
}

/* Routine memory_object_data_request */
kern_return_t memory_object_data_request
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_cluster_size_t length,
	vm_prot_t desired_access,
	memory_object_fault_info_t fault_info
)
{
	return (memory_object->mo_pager_ops->memory_object_data_request)(
		memory_object,
		offset,
		length,
		desired_access,
		fault_info);
}

/* Routine memory_object_data_return */
kern_return_t memory_object_data_return
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size,
	memory_object_offset_t *resid_offset,
	int	*io_error,
	boolean_t dirty,
	boolean_t kernel_copy,
	int	upl_flags
)
{
	return (memory_object->mo_pager_ops->memory_object_data_return)(
		memory_object,
		offset,
		size,
		resid_offset,
		io_error,
		dirty,
		kernel_copy,
		upl_flags);
}

/* Routine memory_object_data_initialize */
kern_return_t memory_object_data_initialize
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_cluster_size_t size
)
{
	return (memory_object->mo_pager_ops->memory_object_data_initialize)(
		memory_object,
		offset,
		size);
}

/* Routine memory_object_data_unlock */
kern_return_t memory_object_data_unlock
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_prot_t desired_access
)
{
	return (memory_object->mo_pager_ops->memory_object_data_unlock)(
		memory_object,
		offset,
		size,
		desired_access);
}

/* Routine memory_object_synchronize */
kern_return_t memory_object_synchronize
(
	memory_object_t memory_object,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_sync_t sync_flags
)
{
	panic("memory_object_synchronize no longer supported\n");

	return (memory_object->mo_pager_ops->memory_object_synchronize)(
		memory_object,
		offset,
		size,
		sync_flags);
}
/*
 * memory_object_map() is called by VM (in vm_map_enter() and its variants)
 * each time a "named" VM object gets mapped directly or indirectly
 * (copy-on-write mapping).  A "named" VM object has an extra reference held
 * by the pager to keep it alive until the pager decides that the
 * memory object (and its VM object) can be reclaimed.
 * VM calls memory_object_last_unmap() (in vm_object_deallocate()) when all
 * the mappings of that memory object have been removed.
 *
 * For a given VM object, calls to memory_object_map() and memory_object_unmap()
 * are serialized (through object->mapping_in_progress), to ensure that the
 * pager gets a consistent view of the mapping status of the memory object.
 *
 * This allows the pager to keep track of how many times a memory object
 * has been mapped and with which protections, to decide when it can be
 * reclaimed.
 */

/* Routine memory_object_map */
kern_return_t memory_object_map
(
	memory_object_t memory_object,
	vm_prot_t prot
)
{
	return (memory_object->mo_pager_ops->memory_object_map)(
		memory_object,
		prot);
}

/* Routine memory_object_last_unmap */
kern_return_t memory_object_last_unmap
(
	memory_object_t memory_object
)
{
	return (memory_object->mo_pager_ops->memory_object_last_unmap)(
		memory_object);
}
/* Routine memory_object_data_reclaim */
kern_return_t memory_object_data_reclaim
(
	memory_object_t memory_object,
	boolean_t	reclaim_backing_store
)
{
	if (memory_object->mo_pager_ops->memory_object_data_reclaim == NULL)
		return KERN_NOT_SUPPORTED;
	return (memory_object->mo_pager_ops->memory_object_data_reclaim)(
		memory_object,
		reclaim_backing_store);
}
upl_t
convert_port_to_upl(
	ipc_port_t	port)
{
	upl_t upl;

	ip_lock(port);
	if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
		ip_unlock(port);
		return (upl_t)NULL;
	}
	upl = (upl_t) port->ip_kobject;
	ip_unlock(port);
	upl_lock(upl);
	upl->ref_count += 1;
	upl_unlock(upl);

	return upl;
}

mach_port_t
convert_upl_to_port(
	__unused upl_t	upl)
{
	return MACH_PORT_NULL;
}

__private_extern__ void
upl_no_senders(
	__unused ipc_port_t		port,
	__unused mach_port_mscount_t	mscount)
{
	return;
}