/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  File:   vm/vm_pageout.c
 *  Author: Avadis Tevanian, Jr., Michael Wayne Young
 *
 *  The proverbial page-out daemon.
 */
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>

#include <machine/vm_tuning.h>
#include <machine/commpage.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h> /* must be last */
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>

#include <san/kasan.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

#include <libkern/OSDebug.h>
extern void mbuf_drain(boolean_t);

#if VM_PRESSURE_EVENTS

#if CONFIG_JETSAM
extern unsigned int memorystatus_available_pages;
extern unsigned int memorystatus_available_pages_pressure;
extern unsigned int memorystatus_available_pages_critical;
#else /* CONFIG_JETSAM */
extern uint64_t memorystatus_available_pages;
extern uint64_t memorystatus_available_pages_pressure;
extern uint64_t memorystatus_available_pages_critical;
#endif /* CONFIG_JETSAM */

extern unsigned int memorystatus_frozen_count;
extern unsigned int memorystatus_suspended_count;
extern vm_pressure_level_t memorystatus_vm_pressure_level;

void vm_pressure_response(void);
extern void consider_vm_pressure_events(void);

#define MEMORYSTATUS_SUSPENDED_THRESHOLD  4
#endif /* VM_PRESSURE_EVENTS */
#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE  /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
#ifdef CONFIG_EMBEDDED
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE  1024
#else
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE  4096
#endif
#endif /* VM_PAGEOUT_BURST_INACTIVE_THROTTLE */

#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF  100 /* number of pages to move to break deadlock */
#endif /* VM_PAGEOUT_DEADLOCK_RELIEF */

#ifndef VM_PAGE_LAUNDRY_MAX
#define VM_PAGE_LAUNDRY_MAX  128UL  /* maximum pageouts on a given pageout queue */
#endif /* VM_PAGE_LAUNDRY_MAX */

#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT  1    /* milliseconds */
#endif /* VM_PAGEOUT_BURST_WAIT */

#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT  50   /* milliseconds */
#endif /* VM_PAGEOUT_EMPTY_WAIT */

#ifndef VM_PAGEOUT_DEADLOCK_WAIT
#define VM_PAGEOUT_DEADLOCK_WAIT  100   /* milliseconds */
#endif /* VM_PAGEOUT_DEADLOCK_WAIT */

#ifndef VM_PAGEOUT_IDLE_WAIT
#define VM_PAGEOUT_IDLE_WAIT  10    /* milliseconds */
#endif /* VM_PAGEOUT_IDLE_WAIT */

#ifndef VM_PAGEOUT_SWAP_WAIT
#define VM_PAGEOUT_SWAP_WAIT  10    /* milliseconds */
#endif /* VM_PAGEOUT_SWAP_WAIT */

#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
#endif /* VM_PAGE_SPECULATIVE_TARGET */
/*
 * To obtain a reasonable LRU approximation, the inactive queue
 * needs to be large enough to give pages on it a chance to be
 * referenced a second time.  This macro defines the fraction
 * of active+inactive pages that should be inactive.
 * The pageout daemon uses it to update vm_page_inactive_target.
 *
 * If vm_page_free_count falls below vm_page_free_target and
 * vm_page_inactive_count is below vm_page_inactive_target,
 * then the pageout daemon starts running.
 */

#ifndef VM_PAGE_INACTIVE_TARGET
#define VM_PAGE_INACTIVE_TARGET(avail)  ((avail) * 1 / 2)
#endif /* VM_PAGE_INACTIVE_TARGET */
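
/*
 * Worked example (illustrative, hypothetical numbers): with the default
 * definition above, the inactive target is simply half of the pageable
 * pool.  For roughly 400000 active+inactive+speculative pages,
 *
 *     vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(400000)
 *                             = 400000 * 1 / 2 = 200000 pages
 *
 * vm_page_balance_inactive() below moves pages from the active queue to
 * the inactive queue whenever the inactive+speculative total drops below
 * this target.
 */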
/*
 * Once the pageout daemon starts running, it keeps going
 * until vm_page_free_count meets or exceeds vm_page_free_target.
 */

#ifndef VM_PAGE_FREE_TARGET
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_TARGET(free)  (15 + (free) / 100)
#else
#define VM_PAGE_FREE_TARGET(free)  (15 + (free) / 80)
#endif
#endif /* VM_PAGE_FREE_TARGET */

/*
 * The pageout daemon always starts running once vm_page_free_count
 * falls below vm_page_free_min.
 */

#ifndef VM_PAGE_FREE_MIN
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_MIN(free)  (10 + (free) / 200)
#else
#define VM_PAGE_FREE_MIN(free)  (10 + (free) / 100)
#endif
#endif /* VM_PAGE_FREE_MIN */
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_RESERVED_LIMIT  100
#define VM_PAGE_FREE_MIN_LIMIT       1500
#define VM_PAGE_FREE_TARGET_LIMIT    2000
#else
#define VM_PAGE_FREE_RESERVED_LIMIT  1700
#define VM_PAGE_FREE_MIN_LIMIT       3500
#define VM_PAGE_FREE_TARGET_LIMIT    4000
#endif

/*
 * When vm_page_free_count falls below vm_page_free_reserved,
 * only vm-privileged threads can allocate pages.  vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
 * operation by dipping into the reserved pool of pages.
 */

#ifndef VM_PAGE_FREE_RESERVED
#define VM_PAGE_FREE_RESERVED(n) \
    ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif /* VM_PAGE_FREE_RESERVED */
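
/*
 * Worked example (illustrative): with VM_PAGE_LAUNDRY_MAX at its default
 * of 128 and a caller-supplied n of 4,
 *
 *     VM_PAGE_FREE_RESERVED(4) = (6 * 128) + 4 = 772 pages
 *
 * are held back for vm-privileged threads, so the pageout machinery can
 * keep making forward progress even when the free list is nearly empty.
 */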
/*
 * When we dequeue pages from the inactive list, they are
 * reactivated (ie, put back on the active queue) if referenced.
 * However, it is possible to starve the free list if other
 * processors are referencing pages faster than we can turn off
 * the referenced bit.  So we limit the number of reactivations
 * we will make per call of vm_pageout_scan().
 */
#define VM_PAGE_REACTIVATE_LIMIT_MAX  20000

#ifndef VM_PAGE_REACTIVATE_LIMIT
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_REACTIVATE_LIMIT(avail)  (VM_PAGE_INACTIVE_TARGET(avail) / 2)
#else
#define VM_PAGE_REACTIVATE_LIMIT(avail)  (MAX((avail) * 1 / 20, VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif
#endif /* VM_PAGE_REACTIVATE_LIMIT */
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM  1000
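
/*
 * Worked example (illustrative, hypothetical numbers): on a non-embedded
 * configuration with 600000 active+inactive pages,
 *
 *     VM_PAGE_REACTIVATE_LIMIT(600000) = MAX(600000 / 20, 20000) = 30000
 *
 * so a single pass of vm_pageout_scan() will reactivate at most 30000
 * referenced pages before it starts forcing reclamation instead.
 */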
extern boolean_t hibernate_cleaning_in_progress;

/*
 * Forward declarations for internal routines.
 */
struct cq {
    struct vm_pageout_queue *q;
    void                    *current_chead;
    char                    *scratch_buf;
    int                     id;
};

struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];

#if VM_PRESSURE_EVENTS
void vm_pressure_thread(void);

boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);

boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
#endif

void vm_pageout_garbage_collect(int);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(struct cq *cq);
static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);

extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);

void vm_tests(void); /* forward */

#if !CONFIG_EMBEDDED
static boolean_t vm_pageout_waiter  = FALSE;
static boolean_t vm_pageout_running = FALSE;
#endif /* !CONFIG_EMBEDDED */

#if DEVELOPMENT || DEBUG
struct vm_pageout_debug vm_pageout_debug;
#endif
struct vm_pageout_vminfo vm_pageout_vminfo;
struct vm_pageout_state vm_pageout_state;
struct vm_config vm_config;

struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

int         vm_upl_wait_for_pages = 0;
vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;

boolean_t (*volatile consider_buffer_cache_collect)(int) = NULL;

int vm_debug_events = 0;

lck_grp_t vm_pageout_lck_grp;

#if CONFIG_MEMORYSTATUS
extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);

uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;

#endif /* CONFIG_MEMORYSTATUS */
/*
 * Routine:     vm_pageout_object_terminate
 *      Destroy the pageout_object, and perform all of the
 *      required cleanup actions.
 *
 *      The object must be locked, and will be returned locked.
 */
void
vm_pageout_object_terminate(
    vm_object_t object)
{
    vm_object_t shadow_object;

    /*
     * Deal with the deallocation (last reference) of a pageout object
     * (used for cleaning-in-place) by dropping the paging references/
     * freeing pages in the original object.
     */
    assert(object->pageout);
    shadow_object = object->shadow;
    vm_object_lock(shadow_object);

    while (!vm_page_queue_empty(&object->memq)) {
        vm_page_t          p, m;
        vm_object_offset_t offset;

        p = (vm_page_t) vm_page_queue_first(&object->memq);

        assert(p->vmp_private);
        assert(p->vmp_free_when_done);
        p->vmp_free_when_done = FALSE;
        assert(!p->vmp_cleaning);
        assert(!p->vmp_laundry);

        offset = p->vmp_offset;

        m = vm_page_lookup(shadow_object,
            offset + object->vo_shadow_offset);

        if (m == VM_PAGE_NULL) {
            continue;
        }

        assert((m->vmp_dirty) || (m->vmp_precious) ||
            (m->vmp_busy && m->vmp_cleaning));

        /*
         * Handle the trusted pager throttle.
         * Also decrement the burst throttle (if external).
         */
        vm_page_lock_queues();
        if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
            vm_pageout_throttle_up(m);
        }

        /*
         * Handle the "target" page(s). These pages are to be freed if
         * successfully cleaned. Target pages are always busy, and are
         * wired exactly once. The initial target pages are not mapped,
         * (so cannot be referenced or modified) but converted target
         * pages may have been modified between the selection as an
         * adjacent page and conversion to a target.
         */
        if (m->vmp_free_when_done) {
            assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
            assert(m->vmp_wire_count == 1);
            m->vmp_cleaning = FALSE;
            m->vmp_free_when_done = FALSE;
            /*
             * Revoke all access to the page. Since the object is
             * locked, and the page is busy, this prevents the page
             * from being dirtied after the pmap_disconnect() call
             * returns.
             *
             * Since the page is left "dirty" but "not modified", we
             * can detect whether the page was redirtied during
             * pageout by checking the modify state.
             */
            if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
                SET_PAGE_DIRTY(m, FALSE);
            } else {
                m->vmp_dirty = FALSE;
            }

            if (m->vmp_dirty) {
                vm_page_unwire(m, TRUE);    /* reactivates */
                VM_STAT_INCR(reactivations);
            } else {
                vm_page_free(m);    /* clears busy, etc. */
            }
            vm_page_unlock_queues();
            continue;
        }
        /*
         * Handle the "adjacent" pages. These pages were cleaned in
         * place, and should be left alone.
         * If prep_pin_count is nonzero, then someone is using the
         * page, so make it active.
         */
        if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
            if (m->vmp_reference) {
                vm_page_activate(m);
            } else {
                vm_page_deactivate(m);
            }
        }
        if (m->vmp_overwriting) {
            /*
             * the (COPY_OUT_FROM == FALSE) request_page_list case
             */
            if (m->vmp_busy) {
                /*
                 * We do not re-set m->vmp_dirty !
                 * The page was busy so no extraneous activity
                 * could have occurred. COPY_INTO is a read into the
                 * new pages. CLEAN_IN_PLACE does actually write
                 * out the pages but handling outside of this code
                 * will take care of resetting dirty. We clear the
                 * modify however for the Programmed I/O case.
                 */
                pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

                m->vmp_busy = FALSE;
                m->vmp_absent = FALSE;
            } else {
                /*
                 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
                 * Occurs when the original page was wired
                 * at the time of the list request
                 */
                assert(VM_PAGE_WIRED(m));
                vm_page_unwire(m, TRUE);    /* reactivates */
            }
            m->vmp_overwriting = FALSE;
        } else {
            m->vmp_dirty = FALSE;
        }
        m->vmp_cleaning = FALSE;

        /*
         * Wakeup any thread waiting for the page to be un-cleaning.
         */
        PAGE_WAKEUP(m);
        vm_page_unlock_queues();
    }
    /*
     * Account for the paging reference taken in vm_paging_object_allocate.
     */
    vm_object_activity_end(shadow_object);
    vm_object_unlock(shadow_object);

    assert(object->ref_count == 0);
    assert(object->paging_in_progress == 0);
    assert(object->activity_in_progress == 0);
    assert(object->resident_page_count == 0);
}
/*
 * Routine:     vm_pageclean_setup
 *
 * Purpose:     setup a page to be cleaned (made non-dirty), but not
 *              necessarily flushed from the VM page cache.
 *              This is accomplished by cleaning in place.
 *
 *              The page must not be busy, and new_object
 *              must be locked.
 */
void
vm_pageclean_setup(
    vm_page_t          m,
    vm_page_t          new_m,
    vm_object_t        new_object,
    vm_object_offset_t new_offset)
{
    assert(!m->vmp_busy);
    assert(!m->vmp_cleaning);

    XPR(XPR_VM_PAGEOUT,
        "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
        VM_PAGE_OBJECT(m), m->vmp_offset, m,
        new_m, new_offset);

    pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

    /*
     * Mark original page as cleaning in place.
     */
    m->vmp_cleaning = TRUE;
    SET_PAGE_DIRTY(m, FALSE);
    m->vmp_precious = FALSE;

    /*
     * Convert the fictitious page to a private shadow of
     * the real page.
     */
    assert(new_m->vmp_fictitious);
    assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
    new_m->vmp_fictitious = FALSE;
    new_m->vmp_private = TRUE;
    new_m->vmp_free_when_done = TRUE;
    VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));

    vm_page_lockspin_queues();
    vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
    vm_page_unlock_queues();

    vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
    assert(!new_m->vmp_wanted);
    new_m->vmp_busy = FALSE;
}
/*
 * Routine:     vm_pageout_initialize_page
 *
 *      Causes the specified page to be initialized in
 *      the appropriate memory object. This routine is used to push
 *      pages into a copy-object when they are modified in the
 *      permanent object.
 *
 *      The page is moved to a temporary object and paged out.
 *
 *      The page in question must not be on any pageout queues.
 *      The object to which it belongs must be locked.
 *      The page must be busy, but not hold a paging reference.
 *
 *      Move this page to a completely new object.
 */
void
vm_pageout_initialize_page(
    vm_page_t m)
{
    vm_object_t        object;
    vm_object_offset_t paging_offset;
    memory_object_t    pager;

    XPR(XPR_VM_PAGEOUT,
        "vm_pageout_initialize_page, page 0x%X\n",
        m, 0, 0, 0, 0);

    assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

    object = VM_PAGE_OBJECT(m);

    assert(object->internal);

    /*
     * Verify that we really want to clean this page
     */
    assert(!m->vmp_absent);
    assert(!m->vmp_error);
    assert(m->vmp_dirty);

    /*
     * Create a paging reference to let us play with the object.
     */
    paging_offset = m->vmp_offset + object->paging_offset;

    if (m->vmp_absent || m->vmp_error || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
        panic("reservation without pageout?"); /* alan */

        vm_object_unlock(object);

        return;
    }

    /*
     * If there's no pager, then we can't clean the page. This should
     * never happen since this should be a copy object and therefore not
     * an external object, so the pager should always be there.
     */
    pager = object->pager;

    if (pager == MEMORY_OBJECT_NULL) {
        panic("missing pager for copy object");

        return;
    }

    /*
     * set the page for future call to vm_fault_list_request
     */
    pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
    SET_PAGE_DIRTY(m, FALSE);

    /*
     * keep the object from collapsing or terminating
     */
    vm_object_paging_begin(object);
    vm_object_unlock(object);

    /*
     * Write the data to its pager.
     * Note that the data is passed by naming the new object,
     * not a virtual address; the pager interface has been
     * manipulated to use the "internal memory" data type.
     * [The object reference from its allocation is donated
     * to the eventual recipient.]
     */
    memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);

    vm_object_lock(object);
    vm_object_paging_end(object);
}
/*
 * vm_pageout_cluster:
 *
 * Given a page, queue it to the appropriate I/O thread,
 * which will page it out and attempt to clean adjacent pages
 * in the same operation.
 *
 * The object and queues must be locked. We will take a
 * paging reference to prevent deallocation or collapse when we
 * release the object lock back at the call site.  The I/O thread
 * is responsible for consuming this reference.
 *
 * The page must not be on any pageout queue.
 */
#if DEVELOPMENT || DEBUG
vmct_stats_t vmct_stats;

int32_t  vmct_active = 0;
uint64_t vm_compressor_epoch_start = 0;
uint64_t vm_compressor_epoch_stop = 0;

typedef enum vmct_state_t {
    VMCT_IDLE,
    VMCT_AWAKENED,
    VMCT_ACTIVE,
} vmct_state_t;
vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
#endif

void
vm_pageout_cluster(vm_page_t m)
{
    vm_object_t object = VM_PAGE_OBJECT(m);
    struct vm_pageout_queue *q;

    XPR(XPR_VM_PAGEOUT,
        "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
        object, m->vmp_offset, m, 0, 0);

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    vm_object_lock_assert_exclusive(object);

    /*
     * Only a certain kind of page is appreciated here.
     */
    assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
    assert(!m->vmp_cleaning && !m->vmp_laundry);
    assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);

    /*
     * protect the object from collapse or termination
     */
    vm_object_activity_begin(object);

    if (object->internal == TRUE) {
        assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

        q = &vm_pageout_queue_internal;
    } else {
        q = &vm_pageout_queue_external;
    }

    /*
     * pgo_laundry count is tied to the laundry bit
     */
    m->vmp_laundry = TRUE;
    q->pgo_laundry++;

    m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
    vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);

    if (q->pgo_idle == TRUE) {
        q->pgo_idle = FALSE;
        thread_wakeup((event_t) &q->pgo_pending);
    }
}
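
/*
 * Illustrative sketch (not part of the original source): for a dirty
 * anonymous page the typical path through this code is
 *
 *     vm_pageout_scan()
 *       -> vm_pageout_cluster(m)          object->internal == TRUE
 *            -> enqueue on vm_pageout_queue_internal.pgo_pending
 *            -> thread_wakeup(&q->pgo_pending)
 *                 -> picked up by vm_pageout_iothread_internal()
 *
 * while dirty file-backed pages land on vm_pageout_queue_external and are
 * handed to vm_pageout_iothread_external() instead.
 */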
/*
 * A page is back from laundry or we are stealing it back from
 * the laundering state.  See if there are some pages waiting to
 * go to laundry and if we can let some of them go now.
 *
 * Object and page queues must be locked.
 */
void
vm_pageout_throttle_up(
    vm_page_t m)
{
    struct vm_pageout_queue *q;
    vm_object_t m_object;

    m_object = VM_PAGE_OBJECT(m);

    assert(m_object != VM_OBJECT_NULL);
    assert(m_object != kernel_object);

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    vm_object_lock_assert_exclusive(m_object);

    if (m_object->internal == TRUE) {
        q = &vm_pageout_queue_internal;
    } else {
        q = &vm_pageout_queue_external;
    }

    if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
        vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
        m->vmp_q_state = VM_PAGE_NOT_ON_Q;

        VM_PAGE_ZERO_PAGEQ_ENTRY(m);

        vm_object_activity_end(m_object);

        VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
    }
    if (m->vmp_laundry == TRUE) {
        m->vmp_laundry = FALSE;
        q->pgo_laundry--;

        if (q->pgo_throttled == TRUE) {
            q->pgo_throttled = FALSE;
            thread_wakeup((event_t) &q->pgo_laundry);
        }
        if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
            q->pgo_draining = FALSE;
            thread_wakeup((event_t) (&q->pgo_laundry + 1));
        }
        VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
    }
}
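
/*
 * Illustrative note (not part of the original source): pgo_laundry counts
 * the pages currently "in flight" to this queue's pager.  When the queue
 * is throttled, vm_pageout_scan() sets pgo_throttled and sleeps on
 * &q->pgo_laundry; the wakeups above are the other half of that handshake.
 * The separate (&q->pgo_laundry + 1) event is the "drained" channel that
 * vm_pageout_page_queue() below waits on, via pgo_draining, while the
 * internal queue empties.
 */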
static void
vm_pageout_throttle_up_batch(
    struct vm_pageout_queue *q,
    int batch_cnt)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);

    q->pgo_laundry -= batch_cnt;

    if (q->pgo_throttled == TRUE) {
        q->pgo_throttled = FALSE;
        thread_wakeup((event_t) &q->pgo_laundry);
    }
    if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
        q->pgo_draining = FALSE;
        thread_wakeup((event_t) (&q->pgo_laundry + 1));
    }
}
/*
 * VM memory pressure monitoring.
 *
 * vm_pageout_scan() keeps track of the number of pages it considers and
 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
 *
 * compute_memory_pressure() is called every second from compute_averages()
 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
 * of reclaimed pages in a new vm_pageout_stat[] bucket.
 *
 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
 * The caller provides the number of seconds ("nsecs") worth of statistics
 * it wants, up to 30 seconds.
 * It computes the number of pages reclaimed in the past "nsecs" seconds and
 * also returns the number of pages the system still needs to reclaim at this
 * moment in time.
 */
#if DEVELOPMENT || DEBUG
#define VM_PAGEOUT_STAT_SIZE    (30 * 8) + 1
#else
#define VM_PAGEOUT_STAT_SIZE    (1 * 8) + 1
#endif

struct vm_pageout_stat {
    unsigned long vm_page_active_count;
    unsigned long vm_page_speculative_count;
    unsigned long vm_page_inactive_count;
    unsigned long vm_page_anonymous_count;

    unsigned long vm_page_free_count;
    unsigned long vm_page_wire_count;
    unsigned long vm_page_compressor_count;

    unsigned long vm_page_pages_compressed;
    unsigned long vm_page_pageable_internal_count;
    unsigned long vm_page_pageable_external_count;
    unsigned long vm_page_xpmapped_external_count;

    unsigned int pages_grabbed;
    unsigned int pages_freed;

    unsigned int pages_compressed;
    unsigned int pages_grabbed_by_compressor;
    unsigned int failed_compressions;

    unsigned int pages_evicted;
    unsigned int pages_purged;

    unsigned int considered;
    unsigned int considered_bq_internal;
    unsigned int considered_bq_external;

    unsigned int skipped_external;
    unsigned int filecache_min_reactivations;

    unsigned int freed_speculative;
    unsigned int freed_cleaned;
    unsigned int freed_internal;
    unsigned int freed_external;

    unsigned int cleaned_dirty_external;
    unsigned int cleaned_dirty_internal;

    unsigned int inactive_referenced;
    unsigned int inactive_nolock;
    unsigned int reactivation_limit_exceeded;
    unsigned int forced_inactive_reclaim;

    unsigned int throttled_internal_q;
    unsigned int throttled_external_q;

    unsigned int phantom_ghosts_found;
    unsigned int phantom_ghosts_added;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };

unsigned int vm_pageout_stat_now = 0;

#define VM_PAGEOUT_STAT_BEFORE(i) \
    (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
    (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
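
/*
 * Illustrative note (not part of the original source): vm_pageout_stats[]
 * is a ring buffer indexed by vm_pageout_stat_now.  On a release kernel
 * VM_PAGEOUT_STAT_SIZE is (1 * 8) + 1 == 9, so
 *
 *     VM_PAGEOUT_STAT_BEFORE(0) == 8    (wraps to the last bucket)
 *     VM_PAGEOUT_STAT_AFTER(8)  == 0    (wraps back to the first)
 *
 * Walking VM_PAGEOUT_STAT_BEFORE() from the current bucket visits the most
 * recent samples first, which is how mach_vm_pressure_monitor() below sums
 * its "pages reclaimed" totals, treating each bucket as 1/8 of a second.
 */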
#if VM_PAGE_BUCKETS_CHECK
int vm_page_buckets_check_interval = 80; /* in eighths of a second */
#endif /* VM_PAGE_BUCKETS_CHECK */

void record_memory_pressure(void);
void
record_memory_pressure(void)
{
    unsigned int vm_pageout_next;

#if VM_PAGE_BUCKETS_CHECK
    /* check the consistency of VM page buckets at regular interval */
    static int counter = 0;
    if ((++counter % vm_page_buckets_check_interval) == 0) {
        vm_page_buckets_check();
    }
#endif /* VM_PAGE_BUCKETS_CHECK */

    vm_pageout_state.vm_memory_pressure =
        vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
        vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
        vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
        vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;

    commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure);

    /* move "now" forward */
    vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);

    bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));

    vm_pageout_stat_now = vm_pageout_next;
}
/*
 * mach_vm_ctl_page_free_wanted() is called indirectly, via
 * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
 * it must be safe in the restricted stackshot context. Locks and/or
 * blocking are not allowable.
 */
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
    unsigned int page_free_target, page_free_count, page_free_wanted;

    page_free_target = vm_page_free_target;
    page_free_count = vm_page_free_count;
    if (page_free_target > page_free_count) {
        page_free_wanted = page_free_target - page_free_count;
    } else {
        page_free_wanted = 0;
    }

    return page_free_wanted;
}
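
/*
 * Worked example (illustrative, hypothetical numbers): if
 * vm_page_free_target is 4000 and vm_page_free_count is 3200, the routine
 * reports 4000 - 3200 = 800 pages still wanted; once the free count meets
 * the target it reports 0.
 */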
/*
 * mach_vm_pressure_monitor() is called when taking a stackshot, with
 * wait_for_pressure FALSE, so that code path must remain safe in the
 * restricted stackshot context. No blocking or locks are allowable.
 */
kern_return_t
mach_vm_pressure_monitor(
    boolean_t    wait_for_pressure,
    unsigned int nsecs_monitored,
    unsigned int *pages_reclaimed_p,
    unsigned int *pages_wanted_p)
{
    wait_result_t wr;
    unsigned int  vm_pageout_then, vm_pageout_now;
    unsigned int  pages_reclaimed;
    unsigned int  units_of_monitor;

    units_of_monitor = 8 * nsecs_monitored;
    /*
     * We don't take the vm_page_queue_lock here because we don't want
     * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
     * thread when it's trying to reclaim memory.  We don't need fully
     * accurate monitoring anyway...
     */

    if (wait_for_pressure) {
        /* wait until there's memory pressure */
        while (vm_page_free_count >= vm_page_free_target) {
            wr = assert_wait((event_t) &vm_page_free_wanted,
                THREAD_INTERRUPTIBLE);
            if (wr == THREAD_WAITING) {
                wr = thread_block(THREAD_CONTINUE_NULL);
            }
            if (wr == THREAD_INTERRUPTED) {
                return KERN_ABORTED;
            }
            if (wr == THREAD_AWAKENED) {
                /*
                 * The memory pressure might have already
                 * been relieved but let's not block again
                 * and let's report that there was memory
                 * pressure at some point.
                 */
                break;
            }
        }
    }

    /* provide the number of pages the system wants to reclaim */
    if (pages_wanted_p != NULL) {
        *pages_wanted_p = mach_vm_ctl_page_free_wanted();
    }

    if (pages_reclaimed_p == NULL) {
        return KERN_SUCCESS;
    }

    /* provide number of pages reclaimed in the last "nsecs_monitored" */
    vm_pageout_now = vm_pageout_stat_now;
    pages_reclaimed = 0;
    for (vm_pageout_then =
        VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
        vm_pageout_then != vm_pageout_now &&
        units_of_monitor-- != 0;
        vm_pageout_then =
        VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
        pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
        pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
        pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
        pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
    }
    *pages_reclaimed_p = pages_reclaimed;

    return KERN_SUCCESS;
}
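
/*
 * Usage sketch (illustrative, not from the original source): a caller
 * interested in roughly the last 10 seconds of reclaim activity could do
 *
 *     unsigned int reclaimed, wanted;
 *     kern_return_t kr;
 *
 *     kr = mach_vm_pressure_monitor(FALSE, 10, &reclaimed, &wanted);
 *
 * With 8 stat buckets per second, nsecs_monitored = 10 caps the walk at
 * units_of_monitor = 80 buckets back from vm_pageout_stat_now, summing the
 * freed_* counters; "wanted" comes from mach_vm_ctl_page_free_wanted().
 */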
#if DEVELOPMENT || DEBUG

void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_disconnect_all_pages_active = FALSE;

void
vm_pageout_disconnect_all_pages()
{
    vm_page_lock_queues();

    if (vm_pageout_disconnect_all_pages_active == TRUE) {
        vm_page_unlock_queues();
        return;
    }
    vm_pageout_disconnect_all_pages_active = TRUE;
    vm_page_unlock_queues();

    vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
    vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
    vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);

    vm_pageout_disconnect_all_pages_active = FALSE;
}
void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
{
    vm_page_t   m;
    vm_object_t t_object = NULL;
    vm_object_t l_object = NULL;
    vm_object_t m_object = NULL;
    int         delayed_unlock = 0;
    int         try_failed_count = 0;
    int         disconnected_count = 0;
    int         paused_count = 0;
    int         object_locked_count = 0;

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
        q, qcount, 0, 0, 0);

    vm_page_lock_queues();

    while (qcount && !vm_page_queue_empty(q)) {
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

        m = (vm_page_t) vm_page_queue_first(q);
        m_object = VM_PAGE_OBJECT(m);

        /*
         * check to see if we currently are working
         * with the same object... if so, we've
         * already got the lock
         */
        if (m_object != l_object) {
            /*
             * the object associated with candidate page is
             * different from the one we were just working
             * with... dump the lock if we still own it
             */
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            if (m_object != t_object) {
                try_failed_count = 0;
            }

            /*
             * Try to lock object; since we've already got the
             * page queues lock, we can only 'try' for this one.
             * if the 'try' fails, we need to do a mutex_pause
             * to allow the owner of the object lock a chance to
             * run...
             */
            if (!vm_object_lock_try_scan(m_object)) {
                if (try_failed_count > 20) {
                    goto reenter_pg_on_q;
                }
                vm_page_unlock_queues();
                mutex_pause(try_failed_count++);
                vm_page_lock_queues();
                paused_count++;

                t_object = m_object;
                continue;
            }
            object_locked_count++;

            l_object = m_object;
        }
        if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
            /*
             * put it back on the head of its queue
             */
            goto reenter_pg_on_q;
        }
        if (m->vmp_pmapped == TRUE) {
            pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

            disconnected_count++;
        }
reenter_pg_on_q:
        vm_page_queue_remove(q, m, vmp_pageq);
        vm_page_queue_enter(q, m, vmp_pageq);

        qcount--;
        try_failed_count = 0;

        if (delayed_unlock++ > 128) {
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            lck_mtx_yield(&vm_page_queue_lock);
            delayed_unlock = 0;
        }
    }
    if (l_object != NULL) {
        vm_object_unlock(l_object);
        l_object = NULL;
    }
    vm_page_unlock_queues();

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
        q, disconnected_count, object_locked_count, paused_count, 0);
}

#endif /* DEVELOPMENT || DEBUG */
void
vm_pageout_page_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_anonymous_pages_active = FALSE;

void
vm_pageout_anonymous_pages()
{
    if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
        vm_page_lock_queues();

        if (vm_pageout_anonymous_pages_active == TRUE) {
            vm_page_unlock_queues();
            return;
        }
        vm_pageout_anonymous_pages_active = TRUE;
        vm_page_unlock_queues();

        vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
        vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
        vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);

        if (VM_CONFIG_SWAP_IS_PRESENT) {
            vm_consider_swapping();
        }
        vm_page_lock_queues();
        vm_pageout_anonymous_pages_active = FALSE;
        vm_page_unlock_queues();
    }
}
void
vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
{
    vm_page_t   m;
    vm_object_t t_object = NULL;
    vm_object_t l_object = NULL;
    vm_object_t m_object = NULL;
    int         delayed_unlock = 0;
    int         try_failed_count = 0;
    int         refmod_state;
    int         pmap_options;
    struct vm_pageout_queue *iq;
    ppnum_t     phys_page;


    iq = &vm_pageout_queue_internal;

    vm_page_lock_queues();

    while (qcount && !vm_page_queue_empty(q)) {
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

        if (VM_PAGE_Q_THROTTLED(iq)) {
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            iq->pgo_draining = TRUE;

            assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
            vm_page_unlock_queues();

            thread_block(THREAD_CONTINUE_NULL);

            vm_page_lock_queues();
            delayed_unlock = 0;
            continue;
        }
        m = (vm_page_t) vm_page_queue_first(q);
        m_object = VM_PAGE_OBJECT(m);

        /*
         * check to see if we currently are working
         * with the same object... if so, we've
         * already got the lock
         */
        if (m_object != l_object) {
            if (!m_object->internal) {
                goto reenter_pg_on_q;
            }

            /*
             * the object associated with candidate page is
             * different from the one we were just working
             * with... dump the lock if we still own it
             */
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            if (m_object != t_object) {
                try_failed_count = 0;
            }

            /*
             * Try to lock object; since we've already got the
             * page queues lock, we can only 'try' for this one.
             * if the 'try' fails, we need to do a mutex_pause
             * to allow the owner of the object lock a chance to
             * run...
             */
            if (!vm_object_lock_try_scan(m_object)) {
                if (try_failed_count > 20) {
                    goto reenter_pg_on_q;
                }
                vm_page_unlock_queues();
                mutex_pause(try_failed_count++);
                vm_page_lock_queues();

                t_object = m_object;
                continue;
            }
            l_object = m_object;
        }
        if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
            /*
             * page is not to be cleaned
             * put it back on the head of its queue
             */
            goto reenter_pg_on_q;
        }
        phys_page = VM_PAGE_GET_PHYS_PAGE(m);

        if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
            refmod_state = pmap_get_refmod(phys_page);

            if (refmod_state & VM_MEM_REFERENCED) {
                m->vmp_reference = TRUE;
            }
            if (refmod_state & VM_MEM_MODIFIED) {
                SET_PAGE_DIRTY(m, FALSE);
            }
        }
        if (m->vmp_reference == TRUE) {
            m->vmp_reference = FALSE;
            pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
            goto reenter_pg_on_q;
        }
        if (m->vmp_pmapped == TRUE) {
            if (m->vmp_dirty || m->vmp_precious) {
                pmap_options = PMAP_OPTIONS_COMPRESSOR;
            } else {
                pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
            }
            refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
            if (refmod_state & VM_MEM_MODIFIED) {
                SET_PAGE_DIRTY(m, FALSE);
            }
        }

        if (!m->vmp_dirty && !m->vmp_precious) {
            vm_page_unlock_queues();
            VM_PAGE_FREE(m);
            vm_page_lock_queues();
            delayed_unlock = 0;

            goto next_pg;
        }
        if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
            if (!m_object->pager_initialized) {
                vm_page_unlock_queues();

                vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);

                if (!m_object->pager_initialized) {
                    vm_object_compressor_pager_create(m_object);
                }

                vm_page_lock_queues();
                delayed_unlock = 0;
            }
            if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
                goto reenter_pg_on_q;
            }
            /*
             * vm_object_compressor_pager_create will drop the object lock
             * which means 'm' may no longer be valid to use
             */
            continue;
        }
        /*
         * we've already factored out pages in the laundry which
         * means this page can't be on the pageout queue so it's
         * safe to do the vm_page_queues_remove
         */
        vm_page_queues_remove(m, TRUE);

        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

        vm_pageout_cluster(m);

        goto next_pg;

reenter_pg_on_q:
        vm_page_queue_remove(q, m, vmp_pageq);
        vm_page_queue_enter(q, m, vmp_pageq);
next_pg:
        qcount--;
        try_failed_count = 0;

        if (delayed_unlock++ > 128) {
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            lck_mtx_yield(&vm_page_queue_lock);
            delayed_unlock = 0;
        }
    }
    if (l_object != NULL) {
        vm_object_unlock(l_object);
        l_object = NULL;
    }
    vm_page_unlock_queues();
}
/*
 * function in BSD to apply I/O throttle to the pageout thread
 */
extern void vm_pageout_io_throttle(void);

#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj)            \
	/*                                                      \
	 * If a "reusable" page somehow made it back into       \
	 * the active queue, it's been re-used and is not       \
	 * quite re-usable.                                     \
	 * If the VM object was "all_reusable", consider it     \
	 * as "all re-used" instead of converting it to         \
	 * "partially re-used", which could be expensive.       \
	 */                                                     \
	assert(VM_PAGE_OBJECT((m)) == (obj));                   \
	if ((m)->vmp_reusable ||                                \
	    (obj)->all_reusable) {                              \
		vm_object_reuse_pages((obj),                    \
		    (m)->vmp_offset,                            \
		    (m)->vmp_offset + PAGE_SIZE_64,             \
		    FALSE);                                     \
	}

#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT      64
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX  1024

#define FCS_IDLE               0
#define FCS_DELAYED            1
#define FCS_DEADLOCK_DETECTED  2

struct flow_control {
    int             state;
    mach_timespec_t ts;
};

#if CONFIG_BACKGROUND_QUEUE
uint64_t vm_pageout_rejected_bq_internal = 0;
uint64_t vm_pageout_rejected_bq_external = 0;
uint64_t vm_pageout_skipped_bq_internal = 0;
#endif /* CONFIG_BACKGROUND_QUEUE */

#define ANONS_GRABBED_LIMIT  2
static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);

static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);

#define VM_PAGEOUT_PB_NO_ACTION                           0
#define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER   1
#define VM_PAGEOUT_PB_THREAD_YIELD                        2

static void
vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
{
    if (*local_freeq) {
        vm_page_unlock_queues();

        VM_DEBUG_CONSTANT_EVENT(
            vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
            vm_page_free_count, 0, 0, 1);

        vm_page_free_list(*local_freeq, TRUE);

        VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
            vm_page_free_count, *local_freed, 0, 1);

        *local_freeq = NULL;
        *local_freed = 0;

        vm_page_lock_queues();
    } else {
        lck_mtx_yield(&vm_page_queue_lock);
    }
    *delayed_unlock = 1;
}
static void
vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
    vm_page_t *local_freeq, int *local_freed, int action)
{
    vm_page_unlock_queues();

    if (*object != NULL) {
        vm_object_unlock(*object);
        *object = NULL;
    }
    if (*local_freeq) {
        vm_page_free_list(*local_freeq, TRUE);

        *local_freeq = NULL;
        *local_freed = 0;
    }
    *delayed_unlock = 1;

    switch (action) {
    case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
        vm_consider_waking_compactor_swapper();
        break;
    case VM_PAGEOUT_PB_THREAD_YIELD:
        thread_yield_internal(1);
        break;
    case VM_PAGEOUT_PB_NO_ACTION:
    default:
        break;
    }
    vm_page_lock_queues();
}
static struct vm_pageout_vminfo last;

uint64_t last_vm_page_pages_grabbed = 0;

extern uint32_t c_segment_pages_compressed;

extern uint64_t shared_region_pager_reclaimed;
extern struct memory_object_pager_ops shared_region_pager_ops;
void
update_vm_info(void)
{
    uint64_t tmp;

    vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count;

    vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT;

    vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count;


    tmp = vm_pageout_vminfo.vm_pageout_considered_page;
    vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page);
    last.vm_pageout_considered_page = tmp;

    tmp = vm_pageout_vminfo.vm_pageout_compressions;
    vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp - last.vm_pageout_compressions);
    last.vm_pageout_compressions = tmp;

    tmp = vm_pageout_vminfo.vm_compressor_failed;
    vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed);
    last.vm_compressor_failed = tmp;

    tmp = vm_pageout_vminfo.vm_compressor_pages_grabbed;
    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp - last.vm_compressor_pages_grabbed);
    last.vm_compressor_pages_grabbed = tmp;

    tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost;
    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost);
    last.vm_phantom_cache_found_ghost = tmp;

    tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost;
    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost);
    last.vm_phantom_cache_added_ghost = tmp;

    tmp = get_pages_grabbed_count();
    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp - last_vm_page_pages_grabbed);
    last_vm_page_pages_grabbed = tmp;

    tmp = vm_pageout_vminfo.vm_page_pages_freed;
    vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed);
    last.vm_page_pages_freed = tmp;

    if (vm_pageout_stats[vm_pageout_stat_now].considered) {
        tmp = vm_pageout_vminfo.vm_pageout_pages_evicted;
        vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted);
        last.vm_pageout_pages_evicted = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_pages_purged;
        vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged);
        last.vm_pageout_pages_purged = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_freed_speculative;
        vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative);
        last.vm_pageout_freed_speculative = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_freed_external;
        vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external);
        last.vm_pageout_freed_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced;
        vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced);
        last.vm_pageout_inactive_referenced = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external;
        vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external);
        last.vm_pageout_scan_inactive_throttled_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external;
        vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external);
        last.vm_pageout_inactive_dirty_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned;
        vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned);
        last.vm_pageout_freed_cleaned = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock;
        vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock);
        last.vm_pageout_inactive_nolock = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal;
        vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal);
        last.vm_pageout_scan_inactive_throttled_internal = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_skipped_external;
        vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external);
        last.vm_pageout_skipped_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded;
        vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded);
        last.vm_pageout_reactivation_limit_exceeded = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim;
        vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim);
        last.vm_pageout_inactive_force_reclaim = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_freed_internal;
        vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal);
        last.vm_pageout_freed_internal = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal;
        vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal);
        last.vm_pageout_considered_bq_internal = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external;
        vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external);
        last.vm_pageout_considered_bq_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated;
        vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated);
        last.vm_pageout_filecache_min_reactivated = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal;
        vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal);
        last.vm_pageout_inactive_dirty_internal = tmp;
    }

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count);

    if (vm_pageout_stats[vm_pageout_stat_now].considered ||
        vm_pageout_stats[vm_pageout_stat_now].pages_compressed ||
        vm_pageout_stats[vm_pageout_stat_now].failed_compressions) {
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].considered,
            vm_pageout_stats[vm_pageout_stat_now].freed_speculative,
            vm_pageout_stats[vm_pageout_stat_now].freed_external,
            vm_pageout_stats[vm_pageout_stat_now].inactive_referenced);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].throttled_external_q,
            vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external,
            vm_pageout_stats[vm_pageout_stat_now].freed_cleaned,
            vm_pageout_stats[vm_pageout_stat_now].inactive_nolock);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q,
            vm_pageout_stats[vm_pageout_stat_now].pages_compressed,
            vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor,
            vm_pageout_stats[vm_pageout_stat_now].skipped_external);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded,
            vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim,
            vm_pageout_stats[vm_pageout_stat_now].failed_compressions,
            vm_pageout_stats[vm_pageout_stat_now].freed_internal);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal,
            vm_pageout_stats[vm_pageout_stat_now].considered_bq_external,
            vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations,
            vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal);
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE,
        vm_pageout_stats[vm_pageout_stat_now].pages_grabbed,
        vm_pageout_stats[vm_pageout_stat_now].pages_freed,
        vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found,
        vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added);

    record_memory_pressure();
}
extern boolean_t hibernation_vmqueues_inspection;

void
vm_page_balance_inactive(int max_to_move)
{
    vm_page_t m;

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    if (hibernation_vmqueues_inspection == TRUE) {
        /*
         * It is likely that the hibernation code path is
         * dealing with these very queues as we are about
         * to move pages around in/from them and completely
         * change the linkage of the pages.
         *
         * And so we skip the rebalancing of these queues.
         */
        return;
    }
    vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
        vm_page_inactive_count +
        vm_page_speculative_count);

    while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
        VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);

        m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);

        assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
        assert(!m->vmp_laundry);
        assert(VM_PAGE_OBJECT(m) != kernel_object);
        assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);

        DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);

        /*
         * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
         *
         * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
         * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
         * new reference happens. If no further references happen on the page after that remote TLB flushes
         * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
         * by pageout_scan, which is just fine since the last reference would have happened quite far
         * in the past (TLB caches don't hang around for very long), and of course could just as easily
         * have happened before we moved the page
         */
        if (m->vmp_pmapped == TRUE) {
            pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
        }

        /*
         * The page might be absent or busy,
         * but vm_page_deactivate can handle that.
         * FALSE indicates that we don't want a H/W clear reference
         */
        vm_page_deactivate_internal(m, FALSE);
    }
}
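
/*
 * Illustrative note (hypothetical numbers): with 300000 active, 80000
 * inactive and 20000 speculative pages, the target recomputed above is
 * (300000 + 80000 + 20000) / 2 = 200000, and since 80000 + 20000 is below
 * that, up to max_to_move pages are deactivated on this call.  The scan
 * loop in vm_pageout_scan() calls vm_page_balance_inactive(1) on each
 * iteration to keep the queues near this ratio.
 */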
/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
 * held and vm_page_free_wanted == 0.
 */
void
vm_pageout_scan(void)
{
    unsigned int loop_count = 0;
    unsigned int inactive_burst_count = 0;
    unsigned int reactivated_this_call;
    unsigned int reactivate_limit;
    vm_page_t    local_freeq = NULL;
    int          local_freed = 0;
    int          delayed_unlock = 0;
    int          delayed_unlock_limit = 0;
    int          refmod_state = 0;
    int          vm_pageout_deadlock_target = 0;
    struct vm_pageout_queue     *iq;
    struct vm_pageout_queue     *eq;
    struct vm_speculative_age_q *sq;
    struct flow_control flow_control = { 0, { 0, 0 } };
    boolean_t    inactive_throttled = FALSE;
    unsigned int msecs = 0;
    vm_object_t  object = NULL;
    uint32_t     inactive_reclaim_run;
    boolean_t    exceeded_burst_throttle;
    boolean_t    grab_anonymous = FALSE;
    boolean_t    force_anonymous = FALSE;
    boolean_t    force_speculative_aging = FALSE;
    int          anons_grabbed = 0;
    int          page_prev_q_state = 0;
#if CONFIG_BACKGROUND_QUEUE
    boolean_t    page_from_bg_q = FALSE;
#endif
    int          cache_evict_throttle = 0;
    uint32_t     vm_pageout_inactive_external_forced_reactivate_limit = 0;
    uint32_t     inactive_external_count;
    int          force_purge = 0;
#define DELAY_SPECULATIVE_AGE  1000
    int          delay_speculative_age = 0;
    vm_object_t  m_object = VM_OBJECT_NULL;

#if VM_PRESSURE_EVENTS
    vm_pressure_level_t pressure_level;
#endif /* VM_PRESSURE_EVENTS */

    VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
        vm_pageout_vminfo.vm_pageout_freed_speculative,
        vm_pageout_state.vm_pageout_inactive_clean,
        vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
        vm_pageout_vminfo.vm_pageout_inactive_dirty_external);

    flow_control.state = FCS_IDLE;
    iq = &vm_pageout_queue_internal;
    eq = &vm_pageout_queue_external;
    sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

    XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);

    /* Ask the pmap layer to return any pages it no longer needs. */
    uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();

    vm_page_lock_queues();

    vm_page_wire_count -= pmap_wired_pages_freed;

    /*
     * Calculate the max number of referenced pages on the inactive
     * queue that we will reactivate.
     */
    reactivated_this_call = 0;
    reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
        vm_page_inactive_count);
    inactive_reclaim_run = 0;

    vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;

    /*
     * We must limit the rate at which we send pages to the pagers
     * so that we don't tie up too many pages in the I/O queues.
     * We implement a throttling mechanism using the laundry count
     * to limit the number of pages outstanding to the default
     * and external pagers.  We can bypass the throttles and look
     * for clean pages if the pageout queues don't drain in a timely
     * fashion since this may indicate that the pageout paths are
     * stalled waiting for memory, which only we can provide.
     */

    assert(object == NULL);
    assert(delayed_unlock != 0);

    vm_page_anonymous_min = vm_page_inactive_target / 20;

    if (vm_pageout_state.vm_page_speculative_percentage > 50) {
        vm_pageout_state.vm_page_speculative_percentage = 50;
    } else if (vm_pageout_state.vm_page_speculative_percentage <= 0) {
        vm_pageout_state.vm_page_speculative_percentage = 1;
    }

    vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
        vm_page_inactive_count);

    DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);

    if (vm_upl_wait_for_pages < 0) {
        vm_upl_wait_for_pages = 0;
    }

    delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;

    if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
        delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
    }

#if CONFIG_SECLUDED_MEMORY
    /*
     * Deal with secluded_q overflow.
     */
    if (vm_page_secluded_count > vm_page_secluded_target) {
        vm_page_t secluded_page;

        /*
         * SECLUDED_AGING_BEFORE_ACTIVE:
         * Excess secluded pages go to the active queue and
         * will later go to the inactive queue.
         */
        assert((vm_page_secluded_count_free +
            vm_page_secluded_count_inuse) ==
            vm_page_secluded_count);
        secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
        assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);

        vm_page_queues_remove(secluded_page, FALSE);
        assert(!secluded_page->vmp_fictitious);
        assert(!VM_PAGE_WIRED(secluded_page));

        if (secluded_page->vmp_object == 0) {
            /* transfer to free queue */
            assert(secluded_page->vmp_busy);
            secluded_page->vmp_snext = local_freeq;
            local_freeq = secluded_page;
        } else {
            /* transfer to head of active queue */
            vm_page_enqueue_active(secluded_page, FALSE);
            secluded_page = VM_PAGE_NULL;
        }
    }
#endif /* CONFIG_SECLUDED_MEMORY */

    assert(delayed_unlock);

    /*
     * maintain our balance
     */
    vm_page_balance_inactive(1);


    /**********************************************************************
     * above this point we're playing with the active and secluded queues
     * below this point we're playing with the throttling mechanisms
     * and the inactive queue
     **********************************************************************/

    if (vm_page_free_count + local_freed >= vm_page_free_target) {
        vm_pageout_scan_wants_object = VM_OBJECT_NULL;

        vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
            VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
        /*
         * make sure the pageout I/O threads are running
         * throttled in case there are still requests
         * in the laundry... since we have met our targets
         * we don't need the laundry to be cleaned in a timely
         * fashion... so let's avoid interfering with foreground
         * activity
         */
        vm_pageout_adjust_eq_iothrottle(eq, TRUE);

        lck_mtx_lock(&vm_page_queue_free_lock);

        if ((vm_page_free_count >= vm_page_free_target) &&
            (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
            /*
             * done - we have met our target *and*
             * there is no one waiting for a page.
             */
            assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);

            VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
                vm_pageout_state.vm_pageout_inactive,
                vm_pageout_state.vm_pageout_inactive_used, 0, 0);
            VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
                vm_pageout_vminfo.vm_pageout_freed_speculative,
                vm_pageout_state.vm_pageout_inactive_clean,
                vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
                vm_pageout_vminfo.vm_pageout_inactive_dirty_external);

            return;
        }
        lck_mtx_unlock(&vm_page_queue_free_lock);
    }

    /*
     * Before anything, we check if we have any ripe volatile
     * objects around. If so, try to purge the first object.
     * If the purge fails, fall through to reclaim a page instead.
     * If the purge succeeds, go back to the top and reevaluate
     * the new memory situation.
     */

    assert(available_for_purge >= 0);
    force_purge
= 0; /* no force-purging */
2029 #if VM_PRESSURE_EVENTS
2030 pressure_level
= memorystatus_vm_pressure_level
;
2032 if (pressure_level
> kVMPressureNormal
) {
2033 if (pressure_level
>= kVMPressureCritical
) {
2034 force_purge
= vm_pageout_state
.memorystatus_purge_on_critical
;
2035 } else if (pressure_level
>= kVMPressureUrgent
) {
2036 force_purge
= vm_pageout_state
.memorystatus_purge_on_urgent
;
2037 } else if (pressure_level
>= kVMPressureWarning
) {
2038 force_purge
= vm_pageout_state
.memorystatus_purge_on_warning
;
2041 #endif /* VM_PRESSURE_EVENTS */
2043 if (available_for_purge
|| force_purge
) {
2044 if (object
!= NULL
) {
2045 vm_object_unlock(object
);
2049 memoryshot(VM_PAGEOUT_PURGEONE
, DBG_FUNC_START
);
2051 VM_DEBUG_EVENT(vm_pageout_purgeone
, VM_PAGEOUT_PURGEONE
, DBG_FUNC_START
, vm_page_free_count
, 0, 0, 0);
2052 if (vm_purgeable_object_purge_one(force_purge
, C_DONT_BLOCK
)) {
2053 VM_PAGEOUT_DEBUG(vm_pageout_purged_objects
, 1);
2054 VM_DEBUG_EVENT(vm_pageout_purgeone
, VM_PAGEOUT_PURGEONE
, DBG_FUNC_END
, vm_page_free_count
, 0, 0, 0);
2055 memoryshot(VM_PAGEOUT_PURGEONE
, DBG_FUNC_END
);
2058 VM_DEBUG_EVENT(vm_pageout_purgeone
, VM_PAGEOUT_PURGEONE
, DBG_FUNC_END
, 0, 0, 0, -1);
2059 memoryshot(VM_PAGEOUT_PURGEONE
, DBG_FUNC_END
);
2062 if (vm_page_queue_empty(&sq
->age_q
) && vm_page_speculative_count
) {
2064 * try to pull pages from the aging bins...
2065 * see vm_page.h for an explanation of how
2066 * this mechanism works
2068 struct vm_speculative_age_q
*aq
;
2069 boolean_t can_steal
= FALSE
;
2070 int num_scanned_queues
;
2072 aq
= &vm_page_queue_speculative
[speculative_steal_index
];
2074 num_scanned_queues
= 0;
2075 while (vm_page_queue_empty(&aq
->age_q
) &&
2076 num_scanned_queues
++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q
) {
2077 speculative_steal_index
++;
2079 if (speculative_steal_index
> VM_PAGE_MAX_SPECULATIVE_AGE_Q
) {
2080 speculative_steal_index
= VM_PAGE_MIN_SPECULATIVE_AGE_Q
;
2083 aq
= &vm_page_queue_speculative
[speculative_steal_index
];
2086 if (num_scanned_queues
== VM_PAGE_MAX_SPECULATIVE_AGE_Q
+ 1) {
2088 * XXX We've scanned all the speculative
2089 * queues but still haven't found one
2090 * that is not empty, even though
2091 * vm_page_speculative_count is not 0.
2093 if (!vm_page_queue_empty(&sq
->age_q
)) {
2096 #if DEVELOPMENT || DEBUG
2097 panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count
);
2100 vm_page_speculative_count
= 0;
2101 /* ... and continue */
2105 if (vm_page_speculative_count
> vm_pageout_state
.vm_page_speculative_target
|| force_speculative_aging
== TRUE
) {
2108 if (!delay_speculative_age
) {
2109 mach_timespec_t ts_fully_aged
;
2111 ts_fully_aged
.tv_sec
= (VM_PAGE_MAX_SPECULATIVE_AGE_Q
* vm_pageout_state
.vm_page_speculative_q_age_ms
) / 1000;
2112 ts_fully_aged
.tv_nsec
= ((VM_PAGE_MAX_SPECULATIVE_AGE_Q
* vm_pageout_state
.vm_page_speculative_q_age_ms
) % 1000)
2113 * 1000 * NSEC_PER_USEC
;
2115 ADD_MACH_TIMESPEC(&ts_fully_aged
, &aq
->age_ts
);
2119 clock_get_system_nanotime(&sec
, &nsec
);
2120 ts
.tv_sec
= (unsigned int) sec
;
2123 if (CMP_MACH_TIMESPEC(&ts
, &ts_fully_aged
) >= 0) {
2126 delay_speculative_age
++;
2129 delay_speculative_age
++;
2130 if (delay_speculative_age
== DELAY_SPECULATIVE_AGE
) {
2131 delay_speculative_age
= 0;
2135 if (can_steal
== TRUE
) {
2136 vm_page_speculate_ageit(aq
);
2139 force_speculative_aging
= FALSE
;
2141 if (vm_page_queue_empty(&sq
->age_q
) && cache_evict_throttle
== 0) {
2144 if (object
!= NULL
) {
2145 vm_object_unlock(object
);
2148 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START
, 0, 0, 0, 0, 0);
2150 pages_evicted
= vm_object_cache_evict(100, 10);
2152 KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END
, pages_evicted
, 0, 0, 0, 0);
2154 if (pages_evicted
) {
2155 vm_pageout_vminfo
.vm_pageout_pages_evicted
+= pages_evicted
;
2157 VM_DEBUG_EVENT(vm_pageout_cache_evict
, VM_PAGEOUT_CACHE_EVICT
, DBG_FUNC_NONE
,
2158 vm_page_free_count
, pages_evicted
, vm_pageout_vminfo
.vm_pageout_pages_evicted
, 0);
2159 memoryshot(VM_PAGEOUT_CACHE_EVICT
, DBG_FUNC_NONE
);
2162 * we just freed up to 100 pages,
2163 * so go back to the top of the main loop
2164 * and re-evaulate the memory situation
2168 cache_evict_throttle
= 1000;
2171 if (cache_evict_throttle
) {
2172 cache_evict_throttle
--;
2175 divisor
= vm_pageout_state
.vm_page_filecache_min_divisor
;
2179 * don't let the filecache_min fall below 15% of available memory
2180 * on systems with an active compressor that isn't nearing its
2181 * limits w/r to accepting new data
2183 * on systems w/o the compressor/swapper, the filecache is always
2184 * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
2185 * since most (if not all) of the anonymous pages are in the
2186 * throttled queue (which isn't counted as available) which
2187 * effectively disables this filter
2189 if (vm_compressor_low_on_space() || divisor
== 0) {
2190 vm_pageout_state
.vm_page_filecache_min
= 0;
2192 vm_pageout_state
.vm_page_filecache_min
=
2193 ((AVAILABLE_NON_COMPRESSED_MEMORY
) * 10) / divisor
;
2196 if (vm_compressor_out_of_space() || divisor
== 0) {
2197 vm_pageout_state
.vm_page_filecache_min
= 0;
2200 * don't let the filecache_min fall below the specified critical level
2202 vm_pageout_state
.vm_page_filecache_min
=
2203 ((AVAILABLE_NON_COMPRESSED_MEMORY
) * 10) / divisor
;
2206 if (vm_page_free_count
< (vm_page_free_reserved
/ 4)) {
2207 vm_pageout_state
.vm_page_filecache_min
= 0;
2210 exceeded_burst_throttle
= FALSE
;
2212 * Sometimes we have to pause:
2213 * 1) No inactive pages - nothing to do.
2214 * 2) Loop control - no acceptable pages found on the inactive queue
2215 * within the last vm_pageout_burst_inactive_throttle iterations
2216 * 3) Flow control - default pageout queue is full
2218 if (vm_page_queue_empty(&vm_page_queue_inactive
) &&
2219 vm_page_queue_empty(&vm_page_queue_anonymous
) &&
2220 vm_page_queue_empty(&vm_page_queue_cleaned
) &&
2221 vm_page_queue_empty(&sq
->age_q
)) {
2222 VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle
, 1);
2223 msecs
= vm_pageout_state
.vm_pageout_empty_wait
;
2224 goto vm_pageout_scan_delay
;
2225 } else if (inactive_burst_count
>=
2226 MIN(vm_pageout_state
.vm_pageout_burst_inactive_throttle
,
2227 (vm_page_inactive_count
+
2228 vm_page_speculative_count
))) {
2229 VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle
, 1);
2230 msecs
= vm_pageout_state
.vm_pageout_burst_wait
;
2232 exceeded_burst_throttle
= TRUE
;
2233 goto vm_pageout_scan_delay
;
2234 } else if (VM_PAGE_Q_THROTTLED(iq
) &&
2235 VM_DYNAMIC_PAGING_ENABLED()) {
2239 switch (flow_control
.state
) {
2241 if ((vm_page_free_count
+ local_freed
) < vm_page_free_target
&&
2242 vm_pageout_state
.vm_restricted_to_single_processor
== FALSE
) {
2244 * since the compressor is running independently of vm_pageout_scan
2245 * let's not wait for it just yet... as long as we have a healthy supply
2246 * of filecache pages to work with, let's keep stealing those.
2248 inactive_external_count
= vm_page_inactive_count
- vm_page_anonymous_count
;
2250 if (vm_page_pageable_external_count
> vm_pageout_state
.vm_page_filecache_min
&&
2251 (inactive_external_count
>= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count
))) {
2252 anons_grabbed
= ANONS_GRABBED_LIMIT
;
2253 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred
, 1);
2254 goto consider_inactive
;
2257 reset_deadlock_timer
:
2258 ts
.tv_sec
= vm_pageout_state
.vm_pageout_deadlock_wait
/ 1000;
2259 ts
.tv_nsec
= (vm_pageout_state
.vm_pageout_deadlock_wait
% 1000) * 1000 * NSEC_PER_USEC
;
2260 clock_get_system_nanotime(&sec
, &nsec
);
2261 flow_control
.ts
.tv_sec
= (unsigned int) sec
;
2262 flow_control
.ts
.tv_nsec
= nsec
;
2263 ADD_MACH_TIMESPEC(&flow_control
.ts
, &ts
);
2265 flow_control
.state
= FCS_DELAYED
;
2266 msecs
= vm_pageout_state
.vm_pageout_deadlock_wait
;
2268 vm_pageout_vminfo
.vm_pageout_scan_inactive_throttled_internal
++;
2272 clock_get_system_nanotime(&sec
, &nsec
);
2273 ts
.tv_sec
= (unsigned int) sec
;
2276 if (CMP_MACH_TIMESPEC(&ts
, &flow_control
.ts
) >= 0) {
2278 * the pageout thread for the default pager is potentially
2279 * deadlocked since the
2280 * default pager queue has been throttled for more than the
2281 * allowable time... we need to move some clean pages or dirty
2282 * pages belonging to the external pagers if they aren't throttled
2283 * vm_page_free_wanted represents the number of threads currently
2284 * blocked waiting for pages... we'll move one page for each of
2285 * these plus a fixed amount to break the logjam... once we're done
2286 * moving this number of pages, we'll re-enter the FSC_DELAYED state
2287 * with a new timeout target since we have no way of knowing
2288 * whether we've broken the deadlock except through observation
2289 * of the queue associated with the default pager... we need to
2290 * stop moving pages and allow the system to run to see what
2291 * state it settles into.
2293 vm_pageout_deadlock_target
= vm_pageout_state
.vm_pageout_deadlock_relief
+
2294 vm_page_free_wanted
+ vm_page_free_wanted_privileged
;
2295 VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected
, 1);
2296 flow_control
.state
= FCS_DEADLOCK_DETECTED
;
2297 thread_wakeup((event_t
) &vm_pageout_garbage_collect
);
2298 goto consider_inactive
;
2301 * just resniff instead of trying
2302 * to compute a new delay time... we're going to be
2303 * awakened immediately upon a laundry completion,
2304 * so we won't wait any longer than necessary
2306 msecs
= vm_pageout_state
.vm_pageout_idle_wait
;
2309 case FCS_DEADLOCK_DETECTED
:
2310 if (vm_pageout_deadlock_target
) {
2311 goto consider_inactive
;
2313 goto reset_deadlock_timer
;
2315 vm_pageout_scan_delay
:
2316 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
2318 vm_pageout_prepare_to_block(&object
, &delayed_unlock
, &local_freeq
, &local_freed
,
2319 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER
);
2321 if (vm_page_free_count
>= vm_page_free_target
) {
2323 * we're here because
2324 * 1) someone else freed up some pages while we had
2325 * the queues unlocked above
2326 * and we've hit one of the 3 conditions that
2327 * cause us to pause the pageout scan thread
2329 * since we already have enough free pages,
2330 * let's avoid stalling and return normally
2332 * before we return, make sure the pageout I/O threads
2333 * are running throttled in case there are still requests
2334 * in the laundry... since we have enough free pages
2335 * we don't need the laundry to be cleaned in a timely
2336 * fashion... so let's avoid interfering with foreground
2339 * we don't want to hold vm_page_queue_free_lock when
2340 * calling vm_pageout_adjust_eq_iothrottle (since it
2341 * may cause other locks to be taken), we do the intitial
2342 * check outside of the lock. Once we take the lock,
2343 * we recheck the condition since it may have changed.
2344 * if it has, no problem, we will make the threads
2345 * non-throttled before actually blocking
2347 vm_pageout_adjust_eq_iothrottle(eq
, TRUE
);
2349 lck_mtx_lock(&vm_page_queue_free_lock
);
2351 if (vm_page_free_count
>= vm_page_free_target
&&
2352 (vm_page_free_wanted
== 0) && (vm_page_free_wanted_privileged
== 0)) {
2353 goto return_from_scan
;
2355 lck_mtx_unlock(&vm_page_queue_free_lock
);
2357 if ((vm_page_free_count
+ vm_page_cleaned_count
) < vm_page_free_target
) {
2359 * we're most likely about to block due to one of
2360 * the 3 conditions that cause vm_pageout_scan to
2361 * not be able to make forward progress w/r
2362 * to providing new pages to the free queue,
2363 * so unthrottle the I/O threads in case we
2364 * have laundry to be cleaned... it needs
2365 * to be completed ASAP.
2367 * even if we don't block, we want the io threads
2368 * running unthrottled since the sum of free +
2369 * clean pages is still under our free target
2371 vm_pageout_adjust_eq_iothrottle(eq
, FALSE
);
2373 if (vm_page_cleaned_count
> 0 && exceeded_burst_throttle
== FALSE
) {
2375 * if we get here we're below our free target and
2376 * we're stalling due to a full laundry queue or
2377 * we don't have any inactive pages other then
2378 * those in the clean queue...
2379 * however, we have pages on the clean queue that
2380 * can be moved to the free queue, so let's not
2381 * stall the pageout scan
2383 flow_control
.state
= FCS_IDLE
;
2384 goto consider_inactive
;
2386 if (flow_control
.state
== FCS_DELAYED
&& !VM_PAGE_Q_THROTTLED(iq
)) {
2387 flow_control
.state
= FCS_IDLE
;
2388 goto consider_inactive
;
2391 VM_CHECK_MEMORYSTATUS
;
2393 if (flow_control
.state
!= FCS_IDLE
) {
2394 VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle
, 1);
2397 iq
->pgo_throttled
= TRUE
;
2398 assert_wait_timeout((event_t
) &iq
->pgo_laundry
, THREAD_INTERRUPTIBLE
, msecs
, 1000 * NSEC_PER_USEC
);
2400 counter(c_vm_pageout_scan_block
++);
2402 vm_page_unlock_queues();
2404 assert(vm_pageout_scan_wants_object
== VM_OBJECT_NULL
);
2406 VM_DEBUG_EVENT(vm_pageout_thread_block
, VM_PAGEOUT_THREAD_BLOCK
, DBG_FUNC_START
,
2407 iq
->pgo_laundry
, iq
->pgo_maxlaundry
, msecs
, 0);
2408 memoryshot(VM_PAGEOUT_THREAD_BLOCK
, DBG_FUNC_START
);
2410 thread_block(THREAD_CONTINUE_NULL
);
2412 VM_DEBUG_EVENT(vm_pageout_thread_block
, VM_PAGEOUT_THREAD_BLOCK
, DBG_FUNC_END
,
2413 iq
->pgo_laundry
, iq
->pgo_maxlaundry
, msecs
, 0);
2414 memoryshot(VM_PAGEOUT_THREAD_BLOCK
, DBG_FUNC_END
);
2416 vm_page_lock_queues();
2418 iq
->pgo_throttled
= FALSE
;
2420 if (loop_count
>= vm_page_inactive_count
) {
2423 inactive_burst_count
= 0;
2430 flow_control
.state
= FCS_IDLE
;
2432 vm_pageout_inactive_external_forced_reactivate_limit
= MIN((vm_page_active_count
+ vm_page_inactive_count
),
2433 vm_pageout_inactive_external_forced_reactivate_limit
);
2435 inactive_burst_count
++;
2436 vm_pageout_state
.vm_pageout_inactive
++;
2442 #if CONFIG_BACKGROUND_QUEUE
2443 page_from_bg_q
= FALSE
;
2444 #endif /* CONFIG_BACKGROUND_QUEUE */
2447 m_object
= VM_OBJECT_NULL
;
2449 if (VM_DYNAMIC_PAGING_ENABLED()) {
2450 assert(vm_page_throttled_count
== 0);
2451 assert(vm_page_queue_empty(&vm_page_queue_throttled
));
2455 * Try for a clean-queue inactive page.
2456 * These are pages that vm_pageout_scan tried to steal earlier, but
2457 * were dirty and had to be cleaned. Pick them up now that they are clean.
2459 if (!vm_page_queue_empty(&vm_page_queue_cleaned
)) {
2460 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_cleaned
);
2462 assert(m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
);
2468 * The next most eligible pages are ones we paged in speculatively,
2469 * but which have not yet been touched and have been aged out.
2471 if (!vm_page_queue_empty(&sq
->age_q
)) {
2472 m
= (vm_page_t
) vm_page_queue_first(&sq
->age_q
);
2474 assert(m
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
);
2476 if (!m
->vmp_dirty
|| force_anonymous
== FALSE
) {
2483 #if CONFIG_BACKGROUND_QUEUE
2484 if (vm_page_background_mode
!= VM_PAGE_BG_DISABLED
&& (vm_page_background_count
> vm_page_background_target
)) {
2485 vm_object_t bg_m_object
= NULL
;
2487 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_background
);
2489 bg_m_object
= VM_PAGE_OBJECT(m
);
2491 if (!VM_PAGE_PAGEABLE(m
)) {
2493 * This page is on the background queue
2494 * but not on a pageable queue. This is
2495 * likely a transient state and whoever
2496 * took it out of its pageable queue
2497 * will likely put it back on a pageable
2498 * queue soon but we can't deal with it
2499 * at this point, so let's ignore this
2502 } else if (force_anonymous
== FALSE
|| bg_m_object
->internal
) {
2503 if (bg_m_object
->internal
&&
2504 (VM_PAGE_Q_THROTTLED(iq
) ||
2505 vm_compressor_out_of_space() == TRUE
||
2506 vm_page_free_count
< (vm_page_free_reserved
/ 4))) {
2507 vm_pageout_skipped_bq_internal
++;
2509 page_from_bg_q
= TRUE
;
2511 if (bg_m_object
->internal
) {
2512 vm_pageout_vminfo
.vm_pageout_considered_bq_internal
++;
2514 vm_pageout_vminfo
.vm_pageout_considered_bq_external
++;
2521 inactive_external_count
= vm_page_inactive_count
- vm_page_anonymous_count
;
2523 if ((vm_page_pageable_external_count
< vm_pageout_state
.vm_page_filecache_min
|| force_anonymous
== TRUE
) ||
2524 (inactive_external_count
< VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count
))) {
2525 grab_anonymous
= TRUE
;
2528 vm_pageout_vminfo
.vm_pageout_skipped_external
++;
2529 goto want_anonymous
;
2531 grab_anonymous
= (vm_page_anonymous_count
> vm_page_anonymous_min
);
2534 /* If the file-backed pool has accumulated
2535 * significantly more pages than the jetsam
2536 * threshold, prefer to reclaim those
2537 * inline to minimise compute overhead of reclaiming
2539 * This calculation does not account for the CPU local
2540 * external page queues, as those are expected to be
2541 * much smaller relative to the global pools.
2543 if (grab_anonymous
== TRUE
&& !VM_PAGE_Q_THROTTLED(eq
)) {
2544 if (vm_page_pageable_external_count
>
2545 vm_pageout_state
.vm_page_filecache_min
) {
2546 if ((vm_page_pageable_external_count
*
2547 vm_pageout_memorystatus_fb_factor_dr
) >
2548 (memorystatus_available_pages_critical
*
2549 vm_pageout_memorystatus_fb_factor_nr
)) {
2550 grab_anonymous
= FALSE
;
2552 VM_PAGEOUT_DEBUG(vm_grab_anon_overrides
, 1);
2555 if (grab_anonymous
) {
2556 VM_PAGEOUT_DEBUG(vm_grab_anon_nops
, 1);
2559 #endif /* CONFIG_JETSAM */
2562 if (grab_anonymous
== FALSE
|| anons_grabbed
>= ANONS_GRABBED_LIMIT
|| vm_page_queue_empty(&vm_page_queue_anonymous
)) {
2563 if (!vm_page_queue_empty(&vm_page_queue_inactive
)) {
2564 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_inactive
);
2566 assert(m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_EXTERNAL_Q
);
2569 if (vm_page_pageable_external_count
< vm_pageout_state
.vm_page_filecache_min
) {
2570 if (!vm_page_queue_empty(&vm_page_queue_anonymous
)) {
2571 if ((++reactivated_this_call
% 100)) {
2572 vm_pageout_vminfo
.vm_pageout_filecache_min_reactivated
++;
2573 goto must_activate_page
;
2576 * steal 1% of the file backed pages even if
2577 * we are under the limit that has been set
2578 * for a healthy filecache
2585 if (!vm_page_queue_empty(&vm_page_queue_anonymous
)) {
2586 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_anonymous
);
2588 assert(m
->vmp_q_state
== VM_PAGE_ON_INACTIVE_INTERNAL_Q
);
2595 * if we've gotten here, we have no victim page.
2596 * check to see if we've not finished balancing the queues
2597 * or we have a page on the aged speculative queue that we
2598 * skipped due to force_anonymous == TRUE.. or we have
2599 * speculative pages that we can prematurely age... if
2600 * one of these cases we'll keep going, else panic
2602 force_anonymous
= FALSE
;
2603 VM_PAGEOUT_DEBUG(vm_pageout_no_victim
, 1);
2605 if (!vm_page_queue_empty(&sq
->age_q
)) {
2606 goto done_with_inactivepage
;
2609 if (vm_page_speculative_count
) {
2610 force_speculative_aging
= TRUE
;
2611 goto done_with_inactivepage
;
2613 panic("vm_pageout: no victim");
2617 assert(VM_PAGE_PAGEABLE(m
));
2618 m_object
= VM_PAGE_OBJECT(m
);
2619 force_anonymous
= FALSE
;
2621 page_prev_q_state
= m
->vmp_q_state
;
2623 * we just found this page on one of our queues...
2624 * it can't also be on the pageout queue, so safe
2625 * to call vm_page_queues_remove
2627 vm_page_queues_remove(m
, TRUE
);
2629 assert(!m
->vmp_laundry
);
2630 assert(!m
->vmp_private
);
2631 assert(!m
->vmp_fictitious
);
2632 assert(m_object
!= kernel_object
);
2633 assert(VM_PAGE_GET_PHYS_PAGE(m
) != vm_page_guard_addr
);
2635 vm_pageout_vminfo
.vm_pageout_considered_page
++;
2637 DTRACE_VM2(scan
, int, 1, (uint64_t *), NULL
);
2640 * check to see if we currently are working
2641 * with the same object... if so, we've
2642 * already got the lock
2644 if (m_object
!= object
) {
2646 * the object associated with candidate page is
2647 * different from the one we were just working
2648 * with... dump the lock if we still own it
2650 if (object
!= NULL
) {
2651 vm_object_unlock(object
);
2655 * Try to lock object; since we've alread got the
2656 * page queues lock, we can only 'try' for this one.
2657 * if the 'try' fails, we need to do a mutex_pause
2658 * to allow the owner of the object lock a chance to
2659 * run... otherwise, we're likely to trip over this
2660 * object in the same state as we work our way through
2661 * the queue... clumps of pages associated with the same
2662 * object are fairly typical on the inactive and active queues
2664 if (!vm_object_lock_try_scan(m_object
)) {
2665 vm_page_t m_want
= NULL
;
2667 vm_pageout_vminfo
.vm_pageout_inactive_nolock
++;
2669 if (page_prev_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
) {
2670 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock
, 1);
2673 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m
));
2675 m
->vmp_reference
= FALSE
;
2677 if (!m_object
->object_is_shared_cache
) {
2679 * don't apply this optimization if this is the shared cache
2680 * object, it's too easy to get rid of very hot and important
2682 * m->vmp_object must be stable since we hold the page queues lock...
2683 * we can update the scan_collisions field sans the object lock
2684 * since it is a separate field and this is the only spot that does
2685 * a read-modify-write operation and it is never executed concurrently...
2686 * we can asynchronously set this field to 0 when creating a UPL, so it
2687 * is possible for the value to be a bit non-determistic, but that's ok
2688 * since it's only used as a hint
2690 m_object
->scan_collisions
= 1;
2692 if (!vm_page_queue_empty(&vm_page_queue_cleaned
)) {
2693 m_want
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_cleaned
);
2694 } else if (!vm_page_queue_empty(&sq
->age_q
)) {
2695 m_want
= (vm_page_t
) vm_page_queue_first(&sq
->age_q
);
2696 } else if ((grab_anonymous
== FALSE
|| anons_grabbed
>= ANONS_GRABBED_LIMIT
||
2697 vm_page_queue_empty(&vm_page_queue_anonymous
)) &&
2698 !vm_page_queue_empty(&vm_page_queue_inactive
)) {
2699 m_want
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_inactive
);
2700 } else if (!vm_page_queue_empty(&vm_page_queue_anonymous
)) {
2701 m_want
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_anonymous
);
2705 * this is the next object we're going to be interested in
2706 * try to make sure its available after the mutex_pause
2710 vm_pageout_scan_wants_object
= VM_PAGE_OBJECT(m_want
);
2716 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
2718 assert(m_object
== object
);
2719 assert(VM_PAGE_OBJECT(m
) == m_object
);
2723 * Somebody is already playing with this page.
2724 * Put it back on the appropriate queue
2727 VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy
, 1);
2729 if (page_prev_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
) {
2730 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy
, 1);
2733 if (page_prev_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) {
2734 vm_page_enqueue_inactive(m
, FALSE
);
2736 vm_page_activate(m
);
2738 #if CONFIG_BACKGROUND_QUEUE
2739 #if DEVELOPMENT || DEBUG
2740 if (page_from_bg_q
== TRUE
) {
2741 if (m_object
->internal
) {
2742 vm_pageout_rejected_bq_internal
++;
2744 vm_pageout_rejected_bq_external
++;
2749 goto done_with_inactivepage
;
2753 * if (m->vmp_cleaning && !m->vmp_free_when_done)
2754 * If already cleaning this page in place
2755 * just leave if off the paging queues.
2756 * We can leave the page mapped, and upl_commit_range
2757 * will put it on the clean queue.
2759 * if (m->vmp_free_when_done && !m->vmp_cleaning)
2760 * an msync INVALIDATE is in progress...
2761 * this page has been marked for destruction
2762 * after it has been cleaned,
2763 * but not yet gathered into a UPL
2764 * where 'cleaning' will be set...
2765 * just leave it off the paging queues
2767 * if (m->vmp_free_when_done && m->vmp_clenaing)
2768 * an msync INVALIDATE is in progress
2769 * and the UPL has already gathered this page...
2770 * just leave it off the paging queues
2772 if (m
->vmp_free_when_done
|| m
->vmp_cleaning
) {
2773 goto done_with_inactivepage
;
2778 * If it's absent, in error or the object is no longer alive,
2779 * we can reclaim the page... in the no longer alive case,
2780 * there are 2 states the page can be in that preclude us
2781 * from reclaiming it - busy or cleaning - that we've already
2784 if (m
->vmp_absent
|| m
->vmp_error
|| !object
->alive
) {
2785 if (m
->vmp_absent
) {
2786 VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent
, 1);
2787 } else if (!object
->alive
) {
2788 VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive
, 1);
2790 VM_PAGEOUT_DEBUG(vm_pageout_inactive_error
, 1);
2793 if (vm_pageout_deadlock_target
) {
2794 VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success
, 1);
2795 vm_pageout_deadlock_target
--;
2798 DTRACE_VM2(dfree
, int, 1, (uint64_t *), NULL
);
2800 if (object
->internal
) {
2801 DTRACE_VM2(anonfree
, int, 1, (uint64_t *), NULL
);
2803 DTRACE_VM2(fsfree
, int, 1, (uint64_t *), NULL
);
2805 assert(!m
->vmp_cleaning
);
2806 assert(!m
->vmp_laundry
);
2808 if (!object
->internal
&&
2809 object
->pager
!= NULL
&&
2810 object
->pager
->mo_pager_ops
== &shared_region_pager_ops
) {
2811 shared_region_pager_reclaimed
++;
2817 * remove page from object here since we're already
2818 * behind the object lock... defer the rest of the work
2819 * we'd normally do in vm_page_free_prepare_object
2820 * until 'vm_page_free_list' is called
2822 if (m
->vmp_tabled
) {
2823 vm_page_remove(m
, TRUE
);
2826 assert(m
->vmp_pageq
.next
== 0 && m
->vmp_pageq
.prev
== 0);
2827 m
->vmp_snext
= local_freeq
;
2831 if (page_prev_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) {
2832 vm_pageout_vminfo
.vm_pageout_freed_speculative
++;
2833 } else if (page_prev_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
) {
2834 vm_pageout_vminfo
.vm_pageout_freed_cleaned
++;
2835 } else if (page_prev_q_state
== VM_PAGE_ON_INACTIVE_INTERNAL_Q
) {
2836 vm_pageout_vminfo
.vm_pageout_freed_internal
++;
2838 vm_pageout_vminfo
.vm_pageout_freed_external
++;
2841 inactive_burst_count
= 0;
2842 goto done_with_inactivepage
;
2844 if (object
->copy
== VM_OBJECT_NULL
) {
2846 * No one else can have any interest in this page.
2847 * If this is an empty purgable object, the page can be
2848 * reclaimed even if dirty.
2849 * If the page belongs to a volatile purgable object, we
2850 * reactivate it if the compressor isn't active.
2852 if (object
->purgable
== VM_PURGABLE_EMPTY
) {
2853 if (m
->vmp_pmapped
== TRUE
) {
2854 /* unmap the page */
2855 refmod_state
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
2856 if (refmod_state
& VM_MEM_MODIFIED
) {
2857 SET_PAGE_DIRTY(m
, FALSE
);
2860 if (m
->vmp_dirty
|| m
->vmp_precious
) {
2861 /* we saved the cost of cleaning this page ! */
2862 vm_page_purged_count
++;
2867 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE
) {
2869 * With the VM compressor, the cost of
2870 * reclaiming a page is much lower (no I/O),
2871 * so if we find a "volatile" page, it's better
2872 * to let it get compressed rather than letting
2873 * it occupy a full page until it gets purged.
2874 * So no need to check for "volatile" here.
2876 } else if (object
->purgable
== VM_PURGABLE_VOLATILE
) {
2878 * Avoid cleaning a "volatile" page which might
2882 /* if it's wired, we can't put it on our queue */
2883 assert(!VM_PAGE_WIRED(m
));
2885 /* just stick it back on! */
2886 reactivated_this_call
++;
2888 if (page_prev_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
) {
2889 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated
, 1);
2892 goto reactivate_page
;
2896 * If it's being used, reactivate.
2897 * (Fictitious pages are either busy or absent.)
2898 * First, update the reference and dirty bits
2899 * to make sure the page is unreferenced.
2903 if (m
->vmp_reference
== FALSE
&& m
->vmp_pmapped
== TRUE
) {
2904 refmod_state
= pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m
));
2906 if (refmod_state
& VM_MEM_REFERENCED
) {
2907 m
->vmp_reference
= TRUE
;
2909 if (refmod_state
& VM_MEM_MODIFIED
) {
2910 SET_PAGE_DIRTY(m
, FALSE
);
2914 if (m
->vmp_reference
|| m
->vmp_dirty
) {
2915 /* deal with a rogue "reusable" page */
2916 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m
, m_object
);
2918 divisor
= vm_pageout_state
.vm_page_xpmapped_min_divisor
;
2921 vm_pageout_state
.vm_page_xpmapped_min
= 0;
2923 vm_pageout_state
.vm_page_xpmapped_min
= (vm_page_external_count
* 10) / divisor
;
2926 if (!m
->vmp_no_cache
&&
2927 #if CONFIG_BACKGROUND_QUEUE
2928 page_from_bg_q
== FALSE
&&
2930 (m
->vmp_reference
|| (m
->vmp_xpmapped
&& !object
->internal
&&
2931 (vm_page_xpmapped_external_count
< vm_pageout_state
.vm_page_xpmapped_min
)))) {
2933 * The page we pulled off the inactive list has
2934 * been referenced. It is possible for other
2935 * processors to be touching pages faster than we
2936 * can clear the referenced bit and traverse the
2937 * inactive queue, so we limit the number of
2940 if (++reactivated_this_call
>= reactivate_limit
) {
2941 vm_pageout_vminfo
.vm_pageout_reactivation_limit_exceeded
++;
2942 } else if (++inactive_reclaim_run
>= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM
) {
2943 vm_pageout_vminfo
.vm_pageout_inactive_force_reclaim
++;
2947 if (page_prev_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
) {
2948 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated
, 1);
2951 vm_pageout_vminfo
.vm_pageout_inactive_referenced
++;
2953 if (!object
->internal
&& object
->pager
!= MEMORY_OBJECT_NULL
&&
2954 vnode_pager_get_isinuse(object
->pager
, &isinuse
) == KERN_SUCCESS
&& !isinuse
) {
2956 * no explict mappings of this object exist
2957 * and it's not open via the filesystem
2959 vm_page_deactivate(m
);
2960 VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated
, 1);
2964 * The page was/is being used, so put back on active list.
2966 vm_page_activate(m
);
2967 VM_STAT_INCR(reactivations
);
2968 inactive_burst_count
= 0;
2970 #if CONFIG_BACKGROUND_QUEUE
2971 #if DEVELOPMENT || DEBUG
2972 if (page_from_bg_q
== TRUE
) {
2973 if (m_object
->internal
) {
2974 vm_pageout_rejected_bq_internal
++;
2976 vm_pageout_rejected_bq_external
++;
2981 if (page_prev_q_state
== VM_PAGE_ON_INACTIVE_CLEANED_Q
) {
2982 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated
, 1);
2984 vm_pageout_state
.vm_pageout_inactive_used
++;
2986 goto done_with_inactivepage
;
2989 * Make sure we call pmap_get_refmod() if it
2990 * wasn't already called just above, to update
2993 if ((refmod_state
== -1) && !m
->vmp_dirty
&& m
->vmp_pmapped
) {
2994 refmod_state
= pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m
));
2995 if (refmod_state
& VM_MEM_MODIFIED
) {
2996 SET_PAGE_DIRTY(m
, FALSE
);
3002 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
3003 object
, m
->vmp_offset
, m
, 0, 0);
3006 * we've got a candidate page to steal...
3008 * m->vmp_dirty is up to date courtesy of the
3009 * preceding check for m->vmp_reference... if
3010 * we get here, then m->vmp_reference had to be
3011 * FALSE (or possibly "reactivate_limit" was
3012 * exceeded), but in either case we called
3013 * pmap_get_refmod() and updated both
3014 * m->vmp_reference and m->vmp_dirty
3016 * if it's dirty or precious we need to
3017 * see if the target queue is throtttled
3018 * it if is, we need to skip over it by moving it back
3019 * to the end of the inactive queue
3022 inactive_throttled
= FALSE
;
3024 if (m
->vmp_dirty
|| m
->vmp_precious
) {
3025 if (object
->internal
) {
3026 if (VM_PAGE_Q_THROTTLED(iq
)) {
3027 inactive_throttled
= TRUE
;
3029 } else if (VM_PAGE_Q_THROTTLED(eq
)) {
3030 inactive_throttled
= TRUE
;
3034 if (!VM_DYNAMIC_PAGING_ENABLED() &&
3035 object
->internal
&& m
->vmp_dirty
&&
3036 (object
->purgable
== VM_PURGABLE_DENY
||
3037 object
->purgable
== VM_PURGABLE_NONVOLATILE
||
3038 object
->purgable
== VM_PURGABLE_VOLATILE
)) {
3039 vm_page_check_pageable_safe(m
);
3040 assert(m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
3041 vm_page_queue_enter(&vm_page_queue_throttled
, m
, vmp_pageq
);
3042 m
->vmp_q_state
= VM_PAGE_ON_THROTTLED_Q
;
3043 vm_page_throttled_count
++;
3045 VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled
, 1);
3047 inactive_burst_count
= 0;
3048 goto done_with_inactivepage
;
3050 if (inactive_throttled
== TRUE
) {
3051 if (object
->internal
== FALSE
) {
3053 * we need to break up the following potential deadlock case...
3054 * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
3055 * b) The thread doing the writing is waiting for pages while holding the truncate lock
3056 * c) Most of the pages in the inactive queue belong to this file.
3058 * we are potentially in this deadlock because...
3059 * a) the external pageout queue is throttled
3060 * b) we're done with the active queue and moved on to the inactive queue
3061 * c) we've got a dirty external page
3063 * since we don't know the reason for the external pageout queue being throttled we
3064 * must suspect that we are deadlocked, so move the current page onto the active queue
3065 * in an effort to cause a page from the active queue to 'age' to the inactive queue
3067 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
3068 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
3069 * pool the next time we select a victim page... if we can make enough new free pages,
3070 * the deadlock will break, the external pageout queue will empty and it will no longer
3073 * if we have jetsam configured, keep a count of the pages reactivated this way so
3074 * that we can try to find clean pages in the active/inactive queues before
3075 * deciding to jetsam a process
3077 vm_pageout_vminfo
.vm_pageout_scan_inactive_throttled_external
++;
3079 vm_page_check_pageable_safe(m
);
3080 assert(m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
3081 vm_page_queue_enter(&vm_page_queue_active
, m
, vmp_pageq
);
3082 m
->vmp_q_state
= VM_PAGE_ON_ACTIVE_Q
;
3083 vm_page_active_count
++;
3084 vm_page_pageable_external_count
++;
3086 vm_pageout_adjust_eq_iothrottle(eq
, FALSE
);
3088 #if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
3089 vm_pageout_inactive_external_forced_reactivate_limit
--;
3091 if (vm_pageout_inactive_external_forced_reactivate_limit
<= 0) {
3092 vm_pageout_inactive_external_forced_reactivate_limit
= vm_page_active_count
+ vm_page_inactive_count
;
3094 * Possible deadlock scenario so request jetsam action
3097 vm_object_unlock(object
);
3098 object
= VM_OBJECT_NULL
;
3099 vm_page_unlock_queues();
3101 VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam
, VM_PAGEOUT_JETSAM
, DBG_FUNC_START
,
3102 vm_page_active_count
, vm_page_inactive_count
, vm_page_free_count
, vm_page_free_count
);
3104 /* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
3105 if (memorystatus_kill_on_VM_page_shortage(FALSE
) == TRUE
) {
3106 VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count
, 1);
3109 VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam
, VM_PAGEOUT_JETSAM
, DBG_FUNC_END
,
3110 vm_page_active_count
, vm_page_inactive_count
, vm_page_free_count
, vm_page_free_count
);
3112 vm_page_lock_queues();
3115 #else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
3116 force_anonymous
= TRUE
;
3118 inactive_burst_count
= 0;
3119 goto done_with_inactivepage
;
3121 goto must_activate_page
;
3126 * we've got a page that we can steal...
3127 * eliminate all mappings and make sure
3128 * we have the up-to-date modified state
3130 * if we need to do a pmap_disconnect then we
3131 * need to re-evaluate m->vmp_dirty since the pmap_disconnect
3132 * provides the true state atomically... the
3133 * page was still mapped up to the pmap_disconnect
3134 * and may have been dirtied at the last microsecond
3136 * Note that if 'pmapped' is FALSE then the page is not
3137 * and has not been in any map, so there is no point calling
3138 * pmap_disconnect(). m->vmp_dirty could have been set in anticipation
3139 * of likely usage of the page.
3141 if (m
->vmp_pmapped
== TRUE
) {
3145 * Don't count this page as going into the compressor
3146 * if any of these are true:
3147 * 1) compressed pager isn't enabled
3148 * 2) Freezer enabled device with compressed pager
3149 * backend (exclusive use) i.e. most of the VM system
3150 * (including vm_pageout_scan) has no knowledge of
3152 * 3) This page belongs to a file and hence will not be
3153 * sent into the compressor
3155 if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE
||
3156 object
->internal
== FALSE
) {
3158 } else if (m
->vmp_dirty
|| m
->vmp_precious
) {
3160 * VM knows that this page is dirty (or
3161 * precious) and needs to be compressed
3162 * rather than freed.
3163 * Tell the pmap layer to count this page
3166 pmap_options
= PMAP_OPTIONS_COMPRESSOR
;
3169 * VM does not know if the page needs to
3170 * be preserved but the pmap layer might tell
3171 * us if any mapping has "modified" it.
3172 * Let's the pmap layer to count this page
3173 * as compressed if and only if it has been
3177 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED
;
3179 refmod_state
= pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m
),
3182 if (refmod_state
& VM_MEM_MODIFIED
) {
3183 SET_PAGE_DIRTY(m
, FALSE
);
3188 * reset our count of pages that have been reclaimed
3189 * since the last page was 'stolen'
3191 inactive_reclaim_run
= 0;
3194 * If it's clean and not precious, we can free the page.
3196 if (!m
->vmp_dirty
&& !m
->vmp_precious
) {
3197 vm_pageout_state
.vm_pageout_inactive_clean
++;
3200 * OK, at this point we have found a page we are going to free.
3202 #if CONFIG_PHANTOM_CACHE
3203 if (!object
->internal
) {
3204 vm_phantom_cache_add_ghost(m
);
3211 * The page may have been dirtied since the last check
3212 * for a throttled target queue (which may have been skipped
3213 * if the page was clean then). With the dirty page
3214 * disconnected here, we can make one final check.
3216 if (object
->internal
) {
3217 if (VM_PAGE_Q_THROTTLED(iq
)) {
3218 inactive_throttled
= TRUE
;
3220 } else if (VM_PAGE_Q_THROTTLED(eq
)) {
3221 inactive_throttled
= TRUE
;
3224 if (inactive_throttled
== TRUE
) {
3225 goto throttle_inactive
;
3228 #if VM_PRESSURE_EVENTS
3232 * If Jetsam is enabled, then the sending
3233 * of memory pressure notifications is handled
3234 * from the same thread that takes care of high-water
3235 * and other jetsams i.e. the memorystatus_thread.
3238 #else /* CONFIG_JETSAM */
3240 vm_pressure_response();
3242 #endif /* CONFIG_JETSAM */
3243 #endif /* VM_PRESSURE_EVENTS */
3245 if (page_prev_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) {
3246 VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty
, 1);
3249 if (object
->internal
) {
3250 vm_pageout_vminfo
.vm_pageout_inactive_dirty_internal
++;
3252 vm_pageout_vminfo
.vm_pageout_inactive_dirty_external
++;
3256 * internal pages will go to the compressor...
3257 * external pages will go to the appropriate pager to be cleaned
3258 * and upon completion will end up on 'vm_page_queue_cleaned' which
3259 * is a preferred queue to steal from
3261 vm_pageout_cluster(m
);
3262 inactive_burst_count
= 0;
3264 done_with_inactivepage
:
3266 if (delayed_unlock
++ > delayed_unlock_limit
) {
3267 int freed
= local_freed
;
3269 vm_pageout_prepare_to_block(&object
, &delayed_unlock
, &local_freeq
, &local_freed
,
3270 VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER
);
3272 lck_mtx_yield(&vm_page_queue_lock
);
3274 } else if (vm_pageout_scan_wants_object
) {
3275 vm_page_unlock_queues();
3277 vm_page_lock_queues();
3280 * back to top of pageout scan loop
3287 vm_page_free_reserve(
3290 int free_after_reserve
;
3292 if (VM_CONFIG_COMPRESSOR_IS_PRESENT
) {
3293 if ((vm_page_free_reserved
+ pages
+ COMPRESSOR_FREE_RESERVED_LIMIT
) >= (VM_PAGE_FREE_RESERVED_LIMIT
+ COMPRESSOR_FREE_RESERVED_LIMIT
)) {
3294 vm_page_free_reserved
= VM_PAGE_FREE_RESERVED_LIMIT
+ COMPRESSOR_FREE_RESERVED_LIMIT
;
3296 vm_page_free_reserved
+= (pages
+ COMPRESSOR_FREE_RESERVED_LIMIT
);
3299 if ((vm_page_free_reserved
+ pages
) >= VM_PAGE_FREE_RESERVED_LIMIT
) {
3300 vm_page_free_reserved
= VM_PAGE_FREE_RESERVED_LIMIT
;
3302 vm_page_free_reserved
+= pages
;
3305 free_after_reserve
= vm_pageout_state
.vm_page_free_count_init
- vm_page_free_reserved
;
3307 vm_page_free_min
= vm_page_free_reserved
+
3308 VM_PAGE_FREE_MIN(free_after_reserve
);
3310 if (vm_page_free_min
> VM_PAGE_FREE_MIN_LIMIT
) {
3311 vm_page_free_min
= VM_PAGE_FREE_MIN_LIMIT
;
3314 vm_page_free_target
= vm_page_free_reserved
+
3315 VM_PAGE_FREE_TARGET(free_after_reserve
);
3317 if (vm_page_free_target
> VM_PAGE_FREE_TARGET_LIMIT
) {
3318 vm_page_free_target
= VM_PAGE_FREE_TARGET_LIMIT
;
3321 if (vm_page_free_target
< vm_page_free_min
+ 5) {
3322 vm_page_free_target
= vm_page_free_min
+ 5;
3325 vm_page_throttle_limit
= vm_page_free_target
- (vm_page_free_target
/ 2);
3329 * vm_pageout is the high level pageout daemon.
3333 vm_pageout_continue(void)
3335 DTRACE_VM2(pgrrun
, int, 1, (uint64_t *), NULL
);
3336 VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter
, 1);
3338 #if !CONFIG_EMBEDDED
3339 lck_mtx_lock(&vm_page_queue_free_lock
);
3340 vm_pageout_running
= TRUE
;
3341 lck_mtx_unlock(&vm_page_queue_free_lock
);
3342 #endif /* CONFIG_EMBEDDED */
3346 * we hold both the vm_page_queue_free_lock
3347 * and the vm_page_queues_lock at this point
3349 assert(vm_page_free_wanted
== 0);
3350 assert(vm_page_free_wanted_privileged
== 0);
3351 assert_wait((event_t
) &vm_page_free_wanted
, THREAD_UNINT
);
3353 #if !CONFIG_EMBEDDED
3354 vm_pageout_running
= FALSE
;
3355 if (vm_pageout_waiter
) {
3356 vm_pageout_waiter
= FALSE
;
3357 thread_wakeup((event_t
)&vm_pageout_waiter
);
3359 #endif /* !CONFIG_EMBEDDED */
3361 lck_mtx_unlock(&vm_page_queue_free_lock
);
3362 vm_page_unlock_queues();
3364 counter(c_vm_pageout_block
++);
3365 thread_block((thread_continue_t
)vm_pageout_continue
);
3369 #if !CONFIG_EMBEDDED
3371 vm_pageout_wait(uint64_t deadline
)
3375 lck_mtx_lock(&vm_page_queue_free_lock
);
3376 for (kr
= KERN_SUCCESS
; vm_pageout_running
&& (KERN_SUCCESS
== kr
);) {
3377 vm_pageout_waiter
= TRUE
;
3378 if (THREAD_AWAKENED
!= lck_mtx_sleep_deadline(
3379 &vm_page_queue_free_lock
, LCK_SLEEP_DEFAULT
,
3380 (event_t
) &vm_pageout_waiter
, THREAD_UNINT
, deadline
)) {
3381 kr
= KERN_OPERATION_TIMED_OUT
;
3384 lck_mtx_unlock(&vm_page_queue_free_lock
);
3388 #endif /* !CONFIG_EMBEDDED */
3392 vm_pageout_iothread_external_continue(struct vm_pageout_queue
*q
)
3396 vm_object_offset_t offset
;
3397 memory_object_t pager
;
3399 /* On systems with a compressor, the external IO thread clears its
3400 * VM privileged bit to accommodate large allocations (e.g. bulk UPL
3403 if (vm_pageout_state
.vm_pageout_internal_iothread
!= THREAD_NULL
) {
3404 current_thread()->options
&= ~TH_OPT_VMPRIV
;
3407 vm_page_lockspin_queues();
3409 while (!vm_page_queue_empty(&q
->pgo_pending
)) {
3411 vm_page_queue_remove_first(&q
->pgo_pending
, m
, vmp_pageq
);
3413 assert(m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
);
3416 * grab a snapshot of the object and offset this
3417 * page is tabled in so that we can relookup this
3418 * page after we've taken the object lock - these
3419 * fields are stable while we hold the page queues lock
3420 * but as soon as we drop it, there is nothing to keep
3421 * this page in this object... we hold an activity_in_progress
3422 * on this object which will keep it from terminating
3424 object
= VM_PAGE_OBJECT(m
);
3425 offset
= m
->vmp_offset
;
3427 m
->vmp_q_state
= VM_PAGE_NOT_ON_Q
;
3428 VM_PAGE_ZERO_PAGEQ_ENTRY(m
);
3430 vm_page_unlock_queues();
3432 vm_object_lock(object
);
3434 m
= vm_page_lookup(object
, offset
);
3436 if (m
== NULL
|| m
->vmp_busy
|| m
->vmp_cleaning
||
3437 !m
->vmp_laundry
|| (m
->vmp_q_state
!= VM_PAGE_NOT_ON_Q
)) {
3439 * it's either the same page that someone else has
3440 * started cleaning (or it's finished cleaning or
3441 * been put back on the pageout queue), or
3442 * the page has been freed or we have found a
3443 * new page at this offset... in all of these cases
3444 * we merely need to release the activity_in_progress
3445 * we took when we put the page on the pageout queue
3447 vm_object_activity_end(object
);
3448 vm_object_unlock(object
);
3450 vm_page_lockspin_queues();
3453 pager
= object
->pager
;
3455 if (pager
== MEMORY_OBJECT_NULL
) {
3457 * This pager has been destroyed by either
3458 * memory_object_destroy or vm_object_destroy, and
3459 * so there is nowhere for the page to go.
3461 if (m
->vmp_free_when_done
) {
3463 * Just free the page... VM_PAGE_FREE takes
3464 * care of cleaning up all the state...
3465 * including doing the vm_pageout_throttle_up
3469 vm_page_lockspin_queues();
3471 vm_pageout_throttle_up(m
);
3472 vm_page_activate(m
);
3474 vm_page_unlock_queues();
3477 * And we are done with it.
3480 vm_object_activity_end(object
);
3481 vm_object_unlock(object
);
3483 vm_page_lockspin_queues();
3488 * we don't hold the page queue lock
3489 * so this check isn't safe to make
3494 * give back the activity_in_progress reference we
3495 * took when we queued up this page and replace it
3496 * it with a paging_in_progress reference that will
3497 * also hold the paging offset from changing and
3498 * prevent the object from terminating
3500 vm_object_activity_end(object
);
3501 vm_object_paging_begin(object
);
3502 vm_object_unlock(object
);
3505 * Send the data to the pager.
3506 * any pageout clustering happens there
3508 memory_object_data_return(pager
,
3509 m
->vmp_offset
+ object
->paging_offset
,
3517 vm_object_lock(object
);
3518 vm_object_paging_end(object
);
3519 vm_object_unlock(object
);
3521 vm_pageout_io_throttle();
3523 vm_page_lockspin_queues();
3525 q
->pgo_busy
= FALSE
;
3528 assert_wait((event_t
) &q
->pgo_pending
, THREAD_UNINT
);
3529 vm_page_unlock_queues();
3531 thread_block_parameter((thread_continue_t
)vm_pageout_iothread_external_continue
, (void *) q
);
3536 #define MAX_FREE_BATCH 32
3537 uint32_t vm_compressor_time_thread
; /* Set via sysctl to record time accrued by
3543 vm_pageout_iothread_internal_continue(struct cq
*);
3545 vm_pageout_iothread_internal_continue(struct cq
*cq
)
3547 struct vm_pageout_queue
*q
;
3549 boolean_t pgo_draining
;
3552 vm_page_t local_freeq
= NULL
;
3553 int local_freed
= 0;
3554 int local_batch_size
;
3555 #if DEVELOPMENT || DEBUG
3557 boolean_t marked_active
= FALSE
;
3559 KERNEL_DEBUG(0xe040000c | DBG_FUNC_END
, 0, 0, 0, 0, 0);
3562 local_batch_size
= q
->pgo_maxlaundry
/ (vm_pageout_state
.vm_compressor_thread_count
* 2);
3564 #if RECORD_THE_COMPRESSED_DATA
3565 if (q
->pgo_laundry
) {
3566 c_compressed_record_init();
3570 int pages_left_on_q
= 0;
3575 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START
, 0, 0, 0, 0, 0);
3577 vm_page_lock_queues();
3578 #if DEVELOPMENT || DEBUG
3579 if (marked_active
== FALSE
) {
3581 vmct_state
[cq
->id
] = VMCT_ACTIVE
;
3582 marked_active
= TRUE
;
3583 if (vmct_active
== 1) {
3584 vm_compressor_epoch_start
= mach_absolute_time();
3588 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END
, 0, 0, 0, 0, 0);
3590 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START
, q
->pgo_laundry
, 0, 0, 0, 0);
3592 while (!vm_page_queue_empty(&q
->pgo_pending
) && local_cnt
< local_batch_size
) {
3593 vm_page_queue_remove_first(&q
->pgo_pending
, m
, vmp_pageq
);
3594 assert(m
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
);
3597 m
->vmp_q_state
= VM_PAGE_NOT_ON_Q
;
3598 VM_PAGE_ZERO_PAGEQ_ENTRY(m
);
3599 m
->vmp_laundry
= FALSE
;
3601 m
->vmp_snext
= local_q
;
3605 if (local_q
== NULL
) {
3611 if ((pgo_draining
= q
->pgo_draining
) == FALSE
) {
3612 vm_pageout_throttle_up_batch(q
, local_cnt
);
3613 pages_left_on_q
= q
->pgo_laundry
;
3615 pages_left_on_q
= q
->pgo_laundry
- local_cnt
;
3618 vm_page_unlock_queues();
3620 #if !RECORD_THE_COMPRESSED_DATA
3621 if (pages_left_on_q
>= local_batch_size
&& cq
->id
< (vm_pageout_state
.vm_compressor_thread_count
- 1)) {
3622 thread_wakeup((event_t
) ((uintptr_t)&q
->pgo_pending
+ cq
->id
+ 1));
3625 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END
, q
->pgo_laundry
, 0, 0, 0, 0);
3628 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START
, local_cnt
, 0, 0, 0, 0);
3631 local_q
= m
->vmp_snext
;
3632 m
->vmp_snext
= NULL
;
3634 if (vm_pageout_compress_page(&cq
->current_chead
, cq
->scratch_buf
, m
) == KERN_SUCCESS
) {
3635 #if DEVELOPMENT || DEBUG
3638 KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END
, local_cnt
, 0, 0, 0, 0);
3640 m
->vmp_snext
= local_freeq
;
3644 if (local_freed
>= MAX_FREE_BATCH
) {
3645 OSAddAtomic64(local_freed
, &vm_pageout_vminfo
.vm_pageout_compressions
);
3647 vm_page_free_list(local_freeq
, TRUE
);
3654 while (vm_page_free_count
< COMPRESSOR_FREE_RESERVED_LIMIT
) {
3655 kern_return_t wait_result
;
3656 int need_wakeup
= 0;
3659 OSAddAtomic64(local_freed
, &vm_pageout_vminfo
.vm_pageout_compressions
);
3661 vm_page_free_list(local_freeq
, TRUE
);
3667 lck_mtx_lock_spin(&vm_page_queue_free_lock
);
3669 if (vm_page_free_count
< COMPRESSOR_FREE_RESERVED_LIMIT
) {
3670 if (vm_page_free_wanted_privileged
++ == 0) {
3673 wait_result
= assert_wait((event_t
)&vm_page_free_wanted_privileged
, THREAD_UNINT
);
3675 lck_mtx_unlock(&vm_page_queue_free_lock
);
3678 thread_wakeup((event_t
)&vm_page_free_wanted
);
3681 if (wait_result
== THREAD_WAITING
) {
3682 thread_block(THREAD_CONTINUE_NULL
);
3685 lck_mtx_unlock(&vm_page_queue_free_lock
);
3691 OSAddAtomic64(local_freed
, &vm_pageout_vminfo
.vm_pageout_compressions
);
3693 vm_page_free_list(local_freeq
, TRUE
);
3697 if (pgo_draining
== TRUE
) {
3698 vm_page_lockspin_queues();
3699 vm_pageout_throttle_up_batch(q
, local_cnt
);
3700 vm_page_unlock_queues();
3703 KERNEL_DEBUG(0xe040000c | DBG_FUNC_START
, 0, 0, 0, 0, 0);
3706 * queue lock is held and our q is empty
3708 q
->pgo_busy
= FALSE
;
3711 assert_wait((event_t
) ((uintptr_t)&q
->pgo_pending
+ cq
->id
), THREAD_UNINT
);
3712 #if DEVELOPMENT || DEBUG
3713 if (marked_active
== TRUE
) {
3715 vmct_state
[cq
->id
] = VMCT_IDLE
;
3717 if (vmct_active
== 0) {
3718 vm_compressor_epoch_stop
= mach_absolute_time();
3719 assertf(vm_compressor_epoch_stop
>= vm_compressor_epoch_start
,
3720 "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
3721 vm_compressor_epoch_start
, vm_compressor_epoch_stop
);
3722 /* This interval includes intervals where one or more
3723 * compressor threads were pre-empted
3725 vmct_stats
.vmct_cthreads_total
+= vm_compressor_epoch_stop
- vm_compressor_epoch_start
;
3729 vm_page_unlock_queues();
3730 #if DEVELOPMENT || DEBUG
3731 if (__improbable(vm_compressor_time_thread
)) {
3732 vmct_stats
.vmct_runtimes
[cq
->id
] = thread_get_runtime_self();
3733 vmct_stats
.vmct_pages
[cq
->id
] += ncomps
;
3734 vmct_stats
.vmct_iterations
[cq
->id
]++;
3735 if (ncomps
> vmct_stats
.vmct_maxpages
[cq
->id
]) {
3736 vmct_stats
.vmct_maxpages
[cq
->id
] = ncomps
;
3738 if (ncomps
< vmct_stats
.vmct_minpages
[cq
->id
]) {
3739 vmct_stats
.vmct_minpages
[cq
->id
] = ncomps
;
3744 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END
, 0, 0, 0, 0, 0);
3746 thread_block_parameter((thread_continue_t
)vm_pageout_iothread_internal_continue
, (void *) cq
);
kern_return_t
vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
{
	vm_object_t	object;
	memory_object_t	pager;
	int		compressed_count_delta;
	kern_return_t	retval;

	object = VM_PAGE_OBJECT(m);

	assert(!m->vmp_free_when_done);
	assert(!m->vmp_laundry);

	pager = object->pager;

	if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
		KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);

		vm_object_lock(object);

		/*
		 * If there is no memory object for the page, create
		 * one and hand it to the compression pager.
		 */
		if (!object->pager_initialized) {
			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
		}
		if (!object->pager_initialized) {
			vm_object_compressor_pager_create(object);
		}

		pager = object->pager;

		if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
			/*
			 * Still no pager for the object,
			 * or the pager has been destroyed.
			 * Reactivate the page.
			 *
			 * Should only happen if there is no
			 * default pager.
			 */
			PAGE_WAKEUP_DONE(m);

			vm_page_lockspin_queues();
			vm_page_activate(m);
			VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
			vm_page_unlock_queues();

			/*
			 * And we are done with it.
			 */
			vm_object_activity_end(object);
			vm_object_unlock(object);

			return KERN_FAILURE;
		}
		vm_object_unlock(object);

		KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
	}
	assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
	assert(object->activity_in_progress > 0);

	retval = vm_compressor_pager_put(
		pager,
		m->vmp_offset + object->paging_offset,
		VM_PAGE_GET_PHYS_PAGE(m),
		current_chead,
		scratch_buf,
		&compressed_count_delta);

	vm_object_lock(object);

	assert(object->activity_in_progress > 0);
	assert(VM_PAGE_OBJECT(m) == object);
	assert(!VM_PAGE_WIRED(m));

	vm_compressor_pager_count(pager,
	    compressed_count_delta,
	    FALSE, /* shared_lock */
	    object);

	if (retval == KERN_SUCCESS) {
		/*
		 * If the object is purgeable, its owner's
		 * purgeable ledgers will be updated in
		 * vm_page_remove() but the page still
		 * contributes to the owner's memory footprint,
		 * so account for it as such.
		 */
		if ((object->purgable != VM_PURGABLE_DENY ||
		    object->vo_ledger_tag) &&
		    object->vo_owner != NULL) {
			/* one more compressed purgeable/tagged page */
			vm_object_owner_compressed_update(object, +1);
		}

		VM_STAT_INCR(compressions);

		if (m->vmp_tabled) {
			vm_page_remove(m, TRUE);
		}
	} else {
		PAGE_WAKEUP_DONE(m);

		vm_page_lockspin_queues();

		vm_page_activate(m);
		vm_pageout_vminfo.vm_compressor_failed++;

		vm_page_unlock_queues();
	}
	vm_object_activity_end(object);
	vm_object_unlock(object);

	return retval;
}
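
/*
 * Summary of the contract above: on KERN_SUCCESS the page's contents now
 * live in the compressor pager (and the page was removed from its object if
 * it was tabled); on any other return the page has been reactivated,
 * vm_compressor_failed was bumped, and the caller is still responsible for
 * the page.
 */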
static void
vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
{
	int	policy;

	if (hibernate_cleaning_in_progress == TRUE) {
		req_lowpriority = FALSE;
	}
	if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
		vm_page_unlock_queues();

		if (req_lowpriority == TRUE) {
			policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
			DTRACE_VM(laundrythrottle);
		} else {
			policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
			DTRACE_VM(laundryunthrottle);
		}
		proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
		    TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);

		eq->pgo_lowpriority = req_lowpriority;

		vm_page_lock_queues();
	}
}
static void
vm_pageout_iothread_external(void)
{
	thread_t self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);

	proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
	    TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);

	vm_page_lock_queues();

	vm_pageout_queue_external.pgo_tid = self->thread_id;
	vm_pageout_queue_external.pgo_lowpriority = TRUE;
	vm_pageout_queue_external.pgo_inited = TRUE;

	vm_page_unlock_queues();

	vm_pageout_iothread_external_continue(&vm_pageout_queue_external);

	/*NOTREACHED*/
}
static void
vm_pageout_iothread_internal(struct cq *cq)
{
	thread_t self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	vm_page_lock_queues();

	vm_pageout_queue_internal.pgo_tid = self->thread_id;
	vm_pageout_queue_internal.pgo_lowpriority = TRUE;
	vm_pageout_queue_internal.pgo_inited = TRUE;

	vm_page_unlock_queues();

	if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
		thread_vm_bind_group_add();
	}

	thread_set_thread_name(current_thread(), "VM_compressor");
#if DEVELOPMENT || DEBUG
	vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
#endif
	vm_pageout_iothread_internal_continue(cq);

	/*NOTREACHED*/
}
kern_return_t
vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
{
	if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE; /* Already set */
	}
}
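
/*
 * Illustrative usage (hypothetical caller name): a buffer-cache layer would
 * register its reclaim hook once at startup, e.g.
 *
 *	vm_set_buffer_cleanup_callout(my_buffer_cache_gc);
 *
 * vm_pageout_garbage_collect() below then invokes the hook as
 * (*consider_buffer_cache_collect)(0) when trying to free memory.  A second
 * registration returns KERN_FAILURE because the pointer is only installed by
 * a single compare-and-swap from NULL.
 */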
extern boolean_t memorystatus_manual_testing_on;
extern unsigned int memorystatus_level;

#if VM_PRESSURE_EVENTS

boolean_t vm_pressure_events_enabled = FALSE;

void
vm_pressure_response(void)
{
	vm_pressure_level_t	old_level = kVMPressureNormal;
	int			new_level = -1;
	unsigned int		total_pages;
	uint64_t		available_memory = 0;

	if (vm_pressure_events_enabled == FALSE) {
		return;
	}

#if CONFIG_EMBEDDED

	available_memory = (uint64_t) memorystatus_available_pages;

#else /* CONFIG_EMBEDDED */

	available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
	memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;

#endif /* CONFIG_EMBEDDED */

	total_pages = (unsigned int) atop_64(max_mem);
#if CONFIG_SECLUDED_MEMORY
	total_pages -= vm_page_secluded_count;
#endif /* CONFIG_SECLUDED_MEMORY */
	memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);

	if (memorystatus_manual_testing_on) {
		return;
	}

	old_level = memorystatus_vm_pressure_level;

	switch (memorystatus_vm_pressure_level) {
	case kVMPressureNormal:
	{
		if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
			new_level = kVMPressureCritical;
		} else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
			new_level = kVMPressureWarning;
		}
		break;
	}

	case kVMPressureWarning:
	case kVMPressureUrgent:
	{
		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
			new_level = kVMPressureNormal;
		} else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
			new_level = kVMPressureCritical;
		}
		break;
	}

	case kVMPressureCritical:
	{
		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
			new_level = kVMPressureNormal;
		} else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
			new_level = kVMPressureWarning;
		}
		break;
	}
	}

	if (new_level != -1) {
		memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;

		if (new_level != (int) old_level) {
			VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
			    new_level, old_level, 0, 0);
		}

		if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level)) {
			if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
				thread_wakeup(&vm_pressure_thread);
			}

			if (old_level != memorystatus_vm_pressure_level) {
				thread_wakeup(&vm_pageout_state.vm_pressure_changed);
			}
		}
	}
}
#endif /* VM_PRESSURE_EVENTS */
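
/*
 * Worked example of the level computation in vm_pressure_response(): with
 * 4 GB of DRAM (total_pages ~= 1,048,576 4 KB pages) and 262,144 pages of
 * available memory, memorystatus_level = (262144 * 100) / 1048576 = 25,
 * i.e. the level is simply the percentage of "available" pages relative to
 * the machine's total page count.
 */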
kern_return_t
mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
{
#if CONFIG_EMBEDDED

	return KERN_FAILURE;

#elif !VM_PRESSURE_EVENTS

	return KERN_FAILURE;

#else /* VM_PRESSURE_EVENTS */

	kern_return_t kr = KERN_SUCCESS;

	if (pressure_level != NULL) {
		vm_pressure_level_t old_level = memorystatus_vm_pressure_level;

		if (wait_for_pressure == TRUE) {
			wait_result_t wr = 0;

			while (old_level == *pressure_level) {
				wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
				    THREAD_INTERRUPTIBLE);
				if (wr == THREAD_WAITING) {
					wr = thread_block(THREAD_CONTINUE_NULL);
				}
				if (wr == THREAD_INTERRUPTED) {
					return KERN_ABORTED;
				}

				if (wr == THREAD_AWAKENED) {
					old_level = memorystatus_vm_pressure_level;

					if (old_level != *pressure_level) {
						break;
					}
				}
			}
		}

		*pressure_level = old_level;
	} else {
		kr = KERN_INVALID_ARGUMENT;
	}

	return kr;
#endif /* VM_PRESSURE_EVENTS */
}
#if VM_PRESSURE_EVENTS
void
vm_pressure_thread(void)
{
	static boolean_t thread_initialized = FALSE;

	if (thread_initialized == TRUE) {
		vm_pageout_state.vm_pressure_thread_running = TRUE;
		consider_vm_pressure_events();
		vm_pageout_state.vm_pressure_thread_running = FALSE;
	}

	thread_set_thread_name(current_thread(), "VM_pressure");
	thread_initialized = TRUE;
	assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
	thread_block((thread_continue_t)vm_pressure_thread);
}
#endif /* VM_PRESSURE_EVENTS */
/*
 * called once per-second via "compute_averages"
 */
void
compute_pageout_gc_throttle(__unused void *arg)
{
	if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
		vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;

		thread_wakeup((event_t) &vm_pageout_garbage_collect);
	}
}
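
/*
 * In other words: the garbage-collect thread is only poked when
 * vm_pageout_scan has actually considered new pages since the last
 * once-per-second check, so a completely idle system never wakes it.
 */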
/*
 * vm_pageout_garbage_collect can also be called when the zone allocator needs
 * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
 * jetsams. We need to check if the zone map size is above its jetsam limit to
 * decide if this was indeed the case.
 *
 * We need to do this on a different thread because of the following reasons:
 *
 * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
 * itself causing the system to hang. We perform synchronous jetsams if we're
 * leaking in the VM map entries zone, so the leaking process could be doing a
 * zalloc for a VM map entry while holding its vm_map lock, when it decides to
 * jetsam itself. We also need the vm_map lock on the process termination path,
 * which would now lead the dying process to deadlock against itself.
 *
 * 2. The jetsam path might need to allocate zone memory itself. We could try
 * using the non-blocking variant of zalloc for this path, but we can still
 * end up trying to do a kernel_memory_allocate when the zone_map is almost
 * full.
 */

extern boolean_t is_zone_map_nearing_exhaustion(void);

void
vm_pageout_garbage_collect(int collect)
{
	if (collect) {
		if (is_zone_map_nearing_exhaustion()) {
			/*
			 * Woken up by the zone allocator for zone-map-exhaustion jetsams.
			 *
			 * Bail out after calling zone_gc (which triggers the
			 * zone-map-exhaustion jetsams). If we fall through, the subsequent
			 * operations that clear out a bunch of caches might allocate zone
			 * memory themselves (for eg. vm_map operations would need VM map
			 * entries). Since the zone map is almost full at this point, we
			 * could end up with a panic. We just need to quickly jetsam a
			 * process and exit here.
			 *
			 * It could so happen that we were woken up to relieve memory
			 * pressure and the zone map also happened to be near its limit at
			 * the time, in which case we'll skip out early. But that should be
			 * ok; if memory pressure persists, the thread will simply be woken
			 * up again.
			 */
			consider_zone_gc(TRUE);
		} else {
			/* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
			boolean_t buf_large_zfree = FALSE;
			boolean_t first_try = TRUE;

			consider_machine_collect();

			do {
				if (consider_buffer_cache_collect != NULL) {
					buf_large_zfree = (*consider_buffer_cache_collect)(0);
				}
				if (first_try == TRUE || buf_large_zfree == TRUE) {
					/*
					 * consider_zone_gc should be last, because the other operations
					 * might return memory to zones.
					 */
					consider_zone_gc(FALSE);
				}
				first_try = FALSE;
			} while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);

			consider_machine_adjust();
		}
	}

	assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);

	thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
	/*NOTREACHED*/
}
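
/*
 * Note on the continuation above: the thread re-enters this function with
 * (void *)1, so every wakeup after the initial thread start runs with
 * collect != 0 and performs a collection pass before blocking again.
 */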
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */
void
vm_set_restrictions()
{
	host_basic_info_data_t hinfo;
	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	assert(hinfo.max_cpus > 0);

	if (hinfo.max_cpus <= 3) {
		/*
		 * on systems with a limited number of CPUS, bind the
		 * 4 major threads that can free memory and that tend to use
		 * a fair bit of CPU under pressured conditions to a single processor.
		 * This ensures that these threads don't hog all of the available CPUs
		 * (important for camera launch), while allowing them to run independently
		 * w/r to locks... the 4 threads are
		 * vm_pageout_scan, vm_pageout_iothread_internal (compressor),
		 * vm_compressor_swap_trigger_thread (minor and major compactions),
		 * memorystatus_thread (jetsams).
		 *
		 * the first time the thread is run, it is responsible for checking the
		 * state of vm_restricted_to_single_processor, and if TRUE it calls
		 * thread_bind_master... someday this should be replaced with a group
		 * scheduling mechanism and KPI.
		 */
		vm_pageout_state.vm_restricted_to_single_processor = TRUE;
	} else {
		vm_pageout_state.vm_restricted_to_single_processor = FALSE;
	}
}
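
/*
 * Example: a 2-core or 3-core device (hinfo.max_cpus <= 3) runs with
 * vm_restricted_to_single_processor == TRUE, so the four threads listed in
 * the comment above restrict themselves to a single processor the first time
 * they run; on a 4-core or larger system they remain unbound.
 */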
void
vm_pageout(void)
{
	thread_t	self = current_thread();
	thread_t	thread;
	kern_return_t	result;
	spl_t		s;

	/*
	 * Set thread privileges.
	 */
	s = splsched();

	thread_lock(self);
	self->options |= TH_OPT_VMPRIV;
	sched_set_thread_base_priority(self, BASEPRI_VM);
	thread_unlock(self);

	if (!self->reserved_stack) {
		self->reserved_stack = self->kernel_stack;
	}

	if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
		thread_vm_bind_group_add();
	}

	splx(s);

	thread_set_thread_name(current_thread(), "VM_pageout_scan");

	/*
	 * Initialize some paging parameters.
	 */

	vm_pageout_state.vm_pressure_thread_running = FALSE;
	vm_pageout_state.vm_pressure_changed = FALSE;
	vm_pageout_state.memorystatus_purge_on_warning = 2;
	vm_pageout_state.memorystatus_purge_on_urgent = 5;
	vm_pageout_state.memorystatus_purge_on_critical = 8;
	vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
	vm_pageout_state.vm_page_speculative_percentage = 5;
	vm_pageout_state.vm_page_speculative_target = 0;

	vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL;
	vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL;

	vm_pageout_state.vm_pageout_swap_wait = 0;
	vm_pageout_state.vm_pageout_idle_wait = 0;
	vm_pageout_state.vm_pageout_empty_wait = 0;
	vm_pageout_state.vm_pageout_burst_wait = 0;
	vm_pageout_state.vm_pageout_deadlock_wait = 0;
	vm_pageout_state.vm_pageout_deadlock_relief = 0;
	vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;

	vm_pageout_state.vm_pageout_inactive = 0;
	vm_pageout_state.vm_pageout_inactive_used = 0;
	vm_pageout_state.vm_pageout_inactive_clean = 0;

	vm_pageout_state.vm_memory_pressure = 0;
	vm_pageout_state.vm_page_filecache_min = 0;
#if CONFIG_JETSAM
	vm_pageout_state.vm_page_filecache_min_divisor = 70;
	vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
#else
	vm_pageout_state.vm_page_filecache_min_divisor = 27;
	vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
#endif
	vm_pageout_state.vm_page_free_count_init = vm_page_free_count;

	vm_pageout_state.vm_pageout_considered_page_last = 0;

	if (vm_pageout_state.vm_pageout_swap_wait == 0) {
		vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
	}

	if (vm_pageout_state.vm_pageout_idle_wait == 0) {
		vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
	}

	if (vm_pageout_state.vm_pageout_burst_wait == 0) {
		vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
	}

	if (vm_pageout_state.vm_pageout_empty_wait == 0) {
		vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
	}

	if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
		vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
	}

	if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
		vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
	}

	if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
		vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
	}
	/*
	 * even if we've already called vm_page_free_reserve
	 * call it again here to ensure that the targets are
	 * accurately calculated (it uses vm_page_free_count_init)
	 * calling it with an arg of 0 will not change the reserve
	 * but will re-calculate free_min and free_target
	 */
	if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
		vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
	} else {
		vm_page_free_reserve(0);
	}

	vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
	vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
	vm_pageout_queue_external.pgo_laundry = 0;
	vm_pageout_queue_external.pgo_idle = FALSE;
	vm_pageout_queue_external.pgo_busy = FALSE;
	vm_pageout_queue_external.pgo_throttled = FALSE;
	vm_pageout_queue_external.pgo_draining = FALSE;
	vm_pageout_queue_external.pgo_lowpriority = FALSE;
	vm_pageout_queue_external.pgo_tid = -1;
	vm_pageout_queue_external.pgo_inited = FALSE;

	vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
	vm_pageout_queue_internal.pgo_maxlaundry = 0;
	vm_pageout_queue_internal.pgo_laundry = 0;
	vm_pageout_queue_internal.pgo_idle = FALSE;
	vm_pageout_queue_internal.pgo_busy = FALSE;
	vm_pageout_queue_internal.pgo_throttled = FALSE;
	vm_pageout_queue_internal.pgo_draining = FALSE;
	vm_pageout_queue_internal.pgo_lowpriority = FALSE;
	vm_pageout_queue_internal.pgo_tid = -1;
	vm_pageout_queue_internal.pgo_inited = FALSE;

	/* internal pageout thread started when default pager registered first time */
	/* external pageout and garbage collection threads started here */

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
	    BASEPRI_VM,
	    &vm_pageout_state.vm_pageout_external_iothread);
	if (result != KERN_SUCCESS) {
		panic("vm_pageout_iothread_external: create failed");
	}

	thread_deallocate(vm_pageout_state.vm_pageout_external_iothread);

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
	    BASEPRI_DEFAULT,
	    &thread);
	if (result != KERN_SUCCESS) {
		panic("vm_pageout_garbage_collect: create failed");
	}

	thread_deallocate(thread);

#if VM_PRESSURE_EVENTS
	result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
	    BASEPRI_DEFAULT,
	    &thread);

	if (result != KERN_SUCCESS) {
		panic("vm_pressure_thread: create failed");
	}

	thread_deallocate(thread);
#endif

	vm_object_reaper_init();

	bzero(&vm_config, sizeof(vm_config));

	switch (vm_compressor_mode) {
	case VM_PAGER_DEFAULT:
		printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");

	case VM_PAGER_COMPRESSOR_WITH_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		vm_config.compressor_is_active = TRUE;
		vm_config.swap_is_active = TRUE;
		break;

	case VM_PAGER_COMPRESSOR_NO_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		vm_config.compressor_is_active = TRUE;
		break;

	case VM_PAGER_FREEZER_DEFAULT:
		printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");

	case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		break;

	case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		vm_config.compressor_is_active = TRUE;
		vm_config.freezer_swap_is_active = TRUE;
		break;

	case VM_PAGER_NOT_CONFIGURED:
		break;

	default:
		printf("unknown compressor mode - %x\n", vm_compressor_mode);
		break;
	}
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		vm_compressor_pager_init();
	}

#if VM_PRESSURE_EVENTS
	vm_pressure_events_enabled = TRUE;
#endif /* VM_PRESSURE_EVENTS */

#if CONFIG_PHANTOM_CACHE
	vm_phantom_cache_init();
#endif
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
	printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
	    (uint64_t) vm_page_fake_buckets_start,
	    (uint64_t) vm_page_fake_buckets_end);
	pmap_protect(kernel_pmap,
	    vm_page_fake_buckets_start,
	    vm_page_fake_buckets_end,
	    VM_PROT_READ);
//	*(char *) vm_page_fake_buckets_start = 'x';	/* panic! */
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */

#if VM_OBJECT_TRACKING
	vm_object_tracking_init();
#endif /* VM_OBJECT_TRACKING */

	vm_pageout_continue();

	/*
	 * The vm_pageout_continue() call above never returns, so the code below is never
	 * executed.  We take advantage of this to declare several DTrace VM related probe
	 * points that our kernel doesn't have an analog for.  These are probe points that
	 * exist in Solaris and are in the DTrace documentation, so people may have written
	 * scripts that use them.  Declaring the probe points here means their scripts will
	 * compile and execute which we want for portability of the scripts, but since this
	 * section of code is never reached, the probe points will simply never fire.  Yes,
	 * this is basically a hack.  The problem is the DTrace probe points were chosen with
	 * Solaris specific VM events in mind, not portability to different VM implementations.
	 */
	DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
	/*NOTREACHED*/
}
kern_return_t
vm_pageout_internal_start(void)
{
	kern_return_t	result;
	int		i;
	host_basic_info_data_t hinfo;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	assert(hinfo.max_cpus > 0);

	lck_grp_init(&vm_pageout_lck_grp, "vm_pageout", LCK_GRP_ATTR_NULL);

#if CONFIG_EMBEDDED
	vm_pageout_state.vm_compressor_thread_count = 1;
#else
	if (hinfo.max_cpus > 4) {
		vm_pageout_state.vm_compressor_thread_count = 2;
	} else {
		vm_pageout_state.vm_compressor_thread_count = 1;
	}
#endif
	PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
	    sizeof(vm_pageout_state.vm_compressor_thread_count));

	if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
		vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
	}
	if (vm_pageout_state.vm_compressor_thread_count <= 0) {
		vm_pageout_state.vm_compressor_thread_count = 1;
	} else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
		vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
	}

	vm_pageout_queue_internal.pgo_maxlaundry = (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;

	PE_parse_boot_argn("vmpgoi_maxlaundry", &vm_pageout_queue_internal.pgo_maxlaundry, sizeof(vm_pageout_queue_internal.pgo_maxlaundry));

	for (i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
		ciq[i].id = i;
		ciq[i].q = &vm_pageout_queue_internal;
		ciq[i].current_chead = NULL;
		ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);

		result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i],
		    BASEPRI_VM, &vm_pageout_state.vm_pageout_internal_iothread);

		if (result == KERN_SUCCESS) {
			thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread);
		} else {
			break;
		}
	}
	return result;
}
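
/*
 * Example sizing: on a 6-CPU system with no boot-args, the code above picks
 * 2 compressor threads (max_cpus > 4), and the internal queue's
 * pgo_maxlaundry becomes (2 * 4) * VM_PAGE_LAUNDRY_MAX, i.e. 8 times the
 * per-thread laundry limit.
 */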
#if CONFIG_IOSCHED
/*
 * To support I/O Expedite for compressed files we mark the upls with special flags.
 * The way decmpfs works is that we create a big upl which marks all the pages needed to
 * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
 * then issues smaller I/Os for compressed I/Os, deflates them and puts the data into the pages
 * being held in the big original UPL. We mark each of these smaller UPLs with the flag
 * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
 * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
 * by the req upl lock (the reverse link doesn't need synch. since we never inspect this link
 * unless the real I/O upl is being destroyed).
 */

static void
upl_set_decmp_info(upl_t upl, upl_t src_upl)
{
	assert((src_upl->flags & UPL_DECMP_REQ) != 0);

	upl_lock(src_upl);
	if (src_upl->decmp_io_upl) {
		/*
		 * If there is already an alive real I/O UPL, ignore this new UPL.
		 * This case should rarely happen and even if it does, it just means
		 * that we might issue a spurious expedite which the driver is expected
		 * to handle.
		 */
		upl_unlock(src_upl);
		return;
	}
	src_upl->decmp_io_upl = (void *)upl;
	src_upl->ref_count++;

	upl->flags |= UPL_DECMP_REAL_IO;
	upl->decmp_io_upl = (void *)src_upl;
	upl_unlock(src_upl);
}
#endif /* CONFIG_IOSCHED */
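
/*
 * Sketch of the intended flow described above (the decmpfs-side steps are
 * paraphrased, not taken from this file): decmpfs creates the large "req"
 * UPL and tags it via upl_mark_decmp() (defined later in this file), which
 * also stashes the UPL in the creating thread's decmp_upl field.  When the
 * same thread later builds the smaller UPLs for the real compressed I/Os,
 * upl_create() notices curthread->decmp_upl and calls upl_set_decmp_info()
 * to link the real-I/O UPL back to the req UPL for expedite tracking;
 * upl_unmark_decmp() and upl_destroy() break the links again.
 */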
#if CONFIG_IOSCHED || UPL_DEBUG
int upl_debug_enabled = 1;
#else
int upl_debug_enabled = 0;
#endif
static upl_t
upl_create(int type, int flags, upl_size_t size)
{
	upl_t		upl;
	vm_size_t	page_field_size = 0;
	int		upl_flags = 0;
	vm_size_t	upl_size = sizeof(struct upl);

	size = round_page_32(size);

	if (type & UPL_CREATE_LITE) {
		page_field_size = (atop(size) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;

		upl_flags |= UPL_LITE;
	}
	if (type & UPL_CREATE_INTERNAL) {
		upl_size += sizeof(struct upl_page_info) * atop(size);

		upl_flags |= UPL_INTERNAL;
	}
	upl = (upl_t)kalloc(upl_size + page_field_size);

	if (page_field_size) {
		bzero((char *)upl + upl_size, page_field_size);
	}

	upl->flags = upl_flags | flags;
	upl->kaddr = (vm_offset_t)0;
	upl->map_object = NULL;
	upl->ref_count = 1;
	upl->ext_ref_count = 0;
	upl->highest_page = 0;
	upl_lock_init(upl);
	upl->vector_upl = NULL;
	upl->associated_upl = NULL;
	upl->upl_iodone = NULL;
#if CONFIG_IOSCHED
	if (type & UPL_CREATE_IO_TRACKING) {
		upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
	}

	upl->upl_reprio_info = 0;
	upl->decmp_io_upl = 0;
	if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
		/* Only support expedite on internal UPLs */
		thread_t curthread = current_thread();
		upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
		bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
		upl->flags |= UPL_EXPEDITE_SUPPORTED;
		if (curthread->decmp_upl != NULL) {
			upl_set_decmp_info(upl, curthread->decmp_upl);
		}
	}
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
		upl->upl_creator = current_thread();
		upl->flags |= UPL_TRACKED_BY_OBJECT;
	}
#endif

#ifdef UPL_DEBUG
	upl->ubc_alias1 = 0;
	upl->ubc_alias2 = 0;

	upl->upl_commit_index = 0;
	bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));

	(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif /* UPL_DEBUG */

	return upl;
}
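
/*
 * Allocation-size example for upl_create(): an internal, lite 64 KB UPL
 * covers 16 pages (assuming 4 KB pages), so upl_size grows by
 * 16 * sizeof(struct upl_page_info) for the embedded page list, and
 * page_field_size becomes ((16 + 7) >> 3) rounded up to a 4-byte multiple
 * (4 bytes) for the lite bitmap; both are carved out of the single kalloc()
 * made here.
 */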
static void
upl_destroy(upl_t upl)
{
	int	page_field_size;  /* bit field in word size buf */
	int	size;

	if (upl->ext_ref_count) {
		panic("upl(%p) ext_ref_count", upl);
	}

#if CONFIG_IOSCHED
	if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
		upl_t src_upl;

		src_upl = upl->decmp_io_upl;
		assert((src_upl->flags & UPL_DECMP_REQ) != 0);
		upl_lock(src_upl);
		src_upl->decmp_io_upl = NULL;
		upl_unlock(src_upl);
		upl_deallocate(src_upl);
	}
#endif /* CONFIG_IOSCHED */

#if CONFIG_IOSCHED || UPL_DEBUG
	if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) {
		vm_object_t object;

		if (upl->flags & UPL_SHADOWED) {
			object = upl->map_object->shadow;
		} else {
			object = upl->map_object;
		}

		vm_object_lock(object);
		queue_remove(&object->uplq, upl, upl_t, uplq);
		vm_object_activity_end(object);
		vm_object_collapse(object, 0, TRUE);
		vm_object_unlock(object);
	}
#endif
	/*
	 * drop a reference on the map_object whether or
	 * not a pageout object is inserted
	 */
	if (upl->flags & UPL_SHADOWED) {
		vm_object_deallocate(upl->map_object);
	}

	if (upl->flags & UPL_DEVICE_MEMORY) {
		size = PAGE_SIZE;
	} else {
		size = upl->size;
	}
	page_field_size = 0;

	if (upl->flags & UPL_LITE) {
		page_field_size = ((size / PAGE_SIZE) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
	}
	upl_lock_destroy(upl);
	upl->vector_upl = (vector_upl_t) 0xfeedbeef;

#if CONFIG_IOSCHED
	if (upl->flags & UPL_EXPEDITE_SUPPORTED) {
		kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE));
	}
#endif

	if (upl->flags & UPL_INTERNAL) {
		kfree(upl,
		    sizeof(struct upl) +
		    (sizeof(struct upl_page_info) * (size / PAGE_SIZE))
		    + page_field_size);
	} else {
		kfree(upl, sizeof(struct upl) + page_field_size);
	}
}
void
upl_deallocate(upl_t upl)
{
	upl_lock(upl);

	if (--upl->ref_count == 0) {
		if (vector_upl_is_valid(upl)) {
			vector_upl_deallocate(upl);
		}
		upl_unlock(upl);

		if (upl->upl_iodone) {
			upl_callout_iodone(upl);
		}

		upl_destroy(upl);
	} else {
		upl_unlock(upl);
	}
}
#if CONFIG_IOSCHED
void
upl_mark_decmp(upl_t upl)
{
	if (upl->flags & UPL_TRACKED_BY_OBJECT) {
		upl->flags |= UPL_DECMP_REQ;
		upl->upl_creator->decmp_upl = (void *)upl;
	}
}

void
upl_unmark_decmp(upl_t upl)
{
	if (upl && (upl->flags & UPL_DECMP_REQ)) {
		upl->upl_creator->decmp_upl = NULL;
	}
}
#endif /* CONFIG_IOSCHED */
#define VM_PAGE_Q_BACKING_UP(q)		\
	((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))

boolean_t must_throttle_writes(void);

boolean_t
must_throttle_writes()
{
	if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
	    vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) {
		return TRUE;
	}

	return FALSE;
}
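
/*
 * Example: with pgo_maxlaundry at 128, VM_PAGE_Q_BACKING_UP() fires once
 * pgo_laundry reaches 102 (80% of the limit); writes are then throttled only
 * if pageable external pages also exceed 60% of the non-compressed memory
 * still available.
 */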
/*
 * Routine:	vm_object_upl_request
 * Purpose:
 *	Cause the population of a portion of a vm_object.
 *	Depending on the nature of the request, the pages
 *	returned may contain valid data or be uninitialized.
 *	A page list structure, listing the physical pages
 *	will be returned upon request.
 *	This function is called by the file system or any other
 *	supplier of backing store to a pager.
 *	IMPORTANT NOTE: The caller must still respect the relationship
 *	between the vm_object and its backing memory object.  The
 *	caller MUST NOT substitute changes in the backing file
 *	without first doing a memory_object_lock_request on the
 *	target range unless it is known that the pages are not
 *	shared with another entity at the pager level.
 *
 *	if a page list structure is present
 *	return the mapped physical pages, where a
 *	page is not present, return a non-initialized
 *	one.  If the no_sync bit is turned on, don't
 *	call the pager unlock to synchronize with other
 *	possible copies of the page. Leave pages busy
 *	in the original object, if a page list structure
 *	was specified.  When a commit of the page list
 *	pages is done, the dirty bit will be set for each one.
 *
 *	If a page list structure is present, return
 *	all mapped pages.  Where a page does not exist
 *	map a zero filled one. Leave pages busy in
 *	the original object.  If a page list structure
 *	is not specified, this call is a no-op.
 *
 *	Note:  access of default pager objects has a rather interesting
 *	twist.  The caller of this routine, presumably the file system
 *	page cache handling code, will never actually make a request
 *	against a default pager backed object.  Only the default
 *	pager will make requests on backing store related vm_objects.
 *	In this way the default pager can maintain the relationship
 *	between backing store files (abstract memory objects) and
 *	the vm_objects (cache objects) they support.
 */
__private_extern__ kern_return_t
vm_object_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	upl_control_flags_t	cntrl_flags,
	vm_tag_t		tag)
{
	vm_page_t		dst_page = VM_PAGE_NULL;
	vm_object_offset_t	dst_offset;
	upl_size_t		xfer_size;
	unsigned int		size_in_pages;
	boolean_t		dirty;
	boolean_t		hw_dirty;
	upl_t			upl = NULL;
	unsigned int		entry;
	vm_page_t		alias_page = NULL;
	int			refmod_state = 0;
	wpl_array_t		lite_list = NULL;
	vm_object_t		last_copy_object;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	int			dw_count;
	int			dw_limit;
	int			io_tracking_flag = 0;
	int			grab_options = 0;
	int			page_grab_count = 0;
	ppnum_t			phys_page;
	pmap_flush_context	pmap_flush_context_storage;
	boolean_t		pmap_flushes_delayed = FALSE;
#if DEVELOPMENT || DEBUG
	task_t			task = current_task();
#endif /* DEVELOPMENT || DEBUG */

	if (cntrl_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}
	if ((!object->internal) && (object->paging_offset != 0)) {
		panic("vm_object_upl_request: external object with non-zero paging offset\n");
	}
	if (object->phys_contiguous) {
		panic("vm_object_upl_request: contiguous object specified\n");
	}

	VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0);

	if (size > MAX_UPL_SIZE_BYTES) {
		size = MAX_UPL_SIZE_BYTES;
	}

	if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) {
		*page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
	}

#if CONFIG_IOSCHED || UPL_DEBUG
	if (object->io_tracking || upl_debug_enabled) {
		io_tracking_flag |= UPL_CREATE_IO_TRACKING;
	}
#endif
#if CONFIG_IOSCHED
	if (object->io_tracking) {
		io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
	}
#endif

	if (cntrl_flags & UPL_SET_INTERNAL) {
		if (cntrl_flags & UPL_SET_LITE) {
			upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);

			user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
			lite_list = (wpl_array_t)
			    (((uintptr_t)user_page_list) +
			    ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
			if (size == 0) {
				user_page_list = NULL;
				lite_list = NULL;
			}
		} else {
			upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);

			user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
			if (size == 0) {
				user_page_list = NULL;
			}
		}
	} else {
		if (cntrl_flags & UPL_SET_LITE) {
			upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);

			lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
			if (size == 0) {
				lite_list = NULL;
			}
		} else {
			upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
		}
	}
	*upl_ptr = upl;

	if (user_page_list) {
		user_page_list[0].device = FALSE;
	}

	if (cntrl_flags & UPL_SET_LITE) {
		upl->map_object = object;
	} else {
		upl->map_object = vm_object_allocate(size);
		/*
		 * No need to lock the new object: nobody else knows
		 * about it yet, so it's all ours so far.
		 */
		upl->map_object->shadow = object;
		upl->map_object->pageout = TRUE;
		upl->map_object->can_persist = FALSE;
		upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
		upl->map_object->vo_shadow_offset = offset;
		upl->map_object->wimg_bits = object->wimg_bits;

		VM_PAGE_GRAB_FICTITIOUS(alias_page);

		upl->flags |= UPL_SHADOWED;
	}
	if (cntrl_flags & UPL_FOR_PAGEOUT) {
		upl->flags |= UPL_PAGEOUT;
	}

	vm_object_lock(object);
	vm_object_activity_begin(object);

#if CONFIG_SECLUDED_MEMORY
	if (object->can_grab_secluded) {
		grab_options |= VM_PAGE_GRAB_SECLUDED;
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	/*
	 * we can lock in the paging_offset once paging_in_progress is set
	 */
	upl->size = size;
	upl->offset = offset + object->paging_offset;

#if CONFIG_IOSCHED || UPL_DEBUG
	if (object->io_tracking || upl_debug_enabled) {
		vm_object_activity_begin(object);
		queue_enter(&object->uplq, upl, upl_t, uplq);
	}
#endif
	if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
		/*
		 * Honor copy-on-write obligations
		 *
		 * The caller is gathering these pages and
		 * might modify their contents.  We need to
		 * make sure that the copy object has its own
		 * private copies of these pages before we let
		 * the caller modify them.
		 */
		vm_object_update(object,
		    offset,
		    size,
		    NULL,
		    NULL,
		    FALSE,	/* should_return */
		    MEMORY_OBJECT_COPY_SYNC,
		    VM_PROT_NO_CHANGE);

		VM_PAGEOUT_DEBUG(upl_cow, 1);
		VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT));
	}
	/*
	 * remember which copy object we synchronized with
	 */
	last_copy_object = object->copy;
	entry = 0;

	xfer_size = size;
	dst_offset = offset;
	size_in_pages = size / PAGE_SIZE;

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
	    object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) {
		object->scan_collisions = 0;
	}
	if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
		boolean_t isSSD = FALSE;

		vnode_pager_get_isSSD(object->pager, &isSSD);

		vm_object_unlock(object);

		OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);

		if (isSSD == TRUE) {
			delay(1000 * size_in_pages);
		} else {
			delay(5000 * size_in_pages);
		}
		OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

		vm_object_lock(object);
	}

	while (xfer_size) {
		dwp->dw_mask = 0;

		if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
			vm_object_unlock(object);
			VM_PAGE_GRAB_FICTITIOUS(alias_page);
			vm_object_lock(object);
		}
		if (cntrl_flags & UPL_COPYOUT_FROM) {
			upl->flags |= UPL_PAGE_SYNC_DONE;

			if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
			    dst_page->vmp_fictitious ||
			    dst_page->vmp_absent ||
			    dst_page->vmp_error ||
			    dst_page->vmp_cleaning ||
			    (VM_PAGE_WIRED(dst_page))) {
				if (user_page_list) {
					user_page_list[entry].phys_addr = 0;
				}

				goto try_next_page;
			}
			phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);

			/*
			 * grab this up front...
			 * a high percentage of the time we're going to
			 * need the hardware modification state a bit later
			 * anyway... so we can eliminate an extra call into
			 * the pmap layer by grabbing it here and recording it
			 */
			if (dst_page->vmp_pmapped) {
				refmod_state = pmap_get_refmod(phys_page);
			} else {
				refmod_state = 0;
			}

			if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
				/*
				 * page is on inactive list and referenced...
				 * reactivate it now... this gets it out of the
				 * way of vm_pageout_scan which would have to
				 * reactivate it upon tripping over it
				 */
				dwp->dw_mask |= DW_vm_page_activate;
			}
			if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
				/*
				 * we're only asking for DIRTY pages to be returned
				 */
				if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
					/*
					 * if we were the page stolen by vm_pageout_scan to be
					 * cleaned (as opposed to a buddy being clustered in
					 * or this request is not being driven by a PAGEOUT cluster
					 * then we only need to check for the page being dirty or
					 * precious to decide whether to return it
					 */
					if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) {
						goto check_busy;
					}
					goto dont_return;
				}
				/*
				 * this is a request for a PAGEOUT cluster and this page
				 * is merely along for the ride as a 'buddy'... not only
				 * does it have to be dirty to be returned, but it also
				 * can't have been referenced recently...
				 */
				if ((hibernate_cleaning_in_progress == TRUE ||
				    (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) ||
				    (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
				    ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) {
					goto check_busy;
				}
dont_return:
				/*
				 * if we reach here, we're not to return
				 * the page... go on to the next one
				 */
				if (dst_page->vmp_laundry == TRUE) {
					/*
					 * if we get here, the page is not 'cleaning' (filtered out above).
					 * since it has been referenced, remove it from the laundry
					 * so we don't pay the cost of an I/O to clean a page
					 * we're just going to take back
					 */
					vm_page_lockspin_queues();

					vm_pageout_steal_laundry(dst_page, TRUE);
					vm_page_activate(dst_page);

					vm_page_unlock_queues();
				}
				if (user_page_list) {
					user_page_list[entry].phys_addr = 0;
				}

				goto try_next_page;
			}
check_busy:
			if (dst_page->vmp_busy) {
				if (cntrl_flags & UPL_NOBLOCK) {
					if (user_page_list) {
						user_page_list[entry].phys_addr = 0;
					}
					dwp->dw_mask = 0;

					goto try_next_page;
				}
				/*
				 * someone else is playing with the
				 * page.  We will have to wait.
				 */
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);

				continue;
			}
			if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
				vm_page_lockspin_queues();

				if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
					/*
					 * we've buddied up a page for a clustered pageout
					 * that has already been moved to the pageout
					 * queue by pageout_scan... we need to remove
					 * it from the queue and drop the laundry count
					 * on that queue
					 */
					vm_pageout_throttle_up(dst_page);
				}
				vm_page_unlock_queues();
			}
			hw_dirty = refmod_state & VM_MEM_MODIFIED;
			dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;

			if (phys_page > upl->highest_page) {
				upl->highest_page = phys_page;
			}

			assert(!pmap_is_noencrypt(phys_page));

			if (cntrl_flags & UPL_SET_LITE) {
				unsigned int pg_num;

				pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
				assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
				lite_list[pg_num >> 5] |= 1 << (pg_num & 31);

				if (hw_dirty) {
					if (pmap_flushes_delayed == FALSE) {
						pmap_flush_context_init(&pmap_flush_context_storage);
						pmap_flushes_delayed = TRUE;
					}
					pmap_clear_refmod_options(phys_page,
					    VM_MEM_MODIFIED,
					    PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE,
					    &pmap_flush_context_storage);
				}

				/*
				 * Mark original page as cleaning
				 * in place.
				 */
				dst_page->vmp_cleaning = TRUE;
				dst_page->vmp_precious = FALSE;
			} else {
				/*
				 * use pageclean setup, it is more
				 * convenient even for the pageout
				 * cases here
				 */
				vm_object_lock(upl->map_object);
				vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
				vm_object_unlock(upl->map_object);

				alias_page->vmp_absent = FALSE;
				alias_page = NULL;
			}
			if (dirty) {
				SET_PAGE_DIRTY(dst_page, FALSE);
			} else {
				dst_page->vmp_dirty = FALSE;
			}

			if (!dirty) {
				dst_page->vmp_precious = TRUE;
			}

			if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
				if (!VM_PAGE_WIRED(dst_page)) {
					dst_page->vmp_free_when_done = TRUE;
				}
			}
		} else {
			if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
				/*
				 * Honor copy-on-write obligations
				 *
				 * The copy object has changed since we
				 * last synchronized for copy-on-write.
				 * Another copy object might have been
				 * inserted while we released the object's
				 * lock.  Since someone could have seen the
				 * original contents of the remaining pages
				 * through that new object, we have to
				 * synchronize with it again for the remaining
				 * pages only.  The previous pages are "busy"
				 * so they can not be seen through the new
				 * mapping.  The new mapping will see our
				 * upcoming changes for those previous pages,
				 * but that's OK since they couldn't see what
				 * was there before.  It's just a race anyway
				 * and there's no guarantee of consistency or
				 * atomicity.  We just don't want new mappings
				 * to see both the *before* and *after* pages.
				 */
				if (object->copy != VM_OBJECT_NULL) {
					vm_object_update(
						object,
						dst_offset,/* current offset */
						xfer_size, /* remaining size */
						NULL,
						NULL,
						FALSE,	   /* should_return */
						MEMORY_OBJECT_COPY_SYNC,
						VM_PROT_NO_CHANGE);

					VM_PAGEOUT_DEBUG(upl_cow_again, 1);
					VM_PAGEOUT_DEBUG(upl_cow_again_pages, (xfer_size >> PAGE_SHIFT));
				}
				/*
				 * remember the copy object we synced with
				 */
				last_copy_object = object->copy;
			}
			dst_page = vm_page_lookup(object, dst_offset);

			if (dst_page != VM_PAGE_NULL) {
				if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
					/*
					 * skip over pages already present in the cache
					 */
					if (user_page_list) {
						user_page_list[entry].phys_addr = 0;
					}

					goto try_next_page;
				}
				if (dst_page->vmp_fictitious) {
					panic("need corner case for fictitious page");
				}

				if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
					/*
					 * someone else is playing with the
					 * page.  We will have to wait.
					 */
					PAGE_SLEEP(object, dst_page, THREAD_UNINT);

					continue;
				}
				if (dst_page->vmp_laundry) {
					vm_pageout_steal_laundry(dst_page, FALSE);
				}
			} else {
				if (object->private) {
					/*
					 * This is a nasty wrinkle for users
					 * of upl who encounter device or
					 * private memory however, it is
					 * unavoidable, only a fault can
					 * resolve the actual backing
					 * physical page by asking the
					 * backing device.
					 */
					if (user_page_list) {
						user_page_list[entry].phys_addr = 0;
					}

					goto try_next_page;
				}
				if (object->scan_collisions) {
					/*
					 * the pageout_scan thread is trying to steal
					 * pages from this object, but has run into our
					 * lock... grab 2 pages from the head of the object...
					 * the first is freed on behalf of pageout_scan, the
					 * 2nd is for our own use... we use vm_object_page_grab
					 * in both cases to avoid taking pages from the free
					 * list since we are under memory pressure and our
					 * lock on this object is getting in the way of
					 * vm_pageout_scan
					 */
					dst_page = vm_object_page_grab(object);

					if (dst_page != VM_PAGE_NULL) {
						vm_page_release(dst_page,
						    FALSE);
					}

					dst_page = vm_object_page_grab(object);
				}
				if (dst_page == VM_PAGE_NULL) {
					/*
					 * need to allocate a page
					 */
					dst_page = vm_page_grab_options(grab_options);
					if (dst_page != VM_PAGE_NULL) {
						page_grab_count++;
					}
				}
				if (dst_page == VM_PAGE_NULL) {
					if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
						/*
						 * we don't want to stall waiting for pages to come onto the free list
						 * while we're already holding absent pages in this UPL
						 * the caller will deal with the empty slots
						 */
						if (user_page_list) {
							user_page_list[entry].phys_addr = 0;
						}

						goto try_next_page;
					}
					/*
					 * no pages available... wait
					 * then try again for the same
					 * offset...
					 */
					vm_object_unlock(object);

					OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);

					VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);

					VM_PAGE_WAIT();
					OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

					VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);

					vm_object_lock(object);

					continue;
				}
				vm_page_insert(dst_page, object, dst_offset);

				dst_page->vmp_absent = TRUE;
				dst_page->vmp_busy = FALSE;

				if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
					/*
					 * if UPL_RET_ONLY_ABSENT was specified,
					 * then we're definitely setting up a
					 * upl for a clustered read/pagein
					 * operation... mark the pages as clustered
					 * so upl_commit_range can put them on the
					 * speculative list
					 */
					dst_page->vmp_clustered = TRUE;

					if (!(cntrl_flags & UPL_FILE_IO)) {
						VM_STAT_INCR(pageins);
					}
				}
			}
			phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);

			dst_page->vmp_overwriting = TRUE;

			if (dst_page->vmp_pmapped) {
				if (!(cntrl_flags & UPL_FILE_IO)) {
					/*
					 * eliminate all mappings from the
					 * original object and its progeny
					 */
					refmod_state = pmap_disconnect(phys_page);
				} else {
					refmod_state = pmap_get_refmod(phys_page);
				}
			} else {
				refmod_state = 0;
			}

			hw_dirty = refmod_state & VM_MEM_MODIFIED;
			dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;

			if (cntrl_flags & UPL_SET_LITE) {
				unsigned int pg_num;

				pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
				assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
				lite_list[pg_num >> 5] |= 1 << (pg_num & 31);

				if (hw_dirty) {
					pmap_clear_modify(phys_page);
				}

				/*
				 * Mark original page as cleaning
				 * in place.
				 */
				dst_page->vmp_cleaning = TRUE;
				dst_page->vmp_precious = FALSE;
			} else {
				/*
				 * use pageclean setup, it is more
				 * convenient even for the pageout
				 * cases here
				 */
				vm_object_lock(upl->map_object);
				vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
				vm_object_unlock(upl->map_object);

				alias_page->vmp_absent = FALSE;
				alias_page = NULL;
			}

			if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
				upl->flags &= ~UPL_CLEAR_DIRTY;
				upl->flags |= UPL_SET_DIRTY;
				dirty = TRUE;
				upl->flags |= UPL_SET_DIRTY;
			} else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
				/*
				 * clean in place for read implies
				 * that a write will be done on all
				 * the pages that are dirty before
				 * a upl commit is done.  The caller
				 * is obligated to preserve the
				 * contents of all pages marked dirty
				 */
				upl->flags |= UPL_CLEAR_DIRTY;
			}
			dst_page->vmp_dirty = dirty;

			if (!dirty) {
				dst_page->vmp_precious = TRUE;
			}

			if (!VM_PAGE_WIRED(dst_page)) {
				/*
				 * deny access to the target page while
				 * it is being worked on
				 */
				dst_page->vmp_busy = TRUE;
			} else {
				dwp->dw_mask |= DW_vm_page_wire;
			}

			/*
			 * We might be about to satisfy a fault which has been
			 * requested. So no need for the "restart" bit.
			 */
			dst_page->vmp_restart = FALSE;
			if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
				/*
				 * expect the page to be used
				 */
				dwp->dw_mask |= DW_set_reference;
			}
			if (cntrl_flags & UPL_PRECIOUS) {
				if (object->internal) {
					SET_PAGE_DIRTY(dst_page, FALSE);
					dst_page->vmp_precious = FALSE;
				} else {
					dst_page->vmp_precious = TRUE;
				}
			} else {
				dst_page->vmp_precious = FALSE;
			}
		}
		if (dst_page->vmp_busy) {
			upl->flags |= UPL_HAS_BUSY;
		}

		if (phys_page > upl->highest_page) {
			upl->highest_page = phys_page;
		}
		assert(!pmap_is_noencrypt(phys_page));
		if (user_page_list) {
			user_page_list[entry].phys_addr = phys_page;
			user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
			user_page_list[entry].absent = dst_page->vmp_absent;
			user_page_list[entry].dirty = dst_page->vmp_dirty;
			user_page_list[entry].precious = dst_page->vmp_precious;
			user_page_list[entry].device = FALSE;
			user_page_list[entry].needed = FALSE;
			if (dst_page->vmp_clustered == TRUE) {
				user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
			} else {
				user_page_list[entry].speculative = FALSE;
			}
			user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
			user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
			user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
			user_page_list[entry].mark = FALSE;
		}
		/*
		 * if UPL_RET_ONLY_ABSENT is set, then
		 * we are working with a fresh page and we've
		 * just set the clustered flag on it to
		 * indicate that it was drug in as part of a
		 * speculative cluster... so leave it alone
		 */
		if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
			/*
			 * someone is explicitly grabbing this page...
			 * update clustered and speculative state
			 */
			if (dst_page->vmp_clustered) {
				VM_PAGE_CONSUME_CLUSTERED(dst_page);
			}
		}
try_next_page:
		if (dwp->dw_mask) {
			if (dwp->dw_mask & DW_vm_page_activate) {
				VM_STAT_INCR(reactivations);
			}

			VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);

			if (dw_count >= dw_limit) {
				vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);

				dwp = &dw_array[0];
				dw_count = 0;
			}
		}
		entry++;
		dst_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;
	}
	if (dw_count) {
		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
	}

	if (alias_page != NULL) {
		VM_PAGE_FREE(alias_page);
	}
	if (pmap_flushes_delayed == TRUE) {
		pmap_flush(&pmap_flush_context_storage);
	}

	if (page_list_count != NULL) {
		if (upl->flags & UPL_INTERNAL) {
			*page_list_count = 0;
		} else if (*page_list_count > entry) {
			*page_list_count = entry;
		}
	}
	vm_object_unlock(object);

	VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
#if DEVELOPMENT || DEBUG
	if (task != NULL) {
		ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count);
	}
#endif /* DEVELOPMENT || DEBUG */

	return KERN_SUCCESS;
}
/*
 * Routine:	vm_object_super_upl_request
 * Purpose:
 *	Cause the population of a portion of a vm_object
 *	in much the same way as memory_object_upl_request.
 *	Depending on the nature of the request, the pages
 *	returned may contain valid data or be uninitialized.
 *	However, the region may be expanded up to the super
 *	cluster size provided.
 */

__private_extern__ kern_return_t
vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	upl_control_flags_t	cntrl_flags,
	vm_tag_t		tag)
{
	if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) {
		return KERN_FAILURE;
	}

	assert(object->paging_in_progress);
	offset = offset - object->paging_offset;

	if (super_cluster > size) {
		vm_object_offset_t	base_offset;
		upl_size_t		super_size;
		vm_object_size_t	super_size_64;

		base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
		super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster;
		super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
		super_size = (upl_size_t) super_size_64;
		assert(super_size == super_size_64);

		if (offset > (base_offset + super_size)) {
			panic("vm_object_super_upl_request: Missed target pageout"
			    " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
			    offset, base_offset, super_size, super_cluster,
			    size, object->paging_offset);
		}
		/*
		 * apparently there is a case where the vm requests a
		 * page to be written out whose offset is beyond the
		 * object size
		 */
		if ((offset + size) > (base_offset + super_size)) {
			super_size_64 = (offset + size) - base_offset;
			super_size = (upl_size_t) super_size_64;
			assert(super_size == super_size_64);
		}

		offset = base_offset;
		size = super_size;
	}
	return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
}
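
/*
 * Rounding example for the expansion above: with a 256 KB super_cluster, a
 * 16 KB request at offset 0x44000 gets base_offset = 0x40000; since
 * 0x44000 + 0x4000 fits inside base_offset + 0x40000, super_size stays at
 * 0x40000 and the UPL is built for [0x40000, 0x80000) instead of the
 * original 16 KB window (clipped further if that would run past the end of
 * the object).
 */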
5755 int cs_executable_create_upl
= 0;
5756 extern int proc_selfpid(void);
5757 extern char *proc_name_address(void *p
);
5758 #endif /* CONFIG_EMBEDDED */
5763 vm_map_address_t offset
,
5764 upl_size_t
*upl_size
,
5766 upl_page_info_array_t page_list
,
5767 unsigned int *count
,
5768 upl_control_flags_t
*flags
,
5771 vm_map_entry_t entry
;
5772 upl_control_flags_t caller_flags
;
5773 int force_data_sync
;
5775 vm_object_t local_object
;
5776 vm_map_offset_t local_offset
;
5777 vm_map_offset_t local_start
;
5780 assert(page_aligned(offset
));
5782 caller_flags
= *flags
;
5784 if (caller_flags
& ~UPL_VALID_FLAGS
) {
5786 * For forward compatibility's sake,
5787 * reject any unknown flag.
5789 return KERN_INVALID_VALUE
;
5791 force_data_sync
= (caller_flags
& UPL_FORCE_DATA_SYNC
);
5792 sync_cow_data
= !(caller_flags
& UPL_COPYOUT_FROM
);
5795 return KERN_INVALID_ARGUMENT
;
5799 vm_map_lock_read(map
);
5801 if (!vm_map_lookup_entry(map
, offset
, &entry
)) {
5802 vm_map_unlock_read(map
);
5803 return KERN_FAILURE
;
5806 if ((entry
->vme_end
- offset
) < *upl_size
) {
5807 *upl_size
= (upl_size_t
) (entry
->vme_end
- offset
);
5808 assert(*upl_size
== entry
->vme_end
- offset
);
5811 if (caller_flags
& UPL_QUERY_OBJECT_TYPE
) {
5814 if (!entry
->is_sub_map
&&
5815 VME_OBJECT(entry
) != VM_OBJECT_NULL
) {
5816 if (VME_OBJECT(entry
)->private) {
5817 *flags
= UPL_DEV_MEMORY
;
5820 if (VME_OBJECT(entry
)->phys_contiguous
) {
5821 *flags
|= UPL_PHYS_CONTIG
;
5824 vm_map_unlock_read(map
);
5825 return KERN_SUCCESS
;
5828 if (VME_OBJECT(entry
) == VM_OBJECT_NULL
||
5829 !VME_OBJECT(entry
)->phys_contiguous
) {
5830 if (*upl_size
> MAX_UPL_SIZE_BYTES
) {
5831 *upl_size
= MAX_UPL_SIZE_BYTES
;
5836 * Create an object if necessary.
5838 if (VME_OBJECT(entry
) == VM_OBJECT_NULL
) {
5839 if (vm_map_lock_read_to_write(map
)) {
5840 goto REDISCOVER_ENTRY
;
5843 VME_OBJECT_SET(entry
,
5844 vm_object_allocate((vm_size_t
)
5846 entry
->vme_start
)));
5847 VME_OFFSET_SET(entry
, 0);
5848 assert(entry
->use_pmap
);
5850 vm_map_lock_write_to_read(map
);
5853 if (!(caller_flags
& UPL_COPYOUT_FROM
) &&
5854 !entry
->is_sub_map
&&
5855 !(entry
->protection
& VM_PROT_WRITE
)) {
5856 vm_map_unlock_read(map
);
5857 return KERN_PROTECTION_FAILURE
;
5861 if (map
->pmap
!= kernel_pmap
&&
5862 (caller_flags
& UPL_COPYOUT_FROM
) &&
5863 (entry
->protection
& VM_PROT_EXECUTE
) &&
5864 !(entry
->protection
& VM_PROT_WRITE
)) {
5869 * We're about to create a read-only UPL backed by
5870 * memory from an executable mapping.
5871 * Wiring the pages would result in the pages being copied
5872 * (due to the "MAP_PRIVATE" mapping) and no longer
5873 * code-signed, so no longer eligible for execution.
5874 * Instead, let's copy the data into a kernel buffer and
5875 * create the UPL from this kernel buffer.
5876 * The kernel buffer is then freed, leaving the UPL holding
5877 * the last reference on the VM object, so the memory will
5878 * be released when the UPL is committed.
5881 vm_map_unlock_read(map
);
5882 /* allocate kernel buffer */
5883 ksize
= round_page(*upl_size
);
5885 ret
= kmem_alloc_pageable(kernel_map
,
5889 if (ret
== KERN_SUCCESS
) {
5890 /* copyin the user data */
5891 assert(page_aligned(offset
));
5892 ret
= copyinmap(map
, offset
, (void *)kaddr
, *upl_size
);
5894 if (ret
== KERN_SUCCESS
) {
5895 if (ksize
> *upl_size
) {
5896 /* zero out the extra space in kernel buffer */
5897 memset((void *)(kaddr
+ *upl_size
),
5901 /* create the UPL from the kernel buffer */
5902 ret
= vm_map_create_upl(kernel_map
, kaddr
, upl_size
,
5903 upl
, page_list
, count
, flags
, tag
);
5906 /* free the kernel buffer */
5907 kmem_free(kernel_map
, kaddr
, ksize
);
5911 #if DEVELOPMENT || DEBUG
5912 DTRACE_VM4(create_upl_from_executable
,
5914 vm_map_address_t
, offset
,
5915 upl_size_t
, *upl_size
,
5916 kern_return_t
, ret
);
5917 #endif /* DEVELOPMENT || DEBUG */
5920 #endif /* CONFIG_EMBEDDED */
5922 local_object
= VME_OBJECT(entry
);
5923 assert(local_object
!= VM_OBJECT_NULL
);
5925 if (!entry
->is_sub_map
&&
5926 !entry
->needs_copy
&&
5928 local_object
->vo_size
> *upl_size
&& /* partial UPL */
5929 entry
->wired_count
== 0 && /* No COW for entries that are wired */
5930 (map
->pmap
!= kernel_pmap
) && /* alias checks */
5931 (vm_map_entry_should_cow_for_true_share(entry
) /* case 1 */
5934 local_object
->internal
&&
5935 (local_object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) &&
5936 local_object
->ref_count
> 1))) {
5941 * Set up the targeted range for copy-on-write to avoid
5942 * applying true_share/copy_delay to the entire object.
5945 * This map entry covers only part of an internal
5946 * object. There could be other map entries covering
5947 * other areas of this object and some of these map
5948 * entries could be marked as "needs_copy", which
5949 * assumes that the object is COPY_SYMMETRIC.
5950 * To avoid marking this object as COPY_DELAY and
5951 * "true_share", let's shadow it and mark the new
5952 * (smaller) object as "true_share" and COPY_DELAY.
5955 if (vm_map_lock_read_to_write(map
)) {
5956 goto REDISCOVER_ENTRY
;
5958 vm_map_lock_assert_exclusive(map
);
5959 assert(VME_OBJECT(entry
) == local_object
);
5961 vm_map_clip_start(map
,
5963 vm_map_trunc_page(offset
,
5964 VM_MAP_PAGE_MASK(map
)));
5965 vm_map_clip_end(map
,
5967 vm_map_round_page(offset
+ *upl_size
,
5968 VM_MAP_PAGE_MASK(map
)));
5969 if ((entry
->vme_end
- offset
) < *upl_size
) {
5970 *upl_size
= (upl_size_t
) (entry
->vme_end
- offset
);
5971 assert(*upl_size
== entry
->vme_end
- offset
);
5974 prot
= entry
->protection
& ~VM_PROT_WRITE
;
5975 if (override_nx(map
, VME_ALIAS(entry
)) && prot
) {
5976 prot
|= VM_PROT_EXECUTE
;
5978 vm_object_pmap_protect(local_object
,
5980 entry
->vme_end
- entry
->vme_start
,
5981 ((entry
->is_shared
||
5982 map
->mapped_in_other_pmaps
)
5988 assert(entry
->wired_count
== 0);
5991 * Lock the VM object and re-check its status: if it's mapped
5992 * in another address space, we could still be racing with
5993 * another thread holding that other VM map exclusively.
5995 vm_object_lock(local_object
);
5996 if (local_object
->true_share
) {
5997 /* object is already in proper state: no COW needed */
5998 assert(local_object
->copy_strategy
!=
5999 MEMORY_OBJECT_COPY_SYMMETRIC
);
6001 /* not true_share: ask for copy-on-write below */
6002 assert(local_object
->copy_strategy
==
6003 MEMORY_OBJECT_COPY_SYMMETRIC
);
6004 entry
->needs_copy
= TRUE
;
6006 vm_object_unlock(local_object
);
6008 vm_map_lock_write_to_read(map
);
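	/*
	 * If the entry still needs a copy, resolve the copy-on-write now: the
	 * vm_map_lookup_locked() below faults with VM_PROT_COPY (or VM_PROT_WRITE)
	 * so the UPL ends up backed by the private copy of the data, and the
	 * entry is then re-examined via REDISCOVER_ENTRY.
	 */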
6011 if (entry
->needs_copy
) {
6013 * Honor copy-on-write for COPY_SYMMETRIC
6018 vm_object_offset_t new_offset
;
6021 vm_map_version_t version
;
6023 vm_prot_t fault_type
;
6027 if (caller_flags
& UPL_COPYOUT_FROM
) {
6028 fault_type
= VM_PROT_READ
| VM_PROT_COPY
;
6029 vm_counters
.create_upl_extra_cow
++;
6030 vm_counters
.create_upl_extra_cow_pages
+=
6031 (entry
->vme_end
- entry
->vme_start
) / PAGE_SIZE
;
6033 fault_type
= VM_PROT_WRITE
;
6035 if (vm_map_lookup_locked(&local_map
,
6037 OBJECT_LOCK_EXCLUSIVE
,
6039 &new_offset
, &prot
, &wired
,
6041 &real_map
) != KERN_SUCCESS
) {
6042 if (fault_type
== VM_PROT_WRITE
) {
6043 vm_counters
.create_upl_lookup_failure_write
++;
6045 vm_counters
.create_upl_lookup_failure_copy
++;
6047 vm_map_unlock_read(local_map
);
6048 return KERN_FAILURE
;
6050 if (real_map
!= map
) {
6051 vm_map_unlock(real_map
);
6053 vm_map_unlock_read(local_map
);
6055 vm_object_unlock(object
);
6057 goto REDISCOVER_ENTRY
;
6060 if (entry
->is_sub_map
) {
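		/*
		 * The mapping is a submap: recurse into the submap with the
		 * offset translated into the submap's address space and let the
		 * recursive vm_map_create_upl() build the UPL there.
		 */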
6063 submap
= VME_SUBMAP(entry
);
6064 local_start
= entry
->vme_start
;
6065 local_offset
= VME_OFFSET(entry
);
6067 vm_map_reference(submap
);
6068 vm_map_unlock_read(map
);
6070 ret
= vm_map_create_upl(submap
,
6071 local_offset
+ (offset
- local_start
),
6072 upl_size
, upl
, page_list
, count
, flags
, tag
);
6073 vm_map_deallocate(submap
);
6078 if (sync_cow_data
&&
6079 (VME_OBJECT(entry
)->shadow
||
6080 VME_OBJECT(entry
)->copy
)) {
6081 local_object
= VME_OBJECT(entry
);
6082 local_start
= entry
->vme_start
;
6083 local_offset
= VME_OFFSET(entry
);
6085 vm_object_reference(local_object
);
6086 vm_map_unlock_read(map
);
6088 if (local_object
->shadow
&& local_object
->copy
) {
6089 vm_object_lock_request(local_object
->shadow
,
6090 ((vm_object_offset_t
)
6091 ((offset
- local_start
) +
6093 local_object
->vo_shadow_offset
),
6095 MEMORY_OBJECT_DATA_SYNC
,
6098 sync_cow_data
= FALSE
;
6099 vm_object_deallocate(local_object
);
6101 goto REDISCOVER_ENTRY
;
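	/*
	 * Force-data-sync case: issue a MEMORY_OBJECT_DATA_SYNC lock request
	 * against the object so its contents are synchronized with the pager
	 * before the UPL is built, then rescan the entry from the top.
	 */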
6103 if (force_data_sync
) {
6104 local_object
= VME_OBJECT(entry
);
6105 local_start
= entry
->vme_start
;
6106 local_offset
= VME_OFFSET(entry
);
6108 vm_object_reference(local_object
);
6109 vm_map_unlock_read(map
);
6111 vm_object_lock_request(local_object
,
6112 ((vm_object_offset_t
)
6113 ((offset
- local_start
) +
6115 (vm_object_size_t
)*upl_size
,
6117 MEMORY_OBJECT_DATA_SYNC
,
6120 force_data_sync
= FALSE
;
6121 vm_object_deallocate(local_object
);
6123 goto REDISCOVER_ENTRY
;
6125 if (VME_OBJECT(entry
)->private) {
6126 *flags
= UPL_DEV_MEMORY
;
6131 if (VME_OBJECT(entry
)->phys_contiguous
) {
6132 *flags
|= UPL_PHYS_CONTIG
;
6135 local_object
= VME_OBJECT(entry
);
6136 local_offset
= VME_OFFSET(entry
);
6137 local_start
= entry
->vme_start
;
6141 * Wiring will copy the pages to the shadow object.
6142 * The shadow object will not be code-signed so
6143 * attempting to execute code from these copied pages
6144 * would trigger a code-signing violation.
6146 if (entry
->protection
& VM_PROT_EXECUTE
) {
6148 printf("pid %d[%s] create_upl out of executable range from "
6149 "0x%llx to 0x%llx: side effects may include "
6150 "code-signing violations later on\n",
6152 (current_task()->bsd_info
6153 ? proc_name_address(current_task()->bsd_info
)
6155 (uint64_t) entry
->vme_start
,
6156 (uint64_t) entry
->vme_end
);
6157 #endif /* MACH_ASSERT */
6158 DTRACE_VM2(cs_executable_create_upl
,
6159 uint64_t, (uint64_t)entry
->vme_start
,
6160 uint64_t, (uint64_t)entry
->vme_end
);
6161 cs_executable_create_upl
++;
6163 #endif /* CONFIG_EMBEDDED */
6165 vm_object_lock(local_object
);
6168 * Ensure that this object is "true_share" and "copy_delay" now,
6169 * while we're still holding the VM map lock. After we unlock the map,
6170 * anything could happen to that mapping, including some copy-on-write
6171 * activity. We need to make sure that the IOPL will point at the
6172 * same memory as the mapping.
6174 if (local_object
->true_share
) {
6175 assert(local_object
->copy_strategy
!=
6176 MEMORY_OBJECT_COPY_SYMMETRIC
);
6177 } else if (local_object
!= kernel_object
&&
6178 local_object
!= compressor_object
&&
6179 !local_object
->phys_contiguous
) {
6180 #if VM_OBJECT_TRACKING_OP_TRUESHARE
6181 if (!local_object
->true_share
&&
6182 vm_object_tracking_inited
) {
6183 void *bt
[VM_OBJECT_TRACKING_BTDEPTH
];
6185 num
= OSBacktrace(bt
,
6186 VM_OBJECT_TRACKING_BTDEPTH
);
6187 btlog_add_entry(vm_object_tracking_btlog
,
6189 VM_OBJECT_TRACKING_OP_TRUESHARE
,
6193 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
6194 local_object
->true_share
= TRUE
;
6195 if (local_object
->copy_strategy
==
6196 MEMORY_OBJECT_COPY_SYMMETRIC
) {
6197 local_object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6201 vm_object_reference_locked(local_object
);
6202 vm_object_unlock(local_object
);
6204 vm_map_unlock_read(map
);
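	/*
	 * The object now carries its own reference and is marked true_share /
	 * copy_delay, so the map lock can be dropped and the IOPL built directly
	 * against the VM object for the requested range.
	 */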
6206 ret
= vm_object_iopl_request(local_object
,
6207 ((vm_object_offset_t
)
6208 ((offset
- local_start
) + local_offset
)),
6215 vm_object_deallocate(local_object
);
6221 * Internal routine to enter a UPL into a VM map.
6223 * JMM - This should just be doable through the standard
6224 * vm_map_enter() API.
6230 vm_map_offset_t
*dst_addr
)
6233 vm_object_offset_t offset
;
6234 vm_map_offset_t addr
;
6237 int isVectorUPL
= 0, curr_upl
= 0;
6238 upl_t vector_upl
= NULL
;
6239 vm_offset_t vector_upl_dst_addr
= 0;
6240 vm_map_t vector_upl_submap
= NULL
;
6241 upl_offset_t subupl_offset
= 0;
6242 upl_size_t subupl_size
= 0;
6244 if (upl
== UPL_NULL
) {
6245 return KERN_INVALID_ARGUMENT
;
6248 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
6249 int mapped
= 0, valid_upls
= 0;
6252 upl_lock(vector_upl
);
6253 for (curr_upl
= 0; curr_upl
< MAX_VECTOR_UPL_ELEMENTS
; curr_upl
++) {
6254 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
);
6259 if (UPL_PAGE_LIST_MAPPED
& upl
->flags
) {
6265 if (mapped
!= valid_upls
) {
6266 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped
, valid_upls
);
6268 upl_unlock(vector_upl
);
6269 return KERN_FAILURE
;
6273 kr
= kmem_suballoc(map
, &vector_upl_dst_addr
, vector_upl
->size
, FALSE
,
6274 VM_FLAGS_ANYWHERE
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_NONE
,
6275 &vector_upl_submap
);
6276 if (kr
!= KERN_SUCCESS
) {
6277 panic("Vector UPL submap allocation failed\n");
6279 map
= vector_upl_submap
;
6280 vector_upl_set_submap(vector_upl
, vector_upl_submap
, vector_upl_dst_addr
);
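	/*
	 * A vector UPL is mapped by first carving out a kernel submap large
	 * enough for all of its sub-UPLs; each sub-UPL is then entered at its
	 * recorded offset within that submap in the loop below.
	 */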
6286 process_upl_to_enter
:
6288 if (curr_upl
== MAX_VECTOR_UPL_ELEMENTS
) {
6289 *dst_addr
= vector_upl_dst_addr
;
6290 upl_unlock(vector_upl
);
6291 return KERN_SUCCESS
;
6293 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
++ );
6295 goto process_upl_to_enter
;
6298 vector_upl_get_iostate(vector_upl
, upl
, &subupl_offset
, &subupl_size
);
6299 *dst_addr
= (vm_map_offset_t
)(vector_upl_dst_addr
+ (vm_map_offset_t
)subupl_offset
);
6302 * check to see if already mapped
6304 if (UPL_PAGE_LIST_MAPPED
& upl
->flags
) {
6306 return KERN_FAILURE
;
6309 if ((!(upl
->flags
& UPL_SHADOWED
)) &&
6310 ((upl
->flags
& UPL_HAS_BUSY
) ||
6311 !((upl
->flags
& (UPL_DEVICE_MEMORY
| UPL_IO_WIRE
)) || (upl
->map_object
->phys_contiguous
)))) {
6313 vm_page_t alias_page
;
6314 vm_object_offset_t new_offset
;
6315 unsigned int pg_num
;
6316 wpl_array_t lite_list
;
6318 if (upl
->flags
& UPL_INTERNAL
) {
6319 lite_list
= (wpl_array_t
)
6320 ((((uintptr_t)upl
) + sizeof(struct upl
))
6321 + ((upl
->size
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
6323 lite_list
= (wpl_array_t
)(((uintptr_t)upl
) + sizeof(struct upl
));
6325 object
= upl
->map_object
;
6326 upl
->map_object
= vm_object_allocate(upl
->size
);
6328 vm_object_lock(upl
->map_object
);
6330 upl
->map_object
->shadow
= object
;
6331 upl
->map_object
->pageout
= TRUE
;
6332 upl
->map_object
->can_persist
= FALSE
;
6333 upl
->map_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
6334 upl
->map_object
->vo_shadow_offset
= upl
->offset
- object
->paging_offset
;
6335 upl
->map_object
->wimg_bits
= object
->wimg_bits
;
6336 offset
= upl
->map_object
->vo_shadow_offset
;
6340 upl
->flags
|= UPL_SHADOWED
;
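		/*
		 * For each page marked in the UPL's lite list, grab a fictitious
		 * page, convert it into a wired "private" alias of the real page,
		 * and insert it into the shadow object, so mapping the UPL does
		 * not disturb the original pages.
		 */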
6343 pg_num
= (unsigned int) (new_offset
/ PAGE_SIZE
);
6344 assert(pg_num
== new_offset
/ PAGE_SIZE
);
6346 if (lite_list
[pg_num
>> 5] & (1 << (pg_num
& 31))) {
6347 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
6349 vm_object_lock(object
);
6351 m
= vm_page_lookup(object
, offset
);
6352 if (m
== VM_PAGE_NULL
) {
6353 panic("vm_upl_map: page missing\n");
6357 * Convert the fictitious page to a private
6358 * shadow of the real page.
6360 assert(alias_page
->vmp_fictitious
);
6361 alias_page
->vmp_fictitious
= FALSE
;
6362 alias_page
->vmp_private
= TRUE
;
6363 alias_page
->vmp_free_when_done
= TRUE
;
6365 * since m is a page in the upl it must
6366 * already be wired or BUSY, so it's
6367 * safe to assign the underlying physical
6370 VM_PAGE_SET_PHYS_PAGE(alias_page
, VM_PAGE_GET_PHYS_PAGE(m
));
6372 vm_object_unlock(object
);
6374 vm_page_lockspin_queues();
6375 vm_page_wire(alias_page
, VM_KERN_MEMORY_NONE
, TRUE
);
6376 vm_page_unlock_queues();
6378 vm_page_insert_wired(alias_page
, upl
->map_object
, new_offset
, VM_KERN_MEMORY_NONE
);
6380 assert(!alias_page
->vmp_wanted
);
6381 alias_page
->vmp_busy
= FALSE
;
6382 alias_page
->vmp_absent
= FALSE
;
6385 offset
+= PAGE_SIZE_64
;
6386 new_offset
+= PAGE_SIZE_64
;
6388 vm_object_unlock(upl
->map_object
);
6390 if (upl
->flags
& UPL_SHADOWED
) {
6393 offset
= upl
->offset
- upl
->map_object
->paging_offset
;
6398 vm_object_reference(upl
->map_object
);
6403 * NEED A UPL_MAP ALIAS
6405 kr
= vm_map_enter(map
, dst_addr
, (vm_map_size_t
)size
, (vm_map_offset_t
) 0,
6406 VM_FLAGS_ANYWHERE
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_OSFMK
,
6407 upl
->map_object
, offset
, FALSE
,
6408 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
6410 if (kr
!= KERN_SUCCESS
) {
6411 vm_object_deallocate(upl
->map_object
);
6416 kr
= vm_map_enter(map
, dst_addr
, (vm_map_size_t
)size
, (vm_map_offset_t
) 0,
6417 VM_FLAGS_FIXED
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_OSFMK
,
6418 upl
->map_object
, offset
, FALSE
,
6419 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
6421 panic("vm_map_enter failed for a Vector UPL\n");
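	/*
	 * Note: the non-vector case above maps the UPL object anywhere in the
	 * target map, while a vector sub-UPL must be entered at a fixed address,
	 * i.e. at its precomputed offset inside the vector submap allocated earlier.
	 */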
6424 vm_object_lock(upl
->map_object
);
6426 for (addr
= *dst_addr
; size
> 0; size
-= PAGE_SIZE
, addr
+= PAGE_SIZE
) {
6427 m
= vm_page_lookup(upl
->map_object
, offset
);
6430 m
->vmp_pmapped
= TRUE
;
6432 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
6433 * but only in kernel space. If this was on a user map,
6434 * we'd have to set the wpmapped bit. */
6435 /* m->vmp_wpmapped = TRUE; */
6436 assert(map
->pmap
== kernel_pmap
);
6438 PMAP_ENTER(map
->pmap
, addr
, m
, VM_PROT_DEFAULT
, VM_PROT_NONE
, 0, TRUE
, kr
);
6440 assert(kr
== KERN_SUCCESS
);
6442 kasan_notify_address(addr
, PAGE_SIZE_64
);
6445 offset
+= PAGE_SIZE_64
;
6447 vm_object_unlock(upl
->map_object
);
6450 * hold a reference for the mapping
6453 upl
->flags
|= UPL_PAGE_LIST_MAPPED
;
6454 upl
->kaddr
= (vm_offset_t
) *dst_addr
;
6455 assert(upl
->kaddr
== *dst_addr
);
6458 goto process_upl_to_enter
;
6463 return KERN_SUCCESS
;
6467 * Internal routine to remove a UPL mapping from a VM map.
6469 * XXX - This should just be doable through a standard
6470 * vm_map_remove() operation. Otherwise, implicit clean-up
6471 * of the target map won't be able to correctly remove
6472 * these (and release the reference on the UPL). Having
6473 * to do this means we can't map these into user-space
6483 int isVectorUPL
= 0, curr_upl
= 0;
6484 upl_t vector_upl
= NULL
;
6486 if (upl
== UPL_NULL
) {
6487 return KERN_INVALID_ARGUMENT
;
6490 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
6491 int unmapped
= 0, valid_upls
= 0;
6493 upl_lock(vector_upl
);
6494 for (curr_upl
= 0; curr_upl
< MAX_VECTOR_UPL_ELEMENTS
; curr_upl
++) {
6495 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
);
6500 if (!(UPL_PAGE_LIST_MAPPED
& upl
->flags
)) {
6506 if (unmapped
!= valid_upls
) {
6507 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped
, valid_upls
);
6509 upl_unlock(vector_upl
);
6510 return KERN_FAILURE
;
6518 process_upl_to_remove
:
6520 if (curr_upl
== MAX_VECTOR_UPL_ELEMENTS
) {
6521 vm_map_t v_upl_submap
;
6522 vm_offset_t v_upl_submap_dst_addr
;
6523 vector_upl_get_submap(vector_upl
, &v_upl_submap
, &v_upl_submap_dst_addr
);
6525 vm_map_remove(map
, v_upl_submap_dst_addr
, v_upl_submap_dst_addr
+ vector_upl
->size
, VM_MAP_REMOVE_NO_FLAGS
);
6526 vm_map_deallocate(v_upl_submap
);
6527 upl_unlock(vector_upl
);
6528 return KERN_SUCCESS
;
6531 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
++ );
6533 goto process_upl_to_remove
;
6537 if (upl
->flags
& UPL_PAGE_LIST_MAPPED
) {
6541 assert(upl
->ref_count
> 1);
6542 upl
->ref_count
--; /* removing mapping ref */
6544 upl
->flags
&= ~UPL_PAGE_LIST_MAPPED
;
6545 upl
->kaddr
= (vm_offset_t
) 0;
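		/*
		 * Tear down the kernel virtual range below, trunc/round to the
		 * map's page mask; the UPL itself survives (with its remaining
		 * references) until it is committed or aborted.
		 */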
6552 vm_map_trunc_page(addr
,
6553 VM_MAP_PAGE_MASK(map
)),
6554 vm_map_round_page(addr
+ size
,
6555 VM_MAP_PAGE_MASK(map
)),
6556 VM_MAP_REMOVE_NO_FLAGS
);
6557 return KERN_SUCCESS
;
6560 * If it's a Vectored UPL, we'll be removing the entire
6561 * submap anyways, so no need to remove individual UPL
6562 * element mappings from within the submap
6564 goto process_upl_to_remove
;
6569 return KERN_FAILURE
;
6576 upl_offset_t offset
,
6579 upl_page_info_t
*page_list
,
6580 mach_msg_type_number_t count
,
6583 upl_size_t xfer_size
, subupl_size
= size
;
6584 vm_object_t shadow_object
;
6586 vm_object_t m_object
;
6587 vm_object_offset_t target_offset
;
6588 upl_offset_t subupl_offset
= offset
;
6590 wpl_array_t lite_list
;
6592 int clear_refmod
= 0;
6593 int pgpgout_count
= 0;
6594 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
6595 struct vm_page_delayed_work
*dwp
;
6598 int isVectorUPL
= 0;
6599 upl_t vector_upl
= NULL
;
6600 boolean_t should_be_throttled
= FALSE
;
6602 vm_page_t nxt_page
= VM_PAGE_NULL
;
6603 int fast_path_possible
= 0;
6604 int fast_path_full_commit
= 0;
6605 int throttle_page
= 0;
6606 int unwired_count
= 0;
6607 int local_queue_count
= 0;
6608 vm_page_t first_local
, last_local
;
6612 if (upl
== UPL_NULL
) {
6613 return KERN_INVALID_ARGUMENT
;
6620 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
6622 upl_lock(vector_upl
);
6627 process_upl_to_commit
:
6631 offset
= subupl_offset
;
6633 upl_unlock(vector_upl
);
6634 return KERN_SUCCESS
;
6636 upl
= vector_upl_subupl_byoffset(vector_upl
, &offset
, &size
);
6638 upl_unlock(vector_upl
);
6639 return KERN_FAILURE
;
6641 page_list
= UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl
);
6642 subupl_size
-= size
;
6643 subupl_offset
+= size
;
6647 if (upl
->upl_commit_index
< UPL_DEBUG_COMMIT_RECORDS
) {
6648 (void) OSBacktrace(&upl
->upl_commit_records
[upl
->upl_commit_index
].c_retaddr
[0], UPL_DEBUG_STACK_FRAMES
);
6650 upl
->upl_commit_records
[upl
->upl_commit_index
].c_beg
= offset
;
6651 upl
->upl_commit_records
[upl
->upl_commit_index
].c_end
= (offset
+ size
);
6653 upl
->upl_commit_index
++;
6656 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
6658 } else if ((offset
+ size
) <= upl
->size
) {
6664 upl_unlock(vector_upl
);
6666 return KERN_FAILURE
;
6668 if (upl
->flags
& UPL_SET_DIRTY
) {
6669 flags
|= UPL_COMMIT_SET_DIRTY
;
6671 if (upl
->flags
& UPL_CLEAR_DIRTY
) {
6672 flags
|= UPL_COMMIT_CLEAR_DIRTY
;
6675 if (upl
->flags
& UPL_INTERNAL
) {
6676 lite_list
= (wpl_array_t
) ((((uintptr_t)upl
) + sizeof(struct upl
))
6677 + ((upl
->size
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
6679 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
6682 object
= upl
->map_object
;
6684 if (upl
->flags
& UPL_SHADOWED
) {
6685 vm_object_lock(object
);
6686 shadow_object
= object
->shadow
;
6688 shadow_object
= object
;
6690 entry
= offset
/ PAGE_SIZE
;
6691 target_offset
= (vm_object_offset_t
)offset
;
6693 assert(!(target_offset
& PAGE_MASK
));
6694 assert(!(xfer_size
& PAGE_MASK
));
6696 if (upl
->flags
& UPL_KERNEL_OBJECT
) {
6697 vm_object_lock_shared(shadow_object
);
6699 vm_object_lock(shadow_object
);
6702 VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object
);
6704 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
6705 assert(shadow_object
->blocked_access
);
6706 shadow_object
->blocked_access
= FALSE
;
6707 vm_object_wakeup(object
, VM_OBJECT_EVENT_UNBLOCKED
);
6710 if (shadow_object
->code_signed
) {
6713 * If the object is code-signed, do not let this UPL tell
6714 * us if the pages are valid or not. Let the pages be
6715 * validated by VM the normal way (when they get mapped or
6718 flags
&= ~UPL_COMMIT_CS_VALIDATED
;
6722 * No page list to get the code-signing info from !?
6724 flags
&= ~UPL_COMMIT_CS_VALIDATED
;
6726 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object
->internal
) {
6727 should_be_throttled
= TRUE
;
6732 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
6734 if ((upl
->flags
& UPL_IO_WIRE
) &&
6735 !(flags
& UPL_COMMIT_FREE_ABSENT
) &&
6737 shadow_object
->purgable
!= VM_PURGABLE_VOLATILE
&&
6738 shadow_object
->purgable
!= VM_PURGABLE_EMPTY
) {
6739 if (!vm_page_queue_empty(&shadow_object
->memq
)) {
6740 if (size
== shadow_object
->vo_size
) {
6741 nxt_page
= (vm_page_t
)vm_page_queue_first(&shadow_object
->memq
);
6742 fast_path_full_commit
= 1;
6744 fast_path_possible
= 1;
6746 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object
->internal
&&
6747 (shadow_object
->purgable
== VM_PURGABLE_DENY
||
6748 shadow_object
->purgable
== VM_PURGABLE_NONVOLATILE
||
6749 shadow_object
->purgable
== VM_PURGABLE_VOLATILE
)) {
6754 first_local
= VM_PAGE_NULL
;
6755 last_local
= VM_PAGE_NULL
;
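	/*
	 * Fast-path setup: when the UPL is IO-wired against a simple,
	 * non-volatile object we can walk the object's memq directly and
	 * collect pages onto a local list (first_local/last_local), then
	 * splice that list into the global page queues in one operation
	 * after the main loop instead of taking the queue lock per page.
	 */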
6765 if (upl
->flags
& UPL_LITE
) {
6766 unsigned int pg_num
;
6768 if (nxt_page
!= VM_PAGE_NULL
) {
6770 nxt_page
= (vm_page_t
)vm_page_queue_next(&nxt_page
->vmp_listq
);
6771 target_offset
= m
->vmp_offset
;
6773 pg_num
= (unsigned int) (target_offset
/ PAGE_SIZE
);
6774 assert(pg_num
== target_offset
/ PAGE_SIZE
);
6776 if (lite_list
[pg_num
>> 5] & (1 << (pg_num
& 31))) {
6777 lite_list
[pg_num
>> 5] &= ~(1 << (pg_num
& 31));
6779 if (!(upl
->flags
& UPL_KERNEL_OBJECT
) && m
== VM_PAGE_NULL
) {
6780 m
= vm_page_lookup(shadow_object
, target_offset
+ (upl
->offset
- shadow_object
->paging_offset
));
6786 if (upl
->flags
& UPL_SHADOWED
) {
6787 if ((t
= vm_page_lookup(object
, target_offset
)) != VM_PAGE_NULL
) {
6788 t
->vmp_free_when_done
= FALSE
;
6792 if (!(upl
->flags
& UPL_KERNEL_OBJECT
) && m
== VM_PAGE_NULL
) {
6793 m
= vm_page_lookup(shadow_object
, target_offset
+ object
->vo_shadow_offset
);
6797 if (m
== VM_PAGE_NULL
) {
6798 goto commit_next_page
;
6801 m_object
= VM_PAGE_OBJECT(m
);
6803 if (m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) {
6804 assert(m
->vmp_busy
);
6806 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
6807 goto commit_next_page
;
6810 if (flags
& UPL_COMMIT_CS_VALIDATED
) {
6813 * Set the code signing bits according to
6814 * what the UPL says they should be.
6816 m
->vmp_cs_validated
= page_list
[entry
].cs_validated
;
6817 m
->vmp_cs_tainted
= page_list
[entry
].cs_tainted
;
6818 m
->vmp_cs_nx
= page_list
[entry
].cs_nx
;
6820 if (flags
& UPL_COMMIT_WRITTEN_BY_KERNEL
) {
6821 m
->vmp_written_by_kernel
= TRUE
;
6824 if (upl
->flags
& UPL_IO_WIRE
) {
6826 page_list
[entry
].phys_addr
= 0;
6829 if (flags
& UPL_COMMIT_SET_DIRTY
) {
6830 SET_PAGE_DIRTY(m
, FALSE
);
6831 } else if (flags
& UPL_COMMIT_CLEAR_DIRTY
) {
6832 m
->vmp_dirty
= FALSE
;
6834 if (!(flags
& UPL_COMMIT_CS_VALIDATED
) &&
6835 m
->vmp_cs_validated
&& !m
->vmp_cs_tainted
) {
6838 * This page is no longer dirty
6839 * but could have been modified,
6840 * so it will need to be
6843 m
->vmp_cs_validated
= FALSE
;
6845 VM_PAGEOUT_DEBUG(vm_cs_validated_resets
, 1);
6847 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
6849 clear_refmod
|= VM_MEM_MODIFIED
;
6851 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
6853 * We blocked access to the pages in this UPL.
6854 * Clear the "busy" bit and wake up any waiter
6857 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
6859 if (fast_path_possible
) {
6860 assert(m_object
->purgable
!= VM_PURGABLE_EMPTY
);
6861 assert(m_object
->purgable
!= VM_PURGABLE_VOLATILE
);
6862 if (m
->vmp_absent
) {
6863 assert(m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
6864 assert(m
->vmp_wire_count
== 0);
6865 assert(m
->vmp_busy
);
6867 m
->vmp_absent
= FALSE
;
6868 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
6870 if (m
->vmp_wire_count
== 0) {
6871 panic("wire_count == 0, m = %p, obj = %p\n", m
, shadow_object
);
6873 assert(m
->vmp_q_state
== VM_PAGE_IS_WIRED
);
6876 * XXX FBDP need to update some other
6877 * counters here (purgeable_wired_count)
6880 assert(m
->vmp_wire_count
> 0);
6881 m
->vmp_wire_count
--;
6883 if (m
->vmp_wire_count
== 0) {
6884 m
->vmp_q_state
= VM_PAGE_NOT_ON_Q
;
6888 if (m
->vmp_wire_count
== 0) {
6889 assert(m
->vmp_pageq
.next
== 0 && m
->vmp_pageq
.prev
== 0);
6891 if (last_local
== VM_PAGE_NULL
) {
6892 assert(first_local
== VM_PAGE_NULL
);
6897 assert(first_local
!= VM_PAGE_NULL
);
6899 m
->vmp_pageq
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local
);
6900 first_local
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m
);
6903 local_queue_count
++;
6905 if (throttle_page
) {
6906 m
->vmp_q_state
= VM_PAGE_ON_THROTTLED_Q
;
6908 if (flags
& UPL_COMMIT_INACTIVATE
) {
6909 if (shadow_object
->internal
) {
6910 m
->vmp_q_state
= VM_PAGE_ON_INACTIVE_INTERNAL_Q
;
6912 m
->vmp_q_state
= VM_PAGE_ON_INACTIVE_EXTERNAL_Q
;
6915 m
->vmp_q_state
= VM_PAGE_ON_ACTIVE_Q
;
6920 if (flags
& UPL_COMMIT_INACTIVATE
) {
6921 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
6922 clear_refmod
|= VM_MEM_REFERENCED
;
6924 if (m
->vmp_absent
) {
6925 if (flags
& UPL_COMMIT_FREE_ABSENT
) {
6926 dwp
->dw_mask
|= DW_vm_page_free
;
6928 m
->vmp_absent
= FALSE
;
6929 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
6931 if (!(dwp
->dw_mask
& DW_vm_page_deactivate_internal
)) {
6932 dwp
->dw_mask
|= DW_vm_page_activate
;
6936 dwp
->dw_mask
|= DW_vm_page_unwire
;
6939 goto commit_next_page
;
6941 assert(m
->vmp_q_state
!= VM_PAGE_USED_BY_COMPRESSOR
);
6944 page_list
[entry
].phys_addr
= 0;
6948 * make sure to clear the hardware
6949 * modify or reference bits before
6950 * releasing the BUSY bit on this page
6951 * otherwise we risk losing a legitimate
6954 if (flags
& UPL_COMMIT_CLEAR_DIRTY
) {
6955 m
->vmp_dirty
= FALSE
;
6957 clear_refmod
|= VM_MEM_MODIFIED
;
6959 if (m
->vmp_laundry
) {
6960 dwp
->dw_mask
|= DW_vm_pageout_throttle_up
;
6963 if (VM_PAGE_WIRED(m
)) {
6964 m
->vmp_free_when_done
= FALSE
;
6967 if (!(flags
& UPL_COMMIT_CS_VALIDATED
) &&
6968 m
->vmp_cs_validated
&& !m
->vmp_cs_tainted
) {
6971 * This page is no longer dirty
6972 * but could have been modified,
6973 * so it will need to be
6976 m
->vmp_cs_validated
= FALSE
;
6978 VM_PAGEOUT_DEBUG(vm_cs_validated_resets
, 1);
6980 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
6982 if (m
->vmp_overwriting
) {
6984 * the (COPY_OUT_FROM == FALSE) request_page_list case
6987 #if CONFIG_PHANTOM_CACHE
6988 if (m
->vmp_absent
&& !m_object
->internal
) {
6989 dwp
->dw_mask
|= DW_vm_phantom_cache_update
;
6992 m
->vmp_absent
= FALSE
;
6994 dwp
->dw_mask
|= DW_clear_busy
;
6997 * alternate (COPY_OUT_FROM == FALSE) page_list case
6998 * Occurs when the original page was wired
6999 * at the time of the list request
7001 assert(VM_PAGE_WIRED(m
));
7003 dwp
->dw_mask
|= DW_vm_page_unwire
; /* reactivates */
7005 m
->vmp_overwriting
= FALSE
;
7007 m
->vmp_cleaning
= FALSE
;
7009 if (m
->vmp_free_when_done
) {
7011 * With the clean queue enabled, UPL_PAGEOUT should
7012 * no longer set the pageout bit. Its pages now go
7013 * to the clean queue.
7015 assert(!(flags
& UPL_PAGEOUT
));
7016 assert(!m_object
->internal
);
7018 m
->vmp_free_when_done
= FALSE
;
7020 if ((flags
& UPL_COMMIT_SET_DIRTY
) ||
7021 (m
->vmp_pmapped
&& (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
)) & VM_MEM_MODIFIED
))) {
7023 * page was re-dirtied after we started
7024 * the pageout... reactivate it since
7025 * we don't know whether the on-disk
7026 * copy matches what is now in memory
7028 SET_PAGE_DIRTY(m
, FALSE
);
7030 dwp
->dw_mask
|= DW_vm_page_activate
| DW_PAGE_WAKEUP
;
7032 if (upl
->flags
& UPL_PAGEOUT
) {
7033 VM_STAT_INCR(reactivations
);
7034 DTRACE_VM2(pgrec
, int, 1, (uint64_t *), NULL
);
7038 * page has been successfully cleaned
7039 * go ahead and free it for other use
7041 if (m_object
->internal
) {
7042 DTRACE_VM2(anonpgout
, int, 1, (uint64_t *), NULL
);
7044 DTRACE_VM2(fspgout
, int, 1, (uint64_t *), NULL
);
7046 m
->vmp_dirty
= FALSE
;
7049 dwp
->dw_mask
|= DW_vm_page_free
;
7051 goto commit_next_page
;
7054 * It is a part of the semantic of COPYOUT_FROM
7055 * UPLs that a commit implies cache sync
7056 * between the vm page and the backing store
7057 * this can be used to strip the precious bit
7060 if ((upl
->flags
& UPL_PAGE_SYNC_DONE
) || (flags
& UPL_COMMIT_CLEAR_PRECIOUS
)) {
7061 m
->vmp_precious
= FALSE
;
7064 if (flags
& UPL_COMMIT_SET_DIRTY
) {
7065 SET_PAGE_DIRTY(m
, FALSE
);
7067 m
->vmp_dirty
= FALSE
;
7070 /* with the clean queue on, move *all* cleaned pages to the clean queue */
7071 if (hibernate_cleaning_in_progress
== FALSE
&& !m
->vmp_dirty
&& (upl
->flags
& UPL_PAGEOUT
)) {
7074 VM_STAT_INCR(pageouts
);
7075 DTRACE_VM2(pgout
, int, 1, (uint64_t *), NULL
);
7077 dwp
->dw_mask
|= DW_enqueue_cleaned
;
7078 } else if (should_be_throttled
== TRUE
&& (m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
)) {
7080 * page coming back in from being 'frozen'...
7081 * it was dirty before it was frozen, so keep it so
7082 * the vm_page_activate will notice that it really belongs
7083 * on the throttle queue and put it there
7085 SET_PAGE_DIRTY(m
, FALSE
);
7086 dwp
->dw_mask
|= DW_vm_page_activate
;
7088 if ((flags
& UPL_COMMIT_INACTIVATE
) && !m
->vmp_clustered
&& (m
->vmp_q_state
!= VM_PAGE_ON_SPECULATIVE_Q
)) {
7089 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
7090 clear_refmod
|= VM_MEM_REFERENCED
;
7091 } else if (!VM_PAGE_PAGEABLE(m
)) {
7092 if (m
->vmp_clustered
|| (flags
& UPL_COMMIT_SPECULATE
)) {
7093 dwp
->dw_mask
|= DW_vm_page_speculate
;
7094 } else if (m
->vmp_reference
) {
7095 dwp
->dw_mask
|= DW_vm_page_activate
;
7097 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
7098 clear_refmod
|= VM_MEM_REFERENCED
;
7102 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7104 * We blocked access to the pages in this UPL.
7105 * Clear the "busy" bit on this page before we
7106 * wake up any waiter.
7108 dwp
->dw_mask
|= DW_clear_busy
;
7111 * Wakeup any thread waiting for the page to be un-cleaning.
7113 dwp
->dw_mask
|= DW_PAGE_WAKEUP
;
7117 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m
), clear_refmod
);
7120 target_offset
+= PAGE_SIZE_64
;
7121 xfer_size
-= PAGE_SIZE
;
7125 if (dwp
->dw_mask
& ~(DW_clear_busy
| DW_PAGE_WAKEUP
)) {
7126 VM_PAGE_ADD_DELAYED_WORK(dwp
, m
, dw_count
);
7128 if (dw_count
>= dw_limit
) {
7129 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, &dw_array
[0], dw_count
);
7135 if (dwp
->dw_mask
& DW_clear_busy
) {
7136 m
->vmp_busy
= FALSE
;
7139 if (dwp
->dw_mask
& DW_PAGE_WAKEUP
) {
7146 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, &dw_array
[0], dw_count
);
7149 if (fast_path_possible
) {
7150 assert(shadow_object
->purgable
!= VM_PURGABLE_VOLATILE
);
7151 assert(shadow_object
->purgable
!= VM_PURGABLE_EMPTY
);
7153 if (local_queue_count
|| unwired_count
) {
7154 if (local_queue_count
) {
7155 vm_page_t first_target
;
7156 vm_page_queue_head_t
*target_queue
;
7158 if (throttle_page
) {
7159 target_queue
= &vm_page_queue_throttled
;
7161 if (flags
& UPL_COMMIT_INACTIVATE
) {
7162 if (shadow_object
->internal
) {
7163 target_queue
= &vm_page_queue_anonymous
;
7165 target_queue
= &vm_page_queue_inactive
;
7168 target_queue
= &vm_page_queue_active
;
7172 * Transfer the entire local queue to a regular LRU page queues.
7174 vm_page_lockspin_queues();
7176 first_target
= (vm_page_t
) vm_page_queue_first(target_queue
);
7178 if (vm_page_queue_empty(target_queue
)) {
7179 target_queue
->prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local
);
7181 first_target
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local
);
7184 target_queue
->next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local
);
7185 first_local
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue
);
7186 last_local
->vmp_pageq
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target
);
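				/*
				 * The whole local list is now linked at the head of the
				 * target queue in a single splice; only the global page
				 * counts remain to be adjusted below.
				 */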
7189 * Adjust the global page counts.
7191 if (throttle_page
) {
7192 vm_page_throttled_count
+= local_queue_count
;
7194 if (flags
& UPL_COMMIT_INACTIVATE
) {
7195 if (shadow_object
->internal
) {
7196 vm_page_anonymous_count
+= local_queue_count
;
7198 vm_page_inactive_count
+= local_queue_count
;
7200 token_new_pagecount
+= local_queue_count
;
7202 vm_page_active_count
+= local_queue_count
;
7205 if (shadow_object
->internal
) {
7206 vm_page_pageable_internal_count
+= local_queue_count
;
7208 vm_page_pageable_external_count
+= local_queue_count
;
7212 vm_page_lockspin_queues();
7214 if (unwired_count
) {
7215 vm_page_wire_count
-= unwired_count
;
7216 VM_CHECK_MEMORYSTATUS
;
7218 vm_page_unlock_queues();
7220 VM_OBJECT_WIRED_PAGE_COUNT(shadow_object
, -unwired_count
);
7225 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
7227 } else if (upl
->flags
& UPL_LITE
) {
7233 if (!fast_path_full_commit
) {
7234 pg_num
= upl
->size
/ PAGE_SIZE
;
7235 pg_num
= (pg_num
+ 31) >> 5;
7237 for (i
= 0; i
< pg_num
; i
++) {
7238 if (lite_list
[i
] != 0) {
7245 if (vm_page_queue_empty(&upl
->map_object
->memq
)) {
7249 if (occupied
== 0) {
7251 * If this UPL element belongs to a Vector UPL and is
7252 * empty, then this is the right function to deallocate
7253 * it. So go ahead and set the *empty variable. The flag
7254 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
7255 * should be considered relevant for the Vector UPL and not
7256 * the internal UPLs.
7258 if ((upl
->flags
& UPL_COMMIT_NOTIFY_EMPTY
) || isVectorUPL
) {
7262 if (object
== shadow_object
&& !(upl
->flags
& UPL_KERNEL_OBJECT
)) {
7264 * this is not a paging object
7265 * so we need to drop the paging reference
7266 * that was taken when we created the UPL
7267 * against this object
7269 vm_object_activity_end(shadow_object
);
7270 vm_object_collapse(shadow_object
, 0, TRUE
);
7273 * we donated the paging reference to
7274 * the map object... vm_pageout_object_terminate
7275 * will drop this reference
7279 VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object
, shadow_object
->wire_tag
);
7280 vm_object_unlock(shadow_object
);
7281 if (object
!= shadow_object
) {
7282 vm_object_unlock(object
);
7289 * If we completed our operations on an UPL that is
7290 * part of a Vectored UPL and if empty is TRUE, then
7291 * we should go ahead and deallocate this UPL element.
7292 * Then we check if this was the last of the UPL elements
7293 * within that Vectored UPL. If so, set empty to TRUE
7294 * so that in ubc_upl_commit_range or ubc_upl_commit, we
7295 * can go ahead and deallocate the Vector UPL too.
7297 if (*empty
== TRUE
) {
7298 *empty
= vector_upl_set_subupl(vector_upl
, upl
, 0);
7299 upl_deallocate(upl
);
7301 goto process_upl_to_commit
;
7303 if (pgpgout_count
) {
7304 DTRACE_VM2(pgpgout
, int, pgpgout_count
, (uint64_t *), NULL
);
7307 return KERN_SUCCESS
;
7313 upl_offset_t offset
,
7318 upl_page_info_t
*user_page_list
= NULL
;
7319 upl_size_t xfer_size
, subupl_size
= size
;
7320 vm_object_t shadow_object
;
7322 vm_object_offset_t target_offset
;
7323 upl_offset_t subupl_offset
= offset
;
7325 wpl_array_t lite_list
;
7327 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
7328 struct vm_page_delayed_work
*dwp
;
7331 int isVectorUPL
= 0;
7332 upl_t vector_upl
= NULL
;
7336 if (upl
== UPL_NULL
) {
7337 return KERN_INVALID_ARGUMENT
;
7340 if ((upl
->flags
& UPL_IO_WIRE
) && !(error
& UPL_ABORT_DUMP_PAGES
)) {
7341 return upl_commit_range(upl
, offset
, size
, UPL_COMMIT_FREE_ABSENT
, NULL
, 0, empty
);
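	/*
	 * Aborting an IO-wired UPL without UPL_ABORT_DUMP_PAGES is handled as a
	 * commit that frees absent pages, which is what the delegation to
	 * upl_commit_range() above implements.
	 */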
7344 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
7346 upl_lock(vector_upl
);
7351 process_upl_to_abort
:
7354 offset
= subupl_offset
;
7356 upl_unlock(vector_upl
);
7357 return KERN_SUCCESS
;
7359 upl
= vector_upl_subupl_byoffset(vector_upl
, &offset
, &size
);
7361 upl_unlock(vector_upl
);
7362 return KERN_FAILURE
;
7364 subupl_size
-= size
;
7365 subupl_offset
+= size
;
7371 if (upl
->upl_commit_index
< UPL_DEBUG_COMMIT_RECORDS
) {
7372 (void) OSBacktrace(&upl
->upl_commit_records
[upl
->upl_commit_index
].c_retaddr
[0], UPL_DEBUG_STACK_FRAMES
);
7374 upl
->upl_commit_records
[upl
->upl_commit_index
].c_beg
= offset
;
7375 upl
->upl_commit_records
[upl
->upl_commit_index
].c_end
= (offset
+ size
);
7376 upl
->upl_commit_records
[upl
->upl_commit_index
].c_aborted
= 1;
7378 upl
->upl_commit_index
++;
7381 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
7383 } else if ((offset
+ size
) <= upl
->size
) {
7389 upl_unlock(vector_upl
);
7392 return KERN_FAILURE
;
7394 if (upl
->flags
& UPL_INTERNAL
) {
7395 lite_list
= (wpl_array_t
)
7396 ((((uintptr_t)upl
) + sizeof(struct upl
))
7397 + ((upl
->size
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
7399 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
7401 lite_list
= (wpl_array_t
)
7402 (((uintptr_t)upl
) + sizeof(struct upl
));
7404 object
= upl
->map_object
;
7406 if (upl
->flags
& UPL_SHADOWED
) {
7407 vm_object_lock(object
);
7408 shadow_object
= object
->shadow
;
7410 shadow_object
= object
;
7413 entry
= offset
/ PAGE_SIZE
;
7414 target_offset
= (vm_object_offset_t
)offset
;
7416 assert(!(target_offset
& PAGE_MASK
));
7417 assert(!(xfer_size
& PAGE_MASK
));
7419 if (upl
->flags
& UPL_KERNEL_OBJECT
) {
7420 vm_object_lock_shared(shadow_object
);
7422 vm_object_lock(shadow_object
);
7425 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7426 assert(shadow_object
->blocked_access
);
7427 shadow_object
->blocked_access
= FALSE
;
7428 vm_object_wakeup(object
, VM_OBJECT_EVENT_UNBLOCKED
);
7433 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
7435 if ((error
& UPL_ABORT_DUMP_PAGES
) && (upl
->flags
& UPL_KERNEL_OBJECT
)) {
7436 panic("upl_abort_range: kernel_object being DUMPED");
7441 unsigned int pg_num
;
7444 pg_num
= (unsigned int) (target_offset
/ PAGE_SIZE
);
7445 assert(pg_num
== target_offset
/ PAGE_SIZE
);
7449 if (user_page_list
) {
7450 needed
= user_page_list
[pg_num
].needed
;
7456 if (upl
->flags
& UPL_LITE
) {
7457 if (lite_list
[pg_num
>> 5] & (1 << (pg_num
& 31))) {
7458 lite_list
[pg_num
>> 5] &= ~(1 << (pg_num
& 31));
7460 if (!(upl
->flags
& UPL_KERNEL_OBJECT
)) {
7461 m
= vm_page_lookup(shadow_object
, target_offset
+
7462 (upl
->offset
- shadow_object
->paging_offset
));
7466 if (upl
->flags
& UPL_SHADOWED
) {
7467 if ((t
= vm_page_lookup(object
, target_offset
)) != VM_PAGE_NULL
) {
7468 t
->vmp_free_when_done
= FALSE
;
7472 if (m
== VM_PAGE_NULL
) {
7473 m
= vm_page_lookup(shadow_object
, target_offset
+ object
->vo_shadow_offset
);
7477 if ((upl
->flags
& UPL_KERNEL_OBJECT
)) {
7478 goto abort_next_page
;
7481 if (m
!= VM_PAGE_NULL
) {
7482 assert(m
->vmp_q_state
!= VM_PAGE_USED_BY_COMPRESSOR
);
7484 if (m
->vmp_absent
) {
7485 boolean_t must_free
= TRUE
;
7488 * COPYOUT = FALSE case
7489 * check for error conditions which must
7490 * be passed back to the pages customer
7492 if (error
& UPL_ABORT_RESTART
) {
7493 m
->vmp_restart
= TRUE
;
7494 m
->vmp_absent
= FALSE
;
7495 m
->vmp_unusual
= TRUE
;
7497 } else if (error
& UPL_ABORT_UNAVAILABLE
) {
7498 m
->vmp_restart
= FALSE
;
7499 m
->vmp_unusual
= TRUE
;
7501 } else if (error
& UPL_ABORT_ERROR
) {
7502 m
->vmp_restart
= FALSE
;
7503 m
->vmp_absent
= FALSE
;
7504 m
->vmp_error
= TRUE
;
7505 m
->vmp_unusual
= TRUE
;
7508 if (m
->vmp_clustered
&& needed
== FALSE
) {
7510 * This page was a part of a speculative
7511 * read-ahead initiated by the kernel
7512 * itself. No one is expecting this
7513 * page and no one will clean up its
7514 * error state if it ever becomes valid
7516 * We have to free it here.
7520 m
->vmp_cleaning
= FALSE
;
7522 if (m
->vmp_overwriting
&& !m
->vmp_busy
) {
7524 * this shouldn't happen since
7525 * this is an 'absent' page, but
7526 * it doesn't hurt to check for
7527 * the 'alternate' method of
7528 * stabilizing the page...
7529 * we will mark 'busy' to be cleared
7530 * in the following code which will
7531 * take care of the primary stabilization
7532 * method (i.e. setting 'busy' to TRUE)
7534 dwp
->dw_mask
|= DW_vm_page_unwire
;
7536 m
->vmp_overwriting
= FALSE
;
7538 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7540 if (must_free
== TRUE
) {
7541 dwp
->dw_mask
|= DW_vm_page_free
;
7543 dwp
->dw_mask
|= DW_vm_page_activate
;
7547 * Handle the trusted pager throttle.
7549 if (m
->vmp_laundry
) {
7550 dwp
->dw_mask
|= DW_vm_pageout_throttle_up
;
7553 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7555 * We blocked access to the pages in this UPL.
7556 * Clear the "busy" bit and wake up any waiter
7559 dwp
->dw_mask
|= DW_clear_busy
;
7561 if (m
->vmp_overwriting
) {
7563 dwp
->dw_mask
|= DW_clear_busy
;
7566 * deal with the 'alternate' method
7567 * of stabilizing the page...
7568 * we will either free the page
7569 * or mark 'busy' to be cleared
7570 * in the following code which will
7571 * take care of the primary stabilization
7572 * method (i.e. setting 'busy' to TRUE)
7574 dwp
->dw_mask
|= DW_vm_page_unwire
;
7576 m
->vmp_overwriting
= FALSE
;
7578 m
->vmp_free_when_done
= FALSE
;
7579 m
->vmp_cleaning
= FALSE
;
7581 if (error
& UPL_ABORT_DUMP_PAGES
) {
7582 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
7584 dwp
->dw_mask
|= DW_vm_page_free
;
7586 if (!(dwp
->dw_mask
& DW_vm_page_unwire
)) {
7587 if (error
& UPL_ABORT_REFERENCE
) {
7589 * we've been told to explicitly
7590 * reference this page... for
7591 * file I/O, this is done by
7592 * implementing an LRU on the inactive q
7594 dwp
->dw_mask
|= DW_vm_page_lru
;
7595 } else if (!VM_PAGE_PAGEABLE(m
)) {
7596 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
7599 dwp
->dw_mask
|= DW_PAGE_WAKEUP
;
7604 target_offset
+= PAGE_SIZE_64
;
7605 xfer_size
-= PAGE_SIZE
;
7609 if (dwp
->dw_mask
& ~(DW_clear_busy
| DW_PAGE_WAKEUP
)) {
7610 VM_PAGE_ADD_DELAYED_WORK(dwp
, m
, dw_count
);
7612 if (dw_count
>= dw_limit
) {
7613 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, &dw_array
[0], dw_count
);
7619 if (dwp
->dw_mask
& DW_clear_busy
) {
7620 m
->vmp_busy
= FALSE
;
7623 if (dwp
->dw_mask
& DW_PAGE_WAKEUP
) {
7630 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, &dw_array
[0], dw_count
);
7635 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
7637 } else if (upl
->flags
& UPL_LITE
) {
7641 pg_num
= upl
->size
/ PAGE_SIZE
;
7642 pg_num
= (pg_num
+ 31) >> 5;
7645 for (i
= 0; i
< pg_num
; i
++) {
7646 if (lite_list
[i
] != 0) {
7652 if (vm_page_queue_empty(&upl
->map_object
->memq
)) {
7656 if (occupied
== 0) {
7658 * If this UPL element belongs to a Vector UPL and is
7659 * empty, then this is the right function to deallocate
7660 * it. So go ahead and set the *empty variable. The flag
7661 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
7662 * should be considered relevant for the Vector UPL and
7663 * not the internal UPLs.
7665 if ((upl
->flags
& UPL_COMMIT_NOTIFY_EMPTY
) || isVectorUPL
) {
7669 if (object
== shadow_object
&& !(upl
->flags
& UPL_KERNEL_OBJECT
)) {
7671 * this is not a paging object
7672 * so we need to drop the paging reference
7673 * that was taken when we created the UPL
7674 * against this object
7676 vm_object_activity_end(shadow_object
);
7677 vm_object_collapse(shadow_object
, 0, TRUE
);
7680 * we donated the paging reference to
7681 * the map object... vm_pageout_object_terminate
7682 * will drop this reference
7686 vm_object_unlock(shadow_object
);
7687 if (object
!= shadow_object
) {
7688 vm_object_unlock(object
);
7695 * If we completed our operations on an UPL that is
7696 * part of a Vectored UPL and if empty is TRUE, then
7697 * we should go ahead and deallocate this UPL element.
7698 * Then we check if this was the last of the UPL elements
7699 * within that Vectored UPL. If so, set empty to TRUE
7700 * so that in ubc_upl_abort_range or ubc_upl_abort, we
7701 * can go ahead and deallocate the Vector UPL too.
7703 if (*empty
== TRUE
) {
7704 *empty
= vector_upl_set_subupl(vector_upl
, upl
, 0);
7705 upl_deallocate(upl
);
7707 goto process_upl_to_abort
;
7710 return KERN_SUCCESS
;
kern_return_t
upl_abort(
	upl_t	upl,
	int	error)
{
	boolean_t	empty;

	if (upl == UPL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return upl_abort_range(upl, 0, upl->size, error, &empty);
}

/* an option on commit should be wire */
kern_return_t
upl_commit(
	upl_t			upl,
	upl_page_info_t		*page_list,
	mach_msg_type_number_t	count)
{
	boolean_t	empty;

	if (upl == UPL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
}
7753 vm_page_t m
, nxt_page
= VM_PAGE_NULL
;
7755 int wired_count
= 0;
7758 panic("iopl_valid_data: NULL upl");
7760 if (vector_upl_is_valid(upl
)) {
7761 panic("iopl_valid_data: vector upl");
7763 if ((upl
->flags
& (UPL_DEVICE_MEMORY
| UPL_SHADOWED
| UPL_ACCESS_BLOCKED
| UPL_IO_WIRE
| UPL_INTERNAL
)) != UPL_IO_WIRE
) {
7764 panic("iopl_valid_data: unsupported upl, flags = %x", upl
->flags
);
7767 object
= upl
->map_object
;
7769 if (object
== kernel_object
|| object
== compressor_object
) {
7770 panic("iopl_valid_data: object == kernel or compressor");
7773 if (object
->purgable
== VM_PURGABLE_VOLATILE
||
7774 object
->purgable
== VM_PURGABLE_EMPTY
) {
7775 panic("iopl_valid_data: object %p purgable %d",
7776 object
, object
->purgable
);
7781 vm_object_lock(object
);
7782 VM_OBJECT_WIRED_PAGE_UPDATE_START(object
);
7784 if (object
->vo_size
== size
&& object
->resident_page_count
== (size
/ PAGE_SIZE
)) {
7785 nxt_page
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
7787 offset
= 0 + upl
->offset
- object
->paging_offset
;
7791 if (nxt_page
!= VM_PAGE_NULL
) {
7793 nxt_page
= (vm_page_t
)vm_page_queue_next(&nxt_page
->vmp_listq
);
7795 m
= vm_page_lookup(object
, offset
);
7796 offset
+= PAGE_SIZE
;
7798 if (m
== VM_PAGE_NULL
) {
7799 panic("iopl_valid_data: missing expected page at offset %lx", (long)offset
);
7803 if (!m
->vmp_absent
) {
7804 panic("iopl_valid_data: busy page w/o absent");
7807 if (m
->vmp_pageq
.next
|| m
->vmp_pageq
.prev
) {
7808 panic("iopl_valid_data: busy+absent page on page queue");
7810 if (m
->vmp_reusable
) {
7811 panic("iopl_valid_data: %p is reusable", m
);
7814 m
->vmp_absent
= FALSE
;
7815 m
->vmp_dirty
= TRUE
;
7816 assert(m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
7817 assert(m
->vmp_wire_count
== 0);
7818 m
->vmp_wire_count
++;
7819 assert(m
->vmp_wire_count
);
7820 if (m
->vmp_wire_count
== 1) {
7821 m
->vmp_q_state
= VM_PAGE_IS_WIRED
;
7824 panic("iopl_valid_data: %p already wired\n", m
);
7827 PAGE_WAKEUP_DONE(m
);
7832 VM_OBJECT_WIRED_PAGE_COUNT(object
, wired_count
);
7833 assert(object
->resident_page_count
>= object
->wired_page_count
);
7835 /* no need to adjust purgeable accounting for this object: */
7836 assert(object
->purgable
!= VM_PURGABLE_VOLATILE
);
7837 assert(object
->purgable
!= VM_PURGABLE_EMPTY
);
7839 vm_page_lockspin_queues();
7840 vm_page_wire_count
+= wired_count
;
7841 vm_page_unlock_queues();
7843 VM_OBJECT_WIRED_PAGE_UPDATE_END(object
, tag
);
7844 vm_object_unlock(object
);
void
vm_object_set_pmap_cache_attr(
	vm_object_t		object,
	upl_page_info_array_t	user_page_list,
	unsigned int		num_pages,
	boolean_t		batch_pmap_op)
{
	unsigned int	cache_attr = 0;

	cache_attr = object->wimg_bits & VM_WIMG_MASK;
	assert(user_page_list);
	if (cache_attr != VM_WIMG_USE_DEFAULT) {
		PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
	}
}

boolean_t	vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t);
kern_return_t	vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int *);
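/*
 * vm_object_iopl_wire_full() is the fast path used by vm_object_iopl_request()
 * when every page of the object is already resident: it wires each resident
 * page in place and fills in the UPL's lite list and page list, returning
 * FALSE (so the caller falls back to the slow path) if any page is busy,
 * fictitious, absent, in error, being cleaned, restarted, or laundered.
 */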
boolean_t
vm_object_iopl_wire_full(vm_object_t object, upl_t upl, upl_page_info_array_t user_page_list,
    wpl_array_t lite_list, upl_control_flags_t cntrl_flags, vm_tag_t tag)
{
	vm_page_t	dst_page;
	unsigned int	entry;
	int		page_count;
	int		delayed_unlock = 0;
	boolean_t	retval = TRUE;
	ppnum_t		phys_page;

	vm_object_lock_assert_exclusive(object);
	assert(object->purgable != VM_PURGABLE_VOLATILE);
	assert(object->purgable != VM_PURGABLE_EMPTY);
	assert(object->pager == NULL);
	assert(object->copy == NULL);
	assert(object->shadow == NULL);

	page_count = object->resident_page_count;
	dst_page = (vm_page_t)vm_page_queue_first(&object->memq);

	vm_page_lock_queues();

	while (page_count--) {
		if (dst_page->vmp_busy ||
		    dst_page->vmp_fictitious ||
		    dst_page->vmp_absent ||
		    dst_page->vmp_error ||
		    dst_page->vmp_cleaning ||
		    dst_page->vmp_restart ||
		    dst_page->vmp_laundry) {
			retval = FALSE;
			break;
		}
		if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->vmp_written_by_kernel == TRUE) {
			retval = FALSE;
			break;
		}
		dst_page->vmp_reference = TRUE;

		vm_page_wire(dst_page, tag, FALSE);

		if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
			SET_PAGE_DIRTY(dst_page, FALSE);
		}
		entry = (unsigned int)(dst_page->vmp_offset / PAGE_SIZE);
		assert(entry >= 0 && entry < object->resident_page_count);
		lite_list[entry >> 5] |= 1 << (entry & 31);

		phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);

		if (phys_page > upl->highest_page) {
			upl->highest_page = phys_page;
		}

		if (user_page_list) {
			user_page_list[entry].phys_addr = phys_page;
			user_page_list[entry].absent = dst_page->vmp_absent;
			user_page_list[entry].dirty = dst_page->vmp_dirty;
			user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
			user_page_list[entry].precious = dst_page->vmp_precious;
			user_page_list[entry].device = FALSE;
			user_page_list[entry].speculative = FALSE;
			user_page_list[entry].cs_validated = FALSE;
			user_page_list[entry].cs_tainted = FALSE;
			user_page_list[entry].cs_nx = FALSE;
			user_page_list[entry].needed = FALSE;
			user_page_list[entry].mark = FALSE;
		}
		if (delayed_unlock++ > 256) {
			delayed_unlock = 0;
			lck_mtx_yield(&vm_page_queue_lock);

			VM_CHECK_MEMORYSTATUS;
		}
		dst_page = (vm_page_t)vm_page_queue_next(&dst_page->vmp_listq);
	}
	vm_page_unlock_queues();

	VM_CHECK_MEMORYSTATUS;

	return retval;
}
7957 vm_object_iopl_wire_empty(vm_object_t object
, upl_t upl
, upl_page_info_array_t user_page_list
,
7958 wpl_array_t lite_list
, upl_control_flags_t cntrl_flags
, vm_tag_t tag
, vm_object_offset_t
*dst_offset
,
7959 int page_count
, int* page_grab_count
)
7962 boolean_t no_zero_fill
= FALSE
;
7964 int pages_wired
= 0;
7965 int pages_inserted
= 0;
7967 uint64_t delayed_ledger_update
= 0;
7968 kern_return_t ret
= KERN_SUCCESS
;
7972 vm_object_lock_assert_exclusive(object
);
7973 assert(object
->purgable
!= VM_PURGABLE_VOLATILE
);
7974 assert(object
->purgable
!= VM_PURGABLE_EMPTY
);
7975 assert(object
->pager
== NULL
);
7976 assert(object
->copy
== NULL
);
7977 assert(object
->shadow
== NULL
);
7979 if (cntrl_flags
& UPL_SET_INTERRUPTIBLE
) {
7980 interruptible
= THREAD_ABORTSAFE
;
7982 interruptible
= THREAD_UNINT
;
7985 if (cntrl_flags
& (UPL_NOZEROFILL
| UPL_NOZEROFILLIO
)) {
7986 no_zero_fill
= TRUE
;
7990 #if CONFIG_SECLUDED_MEMORY
7991 if (object
->can_grab_secluded
) {
7992 grab_options
|= VM_PAGE_GRAB_SECLUDED
;
7994 #endif /* CONFIG_SECLUDED_MEMORY */
7996 while (page_count
--) {
7997 while ((dst_page
= vm_page_grab_options(grab_options
))
7999 OSAddAtomic(page_count
, &vm_upl_wait_for_pages
);
8001 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
8003 if (vm_page_wait(interruptible
) == FALSE
) {
8007 OSAddAtomic(-page_count
, &vm_upl_wait_for_pages
);
8009 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, -1);
8011 ret
= MACH_SEND_INTERRUPTED
;
8014 OSAddAtomic(-page_count
, &vm_upl_wait_for_pages
);
8016 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
8018 if (no_zero_fill
== FALSE
) {
8019 vm_page_zero_fill(dst_page
);
8021 dst_page
->vmp_absent
= TRUE
;
8024 dst_page
->vmp_reference
= TRUE
;
8026 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
8027 SET_PAGE_DIRTY(dst_page
, FALSE
);
8029 if (dst_page
->vmp_absent
== FALSE
) {
8030 assert(dst_page
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
8031 assert(dst_page
->vmp_wire_count
== 0);
8032 dst_page
->vmp_wire_count
++;
8033 dst_page
->vmp_q_state
= VM_PAGE_IS_WIRED
;
8034 assert(dst_page
->vmp_wire_count
);
8036 PAGE_WAKEUP_DONE(dst_page
);
8040 vm_page_insert_internal(dst_page
, object
, *dst_offset
, tag
, FALSE
, TRUE
, TRUE
, TRUE
, &delayed_ledger_update
);
8042 lite_list
[entry
>> 5] |= 1 << (entry
& 31);
8044 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
8046 if (phys_page
> upl
->highest_page
) {
8047 upl
->highest_page
= phys_page
;
8050 if (user_page_list
) {
8051 user_page_list
[entry
].phys_addr
= phys_page
;
8052 user_page_list
[entry
].absent
= dst_page
->vmp_absent
;
8053 user_page_list
[entry
].dirty
= dst_page
->vmp_dirty
;
8054 user_page_list
[entry
].free_when_done
= FALSE
;
8055 user_page_list
[entry
].precious
= FALSE
;
8056 user_page_list
[entry
].device
= FALSE
;
8057 user_page_list
[entry
].speculative
= FALSE
;
8058 user_page_list
[entry
].cs_validated
= FALSE
;
8059 user_page_list
[entry
].cs_tainted
= FALSE
;
8060 user_page_list
[entry
].cs_nx
= FALSE
;
8061 user_page_list
[entry
].needed
= FALSE
;
8062 user_page_list
[entry
].mark
= FALSE
;
8065 *dst_offset
+= PAGE_SIZE_64
;
8069 vm_page_lockspin_queues();
8070 vm_page_wire_count
+= pages_wired
;
8071 vm_page_unlock_queues();
8073 if (pages_inserted
) {
8074 if (object
->internal
) {
8075 OSAddAtomic(pages_inserted
, &vm_page_internal_count
);
8077 OSAddAtomic(pages_inserted
, &vm_page_external_count
);
8080 if (delayed_ledger_update
) {
8082 int ledger_idx_volatile
;
8083 int ledger_idx_nonvolatile
;
8084 int ledger_idx_volatile_compressed
;
8085 int ledger_idx_nonvolatile_compressed
;
8086 boolean_t do_footprint
;
8088 owner
= VM_OBJECT_OWNER(object
);
8091 vm_object_ledger_tag_ledgers(object
,
8092 &ledger_idx_volatile
,
8093 &ledger_idx_nonvolatile
,
8094 &ledger_idx_volatile_compressed
,
8095 &ledger_idx_nonvolatile_compressed
,
8098 /* more non-volatile bytes */
8099 ledger_credit(owner
->ledger
,
8100 ledger_idx_nonvolatile
,
8101 delayed_ledger_update
);
8103 /* more footprint */
8104 ledger_credit(owner
->ledger
,
8105 task_ledgers
.phys_footprint
,
8106 delayed_ledger_update
);
8110 assert(page_grab_count
);
8111 *page_grab_count
= pages_inserted
;
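/*
 * vm_object_iopl_wire_empty() above is the counterpart fast path for an object
 * with no resident pages: it grabs fresh pages (zero-filling them unless the
 * caller asked not to), wires and inserts them, and batches the owner's
 * ledger / footprint updates via delayed_ledger_update.
 */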
8119 vm_object_iopl_request(
8121 vm_object_offset_t offset
,
8124 upl_page_info_array_t user_page_list
,
8125 unsigned int *page_list_count
,
8126 upl_control_flags_t cntrl_flags
,
8130 vm_object_offset_t dst_offset
;
8131 upl_size_t xfer_size
;
8134 wpl_array_t lite_list
= NULL
;
8135 int no_zero_fill
= FALSE
;
8136 unsigned int size_in_pages
;
8137 int page_grab_count
= 0;
8141 struct vm_object_fault_info fault_info
= {};
8142 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
8143 struct vm_page_delayed_work
*dwp
;
8147 boolean_t caller_lookup
;
8148 int io_tracking_flag
= 0;
8152 boolean_t set_cache_attr_needed
= FALSE
;
8153 boolean_t free_wired_pages
= FALSE
;
8154 boolean_t fast_path_empty_req
= FALSE
;
8155 boolean_t fast_path_full_req
= FALSE
;
8157 #if DEVELOPMENT || DEBUG
8158 task_t task
= current_task();
8159 #endif /* DEVELOPMENT || DEBUG */
8161 if (cntrl_flags
& ~UPL_VALID_FLAGS
) {
8163 * For forward compatibility's sake,
8164 * reject any unknown flag.
8166 return KERN_INVALID_VALUE
;
8168 if (vm_lopage_needed
== FALSE
) {
8169 cntrl_flags
&= ~UPL_NEED_32BIT_ADDR
;
8172 if (cntrl_flags
& UPL_NEED_32BIT_ADDR
) {
8173 if ((cntrl_flags
& (UPL_SET_IO_WIRE
| UPL_SET_LITE
)) != (UPL_SET_IO_WIRE
| UPL_SET_LITE
)) {
8174 return KERN_INVALID_VALUE
;
8177 if (object
->phys_contiguous
) {
8178 if ((offset
+ object
->vo_shadow_offset
) >= (vm_object_offset_t
)max_valid_dma_address
) {
8179 return KERN_INVALID_ADDRESS
;
8182 if (((offset
+ object
->vo_shadow_offset
) + size
) >= (vm_object_offset_t
)max_valid_dma_address
) {
8183 return KERN_INVALID_ADDRESS
;
8187 if (cntrl_flags
& (UPL_NOZEROFILL
| UPL_NOZEROFILLIO
)) {
8188 no_zero_fill
= TRUE
;
8191 if (cntrl_flags
& UPL_COPYOUT_FROM
) {
8192 prot
= VM_PROT_READ
;
8194 prot
= VM_PROT_READ
| VM_PROT_WRITE
;
8197 if ((!object
->internal
) && (object
->paging_offset
!= 0)) {
8198 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
8201 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_START
, size
, cntrl_flags
, prot
, 0);
8203 #if CONFIG_IOSCHED || UPL_DEBUG
8204 if ((object
->io_tracking
&& object
!= kernel_object
) || upl_debug_enabled
) {
8205 io_tracking_flag
|= UPL_CREATE_IO_TRACKING
;
8210 if (object
->io_tracking
) {
8211 /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
8212 if (object
!= kernel_object
) {
8213 io_tracking_flag
|= UPL_CREATE_EXPEDITE_SUP
;
8218 if (object
->phys_contiguous
) {
8224 if (cntrl_flags
& UPL_SET_INTERNAL
) {
8225 upl
= upl_create(UPL_CREATE_INTERNAL
| UPL_CREATE_LITE
| io_tracking_flag
, UPL_IO_WIRE
, psize
);
8227 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
8228 lite_list
= (wpl_array_t
) (((uintptr_t)user_page_list
) +
8229 ((psize
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
8231 user_page_list
= NULL
;
8235 upl
= upl_create(UPL_CREATE_LITE
| io_tracking_flag
, UPL_IO_WIRE
, psize
);
8237 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
8242 if (user_page_list
) {
8243 user_page_list
[0].device
= FALSE
;
8247 if (cntrl_flags
& UPL_NOZEROFILLIO
) {
8248 DTRACE_VM4(upl_nozerofillio
,
8249 vm_object_t
, object
,
8250 vm_object_offset_t
, offset
,
8255 upl
->map_object
= object
;
8258 size_in_pages
= size
/ PAGE_SIZE
;
8260 if (object
== kernel_object
&&
8261 !(cntrl_flags
& (UPL_NEED_32BIT_ADDR
| UPL_BLOCK_ACCESS
))) {
8262 upl
->flags
|= UPL_KERNEL_OBJECT
;
8264 vm_object_lock(object
);
8266 vm_object_lock_shared(object
);
8269 vm_object_lock(object
);
8270 vm_object_activity_begin(object
);
8273 * paging in progress also protects the paging_offset
8275 upl
->offset
= offset
+ object
->paging_offset
;
8277 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
8279 * The user requested that access to the pages in this UPL
8280 * be blocked until the UPL is commited or aborted.
8282 upl
->flags
|= UPL_ACCESS_BLOCKED
;
8285 #if CONFIG_IOSCHED || UPL_DEBUG
8286 if (upl
->flags
& UPL_TRACKED_BY_OBJECT
) {
8287 vm_object_activity_begin(object
);
8288 queue_enter(&object
->uplq
, upl
, upl_t
, uplq
);
	if (object->phys_contiguous) {
		if (upl->flags & UPL_ACCESS_BLOCKED) {
			assert(!object->blocked_access);
			object->blocked_access = TRUE;
		}

		vm_object_unlock(object);

		/*
		 * don't need any shadow mappings for this one
		 * since it is already I/O memory
		 */
		upl->flags |= UPL_DEVICE_MEMORY;

		upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1) >> PAGE_SHIFT);

		if (user_page_list) {
			user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset) >> PAGE_SHIFT);
			user_page_list[0].device = TRUE;
		}
		if (page_list_count != NULL) {
			if (upl->flags & UPL_INTERNAL) {
				*page_list_count = 0;
			} else {
				*page_list_count = 1;
			}
		}

		VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request, VM_IOPL_REQUEST, DBG_FUNC_END, page_grab_count, KERN_SUCCESS, 0, 0);
#if DEVELOPMENT || DEBUG
		if (task != NULL) {
			ledger_credit(task->ledger, task_ledgers.pages_grabbed_iopl, page_grab_count);
		}
#endif /* DEVELOPMENT || DEBUG */
		return KERN_SUCCESS;
	}
8328 if (object
!= kernel_object
&& object
!= compressor_object
) {
8330 * Protect user space from future COW operations
8332 #if VM_OBJECT_TRACKING_OP_TRUESHARE
8333 if (!object
->true_share
&&
8334 vm_object_tracking_inited
) {
8335 void *bt
[VM_OBJECT_TRACKING_BTDEPTH
];
8338 num
= OSBacktrace(bt
,
8339 VM_OBJECT_TRACKING_BTDEPTH
);
8340 btlog_add_entry(vm_object_tracking_btlog
,
8342 VM_OBJECT_TRACKING_OP_TRUESHARE
,
8346 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
8348 vm_object_lock_assert_exclusive(object
);
8349 object
->true_share
= TRUE
;
8351 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
8352 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
8356 if (!(cntrl_flags
& UPL_COPYOUT_FROM
) &&
8357 object
->copy
!= VM_OBJECT_NULL
) {
8359 * Honor copy-on-write obligations
8361 * The caller is gathering these pages and
8362 * might modify their contents. We need to
8363 * make sure that the copy object has its own
8364 * private copies of these pages before we let
8365 * the caller modify them.
8367 * NOTE: someone else could map the original object
8368 * after we've done this copy-on-write here, and they
8369 * could then see an inconsistent picture of the memory
8370 * while it's being modified via the UPL. To prevent this,
8371 * we would have to block access to these pages until the
8372 * UPL is released. We could use the UPL_BLOCK_ACCESS
8373 * code path for that...
8375 vm_object_update(object
,
8380 FALSE
, /* should_return */
8381 MEMORY_OBJECT_COPY_SYNC
,
8383 VM_PAGEOUT_DEBUG(iopl_cow
, 1);
8384 VM_PAGEOUT_DEBUG(iopl_cow_pages
, (size
>> PAGE_SHIFT
));
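	/*
	 * Illustrative note (editorial, not original source): a caller that cannot
	 * tolerate the window described in the NOTE above can ask for blocking
	 * semantics when it builds the UPL, e.g. (hypothetical call shape):
	 *
	 *     kr = vm_object_iopl_request(object, offset, size, &upl, NULL,
	 *         &page_list_count, UPL_COPYOUT_FROM | UPL_BLOCK_ACCESS, tag);
	 *
	 * With UPL_BLOCK_ACCESS the pages are marked busy and their mappings are
	 * removed before this routine returns, so other mappers fault and wait
	 * until the UPL is committed or aborted.
	 */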
8386 if (!(cntrl_flags
& (UPL_NEED_32BIT_ADDR
| UPL_BLOCK_ACCESS
)) &&
8387 object
->purgable
!= VM_PURGABLE_VOLATILE
&&
8388 object
->purgable
!= VM_PURGABLE_EMPTY
&&
8389 object
->copy
== NULL
&&
8390 size
== object
->vo_size
&&
8392 object
->shadow
== NULL
&&
8393 object
->pager
== NULL
) {
8394 if (object
->resident_page_count
== size_in_pages
) {
8395 assert(object
!= compressor_object
);
8396 assert(object
!= kernel_object
);
8397 fast_path_full_req
= TRUE
;
8398 } else if (object
->resident_page_count
== 0) {
8399 assert(object
!= compressor_object
);
8400 assert(object
!= kernel_object
);
8401 fast_path_empty_req
= TRUE
;
8402 set_cache_attr_needed
= TRUE
;
8406 if (cntrl_flags
& UPL_SET_INTERRUPTIBLE
) {
8407 interruptible
= THREAD_ABORTSAFE
;
8409 interruptible
= THREAD_UNINT
;
8415 dst_offset
= offset
;
8418 if (fast_path_full_req
) {
8419 if (vm_object_iopl_wire_full(object
, upl
, user_page_list
, lite_list
, cntrl_flags
, tag
) == TRUE
) {
8423 * we couldn't complete the processing of this request on the fast path
8424 * so fall through to the slow path and finish up
8426 } else if (fast_path_empty_req
) {
8427 if (cntrl_flags
& UPL_REQUEST_NO_FAULT
) {
8428 ret
= KERN_MEMORY_ERROR
;
8431 ret
= vm_object_iopl_wire_empty(object
, upl
, user_page_list
, lite_list
, cntrl_flags
, tag
, &dst_offset
, size_in_pages
, &page_grab_count
);
8434 free_wired_pages
= TRUE
;
8440 fault_info
.behavior
= VM_BEHAVIOR_SEQUENTIAL
;
8441 fault_info
.lo_offset
= offset
;
8442 fault_info
.hi_offset
= offset
+ xfer_size
;
8443 fault_info
.mark_zf_absent
= TRUE
;
8444 fault_info
.interruptible
= interruptible
;
8445 fault_info
.batch_pmap_op
= TRUE
;
8448 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
8451 vm_fault_return_t result
;
8455 if (fast_path_full_req
) {
8457 * if we get here, it means that we ran into a page
8458 * state we couldn't handle in the fast path and
8459 * bailed out to the slow path... since the order
8460 * we look at pages is different between the 2 paths,
8461 * the following check is needed to determine whether
8462 * this page was already processed in the fast path
8464 if (lite_list
[entry
>> 5] & (1 << (entry
& 31))) {
8468 dst_page
= vm_page_lookup(object
, dst_offset
);
8470 if (dst_page
== VM_PAGE_NULL
||
8471 dst_page
->vmp_busy
||
8472 dst_page
->vmp_error
||
8473 dst_page
->vmp_restart
||
8474 dst_page
->vmp_absent
||
8475 dst_page
->vmp_fictitious
) {
8476 if (object
== kernel_object
) {
8477 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
8479 if (object
== compressor_object
) {
8480 panic("vm_object_iopl_request: missing/bad page in compressor object\n");
8483 if (cntrl_flags
& UPL_REQUEST_NO_FAULT
) {
8484 ret
= KERN_MEMORY_ERROR
;
8487 set_cache_attr_needed
= TRUE
;
8490 * We just looked up the page and the result remains valid
	 * until the object lock is released, so send it to
8492 * vm_fault_page() (as "dst_page"), to avoid having to
8493 * look it up again there.
8495 caller_lookup
= TRUE
;
8499 kern_return_t error_code
;
8501 fault_info
.cluster_size
= xfer_size
;
8503 vm_object_paging_begin(object
);
8505 result
= vm_fault_page(object
, dst_offset
,
8506 prot
| VM_PROT_WRITE
, FALSE
,
8508 &prot
, &dst_page
, &top_page
,
8510 &error_code
, no_zero_fill
,
8511 FALSE
, &fault_info
);
8513 /* our lookup is no longer valid at this point */
8514 caller_lookup
= FALSE
;
8517 case VM_FAULT_SUCCESS
:
8520 if (!dst_page
->vmp_absent
) {
8521 PAGE_WAKEUP_DONE(dst_page
);
8524 * we only get back an absent page if we
8525 * requested that it not be zero-filled
8526 * because we are about to fill it via I/O
8528 * absent pages should be left BUSY
8529 * to prevent them from being faulted
8530 * into an address space before we've
8531 * had a chance to complete the I/O on
8532 * them since they may contain info that
8533 * shouldn't be seen by the faulting task
8537 * Release paging references and
8538 * top-level placeholder page, if any.
8540 if (top_page
!= VM_PAGE_NULL
) {
8541 vm_object_t local_object
;
8543 local_object
= VM_PAGE_OBJECT(top_page
);
8546 * comparing 2 packed pointers
8548 if (top_page
->vmp_object
!= dst_page
->vmp_object
) {
8549 vm_object_lock(local_object
);
8550 VM_PAGE_FREE(top_page
);
8551 vm_object_paging_end(local_object
);
8552 vm_object_unlock(local_object
);
8554 VM_PAGE_FREE(top_page
);
8555 vm_object_paging_end(local_object
);
8558 vm_object_paging_end(object
);
8561 case VM_FAULT_RETRY
:
8562 vm_object_lock(object
);
8565 case VM_FAULT_MEMORY_SHORTAGE
:
8566 OSAddAtomic((size_in_pages
- entry
), &vm_upl_wait_for_pages
);
8568 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
8570 if (vm_page_wait(interruptible
)) {
8571 OSAddAtomic(-(size_in_pages
- entry
), &vm_upl_wait_for_pages
);
8573 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
8574 vm_object_lock(object
);
8578 OSAddAtomic(-(size_in_pages
- entry
), &vm_upl_wait_for_pages
);
8580 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, -1);
8584 case VM_FAULT_INTERRUPTED
:
8585 error_code
= MACH_SEND_INTERRUPTED
;
8586 case VM_FAULT_MEMORY_ERROR
:
8588 ret
= (error_code
? error_code
: KERN_MEMORY_ERROR
);
8590 vm_object_lock(object
);
8593 case VM_FAULT_SUCCESS_NO_VM_PAGE
:
8594 /* success but no page: fail */
8595 vm_object_paging_end(object
);
8596 vm_object_unlock(object
);
8600 panic("vm_object_iopl_request: unexpected error"
8601 " 0x%x from vm_fault_page()\n", result
);
8603 } while (result
!= VM_FAULT_SUCCESS
);
8605 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
8607 if (upl
->flags
& UPL_KERNEL_OBJECT
) {
8608 goto record_phys_addr
;
8611 if (dst_page
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) {
8612 dst_page
->vmp_busy
= TRUE
;
8613 goto record_phys_addr
;
8616 if (dst_page
->vmp_cleaning
) {
8618 * Someone else is cleaning this page in place.
8619 * In theory, we should be able to proceed and use this
8620 * page but they'll probably end up clearing the "busy"
8621 * bit on it in upl_commit_range() but they didn't set
8622 * it, so they would clear our "busy" bit and open
8623 * us to race conditions.
8624 * We'd better wait for the cleaning to complete and
8627 VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning
, 1);
8628 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
8631 if (dst_page
->vmp_laundry
) {
8632 vm_pageout_steal_laundry(dst_page
, FALSE
);
8635 if ((cntrl_flags
& UPL_NEED_32BIT_ADDR
) &&
8636 phys_page
>= (max_valid_dma_address
>> PAGE_SHIFT
)) {
8641 * support devices that can't DMA above 32 bits
8642 * by substituting pages from a pool of low address
8643 * memory for any pages we find above the 4G mark
8644 * can't substitute if the page is already wired because
8645 * we don't know whether that physical address has been
8646 * handed out to some other 64 bit capable DMA device to use
8648 if (VM_PAGE_WIRED(dst_page
)) {
8649 ret
= KERN_PROTECTION_FAILURE
;
8652 low_page
= vm_page_grablo();
8654 if (low_page
== VM_PAGE_NULL
) {
8655 ret
= KERN_RESOURCE_SHORTAGE
;
			/*
			 * from here until the vm_page_replace completes
			 * we mustn't drop the object lock... we don't
			 * want anyone refaulting this page in and using
			 * it after we disconnect it... we want the fault
			 * to find the new page being substituted.
			 */
8665 if (dst_page
->vmp_pmapped
) {
8666 refmod
= pmap_disconnect(phys_page
);
8671 if (!dst_page
->vmp_absent
) {
8672 vm_page_copy(dst_page
, low_page
);
8675 low_page
->vmp_reference
= dst_page
->vmp_reference
;
8676 low_page
->vmp_dirty
= dst_page
->vmp_dirty
;
8677 low_page
->vmp_absent
= dst_page
->vmp_absent
;
8679 if (refmod
& VM_MEM_REFERENCED
) {
8680 low_page
->vmp_reference
= TRUE
;
8682 if (refmod
& VM_MEM_MODIFIED
) {
8683 SET_PAGE_DIRTY(low_page
, FALSE
);
8686 vm_page_replace(low_page
, object
, dst_offset
);
8688 dst_page
= low_page
;
8690 * vm_page_grablo returned the page marked
8691 * BUSY... we don't need a PAGE_WAKEUP_DONE
8692 * here, because we've never dropped the object lock
8694 if (!dst_page
->vmp_absent
) {
8695 dst_page
->vmp_busy
= FALSE
;
8698 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
8700 if (!dst_page
->vmp_busy
) {
8701 dwp
->dw_mask
|= DW_vm_page_wire
;
8704 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
8706 * Mark the page "busy" to block any future page fault
8707 * on this page in addition to wiring it.
8708 * We'll also remove the mapping
8709 * of all these pages before leaving this routine.
8711 assert(!dst_page
->vmp_fictitious
);
8712 dst_page
->vmp_busy
= TRUE
;
8715 * expect the page to be used
8716 * page queues lock must be held to set 'reference'
8718 dwp
->dw_mask
|= DW_set_reference
;
8720 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
8721 SET_PAGE_DIRTY(dst_page
, TRUE
);
8723 if ((cntrl_flags
& UPL_REQUEST_FORCE_COHERENCY
) && dst_page
->vmp_written_by_kernel
== TRUE
) {
8724 pmap_sync_page_attributes_phys(phys_page
);
8725 dst_page
->vmp_written_by_kernel
= FALSE
;
8729 if (dst_page
->vmp_busy
) {
8730 upl
->flags
|= UPL_HAS_BUSY
;
8733 lite_list
[entry
>> 5] |= 1 << (entry
& 31);
8735 if (phys_page
> upl
->highest_page
) {
8736 upl
->highest_page
= phys_page
;
8739 if (user_page_list
) {
8740 user_page_list
[entry
].phys_addr
= phys_page
;
8741 user_page_list
[entry
].free_when_done
= dst_page
->vmp_free_when_done
;
8742 user_page_list
[entry
].absent
= dst_page
->vmp_absent
;
8743 user_page_list
[entry
].dirty
= dst_page
->vmp_dirty
;
8744 user_page_list
[entry
].precious
= dst_page
->vmp_precious
;
8745 user_page_list
[entry
].device
= FALSE
;
8746 user_page_list
[entry
].needed
= FALSE
;
8747 if (dst_page
->vmp_clustered
== TRUE
) {
8748 user_page_list
[entry
].speculative
= (dst_page
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) ? TRUE
: FALSE
;
8750 user_page_list
[entry
].speculative
= FALSE
;
8752 user_page_list
[entry
].cs_validated
= dst_page
->vmp_cs_validated
;
8753 user_page_list
[entry
].cs_tainted
= dst_page
->vmp_cs_tainted
;
8754 user_page_list
[entry
].cs_nx
= dst_page
->vmp_cs_nx
;
8755 user_page_list
[entry
].mark
= FALSE
;
8757 if (object
!= kernel_object
&& object
!= compressor_object
) {
8759 * someone is explicitly grabbing this page...
8760 * update clustered and speculative state
8763 if (dst_page
->vmp_clustered
) {
8764 VM_PAGE_CONSUME_CLUSTERED(dst_page
);
8769 dst_offset
+= PAGE_SIZE_64
;
8770 xfer_size
-= PAGE_SIZE
;
8773 VM_PAGE_ADD_DELAYED_WORK(dwp
, dst_page
, dw_count
);
8775 if (dw_count
>= dw_limit
) {
8776 vm_page_do_delayed_work(object
, tag
, &dw_array
[0], dw_count
);
8783 assert(entry
== size_in_pages
);
8786 vm_page_do_delayed_work(object
, tag
, &dw_array
[0], dw_count
);
8789 if (user_page_list
&& set_cache_attr_needed
== TRUE
) {
8790 vm_object_set_pmap_cache_attr(object
, user_page_list
, size_in_pages
, TRUE
);
8793 if (page_list_count
!= NULL
) {
8794 if (upl
->flags
& UPL_INTERNAL
) {
8795 *page_list_count
= 0;
8796 } else if (*page_list_count
> size_in_pages
) {
8797 *page_list_count
= size_in_pages
;
8800 vm_object_unlock(object
);
8802 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
8804 * We've marked all the pages "busy" so that future
8805 * page faults will block.
8806 * Now remove the mapping for these pages, so that they
8807 * can't be accessed without causing a page fault.
8809 vm_object_pmap_protect(object
, offset
, (vm_object_size_t
)size
,
8810 PMAP_NULL
, 0, VM_PROT_NONE
);
8811 assert(!object
->blocked_access
);
8812 object
->blocked_access
= TRUE
;
8815 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_END
, page_grab_count
, KERN_SUCCESS
, 0, 0);
8816 #if DEVELOPMENT || DEBUG
8818 ledger_credit(task
->ledger
, task_ledgers
.pages_grabbed_iopl
, page_grab_count
);
8820 #endif /* DEVELOPMENT || DEBUG */
8821 return KERN_SUCCESS
;
8826 for (; offset
< dst_offset
; offset
+= PAGE_SIZE
) {
8827 boolean_t need_unwire
;
8829 dst_page
= vm_page_lookup(object
, offset
);
8831 if (dst_page
== VM_PAGE_NULL
) {
8832 panic("vm_object_iopl_request: Wired page missing. \n");
8836 * if we've already processed this page in an earlier
8837 * dw_do_work, we need to undo the wiring... we will
8838 * leave the dirty and reference bits on if they
8839 * were set, since we don't have a good way of knowing
8840 * what the previous state was and we won't get here
8841 * under any normal circumstances... we will always
8842 * clear BUSY and wakeup any waiters via vm_page_free
8843 * or PAGE_WAKEUP_DONE
8848 if (dw_array
[dw_index
].dw_m
== dst_page
) {
8850 * still in the deferred work list
8851 * which means we haven't yet called
8852 * vm_page_wire on this page
8854 need_unwire
= FALSE
;
8860 vm_page_lock_queues();
8862 if (dst_page
->vmp_absent
|| free_wired_pages
== TRUE
) {
8863 vm_page_free(dst_page
);
8865 need_unwire
= FALSE
;
8867 if (need_unwire
== TRUE
) {
8868 vm_page_unwire(dst_page
, TRUE
);
8871 PAGE_WAKEUP_DONE(dst_page
);
8873 vm_page_unlock_queues();
8875 if (need_unwire
== TRUE
) {
8876 VM_STAT_INCR(reactivations
);
8882 if (!(upl
->flags
& UPL_KERNEL_OBJECT
)) {
8883 vm_object_activity_end(object
);
8884 vm_object_collapse(object
, 0, TRUE
);
8886 vm_object_unlock(object
);
8889 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_END
, page_grab_count
, ret
, 0, 0);
8890 #if DEVELOPMENT || DEBUG
8892 ledger_credit(task
->ledger
, task_ledgers
.pages_grabbed_iopl
, page_grab_count
);
8894 #endif /* DEVELOPMENT || DEBUG */
8903 kern_return_t retval
;
8904 boolean_t upls_locked
;
8905 vm_object_t object1
, object2
;
8907 if (upl1
== UPL_NULL
|| upl2
== UPL_NULL
|| upl1
== upl2
|| ((upl1
->flags
& UPL_VECTOR
) == UPL_VECTOR
) || ((upl2
->flags
& UPL_VECTOR
) == UPL_VECTOR
)) {
8908 return KERN_INVALID_ARGUMENT
;
8911 upls_locked
= FALSE
;
8914 * Since we need to lock both UPLs at the same time,
8915 * avoid deadlocks by always taking locks in the same order.
8924 upls_locked
= TRUE
; /* the UPLs will need to be unlocked */
8926 object1
= upl1
->map_object
;
8927 object2
= upl2
->map_object
;
8929 if (upl1
->offset
!= 0 || upl2
->offset
!= 0 ||
8930 upl1
->size
!= upl2
->size
) {
8932 * We deal only with full objects, not subsets.
8933 * That's because we exchange the entire backing store info
8934 * for the objects: pager, resident pages, etc... We can't do
8937 retval
= KERN_INVALID_VALUE
;
	 * Transpose the VM objects' backing store.
8944 retval
= vm_object_transpose(object1
, object2
,
8945 (vm_object_size_t
) upl1
->size
);
8947 if (retval
== KERN_SUCCESS
) {
8949 * Make each UPL point to the correct VM object, i.e. the
8950 * object holding the pages that the UPL refers to...
8952 #if CONFIG_IOSCHED || UPL_DEBUG
8953 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)) {
8954 vm_object_lock(object1
);
8955 vm_object_lock(object2
);
8957 if (upl1
->flags
& UPL_TRACKED_BY_OBJECT
) {
8958 queue_remove(&object1
->uplq
, upl1
, upl_t
, uplq
);
8960 if (upl2
->flags
& UPL_TRACKED_BY_OBJECT
) {
8961 queue_remove(&object2
->uplq
, upl2
, upl_t
, uplq
);
8964 upl1
->map_object
= object2
;
8965 upl2
->map_object
= object1
;
8967 #if CONFIG_IOSCHED || UPL_DEBUG
8968 if (upl1
->flags
& UPL_TRACKED_BY_OBJECT
) {
8969 queue_enter(&object2
->uplq
, upl1
, upl_t
, uplq
);
8971 if (upl2
->flags
& UPL_TRACKED_BY_OBJECT
) {
8972 queue_enter(&object1
->uplq
, upl2
, upl_t
, uplq
);
8974 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)) {
8975 vm_object_unlock(object2
);
8976 vm_object_unlock(object1
);
8988 upls_locked
= FALSE
;
9000 upl_page_info_t
*user_page_list
;
9003 if (!(upl
->flags
& UPL_INTERNAL
) || count
<= 0) {
9007 size_in_pages
= upl
->size
/ PAGE_SIZE
;
9009 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
9011 while (count
-- && index
< size_in_pages
) {
9012 user_page_list
[index
++].needed
= TRUE
;
/*
 * Reserve of virtual addresses in the kernel address space.
 * We need to map the physical pages in the kernel, so that we
 * can call the code-signing or slide routines with a kernel
 * virtual address.  We keep this pool of pre-allocated kernel
 * virtual addresses so that we don't have to scan the kernel's
 * virtual address space each time we need to work with
 * a physical page.
 */
decl_simple_lock_data(, vm_paging_lock)
#define VM_PAGING_NUM_PAGES     64
vm_map_offset_t vm_paging_base_address = 0;
boolean_t       vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
int             vm_paging_max_index = 0;
int             vm_paging_page_waiter = 0;
int             vm_paging_page_waiter_total = 0;

unsigned long   vm_paging_no_kernel_page = 0;
unsigned long   vm_paging_objects_mapped = 0;
unsigned long   vm_paging_pages_mapped = 0;
unsigned long   vm_paging_objects_mapped_slow = 0;
unsigned long   vm_paging_pages_mapped_slow = 0;
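/*
 * Sizing note (editorial): with VM_PAGING_NUM_PAGES = 64 this pool reserves
 * 64 page slots of kernel VA, i.e. 64 * 4 KiB = 256 KiB with 4 KiB pages
 * (1 MiB with 16 KiB pages); see the vm_map_find_space() call in
 * vm_paging_map_init() below.
 */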
9041 vm_paging_map_init(void)
9044 vm_map_offset_t page_map_offset
;
9045 vm_map_entry_t map_entry
;
9047 assert(vm_paging_base_address
== 0);
9050 * Initialize our pool of pre-allocated kernel
9051 * virtual addresses.
9053 page_map_offset
= 0;
9054 kr
= vm_map_find_space(kernel_map
,
9056 VM_PAGING_NUM_PAGES
* PAGE_SIZE
,
9059 VM_MAP_KERNEL_FLAGS_NONE
,
9060 VM_KERN_MEMORY_NONE
,
9062 if (kr
!= KERN_SUCCESS
) {
9063 panic("vm_paging_map_init: kernel_map full\n");
9065 VME_OBJECT_SET(map_entry
, kernel_object
);
9066 VME_OFFSET_SET(map_entry
, page_map_offset
);
9067 map_entry
->protection
= VM_PROT_NONE
;
9068 map_entry
->max_protection
= VM_PROT_NONE
;
9069 map_entry
->permanent
= TRUE
;
9070 vm_object_reference(kernel_object
);
9071 vm_map_unlock(kernel_map
);
9073 assert(vm_paging_base_address
== 0);
9074 vm_paging_base_address
= page_map_offset
;
9078 * vm_paging_map_object:
9079 * Maps part of a VM object's pages in the kernel
9080 * virtual address space, using the pre-allocated
9081 * kernel virtual addresses, if possible.
9083 * The VM object is locked. This lock will get
9084 * dropped and re-acquired though, so the caller
9085 * must make sure the VM object is kept alive
9086 * (by holding a VM map that has a reference
9087 * on it, for example, or taking an extra reference).
9088 * The page should also be kept busy to prevent
9089 * it from being reclaimed.
9092 vm_paging_map_object(
9095 vm_object_offset_t offset
,
9096 vm_prot_t protection
,
9097 boolean_t can_unlock_object
,
9098 vm_map_size_t
*size
, /* IN/OUT */
9099 vm_map_offset_t
*address
, /* OUT */
9100 boolean_t
*need_unmap
) /* OUT */
9103 vm_map_offset_t page_map_offset
;
9104 vm_map_size_t map_size
;
9105 vm_object_offset_t object_offset
;
9108 if (page
!= VM_PAGE_NULL
&& *size
== PAGE_SIZE
) {
9109 /* use permanent 1-to-1 kernel mapping of physical memory ? */
9111 *address
= (vm_map_offset_t
)
9112 PHYSMAP_PTOV((pmap_paddr_t
)VM_PAGE_GET_PHYS_PAGE(page
) <<
9114 *need_unmap
= FALSE
;
9115 return KERN_SUCCESS
;
9116 #elif __arm__ || __arm64__
9117 *address
= (vm_map_offset_t
)
9118 phystokv((pmap_paddr_t
)VM_PAGE_GET_PHYS_PAGE(page
) << PAGE_SHIFT
);
9119 *need_unmap
= FALSE
;
9120 return KERN_SUCCESS
;
9122 #warn "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..."
9125 assert(page
->vmp_busy
);
9127 * Use one of the pre-allocated kernel virtual addresses
9128 * and just enter the VM page in the kernel address space
9129 * at that virtual address.
9131 simple_lock(&vm_paging_lock
, &vm_pageout_lck_grp
);
9134 * Try and find an available kernel virtual address
9135 * from our pre-allocated pool.
9137 page_map_offset
= 0;
9139 for (i
= 0; i
< VM_PAGING_NUM_PAGES
; i
++) {
9140 if (vm_paging_page_inuse
[i
] == FALSE
) {
9142 vm_paging_base_address
+
9147 if (page_map_offset
!= 0) {
9148 /* found a space to map our page ! */
9152 if (can_unlock_object
) {
9154 * If we can afford to unlock the VM object,
9155 * let's take the slow path now...
9160 * We can't afford to unlock the VM object, so
9161 * let's wait for a space to become available...
9163 vm_paging_page_waiter_total
++;
9164 vm_paging_page_waiter
++;
9165 kr
= assert_wait((event_t
)&vm_paging_page_waiter
, THREAD_UNINT
);
9166 if (kr
== THREAD_WAITING
) {
9167 simple_unlock(&vm_paging_lock
);
9168 kr
= thread_block(THREAD_CONTINUE_NULL
);
9169 simple_lock(&vm_paging_lock
, &vm_pageout_lck_grp
);
9171 vm_paging_page_waiter
--;
9172 /* ... and try again */
9175 if (page_map_offset
!= 0) {
9177 * We found a kernel virtual address;
9178 * map the physical page to that virtual address.
9180 if (i
> vm_paging_max_index
) {
9181 vm_paging_max_index
= i
;
9183 vm_paging_page_inuse
[i
] = TRUE
;
9184 simple_unlock(&vm_paging_lock
);
9186 page
->vmp_pmapped
= TRUE
;
9189 * Keep the VM object locked over the PMAP_ENTER
9190 * and the actual use of the page by the kernel,
9191 * or this pmap mapping might get undone by a
9192 * vm_object_pmap_protect() call...
9194 PMAP_ENTER(kernel_pmap
,
9202 assert(kr
== KERN_SUCCESS
);
9203 vm_paging_objects_mapped
++;
9204 vm_paging_pages_mapped
++;
9205 *address
= page_map_offset
;
9209 kasan_notify_address(page_map_offset
, PAGE_SIZE
);
9212 /* all done and mapped, ready to use ! */
9213 return KERN_SUCCESS
;
9217 * We ran out of pre-allocated kernel virtual
9218 * addresses. Just map the page in the kernel
9219 * the slow and regular way.
9221 vm_paging_no_kernel_page
++;
9222 simple_unlock(&vm_paging_lock
);
9225 if (!can_unlock_object
) {
9228 *need_unmap
= FALSE
;
9229 return KERN_NOT_SUPPORTED
;
9232 object_offset
= vm_object_trunc_page(offset
);
9233 map_size
= vm_map_round_page(*size
,
9234 VM_MAP_PAGE_MASK(kernel_map
));
9237 * Try and map the required range of the object
9241 vm_object_reference_locked(object
); /* for the map entry */
9242 vm_object_unlock(object
);
9244 kr
= vm_map_enter(kernel_map
,
9249 VM_MAP_KERNEL_FLAGS_NONE
,
9250 VM_KERN_MEMORY_NONE
,
9257 if (kr
!= KERN_SUCCESS
) {
9260 *need_unmap
= FALSE
;
9261 vm_object_deallocate(object
); /* for the map entry */
9262 vm_object_lock(object
);
9269 * Enter the mapped pages in the page table now.
9271 vm_object_lock(object
);
9273 * VM object must be kept locked from before PMAP_ENTER()
9274 * until after the kernel is done accessing the page(s).
9275 * Otherwise, the pmap mappings in the kernel could be
9276 * undone by a call to vm_object_pmap_protect().
9279 for (page_map_offset
= 0;
9281 map_size
-= PAGE_SIZE_64
, page_map_offset
+= PAGE_SIZE_64
) {
9282 page
= vm_page_lookup(object
, offset
+ page_map_offset
);
9283 if (page
== VM_PAGE_NULL
) {
9284 printf("vm_paging_map_object: no page !?");
9285 vm_object_unlock(object
);
9286 kr
= vm_map_remove(kernel_map
, *address
, *size
,
9287 VM_MAP_REMOVE_NO_FLAGS
);
9288 assert(kr
== KERN_SUCCESS
);
9291 *need_unmap
= FALSE
;
9292 vm_object_lock(object
);
9293 return KERN_MEMORY_ERROR
;
9295 page
->vmp_pmapped
= TRUE
;
9297 //assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page)));
9298 PMAP_ENTER(kernel_pmap
,
9299 *address
+ page_map_offset
,
9306 assert(kr
== KERN_SUCCESS
);
9308 kasan_notify_address(*address
+ page_map_offset
, PAGE_SIZE
);
9312 vm_paging_objects_mapped_slow
++;
9313 vm_paging_pages_mapped_slow
+= (unsigned long) (map_size
/ PAGE_SIZE_64
);
9317 return KERN_SUCCESS
;
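/*
 * Usage sketch (editorial, not original source): a caller that needs a kernel
 * virtual mapping of a busy page brackets its access with
 * vm_paging_map_object() / vm_paging_unmap_object().  The argument order here
 * follows the parameter list above and is otherwise an assumption:
 *
 *     vm_map_size_t   ksize = PAGE_SIZE;
 *     vm_map_offset_t kaddr;
 *     boolean_t       need_unmap;
 *
 *     kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *         FALSE, &ksize, &kaddr, &need_unmap);
 *     if (kr == KERN_SUCCESS) {
 *         ... access the page contents through kaddr ...
 *         if (need_unmap) {
 *             vm_paging_unmap_object(object, kaddr, kaddr + ksize);
 *         }
 *     }
 *
 * Per the header comment, the object must be kept alive and locked and the
 * page kept busy for the duration.
 */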
9321 * vm_paging_unmap_object:
9322 * Unmaps part of a VM object's pages from the kernel
9323 * virtual address space.
9325 * The VM object is locked. This lock will get
9326 * dropped and re-acquired though.
9329 vm_paging_unmap_object(
9331 vm_map_offset_t start
,
9332 vm_map_offset_t end
)
9337 if ((vm_paging_base_address
== 0) ||
9338 (start
< vm_paging_base_address
) ||
9339 (end
> (vm_paging_base_address
9340 + (VM_PAGING_NUM_PAGES
* PAGE_SIZE
)))) {
9342 * We didn't use our pre-allocated pool of
9343 * kernel virtual address. Deallocate the
9346 if (object
!= VM_OBJECT_NULL
) {
9347 vm_object_unlock(object
);
9349 kr
= vm_map_remove(kernel_map
, start
, end
,
9350 VM_MAP_REMOVE_NO_FLAGS
);
9351 if (object
!= VM_OBJECT_NULL
) {
9352 vm_object_lock(object
);
9354 assert(kr
== KERN_SUCCESS
);
9357 * We used a kernel virtual address from our
9358 * pre-allocated pool. Put it back in the pool
9361 assert(end
- start
== PAGE_SIZE
);
9362 i
= (int) ((start
- vm_paging_base_address
) >> PAGE_SHIFT
);
9363 assert(i
>= 0 && i
< VM_PAGING_NUM_PAGES
);
9365 /* undo the pmap mapping */
9366 pmap_remove(kernel_pmap
, start
, end
);
9368 simple_lock(&vm_paging_lock
, &vm_pageout_lck_grp
);
9369 vm_paging_page_inuse
[i
] = FALSE
;
9370 if (vm_paging_page_waiter
) {
9371 thread_wakeup(&vm_paging_page_waiter
);
9373 simple_unlock(&vm_paging_lock
);
/*
 * page->vmp_object must be locked
 */
void
vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
{
	if (!queues_locked) {
		vm_page_lockspin_queues();
	}

	page->vmp_free_when_done = FALSE;
	/*
	 * need to drop the laundry count...
	 * we may also need to remove it
	 * from the I/O paging queue...
	 * vm_pageout_throttle_up handles both cases
	 *
	 * the laundry and pageout_queue flags are cleared...
	 */
	vm_pageout_throttle_up(page);

	if (!queues_locked) {
		vm_page_unlock_queues();
	}
}
upl_t
vector_upl_create(vm_offset_t upl_offset)
{
	int vector_upl_size = sizeof(struct _vector_upl);
	int i = 0;
	upl_t upl;
	vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);

	upl = upl_create(0, UPL_VECTOR, 0);
	upl->vector_upl = vector_upl;
	upl->offset = upl_offset;
	vector_upl->size = 0;
	vector_upl->offset = upl_offset;
	vector_upl->invalid_upls = 0;
	vector_upl->num_upls = 0;
	vector_upl->pagelist = NULL;

	for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
		vector_upl->upl_iostates[i].size = 0;
		vector_upl->upl_iostates[i].offset = 0;
	}
	return upl;
}

void
vector_upl_deallocate(upl_t upl)
{
	if (upl) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			if (vector_upl->invalid_upls != vector_upl->num_upls) {
				panic("Deallocating non-empty Vectored UPL\n");
			}
			kfree(vector_upl->pagelist, (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
			vector_upl->invalid_upls = 0;
			vector_upl->num_upls = 0;
			vector_upl->pagelist = NULL;
			vector_upl->size = 0;
			vector_upl->offset = 0;
			kfree(vector_upl, sizeof(struct _vector_upl));
			vector_upl = (vector_upl_t)0xfeedfeed;
		} else {
			panic("vector_upl_deallocate was passed a non-vectored upl\n");
		}
	} else {
		panic("vector_upl_deallocate was passed a NULL upl\n");
	}
}

boolean_t
vector_upl_is_valid(upl_t upl)
{
	if (upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) {
			return FALSE;
		} else {
			return TRUE;
		}
	}
	return FALSE;
}
9468 vector_upl_set_subupl(upl_t upl
, upl_t subupl
, uint32_t io_size
)
9470 if (vector_upl_is_valid(upl
)) {
9471 vector_upl_t vector_upl
= upl
->vector_upl
;
9476 if (io_size
< PAGE_SIZE
) {
9477 io_size
= PAGE_SIZE
;
9479 subupl
->vector_upl
= (void*)vector_upl
;
9480 vector_upl
->upl_elems
[vector_upl
->num_upls
++] = subupl
;
9481 vector_upl
->size
+= io_size
;
9482 upl
->size
+= io_size
;
9484 uint32_t i
= 0, invalid_upls
= 0;
9485 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
9486 if (vector_upl
->upl_elems
[i
] == subupl
) {
9490 if (i
== vector_upl
->num_upls
) {
9491 panic("Trying to remove sub-upl when none exists");
9494 vector_upl
->upl_elems
[i
] = NULL
;
9495 invalid_upls
= hw_atomic_add(&(vector_upl
)->invalid_upls
, 1);
9496 if (invalid_upls
== vector_upl
->num_upls
) {
9503 panic("vector_upl_set_subupl was passed a NULL upl element\n");
9506 panic("vector_upl_set_subupl was passed a non-vectored upl\n");
9509 panic("vector_upl_set_subupl was passed a NULL upl\n");
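/*
 * Usage sketch (editorial, not original source): the cluster I/O layer is the
 * expected client of these helpers.  A vectored UPL is assembled roughly like
 * this (all values illustrative):
 *
 *     upl_t vupl = vector_upl_create(upl_offset);
 *     for each sub-UPL built for a contiguous run of the I/O:
 *         vector_upl_set_subupl(vupl, subupl, sub_io_size);
 *         vector_upl_set_iostate(vupl, subupl, sub_offset, sub_io_size);
 *     vector_upl_set_pagelist(vupl);
 *     ... issue the I/O, commit or abort the sub-UPLs as they complete ...
 *     vector_upl_deallocate(vupl);
 *
 * vector_upl_deallocate() panics unless every sub-UPL slot has been
 * invalidated first, as checked above.
 */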
9516 vector_upl_set_pagelist(upl_t upl
)
9518 if (vector_upl_is_valid(upl
)) {
9520 vector_upl_t vector_upl
= upl
->vector_upl
;
9523 vm_offset_t pagelist_size
= 0, cur_upl_pagelist_size
= 0;
9525 vector_upl
->pagelist
= (upl_page_info_array_t
)kalloc(sizeof(struct upl_page_info
) * (vector_upl
->size
/ PAGE_SIZE
));
9527 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
9528 cur_upl_pagelist_size
= sizeof(struct upl_page_info
) * vector_upl
->upl_elems
[i
]->size
/ PAGE_SIZE
;
9529 bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl
->upl_elems
[i
]), (char*)vector_upl
->pagelist
+ pagelist_size
, cur_upl_pagelist_size
);
9530 pagelist_size
+= cur_upl_pagelist_size
;
9531 if (vector_upl
->upl_elems
[i
]->highest_page
> upl
->highest_page
) {
9532 upl
->highest_page
= vector_upl
->upl_elems
[i
]->highest_page
;
9535 assert( pagelist_size
== (sizeof(struct upl_page_info
) * (vector_upl
->size
/ PAGE_SIZE
)));
9537 panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
9540 panic("vector_upl_set_pagelist was passed a NULL upl\n");
9545 vector_upl_subupl_byindex(upl_t upl
, uint32_t index
)
9547 if (vector_upl_is_valid(upl
)) {
9548 vector_upl_t vector_upl
= upl
->vector_upl
;
9550 if (index
< vector_upl
->num_upls
) {
9551 return vector_upl
->upl_elems
[index
];
9554 panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
9561 vector_upl_subupl_byoffset(upl_t upl
, upl_offset_t
*upl_offset
, upl_size_t
*upl_size
)
9563 if (vector_upl_is_valid(upl
)) {
9565 vector_upl_t vector_upl
= upl
->vector_upl
;
9568 upl_t subupl
= NULL
;
9569 vector_upl_iostates_t subupl_state
;
9571 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
9572 subupl
= vector_upl
->upl_elems
[i
];
9573 subupl_state
= vector_upl
->upl_iostates
[i
];
9574 if (*upl_offset
<= (subupl_state
.offset
+ subupl_state
.size
- 1)) {
9575 /* We could have been passed an offset/size pair that belongs
9576 * to an UPL element that has already been committed/aborted.
9577 * If so, return NULL.
9579 if (subupl
== NULL
) {
9582 if ((subupl_state
.offset
+ subupl_state
.size
) < (*upl_offset
+ *upl_size
)) {
9583 *upl_size
= (subupl_state
.offset
+ subupl_state
.size
) - *upl_offset
;
9584 if (*upl_size
> subupl_state
.size
) {
9585 *upl_size
= subupl_state
.size
;
9588 if (*upl_offset
>= subupl_state
.offset
) {
9589 *upl_offset
-= subupl_state
.offset
;
9591 panic("Vector UPL offset miscalculation\n");
9597 panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
9604 vector_upl_get_submap(upl_t upl
, vm_map_t
*v_upl_submap
, vm_offset_t
*submap_dst_addr
)
9606 *v_upl_submap
= NULL
;
9608 if (vector_upl_is_valid(upl
)) {
9609 vector_upl_t vector_upl
= upl
->vector_upl
;
9611 *v_upl_submap
= vector_upl
->submap
;
9612 *submap_dst_addr
= vector_upl
->submap_dst_addr
;
9614 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
9617 panic("vector_upl_get_submap was passed a null UPL\n");
9622 vector_upl_set_submap(upl_t upl
, vm_map_t submap
, vm_offset_t submap_dst_addr
)
9624 if (vector_upl_is_valid(upl
)) {
9625 vector_upl_t vector_upl
= upl
->vector_upl
;
9627 vector_upl
->submap
= submap
;
9628 vector_upl
->submap_dst_addr
= submap_dst_addr
;
9630 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
9633 panic("vector_upl_get_submap was passed a NULL UPL\n");
9638 vector_upl_set_iostate(upl_t upl
, upl_t subupl
, upl_offset_t offset
, upl_size_t size
)
9640 if (vector_upl_is_valid(upl
)) {
9642 vector_upl_t vector_upl
= upl
->vector_upl
;
9645 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
9646 if (vector_upl
->upl_elems
[i
] == subupl
) {
9651 if (i
== vector_upl
->num_upls
) {
9652 panic("setting sub-upl iostate when none exists");
9655 vector_upl
->upl_iostates
[i
].offset
= offset
;
9656 if (size
< PAGE_SIZE
) {
9659 vector_upl
->upl_iostates
[i
].size
= size
;
9661 panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
9664 panic("vector_upl_set_iostate was passed a NULL UPL\n");
9669 vector_upl_get_iostate(upl_t upl
, upl_t subupl
, upl_offset_t
*offset
, upl_size_t
*size
)
9671 if (vector_upl_is_valid(upl
)) {
9673 vector_upl_t vector_upl
= upl
->vector_upl
;
9676 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
9677 if (vector_upl
->upl_elems
[i
] == subupl
) {
9682 if (i
== vector_upl
->num_upls
) {
9683 panic("getting sub-upl iostate when none exists");
9686 *offset
= vector_upl
->upl_iostates
[i
].offset
;
9687 *size
= vector_upl
->upl_iostates
[i
].size
;
9689 panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
9692 panic("vector_upl_get_iostate was passed a NULL UPL\n");
9697 vector_upl_get_iostate_byindex(upl_t upl
, uint32_t index
, upl_offset_t
*offset
, upl_size_t
*size
)
9699 if (vector_upl_is_valid(upl
)) {
9700 vector_upl_t vector_upl
= upl
->vector_upl
;
9702 if (index
< vector_upl
->num_upls
) {
9703 *offset
= vector_upl
->upl_iostates
[index
].offset
;
9704 *size
= vector_upl
->upl_iostates
[index
].size
;
9706 *offset
= *size
= 0;
9709 panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
9712 panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
9717 upl_get_internal_vectorupl_pagelist(upl_t upl
)
9719 return ((vector_upl_t
)(upl
->vector_upl
))->pagelist
;
9723 upl_get_internal_vectorupl(upl_t upl
)
9725 return upl
->vector_upl
;
9729 upl_get_internal_pagelist_offset(void)
9731 return sizeof(struct upl
);
9740 upl
->flags
|= UPL_CLEAR_DIRTY
;
9742 upl
->flags
&= ~UPL_CLEAR_DIRTY
;
9753 upl
->ext_ref_count
++;
9755 if (!upl
->ext_ref_count
) {
9756 panic("upl_set_referenced not %p\n", upl
);
9758 upl
->ext_ref_count
--;
9767 vm_offset_t upl_offset
,
9772 if ((upl
->flags
& UPL_EXPEDITE_SUPPORTED
) == 0) {
9776 assert(upl
->upl_reprio_info
!= 0);
9777 for (i
= (int)(upl_offset
/ PAGE_SIZE
), j
= 0; j
< io_size
; i
++, j
+= PAGE_SIZE
) {
9778 UPL_SET_REPRIO_INFO(upl
, i
, blkno
, io_size
);
void
memoryshot(unsigned int event, unsigned int control)
{
	if (vm_debug_events) {
		KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
		    vm_page_active_count, vm_page_inactive_count,
		    vm_page_free_count, vm_page_speculative_count,
		    vm_page_throttled_count);
	}
}
boolean_t
upl_device_page(upl_page_info_t *upl)
{
	return UPL_DEVICE_PAGE(upl);
}

boolean_t
upl_page_present(upl_page_info_t *upl, int index)
{
	return UPL_PAGE_PRESENT(upl, index);
}

boolean_t
upl_speculative_page(upl_page_info_t *upl, int index)
{
	return UPL_SPECULATIVE_PAGE(upl, index);
}

boolean_t
upl_dirty_page(upl_page_info_t *upl, int index)
{
	return UPL_DIRTY_PAGE(upl, index);
}

boolean_t
upl_valid_page(upl_page_info_t *upl, int index)
{
	return UPL_VALID_PAGE(upl, index);
}

ppnum_t
upl_phys_page(upl_page_info_t *upl, int index)
{
	return UPL_PHYS_PAGE(upl, index);
}

void
upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
{
	upl[index].mark = v;
}

boolean_t
upl_page_get_mark(upl_page_info_t *upl, int index)
{
	return upl[index].mark;
}
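/*
 * Usage sketch (editorial, not original source): a client that was handed an
 * internal UPL can walk its page list with the accessors above, e.g.:
 *
 *     upl_page_info_t *pl = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
 *
 *     for (int i = 0; i < npages; i++) {    // npages: pages the caller requested
 *         if (!upl_valid_page(pl, i)) {
 *             continue;
 *         }
 *         ppnum_t   pn    = upl_phys_page(pl, i);
 *         boolean_t dirty = upl_dirty_page(pl, i);
 *         ... program DMA with pn, track dirty state ...
 *     }
 *
 * UPL_GET_INTERNAL_PAGE_LIST_SIMPLE is the same helper used by
 * vector_upl_set_pagelist() above.
 */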
9843 vm_countdirtypages(void)
9855 vm_page_lock_queues();
9856 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_inactive
);
9858 if (m
== (vm_page_t
)0) {
9865 if (m
->vmp_free_when_done
) {
9868 if (m
->vmp_precious
) {
9872 assert(VM_PAGE_OBJECT(m
) != kernel_object
);
9873 m
= (vm_page_t
) vm_page_queue_next(&m
->vmp_pageq
);
9874 if (m
== (vm_page_t
)0) {
9877 } while (!vm_page_queue_end(&vm_page_queue_inactive
, (vm_page_queue_entry_t
) m
));
9878 vm_page_unlock_queues();
9880 vm_page_lock_queues();
9881 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_throttled
);
9883 if (m
== (vm_page_t
)0) {
9888 assert(m
->vmp_dirty
);
9889 assert(!m
->vmp_free_when_done
);
9890 assert(VM_PAGE_OBJECT(m
) != kernel_object
);
9891 m
= (vm_page_t
) vm_page_queue_next(&m
->vmp_pageq
);
9892 if (m
== (vm_page_t
)0) {
9895 } while (!vm_page_queue_end(&vm_page_queue_throttled
, (vm_page_queue_entry_t
) m
));
9896 vm_page_unlock_queues();
9898 vm_page_lock_queues();
9899 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_anonymous
);
9901 if (m
== (vm_page_t
)0) {
9908 if (m
->vmp_free_when_done
) {
9911 if (m
->vmp_precious
) {
9915 assert(VM_PAGE_OBJECT(m
) != kernel_object
);
9916 m
= (vm_page_t
) vm_page_queue_next(&m
->vmp_pageq
);
9917 if (m
== (vm_page_t
)0) {
9920 } while (!vm_page_queue_end(&vm_page_queue_anonymous
, (vm_page_queue_entry_t
) m
));
9921 vm_page_unlock_queues();
9923 printf("IN Q: %d : %d : %d\n", dpages
, pgopages
, precpages
);
9929 vm_page_lock_queues();
9930 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_active
);
9933 if (m
== (vm_page_t
)0) {
9939 if (m
->vmp_free_when_done
) {
9942 if (m
->vmp_precious
) {
9946 assert(VM_PAGE_OBJECT(m
) != kernel_object
);
9947 m
= (vm_page_t
) vm_page_queue_next(&m
->vmp_pageq
);
9948 if (m
== (vm_page_t
)0) {
9951 } while (!vm_page_queue_end(&vm_page_queue_active
, (vm_page_queue_entry_t
) m
));
9952 vm_page_unlock_queues();
9954 printf("AC Q: %d : %d : %d\n", dpages
, pgopages
, precpages
);
9956 #endif /* MACH_BSD */
9961 upl_get_cached_tier(upl_t upl
)
9964 if (upl
->flags
& UPL_TRACKED_BY_OBJECT
) {
9965 return upl
->upl_priority
;
9969 #endif /* CONFIG_IOSCHED */
void
upl_callout_iodone(upl_t upl)
{
	struct upl_io_completion *upl_ctx = upl->upl_iodone;

	if (upl_ctx) {
		void (*iodone_func)(void *, int) = upl_ctx->io_done;

		assert(upl_ctx->io_done);
		(*iodone_func)(upl_ctx->io_context, upl_ctx->io_error);
	}
}

void
upl_set_iodone(upl_t upl, void *upl_iodone)
{
	upl->upl_iodone = (struct upl_io_completion *)upl_iodone;
}

void
upl_set_iodone_error(upl_t upl, int error)
{
	struct upl_io_completion *upl_ctx = upl->upl_iodone;

	if (upl_ctx) {
		upl_ctx->io_error = error;
	}
}
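/*
 * Usage sketch (editorial, not original source): an I/O path that wants a
 * completion callout attaches a struct upl_io_completion before issuing the
 * I/O.  The field names follow the accesses above; everything else is
 * illustrative:
 *
 *     static void my_iodone(void *ctx, int error) { ... }
 *
 *     struct upl_io_completion io_ctx;
 *     io_ctx.io_context = my_state;
 *     io_ctx.io_done    = my_iodone;
 *     io_ctx.io_error   = 0;
 *
 *     upl_set_iodone(upl, &io_ctx);
 *     ... on failure: upl_set_iodone_error(upl, error) ...
 *     ... on completion: upl_callout_iodone(upl) invokes my_iodone() ...
 */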
10004 upl_get_highest_page(
10007 return upl
->highest_page
;
10018 upl_associated_upl(upl_t upl
)
10020 return upl
->associated_upl
;
10024 upl_set_associated_upl(upl_t upl
, upl_t associated_upl
)
10026 upl
->associated_upl
= associated_upl
;
10030 upl_lookup_vnode(upl_t upl
)
10032 if (!upl
->map_object
->internal
) {
10033 return vnode_pager_lookup_vnode(upl
->map_object
->pager
);
10041 upl_ubc_alias_set(upl_t upl
, uintptr_t alias1
, uintptr_t alias2
)
10043 upl
->ubc_alias1
= alias1
;
10044 upl
->ubc_alias2
= alias2
;
10045 return KERN_SUCCESS
;
10048 upl_ubc_alias_get(upl_t upl
, uintptr_t * al
, uintptr_t * al2
)
10051 *al
= upl
->ubc_alias1
;
10054 *al2
= upl
->ubc_alias2
;
10056 return KERN_SUCCESS
;
10058 #endif /* UPL_DEBUG */
#if VM_PRESSURE_EVENTS
/*
 * Upward trajectory.
 */
extern boolean_t vm_compressor_low_on_space(void);

boolean_t
VM_PRESSURE_NORMAL_TO_WARNING(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
			/* No frozen processes to kill */
			if (memorystatus_frozen_count == 0) {
				/* Not enough suspended processes available. */
				if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
					return TRUE;
				}
			}
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0;
	}
}

boolean_t
VM_PRESSURE_WARNING_TO_CRITICAL(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_critical) {
			return TRUE;
		}
		return FALSE;
	} else {
		return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
	}
}

/*
 * Downward trajectory.
 */
boolean_t
VM_PRESSURE_WARNING_TO_NORMAL(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages above our threshold */
		unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0;
	}
}

boolean_t
VM_PRESSURE_CRITICAL_TO_WARNING(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages above our threshold */
		unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
	}
}
10132 #endif /* VM_PRESSURE_EVENTS */
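/*
 * Worked example (editorial) for the VM_PRESSURE_* transitions above: the
 * downward checks build in roughly 15% of hysteresis.  If
 * memorystatus_available_pages_pressure were 10000 pages, the system would
 * enter the warning level once fewer than 10000 pages are available, but
 * would only report a return to normal once more than
 * 10000 + (15 * 10000) / 100 = 11500 pages are available, which keeps the
 * pressure level from flapping around the threshold.  (10000 is illustrative,
 * not a real tunable value.)
 */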
10136 #define VM_TEST_COLLAPSE_COMPRESSOR 0
10137 #define VM_TEST_WIRE_AND_EXTRACT 0
10138 #define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
10140 #define VM_TEST_KERNEL_OBJECT_FAULT 0
10141 #endif /* __arm64__ */
10142 #define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)
10144 #if VM_TEST_COLLAPSE_COMPRESSOR
10145 extern boolean_t vm_object_collapse_compressor_allowed
;
10146 #include <IOKit/IOLib.h>
10148 vm_test_collapse_compressor(void)
10150 vm_object_size_t backing_size
, top_size
;
10151 vm_object_t backing_object
, top_object
;
10152 vm_map_offset_t backing_offset
, top_offset
;
10153 unsigned char *backing_address
, *top_address
;
10156 printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");
10158 /* create backing object */
10159 backing_size
= 15 * PAGE_SIZE
;
10160 backing_object
= vm_object_allocate(backing_size
);
10161 assert(backing_object
!= VM_OBJECT_NULL
);
10162 printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
10164 /* map backing object */
10165 backing_offset
= 0;
10166 kr
= vm_map_enter(kernel_map
, &backing_offset
, backing_size
, 0,
10167 VM_FLAGS_ANYWHERE
, VM_MAP_KERNEL_FLAGS_NONE
,
10168 backing_object
, 0, FALSE
,
10169 VM_PROT_DEFAULT
, VM_PROT_DEFAULT
, VM_INHERIT_DEFAULT
);
10170 assert(kr
== KERN_SUCCESS
);
10171 backing_address
= (unsigned char *) backing_offset
;
10172 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10173 "mapped backing object %p at 0x%llx\n",
10174 backing_object
, (uint64_t) backing_offset
);
10175 /* populate with pages to be compressed in backing object */
10176 backing_address
[0x1 * PAGE_SIZE
] = 0xB1;
10177 backing_address
[0x4 * PAGE_SIZE
] = 0xB4;
10178 backing_address
[0x7 * PAGE_SIZE
] = 0xB7;
10179 backing_address
[0xa * PAGE_SIZE
] = 0xBA;
10180 backing_address
[0xd * PAGE_SIZE
] = 0xBD;
10181 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10182 "populated pages to be compressed in "
10183 "backing_object %p\n", backing_object
);
10184 /* compress backing object */
10185 vm_object_pageout(backing_object
);
10186 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
10188 /* wait for all the pages to be gone */
10189 while (*(volatile int *)&backing_object
->resident_page_count
!= 0) {
10192 printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
10194 /* populate with pages to be resident in backing object */
10195 backing_address
[0x0 * PAGE_SIZE
] = 0xB0;
10196 backing_address
[0x3 * PAGE_SIZE
] = 0xB3;
10197 backing_address
[0x6 * PAGE_SIZE
] = 0xB6;
10198 backing_address
[0x9 * PAGE_SIZE
] = 0xB9;
10199 backing_address
[0xc * PAGE_SIZE
] = 0xBC;
10200 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10201 "populated pages to be resident in "
10202 "backing_object %p\n", backing_object
);
10203 /* leave the other pages absent */
10204 /* mess with the paging_offset of the backing_object */
10205 assert(backing_object
->paging_offset
== 0);
10206 backing_object
->paging_offset
= 0x3000;
10208 /* create top object */
10209 top_size
= 9 * PAGE_SIZE
;
10210 top_object
= vm_object_allocate(top_size
);
10211 assert(top_object
!= VM_OBJECT_NULL
);
10212 printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
10214 /* map top object */
10216 kr
= vm_map_enter(kernel_map
, &top_offset
, top_size
, 0,
10217 VM_FLAGS_ANYWHERE
, VM_MAP_KERNEL_FLAGS_NONE
,
10218 top_object
, 0, FALSE
,
10219 VM_PROT_DEFAULT
, VM_PROT_DEFAULT
, VM_INHERIT_DEFAULT
);
10220 assert(kr
== KERN_SUCCESS
);
10221 top_address
= (unsigned char *) top_offset
;
10222 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10223 "mapped top object %p at 0x%llx\n",
10224 top_object
, (uint64_t) top_offset
);
10225 /* populate with pages to be compressed in top object */
10226 top_address
[0x3 * PAGE_SIZE
] = 0xA3;
10227 top_address
[0x4 * PAGE_SIZE
] = 0xA4;
10228 top_address
[0x5 * PAGE_SIZE
] = 0xA5;
10229 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10230 "populated pages to be compressed in "
10231 "top_object %p\n", top_object
);
10232 /* compress top object */
10233 vm_object_pageout(top_object
);
10234 printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
10236 /* wait for all the pages to be gone */
10237 while (top_object
->resident_page_count
!= 0) {
10240 printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
10242 /* populate with pages to be resident in top object */
10243 top_address
[0x0 * PAGE_SIZE
] = 0xA0;
10244 top_address
[0x1 * PAGE_SIZE
] = 0xA1;
10245 top_address
[0x2 * PAGE_SIZE
] = 0xA2;
10246 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10247 "populated pages to be resident in "
10248 "top_object %p\n", top_object
);
10249 /* leave the other pages absent */
10251 /* link the 2 objects */
10252 vm_object_reference(backing_object
);
10253 top_object
->shadow
= backing_object
;
10254 top_object
->vo_shadow_offset
= 0x3000;
10255 printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
10256 top_object
, backing_object
);
10258 /* unmap backing object */
10259 vm_map_remove(kernel_map
,
10261 backing_offset
+ backing_size
,
10262 VM_MAP_REMOVE_NO_FLAGS
);
10263 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10264 "unmapped backing_object %p [0x%llx:0x%llx]\n",
10266 (uint64_t) backing_offset
,
10267 (uint64_t) (backing_offset
+ backing_size
));
10270 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object
);
10271 vm_object_lock(top_object
);
10272 vm_object_collapse(top_object
, 0, FALSE
);
10273 vm_object_unlock(top_object
);
10274 printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object
);
10277 if (top_object
->shadow
!= VM_OBJECT_NULL
) {
10278 printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
10279 printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
10280 if (vm_object_collapse_compressor_allowed
) {
10281 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
10284 /* check the contents of the mapping */
10285 unsigned char expect
[9] =
10286 { 0xA0, 0xA1, 0xA2, /* resident in top */
10287 0xA3, 0xA4, 0xA5, /* compressed in top */
10288 0xB9, /* resident in backing + shadow_offset */
10289 0xBD, /* compressed in backing + shadow_offset + paging_offset */
10290 0x00 }; /* absent in both */
10291 unsigned char actual
[9];
10292 unsigned int i
, errors
;
10295 for (i
= 0; i
< sizeof(actual
); i
++) {
10296 actual
[i
] = (unsigned char) top_address
[i
* PAGE_SIZE
];
10297 if (actual
[i
] != expect
[i
]) {
10301 printf("VM_TEST_COLLAPSE_COMPRESSOR: "
10302 "actual [%x %x %x %x %x %x %x %x %x] "
10303 "expect [%x %x %x %x %x %x %x %x %x] "
10305 actual
[0], actual
[1], actual
[2], actual
[3],
10306 actual
[4], actual
[5], actual
[6], actual
[7],
10308 expect
[0], expect
[1], expect
[2], expect
[3],
10309 expect
[4], expect
[5], expect
[6], expect
[7],
10313 panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
10315 printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
10319 #else /* VM_TEST_COLLAPSE_COMPRESSOR */
10320 #define vm_test_collapse_compressor()
10321 #endif /* VM_TEST_COLLAPSE_COMPRESSOR */
10323 #if VM_TEST_WIRE_AND_EXTRACT
10324 extern ledger_template_t task_ledger_template
;
10325 #include <mach/mach_vm.h>
10326 extern ppnum_t
vm_map_get_phys_page(vm_map_t map
,
10327 vm_offset_t offset
);
10329 vm_test_wire_and_extract(void)
10332 vm_map_t user_map
, wire_map
;
10333 mach_vm_address_t user_addr
, wire_addr
;
10334 mach_vm_size_t user_size
, wire_size
;
10335 mach_vm_offset_t cur_offset
;
10336 vm_prot_t cur_prot
, max_prot
;
10337 ppnum_t user_ppnum
, wire_ppnum
;
10340 ledger
= ledger_instantiate(task_ledger_template
,
10341 LEDGER_CREATE_ACTIVE_ENTRIES
);
10342 user_map
= vm_map_create(pmap_create(ledger
, 0, PMAP_CREATE_64BIT
),
10346 wire_map
= vm_map_create(NULL
,
10351 user_size
= 0x10000;
10352 kr
= mach_vm_allocate(user_map
,
10355 VM_FLAGS_ANYWHERE
);
10356 assert(kr
== KERN_SUCCESS
);
10358 wire_size
= user_size
;
10359 kr
= mach_vm_remap(wire_map
,
10370 assert(kr
== KERN_SUCCESS
);
10371 for (cur_offset
= 0;
10372 cur_offset
< wire_size
;
10373 cur_offset
+= PAGE_SIZE
) {
10374 kr
= vm_map_wire_and_extract(wire_map
,
10375 wire_addr
+ cur_offset
,
10376 VM_PROT_DEFAULT
| VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK
),
10379 assert(kr
== KERN_SUCCESS
);
10380 user_ppnum
= vm_map_get_phys_page(user_map
,
10381 user_addr
+ cur_offset
);
10382 printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
10383 "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
10385 user_map
, user_addr
+ cur_offset
, user_ppnum
,
10386 wire_map
, wire_addr
+ cur_offset
, wire_ppnum
);
10387 if (kr
!= KERN_SUCCESS
||
10389 wire_ppnum
!= user_ppnum
) {
10390 panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
10393 cur_offset
-= PAGE_SIZE
;
10394 kr
= vm_map_wire_and_extract(wire_map
,
10395 wire_addr
+ cur_offset
,
10399 assert(kr
== KERN_SUCCESS
);
10400 printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
10401 "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
10403 user_map
, user_addr
+ cur_offset
, user_ppnum
,
10404 wire_map
, wire_addr
+ cur_offset
, wire_ppnum
);
10405 if (kr
!= KERN_SUCCESS
||
10407 wire_ppnum
!= user_ppnum
) {
10408 panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
10411 printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
10413 #else /* VM_TEST_WIRE_AND_EXTRACT */
10414 #define vm_test_wire_and_extract()
10415 #endif /* VM_TEST_WIRE_AND_EXTRACT */
10417 #if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
10419 vm_test_page_wire_overflow_panic(void)
10421 vm_object_t object
;
10424 printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");
10426 object
= vm_object_allocate(PAGE_SIZE
);
10427 vm_object_lock(object
);
10428 page
= vm_page_alloc(object
, 0x0);
10429 vm_page_lock_queues();
10431 vm_page_wire(page
, 1, FALSE
);
10432 } while (page
->wire_count
!= 0);
10433 vm_page_unlock_queues();
10434 vm_object_unlock(object
);
10435 panic("FBDP(%p,%p): wire_count overflow not detected\n",
10438 #else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
10439 #define vm_test_page_wire_overflow_panic()
10440 #endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
10442 #if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
10443 extern int copyinframe(vm_address_t fp
, char *frame
, boolean_t is64bit
);
10445 vm_test_kernel_object_fault(void)
10449 uintptr_t frameb
[2];
10452 kr
= kernel_memory_allocate(kernel_map
, &stack
,
10453 kernel_stack_size
+ (2 * PAGE_SIZE
),
10455 (KMA_KSTACK
| KMA_KOBJECT
|
10456 KMA_GUARD_FIRST
| KMA_GUARD_LAST
),
10457 VM_KERN_MEMORY_STACK
);
10458 if (kr
!= KERN_SUCCESS
) {
10459 panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr
);
10461 ret
= copyinframe((uintptr_t)stack
, (char *)frameb
, TRUE
);
10463 printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
10465 printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
10467 vm_map_remove(kernel_map
,
10469 stack
+ kernel_stack_size
+ (2 * PAGE_SIZE
),
10470 VM_MAP_REMOVE_KUNWIRE
);
10473 #else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
10474 #define vm_test_kernel_object_fault()
10475 #endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
10477 #if VM_TEST_DEVICE_PAGER_TRANSPOSE
10479 vm_test_device_pager_transpose(void)
10481 memory_object_t device_pager
;
10482 vm_object_t anon_object
, device_object
;
10484 vm_map_offset_t device_mapping
;
10487 size
= 3 * PAGE_SIZE
;
10488 anon_object
= vm_object_allocate(size
);
10489 assert(anon_object
!= VM_OBJECT_NULL
);
10490 device_pager
= device_pager_setup(NULL
, 0, size
, 0);
10491 assert(device_pager
!= NULL
);
10492 device_object
= memory_object_to_vm_object(device_pager
);
10493 assert(device_object
!= VM_OBJECT_NULL
);
10496 * Can't actually map this, since another thread might do a
10497 * vm_map_enter() that gets coalesced into this object, which
10498 * would cause the test to fail.
10500 vm_map_offset_t anon_mapping
= 0;
10501 kr
= vm_map_enter(kernel_map
, &anon_mapping
, size
, 0,
10502 VM_FLAGS_ANYWHERE
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_NONE
,
10503 anon_object
, 0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
,
10504 VM_INHERIT_DEFAULT
);
10505 assert(kr
== KERN_SUCCESS
);
10507 device_mapping
= 0;
10508 kr
= vm_map_enter_mem_object(kernel_map
, &device_mapping
, size
, 0,
10510 VM_MAP_KERNEL_FLAGS_NONE
,
10511 VM_KERN_MEMORY_NONE
,
10512 (void *)device_pager
, 0, FALSE
,
10513 VM_PROT_DEFAULT
, VM_PROT_ALL
,
10514 VM_INHERIT_DEFAULT
);
10515 assert(kr
== KERN_SUCCESS
);
10516 memory_object_deallocate(device_pager
);
10518 vm_object_lock(anon_object
);
10519 vm_object_activity_begin(anon_object
);
10520 anon_object
->blocked_access
= TRUE
;
10521 vm_object_unlock(anon_object
);
10522 vm_object_lock(device_object
);
10523 vm_object_activity_begin(device_object
);
10524 device_object
->blocked_access
= TRUE
;
10525 vm_object_unlock(device_object
);
10527 assert(anon_object
->ref_count
== 1);
10528 assert(!anon_object
->named
);
10529 assert(device_object
->ref_count
== 2);
10530 assert(device_object
->named
);
10532 kr
= vm_object_transpose(device_object
, anon_object
, size
);
10533 assert(kr
== KERN_SUCCESS
);
10535 vm_object_lock(anon_object
);
10536 vm_object_activity_end(anon_object
);
10537 anon_object
->blocked_access
= FALSE
;
10538 vm_object_unlock(anon_object
);
10539 vm_object_lock(device_object
);
10540 vm_object_activity_end(device_object
);
10541 device_object
->blocked_access
= FALSE
;
10542 vm_object_unlock(device_object
);
10544 assert(anon_object
->ref_count
== 2);
10545 assert(anon_object
->named
);
10547 kr
= vm_deallocate(kernel_map
, anon_mapping
, size
);
10548 assert(kr
== KERN_SUCCESS
);
10550 assert(device_object
->ref_count
== 1);
10551 assert(!device_object
->named
);
10552 kr
= vm_deallocate(kernel_map
, device_mapping
, size
);
10553 assert(kr
== KERN_SUCCESS
);
10555 printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
10557 #else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
10558 #define vm_test_device_pager_transpose()
10559 #endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
10564 vm_test_collapse_compressor();
10565 vm_test_wire_and_extract();
10566 vm_test_page_wire_overflow_panic();
10567 vm_test_kernel_object_fault();
10568 vm_test_device_pager_transpose();