/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_pageout.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	The proverbial page-out daemon.
 */
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>

#include <machine/vm_tuning.h>
#include <machine/commpage.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h> /* must be last */
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>

#include <san/kasan.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif /* CONFIG_PHANTOM_CACHE */

#include <libkern/OSDebug.h>
extern void mbuf_drain(boolean_t);

#if VM_PRESSURE_EVENTS

#if CONFIG_JETSAM
extern unsigned int memorystatus_available_pages;
extern unsigned int memorystatus_available_pages_pressure;
extern unsigned int memorystatus_available_pages_critical;
#else /* CONFIG_JETSAM */
extern uint64_t memorystatus_available_pages;
extern uint64_t memorystatus_available_pages_pressure;
extern uint64_t memorystatus_available_pages_critical;
#endif /* CONFIG_JETSAM */

extern unsigned int memorystatus_frozen_count;
extern unsigned int memorystatus_suspended_count;
extern vm_pressure_level_t memorystatus_vm_pressure_level;

extern lck_mtx_t memorystatus_jetsam_fg_band_lock;
extern uint32_t memorystatus_jetsam_fg_band_waiters;

void vm_pressure_response(void);
extern void consider_vm_pressure_events(void);

#define MEMORYSTATUS_SUSPENDED_THRESHOLD  4
#endif /* VM_PRESSURE_EVENTS */
thread_t  vm_pageout_scan_thread = THREAD_NULL;
boolean_t vps_dynamic_priority_enabled = FALSE;
#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE      /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
#ifdef CONFIG_EMBEDDED
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 1024
#else
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
#endif
#endif /* VM_PAGEOUT_BURST_INACTIVE_THROTTLE */

#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF 100          /* number of pages to move to break deadlock */
#endif /* VM_PAGEOUT_DEADLOCK_RELIEF */

#ifndef VM_PAGE_LAUNDRY_MAX
#define VM_PAGE_LAUNDRY_MAX 128UL               /* maximum pageouts on a given pageout queue */
#endif /* VM_PAGE_LAUNDRY_MAX */

#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT 1                 /* milliseconds */
#endif /* VM_PAGEOUT_BURST_WAIT */

#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT 50                /* milliseconds */
#endif /* VM_PAGEOUT_EMPTY_WAIT */

#ifndef VM_PAGEOUT_DEADLOCK_WAIT
#define VM_PAGEOUT_DEADLOCK_WAIT 100            /* milliseconds */
#endif /* VM_PAGEOUT_DEADLOCK_WAIT */

#ifndef VM_PAGEOUT_IDLE_WAIT
#define VM_PAGEOUT_IDLE_WAIT 10                 /* milliseconds */
#endif /* VM_PAGEOUT_IDLE_WAIT */

#ifndef VM_PAGEOUT_SWAP_WAIT
#define VM_PAGEOUT_SWAP_WAIT 10                 /* milliseconds */
#endif /* VM_PAGEOUT_SWAP_WAIT */

#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
#endif /* VM_PAGE_SPECULATIVE_TARGET */
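/*
 * Worked example (illustrative only; the numbers are hypothetical):
 * with vm_page_speculative_percentage == 5 and total = active + inactive
 * = 200,000 pages, VM_PAGE_SPECULATIVE_TARGET(200000) evaluates to
 * 200000 * 1 / (100 / 5) = 200000 / 20 = 10,000 pages.
 */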
/*
 * To obtain a reasonable LRU approximation, the inactive queue
 * needs to be large enough to give pages on it a chance to be
 * referenced a second time.  This macro defines the fraction
 * of active+inactive pages that should be inactive.
 * The pageout daemon uses it to update vm_page_inactive_target.
 *
 * If vm_page_free_count falls below vm_page_free_target and
 * vm_page_inactive_count is below vm_page_inactive_target,
 * then the pageout daemon starts running.
 */

#ifndef VM_PAGE_INACTIVE_TARGET
#define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 2)
#endif /* VM_PAGE_INACTIVE_TARGET */
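/*
 * Illustrative example (hypothetical numbers): with avail = 1,000,000
 * pages, VM_PAGE_INACTIVE_TARGET(1000000) = 500,000, i.e. the daemon
 * aims to keep roughly half of the active+inactive pool on the
 * inactive queue so pages get a chance at a second reference.
 */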
/*
 * Once the pageout daemon starts running, it keeps going
 * until vm_page_free_count meets or exceeds vm_page_free_target.
 */

#ifndef VM_PAGE_FREE_TARGET
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 100)
#else
#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
#endif
#endif /* VM_PAGE_FREE_TARGET */
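/*
 * Illustrative example (hypothetical numbers): with free = 80,000 as
 * input, the non-embedded variant gives VM_PAGE_FREE_TARGET(80000) =
 * 15 + 80000 / 80 = 1,015 pages, while the embedded variant (divisor
 * 100) gives 15 + 800 = 815 pages.
 */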
/*
 * The pageout daemon always starts running once vm_page_free_count
 * falls below vm_page_free_min.
 */

#ifndef VM_PAGE_FREE_MIN
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 200)
#else
#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
#endif
#endif /* VM_PAGE_FREE_MIN */
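/*
 * Illustrative example (hypothetical numbers): with free = 80,000, the
 * non-embedded variant gives VM_PAGE_FREE_MIN(80000) = 10 + 800 = 810
 * pages, guaranteeing the daemon wakes up well before the free list is
 * exhausted.
 */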
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_FREE_RESERVED_LIMIT    100
#define VM_PAGE_FREE_MIN_LIMIT         1500
#define VM_PAGE_FREE_TARGET_LIMIT      2000
#else
#define VM_PAGE_FREE_RESERVED_LIMIT    1700
#define VM_PAGE_FREE_MIN_LIMIT         3500
#define VM_PAGE_FREE_TARGET_LIMIT      4000
#endif
/*
 * When vm_page_free_count falls below vm_page_free_reserved,
 * only vm-privileged threads can allocate pages.  vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
 * operation by dipping into the reserved pool of pages.
 */

#ifndef VM_PAGE_FREE_RESERVED
#define VM_PAGE_FREE_RESERVED(n)        \
    ((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif /* VM_PAGE_FREE_RESERVED */
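/*
 * Illustrative example (n = 4 is hypothetical): with VM_PAGE_LAUNDRY_MAX
 * defined above as 128UL, VM_PAGE_FREE_RESERVED(4) = 6 * 128 + 4 = 772
 * pages are held back for vm-privileged threads.
 */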
/*
 * When we dequeue pages from the inactive list, they are
 * reactivated (ie, put back on the active queue) if referenced.
 * However, it is possible to starve the free list if other
 * processors are referencing pages faster than we can turn off
 * the referenced bit.  So we limit the number of reactivations
 * we will make per call of vm_pageout_scan().
 */
#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000

#ifndef VM_PAGE_REACTIVATE_LIMIT
#ifdef CONFIG_EMBEDDED
#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
#else
#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20, VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif
#endif /* VM_PAGE_REACTIVATE_LIMIT */
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM 1000
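/*
 * Illustrative example (hypothetical numbers): on a non-embedded
 * configuration with avail = 1,000,000 pages,
 * VM_PAGE_REACTIVATE_LIMIT(1000000) = MAX(1000000 / 20, 20000) = 50,000
 * reactivations per vm_pageout_scan() call; the embedded variant would
 * allow VM_PAGE_INACTIVE_TARGET(1000000) / 2 = 250,000.
 */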
extern boolean_t hibernate_cleaning_in_progress;

/*
 * Forward declarations for internal routines.
 */
struct cq {
    struct vm_pageout_queue *q;
    void                    *current_chead;
    char                    *scratch_buf;
    int                     id;
};

struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];
#if VM_PRESSURE_EVENTS
void vm_pressure_thread(void);

boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);

boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
#endif /* VM_PRESSURE_EVENTS */

void vm_pageout_garbage_collect(int);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(struct cq *cq);
static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);

extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);

void vm_tests(void); /* forward */
boolean_t vm_pageout_running = FALSE;

uint32_t vm_page_upl_tainted = 0;
uint32_t vm_page_iopl_tainted = 0;

#if !CONFIG_EMBEDDED
static boolean_t vm_pageout_waiter = FALSE;
#endif /* !CONFIG_EMBEDDED */
#if DEVELOPMENT || DEBUG
struct vm_pageout_debug vm_pageout_debug;
#endif /* DEVELOPMENT || DEBUG */

struct vm_pageout_vminfo vm_pageout_vminfo;
struct vm_pageout_state vm_pageout_state;
struct vm_config vm_config;

struct vm_pageout_queue vm_pageout_queue_internal __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));
struct vm_pageout_queue vm_pageout_queue_external __attribute__((aligned(VM_PACKED_POINTER_ALIGNMENT)));

int vm_upl_wait_for_pages = 0;
vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;

boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL;

int vm_debug_events = 0;

lck_grp_t vm_pageout_lck_grp;

#if CONFIG_MEMORYSTATUS
extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);

uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;
#endif /* CONFIG_MEMORYSTATUS */
/*
 *	Routine:	vm_pageout_object_terminate
 *	Purpose:
 *		Destroy the pageout_object, and perform all of the
 *		required cleanup actions.
 *
 *	In/Out conditions:
 *		The object must be locked, and will be returned locked.
 */
void
vm_pageout_object_terminate(
    vm_object_t object)
{
    vm_object_t shadow_object;

    /*
     * Deal with the deallocation (last reference) of a pageout object
     * (used for cleaning-in-place) by dropping the paging references/
     * freeing pages in the original object.
     */

    assert(object->pageout);
    shadow_object = object->shadow;
    vm_object_lock(shadow_object);
    while (!vm_page_queue_empty(&object->memq)) {
        vm_page_t          p, m;
        vm_object_offset_t offset;

        p = (vm_page_t) vm_page_queue_first(&object->memq);

        assert(p->vmp_private);
        assert(p->vmp_free_when_done);
        p->vmp_free_when_done = FALSE;
        assert(!p->vmp_cleaning);
        assert(!p->vmp_laundry);

        offset = p->vmp_offset;
        VM_PAGE_FREE(p);
        p = VM_PAGE_NULL;

        m = vm_page_lookup(shadow_object,
            offset + object->vo_shadow_offset);

        if (m == VM_PAGE_NULL) {
            continue;
        }

        assert((m->vmp_dirty) || (m->vmp_precious) ||
            (m->vmp_busy && m->vmp_cleaning));
        /*
         * Handle the trusted pager throttle.
         * Also decrement the burst throttle (if external).
         */
        vm_page_lock_queues();
        if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
            vm_pageout_throttle_up(m);
        }

        /*
         * Handle the "target" page(s). These pages are to be freed if
         * successfully cleaned. Target pages are always busy, and are
         * wired exactly once. The initial target pages are not mapped,
         * (so cannot be referenced or modified) but converted target
         * pages may have been modified between the selection as an
         * adjacent page and conversion to a target.
         */
        if (m->vmp_free_when_done) {
            assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
            assert(m->vmp_wire_count == 1);
            m->vmp_cleaning = FALSE;
            m->vmp_free_when_done = FALSE;
            /*
             * Revoke all access to the page. Since the object is
             * locked, and the page is busy, this prevents the page
             * from being dirtied after the pmap_disconnect() call
             * returns.
             *
             * Since the page is left "dirty" but "not modified", we
             * can detect whether the page was redirtied during
             * pageout by checking the modify state.
             */
            if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
                SET_PAGE_DIRTY(m, FALSE);
            } else {
                m->vmp_dirty = FALSE;
            }

            if (m->vmp_dirty) {
                vm_page_unwire(m, TRUE);        /* reactivates */
                VM_STAT_INCR(reactivations);
                PAGE_WAKEUP_DONE(m);
            } else {
                vm_page_free(m);        /* clears busy, etc. */
            }
            vm_page_unlock_queues();
            continue;
        }
        /*
         * Handle the "adjacent" pages. These pages were cleaned in
         * place, and should be left alone.
         * If prep_pin_count is nonzero, then someone is using the
         * page, so make it active.
         */
        if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
            if (m->vmp_reference) {
                vm_page_activate(m);
            } else {
                vm_page_deactivate(m);
            }
        }
        if (m->vmp_overwriting) {
            /*
             * the (COPY_OUT_FROM == FALSE) request_page_list case
             */
            if (m->vmp_busy) {
                /*
                 * We do not re-set m->vmp_dirty !
                 * The page was busy so no extraneous activity
                 * could have occurred. COPY_INTO is a read into the
                 * new pages. CLEAN_IN_PLACE does actually write
                 * out the pages but handling outside of this code
                 * will take care of resetting dirty. We clear the
                 * modify however for the Programmed I/O case.
                 */
                pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

                m->vmp_busy = FALSE;
                m->vmp_absent = FALSE;
            } else {
                /*
                 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
                 * Occurs when the original page was wired
                 * at the time of the list request
                 */
                assert(VM_PAGE_WIRED(m));
                vm_page_unwire(m, TRUE);        /* reactivates */
            }
            m->vmp_overwriting = FALSE;
        } else {
            m->vmp_dirty = FALSE;
        }
        m->vmp_cleaning = FALSE;

        /*
         * Wakeup any thread waiting for the page to be un-cleaning.
         */
        PAGE_WAKEUP(m);
        vm_page_unlock_queues();
    }
    /*
     * Account for the paging reference taken in vm_paging_object_allocate.
     */
    vm_object_activity_end(shadow_object);
    vm_object_unlock(shadow_object);

    assert(object->ref_count == 0);
    assert(object->paging_in_progress == 0);
    assert(object->activity_in_progress == 0);
    assert(object->resident_page_count == 0);
}
/*
 *	Routine:	vm_pageclean_setup
 *
 *	Purpose:	setup a page to be cleaned (made non-dirty), but not
 *			necessarily flushed from the VM page cache.
 *			This is accomplished by cleaning in place.
 *
 *	The page must not be busy, and new_object
 *	must be locked.
 */
static void
vm_pageclean_setup(
    vm_page_t          m,
    vm_page_t          new_m,
    vm_object_t        new_object,
    vm_object_offset_t new_offset)
{
    assert(!m->vmp_busy);
    assert(!m->vmp_cleaning);

    pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

    /*
     * Mark original page as cleaning in place.
     */
    m->vmp_cleaning = TRUE;
    SET_PAGE_DIRTY(m, FALSE);
    m->vmp_precious = FALSE;

    /*
     * Convert the fictitious page to a private shadow of
     * the real page.
     */
    assert(new_m->vmp_fictitious);
    assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
    new_m->vmp_fictitious = FALSE;
    new_m->vmp_private = TRUE;
    new_m->vmp_free_when_done = TRUE;
    VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));

    vm_page_lockspin_queues();
    vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
    vm_page_unlock_queues();

    vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
    assert(!new_m->vmp_wanted);
    new_m->vmp_busy = FALSE;
}
/*
 *	Routine:	vm_pageout_initialize_page
 *	Purpose:
 *		Causes the specified page to be initialized in
 *		the appropriate memory object. This routine is used to push
 *		pages into a copy-object when they are modified in the
 *		permanent object.
 *
 *		The page is moved to a temporary object and paged out.
 *
 *	In/out conditions:
 *		The page in question must not be on any pageout queues.
 *		The object to which it belongs must be locked.
 *		The page must be busy, but not hold a paging reference.
 *
 *	Implementation:
 *		Move this page to a completely new object.
 */
void
vm_pageout_initialize_page(
    vm_page_t m)
{
    vm_object_t        object;
    vm_object_offset_t paging_offset;
    memory_object_t    pager;

    assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

    object = VM_PAGE_OBJECT(m);

    assert(object->internal);

    /*
     * Verify that we really want to clean this page
     */
    assert(!m->vmp_absent);
    assert(!m->vmp_error);
    assert(m->vmp_dirty);

    /*
     * Create a paging reference to let us play with the object.
     */
    paging_offset = m->vmp_offset + object->paging_offset;

    if (m->vmp_absent || m->vmp_error || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
        panic("reservation without pageout?"); /* alan */

        VM_PAGE_FREE(m);
        vm_object_unlock(object);

        return;
    }

    /*
     * If there's no pager, then we can't clean the page. This should
     * never happen since this should be a copy object and therefore not
     * an external object, so the pager should always be there.
     */
    pager = object->pager;

    if (pager == MEMORY_OBJECT_NULL) {
        panic("missing pager for copy object");

        VM_PAGE_FREE(m);
        return;
    }

    /*
     * set the page for future call to vm_fault_list_request
     */
    pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
    SET_PAGE_DIRTY(m, FALSE);

    /*
     * keep the object from collapsing or terminating
     */
    vm_object_paging_begin(object);
    vm_object_unlock(object);

    /*
     * Write the data to its pager.
     * Note that the data is passed by naming the new object,
     * not a virtual address; the pager interface has been
     * manipulated to use the "internal memory" data type.
     * [The object reference from its allocation is donated
     * to the eventual recipient.]
     */
    memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);

    vm_object_lock(object);
    vm_object_paging_end(object);
}
/*
 * vm_pageout_cluster:
 *
 * Given a page, queue it to the appropriate I/O thread,
 * which will page it out and attempt to clean adjacent pages
 * in the same operation.
 *
 * The object and queues must be locked. We will take a
 * paging reference to prevent deallocation or collapse when we
 * release the object lock back at the call site.  The I/O thread
 * is responsible for consuming this reference.
 *
 * The page must not be on any pageout queue.
 */
#if DEVELOPMENT || DEBUG
vmct_stats_t vmct_stats;

int32_t vmct_active = 0;
uint64_t vm_compressor_epoch_start = 0;
uint64_t vm_compressor_epoch_stop = 0;

typedef enum vmct_state_t {
    VMCT_IDLE,
    VMCT_AWAKENED,
    VMCT_ACTIVE,
} vmct_state_t;
vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
#endif /* DEVELOPMENT || DEBUG */
void
vm_pageout_cluster(vm_page_t m)
{
    vm_object_t object = VM_PAGE_OBJECT(m);
    struct vm_pageout_queue *q;

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    vm_object_lock_assert_exclusive(object);

    /*
     * Only a certain kind of page is appreciated here.
     */
    assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
    assert(!m->vmp_cleaning && !m->vmp_laundry);
    assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);

    /*
     * protect the object from collapse or termination
     */
    vm_object_activity_begin(object);

    if (object->internal == TRUE) {
        assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

        m->vmp_busy = TRUE;

        q = &vm_pageout_queue_internal;
    } else {
        q = &vm_pageout_queue_external;
    }

    /*
     * pgo_laundry count is tied to the laundry bit
     */
    m->vmp_laundry = TRUE;
    q->pgo_laundry++;

    m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
    vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);

    if (q->pgo_idle == TRUE) {
        q->pgo_idle = FALSE;
        thread_wakeup((event_t) &q->pgo_pending);
    }
}
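#if 0
/*
 * Illustrative caller sketch (not part of the original source): shows the
 * locking contract described in the block comment above -- the object
 * lock and the page queues lock are both held, and the page is dirty or
 * precious, unwired, and not yet on any pageout queue.  The helper name
 * example_queue_for_pageout is hypothetical.
 */
static void
example_queue_for_pageout(vm_page_t m)
{
    vm_object_t object = VM_PAGE_OBJECT(m);

    vm_object_lock(object);         /* exclusive object lock first */
    vm_page_lock_queues();          /* then the page queues lock */

    if ((m->vmp_dirty || m->vmp_precious) &&
        !VM_PAGE_WIRED(m) &&
        m->vmp_q_state == VM_PAGE_NOT_ON_Q) {
        vm_pageout_cluster(m);      /* takes a paging reference */
    }
    vm_page_unlock_queues();
    vm_object_unlock(object);
}
#endif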
/*
 * A page is back from laundry or we are stealing it back from
 * the laundering state.  See if there are some pages waiting to
 * go to laundry and if we can let some of them go now.
 *
 * Object and page queues must be locked.
 */
void
vm_pageout_throttle_up(
    vm_page_t m)
{
    struct vm_pageout_queue *q;
    vm_object_t              m_object;

    m_object = VM_PAGE_OBJECT(m);

    assert(m_object != VM_OBJECT_NULL);
    assert(m_object != kernel_object);

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    vm_object_lock_assert_exclusive(m_object);

    if (m_object->internal == TRUE) {
        q = &vm_pageout_queue_internal;
    } else {
        q = &vm_pageout_queue_external;
    }

    if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
        vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
        m->vmp_q_state = VM_PAGE_NOT_ON_Q;

        VM_PAGE_ZERO_PAGEQ_ENTRY(m);

        vm_object_activity_end(m_object);

        VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
    }
    if (m->vmp_laundry == TRUE) {
        m->vmp_laundry = FALSE;
        q->pgo_laundry--;

        if (q->pgo_throttled == TRUE) {
            q->pgo_throttled = FALSE;
            thread_wakeup((event_t) &q->pgo_laundry);
        }
        if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
            q->pgo_draining = FALSE;
            thread_wakeup((event_t) (&q->pgo_laundry + 1));
        }
        VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
    }
}
static void
vm_pageout_throttle_up_batch(
    struct vm_pageout_queue *q,
    int batch_cnt)
{
    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

    VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);

    q->pgo_laundry -= batch_cnt;

    if (q->pgo_throttled == TRUE) {
        q->pgo_throttled = FALSE;
        thread_wakeup((event_t) &q->pgo_laundry);
    }
    if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
        q->pgo_draining = FALSE;
        thread_wakeup((event_t) (&q->pgo_laundry + 1));
    }
}
/*
 * VM memory pressure monitoring.
 *
 * vm_pageout_scan() keeps track of the number of pages it considers and
 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
 *
 * compute_memory_pressure() is called every second from compute_averages()
 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
 * of reclaimed pages in a new vm_pageout_stat[] bucket.
 *
 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
 * The caller provides the number of seconds ("nsecs") worth of statistics
 * it wants, up to 30 seconds.
 * It computes the number of pages reclaimed in the past "nsecs" seconds and
 * also returns the number of pages the system still needs to reclaim at this
 * moment in time.
 */
#if DEVELOPMENT || DEBUG
#define VM_PAGEOUT_STAT_SIZE    (30 * 8) + 1
#else
#define VM_PAGEOUT_STAT_SIZE    (1 * 8) + 1
#endif
struct vm_pageout_stat {
    unsigned long vm_page_active_count;
    unsigned long vm_page_speculative_count;
    unsigned long vm_page_inactive_count;
    unsigned long vm_page_anonymous_count;

    unsigned long vm_page_free_count;
    unsigned long vm_page_wire_count;
    unsigned long vm_page_compressor_count;

    unsigned long vm_page_pages_compressed;
    unsigned long vm_page_pageable_internal_count;
    unsigned long vm_page_pageable_external_count;
    unsigned long vm_page_xpmapped_external_count;

    unsigned int pages_grabbed;
    unsigned int pages_freed;

    unsigned int pages_compressed;
    unsigned int pages_grabbed_by_compressor;
    unsigned int failed_compressions;

    unsigned int pages_evicted;
    unsigned int pages_purged;

    unsigned int considered;
    unsigned int considered_bq_internal;
    unsigned int considered_bq_external;

    unsigned int skipped_external;
    unsigned int filecache_min_reactivations;

    unsigned int freed_speculative;
    unsigned int freed_cleaned;
    unsigned int freed_internal;
    unsigned int freed_external;

    unsigned int cleaned_dirty_external;
    unsigned int cleaned_dirty_internal;

    unsigned int inactive_referenced;
    unsigned int inactive_nolock;
    unsigned int reactivation_limit_exceeded;
    unsigned int forced_inactive_reclaim;

    unsigned int throttled_internal_q;
    unsigned int throttled_external_q;

    unsigned int phantom_ghosts_found;
    unsigned int phantom_ghosts_added;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };

unsigned int vm_pageout_stat_now = 0;

#define VM_PAGEOUT_STAT_BEFORE(i) \
    (((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
    (((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
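/*
 * Illustrative example: on a RELEASE kernel VM_PAGEOUT_STAT_SIZE is
 * (1 * 8) + 1 = 9 buckets, and on DEVELOPMENT/DEBUG kernels it is
 * (30 * 8) + 1 = 241.  The _BEFORE/_AFTER macros implement ring-buffer
 * indexing: with vm_pageout_stat_now == 0, VM_PAGEOUT_STAT_BEFORE(0)
 * wraps to VM_PAGEOUT_STAT_SIZE - 1, and VM_PAGEOUT_STAT_AFTER of the
 * last slot wraps back to 0.
 */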
#if VM_PAGE_BUCKETS_CHECK
int vm_page_buckets_check_interval = 80; /* in eighths of a second */
#endif /* VM_PAGE_BUCKETS_CHECK */
void
record_memory_pressure(void);
void
record_memory_pressure(void)
{
    unsigned int vm_pageout_next;

#if VM_PAGE_BUCKETS_CHECK
    /* check the consistency of VM page buckets at regular interval */
    static int counter = 0;
    if ((++counter % vm_page_buckets_check_interval) == 0) {
        vm_page_buckets_check();
    }
#endif /* VM_PAGE_BUCKETS_CHECK */

    vm_pageout_state.vm_memory_pressure =
        vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
        vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
        vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
        vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;

    commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure);

    /* move "now" forward */
    vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);

    bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));

    vm_pageout_stat_now = vm_pageout_next;
}
/*
 * mach_vm_ctl_page_free_wanted() is called indirectly, via
 * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
 * it must be safe in the restricted stackshot context. Locks and/or
 * blocking are not allowable.
 */
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
    unsigned int page_free_target, page_free_count, page_free_wanted;

    page_free_target = vm_page_free_target;
    page_free_count = vm_page_free_count;
    if (page_free_target > page_free_count) {
        page_free_wanted = page_free_target - page_free_count;
    } else {
        page_free_wanted = 0;
    }

    return page_free_wanted;
}
/*
 * mach_vm_pressure_monitor() is called when taking a stackshot, with
 * wait_for_pressure FALSE, so that code path must remain safe in the
 * restricted stackshot context. No blocking or locks are allowable.
 */
kern_return_t
mach_vm_pressure_monitor(
    boolean_t     wait_for_pressure,
    unsigned int  nsecs_monitored,
    unsigned int  *pages_reclaimed_p,
    unsigned int  *pages_wanted_p)
{
    wait_result_t wr;
    unsigned int  vm_pageout_then, vm_pageout_now;
    unsigned int  pages_reclaimed;
    unsigned int  units_of_monitor;

    units_of_monitor = 8 * nsecs_monitored;
    /*
     * We don't take the vm_page_queue_lock here because we don't want
     * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
     * thread when it's trying to reclaim memory.  We don't need fully
     * accurate monitoring anyway...
     */

    if (wait_for_pressure) {
        /* wait until there's memory pressure */
        while (vm_page_free_count >= vm_page_free_target) {
            wr = assert_wait((event_t) &vm_page_free_wanted,
                THREAD_INTERRUPTIBLE);
            if (wr == THREAD_WAITING) {
                wr = thread_block(THREAD_CONTINUE_NULL);
            }
            if (wr == THREAD_INTERRUPTED) {
                return KERN_ABORTED;
            }
            if (wr == THREAD_AWAKENED) {
                /*
                 * The memory pressure might have already
                 * been relieved but let's not block again
                 * and let's report that there was memory
                 * pressure at some point.
                 */
                break;
            }
        }
    }

    /* provide the number of pages the system wants to reclaim */
    if (pages_wanted_p != NULL) {
        *pages_wanted_p = mach_vm_ctl_page_free_wanted();
    }

    if (pages_reclaimed_p == NULL) {
        return KERN_SUCCESS;
    }

    /* provide number of pages reclaimed in the last "nsecs_monitored" */
    vm_pageout_now = vm_pageout_stat_now;
    pages_reclaimed = 0;
    for (vm_pageout_then =
        VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
        vm_pageout_then != vm_pageout_now &&
        units_of_monitor-- != 0;
        vm_pageout_then =
        VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
        pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
        pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
        pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
        pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
    }
    *pages_reclaimed_p = pages_reclaimed;

    return KERN_SUCCESS;
}
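#if 0
/*
 * Illustrative caller sketch (not part of the original source): queries
 * roughly the last 5 seconds of reclaim activity without blocking, the
 * way a stackshot-style caller would (wait_for_pressure == FALSE).  The
 * helper name example_query_vm_pressure is hypothetical.
 */
static void
example_query_vm_pressure(void)
{
    unsigned int  reclaimed = 0;
    unsigned int  wanted = 0;
    kern_return_t kr;

    kr = mach_vm_pressure_monitor(FALSE,    /* don't block for pressure */
        5,                                  /* nsecs_monitored */
        &reclaimed,                         /* pages reclaimed recently */
        &wanted);                           /* pages still wanted now */

    if (kr == KERN_SUCCESS) {
        printf("reclaimed %u pages recently, %u still wanted\n",
            reclaimed, wanted);
    }
}
#endif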
#if DEVELOPMENT || DEBUG

void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_disconnect_all_pages_active = FALSE;
void
vm_pageout_disconnect_all_pages()
{
    vm_page_lock_queues();

    if (vm_pageout_disconnect_all_pages_active == TRUE) {
        vm_page_unlock_queues();
        return;
    }
    vm_pageout_disconnect_all_pages_active = TRUE;
    vm_page_unlock_queues();

    vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
    vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
    vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);

    vm_pageout_disconnect_all_pages_active = FALSE;
}
void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
{
    vm_page_t   m;
    vm_object_t t_object = NULL;
    vm_object_t l_object = NULL;
    vm_object_t m_object = NULL;
    int         delayed_unlock = 0;
    int         try_failed_count = 0;
    int         disconnected_count = 0;
    int         paused_count = 0;
    int         object_locked_count = 0;

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
        q, qcount, 0, 0, 0);

    vm_page_lock_queues();

    while (qcount && !vm_page_queue_empty(q)) {
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

        m = (vm_page_t) vm_page_queue_first(q);
        m_object = VM_PAGE_OBJECT(m);

        /*
         * check to see if we currently are working
         * with the same object... if so, we've
         * already got the lock
         */
        if (m_object != l_object) {
            /*
             * the object associated with candidate page is
             * different from the one we were just working
             * with... dump the lock if we still own it
             */
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            if (m_object != t_object) {
                try_failed_count = 0;
            }

            /*
             * Try to lock object; since we've already got the
             * page queues lock, we can only 'try' for this one.
             * if the 'try' fails, we need to do a mutex_pause
             * to allow the owner of the object lock a chance to
             * run...
             */
            if (!vm_object_lock_try_scan(m_object)) {
                if (try_failed_count > 20) {
                    goto reenter_pg_on_q;
                }
                vm_page_unlock_queues();
                mutex_pause(try_failed_count++);
                vm_page_lock_queues();

                paused_count++;

                t_object = m_object;
                continue;
            }
            object_locked_count++;

            l_object = m_object;
        }
        if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
            /*
             * put it back on the head of its queue
             */
            goto reenter_pg_on_q;
        }
        if (m->vmp_pmapped == TRUE) {
            pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

            disconnected_count++;
        }
reenter_pg_on_q:
        vm_page_queue_remove(q, m, vmp_pageq);
        vm_page_queue_enter(q, m, vmp_pageq);

        qcount--;
        try_failed_count = 0;

        if (delayed_unlock++ > 128) {
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            lck_mtx_yield(&vm_page_queue_lock);
            delayed_unlock = 0;
        }
    }
    if (l_object != NULL) {
        vm_object_unlock(l_object);
        l_object = NULL;
    }
    vm_page_unlock_queues();

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
        q, disconnected_count, object_locked_count, paused_count, 0);
}
void
vm_pageout_page_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_anonymous_pages_active = FALSE;
void
vm_pageout_anonymous_pages()
{
    if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
        vm_page_lock_queues();

        if (vm_pageout_anonymous_pages_active == TRUE) {
            vm_page_unlock_queues();
            return;
        }
        vm_pageout_anonymous_pages_active = TRUE;
        vm_page_unlock_queues();

        vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
        vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
        vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);

        if (VM_CONFIG_SWAP_IS_PRESENT) {
            vm_consider_swapping();
        }

        vm_page_lock_queues();
        vm_pageout_anonymous_pages_active = FALSE;
        vm_page_unlock_queues();
    }
}
void
vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
{
    vm_page_t   m;
    vm_object_t t_object = NULL;
    vm_object_t l_object = NULL;
    vm_object_t m_object = NULL;
    int         delayed_unlock = 0;
    int         try_failed_count = 0;
    int         refmod_state;
    int         pmap_options;
    struct vm_pageout_queue *iq;
    ppnum_t     phys_page;

    iq = &vm_pageout_queue_internal;

    vm_page_lock_queues();

    while (qcount && !vm_page_queue_empty(q)) {
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

        if (VM_PAGE_Q_THROTTLED(iq)) {
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            iq->pgo_draining = TRUE;

            assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
            vm_page_unlock_queues();

            thread_block(THREAD_CONTINUE_NULL);

            vm_page_lock_queues();

            continue;
        }
        m = (vm_page_t) vm_page_queue_first(q);
        m_object = VM_PAGE_OBJECT(m);

        /*
         * check to see if we currently are working
         * with the same object... if so, we've
         * already got the lock
         */
        if (m_object != l_object) {
            if (!m_object->internal) {
                goto reenter_pg_on_q;
            }

            /*
             * the object associated with candidate page is
             * different from the one we were just working
             * with... dump the lock if we still own it
             */
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            if (m_object != t_object) {
                try_failed_count = 0;
            }

            /*
             * Try to lock object; since we've already got the
             * page queues lock, we can only 'try' for this one.
             * if the 'try' fails, we need to do a mutex_pause
             * to allow the owner of the object lock a chance to
             * run...
             */
            if (!vm_object_lock_try_scan(m_object)) {
                if (try_failed_count > 20) {
                    goto reenter_pg_on_q;
                }
                vm_page_unlock_queues();
                mutex_pause(try_failed_count++);
                vm_page_lock_queues();

                t_object = m_object;
                continue;
            }
            l_object = m_object;
        }
        if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
            /*
             * page is not to be cleaned
             * put it back on the head of its queue
             */
            goto reenter_pg_on_q;
        }
        phys_page = VM_PAGE_GET_PHYS_PAGE(m);

        if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
            refmod_state = pmap_get_refmod(phys_page);

            if (refmod_state & VM_MEM_REFERENCED) {
                m->vmp_reference = TRUE;
            }
            if (refmod_state & VM_MEM_MODIFIED) {
                SET_PAGE_DIRTY(m, FALSE);
            }
        }
        if (m->vmp_reference == TRUE) {
            m->vmp_reference = FALSE;
            pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
            goto reenter_pg_on_q;
        }
        if (m->vmp_pmapped == TRUE) {
            if (m->vmp_dirty || m->vmp_precious) {
                pmap_options = PMAP_OPTIONS_COMPRESSOR;
            } else {
                pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
            }
            refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
            if (refmod_state & VM_MEM_MODIFIED) {
                SET_PAGE_DIRTY(m, FALSE);
            }
        }

        if (!m->vmp_dirty && !m->vmp_precious) {
            vm_page_unlock_queues();
            VM_PAGE_FREE(m);
            vm_page_lock_queues();
            delayed_unlock = 0;

            goto next_pg;
        }
        if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
            if (!m_object->pager_initialized) {
                vm_page_unlock_queues();

                vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);

                if (!m_object->pager_initialized) {
                    vm_object_compressor_pager_create(m_object);
                }

                vm_page_lock_queues();
                delayed_unlock = 0;
            }
            if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
                goto reenter_pg_on_q;
            }
            /*
             * vm_object_compressor_pager_create will drop the object lock
             * which means 'm' may no longer be valid to use
             */
            continue;
        }
        /*
         * we've already factored out pages in the laundry which
         * means this page can't be on the pageout queue so it's
         * safe to do the vm_page_queues_remove
         */
        vm_page_queues_remove(m, TRUE);

        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

        vm_pageout_cluster(m);

        goto next_pg;

reenter_pg_on_q:
        vm_page_queue_remove(q, m, vmp_pageq);
        vm_page_queue_enter(q, m, vmp_pageq);
next_pg:
        qcount--;
        try_failed_count = 0;

        if (delayed_unlock++ > 128) {
            if (l_object != NULL) {
                vm_object_unlock(l_object);
                l_object = NULL;
            }
            lck_mtx_yield(&vm_page_queue_lock);
            delayed_unlock = 0;
        }
    }
    if (l_object != NULL) {
        vm_object_unlock(l_object);
        l_object = NULL;
    }
    vm_page_unlock_queues();
}
/*
 * function in BSD to apply I/O throttle to the pageout thread
 */
extern void vm_pageout_io_throttle(void);

#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj)            \
	MACRO_BEGIN                                             \
	/*                                                      \
	 * If a "reusable" page somehow made it back into       \
	 * the active queue, it's been re-used and is not       \
	 * quite re-usable.                                     \
	 * If the VM object was "all_reusable", consider it     \
	 * as "all re-used" instead of converting it to         \
	 * "partially re-used", which could be expensive.       \
	 */                                                     \
	assert(VM_PAGE_OBJECT((m)) == (obj));                   \
	if ((m)->vmp_reusable ||                                \
	    (obj)->all_reusable) {                              \
	        vm_object_reuse_pages((obj),                    \
	            (m)->vmp_offset,                            \
	            (m)->vmp_offset + PAGE_SIZE_64,             \
	            FALSE);                                     \
	}                                                       \
	MACRO_END

#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT         64
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX     1024

#define FCS_IDLE                0
#define FCS_DELAYED             1
#define FCS_DEADLOCK_DETECTED   2

struct flow_control {
    int             state;
    mach_timespec_t ts;
};

#if CONFIG_BACKGROUND_QUEUE
uint64_t vm_pageout_rejected_bq_internal = 0;
uint64_t vm_pageout_rejected_bq_external = 0;
uint64_t vm_pageout_skipped_bq_internal = 0;
#endif /* CONFIG_BACKGROUND_QUEUE */

#define ANONS_GRABBED_LIMIT 2
static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);
static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);

#define VM_PAGEOUT_PB_NO_ACTION                         0
#define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1
#define VM_PAGEOUT_PB_THREAD_YIELD                      2
static void
vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
{
    vm_page_unlock_queues();

    VM_DEBUG_CONSTANT_EVENT(
        vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
        vm_page_free_count, 0, 0, 1);

    vm_page_free_list(*local_freeq, TRUE);

    VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
        vm_page_free_count, *local_freed, 0, 1);

    *local_freeq = NULL;
    *local_freed = 0;

    vm_page_lock_queues();

    lck_mtx_yield(&vm_page_queue_lock);

    *delayed_unlock = 1;
}
static void
vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
    vm_page_t *local_freeq, int *local_freed, int action)
{
    vm_page_unlock_queues();

    if (*object != NULL) {
        vm_object_unlock(*object);
        *object = NULL;
    }
    if (*local_freeq) {
        vm_page_free_list(*local_freeq, TRUE);

        *local_freeq = NULL;
        *local_freed = 0;
    }
    *delayed_unlock = 1;

    switch (action) {
    case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
        vm_consider_waking_compactor_swapper();
        break;
    case VM_PAGEOUT_PB_THREAD_YIELD:
        thread_yield_internal(1);
        break;
    case VM_PAGEOUT_PB_NO_ACTION:
    default:
        break;
    }
    vm_page_lock_queues();
}
static struct vm_pageout_vminfo last;

uint64_t last_vm_page_pages_grabbed = 0;

extern uint32_t c_segment_pages_compressed;

extern uint64_t shared_region_pager_reclaimed;
extern struct memory_object_pager_ops shared_region_pager_ops;
void
update_vm_info(void)
{
    uint64_t tmp;

    vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count;

    vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT;

    vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count;
    vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count;
    tmp = vm_pageout_vminfo.vm_pageout_considered_page;
    vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page);
    last.vm_pageout_considered_page = tmp;

    tmp = vm_pageout_vminfo.vm_pageout_compressions;
    vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp - last.vm_pageout_compressions);
    last.vm_pageout_compressions = tmp;

    tmp = vm_pageout_vminfo.vm_compressor_failed;
    vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed);
    last.vm_compressor_failed = tmp;

    tmp = vm_pageout_vminfo.vm_compressor_pages_grabbed;
    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp - last.vm_compressor_pages_grabbed);
    last.vm_compressor_pages_grabbed = tmp;

    tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost;
    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost);
    last.vm_phantom_cache_found_ghost = tmp;

    tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost;
    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost);
    last.vm_phantom_cache_added_ghost = tmp;

    tmp = get_pages_grabbed_count();
    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp - last_vm_page_pages_grabbed);
    last_vm_page_pages_grabbed = tmp;

    tmp = vm_pageout_vminfo.vm_page_pages_freed;
    vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed);
    last.vm_page_pages_freed = tmp;

    if (vm_pageout_stats[vm_pageout_stat_now].considered) {
        tmp = vm_pageout_vminfo.vm_pageout_pages_evicted;
        vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted);
        last.vm_pageout_pages_evicted = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_pages_purged;
        vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged);
        last.vm_pageout_pages_purged = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_freed_speculative;
        vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative);
        last.vm_pageout_freed_speculative = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_freed_external;
        vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external);
        last.vm_pageout_freed_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced;
        vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced);
        last.vm_pageout_inactive_referenced = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external;
        vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external);
        last.vm_pageout_scan_inactive_throttled_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external;
        vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external);
        last.vm_pageout_inactive_dirty_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned;
        vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned);
        last.vm_pageout_freed_cleaned = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock;
        vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock);
        last.vm_pageout_inactive_nolock = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal;
        vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal);
        last.vm_pageout_scan_inactive_throttled_internal = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_skipped_external;
        vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external);
        last.vm_pageout_skipped_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded;
        vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded);
        last.vm_pageout_reactivation_limit_exceeded = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim;
        vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim);
        last.vm_pageout_inactive_force_reclaim = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_freed_internal;
        vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal);
        last.vm_pageout_freed_internal = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal;
        vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal);
        last.vm_pageout_considered_bq_internal = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external;
        vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external);
        last.vm_pageout_considered_bq_external = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated;
        vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated);
        last.vm_pageout_filecache_min_reactivated = tmp;

        tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal;
        vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal);
        last.vm_pageout_inactive_dirty_internal = tmp;
    }
    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count,
        0);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count,
        0,
        0);

    KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count,
        vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count,
        0);

    if (vm_pageout_stats[vm_pageout_stat_now].considered ||
        vm_pageout_stats[vm_pageout_stat_now].pages_compressed ||
        vm_pageout_stats[vm_pageout_stat_now].failed_compressions) {
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].considered,
            vm_pageout_stats[vm_pageout_stat_now].freed_speculative,
            vm_pageout_stats[vm_pageout_stat_now].freed_external,
            vm_pageout_stats[vm_pageout_stat_now].inactive_referenced,
            0);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].throttled_external_q,
            vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external,
            vm_pageout_stats[vm_pageout_stat_now].freed_cleaned,
            vm_pageout_stats[vm_pageout_stat_now].inactive_nolock,
            0);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q,
            vm_pageout_stats[vm_pageout_stat_now].pages_compressed,
            vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor,
            vm_pageout_stats[vm_pageout_stat_now].skipped_external,
            0);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded,
            vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim,
            vm_pageout_stats[vm_pageout_stat_now].failed_compressions,
            vm_pageout_stats[vm_pageout_stat_now].freed_internal,
            0);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal,
            vm_pageout_stats[vm_pageout_stat_now].considered_bq_external,
            vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations,
            vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal,
            0);

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE,
            vm_pageout_stats[vm_pageout_stat_now].pages_grabbed,
            vm_pageout_stats[vm_pageout_stat_now].pages_freed,
            vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found,
            vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added,
            0);
    }
    record_memory_pressure();
}
extern boolean_t hibernation_vmqueues_inspection;

/*
 * Return values for functions called by vm_pageout_scan
 * that control its flow.
 *
 * PROCEED -- vm_pageout_scan will keep making forward progress.
 * DONE_RETURN -- page demand satisfied, work is done -> vm_pageout_scan returns.
 * NEXT_ITERATION -- restart the 'for' loop in vm_pageout_scan aka continue.
 */

#define VM_PAGEOUT_SCAN_PROCEED                 (0)
#define VM_PAGEOUT_SCAN_DONE_RETURN             (1)
#define VM_PAGEOUT_SCAN_NEXT_ITERATION          (2)
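#if 0
/*
 * Illustrative sketch (not the actual vm_pageout_scan code): shows how a
 * caller is meant to react to the three return codes defined above.
 * vps_example_helper and vps_example_dispatch are hypothetical names
 * standing in for the vps_* helpers below and their caller.
 */
static void
vps_example_dispatch(void)
{
    for (;;) {
        int ret = vps_example_helper();

        if (ret == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
            continue;       /* restart the main 'for' loop */
        }
        if (ret == VM_PAGEOUT_SCAN_DONE_RETURN) {
            return;         /* page demand satisfied */
        }
        /* VM_PAGEOUT_SCAN_PROCEED: keep making forward progress */
        break;
    }
}
#endif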
/*
 * This function is called only from vm_pageout_scan and
 * it moves overflow secluded pages (one-at-a-time) to the
 * batched 'local' free Q or active Q.
 */
static void
vps_deal_with_secluded_page_overflow(vm_page_t *local_freeq, int *local_freed)
{
#if CONFIG_SECLUDED_MEMORY
    /*
     * Deal with secluded_q overflow.
     */
    if (vm_page_secluded_count > vm_page_secluded_target) {
        vm_page_t secluded_page;

        /*
         * SECLUDED_AGING_BEFORE_ACTIVE:
         * Excess secluded pages go to the active queue and
         * will later go to the inactive queue.
         */
        assert((vm_page_secluded_count_free +
            vm_page_secluded_count_inuse) ==
            vm_page_secluded_count);
        secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
        assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);

        vm_page_queues_remove(secluded_page, FALSE);
        assert(!secluded_page->vmp_fictitious);
        assert(!VM_PAGE_WIRED(secluded_page));

        if (secluded_page->vmp_object == 0) {
            /* transfer to free queue */
            assert(secluded_page->vmp_busy);
            secluded_page->vmp_snext = *local_freeq;
            *local_freeq = secluded_page;
            *local_freed += 1;
        } else {
            /* transfer to head of active queue */
            vm_page_enqueue_active(secluded_page, FALSE);
            secluded_page = VM_PAGE_NULL;
        }
    }
#else /* CONFIG_SECLUDED_MEMORY */

#pragma unused(local_freeq)
#pragma unused(local_freed)

#endif /* CONFIG_SECLUDED_MEMORY */
}
/*
 * This function is called only from vm_pageout_scan and
 * it initializes the loop targets for vm_pageout_scan().
 */
static void
vps_init_page_targets(void)
{
    /*
     * LD TODO: Other page targets should be calculated here too.
     */
    vm_page_anonymous_min = vm_page_inactive_target / 20;

    if (vm_pageout_state.vm_page_speculative_percentage > 50) {
        vm_pageout_state.vm_page_speculative_percentage = 50;
    } else if (vm_pageout_state.vm_page_speculative_percentage <= 0) {
        vm_pageout_state.vm_page_speculative_percentage = 1;
    }

    vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
        vm_page_inactive_count);
}
/*
 * This function is called only from vm_pageout_scan and
 * it purges a single VM object at-a-time and will either
 * make vm_pageout_scan() restart the loop or keep moving forward.
 */
static int
vps_purge_object()
{
    int force_purge;

    assert(available_for_purge >= 0);
    force_purge = 0; /* no force-purging */

#if VM_PRESSURE_EVENTS
    vm_pressure_level_t pressure_level;

    pressure_level = memorystatus_vm_pressure_level;

    if (pressure_level > kVMPressureNormal) {
        if (pressure_level >= kVMPressureCritical) {
            force_purge = vm_pageout_state.memorystatus_purge_on_critical;
        } else if (pressure_level >= kVMPressureUrgent) {
            force_purge = vm_pageout_state.memorystatus_purge_on_urgent;
        } else if (pressure_level >= kVMPressureWarning) {
            force_purge = vm_pageout_state.memorystatus_purge_on_warning;
        }
    }
#endif /* VM_PRESSURE_EVENTS */

    if (available_for_purge || force_purge) {
        memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);

        VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
        if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
            VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1);
            VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
            memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);

            return VM_PAGEOUT_SCAN_NEXT_ITERATION;
        }
        VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
        memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
    }

    return VM_PAGEOUT_SCAN_PROCEED;
}
/*
 * This function is called only from vm_pageout_scan and
 * it will try to age the next speculative Q if the oldest
 * one is empty.
 */
static int
vps_age_speculative_queue(boolean_t force_speculative_aging)
{
#define DELAY_SPECULATIVE_AGE   1000

	/*
	 * try to pull pages from the aging bins...
	 * see vm_page.h for an explanation of how
	 * this mechanism works
	 */
	boolean_t                       can_steal = FALSE;
	int                             num_scanned_queues;
	static int                      delay_speculative_age = 0; /* depends on the # of times we go through the main pageout_scan loop.*/
	mach_timespec_t                 ts;
	clock_sec_t                     sec;
	clock_nsec_t                    nsec;
	struct vm_speculative_age_q     *aq;
	struct vm_speculative_age_q     *sq;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	aq = &vm_page_queue_speculative[speculative_steal_index];

	num_scanned_queues = 0;
	while (vm_page_queue_empty(&aq->age_q) &&
	    num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
		speculative_steal_index++;

		if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
		}

		aq = &vm_page_queue_speculative[speculative_steal_index];
	}

	if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
		/*
		 * XXX We've scanned all the speculative
		 * queues but still haven't found one
		 * that is not empty, even though
		 * vm_page_speculative_count is not 0.
		 */
		if (!vm_page_queue_empty(&sq->age_q)) {
			return VM_PAGEOUT_SCAN_NEXT_ITERATION;
		}
#if DEVELOPMENT || DEBUG
		panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
#endif
		/* readjust... */
		vm_page_speculative_count = 0;
		/* ... and continue */
		return VM_PAGEOUT_SCAN_NEXT_ITERATION;
	}

	if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) {
		can_steal = TRUE;
	} else {
		if (!delay_speculative_age) {
			mach_timespec_t ts_fully_aged;

			ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000;
			ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000)
			    * 1000 * NSEC_PER_USEC;

			ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);

			clock_get_system_nanotime(&sec, &nsec);
			ts.tv_sec = (unsigned int) sec;
			ts.tv_nsec = nsec;

			if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) {
				can_steal = TRUE;
			} else {
				delay_speculative_age++;
			}
		} else {
			delay_speculative_age++;
			if (delay_speculative_age == DELAY_SPECULATIVE_AGE) {
				delay_speculative_age = 0;
			}
		}
	}
	if (can_steal == TRUE) {
		vm_page_speculate_ageit(aq);
	}

	return VM_PAGEOUT_SCAN_PROCEED;
}
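
/*
 * Worked example of the aging math above (values are illustrative only,
 * assuming vm_page_speculative_q_age_ms == 500 and
 * VM_PAGE_MAX_SPECULATIVE_AGE_Q == 10):
 *
 *	ts_fully_aged.tv_sec  = (10 * 500) / 1000 = 5 seconds
 *	ts_fully_aged.tv_nsec = ((10 * 500) % 1000) * 1000 * NSEC_PER_USEC = 0
 *
 * i.e. a speculative bin becomes stealable roughly MAX_Q * q_age_ms after
 * the timestamp recorded in aq->age_ts, unless the speculative target is
 * exceeded or aging is being forced, in which case we steal immediately.
 */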
/*
 * This function is called only from vm_pageout_scan and
 * it evicts a single VM object from the cache.
 */
static int
vps_object_cache_evict(vm_object_t *object_to_unlock)
{
	static int                      cache_evict_throttle = 0;
	struct vm_speculative_age_q     *sq;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
		int     pages_evicted;

		if (*object_to_unlock != NULL) {
			vm_object_unlock(*object_to_unlock);
			*object_to_unlock = NULL;
		}
		KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);

		pages_evicted = vm_object_cache_evict(100, 10);

		KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0);

		if (pages_evicted) {
			vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted;

			VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
			    vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0);
			memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);

			/*
			 * we just freed up to 100 pages,
			 * so go back to the top of the main loop
			 * and re-evaluate the memory situation
			 */
			return VM_PAGEOUT_SCAN_NEXT_ITERATION;
		} else {
			cache_evict_throttle = 1000;
		}
	}
	if (cache_evict_throttle) {
		cache_evict_throttle--;
	}

	return VM_PAGEOUT_SCAN_PROCEED;
}
/*
 * This function is called only from vm_pageout_scan and
 * it calculates the filecache min. that needs to be maintained
 * as we start to steal pages.
 */
static void
vps_calculate_filecache_min(void)
{
	int divisor = vm_pageout_state.vm_page_filecache_min_divisor;

#if CONFIG_JETSAM
	/*
	 * don't let the filecache_min fall below 15% of available memory
	 * on systems with an active compressor that isn't nearing its
	 * limits w/r to accepting new data
	 *
	 * on systems w/o the compressor/swapper, the filecache is always
	 * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
	 * since most (if not all) of the anonymous pages are in the
	 * throttled queue (which isn't counted as available) which
	 * effectively disables this filter
	 */
	if (vm_compressor_low_on_space() || divisor == 0) {
		vm_pageout_state.vm_page_filecache_min = 0;
	} else {
		vm_pageout_state.vm_page_filecache_min =
		    ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
	}
#else
	if (vm_compressor_out_of_space() || divisor == 0) {
		vm_pageout_state.vm_page_filecache_min = 0;
	} else {
		/*
		 * don't let the filecache_min fall below the specified critical level
		 */
		vm_pageout_state.vm_page_filecache_min =
		    ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
	}
#endif
	if (vm_page_free_count < (vm_page_free_reserved / 4)) {
		vm_pageout_state.vm_page_filecache_min = 0;
	}
}
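
/*
 * Illustrative arithmetic for the divisor math above (the numbers are
 * hypothetical, not defaults): with vm_page_filecache_min_divisor == 66 and
 * AVAILABLE_NON_COMPRESSED_MEMORY == 100000 pages,
 *
 *	vm_page_filecache_min = (100000 * 10) / 66 = ~15151 pages
 *
 * i.e. roughly 15% of the available non-compressed pool, which matches the
 * "don't let the filecache_min fall below 15%" intent described above.
 */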
/*
 * This function is called only from vm_pageout_scan and
 * it updates the flow control time to detect if VM pageout scan
 * isn't making progress.
 */
static void
vps_flow_control_reset_deadlock_timer(struct flow_control *flow_control)
{
	mach_timespec_t ts;
	clock_sec_t     sec;
	clock_nsec_t    nsec;

	ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
	ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
	clock_get_system_nanotime(&sec, &nsec);
	flow_control->ts.tv_sec = (unsigned int) sec;
	flow_control->ts.tv_nsec = nsec;
	ADD_MACH_TIMESPEC(&flow_control->ts, &ts);

	flow_control->state = FCS_DELAYED;

	vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++;
}
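
/*
 * Example of the ms-to-timespec conversion above (hypothetical value, not a
 * default): with vm_pageout_deadlock_wait == 300 ms,
 *
 *	ts.tv_sec  = 300 / 1000 = 0
 *	ts.tv_nsec = (300 % 1000) * 1000 * NSEC_PER_USEC = 300000000 ns
 *
 * so the deadline stored in flow_control->ts ends up being "now + 300 ms",
 * after which vps_flow_control() promotes FCS_DELAYED to
 * FCS_DEADLOCK_DETECTED.
 */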
/*
 * This function is called only from vm_pageout_scan and
 * it is the flow control logic of VM pageout scan which
 * controls if it should block and for how long.
 * Any blocking of vm_pageout_scan happens ONLY in this function.
 */
static int
vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_object_t *object, int *delayed_unlock,
    vm_page_t *local_freeq, int *local_freed, int *vm_pageout_deadlock_target, unsigned int inactive_burst_count)
{
	boolean_t       exceeded_burst_throttle = FALSE;
	unsigned int    msecs = 0;
	uint32_t        inactive_external_count;
	mach_timespec_t ts;
	clock_sec_t     sec;
	clock_nsec_t    nsec;
	struct vm_pageout_queue *iq;
	struct vm_pageout_queue *eq;
	struct vm_speculative_age_q *sq;

	iq = &vm_pageout_queue_internal;
	eq = &vm_pageout_queue_external;
	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	/*
	 * Sometimes we have to pause:
	 *	1) No inactive pages - nothing to do.
	 *	2) Loop control - no acceptable pages found on the inactive queue
	 *	   within the last vm_pageout_burst_inactive_throttle iterations
	 *	3) Flow control - default pageout queue is full
	 */
	if (vm_page_queue_empty(&vm_page_queue_inactive) &&
	    vm_page_queue_empty(&vm_page_queue_anonymous) &&
	    vm_page_queue_empty(&vm_page_queue_cleaned) &&
	    vm_page_queue_empty(&sq->age_q)) {
		VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1);
		msecs = vm_pageout_state.vm_pageout_empty_wait;
	} else if (inactive_burst_count >=
	    MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle,
	    (vm_page_inactive_count +
	    vm_page_speculative_count))) {
		VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1);
		msecs = vm_pageout_state.vm_pageout_burst_wait;

		exceeded_burst_throttle = TRUE;
	} else if (VM_PAGE_Q_THROTTLED(iq) &&
	    VM_DYNAMIC_PAGING_ENABLED()) {
		switch (flow_control->state) {
		case FCS_IDLE:
			if ((vm_page_free_count + *local_freed) < vm_page_free_target &&
			    vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
				/*
				 * since the compressor is running independently of vm_pageout_scan
				 * let's not wait for it just yet... as long as we have a healthy supply
				 * of filecache pages to work with, let's keep stealing those.
				 */
				inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;

				if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min &&
				    (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
					*anons_grabbed = ANONS_GRABBED_LIMIT;
					VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1);
					return VM_PAGEOUT_SCAN_PROCEED;
				}
			}

			vps_flow_control_reset_deadlock_timer(flow_control);
			msecs = vm_pageout_state.vm_pageout_deadlock_wait;

			break;

		case FCS_DELAYED:
			clock_get_system_nanotime(&sec, &nsec);
			ts.tv_sec = (unsigned int) sec;
			ts.tv_nsec = nsec;

			if (CMP_MACH_TIMESPEC(&ts, &flow_control->ts) >= 0) {
				/*
				 * the pageout thread for the default pager is potentially
				 * deadlocked since the
				 * default pager queue has been throttled for more than the
				 * allowable time... we need to move some clean pages or dirty
				 * pages belonging to the external pagers if they aren't throttled
				 * vm_page_free_wanted represents the number of threads currently
				 * blocked waiting for pages... we'll move one page for each of
				 * these plus a fixed amount to break the logjam... once we're done
				 * moving this number of pages, we'll re-enter the FCS_DELAYED state
				 * with a new timeout target since we have no way of knowing
				 * whether we've broken the deadlock except through observation
				 * of the queue associated with the default pager... we need to
				 * stop moving pages and allow the system to run to see what
				 * state it settles into.
				 */
				*vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief +
				    vm_page_free_wanted + vm_page_free_wanted_privileged;
				VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1);
				flow_control->state = FCS_DEADLOCK_DETECTED;
				thread_wakeup((event_t) &vm_pageout_garbage_collect);
				return VM_PAGEOUT_SCAN_PROCEED;
			}
			/*
			 * just resniff instead of trying
			 * to compute a new delay time... we're going to be
			 * awakened immediately upon a laundry completion,
			 * so we won't wait any longer than necessary
			 */
			msecs = vm_pageout_state.vm_pageout_idle_wait;
			break;

		case FCS_DEADLOCK_DETECTED:
			if (*vm_pageout_deadlock_target) {
				return VM_PAGEOUT_SCAN_PROCEED;
			}

			vps_flow_control_reset_deadlock_timer(flow_control);
			msecs = vm_pageout_state.vm_pageout_deadlock_wait;

			break;
		}
	} else {
		/*
		 * No need to pause...
		 */
		return VM_PAGEOUT_SCAN_PROCEED;
	}

	vm_pageout_scan_wants_object = VM_OBJECT_NULL;

	vm_pageout_prepare_to_block(object, delayed_unlock, local_freeq, local_freed,
	    VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);

	if (vm_page_free_count >= vm_page_free_target) {
		/*
		 * we're here because
		 *  1) someone else freed up some pages while we had
		 *     the queues unlocked above
		 * and we've hit one of the 3 conditions that
		 * cause us to pause the pageout scan thread
		 *
		 * since we already have enough free pages,
		 * let's avoid stalling and return normally
		 *
		 * before we return, make sure the pageout I/O threads
		 * are running throttled in case there are still requests
		 * in the laundry... since we have enough free pages
		 * we don't need the laundry to be cleaned in a timely
		 * fashion... so let's avoid interfering with foreground
		 * activity
		 *
		 * we don't want to hold vm_page_queue_free_lock when
		 * calling vm_pageout_adjust_eq_iothrottle (since it
		 * may cause other locks to be taken), we do the initial
		 * check outside of the lock.  Once we take the lock,
		 * we recheck the condition since it may have changed.
		 * if it has, no problem, we will make the threads
		 * non-throttled before actually blocking
		 */
		vm_pageout_adjust_eq_iothrottle(eq, TRUE);

		lck_mtx_lock(&vm_page_queue_free_lock);

		if (vm_page_free_count >= vm_page_free_target &&
		    (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
			return VM_PAGEOUT_SCAN_DONE_RETURN;
		}
		lck_mtx_unlock(&vm_page_queue_free_lock);
	}

	if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
		/*
		 * we're most likely about to block due to one of
		 * the 3 conditions that cause vm_pageout_scan to
		 * not be able to make forward progress w/r
		 * to providing new pages to the free queue,
		 * so unthrottle the I/O threads in case we
		 * have laundry to be cleaned... it needs
		 * to be completed ASAP.
		 *
		 * even if we don't block, we want the io threads
		 * running unthrottled since the sum of free +
		 * clean pages is still under our free target
		 */
		vm_pageout_adjust_eq_iothrottle(eq, FALSE);
	}
	if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
		/*
		 * if we get here we're below our free target and
		 * we're stalling due to a full laundry queue or
		 * we don't have any inactive pages other then
		 * those in the clean queue...
		 * however, we have pages on the clean queue that
		 * can be moved to the free queue, so let's not
		 * stall the pageout scan
		 */
		flow_control->state = FCS_IDLE;
		return VM_PAGEOUT_SCAN_PROCEED;
	}
	if (flow_control->state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) {
		flow_control->state = FCS_IDLE;
		return VM_PAGEOUT_SCAN_PROCEED;
	}

	VM_CHECK_MEMORYSTATUS;

	if (flow_control->state != FCS_IDLE) {
		VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1);
	}

	iq->pgo_throttled = TRUE;
	assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);

	counter(c_vm_pageout_scan_block++);

	vm_page_unlock_queues();

	assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);

	VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
	    iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
	memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);

	thread_block(THREAD_CONTINUE_NULL);

	VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
	    iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
	memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);

	vm_page_lock_queues();

	iq->pgo_throttled = FALSE;

	vps_init_page_targets();

	return VM_PAGEOUT_SCAN_NEXT_ITERATION;
}
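
/*
 * Summary of the contract above: vps_flow_control() returns
 * VM_PAGEOUT_SCAN_PROCEED when no pause is needed (or the throttle can be
 * deferred in favor of stealing filecache pages), VM_PAGEOUT_SCAN_DONE_RETURN
 * when the free target has been met with no waiters, and
 * VM_PAGEOUT_SCAN_NEXT_ITERATION after it has actually blocked on the
 * internal queue's laundry event, at which point the page targets are
 * re-initialized via vps_init_page_targets().
 */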
/*
 * This function is called only from vm_pageout_scan and
 * it will find and return the most appropriate page to be
 * reclaimed.
 */
static int
vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous,
    boolean_t *is_page_from_bg_q, unsigned int reactivated_this_call)
{
	vm_page_t                       m = NULL;
	vm_object_t                     m_object = VM_OBJECT_NULL;
	uint32_t                        inactive_external_count;
	struct vm_speculative_age_q     *sq;
	struct vm_pageout_queue         *iq;
	int                             retval = VM_PAGEOUT_SCAN_PROCEED;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
	iq = &vm_pageout_queue_internal;

	while (1) {
		*is_page_from_bg_q = FALSE;

		m = NULL;
		m_object = VM_OBJECT_NULL;

		if (VM_DYNAMIC_PAGING_ENABLED()) {
			assert(vm_page_throttled_count == 0);
			assert(vm_page_queue_empty(&vm_page_queue_throttled));
		}

		/*
		 * Try for a clean-queue inactive page.
		 * These are pages that vm_pageout_scan tried to steal earlier, but
		 * were dirty and had to be cleaned.  Pick them up now that they are clean.
		 */
		if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
			m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);

			assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);

			break;
		}

		/*
		 * The next most eligible pages are ones we paged in speculatively,
		 * but which have not yet been touched and have been aged out.
		 */
		if (!vm_page_queue_empty(&sq->age_q)) {
			m = (vm_page_t) vm_page_queue_first(&sq->age_q);

			assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);

			if (!m->vmp_dirty || force_anonymous == FALSE) {
				break;
			} else {
				m = NULL;
			}
		}

#if CONFIG_BACKGROUND_QUEUE
		if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
			vm_object_t     bg_m_object = NULL;

			m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);

			bg_m_object = VM_PAGE_OBJECT(m);

			if (!VM_PAGE_PAGEABLE(m)) {
				/*
				 * This page is on the background queue
				 * but not on a pageable queue.  This is
				 * likely a transient state and whoever
				 * took it out of its pageable queue
				 * will likely put it back on a pageable
				 * queue soon but we can't deal with it
				 * at this point, so let's ignore this
				 * page.
				 */
			} else if (force_anonymous == FALSE || bg_m_object->internal) {
				if (bg_m_object->internal &&
				    (VM_PAGE_Q_THROTTLED(iq) ||
				    vm_compressor_out_of_space() == TRUE ||
				    vm_page_free_count < (vm_page_free_reserved / 4))) {
					vm_pageout_skipped_bq_internal++;
				} else {
					*is_page_from_bg_q = TRUE;

					if (bg_m_object->internal) {
						vm_pageout_vminfo.vm_pageout_considered_bq_internal++;
					} else {
						vm_pageout_vminfo.vm_pageout_considered_bq_external++;
					}
					break;
				}
			}
		}
#endif /* CONFIG_BACKGROUND_QUEUE */

		inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;

		if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) ||
		    (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
			*grab_anonymous = TRUE;
			*anons_grabbed = 0;

			vm_pageout_vminfo.vm_pageout_skipped_external++;
			goto want_anonymous;
		}
		*grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);

#if CONFIG_JETSAM
		/* If the file-backed pool has accumulated
		 * significantly more pages than the jetsam
		 * threshold, prefer to reclaim those
		 * inline to minimise compute overhead of reclaiming
		 * anonymous pages.
		 * This calculation does not account for the CPU local
		 * external page queues, as those are expected to be
		 * much smaller relative to the global pools.
		 */
		struct vm_pageout_queue *eq = &vm_pageout_queue_external;

		if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
			if (vm_page_pageable_external_count >
			    vm_pageout_state.vm_page_filecache_min) {
				if ((vm_page_pageable_external_count *
				    vm_pageout_memorystatus_fb_factor_dr) >
				    (memorystatus_available_pages_critical *
				    vm_pageout_memorystatus_fb_factor_nr)) {
					*grab_anonymous = FALSE;

					VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1);
				}
			}
			if (*grab_anonymous) {
				VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1);
			}
		}
#endif /* CONFIG_JETSAM */

want_anonymous:
		if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
			if (!vm_page_queue_empty(&vm_page_queue_inactive)) {
				m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);

				assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
				*anons_grabbed = 0;

				if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) {
					if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
						if ((++reactivated_this_call % 100)) {
							vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++;

							vm_page_activate(m);
							VM_STAT_INCR(reactivations);
#if CONFIG_BACKGROUND_QUEUE
#if DEVELOPMENT || DEBUG
							if (*is_page_from_bg_q == TRUE) {
								if (m_object->internal) {
									vm_pageout_rejected_bq_internal++;
								} else {
									vm_pageout_rejected_bq_external++;
								}
							}
#endif /* DEVELOPMENT || DEBUG */
#endif /* CONFIG_BACKGROUND_QUEUE */
							vm_pageout_state.vm_pageout_inactive_used++;

							m = NULL;
							retval = VM_PAGEOUT_SCAN_NEXT_ITERATION;

							goto found_page;
						}

						/*
						 * steal 1% of the file backed pages even if
						 * we are under the limit that has been set
						 * for a healthy filecache
						 */
					}
				}
				break;
			}
		}
		if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
			m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);

			assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
			*anons_grabbed += 1;

			break;
		}

		m = NULL;
		break;
	}

found_page:
	*victim_page = m;

	return retval;
}
/*
 * This function is called only from vm_pageout_scan and
 * it will put a page back on the active/inactive queue
 * if we can't reclaim it for some reason.
 */
static void
vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q)
{
	if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
		vm_page_enqueue_inactive(m, FALSE);
	} else {
		vm_page_activate(m);
	}

#if CONFIG_BACKGROUND_QUEUE
#if DEVELOPMENT || DEBUG
	vm_object_t m_object = VM_PAGE_OBJECT(m);

	if (page_from_bg_q == TRUE) {
		if (m_object->internal) {
			vm_pageout_rejected_bq_internal++;
		} else {
			vm_pageout_rejected_bq_external++;
		}
	}
#endif /* DEVELOPMENT || DEBUG */
#endif /* CONFIG_BACKGROUND_QUEUE */
}
/*
 * This function is called only from vm_pageout_scan and
 * it will try to grab the victim page's VM object (m_object)
 * which differs from the previous victim page's object (object).
 */
static int
vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q)
{
	struct vm_speculative_age_q *sq;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	/*
	 * the object associated with candidate page is
	 * different from the one we were just working
	 * with... dump the lock if we still own it
	 */
	if (*object != NULL) {
		vm_object_unlock(*object);
		*object = NULL;
	}
	/*
	 * Try to lock object; since we've already got the
	 * page queues lock, we can only 'try' for this one.
	 * if the 'try' fails, we need to do a mutex_pause
	 * to allow the owner of the object lock a chance to
	 * run... otherwise, we're likely to trip over this
	 * object in the same state as we work our way through
	 * the queue... clumps of pages associated with the same
	 * object are fairly typical on the inactive and active queues
	 */
	if (!vm_object_lock_try_scan(m_object)) {
		vm_page_t m_want = NULL;

		vm_pageout_vminfo.vm_pageout_inactive_nolock++;

		if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
			VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
		}

		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));

		m->vmp_reference = FALSE;

		if (!m_object->object_is_shared_cache) {
			/*
			 * don't apply this optimization if this is the shared cache
			 * object, it's too easy to get rid of very hot and important
			 * pages...
			 * m->vmp_object must be stable since we hold the page queues lock...
			 * we can update the scan_collisions field sans the object lock
			 * since it is a separate field and this is the only spot that does
			 * a read-modify-write operation and it is never executed concurrently...
			 * we can asynchronously set this field to 0 when creating a UPL, so it
			 * is possible for the value to be a bit non-deterministic, but that's ok
			 * since it's only used as a hint
			 */
			m_object->scan_collisions = 1;
		}
		if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
			m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
		} else if (!vm_page_queue_empty(&sq->age_q)) {
			m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
		} else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) &&
		    !vm_page_queue_empty(&vm_page_queue_inactive)) {
			m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
		} else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
			m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
		}

		/*
		 * this is the next object we're going to be interested in
		 * try to make sure its available after the mutex_pause
		 * is gone
		 */
		if (m_want) {
			vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
		}

		vps_requeue_page(m, page_prev_q_state, page_from_bg_q);

		return VM_PAGEOUT_SCAN_NEXT_ITERATION;
	} else {
		*object = m_object;
		vm_pageout_scan_wants_object = VM_OBJECT_NULL;
	}

	return VM_PAGEOUT_SCAN_PROCEED;
}
/*
 * This function is called only from vm_pageout_scan and
 * it notices that pageout scan may be rendered ineffective
 * due to a FS deadlock and will jetsam a process if possible.
 * If jetsam isn't supported, it'll move the page to the active
 * queue to try and get some different pages pushed onwards so
 * we can try to get out of this scenario.
 */
static void
vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit,
    int *delayed_unlock, boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q)
{
	struct vm_pageout_queue *eq;
	vm_object_t cur_object = VM_OBJECT_NULL;

	cur_object = *object;

	eq = &vm_pageout_queue_external;

	if (cur_object->internal == FALSE) {
		/*
		 * we need to break up the following potential deadlock case...
		 *  a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
		 *  b) The thread doing the writing is waiting for pages while holding the truncate lock
		 *  c) Most of the pages in the inactive queue belong to this file.
		 *
		 * we are potentially in this deadlock because...
		 *  a) the external pageout queue is throttled
		 *  b) we're done with the active queue and moved on to the inactive queue
		 *  c) we've got a dirty external page
		 *
		 * since we don't know the reason for the external pageout queue being throttled we
		 * must suspect that we are deadlocked, so move the current page onto the active queue
		 * in an effort to cause a page from the active queue to 'age' to the inactive queue
		 *
		 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
		 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
		 * pool the next time we select a victim page... if we can make enough new free pages,
		 * the deadlock will break, the external pageout queue will empty and it will no longer
		 * be throttled
		 *
		 * if we have jetsam configured, keep a count of the pages reactivated this way so
		 * that we can try to find clean pages in the active/inactive queues before
		 * deciding to jetsam a process
		 */
		vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;

		vm_page_check_pageable_safe(m);
		assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
		vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
		m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
		vm_page_active_count++;
		vm_page_pageable_external_count++;

		vm_pageout_adjust_eq_iothrottle(eq, FALSE);

#if CONFIG_MEMORYSTATUS && CONFIG_JETSAM

#pragma unused(force_anonymous)

		*vm_pageout_inactive_external_forced_reactivate_limit -= 1;

		if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
			*vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
			/*
			 * Possible deadlock scenario so request jetsam action
			 */
			vm_object_unlock(cur_object);

			cur_object = VM_OBJECT_NULL;

			/*
			 * VM pageout scan needs to know we have dropped this lock and so set the
			 * object variable we got passed in to NULL.
			 */
			*object = VM_OBJECT_NULL;

			vm_page_unlock_queues();

			VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
			    vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);

			/* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
			if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
				VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1);
			}

			VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
			    vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);

			vm_page_lock_queues();
			*delayed_unlock = 1;
		}
#else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */

#pragma unused(vm_pageout_inactive_external_forced_reactivate_limit)
#pragma unused(delayed_unlock)

		*force_anonymous = TRUE;
#endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
	} else {
		vm_page_activate(m);
		VM_STAT_INCR(reactivations);

#if CONFIG_BACKGROUND_QUEUE
#if DEVELOPMENT || DEBUG
		if (is_page_from_bg_q == TRUE) {
			if (cur_object->internal) {
				vm_pageout_rejected_bq_internal++;
			} else {
				vm_pageout_rejected_bq_external++;
			}
		}
#endif /* DEVELOPMENT || DEBUG */
#endif /* CONFIG_BACKGROUND_QUEUE */

		vm_pageout_state.vm_pageout_inactive_used++;
	}
}
void
vm_page_balance_inactive(int max_to_move)
{
	vm_page_t m;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (hibernation_vmqueues_inspection == TRUE) {
		/*
		 * It is likely that the hibernation code path is
		 * dealing with these very queues as we are about
		 * to move pages around in/from them and completely
		 * change the linkage of the pages.
		 *
		 * And so we skip the rebalancing of these queues.
		 */
		return;
	}
	vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
	    vm_page_inactive_count +
	    vm_page_speculative_count);

	while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
		VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);

		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);

		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
		assert(!m->vmp_laundry);
		assert(VM_PAGE_OBJECT(m) != kernel_object);
		assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);

		DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);

		/*
		 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
		 *
		 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
		 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
		 * new reference happens. If no further references happen on the page after that remote TLB flushes
		 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
		 * by pageout_scan, which is just fine since the last reference would have happened quite far
		 * in the past (TLB caches don't hang around for very long), and of course could just as easily
		 * have happened before we moved the page
		 */
		if (m->vmp_pmapped == TRUE) {
			pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
		}

		/*
		 * The page might be absent or busy,
		 * but vm_page_deactivate can handle that.
		 * FALSE indicates that we don't want a H/W clear reference
		 */
		vm_page_deactivate_internal(m, FALSE);
	}
}
/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *	It returns with both vm_page_queue_free_lock and vm_page_queue_lock
 *	held and vm_page_free_wanted == 0.
 */
void
vm_pageout_scan(void)
{
	unsigned int loop_count = 0;
	unsigned int inactive_burst_count = 0;
	unsigned int reactivated_this_call;
	unsigned int reactivate_limit;
	vm_page_t local_freeq = NULL;
	int local_freed = 0;
	int delayed_unlock = 0;
	int delayed_unlock_limit = 0;
	int refmod_state = 0;
	int vm_pageout_deadlock_target = 0;
	struct vm_pageout_queue *iq;
	struct vm_pageout_queue *eq;
	struct vm_speculative_age_q *sq;
	struct flow_control flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } };
	boolean_t inactive_throttled = FALSE;
	vm_object_t object = NULL;
	uint32_t inactive_reclaim_run;
	boolean_t grab_anonymous = FALSE;
	boolean_t force_anonymous = FALSE;
	boolean_t force_speculative_aging = FALSE;
	int anons_grabbed = 0;
	int page_prev_q_state = 0;
	boolean_t page_from_bg_q = FALSE;
	uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
	vm_object_t m_object = VM_OBJECT_NULL;
	vm_page_t m = NULL;
	int retval = 0;
	boolean_t lock_yield_check = FALSE;

	VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
	    vm_pageout_vminfo.vm_pageout_freed_speculative,
	    vm_pageout_state.vm_pageout_inactive_clean,
	    vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
	    vm_pageout_vminfo.vm_pageout_inactive_dirty_external);

	flow_control.state = FCS_IDLE;
	iq = &vm_pageout_queue_internal;
	eq = &vm_pageout_queue_external;
	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	/* Ask the pmap layer to return any pages it no longer needs. */
	uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();

	vm_page_lock_queues();

	vm_page_wire_count -= pmap_wired_pages_freed;

	delayed_unlock = 1;

	/*
	 *	Calculate the max number of referenced pages on the inactive
	 *	queue that we will reactivate.
	 */
	reactivated_this_call = 0;
	reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
	    vm_page_inactive_count);
	inactive_reclaim_run = 0;

	vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;

	/*
	 *	We must limit the rate at which we send pages to the pagers
	 *	so that we don't tie up too many pages in the I/O queues.
	 *	We implement a throttling mechanism using the laundry count
	 *	to limit the number of pages outstanding to the default
	 *	and external pagers.  We can bypass the throttles and look
	 *	for clean pages if the pageout queues don't drain in a timely
	 *	fashion since this may indicate that the pageout paths are
	 *	stalled waiting for memory, which only we can provide.
	 */
	vps_init_page_targets();
	assert(object == NULL);
	assert(delayed_unlock != 0);
	for (;;) {
		DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);

		if (lock_yield_check) {
			lock_yield_check = FALSE;

			if (delayed_unlock++ > delayed_unlock_limit) {
				int freed = local_freed;

				vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
				    VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
				if (freed == 0) {
					lck_mtx_yield(&vm_page_queue_lock);
				}
			} else if (vm_pageout_scan_wants_object) {
				vm_page_unlock_queues();
				mutex_pause(0);
				vm_page_lock_queues();
			}
		}

		if (vm_upl_wait_for_pages < 0) {
			vm_upl_wait_for_pages = 0;
		}

		delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;

		if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
			delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
		}

#if CONFIG_SECLUDED_MEMORY
		vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed);
#endif /* CONFIG_SECLUDED_MEMORY */

		assert(delayed_unlock);

		/*
		 * maintain our balance
		 */
		vm_page_balance_inactive(1);


		/**********************************************************************
		* above this point we're playing with the active and secluded queues
		* below this point we're playing with the throttling mechanisms
		* and the inactive queue
		**********************************************************************/

		if (vm_page_free_count + local_freed >= vm_page_free_target) {
			vm_pageout_scan_wants_object = VM_OBJECT_NULL;

			vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
			    VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
			/*
			 * make sure the pageout I/O threads are running
			 * throttled in case there are still requests
			 * in the laundry... since we have met our targets
			 * we don't need the laundry to be cleaned in a timely
			 * fashion... so let's avoid interfering with foreground
			 * activity
			 */
			vm_pageout_adjust_eq_iothrottle(eq, TRUE);

			lck_mtx_lock(&vm_page_queue_free_lock);

			if ((vm_page_free_count >= vm_page_free_target) &&
			    (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
				/*
				 * done - we have met our target *and*
				 * there is no one waiting for a page.
				 */
return_from_scan:
				assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);

				VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
				    vm_pageout_state.vm_pageout_inactive,
				    vm_pageout_state.vm_pageout_inactive_used, 0, 0);
				VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
				    vm_pageout_vminfo.vm_pageout_freed_speculative,
				    vm_pageout_state.vm_pageout_inactive_clean,
				    vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
				    vm_pageout_vminfo.vm_pageout_inactive_dirty_external);

				return;
			}
			lck_mtx_unlock(&vm_page_queue_free_lock);
		}

		/*
		 * Before anything, we check if we have any ripe volatile
		 * objects around. If so, try to purge the first object.
		 * If the purge fails, fall through to reclaim a page instead.
		 * If the purge succeeds, go back to the top and reevaluate
		 * the new memory situation.
		 */

		retval = vps_purge_object();

		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
			/*
			 * Success
			 */
			if (object != NULL) {
				vm_object_unlock(object);
				object = NULL;
			}

			lock_yield_check = FALSE;
			continue;
		}
		/*
		 * If our 'aged' queue is empty and we have some speculative pages
		 * in the other queues, let's go through and see if we need to age
		 * them.
		 *
		 * If we succeeded in aging a speculative Q or just that everything
		 * looks normal w.r.t queue age and queue counts, we keep going onward.
		 *
		 * If, for some reason, we seem to have a mismatch between the spec.
		 * page count and the page queues, we reset those variables and
		 * restart the loop (LD TODO: Track this better?).
		 */
		if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
			retval = vps_age_speculative_queue(force_speculative_aging);

			if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
				lock_yield_check = FALSE;
				continue;
			}
		}
		force_speculative_aging = FALSE;

		/*
		 * Check to see if we need to evict objects from the cache.
		 *
		 * Note: 'object' here doesn't have anything to do with
		 * the eviction part. We just need to make sure we have dropped
		 * any object lock we might be holding if we need to go down
		 * into the eviction logic.
		 */

		retval = vps_object_cache_evict(&object);

		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
			lock_yield_check = FALSE;
			continue;
		}

		/*
		 * Calculate our filecache_min that will affect the loop
		 * going forward.
		 */
		vps_calculate_filecache_min();

		/*
		 * LD TODO: Use a structure to hold all state variables for a single
		 * vm_pageout_scan iteration and pass that structure to this function instead.
		 */
		retval = vps_flow_control(&flow_control, &anons_grabbed, &object,
		    &delayed_unlock, &local_freeq, &local_freed,
		    &vm_pageout_deadlock_target, inactive_burst_count);

		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
			if (loop_count >= vm_page_inactive_count) {
				loop_count = 0;
			}

			inactive_burst_count = 0;

			assert(object == NULL);
			assert(delayed_unlock != 0);

			lock_yield_check = FALSE;
			continue;
		} else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
			goto return_from_scan;
		}

		flow_control.state = FCS_IDLE;

		vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
		    vm_pageout_inactive_external_forced_reactivate_limit);
		loop_count++;
		inactive_burst_count++;
		vm_pageout_state.vm_pageout_inactive++;

		/*
		 * Choose a victim.
		 */
		m = NULL;
		retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, reactivated_this_call);

		if (m == NULL) {
			if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
				reactivated_this_call++;

				inactive_burst_count = 0;

				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
				}

				lock_yield_check = TRUE;
				continue;
			}

			/*
			 * if we've gotten here, we have no victim page.
			 * check to see if we've not finished balancing the queues
			 * or we have a page on the aged speculative queue that we
			 * skipped due to force_anonymous == TRUE.. or we have
			 * speculative pages that we can prematurely age... if
			 * one of these cases we'll keep going, else panic
			 */
			force_anonymous = FALSE;
			VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1);

			if (!vm_page_queue_empty(&sq->age_q)) {
				lock_yield_check = TRUE;
				continue;
			}

			if (vm_page_speculative_count) {
				force_speculative_aging = TRUE;
				lock_yield_check = TRUE;
				continue;
			}
			panic("vm_pageout: no victim");

			/* NOTREACHED */
		}
		assert(VM_PAGE_PAGEABLE(m));
		m_object = VM_PAGE_OBJECT(m);
		force_anonymous = FALSE;

		page_prev_q_state = m->vmp_q_state;
		/*
		 * we just found this page on one of our queues...
		 * it can't also be on the pageout queue, so safe
		 * to call vm_page_queues_remove
		 */
		vm_page_queues_remove(m, TRUE);

		assert(!m->vmp_laundry);
		assert(!m->vmp_private);
		assert(!m->vmp_fictitious);
		assert(m_object != kernel_object);
		assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);

		vm_pageout_vminfo.vm_pageout_considered_page++;

		DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);

		/*
		 * check to see if we currently are working
		 * with the same object... if so, we've
		 * already got the lock
		 */
		if (m_object != object) {
			boolean_t avoid_anon_pages = (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT);

			/*
			 * vps_switch_object() will always drop the 'object' lock first
			 * and then try to acquire the 'm_object' lock. So 'object' has to point to
			 * either 'm_object' or NULL.
			 */
			retval = vps_switch_object(m, m_object, &object, page_prev_q_state, avoid_anon_pages, page_from_bg_q);

			if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
				lock_yield_check = TRUE;
				continue;
			}
		}
		assert(m_object == object);
		assert(VM_PAGE_OBJECT(m) == m_object);

		if (m->vmp_busy) {
			/*
			 * Somebody is already playing with this page.
			 * Put it back on the appropriate queue
			 */
			VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);

			if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
				VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1);
			}

			vps_requeue_page(m, page_prev_q_state, page_from_bg_q);

			lock_yield_check = TRUE;
			continue;
		}

		/*
		 *   if (m->vmp_cleaning && !m->vmp_free_when_done)
		 *	If already cleaning this page in place
		 *	just leave if off the paging queues.
		 *	We can leave the page mapped, and upl_commit_range
		 *	will put it on the clean queue.
		 *
		 *   if (m->vmp_free_when_done && !m->vmp_cleaning)
		 *	an msync INVALIDATE is in progress...
		 *	this page has been marked for destruction
		 *	after it has been cleaned,
		 *	but not yet gathered into a UPL
		 *	where 'cleaning' will be set...
		 *	just leave it off the paging queues
		 *
		 *   if (m->vmp_free_when_done && m->vmp_cleaning)
		 *	an msync INVALIDATE is in progress
		 *	and the UPL has already gathered this page...
		 *	just leave it off the paging queues
		 */
		if (m->vmp_free_when_done || m->vmp_cleaning) {
			lock_yield_check = TRUE;
			continue;
		}

		/*
		 * If it's absent, in error or the object is no longer alive,
		 * we can reclaim the page... in the no longer alive case,
		 * there are 2 states the page can be in that preclude us
		 * from reclaiming it - busy or cleaning - that we've already
		 * dealt with
		 */
		if (m->vmp_absent || m->vmp_error || !object->alive) {
			if (m->vmp_absent) {
				VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1);
			} else if (!object->alive) {
				VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1);
			} else {
				VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1);
			}
reclaim_page:
			if (vm_pageout_deadlock_target) {
				VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1);
				vm_pageout_deadlock_target--;
			}

			DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);

			if (object->internal) {
				DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
			} else {
				DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
			}
			assert(!m->vmp_cleaning);
			assert(!m->vmp_laundry);

			if (!object->internal &&
			    object->pager != NULL &&
			    object->pager->mo_pager_ops == &shared_region_pager_ops) {
				shared_region_pager_reclaimed++;
			}

			m->vmp_busy = TRUE;

			/*
			 * remove page from object here since we're already
			 * behind the object lock... defer the rest of the work
			 * we'd normally do in vm_page_free_prepare_object
			 * until 'vm_page_free_list' is called
			 */
			if (m->vmp_tabled) {
				vm_page_remove(m, TRUE);
			}

			assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
			m->vmp_snext = local_freeq;
			local_freeq = m;
			local_freed++;

			if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
				vm_pageout_vminfo.vm_pageout_freed_speculative++;
			} else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
				vm_pageout_vminfo.vm_pageout_freed_cleaned++;
			} else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) {
				vm_pageout_vminfo.vm_pageout_freed_internal++;
			} else {
				vm_pageout_vminfo.vm_pageout_freed_external++;
			}

			inactive_burst_count = 0;

			lock_yield_check = TRUE;
			continue;
		}
		if (object->copy == VM_OBJECT_NULL) {
			/*
			 * No one else can have any interest in this page.
			 * If this is an empty purgable object, the page can be
			 * reclaimed even if dirty.
			 * If the page belongs to a volatile purgable object, we
			 * reactivate it if the compressor isn't active.
			 */
			if (object->purgable == VM_PURGABLE_EMPTY) {
				if (m->vmp_pmapped == TRUE) {
					/* unmap the page */
					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
					if (refmod_state & VM_MEM_MODIFIED) {
						SET_PAGE_DIRTY(m, FALSE);
					}
				}
				if (m->vmp_dirty || m->vmp_precious) {
					/* we saved the cost of cleaning this page ! */
					vm_page_purged_count++;
				}
				goto reclaim_page;
			}

			if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
				/*
				 * With the VM compressor, the cost of
				 * reclaiming a page is much lower (no I/O),
				 * so if we find a "volatile" page, it's better
				 * to let it get compressed rather than letting
				 * it occupy a full page until it gets purged.
				 * So no need to check for "volatile" here.
				 */
			} else if (object->purgable == VM_PURGABLE_VOLATILE) {
				/*
				 * Avoid cleaning a "volatile" page which might
				 * be purged soon.
				 */

				/* if it's wired, we can't put it on our queue */
				assert(!VM_PAGE_WIRED(m));

				/* just stick it back on! */
				reactivated_this_call++;

				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1);
				}

				goto reactivate_page;
			}
		}

		/*
		 *	If it's being used, reactivate.
		 *	(Fictitious pages are either busy or absent.)
		 *	First, update the reference and dirty bits
		 *	to make sure the page is unreferenced.
		 */
		refmod_state = -1;

		if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));

			if (refmod_state & VM_MEM_REFERENCED) {
				m->vmp_reference = TRUE;
			}
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}

		if (m->vmp_reference || m->vmp_dirty) {
			/* deal with a rogue "reusable" page */
			VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
		}

		if (vm_pageout_state.vm_page_xpmapped_min_divisor == 0) {
			vm_pageout_state.vm_page_xpmapped_min = 0;
		} else {
			vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / vm_pageout_state.vm_page_xpmapped_min_divisor;
		}

		if (!m->vmp_no_cache &&
		    page_from_bg_q == FALSE &&
		    (m->vmp_reference || (m->vmp_xpmapped && !object->internal &&
		    (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) {
			/*
			 * The page we pulled off the inactive list has
			 * been referenced.  It is possible for other
			 * processors to be touching pages faster than we
			 * can clear the referenced bit and traverse the
			 * inactive queue, so we limit the number of
			 * reactivations.
			 */
			if (++reactivated_this_call >= reactivate_limit) {
				vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++;
			} else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
				vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++;
			} else {
				uint32_t isinuse;

				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1);
				}

				vm_pageout_vminfo.vm_pageout_inactive_referenced++;
reactivate_page:
				if (!object->internal && object->pager != MEMORY_OBJECT_NULL &&
				    vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
					/*
					 * no explicit mappings of this object exist
					 * and it's not open via the filesystem
					 */
					vm_page_deactivate(m);
					VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1);
				} else {
					/*
					 * The page was/is being used, so put back on active list.
					 */
					vm_page_activate(m);
					VM_STAT_INCR(reactivations);
					inactive_burst_count = 0;
				}
#if CONFIG_BACKGROUND_QUEUE
#if DEVELOPMENT || DEBUG
				if (page_from_bg_q == TRUE) {
					if (m_object->internal) {
						vm_pageout_rejected_bq_internal++;
					} else {
						vm_pageout_rejected_bq_external++;
					}
				}
#endif /* DEVELOPMENT || DEBUG */
#endif /* CONFIG_BACKGROUND_QUEUE */

				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
				}
				vm_pageout_state.vm_pageout_inactive_used++;

				lock_yield_check = TRUE;
				continue;
			}
			/*
			 * Make sure we call pmap_get_refmod() if it
			 * wasn't already called just above, to update
			 * the dirty bit.
			 */
			if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) {
				refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
				if (refmod_state & VM_MEM_MODIFIED) {
					SET_PAGE_DIRTY(m, FALSE);
				}
			}
		}

		/*
		 * we've got a candidate page to steal...
		 *
		 * m->vmp_dirty is up to date courtesy of the
		 * preceding check for m->vmp_reference... if
		 * we get here, then m->vmp_reference had to be
		 * FALSE (or possibly "reactivate_limit" was
		 * exceeded), but in either case we called
		 * pmap_get_refmod() and updated both
		 * m->vmp_reference and m->vmp_dirty
		 *
		 * if it's dirty or precious we need to
		 * see if the target queue is throttled
		 * if it is, we need to skip over it by moving it back
		 * to the end of the inactive queue
		 */

		inactive_throttled = FALSE;

		if (m->vmp_dirty || m->vmp_precious) {
			if (object->internal) {
				if (VM_PAGE_Q_THROTTLED(iq)) {
					inactive_throttled = TRUE;
				}
			} else if (VM_PAGE_Q_THROTTLED(eq)) {
				inactive_throttled = TRUE;
			}
		}
throttle_inactive:
		if (!VM_DYNAMIC_PAGING_ENABLED() &&
		    object->internal && m->vmp_dirty &&
		    (object->purgable == VM_PURGABLE_DENY ||
		    object->purgable == VM_PURGABLE_NONVOLATILE ||
		    object->purgable == VM_PURGABLE_VOLATILE)) {
			vm_page_check_pageable_safe(m);
			assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
			vm_page_throttled_count++;

			VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1);

			inactive_burst_count = 0;

			lock_yield_check = TRUE;
			continue;
		}
		if (inactive_throttled == TRUE) {
			vps_deal_with_throttled_queues(m, &object, &vm_pageout_inactive_external_forced_reactivate_limit,
			    &delayed_unlock, &force_anonymous, page_from_bg_q);

			inactive_burst_count = 0;

			if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
				VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
			}

			lock_yield_check = TRUE;
			continue;
		}

		/*
		 * we've got a page that we can steal...
		 * eliminate all mappings and make sure
		 * we have the up-to-date modified state
		 *
		 * if we need to do a pmap_disconnect then we
		 * need to re-evaluate m->vmp_dirty since the pmap_disconnect
		 * provides the true state atomically... the
		 * page was still mapped up to the pmap_disconnect
		 * and may have been dirtied at the last microsecond
		 *
		 * Note that if 'pmapped' is FALSE then the page is not
		 * and has not been in any map, so there is no point calling
		 * pmap_disconnect().  m->vmp_dirty could have been set in anticipation
		 * of likely usage of the page.
		 */
		if (m->vmp_pmapped == TRUE) {
			int pmap_options;

			/*
			 * Don't count this page as going into the compressor
			 * if any of these are true:
			 * 1) compressed pager isn't enabled
			 * 2) Freezer enabled device with compressed pager
			 *    backend (exclusive use) i.e. most of the VM system
			 *    (including vm_pageout_scan) has no knowledge of
			 *    the compressor
			 * 3) This page belongs to a file and hence will not be
			 *    sent into the compressor
			 */
			if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
			    object->internal == FALSE) {
				pmap_options = 0;
			} else if (m->vmp_dirty || m->vmp_precious) {
				/*
				 * VM knows that this page is dirty (or
				 * precious) and needs to be compressed
				 * rather than freed.
				 * Tell the pmap layer to count this page
				 * as "compressed".
				 */
				pmap_options = PMAP_OPTIONS_COMPRESSOR;
			} else {
				/*
				 * VM does not know if the page needs to
				 * be preserved but the pmap layer might tell
				 * us if any mapping has "modified" it.
				 * Let the pmap layer count this page
				 * as compressed if and only if it has been
				 * modified.
				 */
				pmap_options =
				    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			}
			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
			    pmap_options,
			    NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}

		/*
		 * reset our count of pages that have been reclaimed
		 * since the last page was 'stolen'
		 */
		inactive_reclaim_run = 0;

		/*
		 *	If it's clean and not precious, we can free the page.
		 */
		if (!m->vmp_dirty && !m->vmp_precious) {
			vm_pageout_state.vm_pageout_inactive_clean++;

			/*
			 * OK, at this point we have found a page we are going to free.
			 */
#if CONFIG_PHANTOM_CACHE
			if (!object->internal) {
				vm_phantom_cache_add_ghost(m);
			}
#endif
			goto reclaim_page;
		}

		/*
		 * The page may have been dirtied since the last check
		 * for a throttled target queue (which may have been skipped
		 * if the page was clean then).  With the dirty page
		 * disconnected here, we can make one final check.
		 */
		if (object->internal) {
			if (VM_PAGE_Q_THROTTLED(iq)) {
				inactive_throttled = TRUE;
			}
		} else if (VM_PAGE_Q_THROTTLED(eq)) {
			inactive_throttled = TRUE;
		}

		if (inactive_throttled == TRUE) {
			goto throttle_inactive;
		}

#if VM_PRESSURE_EVENTS
#if CONFIG_JETSAM

		/*
		 * If Jetsam is enabled, then the sending
		 * of memory pressure notifications is handled
		 * from the same thread that takes care of high-water
		 * and other jetsams i.e. the memorystatus_thread.
		 */

#else /* CONFIG_JETSAM */

		vm_pressure_response();

#endif /* CONFIG_JETSAM */
#endif /* VM_PRESSURE_EVENTS */

		if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
			VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1);
		}

		if (object->internal) {
			vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++;
		} else {
			vm_pageout_vminfo.vm_pageout_inactive_dirty_external++;
		}

		/*
		 * internal pages will go to the compressor...
		 * external pages will go to the appropriate pager to be cleaned
		 * and upon completion will end up on 'vm_page_queue_cleaned' which
		 * is a preferred queue to steal from
		 */
		vm_pageout_cluster(m);
		inactive_burst_count = 0;

		/*
		 * back to top of pageout scan loop
		 */
	}
}
void
vm_page_free_reserve(
	int pages)
{
	int             free_after_reserve;

	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) {
			vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
		} else {
			vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
		}
	} else {
		if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) {
			vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
		} else {
			vm_page_free_reserved += pages;
		}
	}
	free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved;

	vm_page_free_min = vm_page_free_reserved +
	    VM_PAGE_FREE_MIN(free_after_reserve);

	if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) {
		vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
	}

	vm_page_free_target = vm_page_free_reserved +
	    VM_PAGE_FREE_TARGET(free_after_reserve);

	if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) {
		vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
	}

	if (vm_page_free_target < vm_page_free_min + 5) {
		vm_page_free_target = vm_page_free_min + 5;
	}

	vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
}
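
/*
 * Shape of the thresholds computed above: vm_page_free_reserved is bumped by
 * 'pages' (plus the compressor reserve when a compressor is present) and
 * clipped to its limit; vm_page_free_min and vm_page_free_target are then
 * recomputed from the pages left after the reserve, each clipped to its own
 * limit, with the target kept at least 5 pages above the min; the throttle
 * limit ends up at half of the free target.  The macro limits themselves are
 * platform tunables and are not assumed here.
 */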
/*
 *	vm_pageout is the high level pageout daemon.
 */

void
vm_pageout_continue(void)
{
	DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
	VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1);

	lck_mtx_lock(&vm_page_queue_free_lock);
	vm_pageout_running = TRUE;
	lck_mtx_unlock(&vm_page_queue_free_lock);

	vm_pageout_scan();
	/*
	 * we hold both the vm_page_queue_free_lock
	 * and the vm_page_queues_lock at this point
	 */
	assert(vm_page_free_wanted == 0);
	assert(vm_page_free_wanted_privileged == 0);
	assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);

	vm_pageout_running = FALSE;
#if !CONFIG_EMBEDDED
	if (vm_pageout_waiter) {
		vm_pageout_waiter = FALSE;
		thread_wakeup((event_t)&vm_pageout_waiter);
	}
#endif /* !CONFIG_EMBEDDED */

	lck_mtx_unlock(&vm_page_queue_free_lock);
	vm_page_unlock_queues();

	counter(c_vm_pageout_block++);
	thread_block((thread_continue_t)vm_pageout_continue);
	/*NOTREACHED*/
}
#if !CONFIG_EMBEDDED
kern_return_t
vm_pageout_wait(uint64_t deadline)
{
	kern_return_t kr;

	lck_mtx_lock(&vm_page_queue_free_lock);
	for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) {
		vm_pageout_waiter = TRUE;
		if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
			    &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
			    (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
			kr = KERN_OPERATION_TIMED_OUT;
		}
	}
	lck_mtx_unlock(&vm_page_queue_free_lock);

	return kr;
}
#endif /* !CONFIG_EMBEDDED */
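
/*
 * vm_pageout_wait() above lets a caller rendezvous with the pageout daemon:
 * it blocks until the in-flight pageout pass completes (vm_pageout_running
 * goes FALSE and the waiter is woken from vm_pageout_continue()) or the
 * supplied deadline expires, in which case KERN_OPERATION_TIMED_OUT is
 * returned.
 */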
static void
vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
{
	vm_page_t       m = NULL;
	vm_object_t     object;
	vm_object_offset_t offset;
	memory_object_t pager;

	/* On systems with a compressor, the external IO thread clears its
	 * VM privileged bit to accommodate large allocations (e.g. bulk UPL
	 * creation).
	 */
	if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) {
		current_thread()->options &= ~TH_OPT_VMPRIV;
	}

	vm_page_lockspin_queues();

	while (!vm_page_queue_empty(&q->pgo_pending)) {
		q->pgo_busy = TRUE;
		vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);

		assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);
		VM_PAGE_CHECK(m);
		/*
		 * grab a snapshot of the object and offset this
		 * page is tabled in so that we can relookup this
		 * page after we've taken the object lock - these
		 * fields are stable while we hold the page queues lock
		 * but as soon as we drop it, there is nothing to keep
		 * this page in this object... we hold an activity_in_progress
		 * on this object which will keep it from terminating
		 */
		object = VM_PAGE_OBJECT(m);
		offset = m->vmp_offset;

		m->vmp_q_state = VM_PAGE_NOT_ON_Q;
		VM_PAGE_ZERO_PAGEQ_ENTRY(m);

		vm_page_unlock_queues();

		vm_object_lock(object);

		m = vm_page_lookup(object, offset);

		if (m == NULL || m->vmp_busy || m->vmp_cleaning ||
		    !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) {
			/*
			 * it's either the same page that someone else has
			 * started cleaning (or it's finished cleaning or
			 * been put back on the pageout queue), or
			 * the page has been freed or we have found a
			 * new page at this offset... in all of these cases
			 * we merely need to release the activity_in_progress
			 * we took when we put the page on the pageout queue
			 */
			vm_object_activity_end(object);
			vm_object_unlock(object);

			vm_page_lockspin_queues();
			continue;
		}
		pager = object->pager;

		if (pager == MEMORY_OBJECT_NULL) {
			/*
			 * This pager has been destroyed by either
			 * memory_object_destroy or vm_object_destroy, and
			 * so there is nowhere for the page to go.
			 */
			if (m->vmp_free_when_done) {
				/*
				 * Just free the page... VM_PAGE_FREE takes
				 * care of cleaning up all the state...
				 * including doing the vm_pageout_throttle_up
				 */
				VM_PAGE_FREE(m);
			} else {
				vm_page_lockspin_queues();

				vm_pageout_throttle_up(m);
				vm_page_activate(m);

				vm_page_unlock_queues();

				/*
				 *	And we are done with it.
				 */
			}
			vm_object_activity_end(object);
			vm_object_unlock(object);

			vm_page_lockspin_queues();
			continue;
		}
#if 0
		/*
		 * we don't hold the page queue lock
		 * so this check isn't safe to make
		 */
		VM_PAGE_CHECK(m);
#endif
		/*
		 * give back the activity_in_progress reference we
		 * took when we queued up this page and replace it
		 * with a paging_in_progress reference that will
		 * also hold the paging offset from changing and
		 * prevent the object from terminating
		 */
		vm_object_activity_end(object);
		vm_object_paging_begin(object);
		vm_object_unlock(object);

		/*
		 * Send the data to the pager.
		 * any pageout clustering happens there
		 */
		memory_object_data_return(pager,
		    m->vmp_offset + object->paging_offset,
		    PAGE_SIZE,
		    NULL,
		    NULL,
		    FALSE,
		    FALSE,
		    0);

		vm_object_lock(object);
		vm_object_paging_end(object);
		vm_object_unlock(object);

		vm_pageout_io_throttle();

		vm_page_lockspin_queues();
	}
	q->pgo_busy = FALSE;
	q->pgo_idle = TRUE;

	assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
	vm_page_unlock_queues();

	thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
	/*NOTREACHED*/
}
#define MAX_FREE_BATCH  32
uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by ... */


void
vm_pageout_iothread_internal_continue(struct cq *);
void
vm_pageout_iothread_internal_continue(struct cq *cq)
{
	struct vm_pageout_queue *q;
	vm_page_t       m = NULL;
	boolean_t       pgo_draining;
	vm_page_t       local_q;
	int             local_cnt;
	vm_page_t       local_freeq = NULL;
	int             local_freed = 0;
	int             local_batch_size;
#if DEVELOPMENT || DEBUG
	int             ncomps = 0;
	boolean_t       marked_active = FALSE;
#endif
	KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);

	q = cq->q;
	local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);

#if RECORD_THE_COMPRESSED_DATA
	if (q->pgo_laundry) {
		c_compressed_record_init();
	}
#endif
	while (TRUE) {
		int     pages_left_on_q = 0;

		local_cnt = 0;
		local_q = NULL;

		KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);

		vm_page_lock_queues();
#if DEVELOPMENT || DEBUG
		if (marked_active == FALSE) {
			vmct_active++;
			vmct_state[cq->id] = VMCT_ACTIVE;
			marked_active = TRUE;
			if (vmct_active == 1) {
				vm_compressor_epoch_start = mach_absolute_time();
			}
		}
#endif
		KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);

		KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);

		while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
			vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
			assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);

			m->vmp_q_state = VM_PAGE_NOT_ON_Q;
			VM_PAGE_ZERO_PAGEQ_ENTRY(m);
			m->vmp_laundry = FALSE;

			m->vmp_snext = local_q;
			local_q = m;
			local_cnt++;
		}
		if (local_q == NULL) {
			break;
		}
		q->pgo_busy = TRUE;

		if ((pgo_draining = q->pgo_draining) == FALSE) {
			vm_pageout_throttle_up_batch(q, local_cnt);
			pages_left_on_q = q->pgo_laundry;
		} else {
			pages_left_on_q = q->pgo_laundry - local_cnt;
		}

		vm_page_unlock_queues();

#if !RECORD_THE_COMPRESSED_DATA
		if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) {
			thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
		}
#endif
		KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);

		while (local_q) {
			KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);

			m = local_q;
			local_q = m->vmp_snext;
			m->vmp_snext = NULL;

			if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) {
#if DEVELOPMENT || DEBUG
				ncomps++;
#endif
				KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0);

				m->vmp_snext = local_freeq;
				local_freeq = m;
				local_freed++;

				if (local_freed >= MAX_FREE_BATCH) {
					OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

					vm_page_free_list(local_freeq, TRUE);

					local_freeq = NULL;
					local_freed = 0;
				}
			}
#if !CONFIG_JETSAM
			while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
				kern_return_t   wait_result;
				int             need_wakeup = 0;

				if (local_freeq) {
					OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

					vm_page_free_list(local_freeq, TRUE);
					local_freeq = NULL;
					local_freed = 0;

					continue;
				}
				lck_mtx_lock_spin(&vm_page_queue_free_lock);

				if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
					if (vm_page_free_wanted_privileged++ == 0) {
						need_wakeup = 1;
					}
					wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);

					lck_mtx_unlock(&vm_page_queue_free_lock);

					if (need_wakeup) {
						thread_wakeup((event_t)&vm_page_free_wanted);
					}

					if (wait_result == THREAD_WAITING) {
						thread_block(THREAD_CONTINUE_NULL);
					}
				} else {
					lck_mtx_unlock(&vm_page_queue_free_lock);
				}
			}
#endif
		}
		if (local_freeq) {
			OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

			vm_page_free_list(local_freeq, TRUE);
			local_freeq = NULL;
			local_freed = 0;
		}
		if (pgo_draining == TRUE) {
			vm_page_lockspin_queues();
			vm_pageout_throttle_up_batch(q, local_cnt);
			vm_page_unlock_queues();
		}
	}
	KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/*
	 * queue lock is held and our q is empty
	 */
	q->pgo_busy = FALSE;
	q->pgo_idle = TRUE;

	assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
#if DEVELOPMENT || DEBUG
	if (marked_active == TRUE) {
		vmct_active--;
		vmct_state[cq->id] = VMCT_IDLE;

		if (vmct_active == 0) {
			vm_compressor_epoch_stop = mach_absolute_time();
			assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start,
			    "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
			    vm_compressor_epoch_start, vm_compressor_epoch_stop);
			/* This interval includes intervals where one or more
			 * compressor threads were pre-empted
			 */
			vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start;
		}
	}
#endif
	vm_page_unlock_queues();
#if DEVELOPMENT || DEBUG
	if (__improbable(vm_compressor_time_thread)) {
		vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
		vmct_stats.vmct_pages[cq->id] += ncomps;
		vmct_stats.vmct_iterations[cq->id]++;
		if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
			vmct_stats.vmct_maxpages[cq->id] = ncomps;
		}
		if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
			vmct_stats.vmct_minpages[cq->id] = ncomps;
		}
	}
#endif

	KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);

	thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
	/*NOTREACHED*/
}
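
/*
 * Note on the wait/wakeup pairing above (added descriptive comment): each
 * compressor thread waits on a distinct event computed as
 * ((uintptr_t)&q->pgo_pending + cq->id), so a thread that still sees work
 * left on the shared queue can wake exactly one additional helper by using
 * cq->id + 1, instead of thundering every compressor thread at once.
 */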
kern_return_t
vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
{
	vm_object_t     object;
	memory_object_t pager;
	int             compressed_count_delta;
	kern_return_t   retval;

	object = VM_PAGE_OBJECT(m);

	assert(!m->vmp_free_when_done);
	assert(!m->vmp_laundry);

	pager = object->pager;

	if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
		KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);

		vm_object_lock(object);

		/*
		 * If there is no memory object for the page, create
		 * one and hand it to the compression pager.
		 */
		if (!object->pager_initialized) {
			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
		}
		if (!object->pager_initialized) {
			vm_object_compressor_pager_create(object);
		}

		pager = object->pager;

		if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
			/*
			 * Still no pager for the object,
			 * or the pager has been destroyed.
			 * Reactivate the page.
			 *
			 * Should only happen if there is no
			 * default pager.
			 */
			PAGE_WAKEUP_DONE(m);

			vm_page_lockspin_queues();
			vm_page_activate(m);
			VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
			vm_page_unlock_queues();

			/*
			 *	And we are done with it.
			 */
			vm_object_activity_end(object);
			vm_object_unlock(object);

			return KERN_FAILURE;
		}
		vm_object_unlock(object);

		KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
	}
	assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
	assert(object->activity_in_progress > 0);

	retval = vm_compressor_pager_put(
		pager,
		m->vmp_offset + object->paging_offset,
		VM_PAGE_GET_PHYS_PAGE(m),
		current_chead,
		scratch_buf,
		&compressed_count_delta);

	vm_object_lock(object);

	assert(object->activity_in_progress > 0);
	assert(VM_PAGE_OBJECT(m) == object);
	assert( !VM_PAGE_WIRED(m));

	vm_compressor_pager_count(pager,
	    compressed_count_delta,
	    FALSE, /* shared_lock */
	    object);

	if (retval == KERN_SUCCESS) {
		/*
		 * If the object is purgeable, its owner's
		 * purgeable ledgers will be updated in
		 * vm_page_remove() but the page still
		 * contributes to the owner's memory footprint,
		 * so account for it as such.
		 */
		if ((object->purgable != VM_PURGABLE_DENY ||
		    object->vo_ledger_tag) &&
		    object->vo_owner != NULL) {
			/* one more compressed purgeable/tagged page */
			vm_object_owner_compressed_update(object,
			    +1);
		}

		VM_STAT_INCR(compressions);

		if (m->vmp_tabled) {
			vm_page_remove(m, TRUE);
		}
	} else {
		PAGE_WAKEUP_DONE(m);

		vm_page_lockspin_queues();

		vm_page_activate(m);
		vm_pageout_vminfo.vm_compressor_failed++;

		vm_page_unlock_queues();
	}
	vm_object_activity_end(object);
	vm_object_unlock(object);

	return retval;
}
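
/*
 * Caller contract sketch (derived from the code above, not a new API): the
 * page handed to vm_pageout_compress_page() is expected to be busy, not
 * wired, and no longer on the laundry, and its object must carry an
 * activity_in_progress reference.  That reference is dropped here on both
 * the success and failure paths, so a caller such as
 * vm_pageout_iothread_internal_continue() queues the page and then forgets
 * about the object.
 */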
void
vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
{
	int policy;

	if (hibernate_cleaning_in_progress == TRUE) {
		req_lowpriority = FALSE;
	}

	if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
		vm_page_unlock_queues();

		if (req_lowpriority == TRUE) {
			policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
			DTRACE_VM(laundrythrottle);
		} else {
			policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
			DTRACE_VM(laundryunthrottle);
		}
		proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
		    TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);

		eq->pgo_lowpriority = req_lowpriority;

		vm_page_lock_queues();
	}
}
static void
vm_pageout_iothread_external(void)
{
	thread_t self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);

	proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
	    TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);

	vm_page_lock_queues();

	vm_pageout_queue_external.pgo_tid = self->thread_id;
	vm_pageout_queue_external.pgo_lowpriority = TRUE;
	vm_pageout_queue_external.pgo_inited = TRUE;

	vm_page_unlock_queues();

	vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
	/*NOTREACHED*/
}
static void
vm_pageout_iothread_internal(struct cq *cq)
{
	thread_t self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	vm_page_lock_queues();

	vm_pageout_queue_internal.pgo_tid = self->thread_id;
	vm_pageout_queue_internal.pgo_lowpriority = TRUE;
	vm_pageout_queue_internal.pgo_inited = TRUE;

	vm_page_unlock_queues();

	if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
		thread_vm_bind_group_add();
	}

	thread_set_thread_name(current_thread(), "VM_compressor");
#if DEVELOPMENT || DEBUG
	vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
#endif
	vm_pageout_iothread_internal_continue(cq);
	/*NOTREACHED*/
}
kern_return_t
vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
{
	if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE; /* Already set */
	}
}
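
/*
 * Usage sketch (hypothetical registration site, not in this file): the buffer
 * cache layer is expected to register its reclaim hook once at startup, e.g.
 *
 *	static boolean_t my_buffer_cache_collect(int);	// assumed callback
 *	...
 *	(void) vm_set_buffer_cleanup_callout(my_buffer_cache_collect);
 *
 * A second registration returns KERN_FAILURE, because the
 * OSCompareAndSwapPtr above only installs the callout while the slot is
 * still NULL.
 */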
extern boolean_t memorystatus_manual_testing_on;
extern unsigned int memorystatus_level;


#if VM_PRESSURE_EVENTS

boolean_t vm_pressure_events_enabled = FALSE;

void
vm_pressure_response(void)
{
	vm_pressure_level_t     old_level = kVMPressureNormal;
	int                     new_level = -1;
	unsigned int            total_pages;
	uint64_t                available_memory = 0;

	if (vm_pressure_events_enabled == FALSE) {
		return;
	}

#if CONFIG_EMBEDDED
	available_memory = (uint64_t) memorystatus_available_pages;
#else /* CONFIG_EMBEDDED */
	available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
	memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
#endif /* CONFIG_EMBEDDED */

	total_pages = (unsigned int) atop_64(max_mem);
#if CONFIG_SECLUDED_MEMORY
	total_pages -= vm_page_secluded_count;
#endif /* CONFIG_SECLUDED_MEMORY */
	memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);

	if (memorystatus_manual_testing_on) {
		return;
	}

	old_level = memorystatus_vm_pressure_level;

	switch (memorystatus_vm_pressure_level) {
	case kVMPressureNormal:
		if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
			new_level = kVMPressureCritical;
		} else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
			new_level = kVMPressureWarning;
		}
		break;

	case kVMPressureWarning:
	case kVMPressureUrgent:
		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
			new_level = kVMPressureNormal;
		} else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
			new_level = kVMPressureCritical;
		}
		break;

	case kVMPressureCritical:
		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
			new_level = kVMPressureNormal;
		} else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
			new_level = kVMPressureWarning;
		}
		break;

	default:
		return;
	}

	if (new_level != -1) {
		memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;

		if (new_level != (int) old_level) {
			VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
			    new_level, old_level, 0, 0);
		}

		if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level)) {
			if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
				thread_wakeup(&vm_pressure_thread);
			}

			if (old_level != memorystatus_vm_pressure_level) {
				thread_wakeup(&vm_pageout_state.vm_pressure_changed);
			}
		}
	}
}
#endif /* VM_PRESSURE_EVENTS */
/*
 * Function called by a kernel thread to either get the current pressure level or
 * wait until memory pressure changes from a given level.
 */
kern_return_t
mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
{
#if !VM_PRESSURE_EVENTS

	return KERN_FAILURE;

#else /* VM_PRESSURE_EVENTS */

	wait_result_t       wr = 0;
	vm_pressure_level_t old_level = memorystatus_vm_pressure_level;

	if (pressure_level == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (*pressure_level == kVMPressureJetsam) {
		if (!wait_for_pressure) {
			return KERN_INVALID_ARGUMENT;
		}

		lck_mtx_lock(&memorystatus_jetsam_fg_band_lock);
		wr = assert_wait((event_t)&memorystatus_jetsam_fg_band_waiters,
		    THREAD_INTERRUPTIBLE);
		if (wr == THREAD_WAITING) {
			++memorystatus_jetsam_fg_band_waiters;
			lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
			wr = thread_block(THREAD_CONTINUE_NULL);
		} else {
			lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
		}
		if (wr != THREAD_AWAKENED) {
			return KERN_ABORTED;
		}
		*pressure_level = kVMPressureJetsam;
		return KERN_SUCCESS;
	}

	if (wait_for_pressure == TRUE) {
		while (old_level == *pressure_level) {
			wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
			    THREAD_INTERRUPTIBLE);
			if (wr == THREAD_WAITING) {
				wr = thread_block(THREAD_CONTINUE_NULL);
			}
			if (wr == THREAD_INTERRUPTED) {
				return KERN_ABORTED;
			}

			if (wr == THREAD_AWAKENED) {
				old_level = memorystatus_vm_pressure_level;
			}
		}
	}

	*pressure_level = old_level;
	return KERN_SUCCESS;
#endif /* VM_PRESSURE_EVENTS */
}
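
/*
 * Usage sketch (hypothetical in-kernel caller, not from this file): waiting
 * for the pressure level to move away from its current value could look like
 *
 *	unsigned int level = memorystatus_vm_pressure_level;
 *	kern_return_t kr = mach_vm_pressure_level_monitor(TRUE, &level);
 *	if (kr == KERN_SUCCESS) {
 *		// 'level' now holds the new vm_pressure_level_t value
 *	}
 *
 * Passing wait_for_pressure == FALSE simply reports the current level.
 */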
#if VM_PRESSURE_EVENTS
void
vm_pressure_thread(void)
{
	static boolean_t thread_initialized = FALSE;

	if (thread_initialized == TRUE) {
		vm_pageout_state.vm_pressure_thread_running = TRUE;
		consider_vm_pressure_events();
		vm_pageout_state.vm_pressure_thread_running = FALSE;
	}

	thread_set_thread_name(current_thread(), "VM_pressure");
	thread_initialized = TRUE;
	assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
	thread_block((thread_continue_t)vm_pressure_thread);
}
#endif /* VM_PRESSURE_EVENTS */
/*
 * called once per-second via "compute_averages"
 */
void
compute_pageout_gc_throttle(__unused void *arg)
{
	if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
		vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;

		thread_wakeup((event_t) &vm_pageout_garbage_collect);
	}
}
/*
 * vm_pageout_garbage_collect can also be called when the zone allocator needs
 * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
 * jetsams. We need to check if the zone map size is above its jetsam limit to
 * decide if this was indeed the case.
 *
 * We need to do this on a different thread because of the following reasons:
 *
 * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
 * itself causing the system to hang. We perform synchronous jetsams if we're
 * leaking in the VM map entries zone, so the leaking process could be doing a
 * zalloc for a VM map entry while holding its vm_map lock, when it decides to
 * jetsam itself. We also need the vm_map lock on the process termination path,
 * which would now lead the dying process to deadlock against itself.
 *
 * 2. The jetsam path might need to allocate zone memory itself. We could try
 * using the non-blocking variant of zalloc for this path, but we can still
 * end up trying to do a kernel_memory_allocate when the zone_map is almost
 * full.
 */

extern boolean_t is_zone_map_nearing_exhaustion(void);

void
vm_pageout_garbage_collect(int collect)
{
	if (collect) {
		if (is_zone_map_nearing_exhaustion()) {
			/*
			 * Woken up by the zone allocator for zone-map-exhaustion jetsams.
			 *
			 * Bail out after calling zone_gc (which triggers the
			 * zone-map-exhaustion jetsams). If we fall through, the subsequent
			 * operations that clear out a bunch of caches might allocate zone
			 * memory themselves (for eg. vm_map operations would need VM map
			 * entries). Since the zone map is almost full at this point, we
			 * could end up with a panic. We just need to quickly jetsam a
			 * process and exit here.
			 *
			 * It could so happen that we were woken up to relieve memory
			 * pressure and the zone map also happened to be near its limit at
			 * the time, in which case we'll skip out early. But that should be
			 * ok; if memory pressure persists, the thread will simply be woken
			 * up again.
			 */
			consider_zone_gc(TRUE);
		} else {
			/* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
			boolean_t buf_large_zfree = FALSE;
			boolean_t first_try = TRUE;

			consider_machine_collect();

			do {
				if (consider_buffer_cache_collect != NULL) {
					buf_large_zfree = (*consider_buffer_cache_collect)(0);
				}
				if (first_try == TRUE || buf_large_zfree == TRUE) {
					/*
					 * consider_zone_gc should be last, because the other operations
					 * might return memory to zones.
					 */
					consider_zone_gc(FALSE);
				}
				first_try = FALSE;
			} while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);

			consider_machine_adjust();
		}
	}

	assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);

	thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
	/*NOTREACHED*/
}
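
/*
 * Wakeup sketch (added descriptive comment): both vm_pageout_scan() and
 * compute_pageout_gc_throttle() above nudge this thread the same way, i.e.
 *
 *	thread_wakeup((event_t) &vm_pageout_garbage_collect);
 *
 * and the zone allocator uses the same event when it needs zone_gc run on
 * this thread for zone-map-exhaustion jetsams.
 */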
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */


void
vm_set_restrictions()
{
	int vm_restricted_to_single_processor = 0;

	if (PE_parse_boot_argn("vm_restricted_to_single_processor", &vm_restricted_to_single_processor, sizeof(vm_restricted_to_single_processor))) {
		kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor);
		vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? TRUE : FALSE);
	} else {
		host_basic_info_data_t hinfo;
		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

		assert(hinfo.max_cpus > 0);

		if (hinfo.max_cpus <= 3) {
			/*
			 * on systems with a limited number of CPUS, bind the
			 * 4 major threads that can free memory and that tend to use
			 * a fair bit of CPU under pressured conditions to a single processor.
			 * This insures that these threads don't hog all of the available CPUs
			 * (important for camera launch), while allowing them to run independently
			 * w/r to locks... the 4 threads are
			 * vm_pageout_scan,  vm_pageout_iothread_internal (compressor),
			 * vm_compressor_swap_trigger_thread (minor and major compactions),
			 * memorystatus_thread (jetsams).
			 *
			 * the first time the thread is run, it is responsible for checking the
			 * state of vm_restricted_to_single_processor, and if TRUE it calls
			 * thread_bind_master...  someday this should be replaced with a group
			 * scheduling mechanism and KPI.
			 */
			vm_pageout_state.vm_restricted_to_single_processor = TRUE;
		} else {
			vm_pageout_state.vm_restricted_to_single_processor = FALSE;
		}
	}
}
void
vm_pageout(void)
{
	thread_t        self = current_thread();
	thread_t        thread;
	kern_return_t   result;

	/*
	 * Set thread privileges.
	 */
	vm_pageout_scan_thread = self;

#if CONFIG_VPS_DYNAMIC_PRIO

	int vps_dynprio_bootarg = 0;

	if (PE_parse_boot_argn("vps_dynamic_priority_enabled", &vps_dynprio_bootarg, sizeof(vps_dynprio_bootarg))) {
		vps_dynamic_priority_enabled = (vps_dynprio_bootarg ? TRUE : FALSE);
		kprintf("Overriding vps_dynamic_priority_enabled to %d\n", vps_dynamic_priority_enabled);
	} else {
		if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
			vps_dynamic_priority_enabled = TRUE;
		} else {
			vps_dynamic_priority_enabled = FALSE;
		}
	}

	if (vps_dynamic_priority_enabled) {
		sched_set_kernel_thread_priority(self, MAXPRI_THROTTLE);
		thread_set_eager_preempt(self);
	} else {
		sched_set_kernel_thread_priority(self, BASEPRI_VM);
	}

#else /* CONFIG_VPS_DYNAMIC_PRIO */

	vps_dynamic_priority_enabled = FALSE;
	sched_set_kernel_thread_priority(self, BASEPRI_VM);

#endif /* CONFIG_VPS_DYNAMIC_PRIO */

	thread_lock(self);
	self->options |= TH_OPT_VMPRIV;
	thread_unlock(self);

	if (!self->reserved_stack) {
		self->reserved_stack = self->kernel_stack;
	}

	if (vm_pageout_state.vm_restricted_to_single_processor == TRUE &&
	    vps_dynamic_priority_enabled == FALSE) {
		thread_vm_bind_group_add();
	}

	thread_set_thread_name(current_thread(), "VM_pageout_scan");

	/*
	 * Initialize some paging parameters.
	 */

	vm_pageout_state.vm_pressure_thread_running = FALSE;
	vm_pageout_state.vm_pressure_changed = FALSE;
	vm_pageout_state.memorystatus_purge_on_warning = 2;
	vm_pageout_state.memorystatus_purge_on_urgent = 5;
	vm_pageout_state.memorystatus_purge_on_critical = 8;
	vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
	vm_pageout_state.vm_page_speculative_percentage = 5;
	vm_pageout_state.vm_page_speculative_target = 0;

	vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL;
	vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL;

	vm_pageout_state.vm_pageout_swap_wait = 0;
	vm_pageout_state.vm_pageout_idle_wait = 0;
	vm_pageout_state.vm_pageout_empty_wait = 0;
	vm_pageout_state.vm_pageout_burst_wait = 0;
	vm_pageout_state.vm_pageout_deadlock_wait = 0;
	vm_pageout_state.vm_pageout_deadlock_relief = 0;
	vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;

	vm_pageout_state.vm_pageout_inactive = 0;
	vm_pageout_state.vm_pageout_inactive_used = 0;
	vm_pageout_state.vm_pageout_inactive_clean = 0;

	vm_pageout_state.vm_memory_pressure = 0;
	vm_pageout_state.vm_page_filecache_min = 0;
#if CONFIG_JETSAM
	vm_pageout_state.vm_page_filecache_min_divisor = 70;
	vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
#else
	vm_pageout_state.vm_page_filecache_min_divisor = 27;
	vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
#endif
	vm_pageout_state.vm_page_free_count_init = vm_page_free_count;

	vm_pageout_state.vm_pageout_considered_page_last = 0;

	if (vm_pageout_state.vm_pageout_swap_wait == 0) {
		vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
	}

	if (vm_pageout_state.vm_pageout_idle_wait == 0) {
		vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
	}

	if (vm_pageout_state.vm_pageout_burst_wait == 0) {
		vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
	}

	if (vm_pageout_state.vm_pageout_empty_wait == 0) {
		vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
	}

	if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
		vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
	}

	if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
		vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
	}

	if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
		vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
	}
	/*
	 * even if we've already called vm_page_free_reserve
	 * call it again here to insure that the targets are
	 * accurately calculated (it uses vm_page_free_count_init)
	 * calling it with an arg of 0 will not change the reserve
	 * but will re-calculate free_min and free_target
	 */
	if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
		vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
	} else {
		vm_page_free_reserve(0);
	}

	vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
	vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
	vm_pageout_queue_external.pgo_laundry = 0;
	vm_pageout_queue_external.pgo_idle = FALSE;
	vm_pageout_queue_external.pgo_busy = FALSE;
	vm_pageout_queue_external.pgo_throttled = FALSE;
	vm_pageout_queue_external.pgo_draining = FALSE;
	vm_pageout_queue_external.pgo_lowpriority = FALSE;
	vm_pageout_queue_external.pgo_tid = -1;
	vm_pageout_queue_external.pgo_inited = FALSE;

	vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
	vm_pageout_queue_internal.pgo_maxlaundry = 0;
	vm_pageout_queue_internal.pgo_laundry = 0;
	vm_pageout_queue_internal.pgo_idle = FALSE;
	vm_pageout_queue_internal.pgo_busy = FALSE;
	vm_pageout_queue_internal.pgo_throttled = FALSE;
	vm_pageout_queue_internal.pgo_draining = FALSE;
	vm_pageout_queue_internal.pgo_lowpriority = FALSE;
	vm_pageout_queue_internal.pgo_tid = -1;
	vm_pageout_queue_internal.pgo_inited = FALSE;

	/* internal pageout thread started when default pager registered first time */
	/* external pageout and garbage collection threads started here */

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
	    BASEPRI_VM,
	    &vm_pageout_state.vm_pageout_external_iothread);
	if (result != KERN_SUCCESS) {
		panic("vm_pageout_iothread_external: create failed");
	}
	thread_set_thread_name(vm_pageout_state.vm_pageout_external_iothread, "VM_pageout_external_iothread");
	thread_deallocate(vm_pageout_state.vm_pageout_external_iothread);

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
	    BASEPRI_DEFAULT,
	    &thread);
	if (result != KERN_SUCCESS) {
		panic("vm_pageout_garbage_collect: create failed");
	}
	thread_set_thread_name(thread, "VM_pageout_garbage_collect");
	thread_deallocate(thread);

#if VM_PRESSURE_EVENTS
	result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
	    BASEPRI_DEFAULT,
	    &thread);

	if (result != KERN_SUCCESS) {
		panic("vm_pressure_thread: create failed");
	}

	thread_deallocate(thread);
#endif

	vm_object_reaper_init();


	bzero(&vm_config, sizeof(vm_config));

	switch (vm_compressor_mode) {
	case VM_PAGER_DEFAULT:
		printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");

	case VM_PAGER_COMPRESSOR_WITH_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		vm_config.compressor_is_active = TRUE;
		vm_config.swap_is_active = TRUE;
		break;

	case VM_PAGER_COMPRESSOR_NO_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		vm_config.compressor_is_active = TRUE;
		break;

	case VM_PAGER_FREEZER_DEFAULT:
		printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");

	case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		break;

	case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		vm_config.compressor_is_active = TRUE;
		vm_config.freezer_swap_is_active = TRUE;
		break;

	case VM_PAGER_NOT_CONFIGURED:
		break;

	default:
		printf("unknown compressor mode - %x\n", vm_compressor_mode);
		break;
	}
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		vm_compressor_pager_init();
	}

#if VM_PRESSURE_EVENTS
	vm_pressure_events_enabled = TRUE;
#endif /* VM_PRESSURE_EVENTS */

#if CONFIG_PHANTOM_CACHE
	vm_phantom_cache_init();
#endif
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
	printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
	    (uint64_t) vm_page_fake_buckets_start,
	    (uint64_t) vm_page_fake_buckets_end);
	pmap_protect(kernel_pmap,
	    vm_page_fake_buckets_start,
	    vm_page_fake_buckets_end,
	    VM_PROT_READ);
//	*(char *) vm_page_fake_buckets_start = 'x';	/* panic! */
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */

#if VM_OBJECT_TRACKING
	vm_object_tracking_init();
#endif /* VM_OBJECT_TRACKING */

	vm_pageout_continue();

	/*
	 * The vm_pageout_continue() call above never returns, so the code below is never
	 * executed.  We take advantage of this to declare several DTrace VM related probe
	 * points that our kernel doesn't have an analog for.  These are probe points that
	 * exist in Solaris and are in the DTrace documentation, so people may have written
	 * scripts that use them.  Declaring the probe points here means their scripts will
	 * compile and execute which we want for portability of the scripts, but since this
	 * section of code is never reached, the probe points will simply never fire.  Yes,
	 * this is basically a hack.  The problem is the DTrace probe points were chosen with
	 * Solaris specific VM events in mind, not portability to different VM implementations.
	 */

	DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
	/*NOTREACHED*/
}
kern_return_t
vm_pageout_internal_start(void)
{
	kern_return_t   result;
	int             i;
	host_basic_info_data_t hinfo;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	assert(hinfo.max_cpus > 0);

	lck_grp_init(&vm_pageout_lck_grp, "vm_pageout", LCK_GRP_ATTR_NULL);

#if CONFIG_EMBEDDED
	vm_pageout_state.vm_compressor_thread_count = 1;
#else
	if (hinfo.max_cpus > 4) {
		vm_pageout_state.vm_compressor_thread_count = 2;
	} else {
		vm_pageout_state.vm_compressor_thread_count = 1;
	}
#endif
	PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
	    sizeof(vm_pageout_state.vm_compressor_thread_count));

	if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
		vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
	}
	if (vm_pageout_state.vm_compressor_thread_count <= 0) {
		vm_pageout_state.vm_compressor_thread_count = 1;
	} else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
		vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
	}

	vm_pageout_queue_internal.pgo_maxlaundry = (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;

	PE_parse_boot_argn("vmpgoi_maxlaundry", &vm_pageout_queue_internal.pgo_maxlaundry, sizeof(vm_pageout_queue_internal.pgo_maxlaundry));

	for (i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
		ciq[i].id = i;
		ciq[i].q = &vm_pageout_queue_internal;
		ciq[i].current_chead = NULL;
		ciq[i].scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);

		result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i],
		    BASEPRI_VM, &vm_pageout_state.vm_pageout_internal_iothread);

		if (result == KERN_SUCCESS) {
			thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread);
		} else {
			break;
		}
	}
	return result;
}
#if CONFIG_IOSCHED
/*
 * To support I/O Expedite for compressed files we mark the upls with special flags.
 * The way decmpfs works is that we create a big upl which marks all the pages needed to
 * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
 * then issues smaller I/Os for compressed I/Os, deflates them and puts the data into the pages
 * being held in the big original UPL. We mark each of these smaller UPLs with the flag
 * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
 * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
 * by the req upl lock (the reverse link doesn't need synch. since we never inspect this link
 * unless the real I/O upl is being destroyed).
 */

static void
upl_set_decmp_info(upl_t upl, upl_t src_upl)
{
	assert((src_upl->flags & UPL_DECMP_REQ) != 0);

	upl_lock(src_upl);
	if (src_upl->decmp_io_upl) {
		/*
		 * If there is already an alive real I/O UPL, ignore this new UPL.
		 * This case should rarely happen and even if it does, it just means
		 * that we might issue a spurious expedite which the driver is expected
		 * to handle.
		 */
		upl_unlock(src_upl);
		return;
	}
	src_upl->decmp_io_upl = (void *)upl;
	src_upl->ref_count++;

	upl->flags |= UPL_DECMP_REAL_IO;
	upl->decmp_io_upl = (void *)src_upl;
	upl_unlock(src_upl);
}
#endif /* CONFIG_IOSCHED */
#if CONFIG_IOSCHED || UPL_DEBUG
int upl_debug_enabled = 1;
#else
int upl_debug_enabled = 0;
#endif

static upl_t
upl_create(int type, int flags, upl_size_t size)
{
	upl_t           upl;
	vm_size_t       page_field_size = 0;
	int             upl_flags = 0;
	vm_size_t       upl_size = sizeof(struct upl);

	size = round_page_32(size);

	if (type & UPL_CREATE_LITE) {
		page_field_size = (atop(size) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;

		upl_flags |= UPL_LITE;
	}
	if (type & UPL_CREATE_INTERNAL) {
		upl_size += sizeof(struct upl_page_info) * atop(size);

		upl_flags |= UPL_INTERNAL;
	}
	upl = (upl_t)kalloc(upl_size + page_field_size);

	if (page_field_size) {
		bzero((char *)upl + upl_size, page_field_size);
	}

	upl->flags = upl_flags | flags;
	upl->kaddr = (vm_offset_t)0;
	upl->map_object = NULL;
	upl->ref_count = 1;
	upl->ext_ref_count = 0;
	upl->highest_page = 0;
	upl_lock_init(upl);
	upl->vector_upl = NULL;
	upl->associated_upl = NULL;
	upl->upl_iodone = NULL;
#if CONFIG_IOSCHED
	if (type & UPL_CREATE_IO_TRACKING) {
		upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
	}

	upl->upl_reprio_info = 0;
	upl->decmp_io_upl = 0;
	if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
		/* Only support expedite on internal UPLs */
		thread_t        curthread = current_thread();
		upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
		bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
		upl->flags |= UPL_EXPEDITE_SUPPORTED;
		if (curthread->decmp_upl != NULL) {
			upl_set_decmp_info(upl, curthread->decmp_upl);
		}
	}
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
		upl->upl_creator = current_thread();
		upl->flags |= UPL_TRACKED_BY_OBJECT;
	}
#endif

#ifdef UPL_DEBUG
	upl->ubc_alias1 = 0;
	upl->ubc_alias2 = 0;

	upl->upl_commit_index = 0;
	bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));

	(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif /* UPL_DEBUG */

	return upl;
}
static void
upl_destroy(upl_t upl)
{
	int     page_field_size;  /* bit field in word size buf */
	int     size;

	if (upl->ext_ref_count) {
		panic("upl(%p) ext_ref_count", upl);
	}

#if CONFIG_IOSCHED
	if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
		upl_t src_upl;
		src_upl = upl->decmp_io_upl;
		assert((src_upl->flags & UPL_DECMP_REQ) != 0);
		upl_lock(src_upl);
		src_upl->decmp_io_upl = NULL;
		upl_unlock(src_upl);
		upl_deallocate(src_upl);
	}
#endif /* CONFIG_IOSCHED */

#if CONFIG_IOSCHED || UPL_DEBUG
	if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) {
		vm_object_t     object;

		if (upl->flags & UPL_SHADOWED) {
			object = upl->map_object->shadow;
		} else {
			object = upl->map_object;
		}

		vm_object_lock(object);
		queue_remove(&object->uplq, upl, upl_t, uplq);
		vm_object_activity_end(object);
		vm_object_collapse(object, 0, TRUE);
		vm_object_unlock(object);
	}
#endif
	/*
	 * drop a reference on the map_object whether or
	 * not a pageout object is inserted
	 */
	if (upl->flags & UPL_SHADOWED) {
		vm_object_deallocate(upl->map_object);
	}

	if (upl->flags & UPL_DEVICE_MEMORY) {
		size = PAGE_SIZE;
	} else {
		size = upl->size;
	}
	page_field_size = 0;

	if (upl->flags & UPL_LITE) {
		page_field_size = ((size / PAGE_SIZE) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
	}
	upl_lock_destroy(upl);
	upl->vector_upl = (vector_upl_t) 0xfeedbeef;

#if CONFIG_IOSCHED
	if (upl->flags & UPL_EXPEDITE_SUPPORTED) {
		kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE));
	}
#endif

	if (upl->flags & UPL_INTERNAL) {
		kfree(upl,
		    sizeof(struct upl) +
		    (sizeof(struct upl_page_info) * (size / PAGE_SIZE))
		    + page_field_size);
	} else {
		kfree(upl, sizeof(struct upl) + page_field_size);
	}
}
void
upl_deallocate(upl_t upl)
{
	upl_lock(upl);

	if (--upl->ref_count == 0) {
		if (vector_upl_is_valid(upl)) {
			vector_upl_deallocate(upl);
		}
		upl_unlock(upl);

		if (upl->upl_iodone) {
			upl_callout_iodone(upl);
		}

		upl_destroy(upl);
	} else {
		upl_unlock(upl);
	}
}

#if CONFIG_IOSCHED
void
upl_mark_decmp(upl_t upl)
{
	if (upl->flags & UPL_TRACKED_BY_OBJECT) {
		upl->flags |= UPL_DECMP_REQ;
		upl->upl_creator->decmp_upl = (void *)upl;
	}
}

void
upl_unmark_decmp(upl_t upl)
{
	if (upl && (upl->flags & UPL_DECMP_REQ)) {
		upl->upl_creator->decmp_upl = NULL;
	}
}
#endif /* CONFIG_IOSCHED */
#define VM_PAGE_Q_BACKING_UP(q)		\
	((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))

boolean_t must_throttle_writes(void);

boolean_t
must_throttle_writes()
{
	if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
	    vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) {
		return TRUE;
	}

	return FALSE;
}
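
/*
 * Worked example (illustrative numbers only): with pgo_maxlaundry == 128,
 * VM_PAGE_Q_BACKING_UP() fires once pgo_laundry >= (128 * 8) / 10 == 102,
 * i.e. the external pageout queue is more than ~80% full; writes are then
 * throttled only if pageable external pages also exceed 60% of
 * AVAILABLE_NON_COMPRESSED_MEMORY.
 */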
/*
 *	Routine:	vm_object_upl_request
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		A page list structure, listing the physical pages
 *		will be returned upon request.
 *		This function is called by the file system or any other
 *		supplier of backing store to a pager.
 *		IMPORTANT NOTE: The caller must still respect the relationship
 *		between the vm_object and its backing memory object.  The
 *		caller MUST NOT substitute changes in the backing file
 *		without first doing a memory_object_lock_request on the
 *		target range unless it is known that the pages are not
 *		shared with another entity at the pager level.
 *	Copy_in_to:
 *		if a page list structure is present
 *		return the mapped physical pages, where a
 *		page is not present, return a non-initialized
 *		one.  If the no_sync bit is turned on, don't
 *		call the pager unlock to synchronize with other
 *		possible copies of the page. Leave pages busy
 *		in the original object, if a page list structure
 *		was specified.  When a commit of the page list
 *		pages is done, the dirty bit will be set for each one.
 *	Copy_out_from:
 *		If a page list structure is present, return
 *		all mapped pages.  Where a page does not exist
 *		map a zero filled one. Leave pages busy in
 *		the original object.  If a page list structure
 *		is not specified, this call is a no-op.
 *
 *	Note:  access of default pager objects has a rather interesting
 *		twist.  The caller of this routine, presumably the file system
 *		page cache handling code, will never actually make a request
 *		against a default pager backed object.  Only the default
 *		pager will make requests on backing store related vm_objects
 *		In this way the default pager can maintain the relationship
 *		between backing store files (abstract memory objects) and
 *		the vm_objects (cache objects), they support.
 */
__private_extern__ kern_return_t
vm_object_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag)
{
	vm_page_t               dst_page = VM_PAGE_NULL;
	vm_object_offset_t      dst_offset;
	upl_size_t              xfer_size;
	unsigned int            size_in_pages;
	boolean_t               dirty;
	boolean_t               hw_dirty;
	upl_t                   upl = NULL;
	unsigned int            entry;
	vm_page_t               alias_page = NULL;
	int                     refmod_state = 0;
	wpl_array_t             lite_list = NULL;
	vm_object_t             last_copy_object;
	struct  vm_page_delayed_work    dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct  vm_page_delayed_work    *dwp;
	int                     dw_count;
	int                     dw_limit;
	int                     io_tracking_flag = 0;
	int                     grab_options;
	int                     page_grab_count = 0;
	ppnum_t                 phys_page;
	pmap_flush_context      pmap_flush_context_storage;
	boolean_t               pmap_flushes_delayed = FALSE;
#if DEVELOPMENT || DEBUG
	task_t                  task = current_task();
#endif /* DEVELOPMENT || DEBUG */

	if (cntrl_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}
	if ((!object->internal) && (object->paging_offset != 0)) {
		panic("vm_object_upl_request: external object with non-zero paging offset\n");
	}
	if (object->phys_contiguous) {
		panic("vm_object_upl_request: contiguous object specified\n");
	}

	VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_START, size, cntrl_flags, 0, 0);

	if (size > MAX_UPL_SIZE_BYTES) {
		size = MAX_UPL_SIZE_BYTES;
	}

	if ((cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL) {
		*page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;
	}

#if CONFIG_IOSCHED || UPL_DEBUG
	if (object->io_tracking || upl_debug_enabled) {
		io_tracking_flag |= UPL_CREATE_IO_TRACKING;
	}
#endif
#if CONFIG_IOSCHED
	if (object->io_tracking) {
		io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
	}
#endif

	if (cntrl_flags & UPL_SET_INTERNAL) {
		if (cntrl_flags & UPL_SET_LITE) {
			upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);

			user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
			lite_list = (wpl_array_t)
			    (((uintptr_t)user_page_list) +
			    ((size / PAGE_SIZE) * sizeof(upl_page_info_t)));
			if (size == 0) {
				user_page_list = NULL;
				lite_list = NULL;
			}
		} else {
			upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);

			user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
			if (size == 0) {
				user_page_list = NULL;
			}
		}
	} else {
		if (cntrl_flags & UPL_SET_LITE) {
			upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);

			lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
			if (size == 0) {
				lite_list = NULL;
			}
		} else {
			upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
		}
	}
	*upl_ptr = upl;

	if (user_page_list) {
		user_page_list[0].device = FALSE;
	}

	if (cntrl_flags & UPL_SET_LITE) {
		upl->map_object = object;
	} else {
		upl->map_object = vm_object_allocate(size);
		/*
		 * No neeed to lock the new object: nobody else knows
		 * about it yet, so it's all ours so far.
		 */
		upl->map_object->shadow = object;
		upl->map_object->pageout = TRUE;
		upl->map_object->can_persist = FALSE;
		upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
		upl->map_object->vo_shadow_offset = offset;
		upl->map_object->wimg_bits = object->wimg_bits;

		VM_PAGE_GRAB_FICTITIOUS(alias_page);

		upl->flags |= UPL_SHADOWED;
	}
	if (cntrl_flags & UPL_FOR_PAGEOUT) {
		upl->flags |= UPL_PAGEOUT;
	}

	vm_object_lock(object);
	vm_object_activity_begin(object);

	grab_options = 0;
#if CONFIG_SECLUDED_MEMORY
	if (object->can_grab_secluded) {
		grab_options |= VM_PAGE_GRAB_SECLUDED;
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	/*
	 * we can lock in the paging_offset once paging_in_progress is set
	 */
	upl->size = size;
	upl->offset = offset + object->paging_offset;

#if CONFIG_IOSCHED || UPL_DEBUG
	if (object->io_tracking || upl_debug_enabled) {
		vm_object_activity_begin(object);
		queue_enter(&object->uplq, upl, upl_t, uplq);
	}
#endif
	if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
		/*
		 * Honor copy-on-write obligations
		 *
		 * The caller is gathering these pages and
		 * might modify their contents.  We need to
		 * make sure that the copy object has its own
		 * private copies of these pages before we let
		 * the caller modify them.
		 */
		vm_object_update(object,
		    offset,
		    size,
		    NULL,
		    NULL,
		    FALSE,              /* should_return */
		    MEMORY_OBJECT_COPY_SYNC,
		    VM_PROT_NO_CHANGE);

		VM_PAGEOUT_DEBUG(upl_cow, 1);
		VM_PAGEOUT_DEBUG(upl_cow_pages, (size >> PAGE_SHIFT));
	}
	/*
	 * remember which copy object we synchronized with
	 */
	last_copy_object = object->copy;
	entry = 0;

	xfer_size = size;
	dst_offset = offset;
	size_in_pages = size / PAGE_SIZE;

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
	    object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT)) {
		object->scan_collisions = 0;
	}

	if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
		boolean_t       isSSD = FALSE;

		vnode_pager_get_isSSD(object->pager, &isSSD);

		vm_object_unlock(object);

		OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);

		if (isSSD == TRUE) {
			delay(1000 * size_in_pages);
		} else {
			delay(5000 * size_in_pages);
		}
		OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

		vm_object_lock(object);
	}

	while (xfer_size) {
		dwp->dw_mask = 0;

		if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
			vm_object_unlock(object);
			VM_PAGE_GRAB_FICTITIOUS(alias_page);
			vm_object_lock(object);
		}
		if (cntrl_flags & UPL_COPYOUT_FROM) {
			upl->flags |= UPL_PAGE_SYNC_DONE;

			if (((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
			    dst_page->vmp_fictitious ||
			    dst_page->vmp_absent ||
			    dst_page->vmp_error ||
			    dst_page->vmp_cleaning ||
			    (VM_PAGE_WIRED(dst_page))) {
				if (user_page_list) {
					user_page_list[entry].phys_addr = 0;
				}

				goto try_next_page;
			}
			phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);

			/*
			 * grab this up front...
			 * a high percentange of the time we're going to
			 * need the hardware modification state a bit later
			 * anyway... so we can eliminate an extra call into
			 * the pmap layer by grabbing it here and recording it
			 */
			if (dst_page->vmp_pmapped) {
				refmod_state = pmap_get_refmod(phys_page);
			} else {
				refmod_state = 0;
			}

			if ((refmod_state & VM_MEM_REFERENCED) && VM_PAGE_INACTIVE(dst_page)) {
				/*
				 * page is on inactive list and referenced...
				 * reactivate it now... this gets it out of the
				 * way of vm_pageout_scan which would have to
				 * reactivate it upon tripping over it
				 */
				dwp->dw_mask |= DW_vm_page_activate;
			}
			if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
				/*
				 * we're only asking for DIRTY pages to be returned
				 */
				if (dst_page->vmp_laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
					/*
					 * if we were the page stolen by vm_pageout_scan to be
					 * cleaned (as opposed to a buddy being clustered in
					 * or this request is not being driven by a PAGEOUT cluster
					 * then we only need to check for the page being dirty or
					 * precious to decide whether to return it
					 */
					if (dst_page->vmp_dirty || dst_page->vmp_precious || (refmod_state & VM_MEM_MODIFIED)) {
						goto check_busy;
					}
					goto dont_return;
				}
				/*
				 * this is a request for a PAGEOUT cluster and this page
				 * is merely along for the ride as a 'buddy'... not only
				 * does it have to be dirty to be returned, but it also
				 * can't have been referenced recently...
				 */
				if ((hibernate_cleaning_in_progress == TRUE ||
				    (!((refmod_state & VM_MEM_REFERENCED) || dst_page->vmp_reference) ||
				    (dst_page->vmp_q_state == VM_PAGE_ON_THROTTLED_Q))) &&
				    ((refmod_state & VM_MEM_MODIFIED) || dst_page->vmp_dirty || dst_page->vmp_precious)) {
					goto check_busy;
				}
dont_return:
				/*
				 * if we reach here, we're not to return
				 * the page... go on to the next one
				 */
				if (dst_page->vmp_laundry == TRUE) {
					/*
					 * if we get here, the page is not 'cleaning' (filtered out above).
					 * since it has been referenced, remove it from the laundry
					 * so we don't pay the cost of an I/O to clean a page
					 * we're just going to take back
					 */
					vm_page_lockspin_queues();

					vm_pageout_steal_laundry(dst_page, TRUE);
					vm_page_activate(dst_page);

					vm_page_unlock_queues();
				}
				if (user_page_list) {
					user_page_list[entry].phys_addr = 0;
				}

				goto try_next_page;
			}
check_busy:
			if (dst_page->vmp_busy) {
				if (cntrl_flags & UPL_NOBLOCK) {
					if (user_page_list) {
						user_page_list[entry].phys_addr = 0;
					}
					dwp->dw_mask = 0;

					goto try_next_page;
				}
				/*
				 * someone else is playing with the
				 * page.  We will have to wait.
				 */
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);

				continue;
			}
			if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
				vm_page_lockspin_queues();

				if (dst_page->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
					/*
					 * we've buddied up a page for a clustered pageout
					 * that has already been moved to the pageout
					 * queue by pageout_scan... we need to remove
					 * it from the queue and drop the laundry count
					 * on that queue
					 */
					vm_pageout_throttle_up(dst_page);
				}
				vm_page_unlock_queues();
			}
			hw_dirty = refmod_state & VM_MEM_MODIFIED;
			dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;

			if (phys_page > upl->highest_page) {
				upl->highest_page = phys_page;
			}

			assert(!pmap_is_noencrypt(phys_page));

			if (cntrl_flags & UPL_SET_LITE) {
				unsigned int    pg_num;

				pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
				assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
				lite_list[pg_num >> 5] |= 1U << (pg_num & 31);

				if (hw_dirty) {
					if (pmap_flushes_delayed == FALSE) {
						pmap_flush_context_init(&pmap_flush_context_storage);
						pmap_flushes_delayed = TRUE;
					}
					pmap_clear_refmod_options(phys_page,
					    VM_MEM_MODIFIED,
					    PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_CLEAR_WRITE,
					    &pmap_flush_context_storage);
				}

				/*
				 * Mark original page as cleaning
				 * in place.
				 */
				dst_page->vmp_cleaning = TRUE;
				dst_page->vmp_precious = FALSE;
			} else {
				/*
				 * use pageclean setup, it is more
				 * convenient even for the pageout
				 * cases here
				 */
				vm_object_lock(upl->map_object);
				vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
				vm_object_unlock(upl->map_object);

				alias_page->vmp_absent = FALSE;
				alias_page = NULL;
			}
			if (dirty) {
				SET_PAGE_DIRTY(dst_page, FALSE);
			} else {
				dst_page->vmp_dirty = FALSE;
			}

			if (!dirty) {
				dst_page->vmp_precious = TRUE;
			}

			if (!(cntrl_flags & UPL_CLEAN_IN_PLACE)) {
				if (!VM_PAGE_WIRED(dst_page)) {
					dst_page->vmp_free_when_done = TRUE;
				}
			}
5747 if ((cntrl_flags
& UPL_WILL_MODIFY
) && object
->copy
!= last_copy_object
) {
5749 * Honor copy-on-write obligations
5751 * The copy object has changed since we
5752 * last synchronized for copy-on-write.
5753 * Another copy object might have been
5754 * inserted while we released the object's
5755 * lock. Since someone could have seen the
5756 * original contents of the remaining pages
5757 * through that new object, we have to
5758 * synchronize with it again for the remaining
5759 * pages only. The previous pages are "busy"
5760 * so they can not be seen through the new
5761 * mapping. The new mapping will see our
5762 * upcoming changes for those previous pages,
5763 * but that's OK since they couldn't see what
5764 * was there before. It's just a race anyway
5765 * and there's no guarantee of consistency or
5766 * atomicity. We just don't want new mappings
5767 * to see both the *before* and *after* pages.
5769 if (object
->copy
!= VM_OBJECT_NULL
) {
5772 dst_offset
,/* current offset */
5773 xfer_size
, /* remaining size */
5776 FALSE
, /* should_return */
5777 MEMORY_OBJECT_COPY_SYNC
,
5780 VM_PAGEOUT_DEBUG(upl_cow_again
, 1);
5781 VM_PAGEOUT_DEBUG(upl_cow_again_pages
, (xfer_size
>> PAGE_SHIFT
));
5784 * remember the copy object we synced with
5786 last_copy_object
= object
->copy
;
5788 dst_page
= vm_page_lookup(object
, dst_offset
);
5790 if (dst_page
!= VM_PAGE_NULL
) {
5791 if ((cntrl_flags
& UPL_RET_ONLY_ABSENT
)) {
5793 * skip over pages already present in the cache
5795 if (user_page_list
) {
5796 user_page_list
[entry
].phys_addr
= 0;
5801 if (dst_page
->vmp_fictitious
) {
5802 panic("need corner case for fictitious page");
5805 if (dst_page
->vmp_busy
|| dst_page
->vmp_cleaning
) {
5807 * someone else is playing with the
5808 * page. We will have to wait.
5810 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
5814 if (dst_page
->vmp_laundry
) {
5815 vm_pageout_steal_laundry(dst_page
, FALSE
);
5818 if (object
->private) {
5820 * This is a nasty wrinkle for users
5821 * of upl who encounter device or
5822 * private memory however, it is
5823 * unavoidable, only a fault can
5824 * resolve the actual backing
5825 * physical page by asking the
5828 if (user_page_list
) {
5829 user_page_list
[entry
].phys_addr
= 0;
5834 if (object
->scan_collisions
) {
5836 * the pageout_scan thread is trying to steal
5837 * pages from this object, but has run into our
5838 * lock... grab 2 pages from the head of the object...
5839 * the first is freed on behalf of pageout_scan, the
5840 * 2nd is for our own use... we use vm_object_page_grab
5841 * in both cases to avoid taking pages from the free
5842 * list since we are under memory pressure and our
5843 * lock on this object is getting in the way of
5846 dst_page
= vm_object_page_grab(object
);
5848 if (dst_page
!= VM_PAGE_NULL
) {
5849 vm_page_release(dst_page
,
5853 dst_page
= vm_object_page_grab(object
);
5855 if (dst_page
== VM_PAGE_NULL
) {
5857 * need to allocate a page
5859 dst_page
= vm_page_grab_options(grab_options
);
5860 if (dst_page
!= VM_PAGE_NULL
) {
5864 if (dst_page
== VM_PAGE_NULL
) {
5865 if ((cntrl_flags
& (UPL_RET_ONLY_ABSENT
| UPL_NOBLOCK
)) == (UPL_RET_ONLY_ABSENT
| UPL_NOBLOCK
)) {
5867 * we don't want to stall waiting for pages to come onto the free list
5868 * while we're already holding absent pages in this UPL
5869 * the caller will deal with the empty slots
5871 if (user_page_list
) {
5872 user_page_list
[entry
].phys_addr
= 0;
5878 * no pages available... wait
5879 * then try again for the same
5882 vm_object_unlock(object
);
5884 OSAddAtomic(size_in_pages
, &vm_upl_wait_for_pages
);
5886 VM_DEBUG_EVENT(vm_upl_page_wait
, VM_UPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
5889 OSAddAtomic(-size_in_pages
, &vm_upl_wait_for_pages
);
5891 VM_DEBUG_EVENT(vm_upl_page_wait
, VM_UPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
5893 vm_object_lock(object
);
5897 vm_page_insert(dst_page
, object
, dst_offset
);
5899 dst_page
->vmp_absent
= TRUE
;
5900 dst_page
->vmp_busy
= FALSE
;
5902 if (cntrl_flags
& UPL_RET_ONLY_ABSENT
) {
5904 * if UPL_RET_ONLY_ABSENT was specified,
5905 * than we're definitely setting up a
5906 * upl for a clustered read/pagein
5907 * operation... mark the pages as clustered
5908 * so upl_commit_range can put them on the
5911 dst_page
->vmp_clustered
= TRUE
;
5913 if (!(cntrl_flags
& UPL_FILE_IO
)) {
5914 VM_STAT_INCR(pageins
);
5918 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
5920 dst_page
->vmp_overwriting
= TRUE
;
5922 if (dst_page
->vmp_pmapped
) {
5923 if (!(cntrl_flags
& UPL_FILE_IO
)) {
5925 * eliminate all mappings from the
* original object and its progeny
5928 refmod_state
= pmap_disconnect(phys_page
);
5930 refmod_state
= pmap_get_refmod(phys_page
);
5936 hw_dirty
= refmod_state
& VM_MEM_MODIFIED
;
5937 dirty
= hw_dirty
? TRUE
: dst_page
->vmp_dirty
;
5939 if (cntrl_flags
& UPL_SET_LITE
) {
5940 unsigned int pg_num
;
5942 pg_num
= (unsigned int) ((dst_offset
- offset
) / PAGE_SIZE
);
5943 assert(pg_num
== (dst_offset
- offset
) / PAGE_SIZE
);
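                /*
                 * lite_list is a bitmap with one bit per page covered by this
                 * UPL: 32 pages per word, hence the ">> 5" word index and
                 * the "& 31" bit index.
                 */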
5944 lite_list
[pg_num
>> 5] |= 1U << (pg_num
& 31);
5947 pmap_clear_modify(phys_page
);
5951 * Mark original page as cleaning
5954 dst_page
->vmp_cleaning
= TRUE
;
5955 dst_page
->vmp_precious
= FALSE
;
5958 * use pageclean setup, it is more
5959 * convenient even for the pageout
5962 vm_object_lock(upl
->map_object
);
5963 vm_pageclean_setup(dst_page
, alias_page
, upl
->map_object
, size
- xfer_size
);
5964 vm_object_unlock(upl
->map_object
);
5966 alias_page
->vmp_absent
= FALSE
;
5970 if (cntrl_flags
& UPL_REQUEST_SET_DIRTY
) {
5971 upl
->flags
&= ~UPL_CLEAR_DIRTY
;
5972 upl
->flags
|= UPL_SET_DIRTY
;
5975 * Page belonging to a code-signed object is about to
5976 * be written. Mark it tainted and disconnect it from
5977 * all pmaps so processes have to fault it back in and
5978 * deal with the tainted bit.
5980 if (object
->code_signed
&& dst_page
->vmp_cs_tainted
== FALSE
) {
5981 dst_page
->vmp_cs_tainted
= TRUE
;
5982 vm_page_upl_tainted
++;
5983 if (dst_page
->vmp_pmapped
) {
5984 refmod_state
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page
));
5985 if (refmod_state
& VM_MEM_REFERENCED
) {
5986 dst_page
->vmp_reference
= TRUE
;
5990 } else if (cntrl_flags
& UPL_CLEAN_IN_PLACE
) {
5992 * clean in place for read implies
5993 * that a write will be done on all
5994 * the pages that are dirty before
5995 * a upl commit is done. The caller
5996 * is obligated to preserve the
5997 * contents of all pages marked dirty
5999 upl
->flags
|= UPL_CLEAR_DIRTY
;
6001 dst_page
->vmp_dirty
= dirty
;
6004 dst_page
->vmp_precious
= TRUE
;
6007 if (!VM_PAGE_WIRED(dst_page
)) {
6009 * deny access to the target page while
6010 * it is being worked on
6012 dst_page
->vmp_busy
= TRUE
;
6014 dwp
->dw_mask
|= DW_vm_page_wire
;
6018 * We might be about to satisfy a fault which has been
6019 * requested. So no need for the "restart" bit.
6021 dst_page
->vmp_restart
= FALSE
;
6022 if (!dst_page
->vmp_absent
&& !(cntrl_flags
& UPL_WILL_MODIFY
)) {
6024 * expect the page to be used
6026 dwp
->dw_mask
|= DW_set_reference
;
6028 if (cntrl_flags
& UPL_PRECIOUS
) {
6029 if (object
->internal
) {
6030 SET_PAGE_DIRTY(dst_page
, FALSE
);
6031 dst_page
->vmp_precious
= FALSE
;
6033 dst_page
->vmp_precious
= TRUE
;
6036 dst_page
->vmp_precious
= FALSE
;
6039 if (dst_page
->vmp_busy
) {
6040 upl
->flags
|= UPL_HAS_BUSY
;
6043 if (phys_page
> upl
->highest_page
) {
6044 upl
->highest_page
= phys_page
;
6046 assert(!pmap_is_noencrypt(phys_page
));
6047 if (user_page_list
) {
6048 user_page_list
[entry
].phys_addr
= phys_page
;
6049 user_page_list
[entry
].free_when_done
= dst_page
->vmp_free_when_done
;
6050 user_page_list
[entry
].absent
= dst_page
->vmp_absent
;
6051 user_page_list
[entry
].dirty
= dst_page
->vmp_dirty
;
6052 user_page_list
[entry
].precious
= dst_page
->vmp_precious
;
6053 user_page_list
[entry
].device
= FALSE
;
6054 user_page_list
[entry
].needed
= FALSE
;
6055 if (dst_page
->vmp_clustered
== TRUE
) {
6056 user_page_list
[entry
].speculative
= (dst_page
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) ? TRUE
: FALSE
;
6058 user_page_list
[entry
].speculative
= FALSE
;
6060 user_page_list
[entry
].cs_validated
= dst_page
->vmp_cs_validated
;
6061 user_page_list
[entry
].cs_tainted
= dst_page
->vmp_cs_tainted
;
6062 user_page_list
[entry
].cs_nx
= dst_page
->vmp_cs_nx
;
6063 user_page_list
[entry
].mark
= FALSE
;
6066 * if UPL_RET_ONLY_ABSENT is set, then
6067 * we are working with a fresh page and we've
6068 * just set the clustered flag on it to
* indicate that it was dragged in as part of a
6070 * speculative cluster... so leave it alone
6072 if (!(cntrl_flags
& UPL_RET_ONLY_ABSENT
)) {
6074 * someone is explicitly grabbing this page...
6075 * update clustered and speculative state
6078 if (dst_page
->vmp_clustered
) {
6079 VM_PAGE_CONSUME_CLUSTERED(dst_page
);
6084 if (dwp
->dw_mask
& DW_vm_page_activate
) {
6085 VM_STAT_INCR(reactivations
);
6088 VM_PAGE_ADD_DELAYED_WORK(dwp
, dst_page
, dw_count
);
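            /*
             * Page-queue work for this page is batched as "delayed work";
             * once dw_count reaches dw_limit the batch is flushed through
             * vm_page_do_delayed_work().
             */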
6090 if (dw_count
>= dw_limit
) {
6091 vm_page_do_delayed_work(object
, tag
, &dw_array
[0], dw_count
);
6098 dst_offset
+= PAGE_SIZE_64
;
6099 xfer_size
-= PAGE_SIZE
;
6102 vm_page_do_delayed_work(object
, tag
, &dw_array
[0], dw_count
);
6105 if (alias_page
!= NULL
) {
6106 VM_PAGE_FREE(alias_page
);
6108 if (pmap_flushes_delayed
== TRUE
) {
6109 pmap_flush(&pmap_flush_context_storage
);
6112 if (page_list_count
!= NULL
) {
6113 if (upl
->flags
& UPL_INTERNAL
) {
6114 *page_list_count
= 0;
6115 } else if (*page_list_count
> entry
) {
6116 *page_list_count
= entry
;
6122 vm_object_unlock(object
);
6124 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request
, VM_UPL_REQUEST
, DBG_FUNC_END
, page_grab_count
, 0, 0, 0);
6125 #if DEVELOPMENT || DEBUG
6127 ledger_credit(task
->ledger
, task_ledgers
.pages_grabbed_upl
, page_grab_count
);
6129 #endif /* DEVELOPMENT || DEBUG */
6131 return KERN_SUCCESS
;
}

/*
 *  Routine:    vm_object_super_upl_request
 *  Purpose:
 *      Cause the population of a portion of a vm_object
 *      in much the same way as memory_object_upl_request.
 *      Depending on the nature of the request, the pages
 *      returned may contain valid data or be uninitialized.
 *      However, the region may be expanded up to the super
 *      cluster size provided.
 */
__private_extern__ kern_return_t
vm_object_super_upl_request(
    vm_object_t          object,
    vm_object_offset_t   offset,
    upl_size_t           size,
    upl_size_t           super_cluster,
    upl_t                *upl,
    upl_page_info_t      *user_page_list,
    unsigned int         *page_list_count,
    upl_control_flags_t  cntrl_flags,
    vm_tag_t             tag)
{
    if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) {
        return KERN_FAILURE;
    }

    assert(object->paging_in_progress);
    offset = offset - object->paging_offset;

    if (super_cluster > size) {
        vm_object_offset_t   base_offset;
        upl_size_t           super_size;
        vm_object_size_t     super_size_64;

        base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
        super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster;
        super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
        super_size = (upl_size_t) super_size_64;
        assert(super_size == super_size_64);

        if (offset > (base_offset + super_size)) {
            panic("vm_object_super_upl_request: Missed target pageout"
                " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
                offset, base_offset, super_size, super_cluster,
                size, object->paging_offset);
        }
        /*
         * apparently there is a case where the vm requests a
         * page to be written out whose offset is beyond the
         * object size
         */
        if ((offset + size) > (base_offset + super_size)) {
            super_size_64 = (offset + size) - base_offset;
            super_size = (upl_size_t) super_size_64;
            assert(super_size == super_size_64);
        }

        offset = base_offset;
        size = super_size;
    }
    return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
}

#if CONFIG_EMBEDDED
int cs_executable_create_upl = 0;
extern int proc_selfpid(void);
extern char *proc_name_address(void *p);
#endif /* CONFIG_EMBEDDED */
kern_return_t
vm_map_create_upl(
    vm_map_t               map,
    vm_map_address_t       offset,
    upl_size_t             *upl_size,
    upl_t                  *upl,
    upl_page_info_array_t  page_list,
    unsigned int           *count,
    upl_control_flags_t    *flags,
    vm_tag_t               tag)
{
    vm_map_entry_t       entry;
    upl_control_flags_t  caller_flags;
    int                  force_data_sync;
    boolean_t            sync_cow_data;
    vm_object_t          local_object;
    vm_map_offset_t      local_offset;
    vm_map_offset_t      local_start;
    kern_return_t        ret;

    assert(page_aligned(offset));

    caller_flags = *flags;

    if (caller_flags & ~UPL_VALID_FLAGS) {
        /*
         * For forward compatibility's sake,
         * reject any unknown flag.
         */
        return KERN_INVALID_VALUE;
    }
    force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
    sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);

    if (upl == NULL) {
        return KERN_INVALID_ARGUMENT;
    }

REDISCOVER_ENTRY:
    vm_map_lock_read(map);
6245 if (!vm_map_lookup_entry(map
, offset
, &entry
)) {
6246 vm_map_unlock_read(map
);
6247 return KERN_FAILURE
;
6250 if ((entry
->vme_end
- offset
) < *upl_size
) {
6251 *upl_size
= (upl_size_t
) (entry
->vme_end
- offset
);
6252 assert(*upl_size
== entry
->vme_end
- offset
);
6255 if (caller_flags
& UPL_QUERY_OBJECT_TYPE
) {
6258 if (!entry
->is_sub_map
&&
6259 VME_OBJECT(entry
) != VM_OBJECT_NULL
) {
6260 if (VME_OBJECT(entry
)->private) {
6261 *flags
= UPL_DEV_MEMORY
;
6264 if (VME_OBJECT(entry
)->phys_contiguous
) {
6265 *flags
|= UPL_PHYS_CONTIG
;
6268 vm_map_unlock_read(map
);
6269 return KERN_SUCCESS
;
6272 if (VME_OBJECT(entry
) == VM_OBJECT_NULL
||
6273 !VME_OBJECT(entry
)->phys_contiguous
) {
6274 if (*upl_size
> MAX_UPL_SIZE_BYTES
) {
6275 *upl_size
= MAX_UPL_SIZE_BYTES
;
6280 * Create an object if necessary.
6282 if (VME_OBJECT(entry
) == VM_OBJECT_NULL
) {
6283 if (vm_map_lock_read_to_write(map
)) {
6284 goto REDISCOVER_ENTRY
;
6287 VME_OBJECT_SET(entry
,
6288 vm_object_allocate((vm_size_t
)
6290 entry
->vme_start
)));
6291 VME_OFFSET_SET(entry
, 0);
6292 assert(entry
->use_pmap
);
6294 vm_map_lock_write_to_read(map
);
6297 if (!(caller_flags
& UPL_COPYOUT_FROM
) &&
6298 !entry
->is_sub_map
&&
6299 !(entry
->protection
& VM_PROT_WRITE
)) {
6300 vm_map_unlock_read(map
);
6301 return KERN_PROTECTION_FAILURE
;
6305 if (map
->pmap
!= kernel_pmap
&&
6306 (caller_flags
& UPL_COPYOUT_FROM
) &&
6307 (entry
->protection
& VM_PROT_EXECUTE
) &&
6308 !(entry
->protection
& VM_PROT_WRITE
)) {
6313 * We're about to create a read-only UPL backed by
6314 * memory from an executable mapping.
6315 * Wiring the pages would result in the pages being copied
6316 * (due to the "MAP_PRIVATE" mapping) and no longer
6317 * code-signed, so no longer eligible for execution.
6318 * Instead, let's copy the data into a kernel buffer and
6319 * create the UPL from this kernel buffer.
6320 * The kernel buffer is then freed, leaving the UPL holding
6321 * the last reference on the VM object, so the memory will
6322 * be released when the UPL is committed.
6325 vm_map_unlock_read(map
);
6326 /* allocate kernel buffer */
6327 ksize
= round_page(*upl_size
);
6329 ret
= kmem_alloc_pageable(kernel_map
,
6333 if (ret
== KERN_SUCCESS
) {
6334 /* copyin the user data */
6335 assert(page_aligned(offset
));
6336 ret
= copyinmap(map
, offset
, (void *)kaddr
, *upl_size
);
6338 if (ret
== KERN_SUCCESS
) {
6339 if (ksize
> *upl_size
) {
6340 /* zero out the extra space in kernel buffer */
6341 memset((void *)(kaddr
+ *upl_size
),
6345 /* create the UPL from the kernel buffer */
6346 ret
= vm_map_create_upl(kernel_map
, kaddr
, upl_size
,
6347 upl
, page_list
, count
, flags
, tag
);
6350 /* free the kernel buffer */
6351 kmem_free(kernel_map
, kaddr
, ksize
);
6355 #if DEVELOPMENT || DEBUG
6356 DTRACE_VM4(create_upl_from_executable
,
6358 vm_map_address_t
, offset
,
6359 upl_size_t
, *upl_size
,
6360 kern_return_t
, ret
);
6361 #endif /* DEVELOPMENT || DEBUG */
6364 #endif /* CONFIG_EMBEDDED */
6366 local_object
= VME_OBJECT(entry
);
6367 assert(local_object
!= VM_OBJECT_NULL
);
6369 if (!entry
->is_sub_map
&&
6370 !entry
->needs_copy
&&
6372 local_object
->vo_size
> *upl_size
&& /* partial UPL */
6373 entry
->wired_count
== 0 && /* No COW for entries that are wired */
6374 (map
->pmap
!= kernel_pmap
) && /* alias checks */
6375 (vm_map_entry_should_cow_for_true_share(entry
) /* case 1 */
6378 local_object
->internal
&&
6379 (local_object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) &&
6380 local_object
->ref_count
> 1))) {
6385 * Set up the targeted range for copy-on-write to avoid
6386 * applying true_share/copy_delay to the entire object.
6389 * This map entry covers only part of an internal
6390 * object. There could be other map entries covering
6391 * other areas of this object and some of these map
6392 * entries could be marked as "needs_copy", which
6393 * assumes that the object is COPY_SYMMETRIC.
6394 * To avoid marking this object as COPY_DELAY and
6395 * "true_share", let's shadow it and mark the new
6396 * (smaller) object as "true_share" and COPY_DELAY.
6399 if (vm_map_lock_read_to_write(map
)) {
6400 goto REDISCOVER_ENTRY
;
6402 vm_map_lock_assert_exclusive(map
);
6403 assert(VME_OBJECT(entry
) == local_object
);
6405 vm_map_clip_start(map
,
6407 vm_map_trunc_page(offset
,
6408 VM_MAP_PAGE_MASK(map
)));
6409 vm_map_clip_end(map
,
6411 vm_map_round_page(offset
+ *upl_size
,
6412 VM_MAP_PAGE_MASK(map
)));
6413 if ((entry
->vme_end
- offset
) < *upl_size
) {
6414 *upl_size
= (upl_size_t
) (entry
->vme_end
- offset
);
6415 assert(*upl_size
== entry
->vme_end
- offset
);
6418 prot
= entry
->protection
& ~VM_PROT_WRITE
;
6419 if (override_nx(map
, VME_ALIAS(entry
)) && prot
) {
6420 prot
|= VM_PROT_EXECUTE
;
6422 vm_object_pmap_protect(local_object
,
6424 entry
->vme_end
- entry
->vme_start
,
6425 ((entry
->is_shared
||
6426 map
->mapped_in_other_pmaps
)
6432 assert(entry
->wired_count
== 0);
6435 * Lock the VM object and re-check its status: if it's mapped
6436 * in another address space, we could still be racing with
6437 * another thread holding that other VM map exclusively.
6439 vm_object_lock(local_object
);
6440 if (local_object
->true_share
) {
6441 /* object is already in proper state: no COW needed */
6442 assert(local_object
->copy_strategy
!=
6443 MEMORY_OBJECT_COPY_SYMMETRIC
);
6445 /* not true_share: ask for copy-on-write below */
6446 assert(local_object
->copy_strategy
==
6447 MEMORY_OBJECT_COPY_SYMMETRIC
);
6448 entry
->needs_copy
= TRUE
;
6450 vm_object_unlock(local_object
);
6452 vm_map_lock_write_to_read(map
);
6455 if (entry
->needs_copy
) {
6457 * Honor copy-on-write for COPY_SYMMETRIC
6462 vm_object_offset_t new_offset
;
6465 vm_map_version_t version
;
6467 vm_prot_t fault_type
;
6471 if (caller_flags
& UPL_COPYOUT_FROM
) {
6472 fault_type
= VM_PROT_READ
| VM_PROT_COPY
;
6473 vm_counters
.create_upl_extra_cow
++;
6474 vm_counters
.create_upl_extra_cow_pages
+=
6475 (entry
->vme_end
- entry
->vme_start
) / PAGE_SIZE
;
6477 fault_type
= VM_PROT_WRITE
;
6479 if (vm_map_lookup_locked(&local_map
,
6481 OBJECT_LOCK_EXCLUSIVE
,
6483 &new_offset
, &prot
, &wired
,
6485 &real_map
) != KERN_SUCCESS
) {
6486 if (fault_type
== VM_PROT_WRITE
) {
6487 vm_counters
.create_upl_lookup_failure_write
++;
6489 vm_counters
.create_upl_lookup_failure_copy
++;
6491 vm_map_unlock_read(local_map
);
6492 return KERN_FAILURE
;
6494 if (real_map
!= map
) {
6495 vm_map_unlock(real_map
);
6497 vm_map_unlock_read(local_map
);
6499 vm_object_unlock(object
);
6501 goto REDISCOVER_ENTRY
;
6504 if (entry
->is_sub_map
) {
6507 submap
= VME_SUBMAP(entry
);
6508 local_start
= entry
->vme_start
;
6509 local_offset
= VME_OFFSET(entry
);
6511 vm_map_reference(submap
);
6512 vm_map_unlock_read(map
);
6514 ret
= vm_map_create_upl(submap
,
6515 local_offset
+ (offset
- local_start
),
6516 upl_size
, upl
, page_list
, count
, flags
, tag
);
6517 vm_map_deallocate(submap
);
6522 if (sync_cow_data
&&
6523 (VME_OBJECT(entry
)->shadow
||
6524 VME_OBJECT(entry
)->copy
)) {
6525 local_object
= VME_OBJECT(entry
);
6526 local_start
= entry
->vme_start
;
6527 local_offset
= VME_OFFSET(entry
);
6529 vm_object_reference(local_object
);
6530 vm_map_unlock_read(map
);
6532 if (local_object
->shadow
&& local_object
->copy
) {
6533 vm_object_lock_request(local_object
->shadow
,
6534 ((vm_object_offset_t
)
6535 ((offset
- local_start
) +
6537 local_object
->vo_shadow_offset
),
6539 MEMORY_OBJECT_DATA_SYNC
,
6542 sync_cow_data
= FALSE
;
6543 vm_object_deallocate(local_object
);
6545 goto REDISCOVER_ENTRY
;
6547 if (force_data_sync
) {
6548 local_object
= VME_OBJECT(entry
);
6549 local_start
= entry
->vme_start
;
6550 local_offset
= VME_OFFSET(entry
);
6552 vm_object_reference(local_object
);
6553 vm_map_unlock_read(map
);
6555 vm_object_lock_request(local_object
,
6556 ((vm_object_offset_t
)
6557 ((offset
- local_start
) +
6559 (vm_object_size_t
)*upl_size
,
6561 MEMORY_OBJECT_DATA_SYNC
,
6564 force_data_sync
= FALSE
;
6565 vm_object_deallocate(local_object
);
6567 goto REDISCOVER_ENTRY
;
6569 if (VME_OBJECT(entry
)->private) {
6570 *flags
= UPL_DEV_MEMORY
;
6575 if (VME_OBJECT(entry
)->phys_contiguous
) {
6576 *flags
|= UPL_PHYS_CONTIG
;
6579 local_object
= VME_OBJECT(entry
);
6580 local_offset
= VME_OFFSET(entry
);
6581 local_start
= entry
->vme_start
;
6585 * Wiring will copy the pages to the shadow object.
6586 * The shadow object will not be code-signed so
6587 * attempting to execute code from these copied pages
6588 * would trigger a code-signing violation.
6590 if (entry
->protection
& VM_PROT_EXECUTE
) {
6592 printf("pid %d[%s] create_upl out of executable range from "
6593 "0x%llx to 0x%llx: side effects may include "
6594 "code-signing violations later on\n",
6596 (current_task()->bsd_info
6597 ? proc_name_address(current_task()->bsd_info
)
6599 (uint64_t) entry
->vme_start
,
6600 (uint64_t) entry
->vme_end
);
6601 #endif /* MACH_ASSERT */
6602 DTRACE_VM2(cs_executable_create_upl
,
6603 uint64_t, (uint64_t)entry
->vme_start
,
6604 uint64_t, (uint64_t)entry
->vme_end
);
6605 cs_executable_create_upl
++;
6607 #endif /* CONFIG_EMBEDDED */
6609 vm_object_lock(local_object
);
6612 * Ensure that this object is "true_share" and "copy_delay" now,
6613 * while we're still holding the VM map lock. After we unlock the map,
6614 * anything could happen to that mapping, including some copy-on-write
6615 * activity. We need to make sure that the IOPL will point at the
6616 * same memory as the mapping.
6618 if (local_object
->true_share
) {
6619 assert(local_object
->copy_strategy
!=
6620 MEMORY_OBJECT_COPY_SYMMETRIC
);
6621 } else if (local_object
!= kernel_object
&&
6622 local_object
!= compressor_object
&&
6623 !local_object
->phys_contiguous
) {
6624 #if VM_OBJECT_TRACKING_OP_TRUESHARE
6625 if (!local_object
->true_share
&&
6626 vm_object_tracking_inited
) {
6627 void *bt
[VM_OBJECT_TRACKING_BTDEPTH
];
6629 num
= OSBacktrace(bt
,
6630 VM_OBJECT_TRACKING_BTDEPTH
);
6631 btlog_add_entry(vm_object_tracking_btlog
,
6633 VM_OBJECT_TRACKING_OP_TRUESHARE
,
6637 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
6638 local_object
->true_share
= TRUE
;
6639 if (local_object
->copy_strategy
==
6640 MEMORY_OBJECT_COPY_SYMMETRIC
) {
6641 local_object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6645 vm_object_reference_locked(local_object
);
6646 vm_object_unlock(local_object
);
6648 vm_map_unlock_read(map
);
6650 ret
= vm_object_iopl_request(local_object
,
6651 ((vm_object_offset_t
)
6652 ((offset
- local_start
) + local_offset
)),
6659 vm_object_deallocate(local_object
);
6665 * Internal routine to enter a UPL into a VM map.
6667 * JMM - This should just be doable through the standard
6668 * vm_map_enter() API.
6674 vm_map_offset_t
*dst_addr
)
6677 vm_object_offset_t offset
;
6678 vm_map_offset_t addr
;
6681 int isVectorUPL
= 0, curr_upl
= 0;
6682 upl_t vector_upl
= NULL
;
6683 vm_offset_t vector_upl_dst_addr
= 0;
6684 vm_map_t vector_upl_submap
= NULL
;
6685 upl_offset_t subupl_offset
= 0;
6686 upl_size_t subupl_size
= 0;
6688 if (upl
== UPL_NULL
) {
6689 return KERN_INVALID_ARGUMENT
;
6692 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
6693 int mapped
= 0, valid_upls
= 0;
6696 upl_lock(vector_upl
);
6697 for (curr_upl
= 0; curr_upl
< MAX_VECTOR_UPL_ELEMENTS
; curr_upl
++) {
6698 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
);
6703 if (UPL_PAGE_LIST_MAPPED
& upl
->flags
) {
6709 if (mapped
!= valid_upls
) {
6710 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped
, valid_upls
);
6712 upl_unlock(vector_upl
);
6713 return KERN_FAILURE
;
6717 kr
= kmem_suballoc(map
, &vector_upl_dst_addr
, vector_upl
->size
, FALSE
,
6718 VM_FLAGS_ANYWHERE
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_NONE
,
6719 &vector_upl_submap
);
6720 if (kr
!= KERN_SUCCESS
) {
6721 panic("Vector UPL submap allocation failed\n");
6723 map
= vector_upl_submap
;
6724 vector_upl_set_submap(vector_upl
, vector_upl_submap
, vector_upl_dst_addr
);
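    /*
     * A vector UPL is given one contiguous range (a submap) in the target
     * map up front; each sub-UPL is then mapped at its recorded offset
     * within that range.
     */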
6730 process_upl_to_enter
:
6732 if (curr_upl
== MAX_VECTOR_UPL_ELEMENTS
) {
6733 *dst_addr
= vector_upl_dst_addr
;
6734 upl_unlock(vector_upl
);
6735 return KERN_SUCCESS
;
6737 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
++ );
6739 goto process_upl_to_enter
;
6742 vector_upl_get_iostate(vector_upl
, upl
, &subupl_offset
, &subupl_size
);
6743 *dst_addr
= (vm_map_offset_t
)(vector_upl_dst_addr
+ (vm_map_offset_t
)subupl_offset
);
6746 * check to see if already mapped
6748 if (UPL_PAGE_LIST_MAPPED
& upl
->flags
) {
6750 return KERN_FAILURE
;
6753 if ((!(upl
->flags
& UPL_SHADOWED
)) &&
6754 ((upl
->flags
& UPL_HAS_BUSY
) ||
6755 !((upl
->flags
& (UPL_DEVICE_MEMORY
| UPL_IO_WIRE
)) || (upl
->map_object
->phys_contiguous
)))) {
6757 vm_page_t alias_page
;
6758 vm_object_offset_t new_offset
;
6759 unsigned int pg_num
;
6760 wpl_array_t lite_list
;
6762 if (upl
->flags
& UPL_INTERNAL
) {
6763 lite_list
= (wpl_array_t
)
6764 ((((uintptr_t)upl
) + sizeof(struct upl
))
6765 + ((upl
->size
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
6767 lite_list
= (wpl_array_t
)(((uintptr_t)upl
) + sizeof(struct upl
));
6769 object
= upl
->map_object
;
6770 upl
->map_object
= vm_object_allocate(upl
->size
);
6772 vm_object_lock(upl
->map_object
);
6774 upl
->map_object
->shadow
= object
;
6775 upl
->map_object
->pageout
= TRUE
;
6776 upl
->map_object
->can_persist
= FALSE
;
6777 upl
->map_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
6778 upl
->map_object
->vo_shadow_offset
= upl
->offset
- object
->paging_offset
;
6779 upl
->map_object
->wimg_bits
= object
->wimg_bits
;
6780 offset
= upl
->map_object
->vo_shadow_offset
;
6784 upl
->flags
|= UPL_SHADOWED
;
6787 pg_num
= (unsigned int) (new_offset
/ PAGE_SIZE
);
6788 assert(pg_num
== new_offset
/ PAGE_SIZE
);
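            /*
             * Only pages marked in the lite_list bitmap get a fictitious
             * "alias" page inserted into the shadow map_object below.
             */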
6790 if (lite_list
[pg_num
>> 5] & (1U << (pg_num
& 31))) {
6791 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
6793 vm_object_lock(object
);
6795 m
= vm_page_lookup(object
, offset
);
6796 if (m
== VM_PAGE_NULL
) {
6797 panic("vm_upl_map: page missing\n");
6801 * Convert the fictitious page to a private
6802 * shadow of the real page.
6804 assert(alias_page
->vmp_fictitious
);
6805 alias_page
->vmp_fictitious
= FALSE
;
6806 alias_page
->vmp_private
= TRUE
;
6807 alias_page
->vmp_free_when_done
= TRUE
;
6809 * since m is a page in the upl it must
6810 * already be wired or BUSY, so it's
6811 * safe to assign the underlying physical
6814 VM_PAGE_SET_PHYS_PAGE(alias_page
, VM_PAGE_GET_PHYS_PAGE(m
));
6816 vm_object_unlock(object
);
6818 vm_page_lockspin_queues();
6819 vm_page_wire(alias_page
, VM_KERN_MEMORY_NONE
, TRUE
);
6820 vm_page_unlock_queues();
6822 vm_page_insert_wired(alias_page
, upl
->map_object
, new_offset
, VM_KERN_MEMORY_NONE
);
6824 assert(!alias_page
->vmp_wanted
);
6825 alias_page
->vmp_busy
= FALSE
;
6826 alias_page
->vmp_absent
= FALSE
;
6829 offset
+= PAGE_SIZE_64
;
6830 new_offset
+= PAGE_SIZE_64
;
6832 vm_object_unlock(upl
->map_object
);
6834 if (upl
->flags
& UPL_SHADOWED
) {
6837 offset
= upl
->offset
- upl
->map_object
->paging_offset
;
6842 vm_object_reference(upl
->map_object
);
6847 * NEED A UPL_MAP ALIAS
6849 kr
= vm_map_enter(map
, dst_addr
, (vm_map_size_t
)size
, (vm_map_offset_t
) 0,
6850 VM_FLAGS_ANYWHERE
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_OSFMK
,
6851 upl
->map_object
, offset
, FALSE
,
6852 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
6854 if (kr
!= KERN_SUCCESS
) {
6855 vm_object_deallocate(upl
->map_object
);
6860 kr
= vm_map_enter(map
, dst_addr
, (vm_map_size_t
)size
, (vm_map_offset_t
) 0,
6861 VM_FLAGS_FIXED
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_OSFMK
,
6862 upl
->map_object
, offset
, FALSE
,
6863 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
6865 panic("vm_map_enter failed for a Vector UPL\n");
6868 vm_object_lock(upl
->map_object
);
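    /*
     * Enter each page of the UPL's map_object into the kernel pmap at the
     * newly allocated virtual range, one page at a time.
     */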
6870 for (addr
= *dst_addr
; size
> 0; size
-= PAGE_SIZE
, addr
+= PAGE_SIZE
) {
6871 m
= vm_page_lookup(upl
->map_object
, offset
);
6874 m
->vmp_pmapped
= TRUE
;
6876 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
6877 * but only in kernel space. If this was on a user map,
6878 * we'd have to set the wpmapped bit. */
6879 /* m->vmp_wpmapped = TRUE; */
6880 assert(map
->pmap
== kernel_pmap
);
6882 PMAP_ENTER(map
->pmap
, addr
, m
, VM_PROT_DEFAULT
, VM_PROT_NONE
, 0, TRUE
, kr
);
6884 assert(kr
== KERN_SUCCESS
);
6886 kasan_notify_address(addr
, PAGE_SIZE_64
);
6889 offset
+= PAGE_SIZE_64
;
6891 vm_object_unlock(upl
->map_object
);
6894 * hold a reference for the mapping
6897 upl
->flags
|= UPL_PAGE_LIST_MAPPED
;
6898 upl
->kaddr
= (vm_offset_t
) *dst_addr
;
6899 assert(upl
->kaddr
== *dst_addr
);
6902 goto process_upl_to_enter
;
6907 return KERN_SUCCESS
;
6911 * Internal routine to remove a UPL mapping from a VM map.
6913 * XXX - This should just be doable through a standard
6914 * vm_map_remove() operation. Otherwise, implicit clean-up
6915 * of the target map won't be able to correctly remove
6916 * these (and release the reference on the UPL). Having
6917 * to do this means we can't map these into user-space
6927 int isVectorUPL
= 0, curr_upl
= 0;
6928 upl_t vector_upl
= NULL
;
6930 if (upl
== UPL_NULL
) {
6931 return KERN_INVALID_ARGUMENT
;
6934 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
6935 int unmapped
= 0, valid_upls
= 0;
6937 upl_lock(vector_upl
);
6938 for (curr_upl
= 0; curr_upl
< MAX_VECTOR_UPL_ELEMENTS
; curr_upl
++) {
6939 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
);
6944 if (!(UPL_PAGE_LIST_MAPPED
& upl
->flags
)) {
6950 if (unmapped
!= valid_upls
) {
6951 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped
, valid_upls
);
6953 upl_unlock(vector_upl
);
6954 return KERN_FAILURE
;
6962 process_upl_to_remove
:
6964 if (curr_upl
== MAX_VECTOR_UPL_ELEMENTS
) {
6965 vm_map_t v_upl_submap
;
6966 vm_offset_t v_upl_submap_dst_addr
;
6967 vector_upl_get_submap(vector_upl
, &v_upl_submap
, &v_upl_submap_dst_addr
);
6969 vm_map_remove(map
, v_upl_submap_dst_addr
, v_upl_submap_dst_addr
+ vector_upl
->size
, VM_MAP_REMOVE_NO_FLAGS
);
6970 vm_map_deallocate(v_upl_submap
);
6971 upl_unlock(vector_upl
);
6972 return KERN_SUCCESS
;
6975 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
++ );
6977 goto process_upl_to_remove
;
6981 if (upl
->flags
& UPL_PAGE_LIST_MAPPED
) {
6985 assert(upl
->ref_count
> 1);
6986 upl
->ref_count
--; /* removing mapping ref */
6988 upl
->flags
&= ~UPL_PAGE_LIST_MAPPED
;
6989 upl
->kaddr
= (vm_offset_t
) 0;
6996 vm_map_trunc_page(addr
,
6997 VM_MAP_PAGE_MASK(map
)),
6998 vm_map_round_page(addr
+ size
,
6999 VM_MAP_PAGE_MASK(map
)),
7000 VM_MAP_REMOVE_NO_FLAGS
);
7001 return KERN_SUCCESS
;
7004 * If it's a Vectored UPL, we'll be removing the entire
7005 * submap anyways, so no need to remove individual UPL
7006 * element mappings from within the submap
7008 goto process_upl_to_remove
;
7013 return KERN_FAILURE
;
7020 upl_offset_t offset
,
7023 upl_page_info_t
*page_list
,
7024 mach_msg_type_number_t count
,
7027 upl_size_t xfer_size
, subupl_size
= size
;
7028 vm_object_t shadow_object
;
7030 vm_object_t m_object
;
7031 vm_object_offset_t target_offset
;
7032 upl_offset_t subupl_offset
= offset
;
7034 wpl_array_t lite_list
;
7036 int clear_refmod
= 0;
7037 int pgpgout_count
= 0;
7038 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
7039 struct vm_page_delayed_work
*dwp
;
7042 int isVectorUPL
= 0;
7043 upl_t vector_upl
= NULL
;
7044 boolean_t should_be_throttled
= FALSE
;
7046 vm_page_t nxt_page
= VM_PAGE_NULL
;
7047 int fast_path_possible
= 0;
7048 int fast_path_full_commit
= 0;
7049 int throttle_page
= 0;
7050 int unwired_count
= 0;
7051 int local_queue_count
= 0;
7052 vm_page_t first_local
, last_local
;
7056 if (upl
== UPL_NULL
) {
7057 return KERN_INVALID_ARGUMENT
;
7064 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
7066 upl_lock(vector_upl
);
7071 process_upl_to_commit
:
7075 offset
= subupl_offset
;
7077 upl_unlock(vector_upl
);
7078 return KERN_SUCCESS
;
7080 upl
= vector_upl_subupl_byoffset(vector_upl
, &offset
, &size
);
7082 upl_unlock(vector_upl
);
7083 return KERN_FAILURE
;
7085 page_list
= UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl
);
7086 subupl_size
-= size
;
7087 subupl_offset
+= size
;
7091 if (upl
->upl_commit_index
< UPL_DEBUG_COMMIT_RECORDS
) {
7092 (void) OSBacktrace(&upl
->upl_commit_records
[upl
->upl_commit_index
].c_retaddr
[0], UPL_DEBUG_STACK_FRAMES
);
7094 upl
->upl_commit_records
[upl
->upl_commit_index
].c_beg
= offset
;
7095 upl
->upl_commit_records
[upl
->upl_commit_index
].c_end
= (offset
+ size
);
7097 upl
->upl_commit_index
++;
7100 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
7102 } else if ((offset
+ size
) <= upl
->size
) {
7108 upl_unlock(vector_upl
);
7110 return KERN_FAILURE
;
7112 if (upl
->flags
& UPL_SET_DIRTY
) {
7113 flags
|= UPL_COMMIT_SET_DIRTY
;
7115 if (upl
->flags
& UPL_CLEAR_DIRTY
) {
7116 flags
|= UPL_COMMIT_CLEAR_DIRTY
;
7119 if (upl
->flags
& UPL_INTERNAL
) {
7120 lite_list
= (wpl_array_t
) ((((uintptr_t)upl
) + sizeof(struct upl
))
7121 + ((upl
->size
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
7123 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
7126 object
= upl
->map_object
;
7128 if (upl
->flags
& UPL_SHADOWED
) {
7129 vm_object_lock(object
);
7130 shadow_object
= object
->shadow
;
7132 shadow_object
= object
;
7134 entry
= offset
/ PAGE_SIZE
;
7135 target_offset
= (vm_object_offset_t
)offset
;
7137 assert(!(target_offset
& PAGE_MASK
));
7138 assert(!(xfer_size
& PAGE_MASK
));
7140 if (upl
->flags
& UPL_KERNEL_OBJECT
) {
7141 vm_object_lock_shared(shadow_object
);
7143 vm_object_lock(shadow_object
);
7146 VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object
);
7148 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7149 assert(shadow_object
->blocked_access
);
7150 shadow_object
->blocked_access
= FALSE
;
7151 vm_object_wakeup(object
, VM_OBJECT_EVENT_UNBLOCKED
);
7154 if (shadow_object
->code_signed
) {
7157 * If the object is code-signed, do not let this UPL tell
7158 * us if the pages are valid or not. Let the pages be
7159 * validated by VM the normal way (when they get mapped or
7162 flags
&= ~UPL_COMMIT_CS_VALIDATED
;
7166 * No page list to get the code-signing info from !?
7168 flags
&= ~UPL_COMMIT_CS_VALIDATED
;
7170 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object
->internal
) {
7171 should_be_throttled
= TRUE
;
7176 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
7178 if ((upl
->flags
& UPL_IO_WIRE
) &&
7179 !(flags
& UPL_COMMIT_FREE_ABSENT
) &&
7181 shadow_object
->purgable
!= VM_PURGABLE_VOLATILE
&&
7182 shadow_object
->purgable
!= VM_PURGABLE_EMPTY
) {
7183 if (!vm_page_queue_empty(&shadow_object
->memq
)) {
7184 if (size
== shadow_object
->vo_size
) {
7185 nxt_page
= (vm_page_t
)vm_page_queue_first(&shadow_object
->memq
);
7186 fast_path_full_commit
= 1;
7188 fast_path_possible
= 1;
7190 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object
->internal
&&
7191 (shadow_object
->purgable
== VM_PURGABLE_DENY
||
7192 shadow_object
->purgable
== VM_PURGABLE_NONVOLATILE
||
7193 shadow_object
->purgable
== VM_PURGABLE_VOLATILE
)) {
7198 first_local
= VM_PAGE_NULL
;
7199 last_local
= VM_PAGE_NULL
;
7209 if (upl
->flags
& UPL_LITE
) {
7210 unsigned int pg_num
;
7212 if (nxt_page
!= VM_PAGE_NULL
) {
7214 nxt_page
= (vm_page_t
)vm_page_queue_next(&nxt_page
->vmp_listq
);
7215 target_offset
= m
->vmp_offset
;
7217 pg_num
= (unsigned int) (target_offset
/ PAGE_SIZE
);
7218 assert(pg_num
== target_offset
/ PAGE_SIZE
);
7220 if (lite_list
[pg_num
>> 5] & (1U << (pg_num
& 31))) {
7221 lite_list
[pg_num
>> 5] &= ~(1U << (pg_num
& 31));
7223 if (!(upl
->flags
& UPL_KERNEL_OBJECT
) && m
== VM_PAGE_NULL
) {
7224 m
= vm_page_lookup(shadow_object
, target_offset
+ (upl
->offset
- shadow_object
->paging_offset
));
7230 if (upl
->flags
& UPL_SHADOWED
) {
7231 if ((t
= vm_page_lookup(object
, target_offset
)) != VM_PAGE_NULL
) {
7232 t
->vmp_free_when_done
= FALSE
;
7236 if (!(upl
->flags
& UPL_KERNEL_OBJECT
) && m
== VM_PAGE_NULL
) {
7237 m
= vm_page_lookup(shadow_object
, target_offset
+ object
->vo_shadow_offset
);
7241 if (m
== VM_PAGE_NULL
) {
7242 goto commit_next_page
;
7245 m_object
= VM_PAGE_OBJECT(m
);
7247 if (m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) {
7248 assert(m
->vmp_busy
);
7250 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7251 goto commit_next_page
;
7254 if (flags
& UPL_COMMIT_CS_VALIDATED
) {
7257 * Set the code signing bits according to
7258 * what the UPL says they should be.
7260 m
->vmp_cs_validated
= page_list
[entry
].cs_validated
;
7261 m
->vmp_cs_tainted
= page_list
[entry
].cs_tainted
;
7262 m
->vmp_cs_nx
= page_list
[entry
].cs_nx
;
7264 if (flags
& UPL_COMMIT_WRITTEN_BY_KERNEL
) {
7265 m
->vmp_written_by_kernel
= TRUE
;
7268 if (upl
->flags
& UPL_IO_WIRE
) {
7270 page_list
[entry
].phys_addr
= 0;
7273 if (flags
& UPL_COMMIT_SET_DIRTY
) {
7274 SET_PAGE_DIRTY(m
, FALSE
);
7275 } else if (flags
& UPL_COMMIT_CLEAR_DIRTY
) {
7276 m
->vmp_dirty
= FALSE
;
7278 if (!(flags
& UPL_COMMIT_CS_VALIDATED
) &&
7279 m
->vmp_cs_validated
&& !m
->vmp_cs_tainted
) {
7282 * This page is no longer dirty
7283 * but could have been modified,
7284 * so it will need to be
7287 m
->vmp_cs_validated
= FALSE
;
7289 VM_PAGEOUT_DEBUG(vm_cs_validated_resets
, 1);
7291 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
7293 clear_refmod
|= VM_MEM_MODIFIED
;
7295 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7297 * We blocked access to the pages in this UPL.
7298 * Clear the "busy" bit and wake up any waiter
7301 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7303 if (fast_path_possible
) {
7304 assert(m_object
->purgable
!= VM_PURGABLE_EMPTY
);
7305 assert(m_object
->purgable
!= VM_PURGABLE_VOLATILE
);
7306 if (m
->vmp_absent
) {
7307 assert(m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
7308 assert(m
->vmp_wire_count
== 0);
7309 assert(m
->vmp_busy
);
7311 m
->vmp_absent
= FALSE
;
7312 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7314 if (m
->vmp_wire_count
== 0) {
7315 panic("wire_count == 0, m = %p, obj = %p\n", m
, shadow_object
);
7317 assert(m
->vmp_q_state
== VM_PAGE_IS_WIRED
);
7320 * XXX FBDP need to update some other
7321 * counters here (purgeable_wired_count)
7324 assert(m
->vmp_wire_count
> 0);
7325 m
->vmp_wire_count
--;
7327 if (m
->vmp_wire_count
== 0) {
7328 m
->vmp_q_state
= VM_PAGE_NOT_ON_Q
;
7332 if (m
->vmp_wire_count
== 0) {
7333 assert(m
->vmp_pageq
.next
== 0 && m
->vmp_pageq
.prev
== 0);
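                /*
                 * Link the page onto a local list; the entire list is spliced
                 * onto the appropriate global page queue later, under a single
                 * page-queues lock hold.
                 */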
7335 if (last_local
== VM_PAGE_NULL
) {
7336 assert(first_local
== VM_PAGE_NULL
);
7341 assert(first_local
!= VM_PAGE_NULL
);
7343 m
->vmp_pageq
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local
);
7344 first_local
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m
);
7347 local_queue_count
++;
7349 if (throttle_page
) {
7350 m
->vmp_q_state
= VM_PAGE_ON_THROTTLED_Q
;
7352 if (flags
& UPL_COMMIT_INACTIVATE
) {
7353 if (shadow_object
->internal
) {
7354 m
->vmp_q_state
= VM_PAGE_ON_INACTIVE_INTERNAL_Q
;
7356 m
->vmp_q_state
= VM_PAGE_ON_INACTIVE_EXTERNAL_Q
;
7359 m
->vmp_q_state
= VM_PAGE_ON_ACTIVE_Q
;
7364 if (flags
& UPL_COMMIT_INACTIVATE
) {
7365 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
7366 clear_refmod
|= VM_MEM_REFERENCED
;
7368 if (m
->vmp_absent
) {
7369 if (flags
& UPL_COMMIT_FREE_ABSENT
) {
7370 dwp
->dw_mask
|= DW_vm_page_free
;
7372 m
->vmp_absent
= FALSE
;
7373 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7375 if (!(dwp
->dw_mask
& DW_vm_page_deactivate_internal
)) {
7376 dwp
->dw_mask
|= DW_vm_page_activate
;
7380 dwp
->dw_mask
|= DW_vm_page_unwire
;
7383 goto commit_next_page
;
7385 assert(m
->vmp_q_state
!= VM_PAGE_USED_BY_COMPRESSOR
);
7388 page_list
[entry
].phys_addr
= 0;
7392 * make sure to clear the hardware
7393 * modify or reference bits before
7394 * releasing the BUSY bit on this page
7395 * otherwise we risk losing a legitimate
7398 if (flags
& UPL_COMMIT_CLEAR_DIRTY
) {
7399 m
->vmp_dirty
= FALSE
;
7401 clear_refmod
|= VM_MEM_MODIFIED
;
7403 if (m
->vmp_laundry
) {
7404 dwp
->dw_mask
|= DW_vm_pageout_throttle_up
;
7407 if (VM_PAGE_WIRED(m
)) {
7408 m
->vmp_free_when_done
= FALSE
;
7411 if (!(flags
& UPL_COMMIT_CS_VALIDATED
) &&
7412 m
->vmp_cs_validated
&& !m
->vmp_cs_tainted
) {
7415 * This page is no longer dirty
7416 * but could have been modified,
7417 * so it will need to be
7420 m
->vmp_cs_validated
= FALSE
;
7422 VM_PAGEOUT_DEBUG(vm_cs_validated_resets
, 1);
7424 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
7426 if (m
->vmp_overwriting
) {
7428 * the (COPY_OUT_FROM == FALSE) request_page_list case
7431 #if CONFIG_PHANTOM_CACHE
7432 if (m
->vmp_absent
&& !m_object
->internal
) {
7433 dwp
->dw_mask
|= DW_vm_phantom_cache_update
;
7436 m
->vmp_absent
= FALSE
;
7438 dwp
->dw_mask
|= DW_clear_busy
;
7441 * alternate (COPY_OUT_FROM == FALSE) page_list case
7442 * Occurs when the original page was wired
7443 * at the time of the list request
7445 assert(VM_PAGE_WIRED(m
));
7447 dwp
->dw_mask
|= DW_vm_page_unwire
; /* reactivates */
7449 m
->vmp_overwriting
= FALSE
;
7451 m
->vmp_cleaning
= FALSE
;
7453 if (m
->vmp_free_when_done
) {
7455 * With the clean queue enabled, UPL_PAGEOUT should
7456 * no longer set the pageout bit. Its pages now go
7457 * to the clean queue.
7459 * We don't use the cleaned Q anymore and so this
7460 * assert isn't correct. The code for the clean Q
7461 * still exists and might be used in the future. If we
7462 * go back to the cleaned Q, we will re-enable this
7465 * assert(!(upl->flags & UPL_PAGEOUT));
7467 assert(!m_object
->internal
);
7469 m
->vmp_free_when_done
= FALSE
;
7471 if ((flags
& UPL_COMMIT_SET_DIRTY
) ||
7472 (m
->vmp_pmapped
&& (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
)) & VM_MEM_MODIFIED
))) {
7474 * page was re-dirtied after we started
7475 * the pageout... reactivate it since
7476 * we don't know whether the on-disk
7477 * copy matches what is now in memory
7479 SET_PAGE_DIRTY(m
, FALSE
);
7481 dwp
->dw_mask
|= DW_vm_page_activate
| DW_PAGE_WAKEUP
;
7483 if (upl
->flags
& UPL_PAGEOUT
) {
7484 VM_STAT_INCR(reactivations
);
7485 DTRACE_VM2(pgrec
, int, 1, (uint64_t *), NULL
);
7489 * page has been successfully cleaned
7490 * go ahead and free it for other use
7492 if (m_object
->internal
) {
7493 DTRACE_VM2(anonpgout
, int, 1, (uint64_t *), NULL
);
7495 DTRACE_VM2(fspgout
, int, 1, (uint64_t *), NULL
);
7497 m
->vmp_dirty
= FALSE
;
7500 dwp
->dw_mask
|= DW_vm_page_free
;
7502 goto commit_next_page
;
7505 * It is a part of the semantic of COPYOUT_FROM
7506 * UPLs that a commit implies cache sync
7507 * between the vm page and the backing store
7508 * this can be used to strip the precious bit
7511 if ((upl
->flags
& UPL_PAGE_SYNC_DONE
) || (flags
& UPL_COMMIT_CLEAR_PRECIOUS
)) {
7512 m
->vmp_precious
= FALSE
;
7515 if (flags
& UPL_COMMIT_SET_DIRTY
) {
7516 SET_PAGE_DIRTY(m
, FALSE
);
7518 m
->vmp_dirty
= FALSE
;
7521 /* with the clean queue on, move *all* cleaned pages to the clean queue */
7522 if (hibernate_cleaning_in_progress
== FALSE
&& !m
->vmp_dirty
&& (upl
->flags
& UPL_PAGEOUT
)) {
7525 VM_STAT_INCR(pageouts
);
7526 DTRACE_VM2(pgout
, int, 1, (uint64_t *), NULL
);
7528 dwp
->dw_mask
|= DW_enqueue_cleaned
;
7529 } else if (should_be_throttled
== TRUE
&& (m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
)) {
7531 * page coming back in from being 'frozen'...
7532 * it was dirty before it was frozen, so keep it so
7533 * the vm_page_activate will notice that it really belongs
7534 * on the throttle queue and put it there
7536 SET_PAGE_DIRTY(m
, FALSE
);
7537 dwp
->dw_mask
|= DW_vm_page_activate
;
7539 if ((flags
& UPL_COMMIT_INACTIVATE
) && !m
->vmp_clustered
&& (m
->vmp_q_state
!= VM_PAGE_ON_SPECULATIVE_Q
)) {
7540 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
7541 clear_refmod
|= VM_MEM_REFERENCED
;
7542 } else if (!VM_PAGE_PAGEABLE(m
)) {
7543 if (m
->vmp_clustered
|| (flags
& UPL_COMMIT_SPECULATE
)) {
7544 dwp
->dw_mask
|= DW_vm_page_speculate
;
7545 } else if (m
->vmp_reference
) {
7546 dwp
->dw_mask
|= DW_vm_page_activate
;
7548 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
7549 clear_refmod
|= VM_MEM_REFERENCED
;
7553 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
* We blocked access to the pages in this UPL.
7556 * Clear the "busy" bit on this page before we
7557 * wake up any waiter.
7559 dwp
->dw_mask
|= DW_clear_busy
;
7562 * Wakeup any thread waiting for the page to be un-cleaning.
7564 dwp
->dw_mask
|= DW_PAGE_WAKEUP
;
7568 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m
), clear_refmod
);
7571 target_offset
+= PAGE_SIZE_64
;
7572 xfer_size
-= PAGE_SIZE
;
7576 if (dwp
->dw_mask
& ~(DW_clear_busy
| DW_PAGE_WAKEUP
)) {
7577 VM_PAGE_ADD_DELAYED_WORK(dwp
, m
, dw_count
);
7579 if (dw_count
>= dw_limit
) {
7580 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, &dw_array
[0], dw_count
);
7586 if (dwp
->dw_mask
& DW_clear_busy
) {
7587 m
->vmp_busy
= FALSE
;
7590 if (dwp
->dw_mask
& DW_PAGE_WAKEUP
) {
7597 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, &dw_array
[0], dw_count
);
7600 if (fast_path_possible
) {
7601 assert(shadow_object
->purgable
!= VM_PURGABLE_VOLATILE
);
7602 assert(shadow_object
->purgable
!= VM_PURGABLE_EMPTY
);
7604 if (local_queue_count
|| unwired_count
) {
7605 if (local_queue_count
) {
7606 vm_page_t first_target
;
7607 vm_page_queue_head_t
*target_queue
;
7609 if (throttle_page
) {
7610 target_queue
= &vm_page_queue_throttled
;
7612 if (flags
& UPL_COMMIT_INACTIVATE
) {
7613 if (shadow_object
->internal
) {
7614 target_queue
= &vm_page_queue_anonymous
;
7616 target_queue
= &vm_page_queue_inactive
;
7619 target_queue
= &vm_page_queue_active
;
7623 * Transfer the entire local queue to a regular LRU page queues.
7625 vm_page_lockspin_queues();
7627 first_target
= (vm_page_t
) vm_page_queue_first(target_queue
);
7629 if (vm_page_queue_empty(target_queue
)) {
7630 target_queue
->prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local
);
7632 first_target
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local
);
7635 target_queue
->next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local
);
7636 first_local
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue
);
7637 last_local
->vmp_pageq
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target
);
7640 * Adjust the global page counts.
7642 if (throttle_page
) {
7643 vm_page_throttled_count
+= local_queue_count
;
7645 if (flags
& UPL_COMMIT_INACTIVATE
) {
7646 if (shadow_object
->internal
) {
7647 vm_page_anonymous_count
+= local_queue_count
;
7649 vm_page_inactive_count
+= local_queue_count
;
7651 token_new_pagecount
+= local_queue_count
;
7653 vm_page_active_count
+= local_queue_count
;
7656 if (shadow_object
->internal
) {
7657 vm_page_pageable_internal_count
+= local_queue_count
;
7659 vm_page_pageable_external_count
+= local_queue_count
;
7663 vm_page_lockspin_queues();
7665 if (unwired_count
) {
7666 vm_page_wire_count
-= unwired_count
;
7667 VM_CHECK_MEMORYSTATUS
;
7669 vm_page_unlock_queues();
7671 VM_OBJECT_WIRED_PAGE_COUNT(shadow_object
, -unwired_count
);
7676 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
7678 } else if (upl
->flags
& UPL_LITE
) {
7684 if (!fast_path_full_commit
) {
7685 pg_num
= upl
->size
/ PAGE_SIZE
;
7686 pg_num
= (pg_num
+ 31) >> 5;
7688 for (i
= 0; i
< pg_num
; i
++) {
7689 if (lite_list
[i
] != 0) {
7696 if (vm_page_queue_empty(&upl
->map_object
->memq
)) {
7700 if (occupied
== 0) {
7702 * If this UPL element belongs to a Vector UPL and is
7703 * empty, then this is the right function to deallocate
* it. So go ahead and set the *empty variable. The flag
7705 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
7706 * should be considered relevant for the Vector UPL and not
7707 * the internal UPLs.
7709 if ((upl
->flags
& UPL_COMMIT_NOTIFY_EMPTY
) || isVectorUPL
) {
7713 if (object
== shadow_object
&& !(upl
->flags
& UPL_KERNEL_OBJECT
)) {
7715 * this is not a paging object
7716 * so we need to drop the paging reference
7717 * that was taken when we created the UPL
7718 * against this object
7720 vm_object_activity_end(shadow_object
);
7721 vm_object_collapse(shadow_object
, 0, TRUE
);
* we donated the paging reference to
7725 * the map object... vm_pageout_object_terminate
7726 * will drop this reference
7730 VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object
, shadow_object
->wire_tag
);
7731 vm_object_unlock(shadow_object
);
7732 if (object
!= shadow_object
) {
7733 vm_object_unlock(object
);
7740 * If we completed our operations on an UPL that is
7741 * part of a Vectored UPL and if empty is TRUE, then
7742 * we should go ahead and deallocate this UPL element.
7743 * Then we check if this was the last of the UPL elements
7744 * within that Vectored UPL. If so, set empty to TRUE
7745 * so that in ubc_upl_commit_range or ubc_upl_commit, we
7746 * can go ahead and deallocate the Vector UPL too.
7748 if (*empty
== TRUE
) {
7749 *empty
= vector_upl_set_subupl(vector_upl
, upl
, 0);
7750 upl_deallocate(upl
);
7752 goto process_upl_to_commit
;
7754 if (pgpgout_count
) {
7755 DTRACE_VM2(pgpgout
, int, pgpgout_count
, (uint64_t *), NULL
);
7758 return KERN_SUCCESS
;
7764 upl_offset_t offset
,
7769 upl_page_info_t
*user_page_list
= NULL
;
7770 upl_size_t xfer_size
, subupl_size
= size
;
7771 vm_object_t shadow_object
;
7773 vm_object_offset_t target_offset
;
7774 upl_offset_t subupl_offset
= offset
;
7776 wpl_array_t lite_list
;
7778 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
7779 struct vm_page_delayed_work
*dwp
;
7782 int isVectorUPL
= 0;
7783 upl_t vector_upl
= NULL
;
7787 if (upl
== UPL_NULL
) {
7788 return KERN_INVALID_ARGUMENT
;
7791 if ((upl
->flags
& UPL_IO_WIRE
) && !(error
& UPL_ABORT_DUMP_PAGES
)) {
7792 return upl_commit_range(upl
, offset
, size
, UPL_COMMIT_FREE_ABSENT
, NULL
, 0, empty
);
7795 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
7797 upl_lock(vector_upl
);
7802 process_upl_to_abort
:
7805 offset
= subupl_offset
;
7807 upl_unlock(vector_upl
);
7808 return KERN_SUCCESS
;
7810 upl
= vector_upl_subupl_byoffset(vector_upl
, &offset
, &size
);
7812 upl_unlock(vector_upl
);
7813 return KERN_FAILURE
;
7815 subupl_size
-= size
;
7816 subupl_offset
+= size
;
7822 if (upl
->upl_commit_index
< UPL_DEBUG_COMMIT_RECORDS
) {
7823 (void) OSBacktrace(&upl
->upl_commit_records
[upl
->upl_commit_index
].c_retaddr
[0], UPL_DEBUG_STACK_FRAMES
);
7825 upl
->upl_commit_records
[upl
->upl_commit_index
].c_beg
= offset
;
7826 upl
->upl_commit_records
[upl
->upl_commit_index
].c_end
= (offset
+ size
);
7827 upl
->upl_commit_records
[upl
->upl_commit_index
].c_aborted
= 1;
7829 upl
->upl_commit_index
++;
7832 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
7834 } else if ((offset
+ size
) <= upl
->size
) {
7840 upl_unlock(vector_upl
);
7843 return KERN_FAILURE
;
7845 if (upl
->flags
& UPL_INTERNAL
) {
7846 lite_list
= (wpl_array_t
)
7847 ((((uintptr_t)upl
) + sizeof(struct upl
))
7848 + ((upl
->size
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
7850 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
7852 lite_list
= (wpl_array_t
)
7853 (((uintptr_t)upl
) + sizeof(struct upl
));
7855 object
= upl
->map_object
;
7857 if (upl
->flags
& UPL_SHADOWED
) {
7858 vm_object_lock(object
);
7859 shadow_object
= object
->shadow
;
7861 shadow_object
= object
;
7864 entry
= offset
/ PAGE_SIZE
;
7865 target_offset
= (vm_object_offset_t
)offset
;
7867 assert(!(target_offset
& PAGE_MASK
));
7868 assert(!(xfer_size
& PAGE_MASK
));
7870 if (upl
->flags
& UPL_KERNEL_OBJECT
) {
7871 vm_object_lock_shared(shadow_object
);
7873 vm_object_lock(shadow_object
);
7876 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7877 assert(shadow_object
->blocked_access
);
7878 shadow_object
->blocked_access
= FALSE
;
7879 vm_object_wakeup(object
, VM_OBJECT_EVENT_UNBLOCKED
);
7884 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
7886 if ((error
& UPL_ABORT_DUMP_PAGES
) && (upl
->flags
& UPL_KERNEL_OBJECT
)) {
7887 panic("upl_abort_range: kernel_object being DUMPED");
7892 unsigned int pg_num
;
7895 pg_num
= (unsigned int) (target_offset
/ PAGE_SIZE
);
7896 assert(pg_num
== target_offset
/ PAGE_SIZE
);
7900 if (user_page_list
) {
7901 needed
= user_page_list
[pg_num
].needed
;
7907 if (upl
->flags
& UPL_LITE
) {
7908 if (lite_list
[pg_num
>> 5] & (1U << (pg_num
& 31))) {
7909 lite_list
[pg_num
>> 5] &= ~(1U << (pg_num
& 31));
7911 if (!(upl
->flags
& UPL_KERNEL_OBJECT
)) {
7912 m
= vm_page_lookup(shadow_object
, target_offset
+
7913 (upl
->offset
- shadow_object
->paging_offset
));
7917 if (upl
->flags
& UPL_SHADOWED
) {
7918 if ((t
= vm_page_lookup(object
, target_offset
)) != VM_PAGE_NULL
) {
7919 t
->vmp_free_when_done
= FALSE
;
7923 if (m
== VM_PAGE_NULL
) {
7924 m
= vm_page_lookup(shadow_object
, target_offset
+ object
->vo_shadow_offset
);
7928 if ((upl
->flags
& UPL_KERNEL_OBJECT
)) {
7929 goto abort_next_page
;
7932 if (m
!= VM_PAGE_NULL
) {
7933 assert(m
->vmp_q_state
!= VM_PAGE_USED_BY_COMPRESSOR
);
7935 if (m
->vmp_absent
) {
7936 boolean_t must_free
= TRUE
;
7939 * COPYOUT = FALSE case
7940 * check for error conditions which must
7941 * be passed back to the pages customer
7943 if (error
& UPL_ABORT_RESTART
) {
7944 m
->vmp_restart
= TRUE
;
7945 m
->vmp_absent
= FALSE
;
7946 m
->vmp_unusual
= TRUE
;
7948 } else if (error
& UPL_ABORT_UNAVAILABLE
) {
7949 m
->vmp_restart
= FALSE
;
7950 m
->vmp_unusual
= TRUE
;
7952 } else if (error
& UPL_ABORT_ERROR
) {
7953 m
->vmp_restart
= FALSE
;
7954 m
->vmp_absent
= FALSE
;
7955 m
->vmp_error
= TRUE
;
7956 m
->vmp_unusual
= TRUE
;
7959 if (m
->vmp_clustered
&& needed
== FALSE
) {
7961 * This page was a part of a speculative
7962 * read-ahead initiated by the kernel
7963 * itself. No one is expecting this
7964 * page and no one will clean up its
7965 * error state if it ever becomes valid
7967 * We have to free it here.
7971 m
->vmp_cleaning
= FALSE
;
7973 if (m
->vmp_overwriting
&& !m
->vmp_busy
) {
7975 * this shouldn't happen since
7976 * this is an 'absent' page, but
7977 * it doesn't hurt to check for
7978 * the 'alternate' method of
7979 * stabilizing the page...
7980 * we will mark 'busy' to be cleared
7981 * in the following code which will
* take care of the primary stabilization
7983 * method (i.e. setting 'busy' to TRUE)
7985 dwp
->dw_mask
|= DW_vm_page_unwire
;
7987 m
->vmp_overwriting
= FALSE
;
7989 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7991 if (must_free
== TRUE
) {
7992 dwp
->dw_mask
|= DW_vm_page_free
;
7994 dwp
->dw_mask
|= DW_vm_page_activate
;
7998 * Handle the trusted pager throttle.
8000 if (m
->vmp_laundry
) {
8001 dwp
->dw_mask
|= DW_vm_pageout_throttle_up
;
8004 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
8006 * We blocked access to the pages in this UPL.
8007 * Clear the "busy" bit and wake up any waiter
8010 dwp
->dw_mask
|= DW_clear_busy
;
8012 if (m
->vmp_overwriting
) {
8014 dwp
->dw_mask
|= DW_clear_busy
;
8017 * deal with the 'alternate' method
8018 * of stabilizing the page...
8019 * we will either free the page
8020 * or mark 'busy' to be cleared
8021 * in the following code which will
* take care of the primary stabilization
8023 * method (i.e. setting 'busy' to TRUE)
8025 dwp
->dw_mask
|= DW_vm_page_unwire
;
8027 m
->vmp_overwriting
= FALSE
;
8029 m
->vmp_free_when_done
= FALSE
;
8030 m
->vmp_cleaning
= FALSE
;
8032 if (error
& UPL_ABORT_DUMP_PAGES
) {
8033 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
8035 dwp
->dw_mask
|= DW_vm_page_free
;
8037 if (!(dwp
->dw_mask
& DW_vm_page_unwire
)) {
8038 if (error
& UPL_ABORT_REFERENCE
) {
* we've been told to explicitly
8041 * reference this page... for
8042 * file I/O, this is done by
8043 * implementing an LRU on the inactive q
8045 dwp
->dw_mask
|= DW_vm_page_lru
;
8046 } else if (!VM_PAGE_PAGEABLE(m
)) {
8047 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
8050 dwp
->dw_mask
|= DW_PAGE_WAKEUP
;
8055 target_offset
+= PAGE_SIZE_64
;
8056 xfer_size
-= PAGE_SIZE
;
8060 if (dwp
->dw_mask
& ~(DW_clear_busy
| DW_PAGE_WAKEUP
)) {
8061 VM_PAGE_ADD_DELAYED_WORK(dwp
, m
, dw_count
);
8063 if (dw_count
>= dw_limit
) {
8064 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, &dw_array
[0], dw_count
);
8070 if (dwp
->dw_mask
& DW_clear_busy
) {
8071 m
->vmp_busy
= FALSE
;
8074 if (dwp
->dw_mask
& DW_PAGE_WAKEUP
) {
8081 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, &dw_array
[0], dw_count
);
8086 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
8088 } else if (upl
->flags
& UPL_LITE
) {
8092 pg_num
= upl
->size
/ PAGE_SIZE
;
8093 pg_num
= (pg_num
+ 31) >> 5;
8096 for (i
= 0; i
< pg_num
; i
++) {
8097 if (lite_list
[i
] != 0) {
8103 if (vm_page_queue_empty(&upl
->map_object
->memq
)) {
8107 if (occupied
== 0) {
8109 * If this UPL element belongs to a Vector UPL and is
8110 * empty, then this is the right function to deallocate
* it. So go ahead and set the *empty variable. The flag
8112 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
8113 * should be considered relevant for the Vector UPL and
8114 * not the internal UPLs.
8116 if ((upl
->flags
& UPL_COMMIT_NOTIFY_EMPTY
) || isVectorUPL
) {
8120 if (object
== shadow_object
&& !(upl
->flags
& UPL_KERNEL_OBJECT
)) {
8122 * this is not a paging object
8123 * so we need to drop the paging reference
8124 * that was taken when we created the UPL
8125 * against this object
8127 vm_object_activity_end(shadow_object
);
8128 vm_object_collapse(shadow_object
, 0, TRUE
);
* we donated the paging reference to
8132 * the map object... vm_pageout_object_terminate
8133 * will drop this reference
8137 vm_object_unlock(shadow_object
);
8138 if (object
!= shadow_object
) {
8139 vm_object_unlock(object
);
8146 * If we completed our operations on an UPL that is
8147 * part of a Vectored UPL and if empty is TRUE, then
8148 * we should go ahead and deallocate this UPL element.
8149 * Then we check if this was the last of the UPL elements
8150 * within that Vectored UPL. If so, set empty to TRUE
8151 * so that in ubc_upl_abort_range or ubc_upl_abort, we
8152 * can go ahead and deallocate the Vector UPL too.
8154 if (*empty
== TRUE
) {
8155 *empty
= vector_upl_set_subupl(vector_upl
, upl
, 0);
8156 upl_deallocate(upl
);
8158 goto process_upl_to_abort
;
8161 return KERN_SUCCESS
;
}

kern_return_t
upl_abort(
    upl_t  upl,
    int    error)
{
    boolean_t  empty;

    if (upl == UPL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return upl_abort_range(upl, 0, upl->size, error, &empty);
}

/* an option on commit should be wire */
kern_return_t
upl_commit(
    upl_t                   upl,
    upl_page_info_t         *page_list,
    mach_msg_type_number_t  count)
{
    boolean_t  empty;

    if (upl == UPL_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
}
void
iopl_valid_data(
    upl_t     upl,
    vm_tag_t  tag)
{
    vm_object_t  object;
    vm_offset_t  offset;
    vm_page_t    m, nxt_page = VM_PAGE_NULL;
    upl_size_t   size;
    int          wired_count = 0;

    if (upl == NULL) {
        panic("iopl_valid_data: NULL upl");
    }
    if (vector_upl_is_valid(upl)) {
        panic("iopl_valid_data: vector upl");
    }
    if ((upl->flags & (UPL_DEVICE_MEMORY | UPL_SHADOWED | UPL_ACCESS_BLOCKED | UPL_IO_WIRE | UPL_INTERNAL)) != UPL_IO_WIRE) {
        panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);
    }

    object = upl->map_object;

    if (object == kernel_object || object == compressor_object) {
        panic("iopl_valid_data: object == kernel or compressor");
    }

    if (object->purgable == VM_PURGABLE_VOLATILE ||
        object->purgable == VM_PURGABLE_EMPTY) {
        panic("iopl_valid_data: object %p purgable %d",
            object, object->purgable);
    }

    size = upl->size;

    vm_object_lock(object);
    VM_OBJECT_WIRED_PAGE_UPDATE_START(object);

    if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE)) {
        nxt_page = (vm_page_t)vm_page_queue_first(&object->memq);
    } else {
        offset = 0 + upl->offset - object->paging_offset;
    }

    while (size) {
        if (nxt_page != VM_PAGE_NULL) {
            m = nxt_page;
            nxt_page = (vm_page_t)vm_page_queue_next(&nxt_page->vmp_listq);
        } else {
            m = vm_page_lookup(object, offset);
            offset += PAGE_SIZE;

            if (m == VM_PAGE_NULL) {
                panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);
            }
        }
        if (m->vmp_busy) {
            if (!m->vmp_absent) {
                panic("iopl_valid_data: busy page w/o absent");
            }
            if (m->vmp_pageq.next || m->vmp_pageq.prev) {
                panic("iopl_valid_data: busy+absent page on page queue");
            }
            if (m->vmp_reusable) {
                panic("iopl_valid_data: %p is reusable", m);
            }

            m->vmp_absent = FALSE;
            m->vmp_dirty = TRUE;
            assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
            assert(m->vmp_wire_count == 0);
            m->vmp_wire_count++;
            assert(m->vmp_wire_count);
            if (m->vmp_wire_count == 1) {
                m->vmp_q_state = VM_PAGE_IS_WIRED;
                wired_count++;
            } else {
                panic("iopl_valid_data: %p already wired\n", m);
            }

            PAGE_WAKEUP_DONE(m);
        }
        size -= PAGE_SIZE;
    }
    if (wired_count) {
        VM_OBJECT_WIRED_PAGE_COUNT(object, wired_count);
        assert(object->resident_page_count >= object->wired_page_count);

        /* no need to adjust purgeable accounting for this object: */
        assert(object->purgable != VM_PURGABLE_VOLATILE);
        assert(object->purgable != VM_PURGABLE_EMPTY);

        vm_page_lockspin_queues();
        vm_page_wire_count += wired_count;
        vm_page_unlock_queues();
    }
    VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
    vm_object_unlock(object);
}
void
vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op)
{
	unsigned int    cache_attr = 0;

	cache_attr = object->wimg_bits & VM_WIMG_MASK;
	assert(user_page_list);
	if (cache_attr != VM_WIMG_USE_DEFAULT) {
		PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
	}
}


boolean_t       vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t);
kern_return_t   vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t,
    vm_object_offset_t *, int, int *);
8322 vm_object_iopl_wire_full(vm_object_t object
, upl_t upl
, upl_page_info_array_t user_page_list
,
8323 wpl_array_t lite_list
, upl_control_flags_t cntrl_flags
, vm_tag_t tag
)
8328 int delayed_unlock
= 0;
8329 boolean_t retval
= TRUE
;
8332 vm_object_lock_assert_exclusive(object
);
8333 assert(object
->purgable
!= VM_PURGABLE_VOLATILE
);
8334 assert(object
->purgable
!= VM_PURGABLE_EMPTY
);
8335 assert(object
->pager
== NULL
);
8336 assert(object
->copy
== NULL
);
8337 assert(object
->shadow
== NULL
);
8339 page_count
= object
->resident_page_count
;
8340 dst_page
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
8342 vm_page_lock_queues();
8344 while (page_count
--) {
8345 if (dst_page
->vmp_busy
||
8346 dst_page
->vmp_fictitious
||
8347 dst_page
->vmp_absent
||
8348 dst_page
->vmp_error
||
8349 dst_page
->vmp_cleaning
||
8350 dst_page
->vmp_restart
||
8351 dst_page
->vmp_laundry
) {
8355 if ((cntrl_flags
& UPL_REQUEST_FORCE_COHERENCY
) && dst_page
->vmp_written_by_kernel
== TRUE
) {
8359 dst_page
->vmp_reference
= TRUE
;
8361 vm_page_wire(dst_page
, tag
, FALSE
);
8363 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
8364 SET_PAGE_DIRTY(dst_page
, FALSE
);
8366 entry
= (unsigned int)(dst_page
->vmp_offset
/ PAGE_SIZE
);
8367 assert(entry
>= 0 && entry
< object
->resident_page_count
);
8368 lite_list
[entry
>> 5] |= 1U << (entry
& 31);
8370 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
8372 if (phys_page
> upl
->highest_page
) {
8373 upl
->highest_page
= phys_page
;
8376 if (user_page_list
) {
8377 user_page_list
[entry
].phys_addr
= phys_page
;
8378 user_page_list
[entry
].absent
= dst_page
->vmp_absent
;
8379 user_page_list
[entry
].dirty
= dst_page
->vmp_dirty
;
8380 user_page_list
[entry
].free_when_done
= dst_page
->vmp_free_when_done
;
8381 user_page_list
[entry
].precious
= dst_page
->vmp_precious
;
8382 user_page_list
[entry
].device
= FALSE
;
8383 user_page_list
[entry
].speculative
= FALSE
;
8384 user_page_list
[entry
].cs_validated
= FALSE
;
8385 user_page_list
[entry
].cs_tainted
= FALSE
;
8386 user_page_list
[entry
].cs_nx
= FALSE
;
8387 user_page_list
[entry
].needed
= FALSE
;
8388 user_page_list
[entry
].mark
= FALSE
;
8390 if (delayed_unlock
++ > 256) {
8392 lck_mtx_yield(&vm_page_queue_lock
);
8394 VM_CHECK_MEMORYSTATUS
;
8396 dst_page
= (vm_page_t
)vm_page_queue_next(&dst_page
->vmp_listq
);
8399 vm_page_unlock_queues();
8401 VM_CHECK_MEMORYSTATUS
;
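/*
 * Illustrative sketch, not part of the original source: the "lite list"
 * filled in above is a plain bitmap with one bit per page of the UPL.
 * Entry N lives in 32-bit word N >> 5, at bit position N & 31.  The
 * hypothetical helpers below simply restate that encoding.
 */
#if 0   /* example only */
static inline void
example_lite_list_set(wpl_array_t lite_list, unsigned int entry)
{
	/* mark page "entry" as present/wired in the UPL's bitmap */
	lite_list[entry >> 5] |= 1U << (entry & 31);
}

static inline boolean_t
example_lite_list_test(wpl_array_t lite_list, unsigned int entry)
{
	/* check whether page "entry" was already processed */
	return (lite_list[entry >> 5] & (1U << (entry & 31))) ? TRUE : FALSE;
}
#endif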
8408 vm_object_iopl_wire_empty(vm_object_t object
, upl_t upl
, upl_page_info_array_t user_page_list
,
8409 wpl_array_t lite_list
, upl_control_flags_t cntrl_flags
, vm_tag_t tag
, vm_object_offset_t
*dst_offset
,
8410 int page_count
, int* page_grab_count
)
8413 boolean_t no_zero_fill
= FALSE
;
8415 int pages_wired
= 0;
8416 int pages_inserted
= 0;
8418 uint64_t delayed_ledger_update
= 0;
8419 kern_return_t ret
= KERN_SUCCESS
;
8423 vm_object_lock_assert_exclusive(object
);
8424 assert(object
->purgable
!= VM_PURGABLE_VOLATILE
);
8425 assert(object
->purgable
!= VM_PURGABLE_EMPTY
);
8426 assert(object
->pager
== NULL
);
8427 assert(object
->copy
== NULL
);
8428 assert(object
->shadow
== NULL
);
8430 if (cntrl_flags
& UPL_SET_INTERRUPTIBLE
) {
8431 interruptible
= THREAD_ABORTSAFE
;
8433 interruptible
= THREAD_UNINT
;
8436 if (cntrl_flags
& (UPL_NOZEROFILL
| UPL_NOZEROFILLIO
)) {
8437 no_zero_fill
= TRUE
;
8441 #if CONFIG_SECLUDED_MEMORY
8442 if (object
->can_grab_secluded
) {
8443 grab_options
|= VM_PAGE_GRAB_SECLUDED
;
8445 #endif /* CONFIG_SECLUDED_MEMORY */
8447 while (page_count
--) {
8448 while ((dst_page
= vm_page_grab_options(grab_options
))
8450 OSAddAtomic(page_count
, &vm_upl_wait_for_pages
);
8452 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
8454 if (vm_page_wait(interruptible
) == FALSE
) {
8458 OSAddAtomic(-page_count
, &vm_upl_wait_for_pages
);
8460 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, -1);
8462 ret
= MACH_SEND_INTERRUPTED
;
8465 OSAddAtomic(-page_count
, &vm_upl_wait_for_pages
);
8467 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
8469 if (no_zero_fill
== FALSE
) {
8470 vm_page_zero_fill(dst_page
);
8472 dst_page
->vmp_absent
= TRUE
;
8475 dst_page
->vmp_reference
= TRUE
;
8477 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
8478 SET_PAGE_DIRTY(dst_page
, FALSE
);
8480 if (dst_page
->vmp_absent
== FALSE
) {
8481 assert(dst_page
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
8482 assert(dst_page
->vmp_wire_count
== 0);
8483 dst_page
->vmp_wire_count
++;
8484 dst_page
->vmp_q_state
= VM_PAGE_IS_WIRED
;
8485 assert(dst_page
->vmp_wire_count
);
8487 PAGE_WAKEUP_DONE(dst_page
);
8491 vm_page_insert_internal(dst_page
, object
, *dst_offset
, tag
, FALSE
, TRUE
, TRUE
, TRUE
, &delayed_ledger_update
);
8493 lite_list
[entry
>> 5] |= 1U << (entry
& 31);
8495 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
8497 if (phys_page
> upl
->highest_page
) {
8498 upl
->highest_page
= phys_page
;
8501 if (user_page_list
) {
8502 user_page_list
[entry
].phys_addr
= phys_page
;
8503 user_page_list
[entry
].absent
= dst_page
->vmp_absent
;
8504 user_page_list
[entry
].dirty
= dst_page
->vmp_dirty
;
8505 user_page_list
[entry
].free_when_done
= FALSE
;
8506 user_page_list
[entry
].precious
= FALSE
;
8507 user_page_list
[entry
].device
= FALSE
;
8508 user_page_list
[entry
].speculative
= FALSE
;
8509 user_page_list
[entry
].cs_validated
= FALSE
;
8510 user_page_list
[entry
].cs_tainted
= FALSE
;
8511 user_page_list
[entry
].cs_nx
= FALSE
;
8512 user_page_list
[entry
].needed
= FALSE
;
8513 user_page_list
[entry
].mark
= FALSE
;
8516 *dst_offset
+= PAGE_SIZE_64
;
8520 vm_page_lockspin_queues();
8521 vm_page_wire_count
+= pages_wired
;
8522 vm_page_unlock_queues();
8524 if (pages_inserted
) {
8525 if (object
->internal
) {
8526 OSAddAtomic(pages_inserted
, &vm_page_internal_count
);
8528 OSAddAtomic(pages_inserted
, &vm_page_external_count
);
8531 if (delayed_ledger_update
) {
8533 int ledger_idx_volatile
;
8534 int ledger_idx_nonvolatile
;
8535 int ledger_idx_volatile_compressed
;
8536 int ledger_idx_nonvolatile_compressed
;
8537 boolean_t do_footprint
;
8539 owner
= VM_OBJECT_OWNER(object
);
8542 vm_object_ledger_tag_ledgers(object
,
8543 &ledger_idx_volatile
,
8544 &ledger_idx_nonvolatile
,
8545 &ledger_idx_volatile_compressed
,
8546 &ledger_idx_nonvolatile_compressed
,
8549 /* more non-volatile bytes */
8550 ledger_credit(owner
->ledger
,
8551 ledger_idx_nonvolatile
,
8552 delayed_ledger_update
);
8554 /* more footprint */
8555 ledger_credit(owner
->ledger
,
8556 task_ledgers
.phys_footprint
,
8557 delayed_ledger_update
);
8561 assert(page_grab_count
);
8562 *page_grab_count
= pages_inserted
;
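/*
 * Illustrative sketch, not part of the original source: instead of crediting
 * the owning task's ledger once per inserted page, the fast path above
 * accumulates the byte count in delayed_ledger_update and posts a single
 * ledger_credit() after the loop.  The helper below is hypothetical and only
 * shows that batching pattern.
 */
#if 0   /* example only */
static void
example_batched_ledger_credit(ledger_t ledger, int ledger_idx, int page_count)
{
	uint64_t total = (uint64_t)page_count * PAGE_SIZE;

	/* one credit for the whole batch instead of page_count separate ones */
	ledger_credit(ledger, ledger_idx, total);
}
#endif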
8570 vm_object_iopl_request(
8572 vm_object_offset_t offset
,
8575 upl_page_info_array_t user_page_list
,
8576 unsigned int *page_list_count
,
8577 upl_control_flags_t cntrl_flags
,
8581 vm_object_offset_t dst_offset
;
8582 upl_size_t xfer_size
;
8585 wpl_array_t lite_list
= NULL
;
8586 int no_zero_fill
= FALSE
;
8587 unsigned int size_in_pages
;
8588 int page_grab_count
= 0;
8592 struct vm_object_fault_info fault_info
= {};
8593 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
8594 struct vm_page_delayed_work
*dwp
;
8598 boolean_t caller_lookup
;
8599 int io_tracking_flag
= 0;
8603 boolean_t set_cache_attr_needed
= FALSE
;
8604 boolean_t free_wired_pages
= FALSE
;
8605 boolean_t fast_path_empty_req
= FALSE
;
8606 boolean_t fast_path_full_req
= FALSE
;
8608 #if DEVELOPMENT || DEBUG
8609 task_t task
= current_task();
8610 #endif /* DEVELOPMENT || DEBUG */
8612 if (cntrl_flags
& ~UPL_VALID_FLAGS
) {
8614 * For forward compatibility's sake,
8615 * reject any unknown flag.
8617 return KERN_INVALID_VALUE
;
8619 if (vm_lopage_needed
== FALSE
) {
8620 cntrl_flags
&= ~UPL_NEED_32BIT_ADDR
;
8623 if (cntrl_flags
& UPL_NEED_32BIT_ADDR
) {
8624 if ((cntrl_flags
& (UPL_SET_IO_WIRE
| UPL_SET_LITE
)) != (UPL_SET_IO_WIRE
| UPL_SET_LITE
)) {
8625 return KERN_INVALID_VALUE
;
8628 if (object
->phys_contiguous
) {
8629 if ((offset
+ object
->vo_shadow_offset
) >= (vm_object_offset_t
)max_valid_dma_address
) {
8630 return KERN_INVALID_ADDRESS
;
8633 if (((offset
+ object
->vo_shadow_offset
) + size
) >= (vm_object_offset_t
)max_valid_dma_address
) {
8634 return KERN_INVALID_ADDRESS
;
8638 if (cntrl_flags
& (UPL_NOZEROFILL
| UPL_NOZEROFILLIO
)) {
8639 no_zero_fill
= TRUE
;
8642 if (cntrl_flags
& UPL_COPYOUT_FROM
) {
8643 prot
= VM_PROT_READ
;
8645 prot
= VM_PROT_READ
| VM_PROT_WRITE
;
8648 if ((!object
->internal
) && (object
->paging_offset
!= 0)) {
8649 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
8652 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_START
, size
, cntrl_flags
, prot
, 0);
8654 #if CONFIG_IOSCHED || UPL_DEBUG
8655 if ((object
->io_tracking
&& object
!= kernel_object
) || upl_debug_enabled
) {
8656 io_tracking_flag
|= UPL_CREATE_IO_TRACKING
;
8661 if (object
->io_tracking
) {
8662 /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
8663 if (object
!= kernel_object
) {
8664 io_tracking_flag
|= UPL_CREATE_EXPEDITE_SUP
;
8669 if (object
->phys_contiguous
) {
8675 if (cntrl_flags
& UPL_SET_INTERNAL
) {
8676 upl
= upl_create(UPL_CREATE_INTERNAL
| UPL_CREATE_LITE
| io_tracking_flag
, UPL_IO_WIRE
, psize
);
8678 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
8679 lite_list
= (wpl_array_t
) (((uintptr_t)user_page_list
) +
8680 ((psize
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
8682 user_page_list
= NULL
;
8686 upl
= upl_create(UPL_CREATE_LITE
| io_tracking_flag
, UPL_IO_WIRE
, psize
);
8688 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
8693 if (user_page_list
) {
8694 user_page_list
[0].device
= FALSE
;
8698 if (cntrl_flags
& UPL_NOZEROFILLIO
) {
8699 DTRACE_VM4(upl_nozerofillio
,
8700 vm_object_t
, object
,
8701 vm_object_offset_t
, offset
,
8706 upl
->map_object
= object
;
8709 size_in_pages
= size
/ PAGE_SIZE
;
8711 if (object
== kernel_object
&&
8712 !(cntrl_flags
& (UPL_NEED_32BIT_ADDR
| UPL_BLOCK_ACCESS
))) {
8713 upl
->flags
|= UPL_KERNEL_OBJECT
;
8715 vm_object_lock(object
);
8717 vm_object_lock_shared(object
);
8720 vm_object_lock(object
);
8721 vm_object_activity_begin(object
);
8724 * paging in progress also protects the paging_offset
8726 upl
->offset
= offset
+ object
->paging_offset
;
8728 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
8730 * The user requested that access to the pages in this UPL
8731 * be blocked until the UPL is committed or aborted.
8733 upl
->flags
|= UPL_ACCESS_BLOCKED
;
8736 #if CONFIG_IOSCHED || UPL_DEBUG
8737 if (upl
->flags
& UPL_TRACKED_BY_OBJECT
) {
8738 vm_object_activity_begin(object
);
8739 queue_enter(&object
->uplq
, upl
, upl_t
, uplq
);
8743 if (object
->phys_contiguous
) {
8744 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
8745 assert(!object
->blocked_access
);
8746 object
->blocked_access
= TRUE
;
8749 vm_object_unlock(object
);
8752 * don't need any shadow mappings for this one
8753 * since it is already I/O memory
8755 upl
->flags
|= UPL_DEVICE_MEMORY
;
8757 upl
->highest_page
= (ppnum_t
) ((offset
+ object
->vo_shadow_offset
+ size
- 1) >> PAGE_SHIFT
);
8759 if (user_page_list
) {
8760 user_page_list
[0].phys_addr
= (ppnum_t
) ((offset
+ object
->vo_shadow_offset
) >> PAGE_SHIFT
);
8761 user_page_list
[0].device
= TRUE
;
8763 if (page_list_count
!= NULL
) {
8764 if (upl
->flags
& UPL_INTERNAL
) {
8765 *page_list_count
= 0;
8767 *page_list_count
= 1;
8771 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_END
, page_grab_count
, KERN_SUCCESS
, 0, 0);
8772 #if DEVELOPMENT || DEBUG
8774 ledger_credit(task
->ledger
, task_ledgers
.pages_grabbed_iopl
, page_grab_count
);
8776 #endif /* DEVELOPMENT || DEBUG */
8777 return KERN_SUCCESS
;
8779 if (object
!= kernel_object
&& object
!= compressor_object
) {
8781 * Protect user space from future COW operations
8783 #if VM_OBJECT_TRACKING_OP_TRUESHARE
8784 if (!object
->true_share
&&
8785 vm_object_tracking_inited
) {
8786 void *bt
[VM_OBJECT_TRACKING_BTDEPTH
];
8789 num
= OSBacktrace(bt
,
8790 VM_OBJECT_TRACKING_BTDEPTH
);
8791 btlog_add_entry(vm_object_tracking_btlog
,
8793 VM_OBJECT_TRACKING_OP_TRUESHARE
,
8797 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
8799 vm_object_lock_assert_exclusive(object
);
8800 object
->true_share
= TRUE
;
8802 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
8803 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
8807 if (!(cntrl_flags
& UPL_COPYOUT_FROM
) &&
8808 object
->copy
!= VM_OBJECT_NULL
) {
8810 * Honor copy-on-write obligations
8812 * The caller is gathering these pages and
8813 * might modify their contents. We need to
8814 * make sure that the copy object has its own
8815 * private copies of these pages before we let
8816 * the caller modify them.
8818 * NOTE: someone else could map the original object
8819 * after we've done this copy-on-write here, and they
8820 * could then see an inconsistent picture of the memory
8821 * while it's being modified via the UPL. To prevent this,
8822 * we would have to block access to these pages until the
8823 * UPL is released. We could use the UPL_BLOCK_ACCESS
8824 * code path for that...
8826 vm_object_update(object
,
8831 FALSE
, /* should_return */
8832 MEMORY_OBJECT_COPY_SYNC
,
8834 VM_PAGEOUT_DEBUG(iopl_cow
, 1);
8835 VM_PAGEOUT_DEBUG(iopl_cow_pages
, (size
>> PAGE_SHIFT
));
8837 if (!(cntrl_flags
& (UPL_NEED_32BIT_ADDR
| UPL_BLOCK_ACCESS
)) &&
8838 object
->purgable
!= VM_PURGABLE_VOLATILE
&&
8839 object
->purgable
!= VM_PURGABLE_EMPTY
&&
8840 object
->copy
== NULL
&&
8841 size
== object
->vo_size
&&
8843 object
->shadow
== NULL
&&
8844 object
->pager
== NULL
) {
8845 if (object
->resident_page_count
== size_in_pages
) {
8846 assert(object
!= compressor_object
);
8847 assert(object
!= kernel_object
);
8848 fast_path_full_req
= TRUE
;
8849 } else if (object
->resident_page_count
== 0) {
8850 assert(object
!= compressor_object
);
8851 assert(object
!= kernel_object
);
8852 fast_path_empty_req
= TRUE
;
8853 set_cache_attr_needed
= TRUE
;
8857 if (cntrl_flags
& UPL_SET_INTERRUPTIBLE
) {
8858 interruptible
= THREAD_ABORTSAFE
;
8860 interruptible
= THREAD_UNINT
;
8866 dst_offset
= offset
;
8869 if (fast_path_full_req
) {
8870 if (vm_object_iopl_wire_full(object
, upl
, user_page_list
, lite_list
, cntrl_flags
, tag
) == TRUE
) {
8874 * we couldn't complete the processing of this request on the fast path
8875 * so fall through to the slow path and finish up
8877 } else if (fast_path_empty_req
) {
8878 if (cntrl_flags
& UPL_REQUEST_NO_FAULT
) {
8879 ret
= KERN_MEMORY_ERROR
;
8882 ret
= vm_object_iopl_wire_empty(object
, upl
, user_page_list
, lite_list
, cntrl_flags
, tag
, &dst_offset
, size_in_pages
, &page_grab_count
);
8885 free_wired_pages
= TRUE
;
8891 fault_info
.behavior
= VM_BEHAVIOR_SEQUENTIAL
;
8892 fault_info
.lo_offset
= offset
;
8893 fault_info
.hi_offset
= offset
+ xfer_size
;
8894 fault_info
.mark_zf_absent
= TRUE
;
8895 fault_info
.interruptible
= interruptible
;
8896 fault_info
.batch_pmap_op
= TRUE
;
8899 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
8902 vm_fault_return_t result
;
8906 if (fast_path_full_req
) {
8908 * if we get here, it means that we ran into a page
8909 * state we couldn't handle in the fast path and
8910 * bailed out to the slow path... since the order
8911 * we look at pages is different between the 2 paths,
8912 * the following check is needed to determine whether
8913 * this page was already processed in the fast path
8915 if (lite_list
[entry
>> 5] & (1 << (entry
& 31))) {
8919 dst_page
= vm_page_lookup(object
, dst_offset
);
8921 if (dst_page
== VM_PAGE_NULL
||
8922 dst_page
->vmp_busy
||
8923 dst_page
->vmp_error
||
8924 dst_page
->vmp_restart
||
8925 dst_page
->vmp_absent
||
8926 dst_page
->vmp_fictitious
) {
8927 if (object
== kernel_object
) {
8928 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
8930 if (object
== compressor_object
) {
8931 panic("vm_object_iopl_request: missing/bad page in compressor object\n");
8934 if (cntrl_flags
& UPL_REQUEST_NO_FAULT
) {
8935 ret
= KERN_MEMORY_ERROR
;
8938 set_cache_attr_needed
= TRUE
;
8941 * We just looked up the page and the result remains valid
8942 * until the object lock is release, so send it to
8943 * vm_fault_page() (as "dst_page"), to avoid having to
8944 * look it up again there.
8946 caller_lookup
= TRUE
;
8950 kern_return_t error_code
;
8952 fault_info
.cluster_size
= xfer_size
;
8954 vm_object_paging_begin(object
);
8956 result
= vm_fault_page(object
, dst_offset
,
8957 prot
| VM_PROT_WRITE
, FALSE
,
8959 &prot
, &dst_page
, &top_page
,
8961 &error_code
, no_zero_fill
,
8962 FALSE
, &fault_info
);
8964 /* our lookup is no longer valid at this point */
8965 caller_lookup
= FALSE
;
8968 case VM_FAULT_SUCCESS
:
8971 if (!dst_page
->vmp_absent
) {
8972 PAGE_WAKEUP_DONE(dst_page
);
8975 * we only get back an absent page if we
8976 * requested that it not be zero-filled
8977 * because we are about to fill it via I/O
8979 * absent pages should be left BUSY
8980 * to prevent them from being faulted
8981 * into an address space before we've
8982 * had a chance to complete the I/O on
8983 * them since they may contain info that
8984 * shouldn't be seen by the faulting task
8988 * Release paging references and
8989 * top-level placeholder page, if any.
8991 if (top_page
!= VM_PAGE_NULL
) {
8992 vm_object_t local_object
;
8994 local_object
= VM_PAGE_OBJECT(top_page
);
8997 * comparing 2 packed pointers
8999 if (top_page
->vmp_object
!= dst_page
->vmp_object
) {
9000 vm_object_lock(local_object
);
9001 VM_PAGE_FREE(top_page
);
9002 vm_object_paging_end(local_object
);
9003 vm_object_unlock(local_object
);
9005 VM_PAGE_FREE(top_page
);
9006 vm_object_paging_end(local_object
);
9009 vm_object_paging_end(object
);
9012 case VM_FAULT_RETRY
:
9013 vm_object_lock(object
);
9016 case VM_FAULT_MEMORY_SHORTAGE
:
9017 OSAddAtomic((size_in_pages
- entry
), &vm_upl_wait_for_pages
);
9019 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
9021 if (vm_page_wait(interruptible
)) {
9022 OSAddAtomic(-(size_in_pages
- entry
), &vm_upl_wait_for_pages
);
9024 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
9025 vm_object_lock(object
);
9029 OSAddAtomic(-(size_in_pages
- entry
), &vm_upl_wait_for_pages
);
9031 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, -1);
9035 case VM_FAULT_INTERRUPTED
:
9036 error_code
= MACH_SEND_INTERRUPTED
;
9037 case VM_FAULT_MEMORY_ERROR
:
9039 ret
= (error_code
? error_code
: KERN_MEMORY_ERROR
);
9041 vm_object_lock(object
);
9044 case VM_FAULT_SUCCESS_NO_VM_PAGE
:
9045 /* success but no page: fail */
9046 vm_object_paging_end(object
);
9047 vm_object_unlock(object
);
9051 panic("vm_object_iopl_request: unexpected error"
9052 " 0x%x from vm_fault_page()\n", result
);
9054 } while (result
!= VM_FAULT_SUCCESS
);
9056 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
9058 if (upl
->flags
& UPL_KERNEL_OBJECT
) {
9059 goto record_phys_addr
;
9062 if (dst_page
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) {
9063 dst_page
->vmp_busy
= TRUE
;
9064 goto record_phys_addr
;
9067 if (dst_page
->vmp_cleaning
) {
9069 * Someone else is cleaning this page in place.
9070 * In theory, we should be able to proceed and use this
9071 * page but they'll probably end up clearing the "busy"
9072 * bit on it in upl_commit_range() but they didn't set
9073 * it, so they would clear our "busy" bit and open
9074 * us to race conditions.
9075 * We'd better wait for the cleaning to complete and
9078 VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning
, 1);
9079 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
9082 if (dst_page
->vmp_laundry
) {
9083 vm_pageout_steal_laundry(dst_page
, FALSE
);
9086 if ((cntrl_flags
& UPL_NEED_32BIT_ADDR
) &&
9087 phys_page
>= (max_valid_dma_address
>> PAGE_SHIFT
)) {
9092 * support devices that can't DMA above 32 bits
9093 * by substituting pages from a pool of low address
9094 * memory for any pages we find above the 4G mark
9095 * can't substitute if the page is already wired because
9096 * we don't know whether that physical address has been
9097 * handed out to some other 64 bit capable DMA device to use
9099 if (VM_PAGE_WIRED(dst_page
)) {
9100 ret
= KERN_PROTECTION_FAILURE
;
9103 low_page
= vm_page_grablo();
9105 if (low_page
== VM_PAGE_NULL
) {
9106 ret
= KERN_RESOURCE_SHORTAGE
;
9110 * from here until the vm_page_replace completes
9111 * we mustn't drop the object lock... we don't
9112 * want anyone refaulting this page in and using
9113 * it after we disconnect it... we want the fault
9114 * to find the new page being substituted.
9116 if (dst_page
->vmp_pmapped
) {
9117 refmod
= pmap_disconnect(phys_page
);
9122 if (!dst_page
->vmp_absent
) {
9123 vm_page_copy(dst_page
, low_page
);
9126 low_page
->vmp_reference
= dst_page
->vmp_reference
;
9127 low_page
->vmp_dirty
= dst_page
->vmp_dirty
;
9128 low_page
->vmp_absent
= dst_page
->vmp_absent
;
9130 if (refmod
& VM_MEM_REFERENCED
) {
9131 low_page
->vmp_reference
= TRUE
;
9133 if (refmod
& VM_MEM_MODIFIED
) {
9134 SET_PAGE_DIRTY(low_page
, FALSE
);
9137 vm_page_replace(low_page
, object
, dst_offset
);
9139 dst_page
= low_page
;
9141 * vm_page_grablo returned the page marked
9142 * BUSY... we don't need a PAGE_WAKEUP_DONE
9143 * here, because we've never dropped the object lock
9145 if (!dst_page
->vmp_absent
) {
9146 dst_page
->vmp_busy
= FALSE
;
9149 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
9151 if (!dst_page
->vmp_busy
) {
9152 dwp
->dw_mask
|= DW_vm_page_wire
;
9155 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
9157 * Mark the page "busy" to block any future page fault
9158 * on this page in addition to wiring it.
9159 * We'll also remove the mapping
9160 * of all these pages before leaving this routine.
9162 assert(!dst_page
->vmp_fictitious
);
9163 dst_page
->vmp_busy
= TRUE
;
9166 * expect the page to be used
9167 * page queues lock must be held to set 'reference'
9169 dwp
->dw_mask
|= DW_set_reference
;
9171 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
9172 SET_PAGE_DIRTY(dst_page
, TRUE
);
9174 * Page belonging to a code-signed object is about to
9175 * be written. Mark it tainted and disconnect it from
9176 * all pmaps so processes have to fault it back in and
9177 * deal with the tainted bit.
9179 if (object
->code_signed
&& dst_page
->vmp_cs_tainted
== FALSE
) {
9180 dst_page
->vmp_cs_tainted
= TRUE
;
9181 vm_page_iopl_tainted
++;
9182 if (dst_page
->vmp_pmapped
) {
9183 int refmod
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page
));
9184 if (refmod
& VM_MEM_REFERENCED
) {
9185 dst_page
->vmp_reference
= TRUE
;
9190 if ((cntrl_flags
& UPL_REQUEST_FORCE_COHERENCY
) && dst_page
->vmp_written_by_kernel
== TRUE
) {
9191 pmap_sync_page_attributes_phys(phys_page
);
9192 dst_page
->vmp_written_by_kernel
= FALSE
;
9196 if (dst_page
->vmp_busy
) {
9197 upl
->flags
|= UPL_HAS_BUSY
;
9200 lite_list
[entry
>> 5] |= 1U << (entry
& 31);
9202 if (phys_page
> upl
->highest_page
) {
9203 upl
->highest_page
= phys_page
;
9206 if (user_page_list
) {
9207 user_page_list
[entry
].phys_addr
= phys_page
;
9208 user_page_list
[entry
].free_when_done
= dst_page
->vmp_free_when_done
;
9209 user_page_list
[entry
].absent
= dst_page
->vmp_absent
;
9210 user_page_list
[entry
].dirty
= dst_page
->vmp_dirty
;
9211 user_page_list
[entry
].precious
= dst_page
->vmp_precious
;
9212 user_page_list
[entry
].device
= FALSE
;
9213 user_page_list
[entry
].needed
= FALSE
;
9214 if (dst_page
->vmp_clustered
== TRUE
) {
9215 user_page_list
[entry
].speculative
= (dst_page
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) ? TRUE
: FALSE
;
9217 user_page_list
[entry
].speculative
= FALSE
;
9219 user_page_list
[entry
].cs_validated
= dst_page
->vmp_cs_validated
;
9220 user_page_list
[entry
].cs_tainted
= dst_page
->vmp_cs_tainted
;
9221 user_page_list
[entry
].cs_nx
= dst_page
->vmp_cs_nx
;
9222 user_page_list
[entry
].mark
= FALSE
;
9224 if (object
!= kernel_object
&& object
!= compressor_object
) {
9226 * someone is explicitly grabbing this page...
9227 * update clustered and speculative state
9230 if (dst_page
->vmp_clustered
) {
9231 VM_PAGE_CONSUME_CLUSTERED(dst_page
);
9236 dst_offset
+= PAGE_SIZE_64
;
9237 xfer_size
-= PAGE_SIZE
;
9240 VM_PAGE_ADD_DELAYED_WORK(dwp
, dst_page
, dw_count
);
9242 if (dw_count
>= dw_limit
) {
9243 vm_page_do_delayed_work(object
, tag
, &dw_array
[0], dw_count
);
9250 assert(entry
== size_in_pages
);
9253 vm_page_do_delayed_work(object
, tag
, &dw_array
[0], dw_count
);
9256 if (user_page_list
&& set_cache_attr_needed
== TRUE
) {
9257 vm_object_set_pmap_cache_attr(object
, user_page_list
, size_in_pages
, TRUE
);
9260 if (page_list_count
!= NULL
) {
9261 if (upl
->flags
& UPL_INTERNAL
) {
9262 *page_list_count
= 0;
9263 } else if (*page_list_count
> size_in_pages
) {
9264 *page_list_count
= size_in_pages
;
9267 vm_object_unlock(object
);
9269 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
9271 * We've marked all the pages "busy" so that future
9272 * page faults will block.
9273 * Now remove the mapping for these pages, so that they
9274 * can't be accessed without causing a page fault.
9276 vm_object_pmap_protect(object
, offset
, (vm_object_size_t
)size
,
9277 PMAP_NULL
, 0, VM_PROT_NONE
);
9278 assert(!object
->blocked_access
);
9279 object
->blocked_access
= TRUE
;
9282 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_END
, page_grab_count
, KERN_SUCCESS
, 0, 0);
9283 #if DEVELOPMENT || DEBUG
9285 ledger_credit(task
->ledger
, task_ledgers
.pages_grabbed_iopl
, page_grab_count
);
9287 #endif /* DEVELOPMENT || DEBUG */
9288 return KERN_SUCCESS
;
9293 for (; offset
< dst_offset
; offset
+= PAGE_SIZE
) {
9294 boolean_t need_unwire
;
9296 dst_page
= vm_page_lookup(object
, offset
);
9298 if (dst_page
== VM_PAGE_NULL
) {
9299 panic("vm_object_iopl_request: Wired page missing. \n");
9303 * if we've already processed this page in an earlier
9304 * dw_do_work, we need to undo the wiring... we will
9305 * leave the dirty and reference bits on if they
9306 * were set, since we don't have a good way of knowing
9307 * what the previous state was and we won't get here
9308 * under any normal circumstances... we will always
9309 * clear BUSY and wakeup any waiters via vm_page_free
9310 * or PAGE_WAKEUP_DONE
9315 if (dw_array
[dw_index
].dw_m
== dst_page
) {
9317 * still in the deferred work list
9318 * which means we haven't yet called
9319 * vm_page_wire on this page
9321 need_unwire
= FALSE
;
9327 vm_page_lock_queues();
9329 if (dst_page
->vmp_absent
|| free_wired_pages
== TRUE
) {
9330 vm_page_free(dst_page
);
9332 need_unwire
= FALSE
;
9334 if (need_unwire
== TRUE
) {
9335 vm_page_unwire(dst_page
, TRUE
);
9338 PAGE_WAKEUP_DONE(dst_page
);
9340 vm_page_unlock_queues();
9342 if (need_unwire
== TRUE
) {
9343 VM_STAT_INCR(reactivations
);
9349 if (!(upl
->flags
& UPL_KERNEL_OBJECT
)) {
9350 vm_object_activity_end(object
);
9351 vm_object_collapse(object
, 0, TRUE
);
9353 vm_object_unlock(object
);
9356 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_END
, page_grab_count
, ret
, 0, 0);
9357 #if DEVELOPMENT || DEBUG
9359 ledger_credit(task
->ledger
, task_ledgers
.pages_grabbed_iopl
, page_grab_count
);
9361 #endif /* DEVELOPMENT || DEBUG */
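/*
 * Illustrative sketch, not part of the original source: a hypothetical caller
 * that wires a range of an object for I/O with an internal page list and then
 * releases it.  The function name, the flag combination and the VM tag are
 * assumptions made for the example, not a prescription.
 */
#if 0   /* example only */
static kern_return_t
example_wire_for_io(vm_object_t object, vm_object_offset_t offset, upl_size_t size)
{
	upl_t           upl = NULL;
	upl_page_info_t *pl;
	unsigned int    page_list_count = 0;
	kern_return_t   kr;

	kr = vm_object_iopl_request(object, offset, size, &upl, NULL,
	    &page_list_count,
	    UPL_SET_INTERNAL | UPL_SET_LITE | UPL_SET_IO_WIRE,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);

	/* ... perform the I/O against the wired physical pages described by pl ... */

	upl_commit(upl, pl, (mach_msg_type_number_t)(size / PAGE_SIZE));
	upl_deallocate(upl);
	return KERN_SUCCESS;
}
#endif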
9370 kern_return_t retval
;
9371 boolean_t upls_locked
;
9372 vm_object_t object1
, object2
;
9374 if (upl1
== UPL_NULL
|| upl2
== UPL_NULL
|| upl1
== upl2
|| ((upl1
->flags
& UPL_VECTOR
) == UPL_VECTOR
) || ((upl2
->flags
& UPL_VECTOR
) == UPL_VECTOR
)) {
9375 return KERN_INVALID_ARGUMENT
;
9378 upls_locked
= FALSE
;
9381 * Since we need to lock both UPLs at the same time,
9382 * avoid deadlocks by always taking locks in the same order.
9391 upls_locked
= TRUE
; /* the UPLs will need to be unlocked */
9393 object1
= upl1
->map_object
;
9394 object2
= upl2
->map_object
;
9396 if (upl1
->offset
!= 0 || upl2
->offset
!= 0 ||
9397 upl1
->size
!= upl2
->size
) {
9399 * We deal only with full objects, not subsets.
9400 * That's because we exchange the entire backing store info
9401 * for the objects: pager, resident pages, etc... We can't do
9404 retval
= KERN_INVALID_VALUE
;
9409 * Transpose the VM objects' backing store.
9411 retval
= vm_object_transpose(object1
, object2
,
9412 (vm_object_size_t
) upl1
->size
);
9414 if (retval
== KERN_SUCCESS
) {
9416 * Make each UPL point to the correct VM object, i.e. the
9417 * object holding the pages that the UPL refers to...
9419 #if CONFIG_IOSCHED || UPL_DEBUG
9420 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)) {
9421 vm_object_lock(object1
);
9422 vm_object_lock(object2
);
9424 if (upl1
->flags
& UPL_TRACKED_BY_OBJECT
) {
9425 queue_remove(&object1
->uplq
, upl1
, upl_t
, uplq
);
9427 if (upl2
->flags
& UPL_TRACKED_BY_OBJECT
) {
9428 queue_remove(&object2
->uplq
, upl2
, upl_t
, uplq
);
9431 upl1
->map_object
= object2
;
9432 upl2
->map_object
= object1
;
9434 #if CONFIG_IOSCHED || UPL_DEBUG
9435 if (upl1
->flags
& UPL_TRACKED_BY_OBJECT
) {
9436 queue_enter(&object2
->uplq
, upl1
, upl_t
, uplq
);
9438 if (upl2
->flags
& UPL_TRACKED_BY_OBJECT
) {
9439 queue_enter(&object1
->uplq
, upl2
, upl_t
, uplq
);
9441 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)) {
9442 vm_object_unlock(object2
);
9443 vm_object_unlock(object1
);
9455 upls_locked
= FALSE
;
9467 upl_page_info_t
*user_page_list
;
9470 if (!(upl
->flags
& UPL_INTERNAL
) || count
<= 0) {
9474 size_in_pages
= upl
->size
/ PAGE_SIZE
;
9476 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
9478 while (count
-- && index
< size_in_pages
) {
9479 user_page_list
[index
++].needed
= TRUE
;
9485 * Reserve of virtual addresses in the kernel address space.
9486 * We need to map the physical pages in the kernel, so that we
9487 * can call the code-signing or slide routines with a kernel
9488 * virtual address. We keep this pool of pre-allocated kernel
9489 * virtual addresses so that we don't have to scan the kernel's
9490 * virtual address space each time we need to work with
9493 decl_simple_lock_data(, vm_paging_lock
);
9494 #define VM_PAGING_NUM_PAGES 64
9495 vm_map_offset_t vm_paging_base_address
= 0;
9496 boolean_t vm_paging_page_inuse
[VM_PAGING_NUM_PAGES
] = { FALSE
, };
9497 int vm_paging_max_index
= 0;
9498 int vm_paging_page_waiter
= 0;
9499 int vm_paging_page_waiter_total
= 0;
9501 unsigned long vm_paging_no_kernel_page
= 0;
9502 unsigned long vm_paging_objects_mapped
= 0;
9503 unsigned long vm_paging_pages_mapped
= 0;
9504 unsigned long vm_paging_objects_mapped_slow
= 0;
9505 unsigned long vm_paging_pages_mapped_slow
= 0;
9508 vm_paging_map_init(void)
9511 vm_map_offset_t page_map_offset
;
9512 vm_map_entry_t map_entry
;
9514 assert(vm_paging_base_address
== 0);
9517 * Initialize our pool of pre-allocated kernel
9518 * virtual addresses.
9520 page_map_offset
= 0;
9521 kr
= vm_map_find_space(kernel_map
,
9523 VM_PAGING_NUM_PAGES
* PAGE_SIZE
,
9526 VM_MAP_KERNEL_FLAGS_NONE
,
9527 VM_KERN_MEMORY_NONE
,
9529 if (kr
!= KERN_SUCCESS
) {
9530 panic("vm_paging_map_init: kernel_map full\n");
9532 VME_OBJECT_SET(map_entry
, kernel_object
);
9533 VME_OFFSET_SET(map_entry
, page_map_offset
);
9534 map_entry
->protection
= VM_PROT_NONE
;
9535 map_entry
->max_protection
= VM_PROT_NONE
;
9536 map_entry
->permanent
= TRUE
;
9537 vm_object_reference(kernel_object
);
9538 vm_map_unlock(kernel_map
);
9540 assert(vm_paging_base_address
== 0);
9541 vm_paging_base_address
= page_map_offset
;
9545 * vm_paging_map_object:
9546 * Maps part of a VM object's pages in the kernel
9547 * virtual address space, using the pre-allocated
9548 * kernel virtual addresses, if possible.
9550 * The VM object is locked. This lock will get
9551 * dropped and re-acquired though, so the caller
9552 * must make sure the VM object is kept alive
9553 * (by holding a VM map that has a reference
9554 * on it, for example, or taking an extra reference).
9555 * The page should also be kept busy to prevent
9556 * it from being reclaimed.
9559 vm_paging_map_object(
9562 vm_object_offset_t offset
,
9563 vm_prot_t protection
,
9564 boolean_t can_unlock_object
,
9565 vm_map_size_t
*size
, /* IN/OUT */
9566 vm_map_offset_t
*address
, /* OUT */
9567 boolean_t
*need_unmap
) /* OUT */
9570 vm_map_offset_t page_map_offset
;
9571 vm_map_size_t map_size
;
9572 vm_object_offset_t object_offset
;
9575 if (page
!= VM_PAGE_NULL
&& *size
== PAGE_SIZE
) {
9576 /* use permanent 1-to-1 kernel mapping of physical memory ? */
9577 *address
= (vm_map_offset_t
)
9578 phystokv((pmap_paddr_t
)VM_PAGE_GET_PHYS_PAGE(page
) << PAGE_SHIFT
);
9579 *need_unmap
= FALSE
;
9580 return KERN_SUCCESS
;
9582 assert(page
->vmp_busy
);
9584 * Use one of the pre-allocated kernel virtual addresses
9585 * and just enter the VM page in the kernel address space
9586 * at that virtual address.
9588 simple_lock(&vm_paging_lock
, &vm_pageout_lck_grp
);
9591 * Try and find an available kernel virtual address
9592 * from our pre-allocated pool.
9594 page_map_offset
= 0;
9596 for (i
= 0; i
< VM_PAGING_NUM_PAGES
; i
++) {
9597 if (vm_paging_page_inuse
[i
] == FALSE
) {
9599 vm_paging_base_address
+
9604 if (page_map_offset
!= 0) {
9605 /* found a space to map our page ! */
9609 if (can_unlock_object
) {
9611 * If we can afford to unlock the VM object,
9612 * let's take the slow path now...
9617 * We can't afford to unlock the VM object, so
9618 * let's wait for a space to become available...
9620 vm_paging_page_waiter_total
++;
9621 vm_paging_page_waiter
++;
9622 kr
= assert_wait((event_t
)&vm_paging_page_waiter
, THREAD_UNINT
);
9623 if (kr
== THREAD_WAITING
) {
9624 simple_unlock(&vm_paging_lock
);
9625 kr
= thread_block(THREAD_CONTINUE_NULL
);
9626 simple_lock(&vm_paging_lock
, &vm_pageout_lck_grp
);
9628 vm_paging_page_waiter
--;
9629 /* ... and try again */
9632 if (page_map_offset
!= 0) {
9634 * We found a kernel virtual address;
9635 * map the physical page to that virtual address.
9637 if (i
> vm_paging_max_index
) {
9638 vm_paging_max_index
= i
;
9640 vm_paging_page_inuse
[i
] = TRUE
;
9641 simple_unlock(&vm_paging_lock
);
9643 page
->vmp_pmapped
= TRUE
;
9646 * Keep the VM object locked over the PMAP_ENTER
9647 * and the actual use of the page by the kernel,
9648 * or this pmap mapping might get undone by a
9649 * vm_object_pmap_protect() call...
9651 PMAP_ENTER(kernel_pmap
,
9659 assert(kr
== KERN_SUCCESS
);
9660 vm_paging_objects_mapped
++;
9661 vm_paging_pages_mapped
++;
9662 *address
= page_map_offset
;
9666 kasan_notify_address(page_map_offset
, PAGE_SIZE
);
9669 /* all done and mapped, ready to use ! */
9670 return KERN_SUCCESS
;
9674 * We ran out of pre-allocated kernel virtual
9675 * addresses. Just map the page in the kernel
9676 * the slow and regular way.
9678 vm_paging_no_kernel_page
++;
9679 simple_unlock(&vm_paging_lock
);
9682 if (!can_unlock_object
) {
9685 *need_unmap
= FALSE
;
9686 return KERN_NOT_SUPPORTED
;
9689 object_offset
= vm_object_trunc_page(offset
);
9690 map_size
= vm_map_round_page(*size
,
9691 VM_MAP_PAGE_MASK(kernel_map
));
9694 * Try and map the required range of the object
9698 vm_object_reference_locked(object
); /* for the map entry */
9699 vm_object_unlock(object
);
9701 kr
= vm_map_enter(kernel_map
,
9706 VM_MAP_KERNEL_FLAGS_NONE
,
9707 VM_KERN_MEMORY_NONE
,
9714 if (kr
!= KERN_SUCCESS
) {
9717 *need_unmap
= FALSE
;
9718 vm_object_deallocate(object
); /* for the map entry */
9719 vm_object_lock(object
);
9726 * Enter the mapped pages in the page table now.
9728 vm_object_lock(object
);
9730 * VM object must be kept locked from before PMAP_ENTER()
9731 * until after the kernel is done accessing the page(s).
9732 * Otherwise, the pmap mappings in the kernel could be
9733 * undone by a call to vm_object_pmap_protect().
9736 for (page_map_offset
= 0;
9738 map_size
-= PAGE_SIZE_64
, page_map_offset
+= PAGE_SIZE_64
) {
9739 page
= vm_page_lookup(object
, offset
+ page_map_offset
);
9740 if (page
== VM_PAGE_NULL
) {
9741 printf("vm_paging_map_object: no page !?");
9742 vm_object_unlock(object
);
9743 kr
= vm_map_remove(kernel_map
, *address
, *size
,
9744 VM_MAP_REMOVE_NO_FLAGS
);
9745 assert(kr
== KERN_SUCCESS
);
9748 *need_unmap
= FALSE
;
9749 vm_object_lock(object
);
9750 return KERN_MEMORY_ERROR
;
9752 page
->vmp_pmapped
= TRUE
;
9754 //assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page)));
9755 PMAP_ENTER(kernel_pmap
,
9756 *address
+ page_map_offset
,
9763 assert(kr
== KERN_SUCCESS
);
9765 kasan_notify_address(*address
+ page_map_offset
, PAGE_SIZE
);
9769 vm_paging_objects_mapped_slow
++;
9770 vm_paging_pages_mapped_slow
+= (unsigned long) (map_size
/ PAGE_SIZE_64
);
9774 return KERN_SUCCESS
;
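/*
 * Illustrative sketch, not part of the original source: the expected pairing
 * of vm_paging_map_object() and vm_paging_unmap_object().  Per the contract
 * described above, the page is assumed to be busy and its object locked; the
 * helper name is hypothetical.
 */
#if 0   /* example only */
static void
example_touch_page_in_kernel(vm_object_t object, vm_page_t page, vm_object_offset_t offset)
{
	vm_map_size_t   size = PAGE_SIZE;
	vm_map_offset_t kaddr;
	boolean_t       need_unmap;
	kern_return_t   kr;

	kr = vm_paging_map_object(page, object, offset,
	    VM_PROT_READ | VM_PROT_WRITE,
	    FALSE,              /* can_unlock_object */
	    &size, &kaddr, &need_unmap);
	if (kr != KERN_SUCCESS) {
		return;
	}

	/* ... access the page contents through kaddr ... */

	if (need_unmap) {
		vm_paging_unmap_object(object, kaddr, kaddr + PAGE_SIZE);
	}
}
#endif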
9778 * vm_paging_unmap_object:
9779 * Unmaps part of a VM object's pages from the kernel
9780 * virtual address space.
9782 * The VM object is locked. This lock will get
9783 * dropped and re-acquired though.
9786 vm_paging_unmap_object(
9788 vm_map_offset_t start
,
9789 vm_map_offset_t end
)
9794 if ((vm_paging_base_address
== 0) ||
9795 (start
< vm_paging_base_address
) ||
9796 (end
> (vm_paging_base_address
9797 + (VM_PAGING_NUM_PAGES
* PAGE_SIZE
)))) {
9799 * We didn't use our pre-allocated pool of
9800 * kernel virtual address. Deallocate the
9803 if (object
!= VM_OBJECT_NULL
) {
9804 vm_object_unlock(object
);
9806 kr
= vm_map_remove(kernel_map
, start
, end
,
9807 VM_MAP_REMOVE_NO_FLAGS
);
9808 if (object
!= VM_OBJECT_NULL
) {
9809 vm_object_lock(object
);
9811 assert(kr
== KERN_SUCCESS
);
9814 * We used a kernel virtual address from our
9815 * pre-allocated pool. Put it back in the pool
9818 assert(end
- start
== PAGE_SIZE
);
9819 i
= (int) ((start
- vm_paging_base_address
) >> PAGE_SHIFT
);
9820 assert(i
>= 0 && i
< VM_PAGING_NUM_PAGES
);
9822 /* undo the pmap mapping */
9823 pmap_remove(kernel_pmap
, start
, end
);
9825 simple_lock(&vm_paging_lock
, &vm_pageout_lck_grp
);
9826 vm_paging_page_inuse
[i
] = FALSE
;
9827 if (vm_paging_page_waiter
) {
9828 thread_wakeup(&vm_paging_page_waiter
);
9830 simple_unlock(&vm_paging_lock
);
/*
 * page->vmp_object must be locked
 */
void
vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
{
	if (!queues_locked) {
		vm_page_lockspin_queues();
	}

	page->vmp_free_when_done = FALSE;
	/*
	 * need to drop the laundry count...
	 * we may also need to remove it
	 * from the I/O paging queue...
	 * vm_pageout_throttle_up handles both cases
	 *
	 * the laundry and pageout_queue flags are cleared...
	 */
	vm_pageout_throttle_up(page);

	if (!queues_locked) {
		vm_page_unlock_queues();
	}
}
upl_t
vector_upl_create(vm_offset_t upl_offset)
{
	int             vector_upl_size = sizeof(struct _vector_upl);
	int             i = 0;
	upl_t           upl;
	vector_upl_t    vector_upl = (vector_upl_t)kalloc(vector_upl_size);

	upl = upl_create(0, UPL_VECTOR, 0);
	upl->vector_upl = vector_upl;
	upl->offset = upl_offset;
	vector_upl->size = 0;
	vector_upl->offset = upl_offset;
	vector_upl->invalid_upls = 0;
	vector_upl->num_upls = 0;
	vector_upl->pagelist = NULL;

	for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
		vector_upl->upl_iostates[i].size = 0;
		vector_upl->upl_iostates[i].offset = 0;
	}
	return upl;
}
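/*
 * Illustrative sketch, not part of the original source: how a vector UPL is
 * typically assembled - create the container, attach each sub-UPL with its
 * I/O size, record its window within the vector, then build the aggregate
 * page list.  The helper name, the sub-UPL array and the running-offset
 * bookkeeping are assumptions made for the example.
 */
#if 0   /* example only */
static upl_t
example_build_vector_upl(vm_offset_t offset, upl_t *subupls, uint32_t *io_sizes, uint32_t count)
{
	upl_t           vupl;
	upl_offset_t    running_off = 0;
	uint32_t        i;

	vupl = vector_upl_create(offset);
	for (i = 0; i < count; i++) {
		vector_upl_set_subupl(vupl, subupls[i], io_sizes[i]);
		vector_upl_set_iostate(vupl, subupls[i], running_off, io_sizes[i]);
		running_off += io_sizes[i];
	}
	vector_upl_set_pagelist(vupl);
	return vupl;
}
#endif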
void
vector_upl_deallocate(upl_t upl)
{
	if (upl) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			if (vector_upl->invalid_upls != vector_upl->num_upls) {
				panic("Deallocating non-empty Vectored UPL\n");
			}
			kfree(vector_upl->pagelist, (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
			vector_upl->invalid_upls = 0;
			vector_upl->num_upls = 0;
			vector_upl->pagelist = NULL;
			vector_upl->size = 0;
			vector_upl->offset = 0;
			kfree(vector_upl, sizeof(struct _vector_upl));
			vector_upl = (vector_upl_t)0xfeedfeed;
		} else {
			panic("vector_upl_deallocate was passed a non-vectored upl\n");
		}
	} else {
		panic("vector_upl_deallocate was passed a NULL upl\n");
	}
}
boolean_t
vector_upl_is_valid(upl_t upl)
{
	if (upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef) {
			return FALSE;
		} else {
			return TRUE;
		}
	}
	return FALSE;
}
9925 vector_upl_set_subupl(upl_t upl
, upl_t subupl
, uint32_t io_size
)
9927 if (vector_upl_is_valid(upl
)) {
9928 vector_upl_t vector_upl
= upl
->vector_upl
;
9933 if (io_size
< PAGE_SIZE
) {
9934 io_size
= PAGE_SIZE
;
9936 subupl
->vector_upl
= (void*)vector_upl
;
9937 vector_upl
->upl_elems
[vector_upl
->num_upls
++] = subupl
;
9938 vector_upl
->size
+= io_size
;
9939 upl
->size
+= io_size
;
9941 uint32_t i
= 0, invalid_upls
= 0;
9942 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
9943 if (vector_upl
->upl_elems
[i
] == subupl
) {
9947 if (i
== vector_upl
->num_upls
) {
9948 panic("Trying to remove sub-upl when none exists");
9951 vector_upl
->upl_elems
[i
] = NULL
;
9952 invalid_upls
= os_atomic_inc(&(vector_upl
)->invalid_upls
,
9954 if (invalid_upls
== vector_upl
->num_upls
) {
9961 panic("vector_upl_set_subupl was passed a NULL upl element\n");
9964 panic("vector_upl_set_subupl was passed a non-vectored upl\n");
9967 panic("vector_upl_set_subupl was passed a NULL upl\n");
9974 vector_upl_set_pagelist(upl_t upl
)
9976 if (vector_upl_is_valid(upl
)) {
9978 vector_upl_t vector_upl
= upl
->vector_upl
;
9981 vm_offset_t pagelist_size
= 0, cur_upl_pagelist_size
= 0;
9983 vector_upl
->pagelist
= (upl_page_info_array_t
)kalloc(sizeof(struct upl_page_info
) * (vector_upl
->size
/ PAGE_SIZE
));
9985 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
9986 cur_upl_pagelist_size
= sizeof(struct upl_page_info
) * vector_upl
->upl_elems
[i
]->size
/ PAGE_SIZE
;
9987 bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl
->upl_elems
[i
]), (char*)vector_upl
->pagelist
+ pagelist_size
, cur_upl_pagelist_size
);
9988 pagelist_size
+= cur_upl_pagelist_size
;
9989 if (vector_upl
->upl_elems
[i
]->highest_page
> upl
->highest_page
) {
9990 upl
->highest_page
= vector_upl
->upl_elems
[i
]->highest_page
;
9993 assert( pagelist_size
== (sizeof(struct upl_page_info
) * (vector_upl
->size
/ PAGE_SIZE
)));
9995 panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
9998 panic("vector_upl_set_pagelist was passed a NULL upl\n");
10003 vector_upl_subupl_byindex(upl_t upl
, uint32_t index
)
10005 if (vector_upl_is_valid(upl
)) {
10006 vector_upl_t vector_upl
= upl
->vector_upl
;
10008 if (index
< vector_upl
->num_upls
) {
10009 return vector_upl
->upl_elems
[index
];
10012 panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
10019 vector_upl_subupl_byoffset(upl_t upl
, upl_offset_t
*upl_offset
, upl_size_t
*upl_size
)
10021 if (vector_upl_is_valid(upl
)) {
10023 vector_upl_t vector_upl
= upl
->vector_upl
;
10026 upl_t subupl
= NULL
;
10027 vector_upl_iostates_t subupl_state
;
10029 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
10030 subupl
= vector_upl
->upl_elems
[i
];
10031 subupl_state
= vector_upl
->upl_iostates
[i
];
10032 if (*upl_offset
<= (subupl_state
.offset
+ subupl_state
.size
- 1)) {
10033 /* We could have been passed an offset/size pair that belongs
10034 * to an UPL element that has already been committed/aborted.
10035 * If so, return NULL.
10037 if (subupl
== NULL
) {
10040 if ((subupl_state
.offset
+ subupl_state
.size
) < (*upl_offset
+ *upl_size
)) {
10041 *upl_size
= (subupl_state
.offset
+ subupl_state
.size
) - *upl_offset
;
10042 if (*upl_size
> subupl_state
.size
) {
10043 *upl_size
= subupl_state
.size
;
10046 if (*upl_offset
>= subupl_state
.offset
) {
10047 *upl_offset
-= subupl_state
.offset
;
10049 panic("Vector UPL offset miscalculation\n");
10055 panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
10062 vector_upl_get_submap(upl_t upl
, vm_map_t
*v_upl_submap
, vm_offset_t
*submap_dst_addr
)
10064 *v_upl_submap
= NULL
;
10066 if (vector_upl_is_valid(upl
)) {
10067 vector_upl_t vector_upl
= upl
->vector_upl
;
10069 *v_upl_submap
= vector_upl
->submap
;
10070 *submap_dst_addr
= vector_upl
->submap_dst_addr
;
10072 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
10075 panic("vector_upl_get_submap was passed a null UPL\n");
10080 vector_upl_set_submap(upl_t upl
, vm_map_t submap
, vm_offset_t submap_dst_addr
)
10082 if (vector_upl_is_valid(upl
)) {
10083 vector_upl_t vector_upl
= upl
->vector_upl
;
10085 vector_upl
->submap
= submap
;
10086 vector_upl
->submap_dst_addr
= submap_dst_addr
;
10088 panic("vector_upl_get_submap was passed a non-vectored UPL\n");
10091 panic("vector_upl_get_submap was passed a NULL UPL\n");
10096 vector_upl_set_iostate(upl_t upl
, upl_t subupl
, upl_offset_t offset
, upl_size_t size
)
10098 if (vector_upl_is_valid(upl
)) {
10100 vector_upl_t vector_upl
= upl
->vector_upl
;
10103 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
10104 if (vector_upl
->upl_elems
[i
] == subupl
) {
10109 if (i
== vector_upl
->num_upls
) {
10110 panic("setting sub-upl iostate when none exists");
10113 vector_upl
->upl_iostates
[i
].offset
= offset
;
10114 if (size
< PAGE_SIZE
) {
10117 vector_upl
->upl_iostates
[i
].size
= size
;
10119 panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
10122 panic("vector_upl_set_iostate was passed a NULL UPL\n");
10127 vector_upl_get_iostate(upl_t upl
, upl_t subupl
, upl_offset_t
*offset
, upl_size_t
*size
)
10129 if (vector_upl_is_valid(upl
)) {
10131 vector_upl_t vector_upl
= upl
->vector_upl
;
10134 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
10135 if (vector_upl
->upl_elems
[i
] == subupl
) {
10140 if (i
== vector_upl
->num_upls
) {
10141 panic("getting sub-upl iostate when none exists");
10144 *offset
= vector_upl
->upl_iostates
[i
].offset
;
10145 *size
= vector_upl
->upl_iostates
[i
].size
;
10147 panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
10150 panic("vector_upl_get_iostate was passed a NULL UPL\n");
10155 vector_upl_get_iostate_byindex(upl_t upl
, uint32_t index
, upl_offset_t
*offset
, upl_size_t
*size
)
10157 if (vector_upl_is_valid(upl
)) {
10158 vector_upl_t vector_upl
= upl
->vector_upl
;
10160 if (index
< vector_upl
->num_upls
) {
10161 *offset
= vector_upl
->upl_iostates
[index
].offset
;
10162 *size
= vector_upl
->upl_iostates
[index
].size
;
10164 *offset
= *size
= 0;
10167 panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
10170 panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
upl_page_info_t *
upl_get_internal_vectorupl_pagelist(upl_t upl)
{
	return ((vector_upl_t)(upl->vector_upl))->pagelist;
}

void *
upl_get_internal_vectorupl(upl_t upl)
{
	return upl->vector_upl;
}

vm_size_t
upl_get_internal_pagelist_offset(void)
{
	return sizeof(struct upl);
}

void
upl_clear_dirty(
	upl_t           upl,
	boolean_t       value)
{
	if (value) {
		upl->flags |= UPL_CLEAR_DIRTY;
	} else {
		upl->flags &= ~UPL_CLEAR_DIRTY;
	}
}

void
upl_set_referenced(
	upl_t           upl,
	boolean_t       value)
{
	upl_lock(upl);
	if (value) {
		upl->ext_ref_count++;
	} else {
		if (!upl->ext_ref_count) {
			panic("upl_set_referenced not %p\n", upl);
		}
		upl->ext_ref_count--;
	}
	upl_unlock(upl);
}
#if CONFIG_IOSCHED
void
upl_set_blkno(upl_t upl, vm_offset_t upl_offset, int io_size, int64_t blkno)
{
	int i, j;

	if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
		return;
	}

	assert(upl->upl_reprio_info != 0);
	for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
		UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
	}
}
#endif
void
memoryshot(unsigned int event, unsigned int control)
{
	if (vm_debug_events) {
		KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
		    vm_page_active_count, vm_page_inactive_count,
		    vm_page_free_count, vm_page_speculative_count,
		    vm_page_throttled_count);
	}
}
*upl
)
10260 return UPL_DEVICE_PAGE(upl
);
10263 upl_page_present(upl_page_info_t
*upl
, int index
)
10265 return UPL_PAGE_PRESENT(upl
, index
);
10268 upl_speculative_page(upl_page_info_t
*upl
, int index
)
10270 return UPL_SPECULATIVE_PAGE(upl
, index
);
10273 upl_dirty_page(upl_page_info_t
*upl
, int index
)
10275 return UPL_DIRTY_PAGE(upl
, index
);
10278 upl_valid_page(upl_page_info_t
*upl
, int index
)
10280 return UPL_VALID_PAGE(upl
, index
);
10283 upl_phys_page(upl_page_info_t
*upl
, int index
)
10285 return UPL_PHYS_PAGE(upl
, index
);
10289 upl_page_set_mark(upl_page_info_t
*upl
, int index
, boolean_t v
)
10291 upl
[index
].mark
= v
;
10295 upl_page_get_mark(upl_page_info_t
*upl
, int index
)
10297 return upl
[index
].mark
;
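/*
 * Illustrative sketch, not part of the original source: walking the page list
 * of an internal UPL with the accessors above, e.g. to total up the dirty
 * pages.  The helper name is hypothetical.
 */
#if 0   /* example only */
static unsigned int
example_count_dirty_pages(upl_t upl, unsigned int num_pages)
{
	upl_page_info_t *pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	unsigned int    i, ndirty = 0;

	for (i = 0; i < num_pages; i++) {
		if (upl_valid_page(pl, i) && upl_dirty_page(pl, i)) {
			ndirty++;
		}
	}
	return ndirty;
}
#endif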
10301 vm_countdirtypages(void)
10313 vm_page_lock_queues();
10314 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_inactive
);
10316 if (m
== (vm_page_t
)0) {
10320 if (m
->vmp_dirty
) {
10323 if (m
->vmp_free_when_done
) {
10326 if (m
->vmp_precious
) {
10330 assert(VM_PAGE_OBJECT(m
) != kernel_object
);
10331 m
= (vm_page_t
) vm_page_queue_next(&m
->vmp_pageq
);
10332 if (m
== (vm_page_t
)0) {
10335 } while (!vm_page_queue_end(&vm_page_queue_inactive
, (vm_page_queue_entry_t
) m
));
10336 vm_page_unlock_queues();
10338 vm_page_lock_queues();
10339 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_throttled
);
10341 if (m
== (vm_page_t
)0) {
10346 assert(m
->vmp_dirty
);
10347 assert(!m
->vmp_free_when_done
);
10348 assert(VM_PAGE_OBJECT(m
) != kernel_object
);
10349 m
= (vm_page_t
) vm_page_queue_next(&m
->vmp_pageq
);
10350 if (m
== (vm_page_t
)0) {
10353 } while (!vm_page_queue_end(&vm_page_queue_throttled
, (vm_page_queue_entry_t
) m
));
10354 vm_page_unlock_queues();
10356 vm_page_lock_queues();
10357 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_anonymous
);
10359 if (m
== (vm_page_t
)0) {
10363 if (m
->vmp_dirty
) {
10366 if (m
->vmp_free_when_done
) {
10369 if (m
->vmp_precious
) {
10373 assert(VM_PAGE_OBJECT(m
) != kernel_object
);
10374 m
= (vm_page_t
) vm_page_queue_next(&m
->vmp_pageq
);
10375 if (m
== (vm_page_t
)0) {
10378 } while (!vm_page_queue_end(&vm_page_queue_anonymous
, (vm_page_queue_entry_t
) m
));
10379 vm_page_unlock_queues();
10381 printf("IN Q: %d : %d : %d\n", dpages
, pgopages
, precpages
);
10387 vm_page_lock_queues();
10388 m
= (vm_page_t
) vm_page_queue_first(&vm_page_queue_active
);
10391 if (m
== (vm_page_t
)0) {
10394 if (m
->vmp_dirty
) {
10397 if (m
->vmp_free_when_done
) {
10400 if (m
->vmp_precious
) {
10404 assert(VM_PAGE_OBJECT(m
) != kernel_object
);
10405 m
= (vm_page_t
) vm_page_queue_next(&m
->vmp_pageq
);
10406 if (m
== (vm_page_t
)0) {
10409 } while (!vm_page_queue_end(&vm_page_queue_active
, (vm_page_queue_entry_t
) m
));
10410 vm_page_unlock_queues();
10412 printf("AC Q: %d : %d : %d\n", dpages
, pgopages
, precpages
);
10414 #endif /* MACH_BSD */
#if CONFIG_IOSCHED
int
upl_get_cached_tier(upl_t upl)
{
	assert(upl);
	if (upl->flags & UPL_TRACKED_BY_OBJECT) {
		return upl->upl_priority;
	}
	return -1;
}
#endif /* CONFIG_IOSCHED */
void
upl_callout_iodone(upl_t upl)
{
	struct upl_io_completion *upl_ctx = upl->upl_iodone;

	if (upl_ctx) {
		void    (*iodone_func)(void *, int) = upl_ctx->io_done;

		assert(upl_ctx->io_done);

		(*iodone_func)(upl_ctx->io_context, upl_ctx->io_error);
	}
}

void
upl_set_iodone(upl_t upl, void *upl_iodone)
{
	upl->upl_iodone = (struct upl_io_completion *)upl_iodone;
}

void
upl_set_iodone_error(upl_t upl, int error)
{
	struct upl_io_completion *upl_ctx = upl->upl_iodone;

	if (upl_ctx) {
		upl_ctx->io_error = error;
	}
}
ppnum_t
upl_get_highest_page(
	upl_t   upl)
{
	return upl->highest_page;
}

upl_t
upl_associated_upl(upl_t upl)
{
	return upl->associated_upl;
}

void
upl_set_associated_upl(upl_t upl, upl_t associated_upl)
{
	upl->associated_upl = associated_upl;
}

struct vnode *
upl_lookup_vnode(upl_t upl)
{
	if (!upl->map_object->internal) {
		return vnode_pager_lookup_vnode(upl->map_object->pager);
	} else {
		return NULL;
	}
}

#if UPL_DEBUG
kern_return_t
upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
	upl->ubc_alias1 = alias1;
	upl->ubc_alias2 = alias2;
	return KERN_SUCCESS;
}

kern_return_t
upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
{
	if (al) {
		*al = upl->ubc_alias1;
	}
	if (al2) {
		*al2 = upl->ubc_alias2;
	}
	return KERN_SUCCESS;
}
#endif /* UPL_DEBUG */
#if VM_PRESSURE_EVENTS
/*
 * Upward trajectory.
 */
extern boolean_t vm_compressor_low_on_space(void);

boolean_t
VM_PRESSURE_NORMAL_TO_WARNING(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
			/* No frozen processes to kill */
			if (memorystatus_frozen_count == 0) {
				/* Not enough suspended processes available. */
				if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
					return TRUE;
				}
			}
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0;
	}
}

boolean_t
VM_PRESSURE_WARNING_TO_CRITICAL(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_critical) {
			return TRUE;
		}
		return FALSE;
	} else {
		return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
	}
}

/*
 * Downward trajectory.
 */
boolean_t
VM_PRESSURE_WARNING_TO_NORMAL(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages above our threshold */
		unsigned int target_threshold = (unsigned int) (memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0;
	}
}

boolean_t
VM_PRESSURE_CRITICAL_TO_WARNING(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages above our threshold */
		unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
	}
}
#endif /* VM_PRESSURE_EVENTS */
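/*
 * Illustrative sketch, not part of the original source: the downward-trajectory
 * checks above add a 15% hysteresis band on top of the pressure/critical page
 * thresholds before declaring a return to a calmer state.  The helper below
 * is hypothetical and only restates that arithmetic.
 */
#if 0   /* example only */
static boolean_t
example_back_to_normal(uint64_t available_pages, uint64_t pressure_threshold)
{
	/* require 15% headroom above the threshold before downgrading pressure */
	uint64_t target = pressure_threshold + ((15 * pressure_threshold) / 100);

	return (available_pages > target) ? TRUE : FALSE;
}
#endif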
10594 #define VM_TEST_COLLAPSE_COMPRESSOR 0
10595 #define VM_TEST_WIRE_AND_EXTRACT 0
10596 #define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
10598 #define VM_TEST_KERNEL_OBJECT_FAULT 0
10599 #endif /* __arm64__ */
10600 #define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)
#if VM_TEST_COLLAPSE_COMPRESSOR
extern boolean_t vm_object_collapse_compressor_allowed;
#include <IOKit/IOLib.h>
static void
vm_test_collapse_compressor(void)
{
	vm_object_size_t	backing_size, top_size;
	vm_object_t		backing_object, top_object;
	vm_map_offset_t		backing_offset, top_offset;
	unsigned char		*backing_address, *top_address;
	kern_return_t		kr;

	printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");

	/* create backing object */
	backing_size = 15 * PAGE_SIZE;
	backing_object = vm_object_allocate(backing_size);
	assert(backing_object != VM_OBJECT_NULL);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
	    backing_object);
	/* map backing object */
	backing_offset = 0;
	kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
	    VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
	    backing_object, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	backing_address = (unsigned char *) backing_offset;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "mapped backing object %p at 0x%llx\n",
	    backing_object, (uint64_t) backing_offset);
	/* populate with pages to be compressed in backing object */
	backing_address[0x1 * PAGE_SIZE] = 0xB1;
	backing_address[0x4 * PAGE_SIZE] = 0xB4;
	backing_address[0x7 * PAGE_SIZE] = 0xB7;
	backing_address[0xa * PAGE_SIZE] = 0xBA;
	backing_address[0xd * PAGE_SIZE] = 0xBD;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be compressed in "
	    "backing_object %p\n", backing_object);
	/* compress backing object */
	vm_object_pageout(backing_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
	    backing_object);
	/* wait for all the pages to be gone */
	while (*(volatile int *)&backing_object->resident_page_count != 0) {
		IODelay(10);
	}
	printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
	    backing_object);
	/* populate with pages to be resident in backing object */
	backing_address[0x0 * PAGE_SIZE] = 0xB0;
	backing_address[0x3 * PAGE_SIZE] = 0xB3;
	backing_address[0x6 * PAGE_SIZE] = 0xB6;
	backing_address[0x9 * PAGE_SIZE] = 0xB9;
	backing_address[0xc * PAGE_SIZE] = 0xBC;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be resident in "
	    "backing_object %p\n", backing_object);
	/* leave the other pages absent */
	/* mess with the paging_offset of the backing_object */
	assert(backing_object->paging_offset == 0);
	backing_object->paging_offset = 0x3000;

	/* create top object */
	top_size = 9 * PAGE_SIZE;
	top_object = vm_object_allocate(top_size);
	assert(top_object != VM_OBJECT_NULL);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
	    top_object);
	/* map top object */
	top_offset = 0;
	kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
	    VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
	    top_object, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	top_address = (unsigned char *) top_offset;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "mapped top object %p at 0x%llx\n",
	    top_object, (uint64_t) top_offset);
	/* populate with pages to be compressed in top object */
	top_address[0x3 * PAGE_SIZE] = 0xA3;
	top_address[0x4 * PAGE_SIZE] = 0xA4;
	top_address[0x5 * PAGE_SIZE] = 0xA5;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be compressed in "
	    "top_object %p\n", top_object);
	/* compress top object */
	vm_object_pageout(top_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
	    top_object);
	/* wait for all the pages to be gone */
	while (top_object->resident_page_count != 0) {
		IODelay(10);
	}
	printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
	    top_object);
	/* populate with pages to be resident in top object */
	top_address[0x0 * PAGE_SIZE] = 0xA0;
	top_address[0x1 * PAGE_SIZE] = 0xA1;
	top_address[0x2 * PAGE_SIZE] = 0xA2;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be resident in "
	    "top_object %p\n", top_object);
	/* leave the other pages absent */

	/* link the 2 objects */
	vm_object_reference(backing_object);
	top_object->shadow = backing_object;
	top_object->vo_shadow_offset = 0x3000;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
	    top_object, backing_object);

	/* unmap backing object */
	vm_map_remove(kernel_map,
	    backing_offset,
	    backing_offset + backing_size,
	    VM_MAP_REMOVE_NO_FLAGS);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "unmapped backing_object %p [0x%llx:0x%llx]\n",
	    backing_object,
	    (uint64_t) backing_offset,
	    (uint64_t) (backing_offset + backing_size));

	printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
	vm_object_lock(top_object);
	vm_object_collapse(top_object, 0, FALSE);
	vm_object_unlock(top_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);

	if (top_object->shadow != VM_OBJECT_NULL) {
		printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
		printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		if (vm_object_collapse_compressor_allowed) {
			panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		}
	} else {
		/* check the contents of the mapping */
		unsigned char expect[9] =
		{ 0xA0, 0xA1, 0xA2,	/* resident in top */
		  0xA3, 0xA4, 0xA5,	/* compressed in top */
		  0xB9,		/* resident in backing + shadow_offset */
		  0xBD,		/* compressed in backing + shadow_offset + paging_offset */
		  0x00 };	/* absent in both */
		unsigned char actual[9];
		unsigned int i, errors;

		errors = 0;
		for (i = 0; i < sizeof(actual); i++) {
			actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
			if (actual[i] != expect[i]) {
				errors++;
			}
		}
		printf("VM_TEST_COLLAPSE_COMPRESSOR: "
		    "actual [%x %x %x %x %x %x %x %x %x] "
		    "expect [%x %x %x %x %x %x %x %x %x] "
		    "%d errors\n",
		    actual[0], actual[1], actual[2], actual[3],
		    actual[4], actual[5], actual[6], actual[7],
		    actual[8],
		    expect[0], expect[1], expect[2], expect[3],
		    expect[4], expect[5], expect[6], expect[7],
		    expect[8],
		    errors);
		if (errors) {
			panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		} else {
			printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
		}
	}
}
#else /* VM_TEST_COLLAPSE_COMPRESSOR */
#define vm_test_collapse_compressor()
#endif /* VM_TEST_COLLAPSE_COMPRESSOR */
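
/*
 * VM_TEST_WIRE_AND_EXTRACT: remaps a small pmap-backed "user" allocation into
 * a second map, wires each page through vm_map_wire_and_extract(), and checks
 * that the physical page number it returns matches what
 * vm_map_get_phys_page() reports for the original mapping (including on a
 * re-wire of an already-wired page).  Any mismatch panics the test.
 */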
#if VM_TEST_WIRE_AND_EXTRACT
extern ledger_template_t task_ledger_template;
#include <mach/mach_vm.h>
extern ppnum_t vm_map_get_phys_page(vm_map_t map,
    vm_offset_t offset);
static void
vm_test_wire_and_extract(void)
{
	ledger_t		ledger;
	vm_map_t		user_map, wire_map;
	mach_vm_address_t	user_addr, wire_addr;
	mach_vm_size_t		user_size, wire_size;
	mach_vm_offset_t	cur_offset;
	vm_prot_t		cur_prot, max_prot;
	ppnum_t			user_ppnum, wire_ppnum;
	kern_return_t		kr;

	ledger = ledger_instantiate(task_ledger_template,
	    LEDGER_CREATE_ACTIVE_ENTRIES);
	user_map = vm_map_create(pmap_create_options(ledger, 0, PMAP_CREATE_64BIT),
	    0x100000000ULL,		/* map bounds: assumed values */
	    0x200000000ULL,
	    TRUE);
	wire_map = vm_map_create(NULL,
	    0x100000000ULL,		/* map bounds: assumed values */
	    0x200000000ULL,
	    TRUE);
	user_addr = 0;
	user_size = 0x10000;
	kr = mach_vm_allocate(user_map,
	    &user_addr,
	    user_size,
	    VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);
	wire_addr = 0;
	wire_size = user_size;
	kr = mach_vm_remap(wire_map,
	    &wire_addr,
	    wire_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    user_map,
	    user_addr,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_NONE);
	assert(kr == KERN_SUCCESS);
	for (cur_offset = 0;
	    cur_offset < wire_size;
	    cur_offset += PAGE_SIZE) {
		kr = vm_map_wire_and_extract(wire_map,
		    wire_addr + cur_offset,
		    VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
		    TRUE,
		    &wire_ppnum);
		assert(kr == KERN_SUCCESS);
		user_ppnum = vm_map_get_phys_page(user_map,
		    user_addr + cur_offset);
		printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
		    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
		    kr,
		    user_map, user_addr + cur_offset, user_ppnum,
		    wire_map, wire_addr + cur_offset, wire_ppnum);
		if (kr != KERN_SUCCESS ||
		    wire_ppnum == 0 ||
		    wire_ppnum != user_ppnum) {
			panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
		}
	}
	cur_offset -= PAGE_SIZE;
	kr = vm_map_wire_and_extract(wire_map,
	    wire_addr + cur_offset,
	    VM_PROT_DEFAULT,
	    TRUE,
	    &wire_ppnum);
	assert(kr == KERN_SUCCESS);
	printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
	    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
	    kr,
	    user_map, user_addr + cur_offset, user_ppnum,
	    wire_map, wire_addr + cur_offset, wire_ppnum);
	if (kr != KERN_SUCCESS ||
	    wire_ppnum == 0 ||
	    wire_ppnum != user_ppnum) {
		panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
	}

	printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
}
#else /* VM_TEST_WIRE_AND_EXTRACT */
#define vm_test_wire_and_extract()
#endif /* VM_TEST_WIRE_AND_EXTRACT */
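
/*
 * VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: wires a single page in a loop until its
 * wire_count would wrap.  The expectation is that the VM layer panics on the
 * overflow first; reaching the final panic() below means the overflow went
 * undetected.
 */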
#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
static void
vm_test_page_wire_overflow_panic(void)
{
	vm_object_t	object;
	vm_page_t	page;

	printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");

	object = vm_object_allocate(PAGE_SIZE);
	vm_object_lock(object);
	page = vm_page_alloc(object, 0x0);
	vm_page_lock_queues();
	do {
		vm_page_wire(page, 1, FALSE);
	} while (page->wire_count != 0);
	vm_page_unlock_queues();
	vm_object_unlock(object);
	panic("FBDP(%p,%p): wire_count overflow not detected\n",
	    object, page);
}
#else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
#define vm_test_page_wire_overflow_panic()
#endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
static void
vm_test_kernel_object_fault(void)
{
	kern_return_t	kr;
	vm_offset_t	stack;
	uintptr_t	frameb[2];
	int		ret;

	kr = kernel_memory_allocate(kernel_map, &stack,
	    kernel_stack_size + (2 * PAGE_SIZE),
	    0,
	    (KMA_KSTACK | KMA_KOBJECT |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST),
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr);
	}
	ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
	if (ret != 0) {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
	} else {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
	}
	vm_map_remove(kernel_map,
	    stack,
	    stack + kernel_stack_size + (2 * PAGE_SIZE),
	    VM_MAP_REMOVE_KUNWIRE);
}
#else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
#define vm_test_kernel_object_fault()
#endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
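
/*
 * VM_TEST_DEVICE_PAGER_TRANSPOSE: creates an anonymous VM object and a
 * device-pager-backed object of the same size, blocks access to both, and
 * calls vm_object_transpose() so the two objects swap identities.  The
 * asserts verify that the "named" flag and the extra pager reference end up
 * on the anonymous object afterwards.
 */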
#if VM_TEST_DEVICE_PAGER_TRANSPOSE
static void
vm_test_device_pager_transpose(void)
{
	memory_object_t	device_pager;
	vm_object_t	anon_object, device_object;
	vm_size_t	size;
	vm_map_offset_t	device_mapping;
	kern_return_t	kr;

	size = 3 * PAGE_SIZE;
	anon_object = vm_object_allocate(size);
	assert(anon_object != VM_OBJECT_NULL);
	device_pager = device_pager_setup(NULL, 0, size, 0);
	assert(device_pager != NULL);
	device_object = memory_object_to_vm_object(device_pager);
	assert(device_object != VM_OBJECT_NULL);
#if 0
	/*
	 * Can't actually map this, since another thread might do a
	 * vm_map_enter() that gets coalesced into this object, which
	 * would cause the test to fail.
	 */
	vm_map_offset_t anon_mapping = 0;
	kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
	    VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
	    anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
#endif
	device_mapping = 0;
	kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    (void *)device_pager, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	memory_object_deallocate(device_pager);

	vm_object_lock(anon_object);
	vm_object_activity_begin(anon_object);
	anon_object->blocked_access = TRUE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_begin(device_object);
	device_object->blocked_access = TRUE;
	vm_object_unlock(device_object);

	assert(anon_object->ref_count == 1);
	assert(!anon_object->named);
	assert(device_object->ref_count == 2);
	assert(device_object->named);

	kr = vm_object_transpose(device_object, anon_object, size);
	assert(kr == KERN_SUCCESS);

	vm_object_lock(anon_object);
	vm_object_activity_end(anon_object);
	anon_object->blocked_access = FALSE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_end(device_object);
	device_object->blocked_access = FALSE;
	vm_object_unlock(device_object);

	assert(anon_object->ref_count == 2);
	assert(anon_object->named);
#if 0
	kr = vm_deallocate(kernel_map, anon_mapping, size);
	assert(kr == KERN_SUCCESS);
#endif
	assert(device_object->ref_count == 1);
	assert(!device_object->named);
	kr = vm_deallocate(kernel_map, device_mapping, size);
	assert(kr == KERN_SUCCESS);

	printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
}
#else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
#define vm_test_device_pager_transpose()
#endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
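
/*
 * vm_tests() is the single entry point for the self-tests above: each call
 * compiles away to nothing when its gate is 0, so with the default gates this
 * reduces to the device-pager transpose test on DEVELOPMENT/DEBUG builds and
 * to an empty function otherwise.
 */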
void
vm_tests(void)
{
	vm_test_collapse_compressor();
	vm_test_wire_and_extract();
	vm_test_page_wire_overflow_panic();
	vm_test_kernel_object_fault();
	vm_test_device_pager_transpose();
}