/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_pageout.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	The proverbial page-out daemon.
 */
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/zalloc_internal.h>
#include <kern/policy_internal.h>
#include <kern/thread_group.h>

#include <machine/vm_tuning.h>
#include <machine/commpage.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h> /* must be last */
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>

#include <san/kasan.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif /* CONFIG_PHANTOM_CACHE */

#include <libkern/OSDebug.h>
extern void mbuf_drain(boolean_t);

#if VM_PRESSURE_EVENTS
#if CONFIG_JETSAM
extern unsigned int memorystatus_available_pages;
extern unsigned int memorystatus_available_pages_pressure;
extern unsigned int memorystatus_available_pages_critical;
#else /* CONFIG_JETSAM */
extern uint64_t memorystatus_available_pages;
extern uint64_t memorystatus_available_pages_pressure;
extern uint64_t memorystatus_available_pages_critical;
#endif /* CONFIG_JETSAM */

extern unsigned int memorystatus_frozen_count;
extern unsigned int memorystatus_suspended_count;
extern vm_pressure_level_t memorystatus_vm_pressure_level;

extern lck_mtx_t memorystatus_jetsam_fg_band_lock;
extern uint32_t memorystatus_jetsam_fg_band_waiters;

void vm_pressure_response(void);
extern void consider_vm_pressure_events(void);

#define MEMORYSTATUS_SUSPENDED_THRESHOLD  4
#endif /* VM_PRESSURE_EVENTS */

thread_t  vm_pageout_scan_thread = THREAD_NULL;
boolean_t vps_dynamic_priority_enabled = FALSE;
#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE  /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
#ifdef  CONFIG_EMBEDDED
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE  1024
#else
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE  4096
#endif
#endif  /* VM_PAGEOUT_BURST_INACTIVE_THROTTLE */

#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF 100      /* number of pages to move to break deadlock */
#endif  /* VM_PAGEOUT_DEADLOCK_RELIEF */

#ifndef VM_PAGE_LAUNDRY_MAX
#define VM_PAGE_LAUNDRY_MAX     128UL       /* maximum pageouts on a given pageout queue */
#endif  /* VM_PAGE_LAUNDRY_MAX */

#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT   1           /* milliseconds */
#endif  /* VM_PAGEOUT_BURST_WAIT */

#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT   50          /* milliseconds */
#endif  /* VM_PAGEOUT_EMPTY_WAIT */

#ifndef VM_PAGEOUT_DEADLOCK_WAIT
#define VM_PAGEOUT_DEADLOCK_WAIT 100        /* milliseconds */
#endif  /* VM_PAGEOUT_DEADLOCK_WAIT */

#ifndef VM_PAGEOUT_IDLE_WAIT
#define VM_PAGEOUT_IDLE_WAIT    10          /* milliseconds */
#endif  /* VM_PAGEOUT_IDLE_WAIT */

#ifndef VM_PAGEOUT_SWAP_WAIT
#define VM_PAGEOUT_SWAP_WAIT    10          /* milliseconds */
#endif  /* VM_PAGEOUT_SWAP_WAIT */

#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_pageout_state.vm_page_speculative_percentage))
#endif  /* VM_PAGE_SPECULATIVE_TARGET */

/*
 * To obtain a reasonable LRU approximation, the inactive queue
 * needs to be large enough to give pages on it a chance to be
 * referenced a second time.  This macro defines the fraction
 * of active+inactive pages that should be inactive.
 * The pageout daemon uses it to update vm_page_inactive_target.
 *
 * If vm_page_free_count falls below vm_page_free_target and
 * vm_page_inactive_count is below vm_page_inactive_target,
 * then the pageout daemon starts running.
 */
#ifndef VM_PAGE_INACTIVE_TARGET
#define VM_PAGE_INACTIVE_TARGET(avail)  ((avail) * 1 / 2)
#endif  /* VM_PAGE_INACTIVE_TARGET */

/*
 * Once the pageout daemon starts running, it keeps going
 * until vm_page_free_count meets or exceeds vm_page_free_target.
 */
#ifndef VM_PAGE_FREE_TARGET
#ifdef  CONFIG_EMBEDDED
#define VM_PAGE_FREE_TARGET(free)       (15 + (free) / 100)
#else
#define VM_PAGE_FREE_TARGET(free)       (15 + (free) / 80)
#endif
#endif  /* VM_PAGE_FREE_TARGET */
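
/*
 * Illustrative arithmetic (example values, not constants taken from this
 * file): with vm_page_speculative_percentage = 5,
 * VM_PAGE_SPECULATIVE_TARGET(total) reduces to (total) / (100 / 5) =
 * total / 20, i.e. 5% of the pool.  Because 100 / percentage is integer
 * division, nearby settings can collapse to the same target (e.g. 30 and
 * 33 both yield total / 3).  Likewise, for avail = 1,000,000 pages,
 * VM_PAGE_INACTIVE_TARGET(avail) gives 500,000 pages and the non-embedded
 * VM_PAGE_FREE_TARGET(free) gives 15 + 1,000,000 / 80 = 12,515 pages.
 */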
/*
 * The pageout daemon always starts running once vm_page_free_count
 * falls below vm_page_free_min.
 */
#ifndef VM_PAGE_FREE_MIN
#ifdef  CONFIG_EMBEDDED
#define VM_PAGE_FREE_MIN(free)          (10 + (free) / 200)
#else
#define VM_PAGE_FREE_MIN(free)          (10 + (free) / 100)
#endif
#endif  /* VM_PAGE_FREE_MIN */

#ifdef  CONFIG_EMBEDDED
#define VM_PAGE_FREE_RESERVED_LIMIT     100
#define VM_PAGE_FREE_MIN_LIMIT          1500
#define VM_PAGE_FREE_TARGET_LIMIT       2000
#else
#define VM_PAGE_FREE_RESERVED_LIMIT     1700
#define VM_PAGE_FREE_MIN_LIMIT          3500
#define VM_PAGE_FREE_TARGET_LIMIT       4000
#endif

/*
 * When vm_page_free_count falls below vm_page_free_reserved,
 * only vm-privileged threads can allocate pages.  vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
 * operation by dipping into the reserved pool of pages.
 */
#ifndef VM_PAGE_FREE_RESERVED
#define VM_PAGE_FREE_RESERVED(n)        \
	((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif  /* VM_PAGE_FREE_RESERVED */
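
/*
 * Illustrative expansion (example value, not taken from this file): with
 * VM_PAGE_LAUNDRY_MAX left at its 128UL default above,
 * VM_PAGE_FREE_RESERVED(n) works out to 768 + n pages, so
 * VM_PAGE_FREE_RESERVED(100) reserves 868 pages for vm-privileged threads.
 */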
/*
 * When we dequeue pages from the inactive list, they are
 * reactivated (ie, put back on the active queue) if referenced.
 * However, it is possible to starve the free list if other
 * processors are referencing pages faster than we can turn off
 * the referenced bit.  So we limit the number of reactivations
 * we will make per call of vm_pageout_scan().
 */
#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000

#ifndef VM_PAGE_REACTIVATE_LIMIT
#ifdef  CONFIG_EMBEDDED
#define VM_PAGE_REACTIVATE_LIMIT(avail) (VM_PAGE_INACTIVE_TARGET(avail) / 2)
#else
#define VM_PAGE_REACTIVATE_LIMIT(avail) (MAX((avail) * 1 / 20, VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif
#endif  /* VM_PAGE_REACTIVATE_LIMIT */
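
/*
 * Illustrative expansion (example value, not taken from this file): for
 * avail = 1,000,000 pages, the non-embedded VM_PAGE_REACTIVATE_LIMIT is
 * MAX(1,000,000 / 20, 20,000) = 50,000 reactivations per vm_pageout_scan()
 * call; the 20,000-page floor only takes over once avail drops below
 * 400,000 pages.
 */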
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM       1000

extern boolean_t hibernate_cleaning_in_progress;

/*
 * Forward declarations for internal routines.
 */
struct cq {
	struct vm_pageout_queue *q;
	/* ... */
};

struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];

#if VM_PRESSURE_EVENTS
void vm_pressure_thread(void);

boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);

boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
#endif /* VM_PRESSURE_EVENTS */

void vm_pageout_garbage_collect(int);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(struct cq *cq);
static void vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *, boolean_t);

extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);

boolean_t vm_pageout_running = FALSE;

uint32_t vm_page_upl_tainted = 0;
uint32_t vm_page_iopl_tainted = 0;

#if !CONFIG_EMBEDDED
static boolean_t vm_pageout_waiter = FALSE;
#endif /* !CONFIG_EMBEDDED */


#if DEVELOPMENT || DEBUG
struct vm_pageout_debug vm_pageout_debug;
#endif
struct vm_pageout_vminfo vm_pageout_vminfo;
struct vm_pageout_state vm_pageout_state;
struct vm_config vm_config;

struct vm_pageout_queue vm_pageout_queue_internal VM_PAGE_PACKED_ALIGNED;
struct vm_pageout_queue vm_pageout_queue_external VM_PAGE_PACKED_ALIGNED;

int vm_upl_wait_for_pages = 0;
vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;

boolean_t(*volatile consider_buffer_cache_collect)(int) = NULL;

int vm_debug_events = 0;

LCK_GRP_DECLARE(vm_pageout_lck_grp, "vm_pageout");

#if CONFIG_MEMORYSTATUS
extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);

uint32_t vm_pageout_memorystatus_fb_factor_nr = 5;
uint32_t vm_pageout_memorystatus_fb_factor_dr = 2;
#endif /* CONFIG_MEMORYSTATUS */

int vm_compressor_ebound = 1;
int vm_pgo_pbound = 0;
extern void thread_bind_cluster_type(thread_t, char, bool);
/*
 *	Routine:	vm_pageout_object_terminate
 *	Purpose:
 *		Destroy the pageout_object, and perform all of the
 *		required cleanup actions.
 *
 *	In/Out conditions:
 *		The object must be locked, and will be returned locked.
 */
void
vm_pageout_object_terminate(
	vm_object_t object)
{
	vm_object_t shadow_object;

	/*
	 * Deal with the deallocation (last reference) of a pageout object
	 * (used for cleaning-in-place) by dropping the paging references/
	 * freeing pages in the original object.
	 */

	assert(object->pageout);
	shadow_object = object->shadow;
	vm_object_lock(shadow_object);

	while (!vm_page_queue_empty(&object->memq)) {
		vm_page_t               p, m;
		vm_object_offset_t      offset;

		p = (vm_page_t) vm_page_queue_first(&object->memq);

		assert(p->vmp_private);
		assert(p->vmp_free_when_done);
		p->vmp_free_when_done = FALSE;
		assert(!p->vmp_cleaning);
		assert(!p->vmp_laundry);

		offset = p->vmp_offset;

		m = vm_page_lookup(shadow_object,
		    offset + object->vo_shadow_offset);

		if (m == VM_PAGE_NULL) {
			continue;
		}

		assert((m->vmp_dirty) || (m->vmp_precious) ||
		    (m->vmp_busy && m->vmp_cleaning));

		/*
		 * Handle the trusted pager throttle.
		 * Also decrement the burst throttle (if external).
		 */
		vm_page_lock_queues();
		if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
			vm_pageout_throttle_up(m);
		}

		/*
		 * Handle the "target" page(s). These pages are to be freed if
		 * successfully cleaned. Target pages are always busy, and are
		 * wired exactly once. The initial target pages are not mapped,
		 * (so cannot be referenced or modified) but converted target
		 * pages may have been modified between the selection as an
		 * adjacent page and conversion to a target.
		 */
		if (m->vmp_free_when_done) {
			assert(m->vmp_q_state == VM_PAGE_IS_WIRED);
			assert(m->vmp_wire_count == 1);
			m->vmp_cleaning = FALSE;
			m->vmp_free_when_done = FALSE;
			/*
			 * Revoke all access to the page. Since the object is
			 * locked, and the page is busy, this prevents the page
			 * from being dirtied after the pmap_disconnect() call
			 * returns.
			 *
			 * Since the page is left "dirty" but "not modified", we
			 * can detect whether the page was redirtied during
			 * pageout by checking the modify state.
			 */
			if (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)) & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			} else {
				m->vmp_dirty = FALSE;
			}

			if (m->vmp_dirty) {
				vm_page_unwire(m, TRUE);        /* reactivates */
				VM_STAT_INCR(reactivations);
			} else {
				vm_page_free(m);  /* clears busy, etc. */
			}
			vm_page_unlock_queues();
			continue;
		}
		/*
		 * Handle the "adjacent" pages. These pages were cleaned in
		 * place, and should be left alone.
		 * If prep_pin_count is nonzero, then someone is using the
		 * page, so make it active.
		 */
		if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) && !m->vmp_private) {
			if (m->vmp_reference) {
				vm_page_activate(m);
			} else {
				vm_page_deactivate(m);
			}
		}
		if (m->vmp_overwriting) {
			/*
			 * the (COPY_OUT_FROM == FALSE) request_page_list case
			 */
			if (m->vmp_busy) {
				/*
				 * We do not re-set m->vmp_dirty !
				 * The page was busy so no extraneous activity
				 * could have occurred. COPY_INTO is a read into the
				 * new pages. CLEAN_IN_PLACE does actually write
				 * out the pages but handling outside of this code
				 * will take care of resetting dirty. We clear the
				 * modify however for the Programmed I/O case.
				 */
				pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

				m->vmp_absent = FALSE;
			} else {
				/*
				 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
				 * Occurs when the original page was wired
				 * at the time of the list request
				 */
				assert(VM_PAGE_WIRED(m));
				vm_page_unwire(m, TRUE);        /* reactivates */
			}
			m->vmp_overwriting = FALSE;
		} else {
			m->vmp_dirty = FALSE;
		}
		m->vmp_cleaning = FALSE;

		/*
		 * Wakeup any thread waiting for the page to be un-cleaning.
		 */
		PAGE_WAKEUP(m);
		vm_page_unlock_queues();
	}
	/*
	 * Account for the paging reference taken in vm_paging_object_allocate.
	 */
	vm_object_activity_end(shadow_object);
	vm_object_unlock(shadow_object);

	assert(object->ref_count == 0);
	assert(object->paging_in_progress == 0);
	assert(object->activity_in_progress == 0);
	assert(object->resident_page_count == 0);
}
/*
 *	Routine:	vm_pageclean_setup
 *
 *	Purpose:	setup a page to be cleaned (made non-dirty), but not
 *			necessarily flushed from the VM page cache.
 *			This is accomplished by cleaning in place.
 *
 *	The page must not be busy, and new_object
 *	must be locked.
 */
void
vm_pageclean_setup(
	vm_page_t               m,
	vm_page_t               new_m,
	vm_object_t             new_object,
	vm_object_offset_t      new_offset)
{
	assert(!m->vmp_busy);
	assert(!m->vmp_cleaning);

	pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));

	/*
	 * Mark original page as cleaning in place.
	 */
	m->vmp_cleaning = TRUE;
	SET_PAGE_DIRTY(m, FALSE);
	m->vmp_precious = FALSE;

	/*
	 * Convert the fictitious page to a private shadow of
	 * the real page.
	 */
	assert(new_m->vmp_fictitious);
	assert(VM_PAGE_GET_PHYS_PAGE(new_m) == vm_page_fictitious_addr);
	new_m->vmp_fictitious = FALSE;
	new_m->vmp_private = TRUE;
	new_m->vmp_free_when_done = TRUE;
	VM_PAGE_SET_PHYS_PAGE(new_m, VM_PAGE_GET_PHYS_PAGE(m));

	vm_page_lockspin_queues();
	vm_page_wire(new_m, VM_KERN_MEMORY_NONE, TRUE);
	vm_page_unlock_queues();

	vm_page_insert_wired(new_m, new_object, new_offset, VM_KERN_MEMORY_NONE);
	assert(!new_m->vmp_wanted);
	new_m->vmp_busy = FALSE;
}
/*
 *	Routine:	vm_pageout_initialize_page
 *	Purpose:
 *		Causes the specified page to be initialized in
 *		the appropriate memory object. This routine is used to push
 *		pages into a copy-object when they are modified in the
 *		permanent object.
 *
 *		The page is moved to a temporary object and paged out.
 *
 *	In/Out conditions:
 *		The page in question must not be on any pageout queues.
 *		The object to which it belongs must be locked.
 *		The page must be busy, but not hold a paging reference.
 *
 *	Implementation:
 *		Move this page to a completely new object.
 */
void
vm_pageout_initialize_page(
	vm_page_t       m)
{
	vm_object_t             object;
	vm_object_offset_t      paging_offset;
	memory_object_t         pager;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	object = VM_PAGE_OBJECT(m);

	assert(object->internal);

	/*
	 * Verify that we really want to clean this page
	 */
	assert(!m->vmp_absent);
	assert(!m->vmp_error);
	assert(m->vmp_dirty);

	/*
	 * Create a paging reference to let us play with the object.
	 */
	paging_offset = m->vmp_offset + object->paging_offset;

	if (m->vmp_absent || m->vmp_error || m->vmp_restart || (!m->vmp_dirty && !m->vmp_precious)) {
		panic("reservation without pageout?"); /* alan */

		vm_object_unlock(object);

		return;
	}

	/*
	 * If there's no pager, then we can't clean the page. This should
	 * never happen since this should be a copy object and therefore not
	 * an external object, so the pager should always be there.
	 */
	pager = object->pager;

	if (pager == MEMORY_OBJECT_NULL) {
		panic("missing pager for copy object");

		return;
	}

	/*
	 * set the page for future call to vm_fault_list_request
	 */
	pmap_clear_modify(VM_PAGE_GET_PHYS_PAGE(m));
	SET_PAGE_DIRTY(m, FALSE);

	/*
	 * keep the object from collapsing or terminating
	 */
	vm_object_paging_begin(object);
	vm_object_unlock(object);

	/*
	 * Write the data to its pager.
	 * Note that the data is passed by naming the new object,
	 * not a virtual address; the pager interface has been
	 * manipulated to use the "internal memory" data type.
	 * [The object reference from its allocation is donated
	 * to the eventual recipient.]
	 */
	memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);

	vm_object_lock(object);
	vm_object_paging_end(object);
}
/*
 * vm_pageout_cluster:
 *
 * Given a page, queue it to the appropriate I/O thread,
 * which will page it out and attempt to clean adjacent pages
 * in the same operation.
 *
 * The object and queues must be locked. We will take a
 * paging reference to prevent deallocation or collapse when we
 * release the object lock back at the call site.  The I/O thread
 * is responsible for consuming this reference.
 *
 * The page must not be on any pageout queue.
 */
#if DEVELOPMENT || DEBUG
vmct_stats_t vmct_stats;

int32_t vmct_active = 0;
uint64_t vm_compressor_epoch_start = 0;
uint64_t vm_compressor_epoch_stop = 0;

typedef enum vmct_state_t {
	/* ... */
} vmct_state_t;

vmct_state_t vmct_state[MAX_COMPRESSOR_THREAD_COUNT];
#endif


void
vm_pageout_cluster(vm_page_t m)
{
	vm_object_t object = VM_PAGE_OBJECT(m);
	struct vm_pageout_queue *q;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	vm_object_lock_assert_exclusive(object);

	/*
	 * Only a certain kind of page is appreciated here.
	 */
	assert((m->vmp_dirty || m->vmp_precious) && (!VM_PAGE_WIRED(m)));
	assert(!m->vmp_cleaning && !m->vmp_laundry);
	assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);

	/*
	 * protect the object from collapse or termination
	 */
	vm_object_activity_begin(object);

	if (object->internal == TRUE) {
		assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

		q = &vm_pageout_queue_internal;
	} else {
		q = &vm_pageout_queue_external;
	}

	/*
	 * pgo_laundry count is tied to the laundry bit
	 */
	m->vmp_laundry = TRUE;
	q->pgo_laundry++;

	m->vmp_q_state = VM_PAGE_ON_PAGEOUT_Q;
	vm_page_queue_enter(&q->pgo_pending, m, vmp_pageq);

	if (q->pgo_idle == TRUE) {
		q->pgo_idle = FALSE;
		thread_wakeup((event_t) &q->pgo_pending);
	}
}
/*
 * A page is back from laundry or we are stealing it back from
 * the laundering state.  See if there are some pages waiting to
 * go to laundry and if we can let some of them go now.
 *
 * Object and page queues must be locked.
 */
void
vm_pageout_throttle_up(
	vm_page_t m)
{
	struct vm_pageout_queue *q;
	vm_object_t m_object;

	m_object = VM_PAGE_OBJECT(m);

	assert(m_object != VM_OBJECT_NULL);
	assert(m_object != kernel_object);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	vm_object_lock_assert_exclusive(m_object);

	if (m_object->internal == TRUE) {
		q = &vm_pageout_queue_internal;
	} else {
		q = &vm_pageout_queue_external;
	}

	if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
		vm_page_queue_remove(&q->pgo_pending, m, vmp_pageq);
		m->vmp_q_state = VM_PAGE_NOT_ON_Q;

		VM_PAGE_ZERO_PAGEQ_ENTRY(m);

		vm_object_activity_end(m_object);

		VM_PAGEOUT_DEBUG(vm_page_steal_pageout_page, 1);
	}
	if (m->vmp_laundry == TRUE) {
		m->vmp_laundry = FALSE;
		q->pgo_laundry--;

		if (q->pgo_throttled == TRUE) {
			q->pgo_throttled = FALSE;
			thread_wakeup((event_t) &q->pgo_laundry);
		}
		if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
			q->pgo_draining = FALSE;
			thread_wakeup((event_t) (&q->pgo_laundry + 1));
		}
		VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, 1);
	}
}


static void
vm_pageout_throttle_up_batch(
	struct vm_pageout_queue *q,
	int batch_cnt)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	VM_PAGEOUT_DEBUG(vm_pageout_throttle_up_count, batch_cnt);

	q->pgo_laundry -= batch_cnt;

	if (q->pgo_throttled == TRUE) {
		q->pgo_throttled = FALSE;
		thread_wakeup((event_t) &q->pgo_laundry);
	}
	if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
		q->pgo_draining = FALSE;
		thread_wakeup((event_t) (&q->pgo_laundry + 1));
	}
}
/*
 * VM memory pressure monitoring.
 *
 * vm_pageout_scan() keeps track of the number of pages it considers and
 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
 *
 * compute_memory_pressure() is called every second from compute_averages()
 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
 * of reclaimed pages in a new vm_pageout_stat[] bucket.
 *
 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
 * The caller provides the number of seconds ("nsecs") worth of statistics
 * it wants, up to 30 seconds.
 * It computes the number of pages reclaimed in the past "nsecs" seconds and
 * also returns the number of pages the system still needs to reclaim at this
 * moment.
 */
#if DEVELOPMENT || DEBUG
#define VM_PAGEOUT_STAT_SIZE    (30 * 8) + 1
#else
#define VM_PAGEOUT_STAT_SIZE    (1 * 8) + 1
#endif

struct vm_pageout_stat {
	unsigned long vm_page_active_count;
	unsigned long vm_page_speculative_count;
	unsigned long vm_page_inactive_count;
	unsigned long vm_page_anonymous_count;

	unsigned long vm_page_free_count;
	unsigned long vm_page_wire_count;
	unsigned long vm_page_compressor_count;

	unsigned long vm_page_pages_compressed;
	unsigned long vm_page_pageable_internal_count;
	unsigned long vm_page_pageable_external_count;
	unsigned long vm_page_xpmapped_external_count;

	unsigned int pages_grabbed;
	unsigned int pages_freed;

	unsigned int pages_compressed;
	unsigned int pages_grabbed_by_compressor;
	unsigned int failed_compressions;

	unsigned int pages_evicted;
	unsigned int pages_purged;

	unsigned int considered;
	unsigned int considered_bq_internal;
	unsigned int considered_bq_external;

	unsigned int skipped_external;
	unsigned int filecache_min_reactivations;

	unsigned int freed_speculative;
	unsigned int freed_cleaned;
	unsigned int freed_internal;
	unsigned int freed_external;

	unsigned int cleaned_dirty_external;
	unsigned int cleaned_dirty_internal;

	unsigned int inactive_referenced;
	unsigned int inactive_nolock;
	unsigned int reactivation_limit_exceeded;
	unsigned int forced_inactive_reclaim;

	unsigned int throttled_internal_q;
	unsigned int throttled_external_q;

	unsigned int phantom_ghosts_found;
	unsigned int phantom_ghosts_added;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, };

unsigned int vm_pageout_stat_now = 0;

#define VM_PAGEOUT_STAT_BEFORE(i) \
	(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
	(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
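
/*
 * Illustrative arithmetic (not from the original source): vm_pageout_stats[]
 * is a ring buffer indexed by vm_pageout_stat_now.  On DEVELOPMENT/DEBUG
 * kernels VM_PAGEOUT_STAT_SIZE is (30 * 8) + 1 = 241 buckets; on release
 * kernels it is (1 * 8) + 1 = 9.  VM_PAGEOUT_STAT_BEFORE(0) wraps to
 * VM_PAGEOUT_STAT_SIZE - 1 and VM_PAGEOUT_STAT_AFTER(VM_PAGEOUT_STAT_SIZE - 1)
 * wraps back to 0, which is how mach_vm_pressure_monitor() below walks
 * backwards through the most recent buckets without any bounds checks.
 */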
#if VM_PAGE_BUCKETS_CHECK
int vm_page_buckets_check_interval = 80; /* in eighths of a second */
#endif /* VM_PAGE_BUCKETS_CHECK */


static void
record_memory_pressure(void);
static void
record_memory_pressure(void)
{
	unsigned int vm_pageout_next;

#if VM_PAGE_BUCKETS_CHECK
	/* check the consistency of VM page buckets at regular interval */
	static int counter = 0;
	if ((++counter % vm_page_buckets_check_interval) == 0) {
		vm_page_buckets_check();
	}
#endif /* VM_PAGE_BUCKETS_CHECK */

	vm_pageout_state.vm_memory_pressure =
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_speculative +
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_cleaned +
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_internal +
	    vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].freed_external;

	commpage_set_memory_pressure((unsigned int)vm_pageout_state.vm_memory_pressure);

	/* move "now" forward */
	vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);

	bzero(&vm_pageout_stats[vm_pageout_next], sizeof(struct vm_pageout_stat));

	vm_pageout_stat_now = vm_pageout_next;
}
/*
 * mach_vm_ctl_page_free_wanted() is called indirectly, via
 * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
 * it must be safe in the restricted stackshot context. Locks and/or
 * blocking are not allowable.
 */
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
	unsigned int page_free_target, page_free_count, page_free_wanted;

	page_free_target = vm_page_free_target;
	page_free_count = vm_page_free_count;
	if (page_free_target > page_free_count) {
		page_free_wanted = page_free_target - page_free_count;
	} else {
		page_free_wanted = 0;
	}

	return page_free_wanted;
}
/*
 * mach_vm_pressure_monitor() is called when taking a stackshot, with
 * wait_for_pressure FALSE, so that code path must remain safe in the
 * restricted stackshot context. No blocking or locks are allowable.
 */
kern_return_t
mach_vm_pressure_monitor(
	boolean_t       wait_for_pressure,
	unsigned int    nsecs_monitored,
	unsigned int    *pages_reclaimed_p,
	unsigned int    *pages_wanted_p)
{
	wait_result_t   wr;
	unsigned int    vm_pageout_then, vm_pageout_now;
	unsigned int    pages_reclaimed;
	unsigned int    units_of_monitor;

	units_of_monitor = 8 * nsecs_monitored;
	/*
	 * We don't take the vm_page_queue_lock here because we don't want
	 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
	 * thread when it's trying to reclaim memory. We don't need fully
	 * accurate monitoring anyway...
	 */

	if (wait_for_pressure) {
		/* wait until there's memory pressure */
		while (vm_page_free_count >= vm_page_free_target) {
			wr = assert_wait((event_t) &vm_page_free_wanted,
			    THREAD_INTERRUPTIBLE);
			if (wr == THREAD_WAITING) {
				wr = thread_block(THREAD_CONTINUE_NULL);
			}
			if (wr == THREAD_INTERRUPTED) {
				return KERN_ABORTED;
			}
			if (wr == THREAD_AWAKENED) {
				/*
				 * The memory pressure might have already
				 * been relieved but let's not block again
				 * and let's report that there was memory
				 * pressure at some point.
				 */
				break;
			}
		}
	}

	/* provide the number of pages the system wants to reclaim */
	if (pages_wanted_p != NULL) {
		*pages_wanted_p = mach_vm_ctl_page_free_wanted();
	}

	if (pages_reclaimed_p == NULL) {
		return KERN_SUCCESS;
	}

	/* provide number of pages reclaimed in the last "nsecs_monitored" */
	vm_pageout_now = vm_pageout_stat_now;
	pages_reclaimed = 0;
	for (vm_pageout_then =
	    VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
	    vm_pageout_then != vm_pageout_now &&
	    units_of_monitor-- != 0;
	    vm_pageout_then =
	    VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_speculative;
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_cleaned;
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_internal;
		pages_reclaimed += vm_pageout_stats[vm_pageout_then].freed_external;
	}
	*pages_reclaimed_p = pages_reclaimed;

	return KERN_SUCCESS;
}
#if DEVELOPMENT || DEBUG

void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_disconnect_all_pages_active = FALSE;


void
vm_pageout_disconnect_all_pages()
{
	vm_page_lock_queues();

	if (vm_pageout_disconnect_all_pages_active == TRUE) {
		vm_page_unlock_queues();
		return;
	}
	vm_pageout_disconnect_all_pages_active = TRUE;
	vm_page_unlock_queues();

	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_throttled, vm_page_throttled_count);
	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
	vm_pageout_disconnect_all_pages_in_queue(&vm_page_queue_active, vm_page_active_count);

	vm_pageout_disconnect_all_pages_active = FALSE;
}


void
vm_pageout_disconnect_all_pages_in_queue(vm_page_queue_head_t *q, int qcount)
{
	vm_page_t       m;
	vm_object_t     t_object = NULL;
	vm_object_t     l_object = NULL;
	vm_object_t     m_object = NULL;
	int             delayed_unlock = 0;
	int             try_failed_count = 0;
	int             disconnected_count = 0;
	int             paused_count = 0;
	int             object_locked_count = 0;

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_START,
	    q, qcount, 0, 0, 0);

	vm_page_lock_queues();

	while (qcount && !vm_page_queue_empty(q)) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

		m = (vm_page_t) vm_page_queue_first(q);
		m_object = VM_PAGE_OBJECT(m);

		/*
		 * check to see if we currently are working
		 * with the same object... if so, we've
		 * already got the lock
		 */
		if (m_object != l_object) {
			/*
			 * the object associated with candidate page is
			 * different from the one we were just working
			 * with... dump the lock if we still own it
			 */
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			if (m_object != t_object) {
				try_failed_count = 0;
			}

			/*
			 * Try to lock object; since we've already got the
			 * page queues lock, we can only 'try' for this one.
			 * if the 'try' fails, we need to do a mutex_pause
			 * to allow the owner of the object lock a chance to
			 * run...
			 */
			if (!vm_object_lock_try_scan(m_object)) {
				if (try_failed_count > 20) {
					goto reenter_pg_on_q;
				}
				vm_page_unlock_queues();
				mutex_pause(try_failed_count++);
				vm_page_lock_queues();

				paused_count++;

				t_object = m_object;
				continue;
			}
			object_locked_count++;

			l_object = m_object;
		}
		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
			/*
			 * put it back on the head of its queue
			 */
			goto reenter_pg_on_q;
		}
		if (m->vmp_pmapped == TRUE) {
			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

			disconnected_count++;
		}
reenter_pg_on_q:
		vm_page_queue_remove(q, m, vmp_pageq);
		vm_page_queue_enter(q, m, vmp_pageq);

		qcount--;
		try_failed_count = 0;

		if (delayed_unlock++ > 128) {
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			lck_mtx_yield(&vm_page_queue_lock);
			delayed_unlock = 0;
		}
	}
	if (l_object != NULL) {
		vm_object_unlock(l_object);
		l_object = NULL;
	}
	vm_page_unlock_queues();

	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_DISCONNECT_ALL_PAGE_MAPPINGS)) | DBG_FUNC_END,
	    q, disconnected_count, object_locked_count, paused_count, 0);
}

#endif /* DEVELOPMENT || DEBUG */
extern void
vm_pageout_page_queue(vm_page_queue_head_t *, int);

/*
 * condition variable used to make sure there is
 * only a single sweep going on at a time
 */
boolean_t vm_pageout_anonymous_pages_active = FALSE;


void
vm_pageout_anonymous_pages()
{
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		vm_page_lock_queues();

		if (vm_pageout_anonymous_pages_active == TRUE) {
			vm_page_unlock_queues();
			return;
		}
		vm_pageout_anonymous_pages_active = TRUE;
		vm_page_unlock_queues();

		vm_pageout_page_queue(&vm_page_queue_throttled, vm_page_throttled_count);
		vm_pageout_page_queue(&vm_page_queue_anonymous, vm_page_anonymous_count);
		vm_pageout_page_queue(&vm_page_queue_active, vm_page_active_count);

		if (VM_CONFIG_SWAP_IS_PRESENT) {
			vm_consider_swapping();
		}

		vm_page_lock_queues();
		vm_pageout_anonymous_pages_active = FALSE;
		vm_page_unlock_queues();
	}
}


void
vm_pageout_page_queue(vm_page_queue_head_t *q, int qcount)
{
	vm_page_t       m;
	vm_object_t     t_object = NULL;
	vm_object_t     l_object = NULL;
	vm_object_t     m_object = NULL;
	int             delayed_unlock = 0;
	int             try_failed_count = 0;
	int             refmod_state;
	int             pmap_options;
	struct vm_pageout_queue *iq;
	ppnum_t         phys_page;


	iq = &vm_pageout_queue_internal;

	vm_page_lock_queues();

	while (qcount && !vm_page_queue_empty(q)) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

		if (VM_PAGE_Q_THROTTLED(iq)) {
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			iq->pgo_draining = TRUE;

			assert_wait((event_t) (&iq->pgo_laundry + 1), THREAD_INTERRUPTIBLE);
			vm_page_unlock_queues();

			thread_block(THREAD_CONTINUE_NULL);

			vm_page_lock_queues();

			continue;
		}
		m = (vm_page_t) vm_page_queue_first(q);
		m_object = VM_PAGE_OBJECT(m);

		/*
		 * check to see if we currently are working
		 * with the same object... if so, we've
		 * already got the lock
		 */
		if (m_object != l_object) {
			if (!m_object->internal) {
				goto reenter_pg_on_q;
			}

			/*
			 * the object associated with candidate page is
			 * different from the one we were just working
			 * with... dump the lock if we still own it
			 */
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			if (m_object != t_object) {
				try_failed_count = 0;
			}

			/*
			 * Try to lock object; since we've already got the
			 * page queues lock, we can only 'try' for this one.
			 * if the 'try' fails, we need to do a mutex_pause
			 * to allow the owner of the object lock a chance to
			 * run...
			 */
			if (!vm_object_lock_try_scan(m_object)) {
				if (try_failed_count > 20) {
					goto reenter_pg_on_q;
				}
				vm_page_unlock_queues();
				mutex_pause(try_failed_count++);
				vm_page_lock_queues();

				t_object = m_object;
				continue;
			}
			l_object = m_object;
		}
		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error || m->vmp_free_when_done) {
			/*
			 * page is not to be cleaned
			 * put it back on the head of its queue
			 */
			goto reenter_pg_on_q;
		}
		phys_page = VM_PAGE_GET_PHYS_PAGE(m);

		if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
			refmod_state = pmap_get_refmod(phys_page);

			if (refmod_state & VM_MEM_REFERENCED) {
				m->vmp_reference = TRUE;
			}
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}
		if (m->vmp_reference == TRUE) {
			m->vmp_reference = FALSE;
			pmap_clear_refmod_options(phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
			goto reenter_pg_on_q;
		}
		if (m->vmp_pmapped == TRUE) {
			if (m->vmp_dirty || m->vmp_precious) {
				pmap_options = PMAP_OPTIONS_COMPRESSOR;
			} else {
				pmap_options = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			}
			refmod_state = pmap_disconnect_options(phys_page, pmap_options, NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}

		if (!m->vmp_dirty && !m->vmp_precious) {
			vm_page_unlock_queues();
			VM_PAGE_FREE(m);
			vm_page_lock_queues();

			goto next_pg;
		}
		if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
			if (!m_object->pager_initialized) {
				vm_page_unlock_queues();

				vm_object_collapse(m_object, (vm_object_offset_t) 0, TRUE);

				if (!m_object->pager_initialized) {
					vm_object_compressor_pager_create(m_object);
				}

				vm_page_lock_queues();
			}
			if (!m_object->pager_initialized || m_object->pager == MEMORY_OBJECT_NULL) {
				goto reenter_pg_on_q;
			}
			/*
			 * vm_object_compressor_pager_create will drop the object lock
			 * which means 'm' may no longer be valid to use
			 */
			continue;
		}
		/*
		 * we've already factored out pages in the laundry which
		 * means this page can't be on the pageout queue so it's
		 * safe to do the vm_page_queues_remove
		 */
		vm_page_queues_remove(m, TRUE);

		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

		vm_pageout_cluster(m);

		goto next_pg;

reenter_pg_on_q:
		vm_page_queue_remove(q, m, vmp_pageq);
		vm_page_queue_enter(q, m, vmp_pageq);
next_pg:
		qcount--;
		try_failed_count = 0;

		if (delayed_unlock++ > 128) {
			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}
			lck_mtx_yield(&vm_page_queue_lock);
			delayed_unlock = 0;
		}
	}
	if (l_object != NULL) {
		vm_object_unlock(l_object);
		l_object = NULL;
	}
	vm_page_unlock_queues();
}
/*
 * function in BSD to apply I/O throttle to the pageout thread
 */
extern void vm_pageout_io_throttle(void);

#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, obj)            \
	MACRO_BEGIN                                             \
	/*                                                      \
	 * If a "reusable" page somehow made it back into       \
	 * the active queue, it's been re-used and is not       \
	 * quite re-usable.                                     \
	 * If the VM object was "all_reusable", consider it     \
	 * as "all re-used" instead of converting it to         \
	 * "partially re-used", which could be expensive.       \
	 */                                                     \
	assert(VM_PAGE_OBJECT((m)) == (obj));                   \
	if ((m)->vmp_reusable ||                                \
	    (obj)->all_reusable) {                              \
	        vm_object_reuse_pages((obj),                    \
	            (m)->vmp_offset,                            \
	            (m)->vmp_offset + PAGE_SIZE_64,             \
	            FALSE);                                     \
	}                                                       \
	MACRO_END

#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT         64
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX     1024

#define FCS_IDLE                0
#define FCS_DELAYED             1
#define FCS_DEADLOCK_DETECTED   2

struct flow_control {
	int             state;
	mach_timespec_t ts;
};

#if CONFIG_BACKGROUND_QUEUE
uint64_t vm_pageout_rejected_bq_internal = 0;
uint64_t vm_pageout_rejected_bq_external = 0;
uint64_t vm_pageout_skipped_bq_internal = 0;
#endif /* CONFIG_BACKGROUND_QUEUE */

#define ANONS_GRABBED_LIMIT     2


static void vm_pageout_delayed_unlock(int *, int *, vm_page_t *);

static void vm_pageout_prepare_to_block(vm_object_t *, int *, vm_page_t *, int *, int);

#define VM_PAGEOUT_PB_NO_ACTION                         0
#define VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER 1
#define VM_PAGEOUT_PB_THREAD_YIELD                      2
static void
vm_pageout_delayed_unlock(int *delayed_unlock, int *local_freed, vm_page_t *local_freeq)
{
	if (*local_freeq) {
		vm_page_unlock_queues();

		VM_DEBUG_CONSTANT_EVENT(
			vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
			vm_page_free_count, 0, 0, 1);

		vm_page_free_list(*local_freeq, TRUE);

		VM_DEBUG_CONSTANT_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
		    vm_page_free_count, *local_freed, 0, 1);

		*local_freeq = NULL;
		*local_freed = 0;

		vm_page_lock_queues();
	} else {
		lck_mtx_yield(&vm_page_queue_lock);
	}
	*delayed_unlock = 1;
}


static void
vm_pageout_prepare_to_block(vm_object_t *object, int *delayed_unlock,
    vm_page_t *local_freeq, int *local_freed, int action)
{
	vm_page_unlock_queues();

	if (*object != NULL) {
		vm_object_unlock(*object);
		*object = NULL;
	}
	if (*local_freeq) {
		vm_page_free_list(*local_freeq, TRUE);

		*local_freeq = NULL;
		*local_freed = 0;
	}
	*delayed_unlock = 1;

	switch (action) {
	case VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER:
		vm_consider_waking_compactor_swapper();
		break;
	case VM_PAGEOUT_PB_THREAD_YIELD:
		thread_yield_internal(1);
		break;
	case VM_PAGEOUT_PB_NO_ACTION:
	default:
		break;
	}
	vm_page_lock_queues();
}


static struct vm_pageout_vminfo last;

uint64_t last_vm_page_pages_grabbed = 0;

extern uint32_t c_segment_pages_compressed;

extern uint64_t shared_region_pager_reclaimed;
extern struct memory_object_pager_ops shared_region_pager_ops;
void
update_vm_info(void)
{
	unsigned long tmp;
	uint64_t      tmp64;

	vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count = vm_page_active_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count = vm_page_speculative_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count = vm_page_inactive_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count = vm_page_anonymous_count;

	vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count = vm_page_free_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count = vm_page_wire_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count = VM_PAGE_COMPRESSOR_COUNT;

	vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed = c_segment_pages_compressed;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count = vm_page_pageable_internal_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count = vm_page_pageable_external_count;
	vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count = vm_page_xpmapped_external_count;

	tmp = vm_pageout_vminfo.vm_pageout_considered_page;
	vm_pageout_stats[vm_pageout_stat_now].considered = (unsigned int)(tmp - last.vm_pageout_considered_page);
	last.vm_pageout_considered_page = tmp;

	tmp64 = vm_pageout_vminfo.vm_pageout_compressions;
	vm_pageout_stats[vm_pageout_stat_now].pages_compressed = (unsigned int)(tmp64 - last.vm_pageout_compressions);
	last.vm_pageout_compressions = tmp64;

	tmp = vm_pageout_vminfo.vm_compressor_failed;
	vm_pageout_stats[vm_pageout_stat_now].failed_compressions = (unsigned int)(tmp - last.vm_compressor_failed);
	last.vm_compressor_failed = tmp;

	tmp64 = vm_pageout_vminfo.vm_compressor_pages_grabbed;
	vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor = (unsigned int)(tmp64 - last.vm_compressor_pages_grabbed);
	last.vm_compressor_pages_grabbed = tmp64;

	tmp = vm_pageout_vminfo.vm_phantom_cache_found_ghost;
	vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found = (unsigned int)(tmp - last.vm_phantom_cache_found_ghost);
	last.vm_phantom_cache_found_ghost = tmp;

	tmp = vm_pageout_vminfo.vm_phantom_cache_added_ghost;
	vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added = (unsigned int)(tmp - last.vm_phantom_cache_added_ghost);
	last.vm_phantom_cache_added_ghost = tmp;

	tmp64 = get_pages_grabbed_count();
	vm_pageout_stats[vm_pageout_stat_now].pages_grabbed = (unsigned int)(tmp64 - last_vm_page_pages_grabbed);
	last_vm_page_pages_grabbed = tmp64;

	tmp = vm_pageout_vminfo.vm_page_pages_freed;
	vm_pageout_stats[vm_pageout_stat_now].pages_freed = (unsigned int)(tmp - last.vm_page_pages_freed);
	last.vm_page_pages_freed = tmp;

	if (vm_pageout_stats[vm_pageout_stat_now].considered) {
		tmp = vm_pageout_vminfo.vm_pageout_pages_evicted;
		vm_pageout_stats[vm_pageout_stat_now].pages_evicted = (unsigned int)(tmp - last.vm_pageout_pages_evicted);
		last.vm_pageout_pages_evicted = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_pages_purged;
		vm_pageout_stats[vm_pageout_stat_now].pages_purged = (unsigned int)(tmp - last.vm_pageout_pages_purged);
		last.vm_pageout_pages_purged = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_freed_speculative;
		vm_pageout_stats[vm_pageout_stat_now].freed_speculative = (unsigned int)(tmp - last.vm_pageout_freed_speculative);
		last.vm_pageout_freed_speculative = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_freed_external;
		vm_pageout_stats[vm_pageout_stat_now].freed_external = (unsigned int)(tmp - last.vm_pageout_freed_external);
		last.vm_pageout_freed_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_referenced;
		vm_pageout_stats[vm_pageout_stat_now].inactive_referenced = (unsigned int)(tmp - last.vm_pageout_inactive_referenced);
		last.vm_pageout_inactive_referenced = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external;
		vm_pageout_stats[vm_pageout_stat_now].throttled_external_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_external);
		last.vm_pageout_scan_inactive_throttled_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_external;
		vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_external);
		last.vm_pageout_inactive_dirty_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_freed_cleaned;
		vm_pageout_stats[vm_pageout_stat_now].freed_cleaned = (unsigned int)(tmp - last.vm_pageout_freed_cleaned);
		last.vm_pageout_freed_cleaned = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_nolock;
		vm_pageout_stats[vm_pageout_stat_now].inactive_nolock = (unsigned int)(tmp - last.vm_pageout_inactive_nolock);
		last.vm_pageout_inactive_nolock = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal;
		vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q = (unsigned int)(tmp - last.vm_pageout_scan_inactive_throttled_internal);
		last.vm_pageout_scan_inactive_throttled_internal = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_skipped_external;
		vm_pageout_stats[vm_pageout_stat_now].skipped_external = (unsigned int)(tmp - last.vm_pageout_skipped_external);
		last.vm_pageout_skipped_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded;
		vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded = (unsigned int)(tmp - last.vm_pageout_reactivation_limit_exceeded);
		last.vm_pageout_reactivation_limit_exceeded = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_force_reclaim;
		vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim = (unsigned int)(tmp - last.vm_pageout_inactive_force_reclaim);
		last.vm_pageout_inactive_force_reclaim = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_freed_internal;
		vm_pageout_stats[vm_pageout_stat_now].freed_internal = (unsigned int)(tmp - last.vm_pageout_freed_internal);
		last.vm_pageout_freed_internal = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_considered_bq_internal;
		vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal = (unsigned int)(tmp - last.vm_pageout_considered_bq_internal);
		last.vm_pageout_considered_bq_internal = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_considered_bq_external;
		vm_pageout_stats[vm_pageout_stat_now].considered_bq_external = (unsigned int)(tmp - last.vm_pageout_considered_bq_external);
		last.vm_pageout_considered_bq_external = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_filecache_min_reactivated;
		vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations = (unsigned int)(tmp - last.vm_pageout_filecache_min_reactivated);
		last.vm_pageout_filecache_min_reactivated = tmp;

		tmp = vm_pageout_vminfo.vm_pageout_inactive_dirty_internal;
		vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal = (unsigned int)(tmp - last.vm_pageout_inactive_dirty_internal);
		last.vm_pageout_inactive_dirty_internal = tmp;
	}

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO1)) | DBG_FUNC_NONE,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_active_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_speculative_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_inactive_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_anonymous_count,
	    0);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO2)) | DBG_FUNC_NONE,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_free_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_wire_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_compressor_count,
	    0,
	    0);

	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO3)) | DBG_FUNC_NONE,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_pages_compressed,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_internal_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_pageable_external_count,
	    vm_pageout_stats[vm_pageout_stat_now].vm_page_xpmapped_external_count,
	    0);

	if (vm_pageout_stats[vm_pageout_stat_now].considered ||
	    vm_pageout_stats[vm_pageout_stat_now].pages_compressed ||
	    vm_pageout_stats[vm_pageout_stat_now].failed_compressions) {
		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO4)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].considered,
		    vm_pageout_stats[vm_pageout_stat_now].freed_speculative,
		    vm_pageout_stats[vm_pageout_stat_now].freed_external,
		    vm_pageout_stats[vm_pageout_stat_now].inactive_referenced,
		    0);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO5)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].throttled_external_q,
		    vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_external,
		    vm_pageout_stats[vm_pageout_stat_now].freed_cleaned,
		    vm_pageout_stats[vm_pageout_stat_now].inactive_nolock,
		    0);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO6)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].throttled_internal_q,
		    vm_pageout_stats[vm_pageout_stat_now].pages_compressed,
		    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed_by_compressor,
		    vm_pageout_stats[vm_pageout_stat_now].skipped_external,
		    0);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO7)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].reactivation_limit_exceeded,
		    vm_pageout_stats[vm_pageout_stat_now].forced_inactive_reclaim,
		    vm_pageout_stats[vm_pageout_stat_now].failed_compressions,
		    vm_pageout_stats[vm_pageout_stat_now].freed_internal,
		    0);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO8)) | DBG_FUNC_NONE,
		    vm_pageout_stats[vm_pageout_stat_now].considered_bq_internal,
		    vm_pageout_stats[vm_pageout_stat_now].considered_bq_external,
		    vm_pageout_stats[vm_pageout_stat_now].filecache_min_reactivations,
		    vm_pageout_stats[vm_pageout_stat_now].cleaned_dirty_internal,
		    0);
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_INFO9)) | DBG_FUNC_NONE,
	    vm_pageout_stats[vm_pageout_stat_now].pages_grabbed,
	    vm_pageout_stats[vm_pageout_stat_now].pages_freed,
	    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_found,
	    vm_pageout_stats[vm_pageout_stat_now].phantom_ghosts_added,
	    0);

	record_memory_pressure();
}
extern boolean_t hibernation_vmqueues_inspection;

/*
 * Return values for functions called by vm_pageout_scan
 * that control its flow.
 *
 * PROCEED -- vm_pageout_scan will keep making forward progress.
 * DONE_RETURN -- page demand satisfied, work is done -> vm_pageout_scan returns.
 * NEXT_ITERATION -- restart the 'for' loop in vm_pageout_scan aka continue.
 */

#define VM_PAGEOUT_SCAN_PROCEED                 (0)
#define VM_PAGEOUT_SCAN_DONE_RETURN             (1)
#define VM_PAGEOUT_SCAN_NEXT_ITERATION          (2)
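
/*
 * Illustrative usage (a sketch, not lifted from vm_pageout_scan itself):
 * helpers such as vps_age_speculative_queue() below hand one of these codes
 * back to vm_pageout_scan(), which branches on them roughly like so:
 *
 *	retval = vps_age_speculative_queue(force_speculative_aging);
 *	if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
 *		continue;               restart the main loop
 *	}
 *	                                otherwise PROCEED: keep scanning
 */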
/*
 * This function is called only from vm_pageout_scan and
 * it moves overflow secluded pages (one-at-a-time) to the
 * batched 'local' free Q or active Q.
 */
static void
vps_deal_with_secluded_page_overflow(vm_page_t *local_freeq, int *local_freed)
{
#if CONFIG_SECLUDED_MEMORY
	/*
	 * Deal with secluded_q overflow.
	 */
	if (vm_page_secluded_count > vm_page_secluded_target) {
		vm_page_t secluded_page;

		/*
		 * SECLUDED_AGING_BEFORE_ACTIVE:
		 * Excess secluded pages go to the active queue and
		 * will later go to the inactive queue.
		 */
		assert((vm_page_secluded_count_free +
		    vm_page_secluded_count_inuse) ==
		    vm_page_secluded_count);
		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);

		vm_page_queues_remove(secluded_page, FALSE);
		assert(!secluded_page->vmp_fictitious);
		assert(!VM_PAGE_WIRED(secluded_page));

		if (secluded_page->vmp_object == 0) {
			/* transfer to free queue */
			assert(secluded_page->vmp_busy);
			secluded_page->vmp_snext = *local_freeq;
			*local_freeq = secluded_page;
			*local_freed += 1;
		} else {
			/* transfer to head of active queue */
			vm_page_enqueue_active(secluded_page, FALSE);
			secluded_page = VM_PAGE_NULL;
		}
	}
#else /* CONFIG_SECLUDED_MEMORY */

#pragma unused(local_freeq)
#pragma unused(local_freed)

#endif /* CONFIG_SECLUDED_MEMORY */
}
/*
 * This function is called only from vm_pageout_scan and
 * it initializes the loop targets for vm_pageout_scan().
 */
static void
vps_init_page_targets(void)
{
	/*
	 * LD TODO: Other page targets should be calculated here too.
	 */
	vm_page_anonymous_min = vm_page_inactive_target / 20;

	if (vm_pageout_state.vm_page_speculative_percentage > 50) {
		vm_pageout_state.vm_page_speculative_percentage = 50;
	} else if (vm_pageout_state.vm_page_speculative_percentage <= 0) {
		vm_pageout_state.vm_page_speculative_percentage = 1;
	}

	vm_pageout_state.vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
	    vm_page_inactive_count);
}
/*
 * This function is called only from vm_pageout_scan and
 * it purges a single VM object at-a-time and will either
 * make vm_pageout_scan() restart the loop or keep moving forward.
 */
static int
vps_purge_object(void)
{
	int             force_purge;

	assert(available_for_purge >= 0);
	force_purge = 0; /* no force-purging */

#if VM_PRESSURE_EVENTS
	vm_pressure_level_t pressure_level;

	pressure_level = memorystatus_vm_pressure_level;

	if (pressure_level > kVMPressureNormal) {
		if (pressure_level >= kVMPressureCritical) {
			force_purge = vm_pageout_state.memorystatus_purge_on_critical;
		} else if (pressure_level >= kVMPressureUrgent) {
			force_purge = vm_pageout_state.memorystatus_purge_on_urgent;
		} else if (pressure_level >= kVMPressureWarning) {
			force_purge = vm_pageout_state.memorystatus_purge_on_warning;
		}
	}
#endif /* VM_PRESSURE_EVENTS */

	if (available_for_purge || force_purge) {
		memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);

		VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
		if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {
			VM_PAGEOUT_DEBUG(vm_pageout_purged_objects, 1);
			VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
			memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);

			return VM_PAGEOUT_SCAN_NEXT_ITERATION;
		}
		VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
		memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
	}

	return VM_PAGEOUT_SCAN_PROCEED;
}
/*
 * This function is called only from vm_pageout_scan and
 * it will try to age the next speculative Q if the oldest
 * one is empty.
 */
static int
vps_age_speculative_queue(boolean_t force_speculative_aging)
{
#define DELAY_SPECULATIVE_AGE   1000

	/*
	 * try to pull pages from the aging bins...
	 * see vm_page.h for an explanation of how
	 * this mechanism works
	 */
	boolean_t                       can_steal = FALSE;
	int                             num_scanned_queues;
	static int                      delay_speculative_age = 0; /* depends on the # of times we go through the main pageout_scan loop.*/
	mach_timespec_t                 ts;
	struct vm_speculative_age_q     *aq;
	struct vm_speculative_age_q     *sq;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	aq = &vm_page_queue_speculative[speculative_steal_index];

	num_scanned_queues = 0;
	while (vm_page_queue_empty(&aq->age_q) &&
	    num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
		speculative_steal_index++;

		if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
		}

		aq = &vm_page_queue_speculative[speculative_steal_index];
	}

	if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
		/*
		 * XXX We've scanned all the speculative
		 * queues but still haven't found one
		 * that is not empty, even though
		 * vm_page_speculative_count is not 0.
		 */
		if (!vm_page_queue_empty(&sq->age_q)) {
			return VM_PAGEOUT_SCAN_NEXT_ITERATION;
		}
#if DEVELOPMENT || DEBUG
		panic("vm_pageout_scan: vm_page_speculative_count=%d but queues are empty", vm_page_speculative_count);
#endif
		vm_page_speculative_count = 0;
		/* ... and continue */
		return VM_PAGEOUT_SCAN_NEXT_ITERATION;
	}

	if (vm_page_speculative_count > vm_pageout_state.vm_page_speculative_target || force_speculative_aging == TRUE) {
		can_steal = TRUE;
	} else {
		if (!delay_speculative_age) {
			mach_timespec_t ts_fully_aged;

			ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) / 1000;
			ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_pageout_state.vm_page_speculative_q_age_ms) % 1000)
			    * 1000 * NSEC_PER_USEC;

			ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);

			clock_sec_t sec;
			clock_nsec_t nsec;
			clock_get_system_nanotime(&sec, &nsec);
			ts.tv_sec = (unsigned int) sec;
			ts.tv_nsec = nsec;

			if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0) {
				can_steal = TRUE;
			} else {
				delay_speculative_age++;
			}
		} else {
			delay_speculative_age++;
			if (delay_speculative_age == DELAY_SPECULATIVE_AGE) {
				delay_speculative_age = 0;
			}
		}
	}
	if (can_steal == TRUE) {
		vm_page_speculate_ageit(aq);
	}

	return VM_PAGEOUT_SCAN_PROCEED;
}
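/*
 * Worked example of the "fully aged" cutoff above, using illustrative numbers
 * rather than the real tunables: if there were 16 aging bins and
 * vm_page_speculative_q_age_ms were 500, then 16 * 500 = 8000, giving
 * ts_fully_aged = { 8 sec, 0 nsec } on top of the bin's age_ts.  Only once
 * the current time passes that sum (or the speculative count exceeds its
 * target, or aging is forced) does can_steal become TRUE and the bin get aged.
 */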
/*
 * This function is called only from vm_pageout_scan and
 * it evicts a single VM object from the cache.
 */
static int
vps_object_cache_evict(vm_object_t *object_to_unlock)
{
	static int                      cache_evict_throttle = 0;
	struct vm_speculative_age_q     *sq;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	if (vm_page_queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
		int     pages_evicted;

		if (*object_to_unlock != NULL) {
			vm_object_unlock(*object_to_unlock);
			*object_to_unlock = NULL;
		}
		KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);

		pages_evicted = vm_object_cache_evict(100, 10);

		KERNEL_DEBUG_CONSTANT(0x13001ec | DBG_FUNC_END, pages_evicted, 0, 0, 0, 0);

		if (pages_evicted) {
			vm_pageout_vminfo.vm_pageout_pages_evicted += pages_evicted;

			VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
			    vm_page_free_count, pages_evicted, vm_pageout_vminfo.vm_pageout_pages_evicted, 0);
			memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);

			/*
			 * we just freed up to 100 pages,
			 * so go back to the top of the main loop
			 * and re-evaluate the memory situation
			 */
			return VM_PAGEOUT_SCAN_NEXT_ITERATION;
		} else {
			cache_evict_throttle = 1000;
		}
	}
	if (cache_evict_throttle) {
		cache_evict_throttle--;
	}

	return VM_PAGEOUT_SCAN_PROCEED;
}
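/*
 * Note on the throttle above: one call to vm_object_cache_evict(100, 10) can
 * free up to 100 pages.  If it frees nothing, cache_evict_throttle is set to
 * 1000, so roughly the next 1000 trips through this path just decrement the
 * counter and skip the eviction attempt before it is retried.
 */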
/*
 * This function is called only from vm_pageout_scan and
 * it calculates the filecache min. that needs to be maintained
 * as we start to steal pages.
 */
static void
vps_calculate_filecache_min(void)
{
	int divisor = vm_pageout_state.vm_page_filecache_min_divisor;

#if CONFIG_JETSAM
	/*
	 * don't let the filecache_min fall below 15% of available memory
	 * on systems with an active compressor that isn't nearing its
	 * limits w/r to accepting new data
	 *
	 * on systems w/o the compressor/swapper, the filecache is always
	 * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
	 * since most (if not all) of the anonymous pages are in the
	 * throttled queue (which isn't counted as available) which
	 * effectively disables this filter
	 */
	if (vm_compressor_low_on_space() || divisor == 0) {
		vm_pageout_state.vm_page_filecache_min = 0;
	} else {
		vm_pageout_state.vm_page_filecache_min =
		    ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
	}
#else
	if (vm_compressor_out_of_space() || divisor == 0) {
		vm_pageout_state.vm_page_filecache_min = 0;
	} else {
		/*
		 * don't let the filecache_min fall below the specified critical level
		 */
		vm_pageout_state.vm_page_filecache_min =
		    ((AVAILABLE_NON_COMPRESSED_MEMORY) * 10) / divisor;
	}
#endif
	if (vm_page_free_count < (vm_page_free_reserved / 4)) {
		vm_pageout_state.vm_page_filecache_min = 0;
	}
}
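/*
 * Illustrative arithmetic for the floor computed above: vm_page_filecache_min
 * is (AVAILABLE_NON_COMPRESSED_MEMORY * 10) / divisor, so a divisor of 66
 * (an example value, not necessarily the default) keeps the floor near
 * 10/66 ~= 15% of available non-compressed memory, matching the 15% mentioned
 * in the comment above.  A divisor of 0, a compressor that is low on or out
 * of space, or free memory under a quarter of the reserve all drop the floor
 * to 0, so the filecache is no longer protected from being stolen.
 */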
/*
 * This function is called only from vm_pageout_scan and
 * it updates the flow control time to detect if VM pageout scan
 * isn't making progress.
 */
static void
vps_flow_control_reset_deadlock_timer(struct flow_control *flow_control)
{
	mach_timespec_t ts;
	clock_sec_t     sec;
	clock_nsec_t    nsec;

	ts.tv_sec = vm_pageout_state.vm_pageout_deadlock_wait / 1000;
	ts.tv_nsec = (vm_pageout_state.vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
	clock_get_system_nanotime(&sec, &nsec);
	flow_control->ts.tv_sec = (unsigned int) sec;
	flow_control->ts.tv_nsec = nsec;
	ADD_MACH_TIMESPEC(&flow_control->ts, &ts);

	flow_control->state = FCS_DELAYED;

	vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_internal++;
}
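/*
 * Example of the millisecond-to-timespec split above (illustrative value):
 * if vm_pageout_deadlock_wait were 300 (ms), ts would be { 0 sec,
 * 300 * 1000 * NSEC_PER_USEC = 300000000 nsec }, i.e. the flow-control
 * deadline lands 300ms past the current system time.
 */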
/*
 * This function is called only from vm_pageout_scan and
 * it is the flow control logic of VM pageout scan which
 * controls if it should block and for how long.
 * Any blocking of vm_pageout_scan happens ONLY in this function.
 */
static int
vps_flow_control(struct flow_control *flow_control, int *anons_grabbed, vm_object_t *object, int *delayed_unlock,
    vm_page_t *local_freeq, int *local_freed, int *vm_pageout_deadlock_target, unsigned int inactive_burst_count)
{
	boolean_t       exceeded_burst_throttle = FALSE;
	unsigned int    msecs = 0;
	uint32_t        inactive_external_count;
	struct vm_pageout_queue *iq;
	struct vm_pageout_queue *eq;
	struct vm_speculative_age_q *sq;

	iq = &vm_pageout_queue_internal;
	eq = &vm_pageout_queue_external;
	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	/*
	 * Sometimes we have to pause:
	 *	1) No inactive pages - nothing to do.
	 *	2) Loop control - no acceptable pages found on the inactive queue
	 *	   within the last vm_pageout_burst_inactive_throttle iterations
	 *	3) Flow control - default pageout queue is full
	 */
	if (vm_page_queue_empty(&vm_page_queue_inactive) &&
	    vm_page_queue_empty(&vm_page_queue_anonymous) &&
	    vm_page_queue_empty(&vm_page_queue_cleaned) &&
	    vm_page_queue_empty(&sq->age_q)) {
		VM_PAGEOUT_DEBUG(vm_pageout_scan_empty_throttle, 1);
		msecs = vm_pageout_state.vm_pageout_empty_wait;
	} else if (inactive_burst_count >=
	    MIN(vm_pageout_state.vm_pageout_burst_inactive_throttle,
	    (vm_page_inactive_count +
	    vm_page_speculative_count))) {
		VM_PAGEOUT_DEBUG(vm_pageout_scan_burst_throttle, 1);
		msecs = vm_pageout_state.vm_pageout_burst_wait;

		exceeded_burst_throttle = TRUE;
	} else if (VM_PAGE_Q_THROTTLED(iq) &&
	    VM_DYNAMIC_PAGING_ENABLED()) {
		clock_sec_t     sec;
		clock_nsec_t    nsec;
		mach_timespec_t ts;

		switch (flow_control->state) {
		case FCS_IDLE:
			if ((vm_page_free_count + *local_freed) < vm_page_free_target &&
			    vm_pageout_state.vm_restricted_to_single_processor == FALSE) {
				/*
				 * since the compressor is running independently of vm_pageout_scan
				 * let's not wait for it just yet... as long as we have a healthy supply
				 * of filecache pages to work with, let's keep stealing those.
				 */
				inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;

				if (vm_page_pageable_external_count > vm_pageout_state.vm_page_filecache_min &&
				    (inactive_external_count >= VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
					*anons_grabbed = ANONS_GRABBED_LIMIT;
					VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle_deferred, 1);
					return VM_PAGEOUT_SCAN_PROCEED;
				}
			}

			vps_flow_control_reset_deadlock_timer(flow_control);
			msecs = vm_pageout_state.vm_pageout_deadlock_wait;

			break;

		case FCS_DELAYED:
			clock_get_system_nanotime(&sec, &nsec);
			ts.tv_sec = (unsigned int) sec;
			ts.tv_nsec = nsec;

			if (CMP_MACH_TIMESPEC(&ts, &flow_control->ts) >= 0) {
				/*
				 * the pageout thread for the default pager is potentially
				 * deadlocked since the
				 * default pager queue has been throttled for more than the
				 * allowable time... we need to move some clean pages or dirty
				 * pages belonging to the external pagers if they aren't throttled
				 * vm_page_free_wanted represents the number of threads currently
				 * blocked waiting for pages... we'll move one page for each of
				 * these plus a fixed amount to break the logjam... once we're done
				 * moving this number of pages, we'll re-enter the FCS_DELAYED state
				 * with a new timeout target since we have no way of knowing
				 * whether we've broken the deadlock except through observation
				 * of the queue associated with the default pager... we need to
				 * stop moving pages and allow the system to run to see what
				 * state it settles into.
				 */

				*vm_pageout_deadlock_target = vm_pageout_state.vm_pageout_deadlock_relief +
				    vm_page_free_wanted + vm_page_free_wanted_privileged;
				VM_PAGEOUT_DEBUG(vm_pageout_scan_deadlock_detected, 1);
				flow_control->state = FCS_DEADLOCK_DETECTED;
				thread_wakeup((event_t) &vm_pageout_garbage_collect);
				return VM_PAGEOUT_SCAN_PROCEED;
			}
			/*
			 * just resniff instead of trying
			 * to compute a new delay time... we're going to be
			 * awakened immediately upon a laundry completion,
			 * so we won't wait any longer than necessary
			 */
			msecs = vm_pageout_state.vm_pageout_idle_wait;
			break;

		case FCS_DEADLOCK_DETECTED:
			if (*vm_pageout_deadlock_target) {
				return VM_PAGEOUT_SCAN_PROCEED;
			}

			vps_flow_control_reset_deadlock_timer(flow_control);
			msecs = vm_pageout_state.vm_pageout_deadlock_wait;

			break;
		}
	} else {
		/*
		 * No need to pause...
		 */
		return VM_PAGEOUT_SCAN_PROCEED;
	}

	vm_pageout_scan_wants_object = VM_OBJECT_NULL;

	vm_pageout_prepare_to_block(object, delayed_unlock, local_freeq, local_freed,
	    VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);

	if (vm_page_free_count >= vm_page_free_target) {
		/*
		 * we're here because
		 *  1) someone else freed up some pages while we had
		 *     the queues unlocked above
		 * and we've hit one of the 3 conditions that
		 * cause us to pause the pageout scan thread
		 *
		 * since we already have enough free pages,
		 * let's avoid stalling and return normally
		 *
		 * before we return, make sure the pageout I/O threads
		 * are running throttled in case there are still requests
		 * in the laundry... since we have enough free pages
		 * we don't need the laundry to be cleaned in a timely
		 * fashion... so let's avoid interfering with foreground
		 * activity
		 *
		 * we don't want to hold vm_page_queue_free_lock when
		 * calling vm_pageout_adjust_eq_iothrottle (since it
		 * may cause other locks to be taken), we do the initial
		 * check outside of the lock.  Once we take the lock,
		 * we recheck the condition since it may have changed.
		 * if it has, no problem, we will make the threads
		 * non-throttled before actually blocking
		 */
		vm_pageout_adjust_eq_iothrottle(eq, TRUE);

		lck_mtx_lock(&vm_page_queue_free_lock);

		if (vm_page_free_count >= vm_page_free_target &&
		    (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
			return VM_PAGEOUT_SCAN_DONE_RETURN;
		}
		lck_mtx_unlock(&vm_page_queue_free_lock);
	}

	if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
		/*
		 * we're most likely about to block due to one of
		 * the 3 conditions that cause vm_pageout_scan to
		 * not be able to make forward progress w/r
		 * to providing new pages to the free queue,
		 * so unthrottle the I/O threads in case we
		 * have laundry to be cleaned... it needs
		 * to be completed ASAP.
		 *
		 * even if we don't block, we want the io threads
		 * running unthrottled since the sum of free +
		 * clean pages is still under our free target
		 */
		vm_pageout_adjust_eq_iothrottle(eq, FALSE);
	}
	if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
		/*
		 * if we get here we're below our free target and
		 * we're stalling due to a full laundry queue or
		 * we don't have any inactive pages other than
		 * those in the clean queue...
		 * however, we have pages on the clean queue that
		 * can be moved to the free queue, so let's not
		 * stall the pageout scan
		 */
		flow_control->state = FCS_IDLE;
		return VM_PAGEOUT_SCAN_PROCEED;
	}
	if (flow_control->state == FCS_DELAYED && !VM_PAGE_Q_THROTTLED(iq)) {
		flow_control->state = FCS_IDLE;
		return VM_PAGEOUT_SCAN_PROCEED;
	}

	VM_CHECK_MEMORYSTATUS;

	if (flow_control->state != FCS_IDLE) {
		VM_PAGEOUT_DEBUG(vm_pageout_scan_throttle, 1);
	}

	iq->pgo_throttled = TRUE;
	assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000 * NSEC_PER_USEC);

	counter(c_vm_pageout_scan_block++);

	vm_page_unlock_queues();

	assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);

	VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
	    iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
	memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);

	thread_block(THREAD_CONTINUE_NULL);

	VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
	    iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
	memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);

	vm_page_lock_queues();

	iq->pgo_throttled = FALSE;

	vps_init_page_targets();

	return VM_PAGEOUT_SCAN_NEXT_ITERATION;
}
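/*
 * Summary of the wait lengths picked by the flow-control logic above (all
 * pulled from vm_pageout_state):
 *
 *	no inactive/anonymous/cleaned/aged pages	-> vm_pageout_empty_wait
 *	burst throttle exceeded				-> vm_pageout_burst_wait
 *	internal queue throttled, FCS_IDLE or
 *	    FCS_DEADLOCK_DETECTED rearm			-> vm_pageout_deadlock_wait
 *	FCS_DELAYED with deadline not yet reached	-> vm_pageout_idle_wait
 *
 * The thread then blocks on the internal queue's pgo_laundry event for
 * 'msecs' unless one of the early-return checks shows progress is possible.
 */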
/*
 * This function is called only from vm_pageout_scan and
 * it will find and return the most appropriate page to be
 * reclaimed.
 */
static int
vps_choose_victim_page(vm_page_t *victim_page, int *anons_grabbed, boolean_t *grab_anonymous, boolean_t force_anonymous,
    boolean_t *is_page_from_bg_q, unsigned int *reactivated_this_call)
{
	vm_page_t                       m = NULL;
	vm_object_t                     m_object = VM_OBJECT_NULL;
	uint32_t                        inactive_external_count;
	struct vm_speculative_age_q     *sq;
	struct vm_pageout_queue         *iq;
	int                             retval = VM_PAGEOUT_SCAN_PROCEED;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
	iq = &vm_pageout_queue_internal;

	*is_page_from_bg_q = FALSE;

	m = NULL;
	m_object = VM_OBJECT_NULL;

	if (VM_DYNAMIC_PAGING_ENABLED()) {
		assert(vm_page_throttled_count == 0);
		assert(vm_page_queue_empty(&vm_page_queue_throttled));
	}

	/*
	 * Try for a clean-queue inactive page.
	 * These are pages that vm_pageout_scan tried to steal earlier, but
	 * were dirty and had to be cleaned.  Pick them up now that they are clean.
	 */
	if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);

		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);

		goto found_page;
	}

	/*
	 * The next most eligible pages are ones we paged in speculatively,
	 * but which have not yet been touched and have been aged out.
	 */
	if (!vm_page_queue_empty(&sq->age_q)) {
		m = (vm_page_t) vm_page_queue_first(&sq->age_q);

		assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);

		if (!m->vmp_dirty || force_anonymous == FALSE) {
			goto found_page;
		} else {
			m = NULL;
		}
	}

#if CONFIG_BACKGROUND_QUEUE
	if (vm_page_background_mode != VM_PAGE_BG_DISABLED && (vm_page_background_count > vm_page_background_target)) {
		vm_object_t     bg_m_object = NULL;

		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_background);

		bg_m_object = VM_PAGE_OBJECT(m);

		if (!VM_PAGE_PAGEABLE(m)) {
			/*
			 * This page is on the background queue
			 * but not on a pageable queue.  This is
			 * likely a transient state and whoever
			 * took it out of its pageable queue
			 * will likely put it back on a pageable
			 * queue soon but we can't deal with it
			 * at this point, so let's ignore this
			 * page.
			 */
		} else if (force_anonymous == FALSE || bg_m_object->internal) {
			if (bg_m_object->internal &&
			    (VM_PAGE_Q_THROTTLED(iq) ||
			    vm_compressor_out_of_space() == TRUE ||
			    vm_page_free_count < (vm_page_free_reserved / 4))) {
				vm_pageout_skipped_bq_internal++;
			} else {
				*is_page_from_bg_q = TRUE;

				if (bg_m_object->internal) {
					vm_pageout_vminfo.vm_pageout_considered_bq_internal++;
				} else {
					vm_pageout_vminfo.vm_pageout_considered_bq_external++;
				}
				goto found_page;
			}
		}
		m = NULL;
	}
#endif /* CONFIG_BACKGROUND_QUEUE */

	inactive_external_count = vm_page_inactive_count - vm_page_anonymous_count;

	if ((vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min || force_anonymous == TRUE) ||
	    (inactive_external_count < VM_PAGE_INACTIVE_TARGET(vm_page_pageable_external_count))) {
		*grab_anonymous = TRUE;
		*anons_grabbed = 0;

		vm_pageout_vminfo.vm_pageout_skipped_external++;
		goto want_anonymous;
	}
	*grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);

#if CONFIG_JETSAM
	/* If the file-backed pool has accumulated
	 * significantly more pages than the jetsam
	 * threshold, prefer to reclaim those
	 * inline to minimise compute overhead of reclaiming
	 * anonymous pages.
	 * This calculation does not account for the CPU local
	 * external page queues, as those are expected to be
	 * much smaller relative to the global pools.
	 */

	struct vm_pageout_queue *eq = &vm_pageout_queue_external;

	if (*grab_anonymous == TRUE && !VM_PAGE_Q_THROTTLED(eq)) {
		if (vm_page_pageable_external_count >
		    vm_pageout_state.vm_page_filecache_min) {
			if ((vm_page_pageable_external_count *
			    vm_pageout_memorystatus_fb_factor_dr) >
			    (memorystatus_available_pages_critical *
			    vm_pageout_memorystatus_fb_factor_nr)) {
				*grab_anonymous = FALSE;

				VM_PAGEOUT_DEBUG(vm_grab_anon_overrides, 1);
			}
		}
		if (*grab_anonymous) {
			VM_PAGEOUT_DEBUG(vm_grab_anon_nops, 1);
		}
	}
#endif /* CONFIG_JETSAM */

want_anonymous:
	if (*grab_anonymous == FALSE || *anons_grabbed >= ANONS_GRABBED_LIMIT || vm_page_queue_empty(&vm_page_queue_anonymous)) {
		if (!vm_page_queue_empty(&vm_page_queue_inactive)) {
			m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);

			assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
			*anons_grabbed = 0;

			if (vm_page_pageable_external_count < vm_pageout_state.vm_page_filecache_min) {
				if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
					if ((++(*reactivated_this_call) % 100)) {
						vm_pageout_vminfo.vm_pageout_filecache_min_reactivated++;

						vm_page_activate(m);
						VM_STAT_INCR(reactivations);
#if CONFIG_BACKGROUND_QUEUE
#if DEVELOPMENT || DEBUG
						if (*is_page_from_bg_q == TRUE) {
							if (m_object->internal) {
								vm_pageout_rejected_bq_internal++;
							} else {
								vm_pageout_rejected_bq_external++;
							}
						}
#endif /* DEVELOPMENT || DEBUG */
#endif /* CONFIG_BACKGROUND_QUEUE */
						vm_pageout_state.vm_pageout_inactive_used++;

						m = NULL;
						retval = VM_PAGEOUT_SCAN_NEXT_ITERATION;

						goto found_page;
					}

					/*
					 * steal 1 of the file backed pages even if
					 * we are under the limit that has been set
					 * for a healthy filecache
					 */
				}
			}
			goto found_page;
		}
	}
	if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);

		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
		*anons_grabbed += 1;
	}

found_page:
	*victim_page = m;

	return retval;
}
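/*
 * Victim selection order implemented above, highest preference first:
 *
 *	1) head of vm_page_queue_cleaned (already cleaned by a prior pass)
 *	2) head of the aged speculative queue
 *	3) background queue, when enabled and over its target
 *	4) file-backed inactive pages, unless the filecache is below
 *	   vm_page_filecache_min or force_anonymous is set
 *	5) anonymous inactive pages, rate limited by ANONS_GRABBED_LIMIT
 */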
/*
 * This function is called only from vm_pageout_scan and
 * it will put a page back on the active/inactive queue
 * if we can't reclaim it for some reason.
 */
static void
vps_requeue_page(vm_page_t m, int page_prev_q_state, __unused boolean_t page_from_bg_q)
{
	if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
		vm_page_enqueue_inactive(m, FALSE);
	} else {
		vm_page_activate(m);
	}

#if CONFIG_BACKGROUND_QUEUE
#if DEVELOPMENT || DEBUG
	vm_object_t m_object = VM_PAGE_OBJECT(m);

	if (page_from_bg_q == TRUE) {
		if (m_object->internal) {
			vm_pageout_rejected_bq_internal++;
		} else {
			vm_pageout_rejected_bq_external++;
		}
	}
#endif /* DEVELOPMENT || DEBUG */
#endif /* CONFIG_BACKGROUND_QUEUE */
}
/*
 * This function is called only from vm_pageout_scan and
 * it will try to grab the victim page's VM object (m_object)
 * which differs from the previous victim page's object (object).
 */
static int
vps_switch_object(vm_page_t m, vm_object_t m_object, vm_object_t *object, int page_prev_q_state, boolean_t avoid_anon_pages, boolean_t page_from_bg_q)
{
	struct vm_speculative_age_q *sq;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	/*
	 * the object associated with candidate page is
	 * different from the one we were just working
	 * with... dump the lock if we still own it
	 */
	if (*object != NULL) {
		vm_object_unlock(*object);
		*object = NULL;
	}
	/*
	 * Try to lock object; since we've already got the
	 * page queues lock, we can only 'try' for this one.
	 * if the 'try' fails, we need to do a mutex_pause
	 * to allow the owner of the object lock a chance to
	 * run... otherwise, we're likely to trip over this
	 * object in the same state as we work our way through
	 * the queue... clumps of pages associated with the same
	 * object are fairly typical on the inactive and active queues
	 */
	if (!vm_object_lock_try_scan(m_object)) {
		vm_page_t m_want = NULL;

		vm_pageout_vminfo.vm_pageout_inactive_nolock++;

		if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
			VM_PAGEOUT_DEBUG(vm_pageout_cleaned_nolock, 1);
		}

		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));

		m->vmp_reference = FALSE;

		if (!m_object->object_is_shared_cache) {
			/*
			 * don't apply this optimization if this is the shared cache
			 * object, it's too easy to get rid of very hot and important
			 * pages...
			 * m->vmp_object must be stable since we hold the page queues lock...
			 * we can update the scan_collisions field sans the object lock
			 * since it is a separate field and this is the only spot that does
			 * a read-modify-write operation and it is never executed concurrently...
			 * we can asynchronously set this field to 0 when creating a UPL, so it
			 * is possible for the value to be a bit non-deterministic, but that's ok
			 * since it's only used as a hint
			 */
			m_object->scan_collisions = 1;
		}
		if (!vm_page_queue_empty(&vm_page_queue_cleaned)) {
			m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
		} else if (!vm_page_queue_empty(&sq->age_q)) {
			m_want = (vm_page_t) vm_page_queue_first(&sq->age_q);
		} else if ((avoid_anon_pages || vm_page_queue_empty(&vm_page_queue_anonymous)) &&
		    !vm_page_queue_empty(&vm_page_queue_inactive)) {
			m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
		} else if (!vm_page_queue_empty(&vm_page_queue_anonymous)) {
			m_want = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
		}

		/*
		 * this is the next object we're going to be interested in
		 * try to make sure its available after the mutex_pause
		 * is released
		 */
		if (m_want) {
			vm_pageout_scan_wants_object = VM_PAGE_OBJECT(m_want);
		}

		vps_requeue_page(m, page_prev_q_state, page_from_bg_q);

		return VM_PAGEOUT_SCAN_NEXT_ITERATION;
	} else {
		*object = m_object;
		vm_pageout_scan_wants_object = VM_OBJECT_NULL;
	}

	return VM_PAGEOUT_SCAN_PROCEED;
}
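/*
 * Lock-ordering note for the code above: the page queues lock is already
 * held, so the object lock can only be try-acquired here; blocking on it
 * could invert the usual object-then-page-queues ordering.  On a failed try,
 * publishing the next interesting object in vm_pageout_scan_wants_object
 * gives its current owner a hint to make it available around the mutex_pause
 * that vm_pageout_scan takes before retrying.
 */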
/*
 * This function is called only from vm_pageout_scan and
 * it notices that pageout scan may be rendered ineffective
 * due to a FS deadlock and will jetsam a process if possible.
 * If jetsam isn't supported, it'll move the page to the active
 * queue to try and get some different pages pushed onwards so
 * we can try to get out of this scenario.
 */
static void
vps_deal_with_throttled_queues(vm_page_t m, vm_object_t *object, uint32_t *vm_pageout_inactive_external_forced_reactivate_limit,
    int *delayed_unlock, boolean_t *force_anonymous, __unused boolean_t is_page_from_bg_q)
{
	struct vm_pageout_queue *eq;
	vm_object_t cur_object = VM_OBJECT_NULL;

	cur_object = *object;

	eq = &vm_pageout_queue_external;

	if (cur_object->internal == FALSE) {
		/*
		 * we need to break up the following potential deadlock case...
		 *  a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
		 *  b) The thread doing the writing is waiting for pages while holding the truncate lock
		 *  c) Most of the pages in the inactive queue belong to this file.
		 *
		 * we are potentially in this deadlock because...
		 *  a) the external pageout queue is throttled
		 *  b) we're done with the active queue and moved on to the inactive queue
		 *  c) we've got a dirty external page
		 *
		 * since we don't know the reason for the external pageout queue being throttled we
		 * must suspect that we are deadlocked, so move the current page onto the active queue
		 * in an effort to cause a page from the active queue to 'age' to the inactive queue
		 *
		 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
		 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
		 * pool the next time we select a victim page... if we can make enough new free pages,
		 * the deadlock will break, the external pageout queue will empty and it will no longer
		 * be throttled
		 *
		 * if we have jetsam configured, keep a count of the pages reactivated this way so
		 * that we can try to find clean pages in the active/inactive queues before
		 * deciding to jetsam a process
		 */
		vm_pageout_vminfo.vm_pageout_scan_inactive_throttled_external++;

		vm_page_check_pageable_safe(m);
		assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
		vm_page_queue_enter(&vm_page_queue_active, m, vmp_pageq);
		m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
		vm_page_active_count++;
		vm_page_pageable_external_count++;

		vm_pageout_adjust_eq_iothrottle(eq, FALSE);

#if CONFIG_MEMORYSTATUS && CONFIG_JETSAM

#pragma unused(force_anonymous)

		*vm_pageout_inactive_external_forced_reactivate_limit -= 1;

		if (*vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
			*vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
			/*
			 * Possible deadlock scenario so request jetsam action
			 */
			vm_object_unlock(cur_object);

			cur_object = VM_OBJECT_NULL;

			/*
			 * VM pageout scan needs to know we have dropped this lock and so set the
			 * object variable we got passed in to NULL.
			 */
			*object = VM_OBJECT_NULL;

			vm_page_unlock_queues();

			VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
			    vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);

			/* Kill first suitable process. If this call returned FALSE, we might have simply purged a process instead. */
			if (memorystatus_kill_on_VM_page_shortage(FALSE) == TRUE) {
				VM_PAGEOUT_DEBUG(vm_pageout_inactive_external_forced_jetsam_count, 1);
			}

			VM_DEBUG_CONSTANT_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END,
			    vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);

			vm_page_lock_queues();
			*delayed_unlock = 1;
		}
#else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */

#pragma unused(vm_pageout_inactive_external_forced_reactivate_limit)
#pragma unused(delayed_unlock)

		*force_anonymous = TRUE;
#endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
	} else {
		vm_page_activate(m);
		VM_STAT_INCR(reactivations);

#if CONFIG_BACKGROUND_QUEUE
#if DEVELOPMENT || DEBUG
		if (is_page_from_bg_q == TRUE) {
			if (cur_object->internal) {
				vm_pageout_rejected_bq_internal++;
			} else {
				vm_pageout_rejected_bq_external++;
			}
		}
#endif /* DEVELOPMENT || DEBUG */
#endif /* CONFIG_BACKGROUND_QUEUE */

		vm_pageout_state.vm_pageout_inactive_used++;
	}
}
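/*
 * Accounting used by the jetsam path above: the caller seeds
 * vm_pageout_inactive_external_forced_reactivate_limit with
 * vm_page_active_count + vm_page_inactive_count, and every forced
 * reactivation of a dirty external page while the external queue is
 * throttled decrements it.  Only when that budget is exhausted is
 * memorystatus_kill_on_VM_page_shortage() invoked, after which the budget
 * is reset to the current active + inactive total.
 */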
void
vm_page_balance_inactive(int max_to_move)
{
	vm_page_t m;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (hibernation_vmqueues_inspection || hibernate_cleaning_in_progress) {
		/*
		 * It is likely that the hibernation code path is
		 * dealing with these very queues as we are about
		 * to move pages around in/from them and completely
		 * change the linkage of the pages.
		 *
		 * And so we skip the rebalancing of these queues.
		 */
		return;
	}
	vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
	    vm_page_inactive_count +
	    vm_page_speculative_count);

	while (max_to_move-- && (vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) {
		VM_PAGEOUT_DEBUG(vm_pageout_balanced, 1);

		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);

		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
		assert(!m->vmp_laundry);
		assert(VM_PAGE_OBJECT(m) != kernel_object);
		assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);

		DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);

		/*
		 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
		 *
		 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
		 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
		 * new reference happens. If no further references happen on the page after that remote TLB flushes
		 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
		 * by pageout_scan, which is just fine since the last reference would have happened quite far
		 * in the past (TLB caches don't hang around for very long), and of course could just as easily
		 * have happened before we moved the page
		 */
		if (m->vmp_pmapped == TRUE) {
			pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
		}

		/*
		 * The page might be absent or busy,
		 * but vm_page_deactivate can handle that.
		 * FALSE indicates that we don't want a H/W clear reference
		 */
		vm_page_deactivate_internal(m, FALSE);
	}
}
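/*
 * Balance example (illustrative proportions): if VM_PAGE_INACTIVE_TARGET()
 * asked for roughly a third of the pageable pages to be inactive, a system
 * with 300000 active + inactive + speculative pages would aim for an
 * inactive + speculative population of about 100000; each call moves at most
 * 'max_to_move' pages off the head of the active queue toward that target,
 * clearing only the pmap reference bit (no TLB flush) along the way.
 */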
/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *	It returns with both vm_page_queue_free_lock and vm_page_queue_lock
 *	held and vm_page_free_wanted == 0.
 */
void
vm_pageout_scan(void)
{
	unsigned int loop_count = 0;
	unsigned int inactive_burst_count = 0;
	unsigned int reactivated_this_call;
	unsigned int reactivate_limit;
	vm_page_t local_freeq = NULL;
	int local_freed = 0;
	int delayed_unlock = 0;
	int delayed_unlock_limit = 0;
	int refmod_state = 0;
	int vm_pageout_deadlock_target = 0;
	struct vm_pageout_queue *iq;
	struct vm_pageout_queue *eq;
	struct vm_speculative_age_q *sq;
	struct flow_control flow_control = { .state = 0, .ts = { .tv_sec = 0, .tv_nsec = 0 } };
	boolean_t inactive_throttled = FALSE;
	vm_object_t object = NULL;
	uint32_t inactive_reclaim_run;
	boolean_t grab_anonymous = FALSE;
	boolean_t force_anonymous = FALSE;
	boolean_t force_speculative_aging = FALSE;
	int anons_grabbed = 0;
	int page_prev_q_state = 0;
	boolean_t page_from_bg_q = FALSE;
	uint32_t vm_pageout_inactive_external_forced_reactivate_limit = 0;
	vm_object_t m_object = VM_OBJECT_NULL;
	int retval = 0;
	boolean_t lock_yield_check = FALSE;

	VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
	    vm_pageout_vminfo.vm_pageout_freed_speculative,
	    vm_pageout_state.vm_pageout_inactive_clean,
	    vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
	    vm_pageout_vminfo.vm_pageout_inactive_dirty_external);

	flow_control.state = FCS_IDLE;
	iq = &vm_pageout_queue_internal;
	eq = &vm_pageout_queue_external;
	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	/* Ask the pmap layer to return any pages it no longer needs. */
	uint64_t pmap_wired_pages_freed = pmap_release_pages_fast();

	vm_page_lock_queues();

	vm_page_wire_count -= pmap_wired_pages_freed;

	delayed_unlock = 1;

	/*
	 *	Calculate the max number of referenced pages on the inactive
	 *	queue that we will reactivate.
	 */
	reactivated_this_call = 0;
	reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
	    vm_page_inactive_count);
	inactive_reclaim_run = 0;

	vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;

	/*
	 *	We must limit the rate at which we send pages to the pagers
	 *	so that we don't tie up too many pages in the I/O queues.
	 *	We implement a throttling mechanism using the laundry count
	 *	to limit the number of pages outstanding to the default
	 *	and external pagers.  We can bypass the throttles and look
	 *	for clean pages if the pageout queues don't drain in a timely
	 *	fashion since this may indicate that the pageout paths are
	 *	stalled waiting for memory, which only we can provide.
	 */
	vps_init_page_targets();
	assert(object == NULL);
	assert(delayed_unlock != 0);
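	/*
	 * Shape of each trip through the loop below:
	 *   - honor lock_yield_check: batch-free locally collected pages,
	 *     yield the page queues lock, or step aside for a waiting
	 *     object owner
	 *   - rebalance one page from the active queue
	 *   - return if the free target is met and nobody is waiting
	 *   - otherwise try, in order: purging a ripe volatile object,
	 *     aging a speculative queue, evicting cached objects, and
	 *     flow control (the only place this thread blocks), then
	 *     choose and process a single victim page
	 */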
	for (;;) {
		vm_page_t m;

		DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);

		if (lock_yield_check) {
			lock_yield_check = FALSE;

			if (delayed_unlock++ > delayed_unlock_limit) {
				int freed = local_freed;

				vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
				    VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
				if (freed == 0) {
					lck_mtx_yield(&vm_page_queue_lock);
				}
			} else if (vm_pageout_scan_wants_object) {
				vm_page_unlock_queues();
				mutex_pause(0);
				vm_page_lock_queues();
			}
		}

		if (vm_upl_wait_for_pages < 0) {
			vm_upl_wait_for_pages = 0;
		}

		delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;

		if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX) {
			delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;
		}

		vps_deal_with_secluded_page_overflow(&local_freeq, &local_freed);

		assert(delayed_unlock);

		/*
		 * maintain our balance
		 */
		vm_page_balance_inactive(1);


		/**********************************************************************
		* above this point we're playing with the active and secluded queues
		* below this point we're playing with the throttling mechanisms
		* and the inactive queue
		**********************************************************************/

		if (vm_page_free_count + local_freed >= vm_page_free_target) {
			vm_pageout_scan_wants_object = VM_OBJECT_NULL;

			vm_pageout_prepare_to_block(&object, &delayed_unlock, &local_freeq, &local_freed,
			    VM_PAGEOUT_PB_CONSIDER_WAKING_COMPACTOR_SWAPPER);
			/*
			 * make sure the pageout I/O threads are running
			 * throttled in case there are still requests
			 * in the laundry... since we have met our targets
			 * we don't need the laundry to be cleaned in a timely
			 * fashion... so let's avoid interfering with foreground
			 * activity
			 */
			vm_pageout_adjust_eq_iothrottle(eq, TRUE);

			lck_mtx_lock(&vm_page_queue_free_lock);

			if ((vm_page_free_count >= vm_page_free_target) &&
			    (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
				/*
				 * done - we have met our target *and*
				 * there is no one waiting for a page.
				 */
return_from_scan:
				assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);

				VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
				    vm_pageout_state.vm_pageout_inactive,
				    vm_pageout_state.vm_pageout_inactive_used, 0, 0);
				VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
				    vm_pageout_vminfo.vm_pageout_freed_speculative,
				    vm_pageout_state.vm_pageout_inactive_clean,
				    vm_pageout_vminfo.vm_pageout_inactive_dirty_internal,
				    vm_pageout_vminfo.vm_pageout_inactive_dirty_external);

				return;
			}
			lck_mtx_unlock(&vm_page_queue_free_lock);
		}

		/*
		 * Before anything, we check if we have any ripe volatile
		 * objects around. If so, try to purge the first object.
		 * If the purge fails, fall through to reclaim a page instead.
		 * If the purge succeeds, go back to the top and reevalute
		 * the new memory situation.
		 */

		retval = vps_purge_object();

		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
			if (object != NULL) {
				vm_object_unlock(object);
				object = NULL;
			}

			lock_yield_check = FALSE;
			continue;
		}

		/*
		 * If our 'aged' queue is empty and we have some speculative pages
		 * in the other queues, let's go through and see if we need to age
		 * them.
		 *
		 * If we succeeded in aging a speculative Q or just that everything
		 * looks normal w.r.t queue age and queue counts, we keep going onward.
		 *
		 * If, for some reason, we seem to have a mismatch between the spec.
		 * page count and the page queues, we reset those variables and
		 * restart the loop (LD TODO: Track this better?).
		 */
		if (vm_page_queue_empty(&sq->age_q) && vm_page_speculative_count) {
			retval = vps_age_speculative_queue(force_speculative_aging);

			if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
				lock_yield_check = FALSE;
				continue;
			}
		}
		force_speculative_aging = FALSE;

		/*
		 * Check to see if we need to evict objects from the cache.
		 *
		 * Note: 'object' here doesn't have anything to do with
		 * the eviction part. We just need to make sure we have dropped
		 * any object lock we might be holding if we need to go down
		 * into the eviction logic.
		 */

		retval = vps_object_cache_evict(&object);

		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
			lock_yield_check = FALSE;
			continue;
		}

		/*
		 * Calculate our filecache_min that will affect the loop
		 * going forward.
		 */
		vps_calculate_filecache_min();

		/*
		 * LD TODO: Use a structure to hold all state variables for a single
		 * vm_pageout_scan iteration and pass that structure to this function instead.
		 */
		retval = vps_flow_control(&flow_control, &anons_grabbed, &object,
		    &delayed_unlock, &local_freeq, &local_freed,
		    &vm_pageout_deadlock_target, inactive_burst_count);

		if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
			if (loop_count >= vm_page_inactive_count) {
				loop_count = 0;
			}

			inactive_burst_count = 0;

			assert(object == NULL);
			assert(delayed_unlock != 0);

			lock_yield_check = FALSE;
			continue;
		} else if (retval == VM_PAGEOUT_SCAN_DONE_RETURN) {
			goto return_from_scan;
		}

		flow_control.state = FCS_IDLE;

		vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
		    vm_pageout_inactive_external_forced_reactivate_limit);
		loop_count++;
		inactive_burst_count++;
		vm_pageout_state.vm_pageout_inactive++;
		/*
		 * Choose a victim.
		 */
		retval = vps_choose_victim_page(&m, &anons_grabbed, &grab_anonymous, force_anonymous, &page_from_bg_q, &reactivated_this_call);

		if (m == NULL) {
			if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
				inactive_burst_count = 0;

				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
				}

				lock_yield_check = TRUE;
				continue;
			}

			/*
			 * if we've gotten here, we have no victim page.
			 * check to see if we've not finished balancing the queues
			 * or we have a page on the aged speculative queue that we
			 * skipped due to force_anonymous == TRUE.. or we have
			 * speculative pages that we can prematurely age... if
			 * one of these cases we'll keep going, else panic
			 */
			force_anonymous = FALSE;
			VM_PAGEOUT_DEBUG(vm_pageout_no_victim, 1);

			if (!vm_page_queue_empty(&sq->age_q)) {
				lock_yield_check = TRUE;
				continue;
			}

			if (vm_page_speculative_count) {
				force_speculative_aging = TRUE;
				lock_yield_check = TRUE;
				continue;
			}
			panic("vm_pageout: no victim");

			/* NOTREACHED */
		}

		assert(VM_PAGE_PAGEABLE(m));
		m_object = VM_PAGE_OBJECT(m);
		force_anonymous = FALSE;

		page_prev_q_state = m->vmp_q_state;
		/*
		 * we just found this page on one of our queues...
		 * it can't also be on the pageout queue, so safe
		 * to call vm_page_queues_remove
		 */
		vm_page_queues_remove(m, TRUE);

		assert(!m->vmp_laundry);
		assert(!m->vmp_private);
		assert(!m->vmp_fictitious);
		assert(m_object != kernel_object);
		assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);

		vm_pageout_vminfo.vm_pageout_considered_page++;

		DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);

		/*
		 * check to see if we currently are working
		 * with the same object... if so, we've
		 * already got the lock
		 */
		if (m_object != object) {
			boolean_t avoid_anon_pages = (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT);

			/*
			 * vps_switch_object() will always drop the 'object' lock first
			 * and then try to acquire the 'm_object' lock. So 'object' has to point to
			 * either 'm_object' or NULL.
			 */
			retval = vps_switch_object(m, m_object, &object, page_prev_q_state, avoid_anon_pages, page_from_bg_q);

			if (retval == VM_PAGEOUT_SCAN_NEXT_ITERATION) {
				lock_yield_check = TRUE;
				continue;
			}
		}
		assert(m_object == object);
		assert(VM_PAGE_OBJECT(m) == m_object);

		if (m->vmp_busy) {
			/*
			 * Somebody is already playing with this page.
			 * Put it back on the appropriate queue
			 *
			 */
			VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);

			if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
				VM_PAGEOUT_DEBUG(vm_pageout_cleaned_busy, 1);
			}

			vps_requeue_page(m, page_prev_q_state, page_from_bg_q);

			lock_yield_check = TRUE;
			continue;
		}

		/*
		 *   if (m->vmp_cleaning && !m->vmp_free_when_done)
		 *	If already cleaning this page in place
		 *	just leave it off the paging queues.
		 *	We can leave the page mapped, and upl_commit_range
		 *	will put it on the clean queue.
		 *
		 *   if (m->vmp_free_when_done && !m->vmp_cleaning)
		 *	an msync INVALIDATE is in progress...
		 *	this page has been marked for destruction
		 *	after it has been cleaned,
		 *	but not yet gathered into a UPL
		 *	where 'cleaning' will be set...
		 *	just leave it off the paging queues
		 *
		 *   if (m->vmp_free_when_done && m->vmp_cleaning)
		 *	an msync INVALIDATE is in progress
		 *	and the UPL has already gathered this page...
		 *	just leave it off the paging queues
		 */
		if (m->vmp_free_when_done || m->vmp_cleaning) {
			lock_yield_check = TRUE;
			continue;
		}
		/*
		 *	If it's absent, in error or the object is no longer alive,
		 *	we can reclaim the page... in the no longer alive case,
		 *	there are 2 states the page can be in that preclude us
		 *	from reclaiming it - busy or cleaning - that we've already
		 *	dealt with
		 */
		if (m->vmp_absent || m->vmp_error || !object->alive) {
			if (m->vmp_absent) {
				VM_PAGEOUT_DEBUG(vm_pageout_inactive_absent, 1);
			} else if (!object->alive) {
				VM_PAGEOUT_DEBUG(vm_pageout_inactive_notalive, 1);
			} else {
				VM_PAGEOUT_DEBUG(vm_pageout_inactive_error, 1);
			}
reclaim_page:
			if (vm_pageout_deadlock_target) {
				VM_PAGEOUT_DEBUG(vm_pageout_scan_inactive_throttle_success, 1);
				vm_pageout_deadlock_target--;
			}

			DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);

			if (object->internal) {
				DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
			} else {
				DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
			}
			assert(!m->vmp_cleaning);
			assert(!m->vmp_laundry);

			if (!object->internal &&
			    object->pager != NULL &&
			    object->pager->mo_pager_ops == &shared_region_pager_ops) {
				shared_region_pager_reclaimed++;
			}

			/*
			 * remove page from object here since we're already
			 * behind the object lock... defer the rest of the work
			 * we'd normally do in vm_page_free_prepare_object
			 * until 'vm_page_free_list' is called
			 */
			if (m->vmp_tabled) {
				vm_page_remove(m, TRUE);
			}

			assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
			m->vmp_snext = local_freeq;
			local_freeq = m;
			local_freed++;

			if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
				vm_pageout_vminfo.vm_pageout_freed_speculative++;
			} else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
				vm_pageout_vminfo.vm_pageout_freed_cleaned++;
			} else if (page_prev_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) {
				vm_pageout_vminfo.vm_pageout_freed_internal++;
			} else {
				vm_pageout_vminfo.vm_pageout_freed_external++;
			}

			inactive_burst_count = 0;

			lock_yield_check = TRUE;
			continue;
		}
		if (object->copy == VM_OBJECT_NULL) {
			/*
			 * No one else can have any interest in this page.
			 * If this is an empty purgable object, the page can be
			 * reclaimed even if dirty.
			 * If the page belongs to a volatile purgable object, we
			 * reactivate it if the compressor isn't active.
			 */
			if (object->purgable == VM_PURGABLE_EMPTY) {
				if (m->vmp_pmapped == TRUE) {
					/* unmap the page */
					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
					if (refmod_state & VM_MEM_MODIFIED) {
						SET_PAGE_DIRTY(m, FALSE);
					}
				}
				if (m->vmp_dirty || m->vmp_precious) {
					/* we saved the cost of cleaning this page ! */
					vm_page_purged_count++;
				}
				goto reclaim_page;
			}

			if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
				/*
				 * With the VM compressor, the cost of
				 * reclaiming a page is much lower (no I/O),
				 * so if we find a "volatile" page, it's better
				 * to let it get compressed rather than letting
				 * it occupy a full page until it gets purged.
				 * So no need to check for "volatile" here.
				 */
			} else if (object->purgable == VM_PURGABLE_VOLATILE) {
				/*
				 * Avoid cleaning a "volatile" page which might
				 * be purged soon.
				 */

				/* if it's wired, we can't put it on our queue */
				assert(!VM_PAGE_WIRED(m));

				/* just stick it back on! */
				reactivated_this_call++;

				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_volatile_reactivated, 1);
				}

				goto reactivate_page;
			}
		}
		/*
		 *	If it's being used, reactivate.
		 *	(Fictitious pages are either busy or absent.)
		 *	First, update the reference and dirty bits
		 *	to make sure the page is unreferenced.
		 */
		refmod_state = -1;

		if (m->vmp_reference == FALSE && m->vmp_pmapped == TRUE) {
			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));

			if (refmod_state & VM_MEM_REFERENCED) {
				m->vmp_reference = TRUE;
			}
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}

		if (m->vmp_reference || m->vmp_dirty) {
			/* deal with a rogue "reusable" page */
			VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m, m_object);
		}

		if (vm_pageout_state.vm_page_xpmapped_min_divisor == 0) {
			vm_pageout_state.vm_page_xpmapped_min = 0;
		} else {
			vm_pageout_state.vm_page_xpmapped_min = (vm_page_external_count * 10) / vm_pageout_state.vm_page_xpmapped_min_divisor;
		}

		if (!m->vmp_no_cache &&
		    page_from_bg_q == FALSE &&
		    (m->vmp_reference || (m->vmp_xpmapped && !object->internal &&
		    (vm_page_xpmapped_external_count < vm_pageout_state.vm_page_xpmapped_min)))) {
			/*
			 * The page we pulled off the inactive list has
			 * been referenced.  It is possible for other
			 * processors to be touching pages faster than we
			 * can clear the referenced bit and traverse the
			 * inactive queue, so we limit the number of
			 * reactivations.
			 */
			if (++reactivated_this_call >= reactivate_limit) {
				vm_pageout_vminfo.vm_pageout_reactivation_limit_exceeded++;
			} else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
				vm_pageout_vminfo.vm_pageout_inactive_force_reclaim++;
			} else {
				uint32_t isinuse;

				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reference_reactivated, 1);
				}

				vm_pageout_vminfo.vm_pageout_inactive_referenced++;
reactivate_page:
				if (!object->internal && object->pager != MEMORY_OBJECT_NULL &&
				    vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
					/*
					 * no explicit mappings of this object exist
					 * and it's not open via the filesystem
					 */
					vm_page_deactivate(m);
					VM_PAGEOUT_DEBUG(vm_pageout_inactive_deactivated, 1);
				} else {
					/*
					 * The page was/is being used, so put back on active list.
					 */
					vm_page_activate(m);
					VM_STAT_INCR(reactivations);
					inactive_burst_count = 0;
				}
#if CONFIG_BACKGROUND_QUEUE
#if DEVELOPMENT || DEBUG
				if (page_from_bg_q == TRUE) {
					if (m_object->internal) {
						vm_pageout_rejected_bq_internal++;
					} else {
						vm_pageout_rejected_bq_external++;
					}
				}
#endif /* DEVELOPMENT || DEBUG */
#endif /* CONFIG_BACKGROUND_QUEUE */

				if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
				}
				vm_pageout_state.vm_pageout_inactive_used++;

				lock_yield_check = TRUE;
				continue;
			}
			/*
			 * Make sure we call pmap_get_refmod() if it
			 * wasn't already called just above, to update
			 * the dirty bit.
			 */
			if ((refmod_state == -1) && !m->vmp_dirty && m->vmp_pmapped) {
				refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
				if (refmod_state & VM_MEM_MODIFIED) {
					SET_PAGE_DIRTY(m, FALSE);
				}
			}
		}
		/*
		 * we've got a candidate page to steal...
		 *
		 * m->vmp_dirty is up to date courtesy of the
		 * preceding check for m->vmp_reference... if
		 * we get here, then m->vmp_reference had to be
		 * FALSE (or possibly "reactivate_limit" was
		 * exceeded), but in either case we called
		 * pmap_get_refmod() and updated both
		 * m->vmp_reference and m->vmp_dirty
		 *
		 * if it's dirty or precious we need to
		 * see if the target queue is throttled
		 * if it is, we need to skip over it by moving it back
		 * to the end of the inactive queue
		 */

		inactive_throttled = FALSE;

		if (m->vmp_dirty || m->vmp_precious) {
			if (object->internal) {
				if (VM_PAGE_Q_THROTTLED(iq)) {
					inactive_throttled = TRUE;
				}
			} else if (VM_PAGE_Q_THROTTLED(eq)) {
				inactive_throttled = TRUE;
			}
		}
throttle_inactive:
		if (!VM_DYNAMIC_PAGING_ENABLED() &&
		    object->internal && m->vmp_dirty &&
		    (object->purgable == VM_PURGABLE_DENY ||
		    object->purgable == VM_PURGABLE_NONVOLATILE ||
		    object->purgable == VM_PURGABLE_VOLATILE)) {
			vm_page_check_pageable_safe(m);
			assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
			vm_page_throttled_count++;

			VM_PAGEOUT_DEBUG(vm_pageout_scan_reclaimed_throttled, 1);

			inactive_burst_count = 0;

			lock_yield_check = TRUE;
			continue;
		}
		if (inactive_throttled == TRUE) {
			vps_deal_with_throttled_queues(m, &object, &vm_pageout_inactive_external_forced_reactivate_limit,
			    &delayed_unlock, &force_anonymous, page_from_bg_q);

			inactive_burst_count = 0;

			if (page_prev_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
				VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
			}

			lock_yield_check = TRUE;
			continue;
		}

		/*
		 * we've got a page that we can steal...
		 * eliminate all mappings and make sure
		 * we have the up-to-date modified state
		 *
		 * if we need to do a pmap_disconnect then we
		 * need to re-evaluate m->vmp_dirty since the pmap_disconnect
		 * provides the true state atomically... the
		 * page was still mapped up to the pmap_disconnect
		 * and may have been dirtied at the last microsecond
		 *
		 * Note that if 'pmapped' is FALSE then the page is not
		 * and has not been in any map, so there is no point calling
		 * pmap_disconnect().  m->vmp_dirty could have been set in anticipation
		 * of likely usage of the page.
		 */
		if (m->vmp_pmapped == TRUE) {
			int pmap_options;

			/*
			 * Don't count this page as going into the compressor
			 * if any of these are true:
			 * 1) compressed pager isn't enabled
			 * 2) Freezer enabled device with compressed pager
			 *    backend (exclusive use) i.e. most of the VM system
			 *    (including vm_pageout_scan) has no knowledge of
			 *    the compressor
			 * 3) This page belongs to a file and hence will not be
			 *    sent into the compressor
			 */
			if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE ||
			    object->internal == FALSE) {
				pmap_options = 0;
			} else if (m->vmp_dirty || m->vmp_precious) {
				/*
				 * VM knows that this page is dirty (or
				 * precious) and needs to be compressed
				 * rather than freed.
				 * Tell the pmap layer to count this page
				 * as "compressed".
				 */
				pmap_options = PMAP_OPTIONS_COMPRESSOR;
			} else {
				/*
				 * VM does not know if the page needs to
				 * be preserved but the pmap layer might tell
				 * us if any mapping has "modified" it.
				 * Let the pmap layer count this page
				 * as compressed if and only if it has been
				 * modified.
				 */
				pmap_options =
				    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
			}
			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m),
			    pmap_options,
			    NULL);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}

		/*
		 * reset our count of pages that have been reclaimed
		 * since the last page was 'stolen'
		 */
		inactive_reclaim_run = 0;

		/*
		 *	If it's clean and not precious, we can free the page.
		 */
		if (!m->vmp_dirty && !m->vmp_precious) {
			vm_pageout_state.vm_pageout_inactive_clean++;

			/*
			 * OK, at this point we have found a page we are going to free.
			 */
#if CONFIG_PHANTOM_CACHE
			if (!object->internal) {
				vm_phantom_cache_add_ghost(m);
			}
#endif
			goto reclaim_page;
		}

		/*
		 * The page may have been dirtied since the last check
		 * for a throttled target queue (which may have been skipped
		 * if the page was clean then).  With the dirty page
		 * disconnected here, we can make one final check.
		 */
		if (object->internal) {
			if (VM_PAGE_Q_THROTTLED(iq)) {
				inactive_throttled = TRUE;
			}
		} else if (VM_PAGE_Q_THROTTLED(eq)) {
			inactive_throttled = TRUE;
		}

		if (inactive_throttled == TRUE) {
			goto throttle_inactive;
		}

#if VM_PRESSURE_EVENTS
#if CONFIG_JETSAM

		/*
		 * If Jetsam is enabled, then the sending
		 * of memory pressure notifications is handled
		 * from the same thread that takes care of high-water
		 * and other jetsams i.e. the memorystatus_thread.
		 */

#else /* CONFIG_JETSAM */

		vm_pressure_response();

#endif /* CONFIG_JETSAM */
#endif /* VM_PRESSURE_EVENTS */

		if (page_prev_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
			VM_PAGEOUT_DEBUG(vm_pageout_speculative_dirty, 1);
		}

		if (object->internal) {
			vm_pageout_vminfo.vm_pageout_inactive_dirty_internal++;
		} else {
			vm_pageout_vminfo.vm_pageout_inactive_dirty_external++;
		}

		/*
		 * internal pages will go to the compressor...
		 * external pages will go to the appropriate pager to be cleaned
		 * and upon completion will end up on 'vm_page_queue_cleaned' which
		 * is a preferred queue to steal from
		 */
		vm_pageout_cluster(m);
		inactive_burst_count = 0;

		/*
		 * back to top of pageout scan loop
		 */
	}
}
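/*
 * Note on the local free list used by vm_pageout_scan above: reclaimed pages
 * are chained through vmp_snext onto local_freeq and handed back to the free
 * list in batches by vm_pageout_prepare_to_block() once delayed_unlock
 * crosses delayed_unlock_limit, rather than being freed one at a time while
 * the page queues lock is held.
 */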
void
vm_page_free_reserve(
	int pages)
{
	int free_after_reserve;

	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT)) {
			vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
		} else {
			vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);
		}
	} else {
		if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT) {
			vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
		} else {
			vm_page_free_reserved += pages;
		}
	}
	free_after_reserve = vm_pageout_state.vm_page_free_count_init - vm_page_free_reserved;

	vm_page_free_min = vm_page_free_reserved +
	    VM_PAGE_FREE_MIN(free_after_reserve);

	if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT) {
		vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;
	}

	vm_page_free_target = vm_page_free_reserved +
	    VM_PAGE_FREE_TARGET(free_after_reserve);

	if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT) {
		vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;
	}

	if (vm_page_free_target < vm_page_free_min + 5) {
		vm_page_free_target = vm_page_free_min + 5;
	}

	vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 2);
}
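/*
 * Relationship between the watermarks computed above: vm_page_free_min is the
 * reserve plus VM_PAGE_FREE_MIN() of what's left (capped at
 * VM_PAGE_FREE_MIN_LIMIT), vm_page_free_target is the reserve plus
 * VM_PAGE_FREE_TARGET() of what's left (capped at VM_PAGE_FREE_TARGET_LIMIT
 * but never less than vm_page_free_min + 5), and vm_page_throttle_limit sits
 * at half of the free target.  So the free target always ends up at least 5
 * pages above the free minimum.
 */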
/*
 *	vm_pageout is the high level pageout daemon.
 */
void
vm_pageout_continue(void)
{
	DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
	VM_PAGEOUT_DEBUG(vm_pageout_scan_event_counter, 1);

	lck_mtx_lock(&vm_page_queue_free_lock);
	vm_pageout_running = TRUE;
	lck_mtx_unlock(&vm_page_queue_free_lock);

	vm_pageout_scan();
	/*
	 * we hold both the vm_page_queue_free_lock
	 * and the vm_page_queues_lock at this point
	 */
	assert(vm_page_free_wanted == 0);
	assert(vm_page_free_wanted_privileged == 0);
	assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);

	vm_pageout_running = FALSE;
#if !CONFIG_EMBEDDED
	if (vm_pageout_waiter) {
		vm_pageout_waiter = FALSE;
		thread_wakeup((event_t)&vm_pageout_waiter);
	}
#endif /* !CONFIG_EMBEDDED */

	lck_mtx_unlock(&vm_page_queue_free_lock);
	vm_page_unlock_queues();

	counter(c_vm_pageout_block++);
	thread_block((thread_continue_t)vm_pageout_continue);
	/*NOTREACHED*/
}
#if !CONFIG_EMBEDDED
kern_return_t
vm_pageout_wait(uint64_t deadline)
{
	kern_return_t kr;

	lck_mtx_lock(&vm_page_queue_free_lock);
	for (kr = KERN_SUCCESS; vm_pageout_running && (KERN_SUCCESS == kr);) {
		vm_pageout_waiter = TRUE;
		if (THREAD_AWAKENED != lck_mtx_sleep_deadline(
			    &vm_page_queue_free_lock, LCK_SLEEP_DEFAULT,
			    (event_t) &vm_pageout_waiter, THREAD_UNINT, deadline)) {
			kr = KERN_OPERATION_TIMED_OUT;
		}
	}
	lck_mtx_unlock(&vm_page_queue_free_lock);

	return kr;
}
#endif /* !CONFIG_EMBEDDED */
static void
vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
{
	vm_page_t       m = NULL;
	vm_object_t     object;
	vm_object_offset_t offset;
	memory_object_t pager;

	/* On systems with a compressor, the external IO thread clears its
	 * VM privileged bit to accommodate large allocations (e.g. bulk UPL
	 * creation)
	 */
	if (vm_pageout_state.vm_pageout_internal_iothread != THREAD_NULL) {
		current_thread()->options &= ~TH_OPT_VMPRIV;
	}

	vm_page_lockspin_queues();

	while (!vm_page_queue_empty(&q->pgo_pending)) {
		q->pgo_busy = TRUE;
		vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);

		assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);

		/*
		 * grab a snapshot of the object and offset this
		 * page is tabled in so that we can relookup this
		 * page after we've taken the object lock - these
		 * fields are stable while we hold the page queues lock
		 * but as soon as we drop it, there is nothing to keep
		 * this page in this object... we hold an activity_in_progress
		 * on this object which will keep it from terminating
		 */
		object = VM_PAGE_OBJECT(m);
		offset = m->vmp_offset;

		m->vmp_q_state = VM_PAGE_NOT_ON_Q;
		VM_PAGE_ZERO_PAGEQ_ENTRY(m);

		vm_page_unlock_queues();

		vm_object_lock(object);

		m = vm_page_lookup(object, offset);

		if (m == NULL || m->vmp_busy || m->vmp_cleaning ||
		    !m->vmp_laundry || (m->vmp_q_state != VM_PAGE_NOT_ON_Q)) {
			/*
			 * it's either the same page that someone else has
			 * started cleaning (or it's finished cleaning or
			 * been put back on the pageout queue), or
			 * the page has been freed or we have found a
			 * new page at this offset... in all of these cases
			 * we merely need to release the activity_in_progress
			 * we took when we put the page on the pageout queue
			 */
			vm_object_activity_end(object);
			vm_object_unlock(object);

			vm_page_lockspin_queues();
			continue;
		}
		pager = object->pager;

		if (pager == MEMORY_OBJECT_NULL) {
			/*
			 * This pager has been destroyed by either
			 * memory_object_destroy or vm_object_destroy, and
			 * so there is nowhere for the page to go.
			 */
			if (m->vmp_free_when_done) {
				/*
				 * Just free the page... VM_PAGE_FREE takes
				 * care of cleaning up all the state...
				 * including doing the vm_pageout_throttle_up
				 */
				VM_PAGE_FREE(m);
			} else {
				vm_page_lockspin_queues();

				vm_pageout_throttle_up(m);
				vm_page_activate(m);

				vm_page_unlock_queues();

				/*
				 *	And we are done with it.
				 */
			}
			vm_object_activity_end(object);
			vm_object_unlock(object);

			vm_page_lockspin_queues();
			continue;
		}
#if 0
		/*
		 * we don't hold the page queue lock
		 * so this check isn't safe to make
		 */
		VM_PAGE_CHECK(m);
#endif
		/*
		 * give back the activity_in_progress reference we
		 * took when we queued up this page and replace it
		 * it with a paging_in_progress reference that will
		 * also hold the paging offset from changing and
		 * prevent the object from terminating
		 */
		vm_object_activity_end(object);
		vm_object_paging_begin(object);
		vm_object_unlock(object);

		/*
		 * Send the data to the pager.
		 * any pageout clustering happens there
		 */
		memory_object_data_return(pager,
		    m->vmp_offset + object->paging_offset,
		    PAGE_SIZE,
		    NULL,
		    NULL,
		    FALSE,
		    FALSE,
		    0);

		vm_object_lock(object);
		vm_object_paging_end(object);
		vm_object_unlock(object);

		vm_pageout_io_throttle();

		vm_page_lockspin_queues();
	}
	q->pgo_busy = FALSE;
	q->pgo_idle = TRUE;

	assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
	vm_page_unlock_queues();

	thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
	/*NOTREACHED*/
}
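/*
 * Note: the external I/O thread above only hands dirty pages to their pager via
 * memory_object_data_return(); the object/offset snapshot and relookup dance is
 * what keeps it correct once the page queues lock has been dropped.
 */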
#define MAX_FREE_BATCH  32

uint32_t vm_compressor_time_thread; /* Set via sysctl to record time accrued by
                                     * compressor threads */
void
vm_pageout_iothread_internal_continue(struct cq *);
void
vm_pageout_iothread_internal_continue(struct cq *cq)
{
	struct vm_pageout_queue *q;
	vm_page_t       m = NULL;
	boolean_t       pgo_draining;
	vm_page_t       local_q;
	int             local_cnt;
	vm_page_t       local_freeq = NULL;
	int             local_freed = 0;
	int             local_batch_size;
#if DEVELOPMENT || DEBUG
	int             ncomps = 0;
	boolean_t       marked_active = FALSE;
#endif
	KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);

	q = cq->q;

#if __AMP__
	if (vm_compressor_ebound && (vm_pageout_state.vm_compressor_thread_count > 1)) {
		local_batch_size = (q->pgo_maxlaundry >> 3);
		local_batch_size = MAX(local_batch_size, 16);
	} else {
		local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
	}
#else
	local_batch_size = q->pgo_maxlaundry / (vm_pageout_state.vm_compressor_thread_count * 2);
#endif

#if RECORD_THE_COMPRESSED_DATA
	if (q->pgo_laundry) {
		c_compressed_record_init();
	}
#endif
	while (TRUE) {
		int     pages_left_on_q = 0;

		local_cnt = 0;
		local_q = NULL;

		KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);

		vm_page_lock_queues();
#if DEVELOPMENT || DEBUG
		if (marked_active == FALSE) {
			vmct_active++;
			vmct_state[cq->id] = VMCT_ACTIVE;
			marked_active = TRUE;
			if (vmct_active == 1) {
				vm_compressor_epoch_start = mach_absolute_time();
			}
		}
#endif
		KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);

		KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, q->pgo_laundry, 0, 0, 0, 0);

		while (!vm_page_queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {
			vm_page_queue_remove_first(&q->pgo_pending, m, vmp_pageq);
			assert(m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q);

			m->vmp_q_state = VM_PAGE_NOT_ON_Q;
			VM_PAGE_ZERO_PAGEQ_ENTRY(m);
			m->vmp_laundry = FALSE;

			m->vmp_snext = local_q;
			local_q = m;
			local_cnt++;
		}
		if (local_q == NULL) {
			break;
		}
		q->pgo_busy = TRUE;

		if ((pgo_draining = q->pgo_draining) == FALSE) {
			vm_pageout_throttle_up_batch(q, local_cnt);
			pages_left_on_q = q->pgo_laundry;
		} else {
			pages_left_on_q = q->pgo_laundry - local_cnt;
		}

		vm_page_unlock_queues();

#if !RECORD_THE_COMPRESSED_DATA
		if (pages_left_on_q >= local_batch_size && cq->id < (vm_pageout_state.vm_compressor_thread_count - 1)) {
			thread_wakeup((event_t) ((uintptr_t)&q->pgo_pending + cq->id + 1));
		}
#endif
		KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, q->pgo_laundry, 0, 0, 0, 0);

		while (local_q) {
			KERNEL_DEBUG(0xe0400024 | DBG_FUNC_START, local_cnt, 0, 0, 0, 0);

			m = local_q;
			local_q = m->vmp_snext;
			m->vmp_snext = NULL;

			if (vm_pageout_compress_page(&cq->current_chead, cq->scratch_buf, m) == KERN_SUCCESS) {
#if DEVELOPMENT || DEBUG
				ncomps++;
#endif
				KERNEL_DEBUG(0xe0400024 | DBG_FUNC_END, local_cnt, 0, 0, 0, 0);

				m->vmp_snext = local_freeq;
				local_freeq = m;
				local_freed++;

				if (local_freed >= MAX_FREE_BATCH) {
					OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

					vm_page_free_list(local_freeq, TRUE);

					local_freeq = NULL;
					local_freed = 0;
				}
			}
			while (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
				kern_return_t   wait_result;
				int             need_wakeup = 0;

				if (local_freeq) {
					OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

					vm_page_free_list(local_freeq, TRUE);
					local_freeq = NULL;
					local_freed = 0;

					continue;
				}
				lck_mtx_lock_spin(&vm_page_queue_free_lock);

				if (vm_page_free_count < COMPRESSOR_FREE_RESERVED_LIMIT) {
					if (vm_page_free_wanted_privileged++ == 0) {
						need_wakeup = 1;
					}
					wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);

					lck_mtx_unlock(&vm_page_queue_free_lock);

					if (need_wakeup) {
						thread_wakeup((event_t)&vm_page_free_wanted);
					}

					if (wait_result == THREAD_WAITING) {
						thread_block(THREAD_CONTINUE_NULL);
					}
				} else {
					lck_mtx_unlock(&vm_page_queue_free_lock);
				}
			}
		}
		if (local_freeq) {
			OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);

			vm_page_free_list(local_freeq, TRUE);
			local_freeq = NULL;
			local_freed = 0;
		}
		if (pgo_draining == TRUE) {
			vm_page_lockspin_queues();
			vm_pageout_throttle_up_batch(q, local_cnt);
			vm_page_unlock_queues();
		}
	}
	KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/*
	 * queue lock is held and our q is empty
	 */
	q->pgo_busy = FALSE;
	q->pgo_idle = TRUE;

	assert_wait((event_t) ((uintptr_t)&q->pgo_pending + cq->id), THREAD_UNINT);
#if DEVELOPMENT || DEBUG
	if (marked_active == TRUE) {
		vmct_active--;
		vmct_state[cq->id] = VMCT_IDLE;

		if (vmct_active == 0) {
			vm_compressor_epoch_stop = mach_absolute_time();
			assertf(vm_compressor_epoch_stop >= vm_compressor_epoch_start,
			    "Compressor epoch non-monotonic: 0x%llx -> 0x%llx",
			    vm_compressor_epoch_start, vm_compressor_epoch_stop);
			/* This interval includes intervals where one or more
			 * compressor threads were pre-empted
			 */
			vmct_stats.vmct_cthreads_total += vm_compressor_epoch_stop - vm_compressor_epoch_start;
		}
	}
#endif
	vm_page_unlock_queues();
#if DEVELOPMENT || DEBUG
	if (__improbable(vm_compressor_time_thread)) {
		vmct_stats.vmct_runtimes[cq->id] = thread_get_runtime_self();
		vmct_stats.vmct_pages[cq->id] += ncomps;
		vmct_stats.vmct_iterations[cq->id]++;
		if (ncomps > vmct_stats.vmct_maxpages[cq->id]) {
			vmct_stats.vmct_maxpages[cq->id] = ncomps;
		}
		if (ncomps < vmct_stats.vmct_minpages[cq->id]) {
			vmct_stats.vmct_minpages[cq->id] = ncomps;
		}
	}
#endif

	KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);

	thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
	/*NOTREACHED*/
}
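/*
 * Note: each compressor thread drains its share of the internal pageout queue in
 * batches (local_q), compresses the pages via vm_pageout_compress_page(), and
 * frees them in groups of up to MAX_FREE_BATCH to limit free-list lock traffic.
 */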
kern_return_t
vm_pageout_compress_page(void **current_chead, char *scratch_buf, vm_page_t m)
{
	vm_object_t     object;
	memory_object_t pager;
	int             compressed_count_delta;
	kern_return_t   retval;

	object = VM_PAGE_OBJECT(m);

	assert(!m->vmp_free_when_done);
	assert(!m->vmp_laundry);

	pager = object->pager;

	if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
		KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);

		vm_object_lock(object);

		/*
		 * If there is no memory object for the page, create
		 * one and hand it to the compression pager.
		 */

		if (!object->pager_initialized) {
			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
		}
		if (!object->pager_initialized) {
			vm_object_compressor_pager_create(object);
		}

		pager = object->pager;

		if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {
			/*
			 * Still no pager for the object,
			 * or the pager has been destroyed.
			 * Reactivate the page.
			 *
			 * Should only happen if there is no
			 * compression pager
			 */
			PAGE_WAKEUP_DONE(m);

			vm_page_lockspin_queues();
			vm_page_activate(m);
			VM_PAGEOUT_DEBUG(vm_pageout_dirty_no_pager, 1);
			vm_page_unlock_queues();

			/*
			 *	And we are done with it.
			 */
			vm_object_activity_end(object);
			vm_object_unlock(object);

			return KERN_FAILURE;
		}
		vm_object_unlock(object);

		KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
	}
	assert(object->pager_initialized && pager != MEMORY_OBJECT_NULL);
	assert(object->activity_in_progress > 0);

	retval = vm_compressor_pager_put(
		pager,
		m->vmp_offset + object->paging_offset,
		VM_PAGE_GET_PHYS_PAGE(m),
		current_chead,
		scratch_buf,
		&compressed_count_delta);

	vm_object_lock(object);

	assert(object->activity_in_progress > 0);
	assert(VM_PAGE_OBJECT(m) == object);
	assert( !VM_PAGE_WIRED(m));

	vm_compressor_pager_count(pager,
	    compressed_count_delta,
	    FALSE, /* shared_lock */
	    object);

	if (retval == KERN_SUCCESS) {
		/*
		 * If the object is purgeable, its owner's
		 * purgeable ledgers will be updated in
		 * vm_page_remove() but the page still
		 * contributes to the owner's memory footprint,
		 * so account for it as such.
		 */
		if ((object->purgable != VM_PURGABLE_DENY ||
		    object->vo_ledger_tag) &&
		    object->vo_owner != NULL) {
			/* one more compressed purgeable/tagged page */
			vm_object_owner_compressed_update(object,
			    +1);
		}
		VM_STAT_INCR(compressions);

		if (m->vmp_tabled) {
			vm_page_remove(m, TRUE);
		}
	} else {
		PAGE_WAKEUP_DONE(m);

		vm_page_lockspin_queues();

		vm_page_activate(m);
		vm_pageout_vminfo.vm_compressor_failed++;

		vm_page_unlock_queues();
	}
	vm_object_activity_end(object);
	vm_object_unlock(object);

	return retval;
}
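/*
 * Note: on KERN_SUCCESS the page's contents now live in the compressor pager, so
 * the page is removed from its object and can be freed by the caller; on failure
 * the page is simply reactivated and vm_compressor_failed is bumped.
 */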
static void
vm_pageout_adjust_eq_iothrottle(struct vm_pageout_queue *eq, boolean_t req_lowpriority)
{
	uint32_t        policy;

	if (hibernate_cleaning_in_progress == TRUE) {
		req_lowpriority = FALSE;
	}
	if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority) {
		vm_page_unlock_queues();

		if (req_lowpriority == TRUE) {
			policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
			DTRACE_VM(laundrythrottle);
		} else {
			policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
			DTRACE_VM(laundryunthrottle);
		}
		proc_set_thread_policy_with_tid(kernel_task, eq->pgo_tid,
		    TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);

		vm_page_lock_queues();
		eq->pgo_lowpriority = req_lowpriority;
	}
}
static void
vm_pageout_iothread_external(void)
{
	thread_t        self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);

	proc_set_thread_policy(self, TASK_POLICY_EXTERNAL,
	    TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);

	vm_page_lock_queues();

	vm_pageout_queue_external.pgo_tid = self->thread_id;
	vm_pageout_queue_external.pgo_lowpriority = TRUE;
	vm_pageout_queue_external.pgo_inited = TRUE;

	vm_page_unlock_queues();

	vm_pageout_iothread_external_continue(&vm_pageout_queue_external);

	/*NOTREACHED*/
}
static void
vm_pageout_iothread_internal(struct cq *cq)
{
	thread_t        self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	vm_page_lock_queues();

	vm_pageout_queue_internal.pgo_tid = self->thread_id;
	vm_pageout_queue_internal.pgo_lowpriority = TRUE;
	vm_pageout_queue_internal.pgo_inited = TRUE;

	vm_page_unlock_queues();

	if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
		thread_vm_bind_group_add();
	}

#if CONFIG_THREAD_GROUPS
	thread_group_vm_add();
#endif /* CONFIG_THREAD_GROUPS */

#if __AMP__
	if (vm_compressor_ebound) {
		/*
		 * Use the soft bound option for vm_compressor to allow it to run on
		 * P-cores if E-cluster is unavailable.
		 */
		thread_bind_cluster_type(self, 'E', true);
	}
#endif /* __AMP__ */

	thread_set_thread_name(current_thread(), "VM_compressor");
#if DEVELOPMENT || DEBUG
	vmct_stats.vmct_minpages[cq->id] = INT32_MAX;
#endif
	vm_pageout_iothread_internal_continue(cq);

	/*NOTREACHED*/
}
kern_return_t
vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
{
	if (OSCompareAndSwapPtr(NULL, ptrauth_nop_cast(void *, func), (void * volatile *) &consider_buffer_cache_collect)) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE; /* Already set */
	}
}

extern boolean_t memorystatus_manual_testing_on;
extern unsigned int memorystatus_level;
#if VM_PRESSURE_EVENTS

boolean_t vm_pressure_events_enabled = FALSE;

void
vm_pressure_response(void)
{
	vm_pressure_level_t     old_level = kVMPressureNormal;
	int                     new_level = -1;
	unsigned int            total_pages;
	uint64_t                available_memory = 0;

	if (vm_pressure_events_enabled == FALSE) {
		return;
	}

#if CONFIG_EMBEDDED

	available_memory = (uint64_t) memorystatus_available_pages;

#else /* CONFIG_EMBEDDED */

	available_memory = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;
	memorystatus_available_pages = (uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY;

#endif /* CONFIG_EMBEDDED */

	total_pages = (unsigned int) atop_64(max_mem);
#if CONFIG_SECLUDED_MEMORY
	total_pages -= vm_page_secluded_count;
#endif /* CONFIG_SECLUDED_MEMORY */
	memorystatus_level = (unsigned int) ((available_memory * 100) / total_pages);

	if (memorystatus_manual_testing_on) {
		return;
	}

	old_level = memorystatus_vm_pressure_level;

	switch (memorystatus_vm_pressure_level) {
	case kVMPressureNormal:
		if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
			new_level = kVMPressureCritical;
		} else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
			new_level = kVMPressureWarning;
		}
		break;

	case kVMPressureWarning:
	case kVMPressureUrgent:
		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
			new_level = kVMPressureNormal;
		} else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
			new_level = kVMPressureCritical;
		}
		break;

	case kVMPressureCritical:
		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
			new_level = kVMPressureNormal;
		} else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
			new_level = kVMPressureWarning;
		}
		break;

	default:
		return;
	}

	if (new_level != -1) {
		memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;

		if (new_level != (int) old_level) {
			VM_DEBUG_CONSTANT_EVENT(vm_pressure_level_change, VM_PRESSURE_LEVEL_CHANGE, DBG_FUNC_NONE,
			    new_level, old_level, 0, 0);
		}

		if ((memorystatus_vm_pressure_level != kVMPressureNormal) || (old_level != memorystatus_vm_pressure_level)) {
			if (vm_pageout_state.vm_pressure_thread_running == FALSE) {
				thread_wakeup(&vm_pressure_thread);
			}

			if (old_level != memorystatus_vm_pressure_level) {
				thread_wakeup(&vm_pageout_state.vm_pressure_changed);
			}
		}
	}
}
#endif /* VM_PRESSURE_EVENTS */
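/*
 * Note: vm_pressure_response() recomputes memorystatus_level as the available
 * (non-compressed) memory expressed as a percentage of total pages, then walks
 * the pressure state machine (normal <-> warning/urgent <-> critical), waking
 * the pressure thread and any level-change waiters when a transition happens.
 */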
/*
 * Function called by a kernel thread to either get the current pressure level or
 * wait until memory pressure changes from a given level.
 */
kern_return_t
mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level)
{
#if !VM_PRESSURE_EVENTS

	return KERN_FAILURE;

#else /* VM_PRESSURE_EVENTS */

	wait_result_t       wr = 0;
	vm_pressure_level_t old_level = memorystatus_vm_pressure_level;

	if (pressure_level == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (*pressure_level == kVMPressureJetsam) {
		if (!wait_for_pressure) {
			return KERN_INVALID_ARGUMENT;
		}

		lck_mtx_lock(&memorystatus_jetsam_fg_band_lock);
		wr = assert_wait((event_t)&memorystatus_jetsam_fg_band_waiters,
		    THREAD_INTERRUPTIBLE);
		if (wr == THREAD_WAITING) {
			++memorystatus_jetsam_fg_band_waiters;
			lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
			wr = thread_block(THREAD_CONTINUE_NULL);
		} else {
			lck_mtx_unlock(&memorystatus_jetsam_fg_band_lock);
		}
		if (wr != THREAD_AWAKENED) {
			return KERN_ABORTED;
		}
		*pressure_level = kVMPressureJetsam;
		return KERN_SUCCESS;
	}

	if (wait_for_pressure == TRUE) {
		while (old_level == *pressure_level) {
			wr = assert_wait((event_t) &vm_pageout_state.vm_pressure_changed,
			    THREAD_INTERRUPTIBLE);
			if (wr == THREAD_WAITING) {
				wr = thread_block(THREAD_CONTINUE_NULL);
			}
			if (wr == THREAD_INTERRUPTED) {
				return KERN_ABORTED;
			}

			if (wr == THREAD_AWAKENED) {
				old_level = memorystatus_vm_pressure_level;
			}
		}
	}

	*pressure_level = old_level;
	return KERN_SUCCESS;
#endif /* VM_PRESSURE_EVENTS */
}
#if VM_PRESSURE_EVENTS
void
vm_pressure_thread(void)
{
	static boolean_t thread_initialized = FALSE;

	if (thread_initialized == TRUE) {
		vm_pageout_state.vm_pressure_thread_running = TRUE;
		consider_vm_pressure_events();
		vm_pageout_state.vm_pressure_thread_running = FALSE;
	}

	thread_set_thread_name(current_thread(), "VM_pressure");
	thread_initialized = TRUE;
	assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
	thread_block((thread_continue_t)vm_pressure_thread);
}
#endif /* VM_PRESSURE_EVENTS */
/*
 * called once per-second via "compute_averages"
 */
void
compute_pageout_gc_throttle(__unused void *arg)
{
	if (vm_pageout_vminfo.vm_pageout_considered_page != vm_pageout_state.vm_pageout_considered_page_last) {
		vm_pageout_state.vm_pageout_considered_page_last = vm_pageout_vminfo.vm_pageout_considered_page;

		thread_wakeup((event_t) &vm_pageout_garbage_collect);
	}
}
/*
 * vm_pageout_garbage_collect can also be called when the zone allocator needs
 * to call zone_gc on a different thread in order to trigger zone-map-exhaustion
 * jetsams. We need to check if the zone map size is above its jetsam limit to
 * decide if this was indeed the case.
 *
 * We need to do this on a different thread because of the following reasons:
 *
 * 1. In the case of synchronous jetsams, the leaking process can try to jetsam
 * itself causing the system to hang. We perform synchronous jetsams if we're
 * leaking in the VM map entries zone, so the leaking process could be doing a
 * zalloc for a VM map entry while holding its vm_map lock, when it decides to
 * jetsam itself. We also need the vm_map lock on the process termination path,
 * which would now lead the dying process to deadlock against itself.
 *
 * 2. The jetsam path might need to allocate zone memory itself. We could try
 * using the non-blocking variant of zalloc for this path, but we can still
 * end up trying to do a kernel_memory_allocate when the zone maps are almost
 * full.
 */
void
vm_pageout_garbage_collect(int collect)
{
	if (collect) {
		if (is_zone_map_nearing_exhaustion()) {
			/*
			 * Woken up by the zone allocator for zone-map-exhaustion jetsams.
			 *
			 * Bail out after calling zone_gc (which triggers the
			 * zone-map-exhaustion jetsams). If we fall through, the subsequent
			 * operations that clear out a bunch of caches might allocate zone
			 * memory themselves (for eg. vm_map operations would need VM map
			 * entries). Since the zone map is almost full at this point, we
			 * could end up with a panic. We just need to quickly jetsam a
			 * process and exit here.
			 *
			 * It could so happen that we were woken up to relieve memory
			 * pressure and the zone map also happened to be near its limit at
			 * the time, in which case we'll skip out early. But that should be
			 * ok; if memory pressure persists, the thread will simply be woken
			 * up again.
			 */
			consider_zone_gc(TRUE);
		} else {
			/* Woken up by vm_pageout_scan or compute_pageout_gc_throttle. */
			boolean_t buf_large_zfree = FALSE;
			boolean_t first_try = TRUE;

			consider_machine_collect();

			do {
				if (consider_buffer_cache_collect != NULL) {
					buf_large_zfree = (*consider_buffer_cache_collect)(0);
				}
				if (first_try == TRUE || buf_large_zfree == TRUE) {
					/*
					 * consider_zone_gc should be last, because the other operations
					 * might return memory to zones.
					 */
					consider_zone_gc(FALSE);
				}
				first_try = FALSE;
			} while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);

			consider_machine_adjust();
		}
	}

	assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);

	thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
	/*NOTREACHED*/
}
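/*
 * Note: the garbage-collect thread distinguishes two wakeup sources: zone-map
 * exhaustion (handled by an immediate consider_zone_gc(TRUE) and nothing else)
 * and ordinary memory pressure, where the cache-draining loop runs with
 * consider_zone_gc() last so the other collectors can return memory to zones first.
 */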
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */
void
vm_set_restrictions(unsigned int num_cpus)
{
	int vm_restricted_to_single_processor = 0;

	if (PE_parse_boot_argn("vm_restricted_to_single_processor", &vm_restricted_to_single_processor, sizeof(vm_restricted_to_single_processor))) {
		kprintf("Overriding vm_restricted_to_single_processor to %d\n", vm_restricted_to_single_processor);
		vm_pageout_state.vm_restricted_to_single_processor = (vm_restricted_to_single_processor ? TRUE : FALSE);
	} else {
		assert(num_cpus > 0);

		if (num_cpus <= 3) {
			/*
			 * on systems with a limited number of CPUS, bind the
			 * 4 major threads that can free memory and that tend to use
			 * a fair bit of CPU under pressured conditions to a single processor.
			 * This insures that these threads don't hog all of the available CPUs
			 * (important for camera launch), while allowing them to run independently
			 * w/r to locks... the 4 threads are
			 * vm_pageout_scan,  vm_pageout_iothread_internal (compressor),
			 * vm_compressor_swap_trigger_thread (minor and major compactions),
			 * memorystatus_thread (jetsams).
			 *
			 * the first time the thread is run, it is responsible for checking the
			 * state of vm_restricted_to_single_processor, and if TRUE it calls
			 * thread_bind_master...   someday this should be replaced with a group
			 * scheduling mechanism and KPI.
			 */
			vm_pageout_state.vm_restricted_to_single_processor = TRUE;
		} else {
			vm_pageout_state.vm_restricted_to_single_processor = FALSE;
		}
	}
}
void
vm_pageout(void)
{
	thread_t        self = current_thread();
	thread_t        thread;
	kern_return_t   result;

	/*
	 * Set thread privileges.
	 */
	vm_pageout_scan_thread = self;

#if CONFIG_VPS_DYNAMIC_PRIO

	int vps_dynprio_bootarg = 0;

	if (PE_parse_boot_argn("vps_dynamic_priority_enabled", &vps_dynprio_bootarg, sizeof(vps_dynprio_bootarg))) {
		vps_dynamic_priority_enabled = (vps_dynprio_bootarg ? TRUE : FALSE);
		kprintf("Overriding vps_dynamic_priority_enabled to %d\n", vps_dynamic_priority_enabled);
	} else {
		if (vm_pageout_state.vm_restricted_to_single_processor == TRUE) {
			vps_dynamic_priority_enabled = TRUE;
		} else {
			vps_dynamic_priority_enabled = FALSE;
		}
	}

	if (vps_dynamic_priority_enabled) {
		sched_set_kernel_thread_priority(self, MAXPRI_THROTTLE);
		thread_set_eager_preempt(self);
	} else {
		sched_set_kernel_thread_priority(self, BASEPRI_VM);
	}

#else /* CONFIG_VPS_DYNAMIC_PRIO */

	vps_dynamic_priority_enabled = FALSE;
	sched_set_kernel_thread_priority(self, BASEPRI_VM);

#endif /* CONFIG_VPS_DYNAMIC_PRIO */

	thread_lock(self);
	self->options |= TH_OPT_VMPRIV;
	thread_unlock(self);

	if (!self->reserved_stack) {
		self->reserved_stack = self->kernel_stack;
	}

	if (vm_pageout_state.vm_restricted_to_single_processor == TRUE &&
	    vps_dynamic_priority_enabled == FALSE) {
		thread_vm_bind_group_add();
	}

#if CONFIG_THREAD_GROUPS
	thread_group_vm_add();
#endif /* CONFIG_THREAD_GROUPS */

#if __AMP__
	PE_parse_boot_argn("vmpgo_pcluster", &vm_pgo_pbound, sizeof(vm_pgo_pbound));
	if (vm_pgo_pbound) {
		/*
		 * Use the soft bound option for vm pageout to allow it to run on
		 * E-cores if P-cluster is unavailable.
		 */
		thread_bind_cluster_type(self, 'P', true);
	}
#endif /* __AMP__ */

	thread_set_thread_name(current_thread(), "VM_pageout_scan");

	/*
	 *	Initialize some paging parameters.
	 */

	vm_pageout_state.vm_pressure_thread_running = FALSE;
	vm_pageout_state.vm_pressure_changed = FALSE;
	vm_pageout_state.memorystatus_purge_on_warning = 2;
	vm_pageout_state.memorystatus_purge_on_urgent = 5;
	vm_pageout_state.memorystatus_purge_on_critical = 8;
	vm_pageout_state.vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
	vm_pageout_state.vm_page_speculative_percentage = 5;
	vm_pageout_state.vm_page_speculative_target = 0;

	vm_pageout_state.vm_pageout_external_iothread = THREAD_NULL;
	vm_pageout_state.vm_pageout_internal_iothread = THREAD_NULL;

	vm_pageout_state.vm_pageout_swap_wait = 0;
	vm_pageout_state.vm_pageout_idle_wait = 0;
	vm_pageout_state.vm_pageout_empty_wait = 0;
	vm_pageout_state.vm_pageout_burst_wait = 0;
	vm_pageout_state.vm_pageout_deadlock_wait = 0;
	vm_pageout_state.vm_pageout_deadlock_relief = 0;
	vm_pageout_state.vm_pageout_burst_inactive_throttle = 0;

	vm_pageout_state.vm_pageout_inactive = 0;
	vm_pageout_state.vm_pageout_inactive_used = 0;
	vm_pageout_state.vm_pageout_inactive_clean = 0;

	vm_pageout_state.vm_memory_pressure = 0;
	vm_pageout_state.vm_page_filecache_min = 0;
#if CONFIG_JETSAM
	vm_pageout_state.vm_page_filecache_min_divisor = 70;
	vm_pageout_state.vm_page_xpmapped_min_divisor = 40;
#else
	vm_pageout_state.vm_page_filecache_min_divisor = 27;
	vm_pageout_state.vm_page_xpmapped_min_divisor = 36;
#endif
	vm_pageout_state.vm_page_free_count_init = vm_page_free_count;

	vm_pageout_state.vm_pageout_considered_page_last = 0;

	if (vm_pageout_state.vm_pageout_swap_wait == 0) {
		vm_pageout_state.vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;
	}

	if (vm_pageout_state.vm_pageout_idle_wait == 0) {
		vm_pageout_state.vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;
	}

	if (vm_pageout_state.vm_pageout_burst_wait == 0) {
		vm_pageout_state.vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;
	}

	if (vm_pageout_state.vm_pageout_empty_wait == 0) {
		vm_pageout_state.vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;
	}

	if (vm_pageout_state.vm_pageout_deadlock_wait == 0) {
		vm_pageout_state.vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;
	}

	if (vm_pageout_state.vm_pageout_deadlock_relief == 0) {
		vm_pageout_state.vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;
	}

	if (vm_pageout_state.vm_pageout_burst_inactive_throttle == 0) {
		vm_pageout_state.vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;
	}
	/*
	 * even if we've already called vm_page_free_reserve
	 * call it again here to insure that the targets are
	 * accurately calculated (it uses vm_page_free_count_init)
	 * calling it with an arg of 0 will not change the reserve
	 * but will re-calculate free_min and free_target
	 */
	if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
		vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
	} else {
		vm_page_free_reserve(0);
	}

	vm_page_queue_init(&vm_pageout_queue_external.pgo_pending);
	vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
	vm_pageout_queue_external.pgo_laundry = 0;
	vm_pageout_queue_external.pgo_idle = FALSE;
	vm_pageout_queue_external.pgo_busy = FALSE;
	vm_pageout_queue_external.pgo_throttled = FALSE;
	vm_pageout_queue_external.pgo_draining = FALSE;
	vm_pageout_queue_external.pgo_lowpriority = FALSE;
	vm_pageout_queue_external.pgo_tid = -1;
	vm_pageout_queue_external.pgo_inited = FALSE;

	vm_page_queue_init(&vm_pageout_queue_internal.pgo_pending);
	vm_pageout_queue_internal.pgo_maxlaundry = 0;
	vm_pageout_queue_internal.pgo_laundry = 0;
	vm_pageout_queue_internal.pgo_idle = FALSE;
	vm_pageout_queue_internal.pgo_busy = FALSE;
	vm_pageout_queue_internal.pgo_throttled = FALSE;
	vm_pageout_queue_internal.pgo_draining = FALSE;
	vm_pageout_queue_internal.pgo_lowpriority = FALSE;
	vm_pageout_queue_internal.pgo_tid = -1;
	vm_pageout_queue_internal.pgo_inited = FALSE;

	/* internal pageout thread started when default pager registered first time */
	/* external pageout and garbage collection threads started here */

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
	    BASEPRI_VM,
	    &vm_pageout_state.vm_pageout_external_iothread);
	if (result != KERN_SUCCESS) {
		panic("vm_pageout_iothread_external: create failed");
	}
	thread_set_thread_name(vm_pageout_state.vm_pageout_external_iothread, "VM_pageout_external_iothread");
	thread_deallocate(vm_pageout_state.vm_pageout_external_iothread);

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
	    BASEPRI_DEFAULT,
	    &thread);
	if (result != KERN_SUCCESS) {
		panic("vm_pageout_garbage_collect: create failed");
	}
	thread_set_thread_name(thread, "VM_pageout_garbage_collect");
	thread_deallocate(thread);

#if VM_PRESSURE_EVENTS
	result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
	    BASEPRI_DEFAULT,
	    &thread);

	if (result != KERN_SUCCESS) {
		panic("vm_pressure_thread: create failed");
	}

	thread_deallocate(thread);
#endif

	vm_object_reaper_init();

	bzero(&vm_config, sizeof(vm_config));

	switch (vm_compressor_mode) {
	case VM_PAGER_DEFAULT:
		printf("mapping deprecated VM_PAGER_DEFAULT to VM_PAGER_COMPRESSOR_WITH_SWAP\n");
		OS_FALLTHROUGH;

	case VM_PAGER_COMPRESSOR_WITH_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		vm_config.compressor_is_active = TRUE;
		vm_config.swap_is_active = TRUE;
		break;

	case VM_PAGER_COMPRESSOR_NO_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		vm_config.compressor_is_active = TRUE;
		break;

	case VM_PAGER_FREEZER_DEFAULT:
		printf("mapping deprecated VM_PAGER_FREEZER_DEFAULT to VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP\n");
		OS_FALLTHROUGH;

	case VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		break;

	case VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP:
		vm_config.compressor_is_present = TRUE;
		vm_config.swap_is_present = TRUE;
		vm_config.compressor_is_active = TRUE;
		vm_config.freezer_swap_is_active = TRUE;
		break;

	case VM_PAGER_NOT_CONFIGURED:
		break;

	default:
		printf("unknown compressor mode - %x\n", vm_compressor_mode);
		break;
	}
	if (VM_CONFIG_COMPRESSOR_IS_PRESENT) {
		vm_compressor_pager_init();
	}

#if VM_PRESSURE_EVENTS
	vm_pressure_events_enabled = TRUE;
#endif /* VM_PRESSURE_EVENTS */

#if CONFIG_PHANTOM_CACHE
	vm_phantom_cache_init();
#endif
#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
	printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
	    (uint64_t) vm_page_fake_buckets_start,
	    (uint64_t) vm_page_fake_buckets_end);
	pmap_protect(kernel_pmap,
	    vm_page_fake_buckets_start,
	    vm_page_fake_buckets_end,
	    VM_PROT_READ);
//	*(char *) vm_page_fake_buckets_start = 'x';	/* panic! */
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */

#if VM_OBJECT_TRACKING
	vm_object_tracking_init();
#endif /* VM_OBJECT_TRACKING */

	vm_pageout_continue();

	/*
	 * The vm_pageout_continue() call above never returns, so the code below is never
	 * executed.  We take advantage of this to declare several DTrace VM related probe
	 * points that our kernel doesn't have an analog for.  These are probe points that
	 * exist in Solaris and are in the DTrace documentation, so people may have written
	 * scripts that use them.  Declaring the probe points here means their scripts will
	 * compile and execute which we want for portability of the scripts, but since this
	 * section of code is never reached, the probe points will simply never fire.  Yes,
	 * this is basically a hack.  The problem is the DTrace probe points were chosen with
	 * Solaris specific VM events in mind, not portability to different VM implementations.
	 */
	DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
}
kern_return_t
vm_pageout_internal_start(void)
{
	kern_return_t   result;
	host_basic_info_data_t hinfo;
	vm_offset_t     buf, bufsize;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
	host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

	assert(hinfo.max_cpus > 0);

#if CONFIG_EMBEDDED
	vm_pageout_state.vm_compressor_thread_count = 1;
#else
	if (hinfo.max_cpus > 4) {
		vm_pageout_state.vm_compressor_thread_count = 2;
	} else {
		vm_pageout_state.vm_compressor_thread_count = 1;
	}
#endif
	PE_parse_boot_argn("vmcomp_threads", &vm_pageout_state.vm_compressor_thread_count,
	    sizeof(vm_pageout_state.vm_compressor_thread_count));

#if __AMP__
	PE_parse_boot_argn("vmcomp_ecluster", &vm_compressor_ebound, sizeof(vm_compressor_ebound));
	if (vm_compressor_ebound) {
		vm_pageout_state.vm_compressor_thread_count = 2;
	}
#endif
	if (vm_pageout_state.vm_compressor_thread_count >= hinfo.max_cpus) {
		vm_pageout_state.vm_compressor_thread_count = hinfo.max_cpus - 1;
	}
	if (vm_pageout_state.vm_compressor_thread_count <= 0) {
		vm_pageout_state.vm_compressor_thread_count = 1;
	} else if (vm_pageout_state.vm_compressor_thread_count > MAX_COMPRESSOR_THREAD_COUNT) {
		vm_pageout_state.vm_compressor_thread_count = MAX_COMPRESSOR_THREAD_COUNT;
	}

	vm_pageout_queue_internal.pgo_maxlaundry =
	    (vm_pageout_state.vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;

	PE_parse_boot_argn("vmpgoi_maxlaundry",
	    &vm_pageout_queue_internal.pgo_maxlaundry,
	    sizeof(vm_pageout_queue_internal.pgo_maxlaundry));

	bufsize = COMPRESSOR_SCRATCH_BUF_SIZE;
	if (kernel_memory_allocate(kernel_map, &buf,
	    bufsize * vm_pageout_state.vm_compressor_thread_count,
	    0, KMA_KOBJECT | KMA_PERMANENT, VM_KERN_MEMORY_COMPRESSOR)) {
		panic("vm_pageout_internal_start: Unable to allocate %zd bytes",
		    (size_t)(bufsize * vm_pageout_state.vm_compressor_thread_count));
	}

	for (int i = 0; i < vm_pageout_state.vm_compressor_thread_count; i++) {
		ciq[i].id = i;
		ciq[i].q = &vm_pageout_queue_internal;
		ciq[i].current_chead = NULL;
		ciq[i].scratch_buf = (char *)(buf + i * bufsize);

		result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal,
		    (void *)&ciq[i], BASEPRI_VM,
		    &vm_pageout_state.vm_pageout_internal_iothread);

		if (result == KERN_SUCCESS) {
			thread_deallocate(vm_pageout_state.vm_pageout_internal_iothread);
		} else {
			break;
		}
	}
	return result;
}
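/*
 * Note: the number of compressor threads is clamped to [1, MAX_COMPRESSOR_THREAD_COUNT]
 * and to fewer than the number of CPUs; each thread gets its own struct cq with a
 * private scratch buffer carved out of a single KMA_PERMANENT allocation.
 */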
#if CONFIG_IOSCHED
/*
 * To support I/O Expedite for compressed files we mark the upls with special flags.
 * The way decmpfs works is that we create a big upl which marks all the pages needed to
 * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
 * then issues smaller I/Os for compressed I/Os, deflates them and puts the data into the pages
 * being held in the big original UPL. We mark each of these smaller UPLs with the flag
 * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
 * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
 * by the req upl lock (the reverse link doesnt need synch. since we never inspect this link
 * unless the real I/O upl is being destroyed).
 */
static void
upl_set_decmp_info(upl_t upl, upl_t src_upl)
{
	assert((src_upl->flags & UPL_DECMP_REQ) != 0);

	upl_lock(src_upl);
	if (src_upl->decmp_io_upl) {
		/*
		 * If there is already an alive real I/O UPL, ignore this new UPL.
		 * This case should rarely happen and even if it does, it just means
		 * that we might issue a spurious expedite which the driver is expected
		 * to handle.
		 */
		upl_unlock(src_upl);
		return;
	}
	src_upl->decmp_io_upl = (void *)upl;
	src_upl->ref_count++;

	upl->flags |= UPL_DECMP_REAL_IO;
	upl->decmp_io_upl = (void *)src_upl;
	upl_unlock(src_upl);
}
#endif /* CONFIG_IOSCHED */
#if CONFIG_IOSCHED || UPL_DEBUG
int upl_debug_enabled = 1;
#else
int upl_debug_enabled = 0;
#endif
static upl_t
upl_create(int type, int flags, upl_size_t size)
{
	upl_t           upl;
	vm_size_t       page_field_size = 0;
	int             upl_flags = 0;
	vm_size_t       upl_size = sizeof(struct upl);

	assert(page_aligned(size));

	size = round_page_32(size);

	if (type & UPL_CREATE_LITE) {
		page_field_size = (atop(size) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;

		upl_flags |= UPL_LITE;
	}
	if (type & UPL_CREATE_INTERNAL) {
		upl_size += sizeof(struct upl_page_info) * atop(size);

		upl_flags |= UPL_INTERNAL;
	}
	upl = (upl_t)kalloc(upl_size + page_field_size);

	if (page_field_size) {
		bzero((char *)upl + upl_size, page_field_size);
	}

	upl->flags = upl_flags | flags;
	upl->kaddr = (vm_offset_t)0;
	upl->map_object = NULL;
	upl->ref_count = 1;
	upl->ext_ref_count = 0;
	upl->highest_page = 0;
	upl_lock_init(upl);
	upl->vector_upl = NULL;
	upl->associated_upl = NULL;
	upl->upl_iodone = NULL;
#if CONFIG_IOSCHED
	if (type & UPL_CREATE_IO_TRACKING) {
		upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
	}

	upl->upl_reprio_info = 0;
	upl->decmp_io_upl = 0;
	if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
		/* Only support expedite on internal UPLs */
		thread_t        curthread = current_thread();
		upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
		bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
		upl->flags |= UPL_EXPEDITE_SUPPORTED;
		if (curthread->decmp_upl != NULL) {
			upl_set_decmp_info(upl, curthread->decmp_upl);
		}
	}
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
		upl->upl_creator = current_thread();
		upl->flags |= UPL_TRACKED_BY_OBJECT;
	}
#endif

#ifdef UPL_DEBUG
	upl->ubc_alias1 = 0;
	upl->ubc_alias2 = 0;

	upl->upl_commit_index = 0;
	bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));

	(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif /* UPL_DEBUG */

	return upl;
}
static void
upl_destroy(upl_t upl)
{
	int     page_field_size;  /* bit field in word size buf */
	int     size;

//	DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object);

	if (upl->ext_ref_count) {
		panic("upl(%p) ext_ref_count", upl);
	}

#if CONFIG_IOSCHED
	if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
		upl_t src_upl;
		src_upl = upl->decmp_io_upl;
		assert((src_upl->flags & UPL_DECMP_REQ) != 0);
		upl_lock(src_upl);
		src_upl->decmp_io_upl = NULL;
		upl_unlock(src_upl);
		upl_deallocate(src_upl);
	}
#endif /* CONFIG_IOSCHED */

#if CONFIG_IOSCHED || UPL_DEBUG
	if (((upl->flags & UPL_TRACKED_BY_OBJECT) || upl_debug_enabled) &&
	    !(upl->flags & UPL_VECTOR)) {
		vm_object_t     object;

		if (upl->flags & UPL_SHADOWED) {
			object = upl->map_object->shadow;
		} else {
			object = upl->map_object;
		}

		vm_object_lock(object);
		queue_remove(&object->uplq, upl, upl_t, uplq);
		vm_object_activity_end(object);
		vm_object_collapse(object, 0, TRUE);
		vm_object_unlock(object);
	}
#endif
	/*
	 * drop a reference on the map_object whether or
	 * not a pageout object is inserted
	 */
	if (upl->flags & UPL_SHADOWED) {
		vm_object_deallocate(upl->map_object);
	}

	if (upl->flags & UPL_DEVICE_MEMORY) {
		size = PAGE_SIZE;
	} else {
		size = upl_adjusted_size(upl, PAGE_MASK);
	}
	page_field_size = 0;

	if (upl->flags & UPL_LITE) {
		page_field_size = ((size / PAGE_SIZE) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
	}
	upl_lock_destroy(upl);
	upl->vector_upl = (vector_upl_t) 0xfeedbeef;

#if CONFIG_IOSCHED
	if (upl->flags & UPL_EXPEDITE_SUPPORTED) {
		kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size / PAGE_SIZE));
	}
#endif

	if (upl->flags & UPL_INTERNAL) {
		kfree(upl,
		    sizeof(struct upl) +
		    (sizeof(struct upl_page_info) * (size / PAGE_SIZE))
		    + page_field_size);
	} else {
		kfree(upl, sizeof(struct upl) + page_field_size);
	}
}
void
upl_deallocate(upl_t upl)
{
	upl_lock(upl);

	if (--upl->ref_count == 0) {
		if (vector_upl_is_valid(upl)) {
			vector_upl_deallocate(upl);
		}
		upl_unlock(upl);

		if (upl->upl_iodone) {
			upl_callout_iodone(upl);
		}

		upl_destroy(upl);
	} else {
		upl_unlock(upl);
	}
}
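/*
 * Note: upl_deallocate() drops one reference; only when the count reaches zero is
 * any pending iodone callout delivered and the UPL's memory reclaimed via
 * upl_destroy().
 */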
#if CONFIG_IOSCHED
void
upl_mark_decmp(upl_t upl)
{
	if (upl->flags & UPL_TRACKED_BY_OBJECT) {
		upl->flags |= UPL_DECMP_REQ;
		upl->upl_creator->decmp_upl = (void *)upl;
	}
}

void
upl_unmark_decmp(upl_t upl)
{
	if (upl && (upl->flags & UPL_DECMP_REQ)) {
		upl->upl_creator->decmp_upl = NULL;
	}
}

#endif /* CONFIG_IOSCHED */
#define VM_PAGE_Q_BACKING_UP(q) \
	((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))

boolean_t must_throttle_writes(void);

boolean_t
must_throttle_writes()
{
	if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
	    vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10) {
		return TRUE;
	}

	return FALSE;
}
#define MIN_DELAYED_WORK_CTX_ALLOCATED  (16)
#define MAX_DELAYED_WORK_CTX_ALLOCATED  (512)

int vm_page_delayed_work_ctx_needed = 0;
zone_t dw_ctx_zone = ZONE_NULL;

void
vm_page_delayed_work_init_ctx(void)
{
	int nelems = 0, elem_size = 0;

	elem_size = sizeof(struct vm_page_delayed_work_ctx);

	dw_ctx_zone = zone_create_ext("delayed-work-ctx", elem_size,
	    ZC_NOGC, ZONE_ID_ANY, ^(zone_t z) {
		zone_set_exhaustible(z, MAX_DELAYED_WORK_CTX_ALLOCATED * elem_size);
	});

	nelems = zfill(dw_ctx_zone, MIN_DELAYED_WORK_CTX_ALLOCATED);
	if (nelems < MIN_DELAYED_WORK_CTX_ALLOCATED) {
		printf("vm_page_delayed_work_init_ctx: Failed to preallocate minimum delayed work contexts (%d vs %d).\n", nelems, MIN_DELAYED_WORK_CTX_ALLOCATED);
#if DEVELOPMENT || DEBUG
		panic("Failed to preallocate minimum delayed work contexts (%d vs %d).\n", nelems, MIN_DELAYED_WORK_CTX_ALLOCATED);
#endif /* DEVELOPMENT || DEBUG */
	}
}

struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void)
{
	struct vm_page_delayed_work_ctx * dw_ctx = NULL;

	dw_ctx = (struct vm_page_delayed_work_ctx*) zalloc_noblock(dw_ctx_zone);

	if (dw_ctx) {
		dw_ctx->delayed_owner = current_thread();
	} else {
		vm_page_delayed_work_ctx_needed++;
	}

	return dw_ctx ? dw_ctx->dwp : NULL;
}

void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp)
{
	struct  vm_page_delayed_work_ctx *ldw_ctx;

	ldw_ctx = (struct vm_page_delayed_work_ctx *)dwp;
	ldw_ctx->delayed_owner = NULL;

	zfree(dw_ctx_zone, ldw_ctx);
}
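/*
 * Note: delayed-work contexts come from an exhaustible zone; when zalloc_noblock()
 * cannot supply one, vm_page_delayed_work_get_ctx() returns NULL and the caller
 * falls back to a small on-stack dw_array (see vm_object_upl_request below).
 */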
/*
 *	Routine:	vm_object_upl_request
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		A page list structure, listing the physical pages
 *		will be returned upon request.
 *		This function is called by the file system or any other
 *		supplier of backing store to a pager.
 *		IMPORTANT NOTE: The caller must still respect the relationship
 *		between the vm_object and its backing memory object.  The
 *		caller MUST NOT substitute changes in the backing file
 *		without first doing a memory_object_lock_request on the
 *		target range unless it is known that the pages are not
 *		shared with another entity at the pager level.
 *	Copy_in_to:
 *		if a page list structure is present
 *		return the mapped physical pages, where a
 *		page is not present, return a non-initialized
 *		one.  If the no_sync bit is turned on, don't
 *		call the pager unlock to synchronize with other
 *		possible copies of the page. Leave pages busy
 *		in the original object, if a page list structure
 *		was specified.  When a commit of the page list
 *		pages is done, the dirty bit will be set for each one.
 *	Copy_out_from:
 *		If a page list structure is present, return
 *		all mapped pages.  Where a page does not exist
 *		map a zero filled one. Leave pages busy in
 *		the original object.  If a page list structure
 *		is not specified, this call is a no-op.
 *
 *	Note:  access of default pager objects has a rather interesting
 *		twist.  The caller of this routine, presumably the file system
 *		page cache handling code, will never actually make a request
 *		against a default pager backed object.  Only the default
 *		pager will make requests on backing store related vm_objects.
 *		In this way the default pager can maintain the relationship
 *		between backing store files (abstract memory objects) and
 *		the vm_objects (cache objects), they support.
 */
5435 __private_extern__ kern_return_t
5436 vm_object_upl_request(
5438 vm_object_offset_t offset
,
5441 upl_page_info_array_t user_page_list
,
5442 unsigned int *page_list_count
,
5443 upl_control_flags_t cntrl_flags
,
5446 vm_page_t dst_page
= VM_PAGE_NULL
;
5447 vm_object_offset_t dst_offset
;
5448 upl_size_t xfer_size
;
5449 unsigned int size_in_pages
;
5454 vm_page_t alias_page
= NULL
;
5455 int refmod_state
= 0;
5456 wpl_array_t lite_list
= NULL
;
5457 vm_object_t last_copy_object
;
5458 struct vm_page_delayed_work dw_array
;
5459 struct vm_page_delayed_work
*dwp
, *dwp_start
;
5460 bool dwp_finish_ctx
= TRUE
;
5463 int io_tracking_flag
= 0;
5465 int page_grab_count
= 0;
5467 pmap_flush_context pmap_flush_context_storage
;
5468 boolean_t pmap_flushes_delayed
= FALSE
;
5469 #if DEVELOPMENT || DEBUG
5470 task_t task
= current_task();
5471 #endif /* DEVELOPMENT || DEBUG */
5473 dwp_start
= dwp
= NULL
;
5475 if (cntrl_flags
& ~UPL_VALID_FLAGS
) {
5477 * For forward compatibility's sake,
5478 * reject any unknown flag.
5480 return KERN_INVALID_VALUE
;
5482 if ((!object
->internal
) && (object
->paging_offset
!= 0)) {
5483 panic("vm_object_upl_request: external object with non-zero paging offset\n");
5485 if (object
->phys_contiguous
) {
5486 panic("vm_object_upl_request: contiguous object specified\n");
5489 assertf(page_aligned(offset
) && page_aligned(size
),
5490 "offset 0x%llx size 0x%x",
5493 VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request
, VM_UPL_REQUEST
, DBG_FUNC_START
, size
, cntrl_flags
, 0, 0);
5496 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
5497 dwp_start
= vm_page_delayed_work_get_ctx();
5498 if (dwp_start
== NULL
) {
5499 dwp_start
= &dw_array
;
5501 dwp_finish_ctx
= FALSE
;
5506 if (size
> MAX_UPL_SIZE_BYTES
) {
5507 size
= MAX_UPL_SIZE_BYTES
;
5510 if ((cntrl_flags
& UPL_SET_INTERNAL
) && page_list_count
!= NULL
) {
5511 *page_list_count
= MAX_UPL_SIZE_BYTES
>> PAGE_SHIFT
;
5514 #if CONFIG_IOSCHED || UPL_DEBUG
5515 if (object
->io_tracking
|| upl_debug_enabled
) {
5516 io_tracking_flag
|= UPL_CREATE_IO_TRACKING
;
5520 if (object
->io_tracking
) {
5521 io_tracking_flag
|= UPL_CREATE_EXPEDITE_SUP
;
5525 if (cntrl_flags
& UPL_SET_INTERNAL
) {
5526 if (cntrl_flags
& UPL_SET_LITE
) {
5527 upl
= upl_create(UPL_CREATE_INTERNAL
| UPL_CREATE_LITE
| io_tracking_flag
, 0, size
);
5529 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
5530 lite_list
= (wpl_array_t
)
5531 (((uintptr_t)user_page_list
) +
5532 ((size
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
5534 user_page_list
= NULL
;
5538 upl
= upl_create(UPL_CREATE_INTERNAL
| io_tracking_flag
, 0, size
);
5540 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
5542 user_page_list
= NULL
;
5546 if (cntrl_flags
& UPL_SET_LITE
) {
5547 upl
= upl_create(UPL_CREATE_EXTERNAL
| UPL_CREATE_LITE
| io_tracking_flag
, 0, size
);
5549 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
5554 upl
= upl_create(UPL_CREATE_EXTERNAL
| io_tracking_flag
, 0, size
);
5559 if (user_page_list
) {
5560 user_page_list
[0].device
= FALSE
;
5563 if (cntrl_flags
& UPL_SET_LITE
) {
5564 upl
->map_object
= object
;
5566 upl
->map_object
= vm_object_allocate(size
);
5568 * No neeed to lock the new object: nobody else knows
5569 * about it yet, so it's all ours so far.
5571 upl
->map_object
->shadow
= object
;
5572 upl
->map_object
->pageout
= TRUE
;
5573 upl
->map_object
->can_persist
= FALSE
;
5574 upl
->map_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
5575 upl
->map_object
->vo_shadow_offset
= offset
;
5576 upl
->map_object
->wimg_bits
= object
->wimg_bits
;
5577 assertf(page_aligned(upl
->map_object
->vo_shadow_offset
),
5578 "object %p shadow_offset 0x%llx",
5579 upl
->map_object
, upl
->map_object
->vo_shadow_offset
);
5581 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
5583 upl
->flags
|= UPL_SHADOWED
;
5585 if (cntrl_flags
& UPL_FOR_PAGEOUT
) {
5586 upl
->flags
|= UPL_PAGEOUT
;
5589 vm_object_lock(object
);
5590 vm_object_activity_begin(object
);
5593 #if CONFIG_SECLUDED_MEMORY
5594 if (object
->can_grab_secluded
) {
5595 grab_options
|= VM_PAGE_GRAB_SECLUDED
;
5597 #endif /* CONFIG_SECLUDED_MEMORY */
5600 * we can lock in the paging_offset once paging_in_progress is set
5603 upl
->u_offset
= offset
+ object
->paging_offset
;
5605 #if CONFIG_IOSCHED || UPL_DEBUG
5606 if (object
->io_tracking
|| upl_debug_enabled
) {
5607 vm_object_activity_begin(object
);
5608 queue_enter(&object
->uplq
, upl
, upl_t
, uplq
);
5611 if ((cntrl_flags
& UPL_WILL_MODIFY
) && object
->copy
!= VM_OBJECT_NULL
) {
5613 * Honor copy-on-write obligations
5615 * The caller is gathering these pages and
5616 * might modify their contents. We need to
5617 * make sure that the copy object has its own
5618 * private copies of these pages before we let
5619 * the caller modify them.
5621 vm_object_update(object
,
5626 FALSE
, /* should_return */
5627 MEMORY_OBJECT_COPY_SYNC
,
5630 VM_PAGEOUT_DEBUG(upl_cow
, 1);
5631 VM_PAGEOUT_DEBUG(upl_cow_pages
, (size
>> PAGE_SHIFT
));
5634 * remember which copy object we synchronized with
5636 last_copy_object
= object
->copy
;
5640 dst_offset
= offset
;
5641 size_in_pages
= size
/ PAGE_SIZE
;
5643 if (vm_page_free_count
> (vm_page_free_target
+ size_in_pages
) ||
5644 object
->resident_page_count
< ((MAX_UPL_SIZE_BYTES
* 2) >> PAGE_SHIFT
)) {
5645 object
->scan_collisions
= 0;
5648 if ((cntrl_flags
& UPL_WILL_MODIFY
) && must_throttle_writes() == TRUE
) {
5649 boolean_t isSSD
= FALSE
;
5654 vnode_pager_get_isSSD(object
->pager
, &isSSD
);
5656 vm_object_unlock(object
);
5658 OSAddAtomic(size_in_pages
, &vm_upl_wait_for_pages
);
5660 if (isSSD
== TRUE
) {
5661 delay(1000 * size_in_pages
);
5663 delay(5000 * size_in_pages
);
5665 OSAddAtomic(-size_in_pages
, &vm_upl_wait_for_pages
);
5667 vm_object_lock(object
);
5673 if ((alias_page
== NULL
) && !(cntrl_flags
& UPL_SET_LITE
)) {
5674 vm_object_unlock(object
);
5675 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
5676 vm_object_lock(object
);
5678 if (cntrl_flags
& UPL_COPYOUT_FROM
) {
5679 upl
->flags
|= UPL_PAGE_SYNC_DONE
;
5681 if (((dst_page
= vm_page_lookup(object
, dst_offset
)) == VM_PAGE_NULL
) ||
5682 dst_page
->vmp_fictitious
||
5683 dst_page
->vmp_absent
||
5684 dst_page
->vmp_error
||
5685 dst_page
->vmp_cleaning
||
5686 (VM_PAGE_WIRED(dst_page
))) {
5687 if (user_page_list
) {
5688 user_page_list
[entry
].phys_addr
= 0;
5693 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
5696 * grab this up front...
5697 * a high percentange of the time we're going to
5698 * need the hardware modification state a bit later
5699 * anyway... so we can eliminate an extra call into
5700 * the pmap layer by grabbing it here and recording it
5702 if (dst_page
->vmp_pmapped
) {
5703 refmod_state
= pmap_get_refmod(phys_page
);
5708 if ((refmod_state
& VM_MEM_REFERENCED
) && VM_PAGE_INACTIVE(dst_page
)) {
5710 * page is on inactive list and referenced...
5711 * reactivate it now... this gets it out of the
5712 * way of vm_pageout_scan which would have to
5713 * reactivate it upon tripping over it
5715 dwp
->dw_mask
|= DW_vm_page_activate
;
5717 if (cntrl_flags
& UPL_RET_ONLY_DIRTY
) {
5719 * we're only asking for DIRTY pages to be returned
5721 if (dst_page
->vmp_laundry
|| !(cntrl_flags
& UPL_FOR_PAGEOUT
)) {
5723 * if we were the page stolen by vm_pageout_scan to be
5724 * cleaned (as opposed to a buddy being clustered in
5725 * or this request is not being driven by a PAGEOUT cluster
5726 * then we only need to check for the page being dirty or
5727 * precious to decide whether to return it
5729 if (dst_page
->vmp_dirty
|| dst_page
->vmp_precious
|| (refmod_state
& VM_MEM_MODIFIED
)) {
5735 * this is a request for a PAGEOUT cluster and this page
5736 * is merely along for the ride as a 'buddy'... not only
5737 * does it have to be dirty to be returned, but it also
5738 * can't have been referenced recently...
5740 if ((hibernate_cleaning_in_progress
== TRUE
||
5741 (!((refmod_state
& VM_MEM_REFERENCED
) || dst_page
->vmp_reference
) ||
5742 (dst_page
->vmp_q_state
== VM_PAGE_ON_THROTTLED_Q
))) &&
5743 ((refmod_state
& VM_MEM_MODIFIED
) || dst_page
->vmp_dirty
|| dst_page
->vmp_precious
)) {
5748 * if we reach here, we're not to return
5749 * the page... go on to the next one
5751 if (dst_page
->vmp_laundry
== TRUE
) {
5753 * if we get here, the page is not 'cleaning' (filtered out above).
5754 * since it has been referenced, remove it from the laundry
5755 * so we don't pay the cost of an I/O to clean a page
5756 * we're just going to take back
5758 vm_page_lockspin_queues();
5760 vm_pageout_steal_laundry(dst_page
, TRUE
);
5761 vm_page_activate(dst_page
);
5763 vm_page_unlock_queues();
5765 if (user_page_list
) {
5766 user_page_list
[entry
].phys_addr
= 0;
5772 if (dst_page
->vmp_busy
) {
5773 if (cntrl_flags
& UPL_NOBLOCK
) {
5774 if (user_page_list
) {
5775 user_page_list
[entry
].phys_addr
= 0;
5782 * someone else is playing with the
5783 * page. We will have to wait.
5785 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
5789 if (dst_page
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
) {
5790 vm_page_lockspin_queues();
5792 if (dst_page
->vmp_q_state
== VM_PAGE_ON_PAGEOUT_Q
) {
5794 * we've buddied up a page for a clustered pageout
5795 * that has already been moved to the pageout
5796 * queue by pageout_scan... we need to remove
5797 * it from the queue and drop the laundry count
5800 vm_pageout_throttle_up(dst_page
);
5802 vm_page_unlock_queues();
5804 hw_dirty
= refmod_state
& VM_MEM_MODIFIED
;
5805 dirty
= hw_dirty
? TRUE
: dst_page
->vmp_dirty
;
5807 if (phys_page
> upl
->highest_page
) {
5808 upl
->highest_page
= phys_page
;
5811 assert(!pmap_is_noencrypt(phys_page
));
5813 if (cntrl_flags
& UPL_SET_LITE
) {
5814 unsigned int pg_num
;
5816 pg_num
= (unsigned int) ((dst_offset
- offset
) / PAGE_SIZE
);
5817 assert(pg_num
== (dst_offset
- offset
) / PAGE_SIZE
);
5818 lite_list
[pg_num
>> 5] |= 1U << (pg_num
& 31);
5821 if (pmap_flushes_delayed
== FALSE
) {
5822 pmap_flush_context_init(&pmap_flush_context_storage
);
5823 pmap_flushes_delayed
= TRUE
;
5825 pmap_clear_refmod_options(phys_page
,
5827 PMAP_OPTIONS_NOFLUSH
| PMAP_OPTIONS_CLEAR_WRITE
,
5828 &pmap_flush_context_storage
);
5832 * Mark original page as cleaning
5835 dst_page
->vmp_cleaning
= TRUE
;
5836 dst_page
->vmp_precious
= FALSE
;
5839 * use pageclean setup, it is more
5840 * convenient even for the pageout
5843 vm_object_lock(upl
->map_object
);
5844 vm_pageclean_setup(dst_page
, alias_page
, upl
->map_object
, size
- xfer_size
);
5845 vm_object_unlock(upl
->map_object
);
5847 alias_page
->vmp_absent
= FALSE
;
5851 SET_PAGE_DIRTY(dst_page
, FALSE
);
5853 dst_page
->vmp_dirty
= FALSE
;
5857 dst_page
->vmp_precious
= TRUE
;
5860 if (!(cntrl_flags
& UPL_CLEAN_IN_PLACE
)) {
5861 if (!VM_PAGE_WIRED(dst_page
)) {
5862 dst_page
->vmp_free_when_done
= TRUE
;
	} else {
		if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
			/*
			 * Honor copy-on-write obligations
			 *
			 * The copy object has changed since we
			 * last synchronized for copy-on-write.
			 * Another copy object might have been
			 * inserted while we released the object's
			 * lock.  Since someone could have seen the
			 * original contents of the remaining pages
			 * through that new object, we have to
			 * synchronize with it again for the remaining
			 * pages only.  The previous pages are "busy"
			 * so they can not be seen through the new
			 * mapping.  The new mapping will see our
			 * upcoming changes for those previous pages,
			 * but that's OK since they couldn't see what
			 * was there before.  It's just a race anyway
			 * and there's no guarantee of consistency or
			 * atomicity.  We just don't want new mappings
			 * to see both the *before* and *after* pages.
			 */
			if (object->copy != VM_OBJECT_NULL) {
				vm_object_update(
					object,
					dst_offset,/* current offset */
					xfer_size, /* remaining size */
					NULL,
					NULL,
					FALSE,     /* should_return */
					MEMORY_OBJECT_COPY_SYNC,
					VM_PROT_NO_CHANGE);

				VM_PAGEOUT_DEBUG(upl_cow_again, 1);
				VM_PAGEOUT_DEBUG(upl_cow_again_pages, (xfer_size >> PAGE_SHIFT));
			}
			/*
			 * remember the copy object we synced with
			 */
			last_copy_object = object->copy;
		}
		dst_page = vm_page_lookup(object, dst_offset);

		if (dst_page != VM_PAGE_NULL) {
			if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
				/*
				 * skip over pages already present in the cache
				 */
				if (user_page_list) {
					user_page_list[entry].phys_addr = 0;
				}

				goto try_next_page;
			}
			if (dst_page->vmp_fictitious) {
				panic("need corner case for fictitious page");
			}

			if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
				/*
				 * someone else is playing with the
				 * page.  We will have to wait.
				 */
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);

				continue;
			}
			if (dst_page->vmp_laundry) {
				vm_pageout_steal_laundry(dst_page, FALSE);
			}
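			/*
			 * note: a resident page that is busy or being cleaned is
			 * simply slept on and the offset retried, and a page
			 * sitting on the laundry is pulled back here so we don't
			 * wait on an I/O for a page we're about to take over.
			 */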
		} else {
			if (object->private) {
				/*
				 * This is a nasty wrinkle for users
				 * of upl who encounter device or
				 * private memory however, it is
				 * unavoidable, only a fault can
				 * resolve the actual backing
				 * physical page by asking the
				 * backing device
				 */
				if (user_page_list) {
					user_page_list[entry].phys_addr = 0;
				}

				goto try_next_page;
			}
			if (object->scan_collisions) {
				/*
				 * the pageout_scan thread is trying to steal
				 * pages from this object, but has run into our
				 * lock... grab 2 pages from the head of the object...
				 * the first is freed on behalf of pageout_scan, the
				 * 2nd is for our own use... we use vm_object_page_grab
				 * in both cases to avoid taking pages from the free
				 * list since we are under memory pressure and our
				 * lock on this object is getting in the way of
				 * vm_pageout_scan
				 */
				dst_page = vm_object_page_grab(object);

				if (dst_page != VM_PAGE_NULL) {
					vm_page_release(dst_page, FALSE);
				}

				dst_page = vm_object_page_grab(object);
			}
			if (dst_page == VM_PAGE_NULL) {
				/*
				 * need to allocate a page
				 */
				dst_page = vm_page_grab_options(grab_options);
				if (dst_page != VM_PAGE_NULL) {
					page_grab_count++;
				}
			}
			if (dst_page == VM_PAGE_NULL) {
				if ((cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
					/*
					 * we don't want to stall waiting for pages to come onto the free list
					 * while we're already holding absent pages in this UPL
					 * the caller will deal with the empty slots
					 */
					if (user_page_list) {
						user_page_list[entry].phys_addr = 0;
					}

					goto try_next_page;
				}
				/*
				 * no pages available... wait
				 * then try again for the same
				 * offset...
				 */
				vm_object_unlock(object);

				OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);

				VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);

				VM_PAGE_WAIT();
				OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

				VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);

				vm_object_lock(object);

				continue;
			}
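			/*
			 * reaching this point means we hold a freshly grabbed page:
			 * the code above first tries to recycle pages from this same
			 * object while pageout_scan is colliding with our lock, then
			 * falls back to the free list, and either gives the slot up
			 * (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK) or blocks until memory
			 * becomes available.
			 */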
			vm_page_insert(dst_page, object, dst_offset);

			dst_page->vmp_absent = TRUE;
			dst_page->vmp_busy = FALSE;

			if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
				/*
				 * if UPL_RET_ONLY_ABSENT was specified,
				 * then we're definitely setting up a
				 * upl for a clustered read/pagein
				 * operation... mark the pages as clustered
				 * so upl_commit_range can put them on the
				 * speculative list
				 */
				dst_page->vmp_clustered = TRUE;

				if (!(cntrl_flags & UPL_FILE_IO)) {
					VM_STAT_INCR(pageins);
				}
			}
		}
		phys_page = VM_PAGE_GET_PHYS_PAGE(dst_page);

		dst_page->vmp_overwriting = TRUE;

		if (dst_page->vmp_pmapped) {
			if (!(cntrl_flags & UPL_FILE_IO)) {
				/*
				 * eliminate all mappings from the
				 * original object and its progeny
				 */
				refmod_state = pmap_disconnect(phys_page);
			} else {
				refmod_state = pmap_get_refmod(phys_page);
			}
		} else {
			refmod_state = 0;
		}

		hw_dirty = refmod_state & VM_MEM_MODIFIED;
		dirty = hw_dirty ? TRUE : dst_page->vmp_dirty;

		if (cntrl_flags & UPL_SET_LITE) {
			unsigned int pg_num;

			pg_num = (unsigned int) ((dst_offset - offset) / PAGE_SIZE);
			assert(pg_num == (dst_offset - offset) / PAGE_SIZE);
			lite_list[pg_num >> 5] |= 1U << (pg_num & 31);

			if (hw_dirty) {
				pmap_clear_modify(phys_page);
			}

			/*
			 * Mark original page as cleaning
			 */
			dst_page->vmp_cleaning = TRUE;
			dst_page->vmp_precious = FALSE;
		} else {
			/*
			 * use pageclean setup, it is more
			 * convenient even for the pageout
			 * cases here
			 */
			vm_object_lock(upl->map_object);
			vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
			vm_object_unlock(upl->map_object);

			alias_page->vmp_absent = FALSE;
			alias_page = NULL;
		}
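		/*
		 * note: vmp_overwriting was set above because the caller intends
		 * to supply new contents for this page; existing mappings were
		 * torn down with pmap_disconnect() (UPL_FILE_IO only samples the
		 * ref/mod state instead), so nothing can observe the page while
		 * it is being overwritten.
		 */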
		if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
			upl->flags &= ~UPL_CLEAR_DIRTY;
			upl->flags |= UPL_SET_DIRTY;
			dirty = TRUE;
			/*
			 * Page belonging to a code-signed object is about to
			 * be written. Mark it tainted and disconnect it from
			 * all pmaps so processes have to fault it back in and
			 * deal with the tainted bit.
			 */
			if (object->code_signed && dst_page->vmp_cs_tainted != VMP_CS_ALL_TRUE) {
				dst_page->vmp_cs_tainted = VMP_CS_ALL_TRUE;
				vm_page_upl_tainted++;
				if (dst_page->vmp_pmapped) {
					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
					if (refmod_state & VM_MEM_REFERENCED) {
						dst_page->vmp_reference = TRUE;
					}
				}
			}
		} else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
			/*
			 * clean in place for read implies
			 * that a write will be done on all
			 * the pages that are dirty before
			 * a upl commit is done. The caller
			 * is obligated to preserve the
			 * contents of all pages marked dirty
			 */
			upl->flags |= UPL_CLEAR_DIRTY;
		}
		dst_page->vmp_dirty = dirty;

		if (!dirty) {
			dst_page->vmp_precious = TRUE;
		}

		if (!VM_PAGE_WIRED(dst_page)) {
			/*
			 * deny access to the target page while
			 * it is being worked on
			 */
			dst_page->vmp_busy = TRUE;
		} else {
			dwp->dw_mask |= DW_vm_page_wire;
		}

		/*
		 * We might be about to satisfy a fault which has been
		 * requested.  So no need for the "restart" bit.
		 */
		dst_page->vmp_restart = FALSE;
		if (!dst_page->vmp_absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
			/*
			 * expect the page to be used
			 */
			dwp->dw_mask |= DW_set_reference;
		}
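		/*
		 * note: an unwired page was marked busy above to deny access
		 * while the UPL is outstanding; an already-wired page instead
		 * picks up an extra wire reference through the DW_vm_page_wire
		 * delayed work bit.
		 */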
		if (cntrl_flags & UPL_PRECIOUS) {
			if (object->internal) {
				SET_PAGE_DIRTY(dst_page, FALSE);
				dst_page->vmp_precious = FALSE;
			} else {
				dst_page->vmp_precious = TRUE;
			}
		} else {
			dst_page->vmp_precious = FALSE;
		}
	}
	if (dst_page->vmp_busy) {
		upl->flags |= UPL_HAS_BUSY;
	}

	if (phys_page > upl->highest_page) {
		upl->highest_page = phys_page;
	}
	assert(!pmap_is_noencrypt(phys_page));

	if (user_page_list) {
		user_page_list[entry].phys_addr = phys_page;
		user_page_list[entry].free_when_done = dst_page->vmp_free_when_done;
		user_page_list[entry].absent = dst_page->vmp_absent;
		user_page_list[entry].dirty = dst_page->vmp_dirty;
		user_page_list[entry].precious = dst_page->vmp_precious;
		user_page_list[entry].device = FALSE;
		user_page_list[entry].needed = FALSE;
		if (dst_page->vmp_clustered == TRUE) {
			user_page_list[entry].speculative = (dst_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ? TRUE : FALSE;
		} else {
			user_page_list[entry].speculative = FALSE;
		}
		user_page_list[entry].cs_validated = dst_page->vmp_cs_validated;
		user_page_list[entry].cs_tainted = dst_page->vmp_cs_tainted;
		user_page_list[entry].cs_nx = dst_page->vmp_cs_nx;
		user_page_list[entry].mark = FALSE;
	}
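	/*
	 * the user_page_list entry filled in above is a snapshot of the page
	 * state (physical page number, dirty/precious/absent bits, code-signing
	 * state) taken while the object lock is held; it is what
	 * upl_commit_range() and upl_abort_range() consult later.
	 */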
	/*
	 * if UPL_RET_ONLY_ABSENT is set, then
	 * we are working with a fresh page and we've
	 * just set the clustered flag on it to
	 * indicate that it was drug in as part of a
	 * speculative cluster... so leave it alone
	 */
	if (!(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
		/*
		 * someone is explicitly grabbing this page...
		 * update clustered and speculative state
		 */
		if (dst_page->vmp_clustered) {
			VM_PAGE_CONSUME_CLUSTERED(dst_page);
		}
	}
try_next_page:
	if (dwp->dw_mask) {
		if (dwp->dw_mask & DW_vm_page_activate) {
			VM_STAT_INCR(reactivations);
		}

		VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);

		if (dw_count >= dw_limit) {
			vm_page_do_delayed_work(object, tag, dwp_start, dw_count);

			dwp = dwp_start;
			dw_count = 0;
		}
	}
	entry++;
	dst_offset += PAGE_SIZE_64;
	xfer_size -= PAGE_SIZE;
}
if (dw_count) {
	vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
	dwp = dwp_start;
	dw_count = 0;
}
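/*
 * delayed page operations are batched through dwp_start[] and flushed with
 * vm_page_do_delayed_work() whenever dw_count reaches dw_limit (and once
 * more above for any remainder), so the page queues are locked per batch
 * rather than per page.
 */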
	if (alias_page != NULL) {
		VM_PAGE_FREE(alias_page);
	}
	if (pmap_flushes_delayed == TRUE) {
		pmap_flush(&pmap_flush_context_storage);
	}

	if (page_list_count != NULL) {
		if (upl->flags & UPL_INTERNAL) {
			*page_list_count = 0;
		} else if (*page_list_count > entry) {
			*page_list_count = entry;
		}
	}
	vm_object_unlock(object);

	VM_DEBUG_CONSTANT_EVENT(vm_object_upl_request, VM_UPL_REQUEST, DBG_FUNC_END, page_grab_count, 0, 0, 0);
#if DEVELOPMENT || DEBUG
	if (task != NULL) {
		ledger_credit(task->ledger, task_ledgers.pages_grabbed_upl, page_grab_count);
	}
#endif /* DEVELOPMENT || DEBUG */

	if (dwp_start && dwp_finish_ctx) {
		vm_page_delayed_work_finish_ctx(dwp_start);
		dwp_start = dwp = NULL;
	}

	return KERN_SUCCESS;
}
/*
 * Routine:	vm_object_super_upl_request
 * Purpose:
 *	Cause the population of a portion of a vm_object
 *	in much the same way as memory_object_upl_request.
 *	Depending on the nature of the request, the pages
 *	returned may contain valid data or be uninitialized.
 *	However, the region may be expanded up to the super
 *	cluster size provided.
 */
__private_extern__ kern_return_t
vm_object_super_upl_request(
	vm_object_t object,
	vm_object_offset_t offset,
	upl_size_t size,
	upl_size_t super_cluster,
	upl_t *upl,
	upl_page_info_t *user_page_list,
	unsigned int *page_list_count,
	upl_control_flags_t cntrl_flags,
	vm_tag_t tag)
{
	if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)) {
		return KERN_FAILURE;
	}
	assert(object->paging_in_progress);
	offset = offset - object->paging_offset;

	if (super_cluster > size) {
		vm_object_offset_t base_offset;
		upl_size_t super_size;
		vm_object_size_t super_size_64;

		base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
		super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster << 1 : super_cluster;
		super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
		super_size = (upl_size_t) super_size_64;
		assert(super_size == super_size_64);

		if (offset > (base_offset + super_size)) {
			panic("vm_object_super_upl_request: Missed target pageout"
			    " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
			    offset, base_offset, super_size, super_cluster,
			    size, object->paging_offset);
		}
		/*
		 * apparently there is a case where the vm requests a
		 * page to be written out whose offset is beyond the
		 * range computed above... grow the cluster to cover it
		 */
		if ((offset + size) > (base_offset + super_size)) {
			super_size_64 = (offset + size) - base_offset;
			super_size = (upl_size_t) super_size_64;
			assert(super_size == super_size_64);
		}

		offset = base_offset;
		size = super_size;
	}
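	/*
	 * example of the sizing above (illustrative values): with a 1MB
	 * super_cluster, offset 0x123000 and size 0x4000 give
	 * base_offset = 0x100000; offset + size (0x127000) fits inside
	 * base_offset + super_cluster (0x200000), so super_size stays 1MB,
	 * is clipped to the object size if necessary, and the request
	 * below becomes offset 0x100000 for super_size bytes.
	 */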
	return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags, tag);
}
int cs_executable_create_upl = 0;
extern int proc_selfpid(void);
extern char *proc_name_address(void *p);
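/*
 * cs_executable_create_upl counts UPLs created against executable mappings;
 * proc_selfpid() and proc_name_address() are declared here so the warning
 * emitted below for that case can identify the offending process.
 */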
6331 vm_map_address_t offset
,
6332 upl_size_t
*upl_size
,
6334 upl_page_info_array_t page_list
,
6335 unsigned int *count
,
6336 upl_control_flags_t
*flags
,
6339 vm_map_entry_t entry
;
6340 upl_control_flags_t caller_flags
;
6341 int force_data_sync
;
6343 vm_object_t local_object
;
6344 vm_map_offset_t local_offset
;
6345 vm_map_offset_t local_start
;
6347 vm_map_address_t original_offset
;
6348 vm_map_size_t original_size
, adjusted_size
;
6349 vm_map_offset_t local_entry_start
;
6350 vm_object_offset_t local_entry_offset
;
6351 vm_object_offset_t offset_in_mapped_page
;
6352 boolean_t release_map
= FALSE
;
6356 original_offset
= offset
;
6357 original_size
= *upl_size
;
6358 adjusted_size
= original_size
;
6360 caller_flags
= *flags
;
6362 if (caller_flags
& ~UPL_VALID_FLAGS
) {
6364 * For forward compatibility's sake,
6365 * reject any unknown flag.
6367 ret
= KERN_INVALID_VALUE
;
6370 force_data_sync
= (caller_flags
& UPL_FORCE_DATA_SYNC
);
6371 sync_cow_data
= !(caller_flags
& UPL_COPYOUT_FROM
);
6374 ret
= KERN_INVALID_ARGUMENT
;
6379 vm_map_lock_read(map
);
6381 if (!vm_map_lookup_entry(map
, offset
, &entry
)) {
6382 vm_map_unlock_read(map
);
6387 local_entry_start
= entry
->vme_start
;
6388 local_entry_offset
= VME_OFFSET(entry
);
6390 if (VM_MAP_PAGE_SHIFT(map
) < PAGE_SHIFT
) {
6391 DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%x flags 0x%llx\n", map
, VM_MAP_PAGE_SHIFT(map
), (uint64_t)offset
, *upl_size
, *flags
);
6394 if (entry
->vme_end
- original_offset
< adjusted_size
) {
6395 adjusted_size
= entry
->vme_end
- original_offset
;
6396 assert(adjusted_size
> 0);
6397 *upl_size
= (upl_size_t
) adjusted_size
;
6398 assert(*upl_size
== adjusted_size
);
6401 if (caller_flags
& UPL_QUERY_OBJECT_TYPE
) {
6404 if (!entry
->is_sub_map
&&
6405 VME_OBJECT(entry
) != VM_OBJECT_NULL
) {
6406 if (VME_OBJECT(entry
)->private) {
6407 *flags
= UPL_DEV_MEMORY
;
6410 if (VME_OBJECT(entry
)->phys_contiguous
) {
6411 *flags
|= UPL_PHYS_CONTIG
;
6414 vm_map_unlock_read(map
);
6419 offset_in_mapped_page
= 0;
6420 if (VM_MAP_PAGE_SIZE(map
) < PAGE_SIZE
) {
6421 offset
= vm_map_trunc_page(original_offset
, VM_MAP_PAGE_MASK(map
));
6422 *upl_size
= (upl_size_t
)
6423 (vm_map_round_page(original_offset
+ adjusted_size
,
6424 VM_MAP_PAGE_MASK(map
))
6427 offset_in_mapped_page
= original_offset
- offset
;
6428 assert(offset_in_mapped_page
< VM_MAP_PAGE_SIZE(map
));
6430 DEBUG4K_UPL("map %p (%d) offset 0x%llx size 0x%llx flags 0x%llx -> offset 0x%llx adjusted_size 0x%llx *upl_size 0x%x offset_in_mapped_page 0x%llx\n", map
, VM_MAP_PAGE_SHIFT(map
), (uint64_t)original_offset
, (uint64_t)original_size
, *flags
, (uint64_t)offset
, (uint64_t)adjusted_size
, *upl_size
, offset_in_mapped_page
);
6433 if (VME_OBJECT(entry
) == VM_OBJECT_NULL
||
6434 !VME_OBJECT(entry
)->phys_contiguous
) {
6435 if (*upl_size
> MAX_UPL_SIZE_BYTES
) {
6436 *upl_size
= MAX_UPL_SIZE_BYTES
;
6441 * Create an object if necessary.
6443 if (VME_OBJECT(entry
) == VM_OBJECT_NULL
) {
6444 if (vm_map_lock_read_to_write(map
)) {
6445 goto REDISCOVER_ENTRY
;
6448 VME_OBJECT_SET(entry
,
6449 vm_object_allocate((vm_size_t
)
6450 vm_object_round_page((entry
->vme_end
- entry
->vme_start
))));
6451 VME_OFFSET_SET(entry
, 0);
6452 assert(entry
->use_pmap
);
6454 vm_map_lock_write_to_read(map
);
6457 if (!(caller_flags
& UPL_COPYOUT_FROM
) &&
6458 !entry
->is_sub_map
&&
6459 !(entry
->protection
& VM_PROT_WRITE
)) {
6460 vm_map_unlock_read(map
);
6461 ret
= KERN_PROTECTION_FAILURE
;
6466 if (map
->pmap
!= kernel_pmap
&&
6467 (caller_flags
& UPL_COPYOUT_FROM
) &&
6468 (entry
->protection
& VM_PROT_EXECUTE
) &&
6469 !(entry
->protection
& VM_PROT_WRITE
)) {
6474 * We're about to create a read-only UPL backed by
6475 * memory from an executable mapping.
6476 * Wiring the pages would result in the pages being copied
6477 * (due to the "MAP_PRIVATE" mapping) and no longer
6478 * code-signed, so no longer eligible for execution.
6479 * Instead, let's copy the data into a kernel buffer and
6480 * create the UPL from this kernel buffer.
6481 * The kernel buffer is then freed, leaving the UPL holding
6482 * the last reference on the VM object, so the memory will
6483 * be released when the UPL is committed.
6486 vm_map_unlock_read(map
);
6487 entry
= VM_MAP_ENTRY_NULL
;
6488 /* allocate kernel buffer */
6489 ksize
= round_page(*upl_size
);
6491 ret
= kmem_alloc_pageable(kernel_map
,
6495 if (ret
== KERN_SUCCESS
) {
6496 /* copyin the user data */
6497 ret
= copyinmap(map
, offset
, (void *)kaddr
, *upl_size
);
6499 if (ret
== KERN_SUCCESS
) {
6500 if (ksize
> *upl_size
) {
6501 /* zero out the extra space in kernel buffer */
6502 memset((void *)(kaddr
+ *upl_size
),
6506 /* create the UPL from the kernel buffer */
6507 vm_object_offset_t offset_in_object
;
6508 vm_object_offset_t offset_in_object_page
;
6510 offset_in_object
= offset
- local_entry_start
+ local_entry_offset
;
6511 offset_in_object_page
= offset_in_object
- vm_object_trunc_page(offset_in_object
);
6512 assert(offset_in_object_page
< PAGE_SIZE
);
6513 assert(offset_in_object_page
+ offset_in_mapped_page
< PAGE_SIZE
);
6514 *upl_size
-= offset_in_object_page
+ offset_in_mapped_page
;
6515 ret
= vm_map_create_upl(kernel_map
,
6516 (vm_map_address_t
)(kaddr
+ offset_in_object_page
+ offset_in_mapped_page
),
6517 upl_size
, upl
, page_list
, count
, flags
, tag
);
6520 /* free the kernel buffer */
6521 kmem_free(kernel_map
, kaddr
, ksize
);
6525 #if DEVELOPMENT || DEBUG
6526 DTRACE_VM4(create_upl_from_executable
,
6528 vm_map_address_t
, offset
,
6529 upl_size_t
, *upl_size
,
6530 kern_return_t
, ret
);
6531 #endif /* DEVELOPMENT || DEBUG */
6534 #endif /* CONFIG_EMBEDDED */
6536 local_object
= VME_OBJECT(entry
);
6537 assert(local_object
!= VM_OBJECT_NULL
);
6539 if (!entry
->is_sub_map
&&
6540 !entry
->needs_copy
&&
6542 local_object
->vo_size
> *upl_size
&& /* partial UPL */
6543 entry
->wired_count
== 0 && /* No COW for entries that are wired */
6544 (map
->pmap
!= kernel_pmap
) && /* alias checks */
6545 (vm_map_entry_should_cow_for_true_share(entry
) /* case 1 */
6548 local_object
->internal
&&
6549 (local_object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) &&
6550 local_object
->ref_count
> 1))) {
6555 * Set up the targeted range for copy-on-write to avoid
6556 * applying true_share/copy_delay to the entire object.
6559 * This map entry covers only part of an internal
6560 * object. There could be other map entries covering
6561 * other areas of this object and some of these map
6562 * entries could be marked as "needs_copy", which
6563 * assumes that the object is COPY_SYMMETRIC.
6564 * To avoid marking this object as COPY_DELAY and
6565 * "true_share", let's shadow it and mark the new
6566 * (smaller) object as "true_share" and COPY_DELAY.
6569 if (vm_map_lock_read_to_write(map
)) {
6570 goto REDISCOVER_ENTRY
;
6572 vm_map_lock_assert_exclusive(map
);
6573 assert(VME_OBJECT(entry
) == local_object
);
6575 vm_map_clip_start(map
,
6577 vm_map_trunc_page(offset
,
6578 VM_MAP_PAGE_MASK(map
)));
6579 vm_map_clip_end(map
,
6581 vm_map_round_page(offset
+ *upl_size
,
6582 VM_MAP_PAGE_MASK(map
)));
6583 if ((entry
->vme_end
- offset
) < *upl_size
) {
6584 *upl_size
= (upl_size_t
) (entry
->vme_end
- offset
);
6585 assert(*upl_size
== entry
->vme_end
- offset
);
6588 prot
= entry
->protection
& ~VM_PROT_WRITE
;
6589 if (override_nx(map
, VME_ALIAS(entry
)) && prot
) {
6590 prot
|= VM_PROT_EXECUTE
;
6592 vm_object_pmap_protect(local_object
,
6594 entry
->vme_end
- entry
->vme_start
,
6595 ((entry
->is_shared
||
6596 map
->mapped_in_other_pmaps
)
6599 VM_MAP_PAGE_SIZE(map
),
6603 assert(entry
->wired_count
== 0);
6606 * Lock the VM object and re-check its status: if it's mapped
6607 * in another address space, we could still be racing with
6608 * another thread holding that other VM map exclusively.
6610 vm_object_lock(local_object
);
6611 if (local_object
->true_share
) {
6612 /* object is already in proper state: no COW needed */
6613 assert(local_object
->copy_strategy
!=
6614 MEMORY_OBJECT_COPY_SYMMETRIC
);
6616 /* not true_share: ask for copy-on-write below */
6617 assert(local_object
->copy_strategy
==
6618 MEMORY_OBJECT_COPY_SYMMETRIC
);
6619 entry
->needs_copy
= TRUE
;
6621 vm_object_unlock(local_object
);
6623 vm_map_lock_write_to_read(map
);
6626 if (entry
->needs_copy
) {
6628 * Honor copy-on-write for COPY_SYMMETRIC
6633 vm_object_offset_t new_offset
;
6636 vm_map_version_t version
;
6638 vm_prot_t fault_type
;
6640 if (entry
->vme_start
< VM_MAP_TRUNC_PAGE(offset
, VM_MAP_PAGE_MASK(map
)) ||
6641 entry
->vme_end
> VM_MAP_ROUND_PAGE(offset
+ *upl_size
, VM_MAP_PAGE_MASK(map
))) {
6643 * Clip the requested range first to minimize the
6644 * amount of potential copying...
6646 if (vm_map_lock_read_to_write(map
)) {
6647 goto REDISCOVER_ENTRY
;
6649 vm_map_lock_assert_exclusive(map
);
6650 assert(VME_OBJECT(entry
) == local_object
);
6651 vm_map_clip_start(map
, entry
,
6652 VM_MAP_TRUNC_PAGE(offset
, VM_MAP_PAGE_MASK(map
)));
6653 vm_map_clip_end(map
, entry
,
6654 VM_MAP_ROUND_PAGE(offset
+ *upl_size
, VM_MAP_PAGE_MASK(map
)));
6655 vm_map_lock_write_to_read(map
);
6660 if (caller_flags
& UPL_COPYOUT_FROM
) {
6661 fault_type
= VM_PROT_READ
| VM_PROT_COPY
;
6662 vm_counters
.create_upl_extra_cow
++;
6663 vm_counters
.create_upl_extra_cow_pages
+=
6664 (entry
->vme_end
- entry
->vme_start
) / PAGE_SIZE
;
6666 fault_type
= VM_PROT_WRITE
;
6668 if (vm_map_lookup_locked(&local_map
,
6670 OBJECT_LOCK_EXCLUSIVE
,
6672 &new_offset
, &prot
, &wired
,
6674 &real_map
, NULL
) != KERN_SUCCESS
) {
6675 if (fault_type
== VM_PROT_WRITE
) {
6676 vm_counters
.create_upl_lookup_failure_write
++;
6678 vm_counters
.create_upl_lookup_failure_copy
++;
6680 vm_map_unlock_read(local_map
);
6684 if (real_map
!= local_map
) {
6685 vm_map_unlock(real_map
);
6687 vm_map_unlock_read(local_map
);
6689 vm_object_unlock(object
);
6691 goto REDISCOVER_ENTRY
;
6694 if (entry
->is_sub_map
) {
6697 submap
= VME_SUBMAP(entry
);
6698 local_start
= entry
->vme_start
;
6699 local_offset
= (vm_map_offset_t
)VME_OFFSET(entry
);
6701 vm_map_reference(submap
);
6702 vm_map_unlock_read(map
);
6704 DEBUG4K_UPL("map %p offset 0x%llx (0x%llx) size 0x%x (adjusted 0x%llx original 0x%llx) offset_in_mapped_page 0x%llx submap %p\n", map
, (uint64_t)offset
, (uint64_t)original_offset
, *upl_size
, (uint64_t)adjusted_size
, (uint64_t)original_size
, offset_in_mapped_page
, submap
);
6705 offset
+= offset_in_mapped_page
;
6706 *upl_size
-= offset_in_mapped_page
;
6709 vm_map_deallocate(map
);
6713 offset
= local_offset
+ (offset
- local_start
);
6714 goto start_with_map
;
6717 if (sync_cow_data
&&
6718 (VME_OBJECT(entry
)->shadow
||
6719 VME_OBJECT(entry
)->copy
)) {
6720 local_object
= VME_OBJECT(entry
);
6721 local_start
= entry
->vme_start
;
6722 local_offset
= (vm_map_offset_t
)VME_OFFSET(entry
);
6724 vm_object_reference(local_object
);
6725 vm_map_unlock_read(map
);
6727 if (local_object
->shadow
&& local_object
->copy
) {
6728 vm_object_lock_request(local_object
->shadow
,
6729 ((vm_object_offset_t
)
6730 ((offset
- local_start
) +
6732 local_object
->vo_shadow_offset
),
6734 MEMORY_OBJECT_DATA_SYNC
,
6737 sync_cow_data
= FALSE
;
6738 vm_object_deallocate(local_object
);
6740 goto REDISCOVER_ENTRY
;
6742 if (force_data_sync
) {
6743 local_object
= VME_OBJECT(entry
);
6744 local_start
= entry
->vme_start
;
6745 local_offset
= (vm_map_offset_t
)VME_OFFSET(entry
);
6747 vm_object_reference(local_object
);
6748 vm_map_unlock_read(map
);
6750 vm_object_lock_request(local_object
,
6751 ((vm_object_offset_t
)
6752 ((offset
- local_start
) +
6754 (vm_object_size_t
)*upl_size
,
6756 MEMORY_OBJECT_DATA_SYNC
,
6759 force_data_sync
= FALSE
;
6760 vm_object_deallocate(local_object
);
6762 goto REDISCOVER_ENTRY
;
6764 if (VME_OBJECT(entry
)->private) {
6765 *flags
= UPL_DEV_MEMORY
;
6770 if (VME_OBJECT(entry
)->phys_contiguous
) {
6771 *flags
|= UPL_PHYS_CONTIG
;
6774 local_object
= VME_OBJECT(entry
);
6775 local_offset
= (vm_map_offset_t
)VME_OFFSET(entry
);
6776 local_start
= entry
->vme_start
;
6779 * Wiring will copy the pages to the shadow object.
6780 * The shadow object will not be code-signed so
6781 * attempting to execute code from these copied pages
6782 * would trigger a code-signing violation.
6784 if (entry
->protection
& VM_PROT_EXECUTE
) {
6786 printf("pid %d[%s] create_upl out of executable range from "
6787 "0x%llx to 0x%llx: side effects may include "
6788 "code-signing violations later on\n",
6790 (current_task()->bsd_info
6791 ? proc_name_address(current_task()->bsd_info
)
6793 (uint64_t) entry
->vme_start
,
6794 (uint64_t) entry
->vme_end
);
6795 #endif /* MACH_ASSERT */
6796 DTRACE_VM2(cs_executable_create_upl
,
6797 uint64_t, (uint64_t)entry
->vme_start
,
6798 uint64_t, (uint64_t)entry
->vme_end
);
6799 cs_executable_create_upl
++;
6802 vm_object_lock(local_object
);
6805 * Ensure that this object is "true_share" and "copy_delay" now,
6806 * while we're still holding the VM map lock. After we unlock the map,
6807 * anything could happen to that mapping, including some copy-on-write
6808 * activity. We need to make sure that the IOPL will point at the
6809 * same memory as the mapping.
6811 if (local_object
->true_share
) {
6812 assert(local_object
->copy_strategy
!=
6813 MEMORY_OBJECT_COPY_SYMMETRIC
);
6814 } else if (local_object
!= kernel_object
&&
6815 local_object
!= compressor_object
&&
6816 !local_object
->phys_contiguous
) {
6817 #if VM_OBJECT_TRACKING_OP_TRUESHARE
6818 if (!local_object
->true_share
&&
6819 vm_object_tracking_inited
) {
6820 void *bt
[VM_OBJECT_TRACKING_BTDEPTH
];
6822 num
= OSBacktrace(bt
,
6823 VM_OBJECT_TRACKING_BTDEPTH
);
6824 btlog_add_entry(vm_object_tracking_btlog
,
6826 VM_OBJECT_TRACKING_OP_TRUESHARE
,
6830 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
6831 local_object
->true_share
= TRUE
;
6832 if (local_object
->copy_strategy
==
6833 MEMORY_OBJECT_COPY_SYMMETRIC
) {
6834 local_object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
6838 vm_object_reference_locked(local_object
);
6839 vm_object_unlock(local_object
);
6841 vm_map_unlock_read(map
);
6843 offset
+= offset_in_mapped_page
;
6844 assert(*upl_size
> offset_in_mapped_page
);
6845 *upl_size
-= offset_in_mapped_page
;
6847 ret
= vm_object_iopl_request(local_object
,
6848 ((vm_object_offset_t
)
6849 ((offset
- local_start
) + local_offset
)),
6856 vm_object_deallocate(local_object
);
6860 vm_map_deallocate(map
);
6867 * Internal routine to enter a UPL into a VM map.
6869 * JMM - This should just be doable through the standard
6870 * vm_map_enter() API.
6876 vm_map_offset_t
*dst_addr
)
6879 vm_object_offset_t offset
;
6880 vm_map_offset_t addr
;
6883 int isVectorUPL
= 0, curr_upl
= 0;
6884 upl_t vector_upl
= NULL
;
6885 vm_offset_t vector_upl_dst_addr
= 0;
6886 vm_map_t vector_upl_submap
= NULL
;
6887 upl_offset_t subupl_offset
= 0;
6888 upl_size_t subupl_size
= 0;
6890 if (upl
== UPL_NULL
) {
6891 return KERN_INVALID_ARGUMENT
;
6894 DEBUG4K_UPL("map %p upl %p flags 0x%x object %p offset 0x%llx size 0x%x \n", map
, upl
, upl
->flags
, upl
->map_object
, upl
->u_offset
, upl
->u_size
);
6895 assert(map
== kernel_map
);
6897 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
6898 int mapped
= 0, valid_upls
= 0;
6901 upl_lock(vector_upl
);
6902 for (curr_upl
= 0; curr_upl
< MAX_VECTOR_UPL_ELEMENTS
; curr_upl
++) {
6903 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
);
6908 if (UPL_PAGE_LIST_MAPPED
& upl
->flags
) {
6914 if (mapped
!= valid_upls
) {
6915 panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped
, valid_upls
);
6917 upl_unlock(vector_upl
);
6918 return KERN_FAILURE
;
6922 if (VM_MAP_PAGE_MASK(map
) < PAGE_MASK
) {
6923 panic("TODO4K: vector UPL not implemented");
6926 kr
= kmem_suballoc(map
, &vector_upl_dst_addr
,
6929 VM_FLAGS_ANYWHERE
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_NONE
,
6930 &vector_upl_submap
);
6931 if (kr
!= KERN_SUCCESS
) {
6932 panic("Vector UPL submap allocation failed\n");
6934 map
= vector_upl_submap
;
6935 vector_upl_set_submap(vector_upl
, vector_upl_submap
, vector_upl_dst_addr
);
6941 process_upl_to_enter
:
6943 if (curr_upl
== MAX_VECTOR_UPL_ELEMENTS
) {
6944 *dst_addr
= vector_upl_dst_addr
;
6945 upl_unlock(vector_upl
);
6946 return KERN_SUCCESS
;
6948 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
++ );
6950 goto process_upl_to_enter
;
6953 vector_upl_get_iostate(vector_upl
, upl
, &subupl_offset
, &subupl_size
);
6954 *dst_addr
= (vm_map_offset_t
)(vector_upl_dst_addr
+ (vm_map_offset_t
)subupl_offset
);
6957 * check to see if already mapped
6959 if (UPL_PAGE_LIST_MAPPED
& upl
->flags
) {
6961 return KERN_FAILURE
;
6965 size
= upl_adjusted_size(upl
, VM_MAP_PAGE_MASK(map
));
6967 if ((!(upl
->flags
& UPL_SHADOWED
)) &&
6968 ((upl
->flags
& UPL_HAS_BUSY
) ||
6969 !((upl
->flags
& (UPL_DEVICE_MEMORY
| UPL_IO_WIRE
)) || (upl
->map_object
->phys_contiguous
)))) {
6971 vm_page_t alias_page
;
6972 vm_object_offset_t new_offset
;
6973 unsigned int pg_num
;
6974 wpl_array_t lite_list
;
6976 if (upl
->flags
& UPL_INTERNAL
) {
6977 lite_list
= (wpl_array_t
)
6978 ((((uintptr_t)upl
) + sizeof(struct upl
))
6979 + ((size
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
6981 lite_list
= (wpl_array_t
)(((uintptr_t)upl
) + sizeof(struct upl
));
6983 object
= upl
->map_object
;
6984 upl
->map_object
= vm_object_allocate(vm_object_round_page(size
));
6986 vm_object_lock(upl
->map_object
);
6988 upl
->map_object
->shadow
= object
;
6989 upl
->map_object
->pageout
= TRUE
;
6990 upl
->map_object
->can_persist
= FALSE
;
6991 upl
->map_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
6992 upl
->map_object
->vo_shadow_offset
= upl_adjusted_offset(upl
, PAGE_MASK
) - object
->paging_offset
;
6993 assertf(page_aligned(upl
->map_object
->vo_shadow_offset
),
6994 "object %p shadow_offset 0x%llx",
6996 (uint64_t)upl
->map_object
->vo_shadow_offset
);
6997 upl
->map_object
->wimg_bits
= object
->wimg_bits
;
6998 assertf(page_aligned(upl
->map_object
->vo_shadow_offset
),
6999 "object %p shadow_offset 0x%llx",
7000 upl
->map_object
, upl
->map_object
->vo_shadow_offset
);
7001 offset
= upl
->map_object
->vo_shadow_offset
;
7003 size
= upl_adjusted_size(upl
, VM_MAP_PAGE_MASK(map
));
7005 upl
->flags
|= UPL_SHADOWED
;
7008 pg_num
= (unsigned int) (new_offset
/ PAGE_SIZE
);
7009 assert(pg_num
== new_offset
/ PAGE_SIZE
);
7011 if (lite_list
[pg_num
>> 5] & (1U << (pg_num
& 31))) {
7012 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
7014 vm_object_lock(object
);
7016 m
= vm_page_lookup(object
, offset
);
7017 if (m
== VM_PAGE_NULL
) {
7018 panic("vm_upl_map: page missing\n");
7022 * Convert the fictitious page to a private
7023 * shadow of the real page.
7025 assert(alias_page
->vmp_fictitious
);
7026 alias_page
->vmp_fictitious
= FALSE
;
7027 alias_page
->vmp_private
= TRUE
;
7028 alias_page
->vmp_free_when_done
= TRUE
;
7030 * since m is a page in the upl it must
7031 * already be wired or BUSY, so it's
7032 * safe to assign the underlying physical
7035 VM_PAGE_SET_PHYS_PAGE(alias_page
, VM_PAGE_GET_PHYS_PAGE(m
));
7037 vm_object_unlock(object
);
7039 vm_page_lockspin_queues();
7040 vm_page_wire(alias_page
, VM_KERN_MEMORY_NONE
, TRUE
);
7041 vm_page_unlock_queues();
7043 vm_page_insert_wired(alias_page
, upl
->map_object
, new_offset
, VM_KERN_MEMORY_NONE
);
7045 assert(!alias_page
->vmp_wanted
);
7046 alias_page
->vmp_busy
= FALSE
;
7047 alias_page
->vmp_absent
= FALSE
;
7050 offset
+= PAGE_SIZE_64
;
7051 new_offset
+= PAGE_SIZE_64
;
7053 vm_object_unlock(upl
->map_object
);
7055 if (upl
->flags
& UPL_SHADOWED
) {
7058 offset
= upl_adjusted_offset(upl
, VM_MAP_PAGE_MASK(map
)) - upl
->map_object
->paging_offset
;
7061 size
= upl_adjusted_size(upl
, VM_MAP_PAGE_MASK(map
));
7063 vm_object_reference(upl
->map_object
);
7068 * NEED A UPL_MAP ALIAS
7070 kr
= vm_map_enter(map
, dst_addr
, (vm_map_size_t
)size
, (vm_map_offset_t
) 0,
7071 VM_FLAGS_ANYWHERE
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_OSFMK
,
7072 upl
->map_object
, offset
, FALSE
,
7073 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
7075 if (kr
!= KERN_SUCCESS
) {
7076 vm_object_deallocate(upl
->map_object
);
7081 kr
= vm_map_enter(map
, dst_addr
, (vm_map_size_t
)size
, (vm_map_offset_t
) 0,
7082 VM_FLAGS_FIXED
, VM_MAP_KERNEL_FLAGS_NONE
, VM_KERN_MEMORY_OSFMK
,
7083 upl
->map_object
, offset
, FALSE
,
7084 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
7086 panic("vm_map_enter failed for a Vector UPL\n");
7089 vm_object_lock(upl
->map_object
);
7091 for (addr
= *dst_addr
; size
> 0; size
-= PAGE_SIZE
, addr
+= PAGE_SIZE
) {
7092 m
= vm_page_lookup(upl
->map_object
, offset
);
7095 m
->vmp_pmapped
= TRUE
;
7097 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
7098 * but only in kernel space. If this was on a user map,
7099 * we'd have to set the wpmapped bit. */
7100 /* m->vmp_wpmapped = TRUE; */
7101 assert(map
->pmap
== kernel_pmap
);
7103 PMAP_ENTER(map
->pmap
, addr
, m
, VM_PROT_DEFAULT
, VM_PROT_NONE
, 0, TRUE
, kr
);
7105 assert(kr
== KERN_SUCCESS
);
7107 kasan_notify_address(addr
, PAGE_SIZE_64
);
7110 offset
+= PAGE_SIZE_64
;
7112 vm_object_unlock(upl
->map_object
);
7115 * hold a reference for the mapping
7118 upl
->flags
|= UPL_PAGE_LIST_MAPPED
;
7119 upl
->kaddr
= (vm_offset_t
) *dst_addr
;
7120 assert(upl
->kaddr
== *dst_addr
);
7123 goto process_upl_to_enter
;
7127 vm_map_offset_t addr_adjustment
;
7129 addr_adjustment
= (vm_map_offset_t
)(upl
->u_offset
- upl_adjusted_offset(upl
, VM_MAP_PAGE_MASK(map
)));
7130 if (addr_adjustment
) {
7131 assert(VM_MAP_PAGE_MASK(map
) != PAGE_MASK
);
7132 DEBUG4K_UPL("dst_addr 0x%llx (+ 0x%llx) -> 0x%llx\n", (uint64_t)*dst_addr
, (uint64_t)addr_adjustment
, (uint64_t)(*dst_addr
+ addr_adjustment
));
7133 *dst_addr
+= addr_adjustment
;
7139 return KERN_SUCCESS
;
7143 * Internal routine to remove a UPL mapping from a VM map.
7145 * XXX - This should just be doable through a standard
7146 * vm_map_remove() operation. Otherwise, implicit clean-up
7147 * of the target map won't be able to correctly remove
7148 * these (and release the reference on the UPL). Having
7149 * to do this means we can't map these into user-space
7159 int isVectorUPL
= 0, curr_upl
= 0;
7160 upl_t vector_upl
= NULL
;
7162 if (upl
== UPL_NULL
) {
7163 return KERN_INVALID_ARGUMENT
;
7166 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
7167 int unmapped
= 0, valid_upls
= 0;
7169 upl_lock(vector_upl
);
7170 for (curr_upl
= 0; curr_upl
< MAX_VECTOR_UPL_ELEMENTS
; curr_upl
++) {
7171 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
);
7176 if (!(UPL_PAGE_LIST_MAPPED
& upl
->flags
)) {
7182 if (unmapped
!= valid_upls
) {
7183 panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped
, valid_upls
);
7185 upl_unlock(vector_upl
);
7186 return KERN_FAILURE
;
7194 process_upl_to_remove
:
7196 if (curr_upl
== MAX_VECTOR_UPL_ELEMENTS
) {
7197 vm_map_t v_upl_submap
;
7198 vm_offset_t v_upl_submap_dst_addr
;
7199 vector_upl_get_submap(vector_upl
, &v_upl_submap
, &v_upl_submap_dst_addr
);
7201 vm_map_remove(map
, v_upl_submap_dst_addr
,
7202 v_upl_submap_dst_addr
+ vector_upl
->u_size
,
7203 VM_MAP_REMOVE_NO_FLAGS
);
7204 vm_map_deallocate(v_upl_submap
);
7205 upl_unlock(vector_upl
);
7206 return KERN_SUCCESS
;
7209 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
++ );
7211 goto process_upl_to_remove
;
7215 if (upl
->flags
& UPL_PAGE_LIST_MAPPED
) {
7217 size
= upl_adjusted_size(upl
, VM_MAP_PAGE_MASK(map
));
7219 assert(upl
->ref_count
> 1);
7220 upl
->ref_count
--; /* removing mapping ref */
7222 upl
->flags
&= ~UPL_PAGE_LIST_MAPPED
;
7223 upl
->kaddr
= (vm_offset_t
) 0;
7230 vm_map_trunc_page(addr
,
7231 VM_MAP_PAGE_MASK(map
)),
7232 vm_map_round_page(addr
+ size
,
7233 VM_MAP_PAGE_MASK(map
)),
7234 VM_MAP_REMOVE_NO_FLAGS
);
7235 return KERN_SUCCESS
;
7238 * If it's a Vectored UPL, we'll be removing the entire
7239 * submap anyways, so no need to remove individual UPL
7240 * element mappings from within the submap
7242 goto process_upl_to_remove
;
7247 return KERN_FAILURE
;
7254 upl_offset_t offset
,
7257 upl_page_info_t
*page_list
,
7258 mach_msg_type_number_t count
,
7261 upl_size_t xfer_size
, subupl_size
;
7262 vm_object_t shadow_object
;
7264 vm_object_t m_object
;
7265 vm_object_offset_t target_offset
;
7266 upl_offset_t subupl_offset
= offset
;
7268 wpl_array_t lite_list
;
7270 int clear_refmod
= 0;
7271 int pgpgout_count
= 0;
7272 struct vm_page_delayed_work dw_array
;
7273 struct vm_page_delayed_work
*dwp
, *dwp_start
;
7274 bool dwp_finish_ctx
= TRUE
;
7277 int isVectorUPL
= 0;
7278 upl_t vector_upl
= NULL
;
7279 boolean_t should_be_throttled
= FALSE
;
7281 vm_page_t nxt_page
= VM_PAGE_NULL
;
7282 int fast_path_possible
= 0;
7283 int fast_path_full_commit
= 0;
7284 int throttle_page
= 0;
7285 int unwired_count
= 0;
7286 int local_queue_count
= 0;
7287 vm_page_t first_local
, last_local
;
7288 vm_object_offset_t obj_start
, obj_end
, obj_offset
;
7289 kern_return_t kr
= KERN_SUCCESS
;
7291 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx flags 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, flags);
7293 dwp_start
= dwp
= NULL
;
7298 if (upl
== UPL_NULL
) {
7299 return KERN_INVALID_ARGUMENT
;
7303 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
7304 dwp_start
= vm_page_delayed_work_get_ctx();
7305 if (dwp_start
== NULL
) {
7306 dwp_start
= &dw_array
;
7308 dwp_finish_ctx
= FALSE
;
7317 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
7319 upl_lock(vector_upl
);
7324 process_upl_to_commit
:
7328 offset
= subupl_offset
;
7330 upl_unlock(vector_upl
);
7334 upl
= vector_upl_subupl_byoffset(vector_upl
, &offset
, &size
);
7336 upl_unlock(vector_upl
);
7340 page_list
= UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl
);
7341 subupl_size
-= size
;
7342 subupl_offset
+= size
;
7346 if (upl
->upl_commit_index
< UPL_DEBUG_COMMIT_RECORDS
) {
7347 (void) OSBacktrace(&upl
->upl_commit_records
[upl
->upl_commit_index
].c_retaddr
[0], UPL_DEBUG_STACK_FRAMES
);
7349 upl
->upl_commit_records
[upl
->upl_commit_index
].c_beg
= offset
;
7350 upl
->upl_commit_records
[upl
->upl_commit_index
].c_end
= (offset
+ size
);
7352 upl
->upl_commit_index
++;
7355 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
7357 } else if ((offset
+ size
) <= upl_adjusted_size(upl
, PAGE_MASK
)) {
7363 upl_unlock(vector_upl
);
7365 DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl
, upl
->u_offset
, upl
->u_size
, offset
, size
);
7369 if (upl
->flags
& UPL_SET_DIRTY
) {
7370 flags
|= UPL_COMMIT_SET_DIRTY
;
7372 if (upl
->flags
& UPL_CLEAR_DIRTY
) {
7373 flags
|= UPL_COMMIT_CLEAR_DIRTY
;
7376 if (upl
->flags
& UPL_INTERNAL
) {
7377 lite_list
= (wpl_array_t
) ((((uintptr_t)upl
) + sizeof(struct upl
))
7378 + ((upl_adjusted_size(upl
, PAGE_MASK
) / PAGE_SIZE
) * sizeof(upl_page_info_t
)));
7380 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
7383 object
= upl
->map_object
;
7385 if (upl
->flags
& UPL_SHADOWED
) {
7386 vm_object_lock(object
);
7387 shadow_object
= object
->shadow
;
7389 shadow_object
= object
;
7391 entry
= offset
/ PAGE_SIZE
;
7392 target_offset
= (vm_object_offset_t
)offset
;
7394 if (upl
->flags
& UPL_KERNEL_OBJECT
) {
7395 vm_object_lock_shared(shadow_object
);
7397 vm_object_lock(shadow_object
);
7400 VM_OBJECT_WIRED_PAGE_UPDATE_START(shadow_object
);
7402 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7403 assert(shadow_object
->blocked_access
);
7404 shadow_object
->blocked_access
= FALSE
;
7405 vm_object_wakeup(object
, VM_OBJECT_EVENT_UNBLOCKED
);
7408 if (shadow_object
->code_signed
) {
7411 * If the object is code-signed, do not let this UPL tell
7412 * us if the pages are valid or not. Let the pages be
7413 * validated by VM the normal way (when they get mapped or
7416 flags
&= ~UPL_COMMIT_CS_VALIDATED
;
7420 * No page list to get the code-signing info from !?
7422 flags
&= ~UPL_COMMIT_CS_VALIDATED
;
7424 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object
->internal
) {
7425 should_be_throttled
= TRUE
;
7428 if ((upl
->flags
& UPL_IO_WIRE
) &&
7429 !(flags
& UPL_COMMIT_FREE_ABSENT
) &&
7431 shadow_object
->purgable
!= VM_PURGABLE_VOLATILE
&&
7432 shadow_object
->purgable
!= VM_PURGABLE_EMPTY
) {
7433 if (!vm_page_queue_empty(&shadow_object
->memq
)) {
7434 if (size
== shadow_object
->vo_size
) {
7435 nxt_page
= (vm_page_t
)vm_page_queue_first(&shadow_object
->memq
);
7436 fast_path_full_commit
= 1;
7438 fast_path_possible
= 1;
7440 if (!VM_DYNAMIC_PAGING_ENABLED() && shadow_object
->internal
&&
7441 (shadow_object
->purgable
== VM_PURGABLE_DENY
||
7442 shadow_object
->purgable
== VM_PURGABLE_NONVOLATILE
||
7443 shadow_object
->purgable
== VM_PURGABLE_VOLATILE
)) {
7448 first_local
= VM_PAGE_NULL
;
7449 last_local
= VM_PAGE_NULL
;
7451 obj_start
= target_offset
+ upl
->u_offset
- shadow_object
->paging_offset
;
7452 obj_end
= obj_start
+ xfer_size
;
7453 obj_start
= vm_object_trunc_page(obj_start
);
7454 obj_end
= vm_object_round_page(obj_end
);
7455 for (obj_offset
= obj_start
;
7456 obj_offset
< obj_end
;
7457 obj_offset
+= PAGE_SIZE
) {
7465 if (upl
->flags
& UPL_LITE
) {
7466 unsigned int pg_num
;
7468 if (nxt_page
!= VM_PAGE_NULL
) {
7470 nxt_page
= (vm_page_t
)vm_page_queue_next(&nxt_page
->vmp_listq
);
7471 target_offset
= m
->vmp_offset
;
7473 pg_num
= (unsigned int) (target_offset
/ PAGE_SIZE
);
7474 assert(pg_num
== target_offset
/ PAGE_SIZE
);
7476 if (lite_list
[pg_num
>> 5] & (1U << (pg_num
& 31))) {
7477 lite_list
[pg_num
>> 5] &= ~(1U << (pg_num
& 31));
7479 if (!(upl
->flags
& UPL_KERNEL_OBJECT
) && m
== VM_PAGE_NULL
) {
7480 m
= vm_page_lookup(shadow_object
, obj_offset
);
7486 if (upl
->flags
& UPL_SHADOWED
) {
7487 if ((t
= vm_page_lookup(object
, target_offset
)) != VM_PAGE_NULL
) {
7488 t
->vmp_free_when_done
= FALSE
;
7492 if (!(upl
->flags
& UPL_KERNEL_OBJECT
) && m
== VM_PAGE_NULL
) {
7493 m
= vm_page_lookup(shadow_object
, target_offset
+ object
->vo_shadow_offset
);
7497 if (m
== VM_PAGE_NULL
) {
7498 goto commit_next_page
;
7501 m_object
= VM_PAGE_OBJECT(m
);
7503 if (m
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) {
7504 assert(m
->vmp_busy
);
7506 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7507 goto commit_next_page
;
7510 if (flags
& UPL_COMMIT_CS_VALIDATED
) {
7513 * Set the code signing bits according to
7514 * what the UPL says they should be.
7516 m
->vmp_cs_validated
|= page_list
[entry
].cs_validated
;
7517 m
->vmp_cs_tainted
|= page_list
[entry
].cs_tainted
;
7518 m
->vmp_cs_nx
|= page_list
[entry
].cs_nx
;
7520 if (flags
& UPL_COMMIT_WRITTEN_BY_KERNEL
) {
7521 m
->vmp_written_by_kernel
= TRUE
;
7524 if (upl
->flags
& UPL_IO_WIRE
) {
7526 page_list
[entry
].phys_addr
= 0;
7529 if (flags
& UPL_COMMIT_SET_DIRTY
) {
7530 SET_PAGE_DIRTY(m
, FALSE
);
7531 } else if (flags
& UPL_COMMIT_CLEAR_DIRTY
) {
7532 m
->vmp_dirty
= FALSE
;
7534 if (!(flags
& UPL_COMMIT_CS_VALIDATED
) &&
7535 m
->vmp_cs_validated
&&
7536 m
->vmp_cs_tainted
!= VMP_CS_ALL_TRUE
) {
7539 * This page is no longer dirty
7540 * but could have been modified,
7541 * so it will need to be
7544 m
->vmp_cs_validated
= VMP_CS_ALL_FALSE
;
7546 VM_PAGEOUT_DEBUG(vm_cs_validated_resets
, 1);
7548 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
7550 clear_refmod
|= VM_MEM_MODIFIED
;
7552 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7554 * We blocked access to the pages in this UPL.
7555 * Clear the "busy" bit and wake up any waiter
7558 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7560 if (fast_path_possible
) {
7561 assert(m_object
->purgable
!= VM_PURGABLE_EMPTY
);
7562 assert(m_object
->purgable
!= VM_PURGABLE_VOLATILE
);
7563 if (m
->vmp_absent
) {
7564 assert(m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
7565 assert(m
->vmp_wire_count
== 0);
7566 assert(m
->vmp_busy
);
7568 m
->vmp_absent
= FALSE
;
7569 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7571 if (m
->vmp_wire_count
== 0) {
7572 panic("wire_count == 0, m = %p, obj = %p\n", m
, shadow_object
);
7574 assert(m
->vmp_q_state
== VM_PAGE_IS_WIRED
);
7577 * XXX FBDP need to update some other
7578 * counters here (purgeable_wired_count)
7581 assert(m
->vmp_wire_count
> 0);
7582 m
->vmp_wire_count
--;
7584 if (m
->vmp_wire_count
== 0) {
7585 m
->vmp_q_state
= VM_PAGE_NOT_ON_Q
;
7589 if (m
->vmp_wire_count
== 0) {
7590 assert(m
->vmp_pageq
.next
== 0 && m
->vmp_pageq
.prev
== 0);
7592 if (last_local
== VM_PAGE_NULL
) {
7593 assert(first_local
== VM_PAGE_NULL
);
7598 assert(first_local
!= VM_PAGE_NULL
);
7600 m
->vmp_pageq
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local
);
7601 first_local
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(m
);
7604 local_queue_count
++;
7606 if (throttle_page
) {
7607 m
->vmp_q_state
= VM_PAGE_ON_THROTTLED_Q
;
7609 if (flags
& UPL_COMMIT_INACTIVATE
) {
7610 if (shadow_object
->internal
) {
7611 m
->vmp_q_state
= VM_PAGE_ON_INACTIVE_INTERNAL_Q
;
7613 m
->vmp_q_state
= VM_PAGE_ON_INACTIVE_EXTERNAL_Q
;
7616 m
->vmp_q_state
= VM_PAGE_ON_ACTIVE_Q
;
7621 if (flags
& UPL_COMMIT_INACTIVATE
) {
7622 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
7623 clear_refmod
|= VM_MEM_REFERENCED
;
7625 if (m
->vmp_absent
) {
7626 if (flags
& UPL_COMMIT_FREE_ABSENT
) {
7627 dwp
->dw_mask
|= DW_vm_page_free
;
7629 m
->vmp_absent
= FALSE
;
7630 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
7632 if (!(dwp
->dw_mask
& DW_vm_page_deactivate_internal
)) {
7633 dwp
->dw_mask
|= DW_vm_page_activate
;
7637 dwp
->dw_mask
|= DW_vm_page_unwire
;
7640 goto commit_next_page
;
7642 assert(m
->vmp_q_state
!= VM_PAGE_USED_BY_COMPRESSOR
);
7645 page_list
[entry
].phys_addr
= 0;
7649 * make sure to clear the hardware
7650 * modify or reference bits before
7651 * releasing the BUSY bit on this page
7652 * otherwise we risk losing a legitimate
7655 if (flags
& UPL_COMMIT_CLEAR_DIRTY
) {
7656 m
->vmp_dirty
= FALSE
;
7658 clear_refmod
|= VM_MEM_MODIFIED
;
7660 if (m
->vmp_laundry
) {
7661 dwp
->dw_mask
|= DW_vm_pageout_throttle_up
;
7664 if (VM_PAGE_WIRED(m
)) {
7665 m
->vmp_free_when_done
= FALSE
;
7668 if (!(flags
& UPL_COMMIT_CS_VALIDATED
) &&
7669 m
->vmp_cs_validated
&&
7670 m
->vmp_cs_tainted
!= VMP_CS_ALL_TRUE
) {
7673 * This page is no longer dirty
7674 * but could have been modified,
7675 * so it will need to be
7678 m
->vmp_cs_validated
= VMP_CS_ALL_FALSE
;
7680 VM_PAGEOUT_DEBUG(vm_cs_validated_resets
, 1);
7682 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
));
7684 if (m
->vmp_overwriting
) {
7686 * the (COPY_OUT_FROM == FALSE) request_page_list case
7689 #if CONFIG_PHANTOM_CACHE
7690 if (m
->vmp_absent
&& !m_object
->internal
) {
7691 dwp
->dw_mask
|= DW_vm_phantom_cache_update
;
7694 m
->vmp_absent
= FALSE
;
7696 dwp
->dw_mask
|= DW_clear_busy
;
7699 * alternate (COPY_OUT_FROM == FALSE) page_list case
7700 * Occurs when the original page was wired
7701 * at the time of the list request
7703 assert(VM_PAGE_WIRED(m
));
7705 dwp
->dw_mask
|= DW_vm_page_unwire
; /* reactivates */
7707 m
->vmp_overwriting
= FALSE
;
7709 m
->vmp_cleaning
= FALSE
;
7711 if (m
->vmp_free_when_done
) {
7713 * With the clean queue enabled, UPL_PAGEOUT should
7714 * no longer set the pageout bit. Its pages now go
7715 * to the clean queue.
7717 * We don't use the cleaned Q anymore and so this
7718 * assert isn't correct. The code for the clean Q
7719 * still exists and might be used in the future. If we
7720 * go back to the cleaned Q, we will re-enable this
7723 * assert(!(upl->flags & UPL_PAGEOUT));
7725 assert(!m_object
->internal
);
7727 m
->vmp_free_when_done
= FALSE
;
7729 if ((flags
& UPL_COMMIT_SET_DIRTY
) ||
7730 (m
->vmp_pmapped
&& (pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m
)) & VM_MEM_MODIFIED
))) {
7732 * page was re-dirtied after we started
7733 * the pageout... reactivate it since
7734 * we don't know whether the on-disk
7735 * copy matches what is now in memory
7737 SET_PAGE_DIRTY(m
, FALSE
);
7739 dwp
->dw_mask
|= DW_vm_page_activate
| DW_PAGE_WAKEUP
;
7741 if (upl
->flags
& UPL_PAGEOUT
) {
7742 VM_STAT_INCR(reactivations
);
7743 DTRACE_VM2(pgrec
, int, 1, (uint64_t *), NULL
);
7747 * page has been successfully cleaned
7748 * go ahead and free it for other use
7750 if (m_object
->internal
) {
7751 DTRACE_VM2(anonpgout
, int, 1, (uint64_t *), NULL
);
7753 DTRACE_VM2(fspgout
, int, 1, (uint64_t *), NULL
);
7755 m
->vmp_dirty
= FALSE
;
7758 dwp
->dw_mask
|= DW_vm_page_free
;
7760 goto commit_next_page
;
7763 * It is a part of the semantic of COPYOUT_FROM
7764 * UPLs that a commit implies cache sync
7765 * between the vm page and the backing store
7766 * this can be used to strip the precious bit
7769 if ((upl
->flags
& UPL_PAGE_SYNC_DONE
) || (flags
& UPL_COMMIT_CLEAR_PRECIOUS
)) {
7770 m
->vmp_precious
= FALSE
;
7773 if (flags
& UPL_COMMIT_SET_DIRTY
) {
7774 SET_PAGE_DIRTY(m
, FALSE
);
7776 m
->vmp_dirty
= FALSE
;
7779 /* with the clean queue on, move *all* cleaned pages to the clean queue */
7780 if (hibernate_cleaning_in_progress
== FALSE
&& !m
->vmp_dirty
&& (upl
->flags
& UPL_PAGEOUT
)) {
7783 VM_STAT_INCR(pageouts
);
7784 DTRACE_VM2(pgout
, int, 1, (uint64_t *), NULL
);
7786 dwp
->dw_mask
|= DW_enqueue_cleaned
;
7787 } else if (should_be_throttled
== TRUE
&& (m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
)) {
7789 * page coming back in from being 'frozen'...
7790 * it was dirty before it was frozen, so keep it so
7791 * the vm_page_activate will notice that it really belongs
7792 * on the throttle queue and put it there
7794 SET_PAGE_DIRTY(m
, FALSE
);
7795 dwp
->dw_mask
|= DW_vm_page_activate
;
7797 if ((flags
& UPL_COMMIT_INACTIVATE
) && !m
->vmp_clustered
&& (m
->vmp_q_state
!= VM_PAGE_ON_SPECULATIVE_Q
)) {
7798 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
7799 clear_refmod
|= VM_MEM_REFERENCED
;
7800 } else if (!VM_PAGE_PAGEABLE(m
)) {
7801 if (m
->vmp_clustered
|| (flags
& UPL_COMMIT_SPECULATE
)) {
7802 dwp
->dw_mask
|= DW_vm_page_speculate
;
7803 } else if (m
->vmp_reference
) {
7804 dwp
->dw_mask
|= DW_vm_page_activate
;
7806 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
7807 clear_refmod
|= VM_MEM_REFERENCED
;
7811 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
* We blocked access to the pages in this UPL.
7814 * Clear the "busy" bit on this page before we
7815 * wake up any waiter.
7817 dwp
->dw_mask
|= DW_clear_busy
;
7820 * Wakeup any thread waiting for the page to be un-cleaning.
7822 dwp
->dw_mask
|= DW_PAGE_WAKEUP
;
7826 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m
), clear_refmod
);
7829 target_offset
+= PAGE_SIZE_64
;
7830 xfer_size
-= PAGE_SIZE
;
7834 if (dwp
->dw_mask
& ~(DW_clear_busy
| DW_PAGE_WAKEUP
)) {
7835 VM_PAGE_ADD_DELAYED_WORK(dwp
, m
, dw_count
);
7837 if (dw_count
>= dw_limit
) {
7838 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, dwp_start
, dw_count
);
7844 if (dwp
->dw_mask
& DW_clear_busy
) {
7845 m
->vmp_busy
= FALSE
;
7848 if (dwp
->dw_mask
& DW_PAGE_WAKEUP
) {
7855 vm_page_do_delayed_work(shadow_object
, VM_KERN_MEMORY_NONE
, dwp_start
, dw_count
);
7860 if (fast_path_possible
) {
7861 assert(shadow_object
->purgable
!= VM_PURGABLE_VOLATILE
);
7862 assert(shadow_object
->purgable
!= VM_PURGABLE_EMPTY
);
7864 if (local_queue_count
|| unwired_count
) {
7865 if (local_queue_count
) {
7866 vm_page_t first_target
;
7867 vm_page_queue_head_t
*target_queue
;
7869 if (throttle_page
) {
7870 target_queue
= &vm_page_queue_throttled
;
7872 if (flags
& UPL_COMMIT_INACTIVATE
) {
7873 if (shadow_object
->internal
) {
7874 target_queue
= &vm_page_queue_anonymous
;
7876 target_queue
= &vm_page_queue_inactive
;
7879 target_queue
= &vm_page_queue_active
;
7883 * Transfer the entire local queue to a regular LRU page queues.
7885 vm_page_lockspin_queues();
7887 first_target
= (vm_page_t
) vm_page_queue_first(target_queue
);
7889 if (vm_page_queue_empty(target_queue
)) {
7890 target_queue
->prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local
);
7892 first_target
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local
);
7895 target_queue
->next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local
);
7896 first_local
->vmp_pageq
.prev
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(target_queue
);
7897 last_local
->vmp_pageq
.next
= VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_target
);
7900 * Adjust the global page counts.
7902 if (throttle_page
) {
7903 vm_page_throttled_count
+= local_queue_count
;
7905 if (flags
& UPL_COMMIT_INACTIVATE
) {
7906 if (shadow_object
->internal
) {
7907 vm_page_anonymous_count
+= local_queue_count
;
7909 vm_page_inactive_count
+= local_queue_count
;
7911 token_new_pagecount
+= local_queue_count
;
7913 vm_page_active_count
+= local_queue_count
;
7916 if (shadow_object
->internal
) {
7917 vm_page_pageable_internal_count
+= local_queue_count
;
7919 vm_page_pageable_external_count
+= local_queue_count
;
7923 vm_page_lockspin_queues();
7925 if (unwired_count
) {
7926 vm_page_wire_count
-= unwired_count
;
7927 VM_CHECK_MEMORYSTATUS
;
7929 vm_page_unlock_queues();
7931 VM_OBJECT_WIRED_PAGE_COUNT(shadow_object
, -unwired_count
);
7936 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
7938 } else if (upl
->flags
& UPL_LITE
) {
7944 if (!fast_path_full_commit
) {
7945 pg_num
= upl_adjusted_size(upl
, PAGE_MASK
) / PAGE_SIZE
;
7946 pg_num
= (pg_num
+ 31) >> 5;
7948 for (i
= 0; i
< pg_num
; i
++) {
7949 if (lite_list
[i
] != 0) {
7956 if (vm_page_queue_empty(&upl
->map_object
->memq
)) {
7960 if (occupied
== 0) {
7962 * If this UPL element belongs to a Vector UPL and is
7963 * empty, then this is the right function to deallocate
7964 * it. So go ahead set the *empty variable. The flag
7965 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
7966 * should be considered relevant for the Vector UPL and not
7967 * the internal UPLs.
7969 if ((upl
->flags
& UPL_COMMIT_NOTIFY_EMPTY
) || isVectorUPL
) {
7973 if (object
== shadow_object
&& !(upl
->flags
& UPL_KERNEL_OBJECT
)) {
7975 * this is not a paging object
7976 * so we need to drop the paging reference
7977 * that was taken when we created the UPL
7978 * against this object
7980 vm_object_activity_end(shadow_object
);
7981 vm_object_collapse(shadow_object
, 0, TRUE
);
* we donated the paging reference to
7985 * the map object... vm_pageout_object_terminate
7986 * will drop this reference
7990 VM_OBJECT_WIRED_PAGE_UPDATE_END(shadow_object
, shadow_object
->wire_tag
);
7991 vm_object_unlock(shadow_object
);
7992 if (object
!= shadow_object
) {
7993 vm_object_unlock(object
);
8000 * If we completed our operations on an UPL that is
8001 * part of a Vectored UPL and if empty is TRUE, then
8002 * we should go ahead and deallocate this UPL element.
8003 * Then we check if this was the last of the UPL elements
8004 * within that Vectored UPL. If so, set empty to TRUE
8005 * so that in ubc_upl_commit_range or ubc_upl_commit, we
8006 * can go ahead and deallocate the Vector UPL too.
8008 if (*empty
== TRUE
) {
8009 *empty
= vector_upl_set_subupl(vector_upl
, upl
, 0);
8010 upl_deallocate(upl
);
8012 goto process_upl_to_commit
;
8014 if (pgpgout_count
) {
8015 DTRACE_VM2(pgpgout
, int, pgpgout_count
, (uint64_t *), NULL
);
8020 if (dwp_start
&& dwp_finish_ctx
) {
8021 vm_page_delayed_work_finish_ctx(dwp_start
);
8022 dwp_start
= dwp
= NULL
;
8031 upl_offset_t offset
,
8036 upl_page_info_t
*user_page_list
= NULL
;
8037 upl_size_t xfer_size
, subupl_size
;
8038 vm_object_t shadow_object
;
8040 vm_object_offset_t target_offset
;
8041 upl_offset_t subupl_offset
= offset
;
8043 wpl_array_t lite_list
;
8045 struct vm_page_delayed_work dw_array
;
8046 struct vm_page_delayed_work
*dwp
, *dwp_start
;
8047 bool dwp_finish_ctx
= TRUE
;
8050 int isVectorUPL
= 0;
8051 upl_t vector_upl
= NULL
;
8052 vm_object_offset_t obj_start
, obj_end
, obj_offset
;
8053 kern_return_t kr
= KERN_SUCCESS
;
8055 // DEBUG4K_UPL("upl %p (u_offset 0x%llx u_size 0x%llx) object %p offset 0x%llx size 0x%llx error 0x%x\n", upl, (uint64_t)upl->u_offset, (uint64_t)upl->u_size, upl->map_object, (uint64_t)offset, (uint64_t)size, error);
8057 dwp_start
= dwp
= NULL
;
8062 if (upl
== UPL_NULL
) {
8063 return KERN_INVALID_ARGUMENT
;
8066 if ((upl
->flags
& UPL_IO_WIRE
) && !(error
& UPL_ABORT_DUMP_PAGES
)) {
8067 return upl_commit_range(upl
, offset
, size
, UPL_COMMIT_FREE_ABSENT
, NULL
, 0, empty
);
8071 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
8072 dwp_start
= vm_page_delayed_work_get_ctx();
8073 if (dwp_start
== NULL
) {
8074 dwp_start
= &dw_array
;
8076 dwp_finish_ctx
= FALSE
;
8081 if ((isVectorUPL
= vector_upl_is_valid(upl
))) {
8083 upl_lock(vector_upl
);
8088 process_upl_to_abort
:
8091 offset
= subupl_offset
;
8093 upl_unlock(vector_upl
);
8097 upl
= vector_upl_subupl_byoffset(vector_upl
, &offset
, &size
);
8099 upl_unlock(vector_upl
);
8103 subupl_size
-= size
;
8104 subupl_offset
+= size
;
8110 if (upl
->upl_commit_index
< UPL_DEBUG_COMMIT_RECORDS
) {
8111 (void) OSBacktrace(&upl
->upl_commit_records
[upl
->upl_commit_index
].c_retaddr
[0], UPL_DEBUG_STACK_FRAMES
);
8113 upl
->upl_commit_records
[upl
->upl_commit_index
].c_beg
= offset
;
8114 upl
->upl_commit_records
[upl
->upl_commit_index
].c_end
= (offset
+ size
);
8115 upl
->upl_commit_records
[upl
->upl_commit_index
].c_aborted
= 1;
8117 upl
->upl_commit_index
++;
8120 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
8122 } else if ((offset
+ size
) <= upl_adjusted_size(upl
, PAGE_MASK
)) {
8128 upl_unlock(vector_upl
);
8130 DEBUG4K_ERROR("upl %p (u_offset 0x%llx u_size 0x%x) offset 0x%x size 0x%x\n", upl
, upl
->u_offset
, upl
->u_size
, offset
, size
);
8134 if (upl
->flags
& UPL_INTERNAL
) {
8135 lite_list
= (wpl_array_t
)
8136 ((((uintptr_t)upl
) + sizeof(struct upl
))
8137 + ((upl_adjusted_size(upl
, PAGE_MASK
) / PAGE_SIZE
) * sizeof(upl_page_info_t
)));
8139 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
8141 lite_list
= (wpl_array_t
)
8142 (((uintptr_t)upl
) + sizeof(struct upl
));
8144 object
= upl
->map_object
;
8146 if (upl
->flags
& UPL_SHADOWED
) {
8147 vm_object_lock(object
);
8148 shadow_object
= object
->shadow
;
8150 shadow_object
= object
;
8153 entry
= offset
/ PAGE_SIZE
;
8154 target_offset
= (vm_object_offset_t
)offset
;
8156 if (upl
->flags
& UPL_KERNEL_OBJECT
) {
8157 vm_object_lock_shared(shadow_object
);
8159 vm_object_lock(shadow_object
);
8162 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
8163 assert(shadow_object
->blocked_access
);
8164 shadow_object
->blocked_access
= FALSE
;
8165 vm_object_wakeup(object
, VM_OBJECT_EVENT_UNBLOCKED
);
8168 if ((error
& UPL_ABORT_DUMP_PAGES
) && (upl
->flags
& UPL_KERNEL_OBJECT
)) {
8169 panic("upl_abort_range: kernel_object being DUMPED");
8172 obj_start
= target_offset
+ upl
->u_offset
- shadow_object
->paging_offset
;
8173 obj_end
= obj_start
+ xfer_size
;
8174 obj_start
= vm_object_trunc_page(obj_start
);
8175 obj_end
= vm_object_round_page(obj_end
);
8176 for (obj_offset
= obj_start
;
8177 obj_offset
< obj_end
;
8178 obj_offset
+= PAGE_SIZE
) {
8180 unsigned int pg_num
;
8183 pg_num
= (unsigned int) (target_offset
/ PAGE_SIZE
);
8184 assert(pg_num
== target_offset
/ PAGE_SIZE
);
8188 if (user_page_list
) {
8189 needed
= user_page_list
[pg_num
].needed
;
8195 if (upl
->flags
& UPL_LITE
) {
8196 if (lite_list
[pg_num
>> 5] & (1U << (pg_num
& 31))) {
8197 lite_list
[pg_num
>> 5] &= ~(1U << (pg_num
& 31));
8199 if (!(upl
->flags
& UPL_KERNEL_OBJECT
)) {
8200 m
= vm_page_lookup(shadow_object
, obj_offset
);
8204 if (upl
->flags
& UPL_SHADOWED
) {
8205 if ((t
= vm_page_lookup(object
, target_offset
)) != VM_PAGE_NULL
) {
8206 t
->vmp_free_when_done
= FALSE
;
8210 if (m
== VM_PAGE_NULL
) {
8211 m
= vm_page_lookup(shadow_object
, target_offset
+ object
->vo_shadow_offset
);
8215 if ((upl
->flags
& UPL_KERNEL_OBJECT
)) {
8216 goto abort_next_page
;
		if (m != VM_PAGE_NULL) {
			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);

			if (m->vmp_absent) {
				boolean_t must_free = TRUE;

				/*
				 * COPYOUT = FALSE case
				 * check for error conditions which must
				 * be passed back to the pages customer
				 */
				if (error & UPL_ABORT_RESTART) {
					m->vmp_restart = TRUE;
					m->vmp_absent = FALSE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				} else if (error & UPL_ABORT_UNAVAILABLE) {
					m->vmp_restart = FALSE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				} else if (error & UPL_ABORT_ERROR) {
					m->vmp_restart = FALSE;
					m->vmp_absent = FALSE;
					m->vmp_error = TRUE;
					m->vmp_unusual = TRUE;
					must_free = FALSE;
				}
				if (m->vmp_clustered && needed == FALSE) {
					/*
					 * This page was a part of a speculative
					 * read-ahead initiated by the kernel
					 * itself.  No one is expecting this
					 * page and no one will clean up its
					 * error state if it ever becomes valid
					 * in the future.
					 * We have to free it here.
					 */
					must_free = TRUE;
				}
				m->vmp_cleaning = FALSE;

				if (m->vmp_overwriting && !m->vmp_busy) {
					/*
					 * this shouldn't happen since
					 * this is an 'absent' page, but
					 * it doesn't hurt to check for
					 * the 'alternate' method of
					 * stabilizing the page...
					 * we will mark 'busy' to be cleared
					 * in the following code which will
					 * take care of the primary stabilization
					 * method (i.e. setting 'busy' to TRUE)
					 */
					dwp->dw_mask |= DW_vm_page_unwire;
				}
				m->vmp_overwriting = FALSE;

				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);

				if (must_free == TRUE) {
					dwp->dw_mask |= DW_vm_page_free;
				} else {
					dwp->dw_mask |= DW_vm_page_activate;
				}
			} else {
				/*
				 * Handle the trusted pager throttle.
				 */
				if (m->vmp_laundry) {
					dwp->dw_mask |= DW_vm_pageout_throttle_up;
				}
				if (upl->flags & UPL_ACCESS_BLOCKED) {
					/*
					 * We blocked access to the pages in this UPL.
					 * Clear the "busy" bit and wake up any waiter
					 * for this page.
					 */
					dwp->dw_mask |= DW_clear_busy;
				}
				if (m->vmp_overwriting) {
					if (m->vmp_busy) {
						dwp->dw_mask |= DW_clear_busy;
					} else {
						/*
						 * deal with the 'alternate' method
						 * of stabilizing the page...
						 * we will either free the page
						 * or mark 'busy' to be cleared
						 * in the following code which will
						 * take care of the primary stabilization
						 * method (i.e. setting 'busy' to TRUE)
						 */
						dwp->dw_mask |= DW_vm_page_unwire;
					}
					m->vmp_overwriting = FALSE;
				}
				m->vmp_free_when_done = FALSE;
				m->vmp_cleaning = FALSE;

				if (error & UPL_ABORT_DUMP_PAGES) {
					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));

					dwp->dw_mask |= DW_vm_page_free;
				} else {
					if (!(dwp->dw_mask & DW_vm_page_unwire)) {
						if (error & UPL_ABORT_REFERENCE) {
							/*
							 * we've been told to explicitly
							 * reference this page... for
							 * file I/O, this is done by
							 * implementing an LRU on the inactive q
							 */
							dwp->dw_mask |= DW_vm_page_lru;
						} else if (!VM_PAGE_PAGEABLE(m)) {
							dwp->dw_mask |= DW_vm_page_deactivate_internal;
						}
					}
					dwp->dw_mask |= DW_PAGE_WAKEUP;
				}
			}
		}
abort_next_page:
		target_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;
		if (dwp->dw_mask) {
			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

				if (dw_count >= dw_limit) {
					vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);

					dwp = dwp_start;
					dw_count = 0;
				}
			} else {
				if (dwp->dw_mask & DW_clear_busy) {
					m->vmp_busy = FALSE;
				}
				if (dwp->dw_mask & DW_PAGE_WAKEUP) {
					PAGE_WAKEUP(m);
				}
			}
		}
	}
	if (dw_count) {
		vm_page_do_delayed_work(shadow_object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);

		dwp = dwp_start;
		dw_count = 0;
	}
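	/*
	 * Illustrative sketch (not part of the build; variable names are
	 * placeholders): the delayed-work batching idiom used above and in
	 * the commit path.  A context is grabbed up front (falling back to
	 * a single on-stack slot), work items are accumulated per page, and
	 * the batch is flushed whenever it reaches dw_limit or the loop ends.
	 *
	 *	dwp_start = dwp = vm_page_delayed_work_get_ctx();
	 *	dw_count = 0;
	 *	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
	 *
	 *	for each page m {
	 *		dwp->dw_mask = ...;
	 *		VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);
	 *		if (dw_count >= dw_limit) {
	 *			vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
	 *			dwp = dwp_start;
	 *			dw_count = 0;
	 *		}
	 *	}
	 *	if (dw_count) {
	 *		vm_page_do_delayed_work(object, tag, dwp_start, dw_count);
	 *	}
	 *	vm_page_delayed_work_finish_ctx(dwp_start);
	 */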
	occupied = 1;

	if (upl->flags & UPL_DEVICE_MEMORY) {
		occupied = 0;
	} else if (upl->flags & UPL_LITE) {
		int     pg_num;
		int     i;

		pg_num = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;
		pg_num = (pg_num + 31) >> 5;
		occupied = 0;

		for (i = 0; i < pg_num; i++) {
			if (lite_list[i] != 0) {
				occupied = 1;
				break;
			}
		}
	} else {
		if (vm_page_queue_empty(&upl->map_object->memq)) {
			occupied = 0;
		}
	}
	if (occupied == 0) {
		/*
		 * If this UPL element belongs to a Vector UPL and is
		 * empty, then this is the right function to deallocate
		 * it. So go ahead set the *empty variable. The flag
		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
		 * should be considered relevant for the Vector UPL and
		 * not the internal UPLs.
		 */
		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL) {
			*empty = TRUE;
		}

		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
			/*
			 * this is not a paging object
			 * so we need to drop the paging reference
			 * that was taken when we created the UPL
			 * against this object
			 */

			vm_object_activity_end(shadow_object);
			vm_object_collapse(shadow_object, 0, TRUE);
		} else {
			/*
			 * we donated the paging reference to
			 * the map object... vm_pageout_object_terminate
			 * will drop this reference
			 */
		}
	}
	vm_object_unlock(shadow_object);
	if (object != shadow_object) {
		vm_object_unlock(object);
	}

	if (!isVectorUPL) {
		upl_unlock(upl);
	} else {
		/*
		 * If we completed our operations on an UPL that is
		 * part of a Vectored UPL and if empty is TRUE, then
		 * we should go ahead and deallocate this UPL element.
		 * Then we check if this was the last of the UPL elements
		 * within that Vectored UPL. If so, set empty to TRUE
		 * so that in ubc_upl_abort_range or ubc_upl_abort, we
		 * can go ahead and deallocate the Vector UPL too.
		 */
		if (*empty == TRUE) {
			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
			upl_deallocate(upl);
		}
		goto process_upl_to_abort;
	}
	kr = KERN_SUCCESS;

done:
	if (dwp_start && dwp_finish_ctx) {
		vm_page_delayed_work_finish_ctx(dwp_start);
		dwp_start = dwp = NULL;
	}

	return kr;
}
kern_return_t
upl_abort(
	upl_t   upl,
	int     error)
{
	boolean_t       empty;

	if (upl == UPL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return upl_abort_range(upl, 0, upl->u_size, error, &empty);
}


/* an option on commit should be wire */
kern_return_t
upl_commit(
	upl_t                   upl,
	upl_page_info_t         *page_list,
	mach_msg_type_number_t  count)
{
	boolean_t       empty;

	if (upl == UPL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	return upl_commit_range(upl, 0, upl->u_size, 0,
	    page_list, count, &empty);
}
8502 vm_page_t m
, nxt_page
= VM_PAGE_NULL
;
8504 int wired_count
= 0;
8507 panic("iopl_valid_data: NULL upl");
8509 if (vector_upl_is_valid(upl
)) {
8510 panic("iopl_valid_data: vector upl");
8512 if ((upl
->flags
& (UPL_DEVICE_MEMORY
| UPL_SHADOWED
| UPL_ACCESS_BLOCKED
| UPL_IO_WIRE
| UPL_INTERNAL
)) != UPL_IO_WIRE
) {
8513 panic("iopl_valid_data: unsupported upl, flags = %x", upl
->flags
);
8516 object
= upl
->map_object
;
8518 if (object
== kernel_object
|| object
== compressor_object
) {
8519 panic("iopl_valid_data: object == kernel or compressor");
8522 if (object
->purgable
== VM_PURGABLE_VOLATILE
||
8523 object
->purgable
== VM_PURGABLE_EMPTY
) {
8524 panic("iopl_valid_data: object %p purgable %d",
8525 object
, object
->purgable
);
8528 size
= upl_adjusted_size(upl
, PAGE_MASK
);
8530 vm_object_lock(object
);
8531 VM_OBJECT_WIRED_PAGE_UPDATE_START(object
);
8533 if (object
->vo_size
== size
&& object
->resident_page_count
== (size
/ PAGE_SIZE
)) {
8534 nxt_page
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
8536 offset
= (vm_offset_t
)(upl_adjusted_offset(upl
, PAGE_MASK
) - object
->paging_offset
);
8540 if (nxt_page
!= VM_PAGE_NULL
) {
8542 nxt_page
= (vm_page_t
)vm_page_queue_next(&nxt_page
->vmp_listq
);
8544 m
= vm_page_lookup(object
, offset
);
8545 offset
+= PAGE_SIZE
;
8547 if (m
== VM_PAGE_NULL
) {
8548 panic("iopl_valid_data: missing expected page at offset %lx", (long)offset
);
8552 if (!m
->vmp_absent
) {
8553 panic("iopl_valid_data: busy page w/o absent");
8556 if (m
->vmp_pageq
.next
|| m
->vmp_pageq
.prev
) {
8557 panic("iopl_valid_data: busy+absent page on page queue");
8559 if (m
->vmp_reusable
) {
8560 panic("iopl_valid_data: %p is reusable", m
);
8563 m
->vmp_absent
= FALSE
;
8564 m
->vmp_dirty
= TRUE
;
8565 assert(m
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
8566 assert(m
->vmp_wire_count
== 0);
8567 m
->vmp_wire_count
++;
8568 assert(m
->vmp_wire_count
);
8569 if (m
->vmp_wire_count
== 1) {
8570 m
->vmp_q_state
= VM_PAGE_IS_WIRED
;
8573 panic("iopl_valid_data: %p already wired\n", m
);
8576 PAGE_WAKEUP_DONE(m
);
8581 VM_OBJECT_WIRED_PAGE_COUNT(object
, wired_count
);
8582 assert(object
->resident_page_count
>= object
->wired_page_count
);
8584 /* no need to adjust purgeable accounting for this object: */
8585 assert(object
->purgable
!= VM_PURGABLE_VOLATILE
);
8586 assert(object
->purgable
!= VM_PURGABLE_EMPTY
);
8588 vm_page_lockspin_queues();
8589 vm_page_wire_count
+= wired_count
;
8590 vm_page_unlock_queues();
8592 VM_OBJECT_WIRED_PAGE_UPDATE_END(object
, tag
);
8593 vm_object_unlock(object
);
void
vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op)
{
	unsigned int    cache_attr = 0;

	cache_attr = object->wimg_bits & VM_WIMG_MASK;
	assert(user_page_list);
	if (cache_attr != VM_WIMG_USE_DEFAULT) {
		PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
	}
}
boolean_t       vm_object_iopl_wire_full(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t);
kern_return_t   vm_object_iopl_wire_empty(vm_object_t, upl_t, upl_page_info_array_t, wpl_array_t, upl_control_flags_t, vm_tag_t, vm_object_offset_t *, int, int*);
8620 vm_object_iopl_wire_full(vm_object_t object
, upl_t upl
, upl_page_info_array_t user_page_list
,
8621 wpl_array_t lite_list
, upl_control_flags_t cntrl_flags
, vm_tag_t tag
)
8626 int delayed_unlock
= 0;
8627 boolean_t retval
= TRUE
;
8630 vm_object_lock_assert_exclusive(object
);
8631 assert(object
->purgable
!= VM_PURGABLE_VOLATILE
);
8632 assert(object
->purgable
!= VM_PURGABLE_EMPTY
);
8633 assert(object
->pager
== NULL
);
8634 assert(object
->copy
== NULL
);
8635 assert(object
->shadow
== NULL
);
8637 page_count
= object
->resident_page_count
;
8638 dst_page
= (vm_page_t
)vm_page_queue_first(&object
->memq
);
8640 vm_page_lock_queues();
8642 while (page_count
--) {
8643 if (dst_page
->vmp_busy
||
8644 dst_page
->vmp_fictitious
||
8645 dst_page
->vmp_absent
||
8646 dst_page
->vmp_error
||
8647 dst_page
->vmp_cleaning
||
8648 dst_page
->vmp_restart
||
8649 dst_page
->vmp_laundry
) {
8653 if ((cntrl_flags
& UPL_REQUEST_FORCE_COHERENCY
) && dst_page
->vmp_written_by_kernel
== TRUE
) {
8657 dst_page
->vmp_reference
= TRUE
;
8659 vm_page_wire(dst_page
, tag
, FALSE
);
8661 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
8662 SET_PAGE_DIRTY(dst_page
, FALSE
);
8664 entry
= (unsigned int)(dst_page
->vmp_offset
/ PAGE_SIZE
);
8665 assert(entry
>= 0 && entry
< object
->resident_page_count
);
8666 lite_list
[entry
>> 5] |= 1U << (entry
& 31);
8668 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
8670 if (phys_page
> upl
->highest_page
) {
8671 upl
->highest_page
= phys_page
;
8674 if (user_page_list
) {
8675 user_page_list
[entry
].phys_addr
= phys_page
;
8676 user_page_list
[entry
].absent
= dst_page
->vmp_absent
;
8677 user_page_list
[entry
].dirty
= dst_page
->vmp_dirty
;
8678 user_page_list
[entry
].free_when_done
= dst_page
->vmp_free_when_done
;
8679 user_page_list
[entry
].precious
= dst_page
->vmp_precious
;
8680 user_page_list
[entry
].device
= FALSE
;
8681 user_page_list
[entry
].speculative
= FALSE
;
8682 user_page_list
[entry
].cs_validated
= FALSE
;
8683 user_page_list
[entry
].cs_tainted
= FALSE
;
8684 user_page_list
[entry
].cs_nx
= FALSE
;
8685 user_page_list
[entry
].needed
= FALSE
;
8686 user_page_list
[entry
].mark
= FALSE
;
8688 if (delayed_unlock
++ > 256) {
8690 lck_mtx_yield(&vm_page_queue_lock
);
8692 VM_CHECK_MEMORYSTATUS
;
8694 dst_page
= (vm_page_t
)vm_page_queue_next(&dst_page
->vmp_listq
);
8697 vm_page_unlock_queues();
8699 VM_CHECK_MEMORYSTATUS
;
8706 vm_object_iopl_wire_empty(vm_object_t object
, upl_t upl
, upl_page_info_array_t user_page_list
,
8707 wpl_array_t lite_list
, upl_control_flags_t cntrl_flags
, vm_tag_t tag
, vm_object_offset_t
*dst_offset
,
8708 int page_count
, int* page_grab_count
)
8711 boolean_t no_zero_fill
= FALSE
;
8713 int pages_wired
= 0;
8714 int pages_inserted
= 0;
8716 uint64_t delayed_ledger_update
= 0;
8717 kern_return_t ret
= KERN_SUCCESS
;
8721 vm_object_lock_assert_exclusive(object
);
8722 assert(object
->purgable
!= VM_PURGABLE_VOLATILE
);
8723 assert(object
->purgable
!= VM_PURGABLE_EMPTY
);
8724 assert(object
->pager
== NULL
);
8725 assert(object
->copy
== NULL
);
8726 assert(object
->shadow
== NULL
);
8728 if (cntrl_flags
& UPL_SET_INTERRUPTIBLE
) {
8729 interruptible
= THREAD_ABORTSAFE
;
8731 interruptible
= THREAD_UNINT
;
8734 if (cntrl_flags
& (UPL_NOZEROFILL
| UPL_NOZEROFILLIO
)) {
8735 no_zero_fill
= TRUE
;
8739 #if CONFIG_SECLUDED_MEMORY
8740 if (object
->can_grab_secluded
) {
8741 grab_options
|= VM_PAGE_GRAB_SECLUDED
;
8743 #endif /* CONFIG_SECLUDED_MEMORY */
8745 while (page_count
--) {
8746 while ((dst_page
= vm_page_grab_options(grab_options
))
8748 OSAddAtomic(page_count
, &vm_upl_wait_for_pages
);
8750 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
8752 if (vm_page_wait(interruptible
) == FALSE
) {
8756 OSAddAtomic(-page_count
, &vm_upl_wait_for_pages
);
8758 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, -1);
8760 ret
= MACH_SEND_INTERRUPTED
;
8763 OSAddAtomic(-page_count
, &vm_upl_wait_for_pages
);
8765 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
8767 if (no_zero_fill
== FALSE
) {
8768 vm_page_zero_fill(dst_page
);
8770 dst_page
->vmp_absent
= TRUE
;
8773 dst_page
->vmp_reference
= TRUE
;
8775 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
8776 SET_PAGE_DIRTY(dst_page
, FALSE
);
8778 if (dst_page
->vmp_absent
== FALSE
) {
8779 assert(dst_page
->vmp_q_state
== VM_PAGE_NOT_ON_Q
);
8780 assert(dst_page
->vmp_wire_count
== 0);
8781 dst_page
->vmp_wire_count
++;
8782 dst_page
->vmp_q_state
= VM_PAGE_IS_WIRED
;
8783 assert(dst_page
->vmp_wire_count
);
8785 PAGE_WAKEUP_DONE(dst_page
);
8789 vm_page_insert_internal(dst_page
, object
, *dst_offset
, tag
, FALSE
, TRUE
, TRUE
, TRUE
, &delayed_ledger_update
);
8791 lite_list
[entry
>> 5] |= 1U << (entry
& 31);
8793 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
8795 if (phys_page
> upl
->highest_page
) {
8796 upl
->highest_page
= phys_page
;
8799 if (user_page_list
) {
8800 user_page_list
[entry
].phys_addr
= phys_page
;
8801 user_page_list
[entry
].absent
= dst_page
->vmp_absent
;
8802 user_page_list
[entry
].dirty
= dst_page
->vmp_dirty
;
8803 user_page_list
[entry
].free_when_done
= FALSE
;
8804 user_page_list
[entry
].precious
= FALSE
;
8805 user_page_list
[entry
].device
= FALSE
;
8806 user_page_list
[entry
].speculative
= FALSE
;
8807 user_page_list
[entry
].cs_validated
= FALSE
;
8808 user_page_list
[entry
].cs_tainted
= FALSE
;
8809 user_page_list
[entry
].cs_nx
= FALSE
;
8810 user_page_list
[entry
].needed
= FALSE
;
8811 user_page_list
[entry
].mark
= FALSE
;
8814 *dst_offset
+= PAGE_SIZE_64
;
8818 vm_page_lockspin_queues();
8819 vm_page_wire_count
+= pages_wired
;
8820 vm_page_unlock_queues();
8822 if (pages_inserted
) {
8823 if (object
->internal
) {
8824 OSAddAtomic(pages_inserted
, &vm_page_internal_count
);
8826 OSAddAtomic(pages_inserted
, &vm_page_external_count
);
8829 if (delayed_ledger_update
) {
8831 int ledger_idx_volatile
;
8832 int ledger_idx_nonvolatile
;
8833 int ledger_idx_volatile_compressed
;
8834 int ledger_idx_nonvolatile_compressed
;
8835 boolean_t do_footprint
;
8837 owner
= VM_OBJECT_OWNER(object
);
8840 vm_object_ledger_tag_ledgers(object
,
8841 &ledger_idx_volatile
,
8842 &ledger_idx_nonvolatile
,
8843 &ledger_idx_volatile_compressed
,
8844 &ledger_idx_nonvolatile_compressed
,
8847 /* more non-volatile bytes */
8848 ledger_credit(owner
->ledger
,
8849 ledger_idx_nonvolatile
,
8850 delayed_ledger_update
);
8852 /* more footprint */
8853 ledger_credit(owner
->ledger
,
8854 task_ledgers
.phys_footprint
,
8855 delayed_ledger_update
);
8859 assert(page_grab_count
);
8860 *page_grab_count
= pages_inserted
;
8868 vm_object_iopl_request(
8870 vm_object_offset_t offset
,
8873 upl_page_info_array_t user_page_list
,
8874 unsigned int *page_list_count
,
8875 upl_control_flags_t cntrl_flags
,
8879 vm_object_offset_t dst_offset
;
8880 upl_size_t xfer_size
;
8883 wpl_array_t lite_list
= NULL
;
8884 int no_zero_fill
= FALSE
;
8885 unsigned int size_in_pages
;
8886 int page_grab_count
= 0;
8890 struct vm_object_fault_info fault_info
= {};
8891 struct vm_page_delayed_work dw_array
;
8892 struct vm_page_delayed_work
*dwp
, *dwp_start
;
8893 bool dwp_finish_ctx
= TRUE
;
8897 boolean_t caller_lookup
;
8898 int io_tracking_flag
= 0;
8902 boolean_t set_cache_attr_needed
= FALSE
;
8903 boolean_t free_wired_pages
= FALSE
;
8904 boolean_t fast_path_empty_req
= FALSE
;
8905 boolean_t fast_path_full_req
= FALSE
;
8907 #if DEVELOPMENT || DEBUG
8908 task_t task
= current_task();
8909 #endif /* DEVELOPMENT || DEBUG */
8911 dwp_start
= dwp
= NULL
;
8913 vm_object_offset_t original_offset
= offset
;
8914 upl_size_t original_size
= size
;
8916 // DEBUG4K_UPL("object %p offset 0x%llx size 0x%llx cntrl_flags 0x%llx\n", object, (uint64_t)offset, (uint64_t)size, cntrl_flags);
8918 size
= (upl_size_t
)(vm_object_round_page(offset
+ size
) - vm_object_trunc_page(offset
));
8919 offset
= vm_object_trunc_page(offset
);
8920 if (size
!= original_size
|| offset
!= original_offset
) {
8921 DEBUG4K_IOKIT("flags 0x%llx object %p offset 0x%llx size 0x%x -> offset 0x%llx size 0x%x\n", cntrl_flags
, object
, original_offset
, original_size
, offset
, size
);
8924 if (cntrl_flags
& ~UPL_VALID_FLAGS
) {
8926 * For forward compatibility's sake,
8927 * reject any unknown flag.
8929 return KERN_INVALID_VALUE
;
8931 if (vm_lopage_needed
== FALSE
) {
8932 cntrl_flags
&= ~UPL_NEED_32BIT_ADDR
;
8935 if (cntrl_flags
& UPL_NEED_32BIT_ADDR
) {
8936 if ((cntrl_flags
& (UPL_SET_IO_WIRE
| UPL_SET_LITE
)) != (UPL_SET_IO_WIRE
| UPL_SET_LITE
)) {
8937 return KERN_INVALID_VALUE
;
8940 if (object
->phys_contiguous
) {
8941 if ((offset
+ object
->vo_shadow_offset
) >= (vm_object_offset_t
)max_valid_dma_address
) {
8942 return KERN_INVALID_ADDRESS
;
8945 if (((offset
+ object
->vo_shadow_offset
) + size
) >= (vm_object_offset_t
)max_valid_dma_address
) {
8946 return KERN_INVALID_ADDRESS
;
8950 if (cntrl_flags
& (UPL_NOZEROFILL
| UPL_NOZEROFILLIO
)) {
8951 no_zero_fill
= TRUE
;
8954 if (cntrl_flags
& UPL_COPYOUT_FROM
) {
8955 prot
= VM_PROT_READ
;
8957 prot
= VM_PROT_READ
| VM_PROT_WRITE
;
8960 if ((!object
->internal
) && (object
->paging_offset
!= 0)) {
8961 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
8965 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_START
, size
, cntrl_flags
, prot
, 0);
8967 #if CONFIG_IOSCHED || UPL_DEBUG
8968 if ((object
->io_tracking
&& object
!= kernel_object
) || upl_debug_enabled
) {
8969 io_tracking_flag
|= UPL_CREATE_IO_TRACKING
;
8974 if (object
->io_tracking
) {
8975 /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
8976 if (object
!= kernel_object
) {
8977 io_tracking_flag
|= UPL_CREATE_EXPEDITE_SUP
;
8982 if (object
->phys_contiguous
) {
8988 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
8989 dwp_start
= vm_page_delayed_work_get_ctx();
8990 if (dwp_start
== NULL
) {
8991 dwp_start
= &dw_array
;
8993 dwp_finish_ctx
= FALSE
;
8999 if (cntrl_flags
& UPL_SET_INTERNAL
) {
9000 upl
= upl_create(UPL_CREATE_INTERNAL
| UPL_CREATE_LITE
| io_tracking_flag
, UPL_IO_WIRE
, psize
);
9002 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
9003 lite_list
= (wpl_array_t
) (((uintptr_t)user_page_list
) +
9004 ((psize
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
9006 user_page_list
= NULL
;
9010 upl
= upl_create(UPL_CREATE_LITE
| io_tracking_flag
, UPL_IO_WIRE
, psize
);
9012 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
9017 if (user_page_list
) {
9018 user_page_list
[0].device
= FALSE
;
9022 if (cntrl_flags
& UPL_NOZEROFILLIO
) {
9023 DTRACE_VM4(upl_nozerofillio
,
9024 vm_object_t
, object
,
9025 vm_object_offset_t
, offset
,
9030 upl
->map_object
= object
;
9031 upl
->u_offset
= original_offset
;
9032 upl
->u_size
= original_size
;
9034 size_in_pages
= size
/ PAGE_SIZE
;
9036 if (object
== kernel_object
&&
9037 !(cntrl_flags
& (UPL_NEED_32BIT_ADDR
| UPL_BLOCK_ACCESS
))) {
9038 upl
->flags
|= UPL_KERNEL_OBJECT
;
9040 vm_object_lock(object
);
9042 vm_object_lock_shared(object
);
9045 vm_object_lock(object
);
9046 vm_object_activity_begin(object
);
9049 * paging in progress also protects the paging_offset
9051 upl
->u_offset
= original_offset
+ object
->paging_offset
;
9053 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
		/*
		 * The user requested that access to the pages in this UPL
		 * be blocked until the UPL is committed or aborted.
		 */
9058 upl
->flags
|= UPL_ACCESS_BLOCKED
;
9061 #if CONFIG_IOSCHED || UPL_DEBUG
9062 if ((upl
->flags
& UPL_TRACKED_BY_OBJECT
) || upl_debug_enabled
) {
9063 vm_object_activity_begin(object
);
9064 queue_enter(&object
->uplq
, upl
, upl_t
, uplq
);
9068 if (object
->phys_contiguous
) {
9069 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
9070 assert(!object
->blocked_access
);
9071 object
->blocked_access
= TRUE
;
9074 vm_object_unlock(object
);
9077 * don't need any shadow mappings for this one
9078 * since it is already I/O memory
9080 upl
->flags
|= UPL_DEVICE_MEMORY
;
9082 upl
->highest_page
= (ppnum_t
) ((offset
+ object
->vo_shadow_offset
+ size
- 1) >> PAGE_SHIFT
);
9084 if (user_page_list
) {
9085 user_page_list
[0].phys_addr
= (ppnum_t
) ((offset
+ object
->vo_shadow_offset
) >> PAGE_SHIFT
);
9086 user_page_list
[0].device
= TRUE
;
9088 if (page_list_count
!= NULL
) {
9089 if (upl
->flags
& UPL_INTERNAL
) {
9090 *page_list_count
= 0;
9092 *page_list_count
= 1;
9096 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_END
, page_grab_count
, KERN_SUCCESS
, 0, 0);
9097 #if DEVELOPMENT || DEBUG
9099 ledger_credit(task
->ledger
, task_ledgers
.pages_grabbed_iopl
, page_grab_count
);
9101 #endif /* DEVELOPMENT || DEBUG */
9102 return KERN_SUCCESS
;
9104 if (object
!= kernel_object
&& object
!= compressor_object
) {
9106 * Protect user space from future COW operations
9108 #if VM_OBJECT_TRACKING_OP_TRUESHARE
9109 if (!object
->true_share
&&
9110 vm_object_tracking_inited
) {
9111 void *bt
[VM_OBJECT_TRACKING_BTDEPTH
];
9114 num
= OSBacktrace(bt
,
9115 VM_OBJECT_TRACKING_BTDEPTH
);
9116 btlog_add_entry(vm_object_tracking_btlog
,
9118 VM_OBJECT_TRACKING_OP_TRUESHARE
,
9122 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
9124 vm_object_lock_assert_exclusive(object
);
9125 object
->true_share
= TRUE
;
9127 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
) {
9128 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
9132 if (!(cntrl_flags
& UPL_COPYOUT_FROM
) &&
9133 object
->copy
!= VM_OBJECT_NULL
) {
9135 * Honor copy-on-write obligations
9137 * The caller is gathering these pages and
9138 * might modify their contents. We need to
9139 * make sure that the copy object has its own
9140 * private copies of these pages before we let
9141 * the caller modify them.
9143 * NOTE: someone else could map the original object
9144 * after we've done this copy-on-write here, and they
9145 * could then see an inconsistent picture of the memory
9146 * while it's being modified via the UPL. To prevent this,
9147 * we would have to block access to these pages until the
9148 * UPL is released. We could use the UPL_BLOCK_ACCESS
9149 * code path for that...
9151 vm_object_update(object
,
9156 FALSE
, /* should_return */
9157 MEMORY_OBJECT_COPY_SYNC
,
9159 VM_PAGEOUT_DEBUG(iopl_cow
, 1);
9160 VM_PAGEOUT_DEBUG(iopl_cow_pages
, (size
>> PAGE_SHIFT
));
9162 if (!(cntrl_flags
& (UPL_NEED_32BIT_ADDR
| UPL_BLOCK_ACCESS
)) &&
9163 object
->purgable
!= VM_PURGABLE_VOLATILE
&&
9164 object
->purgable
!= VM_PURGABLE_EMPTY
&&
9165 object
->copy
== NULL
&&
9166 size
== object
->vo_size
&&
9168 object
->shadow
== NULL
&&
9169 object
->pager
== NULL
) {
9170 if (object
->resident_page_count
== size_in_pages
) {
9171 assert(object
!= compressor_object
);
9172 assert(object
!= kernel_object
);
9173 fast_path_full_req
= TRUE
;
9174 } else if (object
->resident_page_count
== 0) {
9175 assert(object
!= compressor_object
);
9176 assert(object
!= kernel_object
);
9177 fast_path_empty_req
= TRUE
;
9178 set_cache_attr_needed
= TRUE
;
9182 if (cntrl_flags
& UPL_SET_INTERRUPTIBLE
) {
9183 interruptible
= THREAD_ABORTSAFE
;
9185 interruptible
= THREAD_UNINT
;
9191 dst_offset
= offset
;
9193 if (fast_path_full_req
) {
9194 if (vm_object_iopl_wire_full(object
, upl
, user_page_list
, lite_list
, cntrl_flags
, tag
) == TRUE
) {
9198 * we couldn't complete the processing of this request on the fast path
9199 * so fall through to the slow path and finish up
9201 } else if (fast_path_empty_req
) {
9202 if (cntrl_flags
& UPL_REQUEST_NO_FAULT
) {
9203 ret
= KERN_MEMORY_ERROR
;
9206 ret
= vm_object_iopl_wire_empty(object
, upl
, user_page_list
, lite_list
, cntrl_flags
, tag
, &dst_offset
, size_in_pages
, &page_grab_count
);
9209 free_wired_pages
= TRUE
;
9215 fault_info
.behavior
= VM_BEHAVIOR_SEQUENTIAL
;
9216 fault_info
.lo_offset
= offset
;
9217 fault_info
.hi_offset
= offset
+ xfer_size
;
9218 fault_info
.mark_zf_absent
= TRUE
;
9219 fault_info
.interruptible
= interruptible
;
9220 fault_info
.batch_pmap_op
= TRUE
;
9223 vm_fault_return_t result
;
9227 if (fast_path_full_req
) {
9229 * if we get here, it means that we ran into a page
9230 * state we couldn't handle in the fast path and
9231 * bailed out to the slow path... since the order
9232 * we look at pages is different between the 2 paths,
9233 * the following check is needed to determine whether
9234 * this page was already processed in the fast path
9236 if (lite_list
[entry
>> 5] & (1 << (entry
& 31))) {
9240 dst_page
= vm_page_lookup(object
, dst_offset
);
9242 if (dst_page
== VM_PAGE_NULL
||
9243 dst_page
->vmp_busy
||
9244 dst_page
->vmp_error
||
9245 dst_page
->vmp_restart
||
9246 dst_page
->vmp_absent
||
9247 dst_page
->vmp_fictitious
) {
9248 if (object
== kernel_object
) {
9249 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
9251 if (object
== compressor_object
) {
9252 panic("vm_object_iopl_request: missing/bad page in compressor object\n");
9255 if (cntrl_flags
& UPL_REQUEST_NO_FAULT
) {
9256 ret
= KERN_MEMORY_ERROR
;
9259 set_cache_attr_needed
= TRUE
;
9262 * We just looked up the page and the result remains valid
9263 * until the object lock is release, so send it to
9264 * vm_fault_page() (as "dst_page"), to avoid having to
9265 * look it up again there.
9267 caller_lookup
= TRUE
;
9271 kern_return_t error_code
;
9273 fault_info
.cluster_size
= xfer_size
;
9275 vm_object_paging_begin(object
);
9277 result
= vm_fault_page(object
, dst_offset
,
9278 prot
| VM_PROT_WRITE
, FALSE
,
9280 &prot
, &dst_page
, &top_page
,
9282 &error_code
, no_zero_fill
,
9283 FALSE
, &fault_info
);
9285 /* our lookup is no longer valid at this point */
9286 caller_lookup
= FALSE
;
9289 case VM_FAULT_SUCCESS
:
9292 if (!dst_page
->vmp_absent
) {
9293 PAGE_WAKEUP_DONE(dst_page
);
9296 * we only get back an absent page if we
9297 * requested that it not be zero-filled
9298 * because we are about to fill it via I/O
9300 * absent pages should be left BUSY
9301 * to prevent them from being faulted
9302 * into an address space before we've
9303 * had a chance to complete the I/O on
9304 * them since they may contain info that
9305 * shouldn't be seen by the faulting task
9309 * Release paging references and
9310 * top-level placeholder page, if any.
9312 if (top_page
!= VM_PAGE_NULL
) {
9313 vm_object_t local_object
;
9315 local_object
= VM_PAGE_OBJECT(top_page
);
9318 * comparing 2 packed pointers
9320 if (top_page
->vmp_object
!= dst_page
->vmp_object
) {
9321 vm_object_lock(local_object
);
9322 VM_PAGE_FREE(top_page
);
9323 vm_object_paging_end(local_object
);
9324 vm_object_unlock(local_object
);
9326 VM_PAGE_FREE(top_page
);
9327 vm_object_paging_end(local_object
);
9330 vm_object_paging_end(object
);
9333 case VM_FAULT_RETRY
:
9334 vm_object_lock(object
);
9337 case VM_FAULT_MEMORY_SHORTAGE
:
9338 OSAddAtomic((size_in_pages
- entry
), &vm_upl_wait_for_pages
);
9340 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
9342 if (vm_page_wait(interruptible
)) {
9343 OSAddAtomic(-(size_in_pages
- entry
), &vm_upl_wait_for_pages
);
9345 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
9346 vm_object_lock(object
);
9350 OSAddAtomic(-(size_in_pages
- entry
), &vm_upl_wait_for_pages
);
9352 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, -1);
9356 case VM_FAULT_INTERRUPTED
:
9357 error_code
= MACH_SEND_INTERRUPTED
;
9359 case VM_FAULT_MEMORY_ERROR
:
9361 ret
= (error_code
? error_code
: KERN_MEMORY_ERROR
);
9363 vm_object_lock(object
);
9366 case VM_FAULT_SUCCESS_NO_VM_PAGE
:
9367 /* success but no page: fail */
9368 vm_object_paging_end(object
);
9369 vm_object_unlock(object
);
9373 panic("vm_object_iopl_request: unexpected error"
9374 " 0x%x from vm_fault_page()\n", result
);
9376 } while (result
!= VM_FAULT_SUCCESS
);
9378 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
9380 if (upl
->flags
& UPL_KERNEL_OBJECT
) {
9381 goto record_phys_addr
;
9384 if (dst_page
->vmp_q_state
== VM_PAGE_USED_BY_COMPRESSOR
) {
9385 dst_page
->vmp_busy
= TRUE
;
9386 goto record_phys_addr
;
9389 if (dst_page
->vmp_cleaning
) {
			/*
			 * Someone else is cleaning this page in place.
			 * In theory, we should be able to proceed and use this
			 * page but they'll probably end up clearing the "busy"
			 * bit on it in upl_commit_range() but they didn't set
			 * it, so they would clear our "busy" bit and open
			 * us to race conditions.
			 * We'd better wait for the cleaning to complete and
			 * then try again.
			 */
			VM_PAGEOUT_DEBUG(vm_object_iopl_request_sleep_for_cleaning, 1);
9401 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
9404 if (dst_page
->vmp_laundry
) {
9405 vm_pageout_steal_laundry(dst_page
, FALSE
);
9408 if ((cntrl_flags
& UPL_NEED_32BIT_ADDR
) &&
9409 phys_page
>= (max_valid_dma_address
>> PAGE_SHIFT
)) {
9414 * support devices that can't DMA above 32 bits
9415 * by substituting pages from a pool of low address
9416 * memory for any pages we find above the 4G mark
9417 * can't substitute if the page is already wired because
9418 * we don't know whether that physical address has been
9419 * handed out to some other 64 bit capable DMA device to use
9421 if (VM_PAGE_WIRED(dst_page
)) {
9422 ret
= KERN_PROTECTION_FAILURE
;
9425 low_page
= vm_page_grablo();
9427 if (low_page
== VM_PAGE_NULL
) {
9428 ret
= KERN_RESOURCE_SHORTAGE
;
			/*
			 * from here until the vm_page_replace completes
			 * we mustn't drop the object lock... we don't
			 * want anyone refaulting this page in and using
			 * it after we disconnect it... we want the fault
			 * to find the new page being substituted.
			 */
9438 if (dst_page
->vmp_pmapped
) {
9439 refmod
= pmap_disconnect(phys_page
);
9444 if (!dst_page
->vmp_absent
) {
9445 vm_page_copy(dst_page
, low_page
);
9448 low_page
->vmp_reference
= dst_page
->vmp_reference
;
9449 low_page
->vmp_dirty
= dst_page
->vmp_dirty
;
9450 low_page
->vmp_absent
= dst_page
->vmp_absent
;
9452 if (refmod
& VM_MEM_REFERENCED
) {
9453 low_page
->vmp_reference
= TRUE
;
9455 if (refmod
& VM_MEM_MODIFIED
) {
9456 SET_PAGE_DIRTY(low_page
, FALSE
);
9459 vm_page_replace(low_page
, object
, dst_offset
);
9461 dst_page
= low_page
;
9463 * vm_page_grablo returned the page marked
9464 * BUSY... we don't need a PAGE_WAKEUP_DONE
9465 * here, because we've never dropped the object lock
9467 if (!dst_page
->vmp_absent
) {
9468 dst_page
->vmp_busy
= FALSE
;
9471 phys_page
= VM_PAGE_GET_PHYS_PAGE(dst_page
);
9473 if (!dst_page
->vmp_busy
) {
9474 dwp
->dw_mask
|= DW_vm_page_wire
;
9477 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
9479 * Mark the page "busy" to block any future page fault
9480 * on this page in addition to wiring it.
9481 * We'll also remove the mapping
9482 * of all these pages before leaving this routine.
9484 assert(!dst_page
->vmp_fictitious
);
9485 dst_page
->vmp_busy
= TRUE
;
9488 * expect the page to be used
9489 * page queues lock must be held to set 'reference'
9491 dwp
->dw_mask
|= DW_set_reference
;
9493 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
9494 SET_PAGE_DIRTY(dst_page
, TRUE
);
9496 * Page belonging to a code-signed object is about to
9497 * be written. Mark it tainted and disconnect it from
9498 * all pmaps so processes have to fault it back in and
9499 * deal with the tainted bit.
9501 if (object
->code_signed
&& dst_page
->vmp_cs_tainted
!= VMP_CS_ALL_TRUE
) {
9502 dst_page
->vmp_cs_tainted
= VMP_CS_ALL_TRUE
;
9503 vm_page_iopl_tainted
++;
9504 if (dst_page
->vmp_pmapped
) {
9505 int refmod
= pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page
));
9506 if (refmod
& VM_MEM_REFERENCED
) {
9507 dst_page
->vmp_reference
= TRUE
;
9512 if ((cntrl_flags
& UPL_REQUEST_FORCE_COHERENCY
) && dst_page
->vmp_written_by_kernel
== TRUE
) {
9513 pmap_sync_page_attributes_phys(phys_page
);
9514 dst_page
->vmp_written_by_kernel
= FALSE
;
9518 if (dst_page
->vmp_busy
) {
9519 upl
->flags
|= UPL_HAS_BUSY
;
9522 lite_list
[entry
>> 5] |= 1U << (entry
& 31);
9524 if (phys_page
> upl
->highest_page
) {
9525 upl
->highest_page
= phys_page
;
9528 if (user_page_list
) {
9529 user_page_list
[entry
].phys_addr
= phys_page
;
9530 user_page_list
[entry
].free_when_done
= dst_page
->vmp_free_when_done
;
9531 user_page_list
[entry
].absent
= dst_page
->vmp_absent
;
9532 user_page_list
[entry
].dirty
= dst_page
->vmp_dirty
;
9533 user_page_list
[entry
].precious
= dst_page
->vmp_precious
;
9534 user_page_list
[entry
].device
= FALSE
;
9535 user_page_list
[entry
].needed
= FALSE
;
9536 if (dst_page
->vmp_clustered
== TRUE
) {
9537 user_page_list
[entry
].speculative
= (dst_page
->vmp_q_state
== VM_PAGE_ON_SPECULATIVE_Q
) ? TRUE
: FALSE
;
9539 user_page_list
[entry
].speculative
= FALSE
;
9541 user_page_list
[entry
].cs_validated
= dst_page
->vmp_cs_validated
;
9542 user_page_list
[entry
].cs_tainted
= dst_page
->vmp_cs_tainted
;
9543 user_page_list
[entry
].cs_nx
= dst_page
->vmp_cs_nx
;
9544 user_page_list
[entry
].mark
= FALSE
;
9546 if (object
!= kernel_object
&& object
!= compressor_object
) {
9548 * someone is explicitly grabbing this page...
9549 * update clustered and speculative state
9552 if (dst_page
->vmp_clustered
) {
9553 VM_PAGE_CONSUME_CLUSTERED(dst_page
);
9558 dst_offset
+= PAGE_SIZE_64
;
9559 xfer_size
-= PAGE_SIZE
;
9562 VM_PAGE_ADD_DELAYED_WORK(dwp
, dst_page
, dw_count
);
9564 if (dw_count
>= dw_limit
) {
9565 vm_page_do_delayed_work(object
, tag
, dwp_start
, dw_count
);
9572 assert(entry
== size_in_pages
);
9575 vm_page_do_delayed_work(object
, tag
, dwp_start
, dw_count
);
9580 if (user_page_list
&& set_cache_attr_needed
== TRUE
) {
9581 vm_object_set_pmap_cache_attr(object
, user_page_list
, size_in_pages
, TRUE
);
9584 if (page_list_count
!= NULL
) {
9585 if (upl
->flags
& UPL_INTERNAL
) {
9586 *page_list_count
= 0;
9587 } else if (*page_list_count
> size_in_pages
) {
9588 *page_list_count
= size_in_pages
;
9591 vm_object_unlock(object
);
9593 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
9595 * We've marked all the pages "busy" so that future
9596 * page faults will block.
9597 * Now remove the mapping for these pages, so that they
9598 * can't be accessed without causing a page fault.
9600 vm_object_pmap_protect(object
, offset
, (vm_object_size_t
)size
,
9604 assert(!object
->blocked_access
);
9605 object
->blocked_access
= TRUE
;
9608 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_END
, page_grab_count
, KERN_SUCCESS
, 0, 0);
9609 #if DEVELOPMENT || DEBUG
9611 ledger_credit(task
->ledger
, task_ledgers
.pages_grabbed_iopl
, page_grab_count
);
9613 #endif /* DEVELOPMENT || DEBUG */
9615 if (dwp_start
&& dwp_finish_ctx
) {
9616 vm_page_delayed_work_finish_ctx(dwp_start
);
9617 dwp_start
= dwp
= NULL
;
9620 return KERN_SUCCESS
;
9625 for (; offset
< dst_offset
; offset
+= PAGE_SIZE
) {
9626 boolean_t need_unwire
;
9628 dst_page
= vm_page_lookup(object
, offset
);
9630 if (dst_page
== VM_PAGE_NULL
) {
9631 panic("vm_object_iopl_request: Wired page missing. \n");
9635 * if we've already processed this page in an earlier
9636 * dw_do_work, we need to undo the wiring... we will
9637 * leave the dirty and reference bits on if they
9638 * were set, since we don't have a good way of knowing
9639 * what the previous state was and we won't get here
9640 * under any normal circumstances... we will always
9641 * clear BUSY and wakeup any waiters via vm_page_free
9642 * or PAGE_WAKEUP_DONE
9647 if ((dwp_start
)[dw_index
].dw_m
== dst_page
) {
9649 * still in the deferred work list
9650 * which means we haven't yet called
9651 * vm_page_wire on this page
9653 need_unwire
= FALSE
;
9659 vm_page_lock_queues();
9661 if (dst_page
->vmp_absent
|| free_wired_pages
== TRUE
) {
9662 vm_page_free(dst_page
);
9664 need_unwire
= FALSE
;
9666 if (need_unwire
== TRUE
) {
9667 vm_page_unwire(dst_page
, TRUE
);
9670 PAGE_WAKEUP_DONE(dst_page
);
9672 vm_page_unlock_queues();
9674 if (need_unwire
== TRUE
) {
9675 VM_STAT_INCR(reactivations
);
9681 if (!(upl
->flags
& UPL_KERNEL_OBJECT
)) {
9682 vm_object_activity_end(object
);
9683 vm_object_collapse(object
, 0, TRUE
);
9685 vm_object_unlock(object
);
9688 VM_DEBUG_CONSTANT_EVENT(vm_object_iopl_request
, VM_IOPL_REQUEST
, DBG_FUNC_END
, page_grab_count
, ret
, 0, 0);
9689 #if DEVELOPMENT || DEBUG
9691 ledger_credit(task
->ledger
, task_ledgers
.pages_grabbed_iopl
, page_grab_count
);
9693 #endif /* DEVELOPMENT || DEBUG */
9695 if (dwp_start
&& dwp_finish_ctx
) {
9696 vm_page_delayed_work_finish_ctx(dwp_start
);
9697 dwp_start
= dwp
= NULL
;
9707 kern_return_t retval
;
9708 boolean_t upls_locked
;
9709 vm_object_t object1
, object2
;
9711 if (upl1
== UPL_NULL
|| upl2
== UPL_NULL
|| upl1
== upl2
|| ((upl1
->flags
& UPL_VECTOR
) == UPL_VECTOR
) || ((upl2
->flags
& UPL_VECTOR
) == UPL_VECTOR
)) {
9712 return KERN_INVALID_ARGUMENT
;
9715 upls_locked
= FALSE
;
9718 * Since we need to lock both UPLs at the same time,
9719 * avoid deadlocks by always taking locks in the same order.
9728 upls_locked
= TRUE
; /* the UPLs will need to be unlocked */
9730 object1
= upl1
->map_object
;
9731 object2
= upl2
->map_object
;
9733 if (upl1
->u_offset
!= 0 || upl2
->u_offset
!= 0 ||
9734 upl1
->u_size
!= upl2
->u_size
) {
		/*
		 * We deal only with full objects, not subsets.
		 * That's because we exchange the entire backing store info
		 * for the objects: pager, resident pages, etc... We can't do
		 * anything less.
		 */
		retval = KERN_INVALID_VALUE;
		goto done;
	}

	/*
	 * Transpose the VM objects' backing store.
	 */
	retval = vm_object_transpose(object1, object2,
	    upl_adjusted_size(upl1, PAGE_MASK));
9751 if (retval
== KERN_SUCCESS
) {
9753 * Make each UPL point to the correct VM object, i.e. the
9754 * object holding the pages that the UPL refers to...
9756 #if CONFIG_IOSCHED || UPL_DEBUG
9757 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)) {
9758 vm_object_lock(object1
);
9759 vm_object_lock(object2
);
9761 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || upl_debug_enabled
) {
9762 queue_remove(&object1
->uplq
, upl1
, upl_t
, uplq
);
9764 if ((upl2
->flags
& UPL_TRACKED_BY_OBJECT
) || upl_debug_enabled
) {
9765 queue_remove(&object2
->uplq
, upl2
, upl_t
, uplq
);
9768 upl1
->map_object
= object2
;
9769 upl2
->map_object
= object1
;
9771 #if CONFIG_IOSCHED || UPL_DEBUG
9772 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || upl_debug_enabled
) {
9773 queue_enter(&object2
->uplq
, upl1
, upl_t
, uplq
);
9775 if ((upl2
->flags
& UPL_TRACKED_BY_OBJECT
) || upl_debug_enabled
) {
9776 queue_enter(&object1
->uplq
, upl2
, upl_t
, uplq
);
9778 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)) {
9779 vm_object_unlock(object2
);
9780 vm_object_unlock(object1
);
9792 upls_locked
= FALSE
;
void
upl_range_needed(
	upl_t           upl,
	int             index,
	int             count)
{
	upl_page_info_t *user_page_list;
	int             size_in_pages;

	if (!(upl->flags & UPL_INTERNAL) || count <= 0) {
		return;
	}

	size_in_pages = upl_adjusted_size(upl, PAGE_MASK) / PAGE_SIZE;

	user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));

	while (count-- && index < size_in_pages) {
		user_page_list[index++].needed = TRUE;
	}
}
/*
 * Reserve of virtual addresses in the kernel address space.
 * We need to map the physical pages in the kernel, so that we
 * can call the code-signing or slide routines with a kernel
 * virtual address.  We keep this pool of pre-allocated kernel
 * virtual addresses so that we don't have to scan the kernel's
 * virtual address space each time we need to work with
 * a physical page.
 */
SIMPLE_LOCK_DECLARE(vm_paging_lock, 0);
#define VM_PAGING_NUM_PAGES     64
vm_map_offset_t vm_paging_base_address = 0;
boolean_t       vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
int             vm_paging_max_index = 0;

int             vm_paging_page_waiter = 0;
int             vm_paging_page_waiter_total = 0;

unsigned long   vm_paging_no_kernel_page = 0;
unsigned long   vm_paging_objects_mapped = 0;
unsigned long   vm_paging_pages_mapped = 0;
unsigned long   vm_paging_objects_mapped_slow = 0;
unsigned long   vm_paging_pages_mapped_slow = 0;

void
vm_paging_map_init(void)
{
	kern_return_t   kr;
	vm_map_offset_t page_map_offset;
	vm_map_entry_t  map_entry;

	assert(vm_paging_base_address == 0);

	/*
	 * Initialize our pool of pre-allocated kernel
	 * virtual addresses.
	 */
	page_map_offset = 0;
	kr = vm_map_find_space(kernel_map,
	    &page_map_offset,
	    VM_PAGING_NUM_PAGES * PAGE_SIZE,
	    0,
	    0,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    &map_entry);
	if (kr != KERN_SUCCESS) {
		panic("vm_paging_map_init: kernel_map full\n");
	}
	VME_OBJECT_SET(map_entry, kernel_object);
	VME_OFFSET_SET(map_entry, page_map_offset);
	map_entry->protection = VM_PROT_NONE;
	map_entry->max_protection = VM_PROT_NONE;
	map_entry->permanent = TRUE;
	vm_object_reference(kernel_object);
	vm_map_unlock(kernel_map);

	assert(vm_paging_base_address == 0);
	vm_paging_base_address = page_map_offset;
}
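/*
 * Illustrative sketch (not part of the build): the pool set up above is a
 * contiguous VM_PAGING_NUM_PAGES-page kernel VA range, so a pool address
 * and its slot index convert back and forth with simple page arithmetic,
 * which is what vm_paging_map_object()/vm_paging_unmap_object() rely on:
 *
 *	vm_map_offset_t addr = vm_paging_base_address + (i * PAGE_SIZE);
 *	int             slot = (int)((addr - vm_paging_base_address) >> PAGE_SHIFT);
 *	assert(slot == i && slot < VM_PAGING_NUM_PAGES);
 */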
9883 * vm_paging_map_object:
9884 * Maps part of a VM object's pages in the kernel
9885 * virtual address space, using the pre-allocated
9886 * kernel virtual addresses, if possible.
9888 * The VM object is locked. This lock will get
9889 * dropped and re-acquired though, so the caller
9890 * must make sure the VM object is kept alive
9891 * (by holding a VM map that has a reference
9892 * on it, for example, or taking an extra reference).
9893 * The page should also be kept busy to prevent
9894 * it from being reclaimed.
9897 vm_paging_map_object(
9900 vm_object_offset_t offset
,
9901 vm_prot_t protection
,
9902 boolean_t can_unlock_object
,
9903 vm_map_size_t
*size
, /* IN/OUT */
9904 vm_map_offset_t
*address
, /* OUT */
9905 boolean_t
*need_unmap
) /* OUT */
9908 vm_map_offset_t page_map_offset
;
9909 vm_map_size_t map_size
;
9910 vm_object_offset_t object_offset
;
9913 if (page
!= VM_PAGE_NULL
&& *size
== PAGE_SIZE
) {
9914 /* use permanent 1-to-1 kernel mapping of physical memory ? */
9915 *address
= (vm_map_offset_t
)
9916 phystokv((pmap_paddr_t
)VM_PAGE_GET_PHYS_PAGE(page
) << PAGE_SHIFT
);
9917 *need_unmap
= FALSE
;
9918 return KERN_SUCCESS
;
9920 assert(page
->vmp_busy
);
9922 * Use one of the pre-allocated kernel virtual addresses
9923 * and just enter the VM page in the kernel address space
9924 * at that virtual address.
9926 simple_lock(&vm_paging_lock
, &vm_pageout_lck_grp
);
9929 * Try and find an available kernel virtual address
9930 * from our pre-allocated pool.
9932 page_map_offset
= 0;
9934 for (i
= 0; i
< VM_PAGING_NUM_PAGES
; i
++) {
9935 if (vm_paging_page_inuse
[i
] == FALSE
) {
9937 vm_paging_base_address
+
9942 if (page_map_offset
!= 0) {
9943 /* found a space to map our page ! */
9947 if (can_unlock_object
) {
9949 * If we can afford to unlock the VM object,
9950 * let's take the slow path now...
9955 * We can't afford to unlock the VM object, so
9956 * let's wait for a space to become available...
9958 vm_paging_page_waiter_total
++;
9959 vm_paging_page_waiter
++;
9960 kr
= assert_wait((event_t
)&vm_paging_page_waiter
, THREAD_UNINT
);
9961 if (kr
== THREAD_WAITING
) {
9962 simple_unlock(&vm_paging_lock
);
9963 kr
= thread_block(THREAD_CONTINUE_NULL
);
9964 simple_lock(&vm_paging_lock
, &vm_pageout_lck_grp
);
9966 vm_paging_page_waiter
--;
9967 /* ... and try again */
9970 if (page_map_offset
!= 0) {
9972 * We found a kernel virtual address;
9973 * map the physical page to that virtual address.
9975 if (i
> vm_paging_max_index
) {
9976 vm_paging_max_index
= i
;
9978 vm_paging_page_inuse
[i
] = TRUE
;
9979 simple_unlock(&vm_paging_lock
);
9981 page
->vmp_pmapped
= TRUE
;
9984 * Keep the VM object locked over the PMAP_ENTER
9985 * and the actual use of the page by the kernel,
9986 * or this pmap mapping might get undone by a
9987 * vm_object_pmap_protect() call...
9989 PMAP_ENTER(kernel_pmap
,
9997 assert(kr
== KERN_SUCCESS
);
9998 vm_paging_objects_mapped
++;
9999 vm_paging_pages_mapped
++;
10000 *address
= page_map_offset
;
10001 *need_unmap
= TRUE
;
10004 kasan_notify_address(page_map_offset
, PAGE_SIZE
);
10007 /* all done and mapped, ready to use ! */
10008 return KERN_SUCCESS
;
10012 * We ran out of pre-allocated kernel virtual
10013 * addresses. Just map the page in the kernel
10014 * the slow and regular way.
10016 vm_paging_no_kernel_page
++;
10017 simple_unlock(&vm_paging_lock
);
10020 if (!can_unlock_object
) {
10023 *need_unmap
= FALSE
;
10024 return KERN_NOT_SUPPORTED
;
10027 object_offset
= vm_object_trunc_page(offset
);
10028 map_size
= vm_map_round_page(*size
,
10029 VM_MAP_PAGE_MASK(kernel_map
));
10032 * Try and map the required range of the object
10033 * in the kernel_map
10036 vm_object_reference_locked(object
); /* for the map entry */
10037 vm_object_unlock(object
);
10039 kr
= vm_map_enter(kernel_map
,
10044 VM_MAP_KERNEL_FLAGS_NONE
,
10045 VM_KERN_MEMORY_NONE
,
10052 if (kr
!= KERN_SUCCESS
) {
10055 *need_unmap
= FALSE
;
10056 vm_object_deallocate(object
); /* for the map entry */
10057 vm_object_lock(object
);
10064 * Enter the mapped pages in the page table now.
10066 vm_object_lock(object
);
10068 * VM object must be kept locked from before PMAP_ENTER()
10069 * until after the kernel is done accessing the page(s).
10070 * Otherwise, the pmap mappings in the kernel could be
10071 * undone by a call to vm_object_pmap_protect().
10074 for (page_map_offset
= 0;
10076 map_size
-= PAGE_SIZE_64
, page_map_offset
+= PAGE_SIZE_64
) {
10077 page
= vm_page_lookup(object
, offset
+ page_map_offset
);
10078 if (page
== VM_PAGE_NULL
) {
10079 printf("vm_paging_map_object: no page !?");
10080 vm_object_unlock(object
);
10081 kr
= vm_map_remove(kernel_map
, *address
, *size
,
10082 VM_MAP_REMOVE_NO_FLAGS
);
10083 assert(kr
== KERN_SUCCESS
);
10086 *need_unmap
= FALSE
;
10087 vm_object_lock(object
);
10088 return KERN_MEMORY_ERROR
;
10090 page
->vmp_pmapped
= TRUE
;
10092 //assert(pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(page)));
10093 PMAP_ENTER(kernel_pmap
,
10094 *address
+ page_map_offset
,
10101 assert(kr
== KERN_SUCCESS
);
10103 kasan_notify_address(*address
+ page_map_offset
, PAGE_SIZE
);
10107 vm_paging_objects_mapped_slow
++;
10108 vm_paging_pages_mapped_slow
+= (unsigned long) (map_size
/ PAGE_SIZE_64
);
10110 *need_unmap
= TRUE
;
10112 return KERN_SUCCESS
;
10116 * vm_paging_unmap_object:
10117 * Unmaps part of a VM object's pages from the kernel
10118 * virtual address space.
10120 * The VM object is locked. This lock will get
10121 * dropped and re-acquired though.
10124 vm_paging_unmap_object(
10125 vm_object_t object
,
10126 vm_map_offset_t start
,
10127 vm_map_offset_t end
)
10132 if ((vm_paging_base_address
== 0) ||
10133 (start
< vm_paging_base_address
) ||
10134 (end
> (vm_paging_base_address
10135 + (VM_PAGING_NUM_PAGES
* PAGE_SIZE
)))) {
10137 * We didn't use our pre-allocated pool of
10138 * kernel virtual address. Deallocate the
10141 if (object
!= VM_OBJECT_NULL
) {
10142 vm_object_unlock(object
);
10144 kr
= vm_map_remove(kernel_map
, start
, end
,
10145 VM_MAP_REMOVE_NO_FLAGS
);
10146 if (object
!= VM_OBJECT_NULL
) {
10147 vm_object_lock(object
);
10149 assert(kr
== KERN_SUCCESS
);
10152 * We used a kernel virtual address from our
10153 * pre-allocated pool. Put it back in the pool
10156 assert(end
- start
== PAGE_SIZE
);
10157 i
= (int) ((start
- vm_paging_base_address
) >> PAGE_SHIFT
);
10158 assert(i
>= 0 && i
< VM_PAGING_NUM_PAGES
);
10160 /* undo the pmap mapping */
10161 pmap_remove(kernel_pmap
, start
, end
);
10163 simple_lock(&vm_paging_lock
, &vm_pageout_lck_grp
);
10164 vm_paging_page_inuse
[i
] = FALSE
;
10165 if (vm_paging_page_waiter
) {
10166 thread_wakeup(&vm_paging_page_waiter
);
10168 simple_unlock(&vm_paging_lock
);
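/*
 * Illustrative sketch (not part of the build; page/object/offset are
 * placeholders): how a caller is expected to pair vm_paging_map_object()
 * with vm_paging_unmap_object().  The object must stay locked and the
 * page must stay busy for the duration of the kernel access.
 *
 *	vm_map_size_t   ksize = PAGE_SIZE;
 *	vm_map_offset_t kaddr;
 *	boolean_t       need_unmap;
 *
 *	kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *	    FALSE, &ksize, &kaddr, &need_unmap);
 *	if (kr == KERN_SUCCESS) {
 *		... access the page through kaddr ...
 *		if (need_unmap) {
 *			vm_paging_unmap_object(object, kaddr, kaddr + ksize);
 *		}
 *	}
 */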
/*
 * page->vmp_object must be locked
 */
void
vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
{
	if (!queues_locked) {
		vm_page_lockspin_queues();
	}

	page->vmp_free_when_done = FALSE;
	/*
	 * need to drop the laundry count...
	 * we may also need to remove it
	 * from the I/O paging queue...
	 * vm_pageout_throttle_up handles both cases
	 *
	 * the laundry and pageout_queue flags are cleared...
	 */
	vm_pageout_throttle_up(page);

	if (!queues_locked) {
		vm_page_unlock_queues();
	}
}
upl_t
vector_upl_create(vm_offset_t upl_offset)
{
	int     vector_upl_size = sizeof(struct _vector_upl);
	int     i = 0;
	upl_t   upl;
	vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);

	upl = upl_create(0, UPL_VECTOR, 0);
	upl->vector_upl = vector_upl;
	upl->u_offset = upl_offset;
	vector_upl->size = 0;
	vector_upl->offset = upl_offset;
	vector_upl->invalid_upls = 0;
	vector_upl->num_upls = 0;
	vector_upl->pagelist = NULL;

	for (i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
		vector_upl->upl_iostates[i].size = 0;
		vector_upl->upl_iostates[i].offset = 0;
	}
	return upl;
}

void
vector_upl_deallocate(upl_t upl)
{
	if (upl) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			if (vector_upl->invalid_upls != vector_upl->num_upls) {
				panic("Deallocating non-empty Vectored UPL\n");
			}
			kfree(vector_upl->pagelist, (sizeof(struct upl_page_info) * (vector_upl->size / PAGE_SIZE)));
			vector_upl->invalid_upls = 0;
			vector_upl->num_upls = 0;
			vector_upl->pagelist = NULL;
			vector_upl->size = 0;
			vector_upl->offset = 0;
			kfree(vector_upl, sizeof(struct _vector_upl));
			vector_upl = (vector_upl_t)0xfeedfeed;
		} else {
			panic("vector_upl_deallocate was passed a non-vectored upl\n");
		}
	} else {
		panic("vector_upl_deallocate was passed a NULL upl\n");
	}
}
10249 vector_upl_is_valid(upl_t upl
)
10251 if (upl
&& ((upl
->flags
& UPL_VECTOR
) == UPL_VECTOR
)) {
10252 vector_upl_t vector_upl
= upl
->vector_upl
;
10253 if (vector_upl
== NULL
|| vector_upl
== (vector_upl_t
)0xfeedfeed || vector_upl
== (vector_upl_t
)0xfeedbeef) {
10263 vector_upl_set_subupl(upl_t upl
, upl_t subupl
, uint32_t io_size
)
10265 if (vector_upl_is_valid(upl
)) {
10266 vector_upl_t vector_upl
= upl
->vector_upl
;
10271 if (io_size
< PAGE_SIZE
) {
10272 io_size
= PAGE_SIZE
;
10274 subupl
->vector_upl
= (void*)vector_upl
;
10275 vector_upl
->upl_elems
[vector_upl
->num_upls
++] = subupl
;
10276 vector_upl
->size
+= io_size
;
10277 upl
->u_size
+= io_size
;
10279 uint32_t i
= 0, invalid_upls
= 0;
10280 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
10281 if (vector_upl
->upl_elems
[i
] == subupl
) {
10285 if (i
== vector_upl
->num_upls
) {
10286 panic("Trying to remove sub-upl when none exists");
10289 vector_upl
->upl_elems
[i
] = NULL
;
10290 invalid_upls
= os_atomic_inc(&(vector_upl
)->invalid_upls
,
10292 if (invalid_upls
== vector_upl
->num_upls
) {
10299 panic("vector_upl_set_subupl was passed a NULL upl element\n");
10302 panic("vector_upl_set_subupl was passed a non-vectored upl\n");
10305 panic("vector_upl_set_subupl was passed a NULL upl\n");
10312 vector_upl_set_pagelist(upl_t upl
)
10314 if (vector_upl_is_valid(upl
)) {
10316 vector_upl_t vector_upl
= upl
->vector_upl
;
10319 vm_offset_t pagelist_size
= 0, cur_upl_pagelist_size
= 0;
10321 vector_upl
->pagelist
= (upl_page_info_array_t
)kalloc(sizeof(struct upl_page_info
) * (vector_upl
->size
/ PAGE_SIZE
));
10323 for (i
= 0; i
< vector_upl
->num_upls
; i
++) {
10324 cur_upl_pagelist_size
= sizeof(struct upl_page_info
) * upl_adjusted_size(vector_upl
->upl_elems
[i
], PAGE_MASK
) / PAGE_SIZE
;
10325 bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl
->upl_elems
[i
]), (char*)vector_upl
->pagelist
+ pagelist_size
, cur_upl_pagelist_size
);
10326 pagelist_size
+= cur_upl_pagelist_size
;
10327 if (vector_upl
->upl_elems
[i
]->highest_page
> upl
->highest_page
) {
10328 upl
->highest_page
= vector_upl
->upl_elems
[i
]->highest_page
;
10331 assert( pagelist_size
== (sizeof(struct upl_page_info
) * (vector_upl
->size
/ PAGE_SIZE
)));
10333 panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
10336 panic("vector_upl_set_pagelist was passed a NULL upl\n");
10341 vector_upl_subupl_byindex(upl_t upl
, uint32_t index
)
10343 if (vector_upl_is_valid(upl
)) {
10344 vector_upl_t vector_upl
= upl
->vector_upl
;
10346 if (index
< vector_upl
->num_upls
) {
10347 return vector_upl
->upl_elems
[index
];
10350 panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
upl_t
vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
{
	if (vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if (vector_upl) {
			upl_t subupl = NULL;
			vector_upl_iostates_t subupl_state;

			for (i = 0; i < vector_upl->num_upls; i++) {
				subupl = vector_upl->upl_elems[i];
				subupl_state = vector_upl->upl_iostates[i];
				if (*upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
					/* We could have been passed an offset/size pair that belongs
					 * to an UPL element that has already been committed/aborted.
					 * If so, return NULL.
					 */
					if (subupl == NULL) {
						return NULL;
					}
					if ((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
						*upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
						if (*upl_size > subupl_state.size) {
							*upl_size = subupl_state.size;
						}
					}
					if (*upl_offset >= subupl_state.offset) {
						*upl_offset -= subupl_state.offset;
					} else if (i) {
						panic("Vector UPL offset miscalculation\n");
					}
					return subupl;
				}
			}
		} else {
			panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
		}
	}
	return NULL;
}
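/*
 * Illustrative sketch only (not from the original source, kept out of the
 * build with #if 0): translating an offset within a vector UPL into the
 * owning sub-UPL plus a sub-UPL-relative offset/size, as an I/O completion
 * path might.  The function name and the commit/abort step are hypothetical.
 */
#if 0
static void
example_complete_vector_range(upl_t vupl, upl_offset_t offset, upl_size_t size)
{
	upl_offset_t sub_offset = offset;
	upl_size_t   sub_size   = size;
	upl_t        subupl;

	/* vector_upl_subupl_byoffset() clips sub_size to the matching
	 * element's iostate and rebases sub_offset to be sub-UPL relative;
	 * NULL means that element was already committed or aborted. */
	subupl = vector_upl_subupl_byoffset(vupl, &sub_offset, &sub_size);
	if (subupl != NULL) {
		/* ...commit or abort [sub_offset, sub_offset + sub_size) on subupl... */
	}
}
#endif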
void
vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
{
	*v_upl_submap = NULL;

	if (vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			*v_upl_submap = vector_upl->submap;
			*submap_dst_addr = vector_upl->submap_dst_addr;
		} else {
			panic("vector_upl_get_submap was passed a non-vectored UPL\n");
		}
	} else {
		panic("vector_upl_get_submap was passed a null UPL\n");
	}
}
void
vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
{
	if (vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			vector_upl->submap = submap;
			vector_upl->submap_dst_addr = submap_dst_addr;
		} else {
			panic("vector_upl_set_submap was passed a non-vectored UPL\n");
		}
	} else {
		panic("vector_upl_set_submap was passed a NULL UPL\n");
	}
}
void
vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
{
	if (vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if (vector_upl) {
			for (i = 0; i < vector_upl->num_upls; i++) {
				if (vector_upl->upl_elems[i] == subupl) {
					break;
				}
			}

			if (i == vector_upl->num_upls) {
				panic("setting sub-upl iostate when none exists");
			}

			vector_upl->upl_iostates[i].offset = offset;
			if (size < PAGE_SIZE) {
				size = PAGE_SIZE;
			}
			vector_upl->upl_iostates[i].size = size;
		} else {
			panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
		}
	} else {
		panic("vector_upl_set_iostate was passed a NULL UPL\n");
	}
}
void
vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
{
	if (vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if (vector_upl) {
			for (i = 0; i < vector_upl->num_upls; i++) {
				if (vector_upl->upl_elems[i] == subupl) {
					break;
				}
			}

			if (i == vector_upl->num_upls) {
				panic("getting sub-upl iostate when none exists");
			}

			*offset = vector_upl->upl_iostates[i].offset;
			*size = vector_upl->upl_iostates[i].size;
		} else {
			panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
		}
	} else {
		panic("vector_upl_get_iostate was passed a NULL UPL\n");
	}
}
void
vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
{
	if (vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if (vector_upl) {
			if (index < vector_upl->num_upls) {
				*offset = vector_upl->upl_iostates[index].offset;
				*size = vector_upl->upl_iostates[index].size;
			} else {
				*offset = *size = 0;
			}
		} else {
			panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
		}
	} else {
		panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
	}
}
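/*
 * Illustrative sketch only (not from the original source, kept out of the
 * build with #if 0): walking a vector UPL by index and reading each
 * element's iostate.  The function name and the num_upls parameter are
 * assumptions; vector_upl_subupl_byindex() and
 * vector_upl_get_iostate_byindex() are the real accessors defined above.
 */
#if 0
static void
example_walk_vector_iostates(upl_t vupl, uint32_t num_upls)
{
	uint32_t     n;
	upl_offset_t offset;
	upl_size_t   size;

	for (n = 0; n < num_upls; n++) {
		upl_t subupl = vector_upl_subupl_byindex(vupl, n);

		/* Returns 0/0 when the index is out of range. */
		vector_upl_get_iostate_byindex(vupl, n, &offset, &size);
		if (subupl != NULL && size != 0) {
			/* ...issue or account for I/O covering [offset, offset + size)... */
		}
	}
}
#endif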
upl_page_info_t *
upl_get_internal_vectorupl_pagelist(upl_t upl)
{
	return ((vector_upl_t)(upl->vector_upl))->pagelist;
}

void *
upl_get_internal_vectorupl(upl_t upl)
{
	return upl->vector_upl;
}

vm_size_t
upl_get_internal_pagelist_offset(void)
{
	return sizeof(struct upl);
}
void
upl_clear_dirty(
	upl_t           upl,
	boolean_t       value)
{
	if (value) {
		upl->flags |= UPL_CLEAR_DIRTY;
	} else {
		upl->flags &= ~UPL_CLEAR_DIRTY;
	}
}
void
upl_set_referenced(
	upl_t           upl,
	boolean_t       value)
{
	upl_lock(upl);
	if (value) {
		upl->ext_ref_count++;
	} else {
		if (!upl->ext_ref_count) {
			panic("upl_set_referenced not %p\n", upl);
		}
		upl->ext_ref_count--;
	}
	upl_unlock(upl);
}
void
upl_set_blkno(
	upl_t           upl,
	vm_offset_t     upl_offset,
	int             io_size,
	int64_t         blkno)
{
	int i, j;

	if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
		return;
	}

	assert(upl->upl_reprio_info != 0);
	for (i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
		UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
	}
}
void inline
memoryshot(unsigned int event, unsigned int control)
{
	if (vm_debug_events) {
		KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
		    vm_page_active_count, vm_page_inactive_count,
		    vm_page_free_count, vm_page_speculative_count,
		    vm_page_throttled_count);
	} else {
		(void) event;
		(void) control;
	}
}
#ifdef MACH_BSD

boolean_t
upl_device_page(upl_page_info_t *upl)
{
	return UPL_DEVICE_PAGE(upl);
}

boolean_t
upl_page_present(upl_page_info_t *upl, int index)
{
	return UPL_PAGE_PRESENT(upl, index);
}

boolean_t
upl_speculative_page(upl_page_info_t *upl, int index)
{
	return UPL_SPECULATIVE_PAGE(upl, index);
}

boolean_t
upl_dirty_page(upl_page_info_t *upl, int index)
{
	return UPL_DIRTY_PAGE(upl, index);
}

boolean_t
upl_valid_page(upl_page_info_t *upl, int index)
{
	return UPL_VALID_PAGE(upl, index);
}

ppnum_t
upl_phys_page(upl_page_info_t *upl, int index)
{
	return UPL_PHYS_PAGE(upl, index);
}

void
upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v)
{
	upl[index].mark = v;
}

boolean_t
upl_page_get_mark(upl_page_info_t *upl, int index)
{
	return upl[index].mark;
}
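/*
 * Illustrative sketch only (not from the original source, kept out of the
 * build with #if 0): using the upl_page_* accessors above to scan a UPL's
 * page list.  The helper name and the page_count parameter are assumptions.
 */
#if 0
static int
example_count_dirty_resident_pages(upl_page_info_t *pl, int page_count)
{
	int i, dirty = 0;

	for (i = 0; i < page_count; i++) {
		if (upl_page_present(pl, i) && upl_valid_page(pl, i) && upl_dirty_page(pl, i)) {
			dirty++;
		}
	}
	return dirty;
}
#endif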
void
vm_countdirtypages(void)
{
	vm_page_t m;
	int dpages;
	int pgopages;
	int precpages;

	/* Debug helper: count dirty, pageout-pending and precious pages on
	 * the inactive, throttled, anonymous and active queues. */
	dpages = 0;
	pgopages = 0;
	precpages = 0;

	vm_page_lock_queues();
	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
	do {
		if (m == (vm_page_t)0) {
			break;
		}

		if (m->vmp_dirty) {
			dpages++;
		}
		if (m->vmp_free_when_done) {
			pgopages++;
		}
		if (m->vmp_precious) {
			precpages++;
		}

		assert(VM_PAGE_OBJECT(m) != kernel_object);
		m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
		if (m == (vm_page_t)0) {
			break;
		}
	} while (!vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
	do {
		if (m == (vm_page_t)0) {
			break;
		}

		dpages++;
		assert(m->vmp_dirty);
		assert(!m->vmp_free_when_done);
		assert(VM_PAGE_OBJECT(m) != kernel_object);
		m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
		if (m == (vm_page_t)0) {
			break;
		}
	} while (!vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
	do {
		if (m == (vm_page_t)0) {
			break;
		}

		if (m->vmp_dirty) {
			dpages++;
		}
		if (m->vmp_free_when_done) {
			pgopages++;
		}
		if (m->vmp_precious) {
			precpages++;
		}

		assert(VM_PAGE_OBJECT(m) != kernel_object);
		m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
		if (m == (vm_page_t)0) {
			break;
		}
	} while (!vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t) m));
	vm_page_unlock_queues();

	printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);

	dpages = 0;
	pgopages = 0;
	precpages = 0;

	vm_page_lock_queues();
	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);

	do {
		if (m == (vm_page_t)0) {
			break;
		}
		if (m->vmp_dirty) {
			dpages++;
		}
		if (m->vmp_free_when_done) {
			pgopages++;
		}
		if (m->vmp_precious) {
			precpages++;
		}

		assert(VM_PAGE_OBJECT(m) != kernel_object);
		m = (vm_page_t) vm_page_queue_next(&m->vmp_pageq);
		if (m == (vm_page_t)0) {
			break;
		}
	} while (!vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t) m));
	vm_page_unlock_queues();

	printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
}
#endif /* MACH_BSD */
#if CONFIG_IOSCHED
int
upl_get_cached_tier(upl_t upl)
{
	assert(upl);
	if (upl->flags & UPL_TRACKED_BY_OBJECT) {
		return upl->upl_priority;
	}
	return -1;
}
#endif /* CONFIG_IOSCHED */
void
upl_callout_iodone(upl_t upl)
{
	struct upl_io_completion *upl_ctx = upl->upl_iodone;

	if (upl_ctx) {
		void (*iodone_func)(void *, int) = upl_ctx->io_done;

		assert(upl_ctx->io_done);

		(*iodone_func)(upl_ctx->io_context, upl_ctx->io_error);
	}
}
void
upl_set_iodone(upl_t upl, void *upl_iodone)
{
	upl->upl_iodone = (struct upl_io_completion *)upl_iodone;
}
void
upl_set_iodone_error(upl_t upl, int error)
{
	struct upl_io_completion *upl_ctx = upl->upl_iodone;

	if (upl_ctx) {
		upl_ctx->io_error = error;
	}
}
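/*
 * Illustrative sketch only (not from the original source, kept out of the
 * build with #if 0): registering a completion context and firing it.  The
 * io_done/io_context/io_error field names come from the code above; the
 * helper names, the static-lifetime context and the error value are
 * assumptions for illustration.
 */
#if 0
static void
example_io_done(void *context, int error)
{
	/* hypothetical completion callback */
	(void) context;
	(void) error;
}

static void
example_attach_iodone(upl_t upl)
{
	/* The context must stay valid until upl_callout_iodone() runs. */
	static struct upl_io_completion ctx;

	ctx.io_done    = example_io_done;
	ctx.io_context = NULL;
	ctx.io_error   = 0;

	upl_set_iodone(upl, &ctx);

	/* Later, an error can be recorded and the callback invoked: */
	upl_set_iodone_error(upl, 5);   /* value arbitrary for the sketch */
	upl_callout_iodone(upl);
}
#endif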
ppnum_t
upl_get_highest_page(
	upl_t                      upl)
{
	return upl->highest_page;
}

upl_size_t
upl_get_size(
	upl_t                      upl)
{
	return upl_adjusted_size(upl, PAGE_MASK);
}

upl_size_t
upl_adjusted_size(
	upl_t upl,
	vm_map_offset_t pgmask)
{
	vm_object_offset_t start_offset, end_offset;

	start_offset = trunc_page_mask_64(upl->u_offset, pgmask);
	end_offset = round_page_mask_64(upl->u_offset + upl->u_size, pgmask);

	return (upl_size_t)(end_offset - start_offset);
}
vm_object_offset_t
upl_adjusted_offset(
	upl_t upl,
	vm_map_offset_t pgmask)
{
	return trunc_page_mask_64(upl->u_offset, pgmask);
}

vm_object_offset_t
upl_get_data_offset(
	upl_t upl)
{
	return upl->u_offset - upl_adjusted_offset(upl, PAGE_MASK);
}
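/*
 * Worked example for the three helpers above (values hypothetical, 4 KB
 * pages assumed): for a UPL with u_offset = 0x1800 and u_size = 0x2400,
 *   upl_adjusted_offset(upl, PAGE_MASK) = trunc_page(0x1800)          = 0x1000
 *   upl_adjusted_size(upl, PAGE_MASK)   = round_page(0x3C00) - 0x1000 = 0x3000
 *   upl_get_data_offset(upl)            = 0x1800 - 0x1000             = 0x800
 * i.e. the UPL spans three whole pages and the caller's data starts 0x800
 * bytes into the first one.
 */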
upl_t
upl_associated_upl(upl_t upl)
{
	return upl->associated_upl;
}

void
upl_set_associated_upl(upl_t upl, upl_t associated_upl)
{
	upl->associated_upl = associated_upl;
}
struct vnode *
upl_lookup_vnode(upl_t upl)
{
	if (!upl->map_object->internal) {
		return vnode_pager_lookup_vnode(upl->map_object->pager);
	} else {
		return NULL;
	}
}
#if UPL_DEBUG
kern_return_t
upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
	upl->ubc_alias1 = alias1;
	upl->ubc_alias2 = alias2;
	return KERN_SUCCESS;
}

int
upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
{
	if (al) {
		*al = upl->ubc_alias1;
	}
	if (al2) {
		*al2 = upl->ubc_alias2;
	}
	return KERN_SUCCESS;
}
#endif /* UPL_DEBUG */
#if VM_PRESSURE_EVENTS
/*
 * Upward trajectory.
 */
extern boolean_t vm_compressor_low_on_space(void);

boolean_t
VM_PRESSURE_NORMAL_TO_WARNING(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
			/* No frozen processes to kill */
			if (memorystatus_frozen_count == 0) {
				/* Not enough suspended processes available. */
				if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
					return TRUE;
				}
			}
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0;
	}
}
boolean_t
VM_PRESSURE_WARNING_TO_CRITICAL(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_critical) {
			return TRUE;
		}
		return FALSE;
	} else {
		return vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
	}
}
/*
 * Downward trajectory.
 */
boolean_t
VM_PRESSURE_WARNING_TO_NORMAL(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages above our threshold */
		unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100));
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0;
	}
}
boolean_t
VM_PRESSURE_CRITICAL_TO_WARNING(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* Available pages above our threshold */
		unsigned int target_threshold = (unsigned int)(memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100));
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return (AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0;
	}
}
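/*
 * Worked example of the hysteresis above (numbers hypothetical): with
 * memorystatus_available_pages_pressure = 1000, the WARNING state is entered
 * when fewer than 1000 pages are available, but is not left until more than
 * 1000 + (15 * 1000) / 100 = 1150 pages are available again.  Similarly, the
 * compressor-based checks require AVAILABLE_NON_COMPRESSED_MEMORY to climb
 * back to 12/10 of VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD before leaving
 * WARNING and 14/10 of VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD before
 * leaving CRITICAL, so the pressure level does not flap around a single
 * boundary.
 */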
#endif /* VM_PRESSURE_EVENTS */