/*
 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *  File:   vm/vm_pageout.c
 *  Author: Avadis Tevanian, Jr., Michael Wayne Young
 *
 *  The proverbial page-out daemon.
 */
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <machine/vm_tuning.h>
#include <machine/commpage.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h> /* must be last */
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache.h>
#endif

#include <libkern/crypto/aes.h>
extern u_int32_t random(void);  /* from <libkern/libkern.h> */

#include <libkern/OSDebug.h>

extern void m_drain(void);
#if VM_PRESSURE_EVENTS
extern unsigned int memorystatus_available_pages;
extern unsigned int memorystatus_available_pages_pressure;
extern unsigned int memorystatus_available_pages_critical;
extern unsigned int memorystatus_frozen_count;
extern unsigned int memorystatus_suspended_count;

extern vm_pressure_level_t memorystatus_vm_pressure_level;
int memorystatus_purge_on_warning = 2;
int memorystatus_purge_on_urgent = 5;
int memorystatus_purge_on_critical = 8;

void vm_pressure_response(void);
boolean_t vm_pressure_thread_running = FALSE;
extern void consider_vm_pressure_events(void);

#define MEMORYSTATUS_SUSPENDED_THRESHOLD  4
#endif /* VM_PRESSURE_EVENTS */

boolean_t vm_pressure_changed = FALSE;
#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE   /* maximum iterations of the active queue to move pages to inactive */
#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE  100
#endif

#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE  /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
#endif

#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF 100  /* number of pages to move to break deadlock */
#endif

#ifndef VM_PAGEOUT_INACTIVE_RELIEF
#define VM_PAGEOUT_INACTIVE_RELIEF 50   /* minimum number of pages to move to the inactive q */
#endif

#ifndef VM_PAGE_LAUNDRY_MAX
#define VM_PAGE_LAUNDRY_MAX 128UL       /* maximum pageouts on a given pageout queue */
#endif  /* VM_PAGE_LAUNDRY_MAX */

#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT 10        /* milliseconds */
#endif  /* VM_PAGEOUT_BURST_WAIT */

#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT 200       /* milliseconds */
#endif  /* VM_PAGEOUT_EMPTY_WAIT */

#ifndef VM_PAGEOUT_DEADLOCK_WAIT
#define VM_PAGEOUT_DEADLOCK_WAIT 300    /* milliseconds */
#endif  /* VM_PAGEOUT_DEADLOCK_WAIT */

#ifndef VM_PAGEOUT_IDLE_WAIT
#define VM_PAGEOUT_IDLE_WAIT 10         /* milliseconds */
#endif  /* VM_PAGEOUT_IDLE_WAIT */

#ifndef VM_PAGEOUT_SWAP_WAIT
#define VM_PAGEOUT_SWAP_WAIT 50         /* milliseconds */
#endif  /* VM_PAGEOUT_SWAP_WAIT */

#ifndef VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED
#define VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED 1000   /* maximum pages considered before we issue a pressure event */
#endif  /* VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED */

#ifndef VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS
#define VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS 5    /* seconds */
#endif  /* VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS */

unsigned int vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
unsigned int vm_page_speculative_percentage = 5;

#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_page_speculative_percentage))
#endif  /* VM_PAGE_SPECULATIVE_TARGET */

#ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
#define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
#endif  /* VM_PAGE_INACTIVE_HEALTHY_LIMIT */
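
/*
 * Worked example (illustrative only, using the default tunables above):
 * with vm_page_speculative_percentage = 5 and a pool of 100000
 * active+inactive pages,
 *
 *	VM_PAGE_SPECULATIVE_TARGET(100000)     = 100000 / (100 / 5) = 5000
 *	VM_PAGE_INACTIVE_HEALTHY_LIMIT(100000) = 100000 / 200       = 500
 *
 * i.e. the scan aims to keep roughly 5% of those pages speculative and
 * uses 0.5% as a low-water mark for the inactive+speculative pool.
 */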
/*
 * To obtain a reasonable LRU approximation, the inactive queue
 * needs to be large enough to give pages on it a chance to be
 * referenced a second time.  This macro defines the fraction
 * of active+inactive pages that should be inactive.
 * The pageout daemon uses it to update vm_page_inactive_target.
 *
 * If vm_page_free_count falls below vm_page_free_target and
 * vm_page_inactive_count is below vm_page_inactive_target,
 * then the pageout daemon starts running.
 */

#ifndef	VM_PAGE_INACTIVE_TARGET
#define	VM_PAGE_INACTIVE_TARGET(avail)	((avail) * 1 / 2)
#endif	/* VM_PAGE_INACTIVE_TARGET */

/*
 * Once the pageout daemon starts running, it keeps going
 * until vm_page_free_count meets or exceeds vm_page_free_target.
 */

#ifndef	VM_PAGE_FREE_TARGET
#define	VM_PAGE_FREE_TARGET(free)	(15 + (free) / 80)
#endif	/* VM_PAGE_FREE_TARGET */

/*
 * The pageout daemon always starts running once vm_page_free_count
 * falls below vm_page_free_min.
 */

#ifndef	VM_PAGE_FREE_MIN
#define	VM_PAGE_FREE_MIN(free)	(10 + (free) / 100)
#endif	/* VM_PAGE_FREE_MIN */

#define VM_PAGE_FREE_RESERVED_LIMIT	1700
#define VM_PAGE_FREE_MIN_LIMIT		3500
#define VM_PAGE_FREE_TARGET_LIMIT	4000

/*
 * When vm_page_free_count falls below vm_page_free_reserved,
 * only vm-privileged threads can allocate pages.  vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
 * operation by dipping into the reserved pool of pages.
 */

#ifndef	VM_PAGE_FREE_RESERVED
#define	VM_PAGE_FREE_RESERVED(n)	\
	((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif	/* VM_PAGE_FREE_RESERVED */

/*
 * When we dequeue pages from the inactive list, they are
 * reactivated (ie, put back on the active queue) if referenced.
 * However, it is possible to starve the free list if other
 * processors are referencing pages faster than we can turn off
 * the referenced bit.  So we limit the number of reactivations
 * we will make per call of vm_pageout_scan().
 */
#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
#ifndef	VM_PAGE_REACTIVATE_LIMIT
#define	VM_PAGE_REACTIVATE_LIMIT(avail)	(MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif	/* VM_PAGE_REACTIVATE_LIMIT */
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM	100
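
/*
 * Worked example (illustrative only): on a machine with 1,000,000
 * pageable pages and VM_PAGE_LAUNDRY_MAX = 128,
 *
 *	VM_PAGE_INACTIVE_TARGET(1000000)  = 1000000 / 2        = 500000
 *	VM_PAGE_FREE_TARGET(1000000)      = 15 + 1000000 / 80  = 12515
 *	VM_PAGE_FREE_MIN(1000000)         = 10 + 1000000 / 100 = 10010
 *	VM_PAGE_FREE_RESERVED(4)          = 6 * 128 + 4        = 772
 *	VM_PAGE_REACTIVATE_LIMIT(1000000) = MAX(1000000 / 20, 20000) = 50000
 *
 * (compare the VM_PAGE_FREE_*_LIMIT bounds defined above).
 */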
extern boolean_t hibernate_cleaning_in_progress;

/*
 * Exported variable used to broadcast the activation of the pageout scan
 * Working Set uses this to throttle its use of pmap removes.  In this
 * way, code which runs within memory in an uncontested context does
 * not keep encountering soft faults.
 */
unsigned int vm_pageout_scan_event_counter = 0;

/*
 * Forward declarations for internal routines.
 */
struct cq {
	struct vm_pageout_queue *q;
};

#if VM_PRESSURE_EVENTS
void vm_pressure_thread(void);

boolean_t VM_PRESSURE_NORMAL_TO_WARNING(void);
boolean_t VM_PRESSURE_WARNING_TO_CRITICAL(void);

boolean_t VM_PRESSURE_WARNING_TO_NORMAL(void);
boolean_t VM_PRESSURE_CRITICAL_TO_WARNING(void);
#endif /* VM_PRESSURE_EVENTS */

static void vm_pageout_garbage_collect(int);
static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(struct cq *cq);
static void vm_pageout_adjust_io_throttles(struct vm_pageout_queue *, struct vm_pageout_queue *, boolean_t);

extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);

static thread_t vm_pageout_external_iothread = THREAD_NULL;
static thread_t vm_pageout_internal_iothread = THREAD_NULL;

unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;

unsigned int vm_pageout_swap_wait = 0;
unsigned int vm_pageout_idle_wait = 0;		/* milliseconds */
unsigned int vm_pageout_empty_wait = 0;		/* milliseconds */
unsigned int vm_pageout_burst_wait = 0;		/* milliseconds */
unsigned int vm_pageout_deadlock_wait = 0;	/* milliseconds */
unsigned int vm_pageout_deadlock_relief = 0;
unsigned int vm_pageout_inactive_relief = 0;
unsigned int vm_pageout_burst_active_throttle = 0;
unsigned int vm_pageout_burst_inactive_throttle = 0;

int vm_upl_wait_for_pages = 0;
/*
 * These variables record the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the variables.
 */

unsigned int vm_pageout_active = 0;			/* debugging */
unsigned int vm_pageout_active_busy = 0;		/* debugging */
unsigned int vm_pageout_inactive = 0;			/* debugging */
unsigned int vm_pageout_inactive_throttled = 0;		/* debugging */
unsigned int vm_pageout_inactive_forced = 0;		/* debugging */
unsigned int vm_pageout_inactive_nolock = 0;		/* debugging */
unsigned int vm_pageout_inactive_avoid = 0;		/* debugging */
unsigned int vm_pageout_inactive_busy = 0;		/* debugging */
unsigned int vm_pageout_inactive_error = 0;		/* debugging */
unsigned int vm_pageout_inactive_absent = 0;		/* debugging */
unsigned int vm_pageout_inactive_notalive = 0;		/* debugging */
unsigned int vm_pageout_inactive_used = 0;		/* debugging */
unsigned int vm_pageout_cache_evicted = 0;		/* debugging */
unsigned int vm_pageout_inactive_clean = 0;		/* debugging */
unsigned int vm_pageout_speculative_clean = 0;		/* debugging */

unsigned int vm_pageout_freed_from_cleaned = 0;
unsigned int vm_pageout_freed_from_speculative = 0;
unsigned int vm_pageout_freed_from_inactive_clean = 0;

unsigned int vm_pageout_enqueued_cleaned_from_inactive_clean = 0;
unsigned int vm_pageout_enqueued_cleaned_from_inactive_dirty = 0;

unsigned int vm_pageout_cleaned_reclaimed = 0;		/* debugging; how many cleaned pages are reclaimed by the pageout scan */
unsigned int vm_pageout_cleaned_reactivated = 0;	/* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
unsigned int vm_pageout_cleaned_reference_reactivated = 0;
unsigned int vm_pageout_cleaned_volatile_reactivated = 0;
unsigned int vm_pageout_cleaned_fault_reactivated = 0;
unsigned int vm_pageout_cleaned_commit_reactivated = 0;	/* debugging; how many cleaned pages are found to be referenced on commit (and are therefore reactivated) */
unsigned int vm_pageout_cleaned_busy = 0;
unsigned int vm_pageout_cleaned_nolock = 0;

unsigned int vm_pageout_inactive_dirty_internal = 0;	/* debugging */
unsigned int vm_pageout_inactive_dirty_external = 0;	/* debugging */
unsigned int vm_pageout_inactive_deactivated = 0;	/* debugging */
unsigned int vm_pageout_inactive_anonymous = 0;		/* debugging */
unsigned int vm_pageout_dirty_no_pager = 0;		/* debugging */
unsigned int vm_pageout_purged_objects = 0;		/* debugging */
unsigned int vm_stat_discard = 0;			/* debugging */
unsigned int vm_stat_discard_sent = 0;			/* debugging */
unsigned int vm_stat_discard_failure = 0;		/* debugging */
unsigned int vm_stat_discard_throttle = 0;		/* debugging */
unsigned int vm_pageout_reactivation_limit_exceeded = 0;	/* debugging */
unsigned int vm_pageout_catch_ups = 0;			/* debugging */
unsigned int vm_pageout_inactive_force_reclaim = 0;	/* debugging */

unsigned int vm_pageout_scan_reclaimed_throttled = 0;
unsigned int vm_pageout_scan_active_throttled = 0;
unsigned int vm_pageout_scan_inactive_throttled_internal = 0;
unsigned int vm_pageout_scan_inactive_throttled_external = 0;
unsigned int vm_pageout_scan_throttle = 0;			/* debugging */
unsigned int vm_pageout_scan_burst_throttle = 0;		/* debugging */
unsigned int vm_pageout_scan_empty_throttle = 0;		/* debugging */
unsigned int vm_pageout_scan_swap_throttle = 0;			/* debugging */
unsigned int vm_pageout_scan_deadlock_detected = 0;		/* debugging */
unsigned int vm_pageout_scan_active_throttle_success = 0;	/* debugging */
unsigned int vm_pageout_scan_inactive_throttle_success = 0;	/* debugging */
unsigned int vm_pageout_inactive_external_forced_jetsam_count = 0;	/* debugging */
unsigned int vm_page_speculative_count_drifts = 0;
unsigned int vm_page_speculative_count_drift_max = 0;

/*
 * Backing store throttle when BS is exhausted
 */
unsigned int vm_backing_store_low = 0;

unsigned int vm_pageout_out_of_line = 0;
unsigned int vm_pageout_in_place = 0;

unsigned int vm_page_steal_pageout_page = 0;

/*
 * counters and statistics...
 */
unsigned long vm_page_decrypt_counter = 0;
unsigned long vm_page_decrypt_for_upl_counter = 0;
unsigned long vm_page_encrypt_counter = 0;
unsigned long vm_page_encrypt_abort_counter = 0;
unsigned long vm_page_encrypt_already_encrypted_counter = 0;
boolean_t vm_pages_encrypted = FALSE;	/* are there encrypted pages ? */

struct vm_pageout_queue vm_pageout_queue_internal;
struct vm_pageout_queue vm_pageout_queue_external;
unsigned int vm_page_speculative_target = 0;

vm_object_t vm_pageout_scan_wants_object = VM_OBJECT_NULL;

boolean_t (* volatile consider_buffer_cache_collect)(int) = NULL;

#if DEVELOPMENT || DEBUG
unsigned long vm_cs_validated_resets = 0;
#endif

int vm_debug_events = 0;

#if CONFIG_MEMORYSTATUS
extern boolean_t memorystatus_idle_exit_from_VM(void);
extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);
extern void memorystatus_on_pageout_scan_end(void);
#endif /* CONFIG_MEMORYSTATUS */

boolean_t vm_page_compressions_failing = FALSE;

/*
 *	Routine:	vm_backing_store_disable
 *	Purpose:
 *		Suspend non-privileged threads wishing to extend
 *		backing store when we are low on backing store
 *		(Synchronized by caller)
 */
void
vm_backing_store_disable(
	boolean_t	disable)
{
	if (disable) {
		vm_backing_store_low = 1;
	} else {
		if (vm_backing_store_low) {
			vm_backing_store_low = 0;
			thread_wakeup((event_t) &vm_backing_store_low);
		}
	}
}
#if MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
unsigned long vm_pageout_cluster_collisions = 0;
unsigned long vm_pageout_cluster_clusters = 0;
unsigned long vm_pageout_cluster_conversions = 0;
unsigned long vm_pageout_target_collisions = 0;
unsigned long vm_pageout_target_page_dirtied = 0;
unsigned long vm_pageout_target_page_freed = 0;
#define CLUSTER_STAT(clause)	clause
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */
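
/*
 * Illustrative note (not part of the original source): with
 * MACH_CLUSTER_STATS enabled, a statement such as
 *
 *	CLUSTER_STAT(vm_pageout_cluster_dirtied++;)
 *
 * expands to the increment itself; with the feature disabled it expands
 * to nothing, so the cluster statistics above compile away entirely.
 */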
/*
 *	Routine:	vm_pageout_object_terminate
 *	Purpose:
 *		Destroy the pageout_object, and perform all of the
 *		required cleanup actions.
 *
 *	In/Out conditions:
 *		The object must be locked, and will be returned locked.
 */
void
vm_pageout_object_terminate(
	vm_object_t	object)
{
	vm_object_t	shadow_object;

	/*
	 * Deal with the deallocation (last reference) of a pageout object
	 * (used for cleaning-in-place) by dropping the paging references/
	 * freeing pages in the original object.
	 */

	assert(object->pageout);
	shadow_object = object->shadow;
	vm_object_lock(shadow_object);

	while (!queue_empty(&object->memq)) {
		vm_page_t		p, m;
		vm_object_offset_t	offset;

		p = (vm_page_t) queue_first(&object->memq);

		assert(!p->cleaning);

		offset = p->offset;

		m = vm_page_lookup(shadow_object,
				   offset + object->vo_shadow_offset);

		if (m == VM_PAGE_NULL)
			continue;

		assert((m->dirty) || (m->precious) ||
		       (m->busy && m->cleaning));

		/*
		 * Handle the trusted pager throttle.
		 * Also decrement the burst throttle (if external).
		 */
		vm_page_lock_queues();
		if (m->pageout_queue)
			vm_pageout_throttle_up(m);

		/*
		 * Handle the "target" page(s). These pages are to be freed if
		 * successfully cleaned. Target pages are always busy, and are
		 * wired exactly once. The initial target pages are not mapped,
		 * (so cannot be referenced or modified) but converted target
		 * pages may have been modified between the selection as an
		 * adjacent page and conversion to a target.
		 */
		assert(m->wire_count == 1);

		m->encrypted_cleaning = FALSE;

#if MACH_CLUSTER_STATS
		if (m->wanted) vm_pageout_target_collisions++;
#endif
		/*
		 * Revoke all access to the page. Since the object is
		 * locked, and the page is busy, this prevents the page
		 * from being dirtied after the pmap_disconnect() call.
		 *
		 * Since the page is left "dirty" but "not modified", we
		 * can detect whether the page was redirtied during
		 * pageout by checking the modify state.
		 */
		if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED) {
			SET_PAGE_DIRTY(m, FALSE);
		}

		CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
		vm_page_unwire(m, TRUE);	/* reactivates */
		VM_STAT_INCR(reactivations);

		CLUSTER_STAT(vm_pageout_target_page_freed++;)
		vm_page_free(m);		/* clears busy, etc. */

		vm_page_unlock_queues();

		/*
		 * Handle the "adjacent" pages. These pages were cleaned in
		 * place, and should be left alone.
		 * If prep_pin_count is nonzero, then someone is using the
		 * page, so make it active.
		 */
		if (!m->active && !m->inactive && !m->throttled && !m->private) {
			vm_page_deactivate(m);
		}
		if (m->overwriting) {
			/*
			 * the (COPY_OUT_FROM == FALSE) request_page_list case
			 *
			 * We do not re-set m->dirty !
			 * The page was busy so no extraneous activity
			 * could have occurred. COPY_INTO is a read into the
			 * new pages. CLEAN_IN_PLACE does actually write
			 * out the pages but handling outside of this code
			 * will take care of resetting dirty. We clear the
			 * modify however for the Programmed I/O case.
			 */
			pmap_clear_modify(m->phys_page);

			/*
			 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
			 * Occurs when the original page was wired
			 * at the time of the list request
			 */
			assert(VM_PAGE_WIRED(m));
			vm_page_unwire(m, TRUE);	/* reactivates */

			m->overwriting = FALSE;
		}
		/*
		 * Set the dirty state according to whether or not the page was
		 * modified during the pageout. Note that we purposefully do
		 * NOT call pmap_clear_modify since the page is still mapped.
		 * If the page were to be dirtied between the 2 calls, this
		 * fact would be lost. This code is only necessary to
		 * maintain statistics, since the pmap module is always
		 * consulted if m->dirty is false.
		 */
#if MACH_CLUSTER_STATS
		m->dirty = pmap_is_modified(m->phys_page);

		if (m->dirty)	vm_pageout_cluster_dirtied++;
		else		vm_pageout_cluster_cleaned++;
		if (m->wanted)	vm_pageout_cluster_collisions++;
#endif
		if (m->encrypted_cleaning == TRUE) {
			m->encrypted_cleaning = FALSE;
		}
		/*
		 * Wakeup any thread waiting for the page to be un-cleaning.
		 */
		vm_page_unlock_queues();
	}
	/*
	 * Account for the paging reference taken in vm_paging_object_allocate.
	 */
	vm_object_activity_end(shadow_object);
	vm_object_unlock(shadow_object);

	assert(object->ref_count == 0);
	assert(object->paging_in_progress == 0);
	assert(object->activity_in_progress == 0);
	assert(object->resident_page_count == 0);
}
/*
 *	Routine:	vm_pageclean_setup
 *
 *	Purpose:	setup a page to be cleaned (made non-dirty), but not
 *			necessarily flushed from the VM page cache.
 *			This is accomplished by cleaning in place.
 *
 *			The page must not be busy, and new_object
 *			must not be locked.
 */
static void
vm_pageclean_setup(
	vm_page_t		m,
	vm_page_t		new_m,
	vm_object_t		new_object,
	vm_object_offset_t	new_offset)
{
	assert(!m->cleaning);

	XPR(XPR_VM_PAGEOUT,
	    "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
	    m->object, m->offset, m, new_m, new_offset);

	pmap_clear_modify(m->phys_page);

	/*
	 * Mark original page as cleaning in place.
	 */
	SET_PAGE_DIRTY(m, FALSE);

	/*
	 * Convert the fictitious page to a private shadow of
	 * the real page.
	 */
	assert(new_m->fictitious);
	assert(new_m->phys_page == vm_page_fictitious_addr);
	new_m->fictitious = FALSE;
	new_m->private = TRUE;
	new_m->pageout = TRUE;
	new_m->phys_page = m->phys_page;

	vm_page_lockspin_queues();
	vm_page_unlock_queues();

	vm_page_insert(new_m, new_object, new_offset);
	assert(!new_m->wanted);
}
/*
 *	Routine:	vm_pageout_initialize_page
 *	Purpose:
 *		Causes the specified page to be initialized in
 *		the appropriate memory object. This routine is used to push
 *		pages into a copy-object when they are modified in the
 *		original object.
 *
 *		The page is moved to a temporary object and paged out.
 *
 *		The page in question must not be on any pageout queues.
 *		The object to which it belongs must be locked.
 *		The page must be busy, but not hold a paging reference.
 *
 *		Move this page to a completely new object.
 */
void
vm_pageout_initialize_page(
	vm_page_t	m)
{
	vm_object_t		object;
	vm_object_offset_t	paging_offset;
	memory_object_t		pager;

	XPR(XPR_VM_PAGEOUT,
	    "vm_pageout_initialize_page, page 0x%X\n",
	    m, 0, 0, 0, 0);

	/*
	 * Verify that we really want to clean this page.
	 *
	 * Create a paging reference to let us play with the object.
	 */
	object = m->object;
	paging_offset = m->offset + object->paging_offset;

	if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {

		panic("reservation without pageout?"); /* alan */

		vm_object_unlock(object);

		return;
	}

	/*
	 * If there's no pager, then we can't clean the page. This should
	 * never happen since this should be a copy object and therefore not
	 * an external object, so the pager should always be there.
	 */
	pager = object->pager;

	if (pager == MEMORY_OBJECT_NULL) {
		panic("missing pager for copy object");
	}

	/*
	 * set the page for future call to vm_fault_list_request
	 */
	pmap_clear_modify(m->phys_page);
	SET_PAGE_DIRTY(m, FALSE);

	/*
	 * keep the object from collapsing or terminating
	 */
	vm_object_paging_begin(object);
	vm_object_unlock(object);

	/*
	 * Write the data to its pager.
	 * Note that the data is passed by naming the new object,
	 * not a virtual address; the pager interface has been
	 * manipulated to use the "internal memory" data type.
	 * [The object reference from its allocation is donated
	 * to the eventual recipient.]
	 */
	memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);

	vm_object_lock(object);
	vm_object_paging_end(object);
}

#if MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES 16
struct {
	unsigned long pages_in_cluster;
	unsigned long pages_at_higher_offsets;
	unsigned long pages_at_lower_offsets;
} cluster_stats[MAXCLUSTERPAGES];
#endif	/* MACH_CLUSTER_STATS */
/*
 * vm_pageout_cluster:
 *
 * Given a page, queue it to the appropriate I/O thread,
 * which will page it out and attempt to clean adjacent pages
 * in the same operation.
 *
 * The object and queues must be locked. We will take a
 * paging reference to prevent deallocation or collapse when we
 * release the object lock back at the call site.  The I/O thread
 * is responsible for consuming this reference.
 *
 * The page must not be on any pageout queue.
 */
vm_pageout_cluster(vm_page_t m, boolean_t pageout)
{
	vm_object_t	object = m->object;
	struct vm_pageout_queue *q;

	XPR(XPR_VM_PAGEOUT,
	    "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
	    object, m->offset, m, 0, 0);

	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	vm_object_lock_assert_exclusive(object);

	/*
	 * Only a certain kind of page is appreciated here.
	 */
	assert((m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
	assert(!m->cleaning && !m->pageout && !m->laundry);
#ifndef CONFIG_FREEZE
	assert(!m->inactive && !m->active);
	assert(!m->throttled);
#endif

	/*
	 * protect the object from collapse or termination
	 */
	vm_object_activity_begin(object);

	m->pageout = pageout;

	if (object->internal == TRUE) {
		if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
			m->busy = TRUE;

		q = &vm_pageout_queue_internal;
	} else
		q = &vm_pageout_queue_external;

	/*
	 * pgo_laundry count is tied to the laundry bit
	 */
	m->laundry = TRUE;
	q->pgo_laundry++;

	m->pageout_queue = TRUE;
	queue_enter(&q->pgo_pending, m, vm_page_t, pageq);

	if (q->pgo_idle == TRUE) {
		q->pgo_idle = FALSE;
		thread_wakeup((event_t) &q->pgo_pending);
	}
}
unsigned long vm_pageout_throttle_up_count = 0;

/*
 * A page is back from laundry or we are stealing it back from
 * the laundering state.  See if there are some pages waiting to
 * go to laundry and if we can let some of them go now.
 *
 * Object and page queues must be locked.
 */
void
vm_pageout_throttle_up(
	vm_page_t	m)
{
	struct vm_pageout_queue *q;

	assert(m->object != VM_OBJECT_NULL);
	assert(m->object != kernel_object);

	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	vm_object_lock_assert_exclusive(m->object);

	vm_pageout_throttle_up_count++;

	if (m->object->internal == TRUE)
		q = &vm_pageout_queue_internal;
	else
		q = &vm_pageout_queue_external;

	if (m->pageout_queue == TRUE) {

		queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
		m->pageout_queue = FALSE;

		m->pageq.next = NULL;
		m->pageq.prev = NULL;

		vm_object_activity_end(m->object);
	}
	if (m->laundry == TRUE) {

		m->laundry = FALSE;
		q->pgo_laundry--;

		if (q->pgo_throttled == TRUE) {
			q->pgo_throttled = FALSE;
			thread_wakeup((event_t) &q->pgo_laundry);
		}
		if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
			q->pgo_draining = FALSE;
			thread_wakeup((event_t) (&q->pgo_laundry+1));
		}
	}
}

static void
vm_pageout_throttle_up_batch(
	struct vm_pageout_queue *q,
	int		batch_cnt)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	vm_pageout_throttle_up_count += batch_cnt;

	q->pgo_laundry -= batch_cnt;

	if (q->pgo_throttled == TRUE) {
		q->pgo_throttled = FALSE;
		thread_wakeup((event_t) &q->pgo_laundry);
	}
	if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
		q->pgo_draining = FALSE;
		thread_wakeup((event_t) (&q->pgo_laundry+1));
	}
}
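
/*
 * Illustrative example (not from the original source): if the internal
 * pageout queue is throttled with pgo_laundry == 64 and a batch of 64
 * laundered pages completes, vm_pageout_throttle_up_batch(q, 64) drops
 * pgo_laundry to 0, wakes any thread sleeping on &q->pgo_laundry (the
 * throttled pageout scan), and, if the queue was draining, also wakes
 * the waiter sleeping on &q->pgo_laundry + 1.
 */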
/*
 * VM memory pressure monitoring.
 *
 * vm_pageout_scan() keeps track of the number of pages it considers and
 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
 *
 * compute_memory_pressure() is called every second from compute_averages()
 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
 * of reclaimed pages in a new vm_pageout_stat[] bucket.
 *
 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
 * The caller provides the number of seconds ("nsecs") worth of statistics
 * it wants, up to 30 seconds.
 * It computes the number of pages reclaimed in the past "nsecs" seconds and
 * also returns the number of pages the system still needs to reclaim at this
 * moment in time.
 */
#define VM_PAGEOUT_STAT_SIZE	31
struct vm_pageout_stat {
	unsigned int considered;
	unsigned int reclaimed;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0}, };
unsigned int vm_pageout_stat_now = 0;
unsigned int vm_memory_pressure = 0;

#define VM_PAGEOUT_STAT_BEFORE(i) \
	(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
	(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
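
/*
 * Illustrative sketch (not part of the original source): the stats above
 * form a 31-slot ring indexed by vm_pageout_stat_now, so walking "n"
 * seconds back is just repeated application of VM_PAGEOUT_STAT_BEFORE().
 */
#if 0	/* example only */
static unsigned int
vm_pageout_stats_reclaimed_last(unsigned int nsecs)
{
	unsigned int	i = vm_pageout_stat_now;
	unsigned int	total = 0;

	while (nsecs-- != 0) {
		i = VM_PAGEOUT_STAT_BEFORE(i);
		if (i == vm_pageout_stat_now)
			break;		/* wrapped all the way around */
		total += vm_pageout_stats[i].reclaimed;
	}
	return total;
}
#endif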
#if VM_PAGE_BUCKETS_CHECK
int vm_page_buckets_check_interval = 10; /* in seconds */
#endif /* VM_PAGE_BUCKETS_CHECK */

/*
 * Called from compute_averages().
 */
void
compute_memory_pressure(
	__unused void *arg)
{
	unsigned int vm_pageout_next;

#if VM_PAGE_BUCKETS_CHECK
	/* check the consistency of VM page buckets at regular interval */
	static int counter = 0;
	if ((++counter % vm_page_buckets_check_interval) == 0) {
		vm_page_buckets_check();
	}
#endif /* VM_PAGE_BUCKETS_CHECK */

	vm_memory_pressure =
		vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;

	commpage_set_memory_pressure( vm_memory_pressure );

	/* move "now" forward */
	vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
	vm_pageout_stats[vm_pageout_next].considered = 0;
	vm_pageout_stats[vm_pageout_next].reclaimed = 0;
	vm_pageout_stat_now = vm_pageout_next;
}
/*
 * mach_vm_ctl_page_free_wanted() is called indirectly, via
 * mach_vm_pressure_monitor(), when taking a stackshot.  Therefore,
 * it must be safe in the restricted stackshot context.  Locks and/or
 * blocking are not allowable.
 */
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
	unsigned int page_free_target, page_free_count, page_free_wanted;

	page_free_target = vm_page_free_target;
	page_free_count = vm_page_free_count;
	if (page_free_target > page_free_count) {
		page_free_wanted = page_free_target - page_free_count;
	} else {
		page_free_wanted = 0;
	}

	return page_free_wanted;
}
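
/*
 * Illustrative example: with vm_page_free_target = 4000 and
 * vm_page_free_count = 2500, mach_vm_ctl_page_free_wanted() reports
 * 1500 pages still wanted; once the free count meets the target it
 * reports 0.
 */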
/*
 * mach_vm_pressure_monitor() is called when taking a stackshot, with
 * wait_for_pressure FALSE, so that code path must remain safe in the
 * restricted stackshot context.  No blocking or locks are allowable
 * on that code path.
 */
kern_return_t
mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p)
{
	wait_result_t	wr;
	unsigned int	vm_pageout_then, vm_pageout_now;
	unsigned int	pages_reclaimed;

	/*
	 * We don't take the vm_page_queue_lock here because we don't want
	 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
	 * thread when it's trying to reclaim memory.  We don't need fully
	 * accurate monitoring anyway...
	 */

	if (wait_for_pressure) {
		/* wait until there's memory pressure */
		while (vm_page_free_count >= vm_page_free_target) {
			wr = assert_wait((event_t) &vm_page_free_wanted,
					 THREAD_INTERRUPTIBLE);
			if (wr == THREAD_WAITING) {
				wr = thread_block(THREAD_CONTINUE_NULL);
			}
			if (wr == THREAD_INTERRUPTED) {
				return KERN_ABORTED;
			}
			if (wr == THREAD_AWAKENED) {
				/*
				 * The memory pressure might have already
				 * been relieved but let's not block again
				 * and let's report that there was memory
				 * pressure at some point.
				 */
				break;
			}
		}
	}

	/* provide the number of pages the system wants to reclaim */
	if (pages_wanted_p != NULL) {
		*pages_wanted_p = mach_vm_ctl_page_free_wanted();
	}

	if (pages_reclaimed_p == NULL) {
		return KERN_SUCCESS;
	}

	/* provide number of pages reclaimed in the last "nsecs_monitored" */
	do {
		vm_pageout_now = vm_pageout_stat_now;
		pages_reclaimed = 0;
		for (vm_pageout_then =
			     VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
		     vm_pageout_then != vm_pageout_now &&
			     nsecs_monitored-- != 0;
		     vm_pageout_then =
			     VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
			pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
		}
	} while (vm_pageout_now != vm_pageout_stat_now);
	*pages_reclaimed_p = pages_reclaimed;

	return KERN_SUCCESS;
}
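
/*
 * Hypothetical usage sketch (not part of the original source): a
 * monitoring thread could block until pressure builds and then ask for
 * the pages reclaimed over the last 10 seconds.  The caller's context
 * and error handling here are assumptions.
 */
#if 0	/* example only */
static void
vm_pressure_monitor_example(void)
{
	unsigned int	reclaimed = 0;
	unsigned int	wanted = 0;

	if (mach_vm_pressure_monitor(TRUE,	/* wait_for_pressure */
				     10,	/* nsecs_monitored */
				     &reclaimed,
				     &wanted) == KERN_SUCCESS) {
		printf("pressure: reclaimed %u pages in 10s, %u still wanted\n",
		       reclaimed, wanted);
	}
}
#endif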
/*
 * function in BSD to apply I/O throttle to the pageout thread
 */
extern void vm_pageout_io_throttle(void);

/*
 * Page States: Used below to maintain the page state
 * before it's removed from its Q. This saved state
 * helps us do the right accounting in certain cases
 */
#define PAGE_STATE_SPECULATIVE		1
#define PAGE_STATE_ANONYMOUS		2
#define PAGE_STATE_INACTIVE		3
#define PAGE_STATE_INACTIVE_FIRST	4
#define PAGE_STATE_CLEAN		5

#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m)				\
	MACRO_BEGIN							\
	/*								\
	 * If a "reusable" page somehow made it back into		\
	 * the active queue, it's been re-used and is not		\
	 * quite re-usable.						\
	 * If the VM object was "all_reusable", consider it		\
	 * as "all re-used" instead of converting it to			\
	 * "partially re-used", which could be expensive.		\
	 */								\
	if ((m)->reusable ||						\
	    (m)->object->all_reusable) {				\
		vm_object_reuse_pages((m)->object,			\
				      (m)->offset,			\
				      (m)->offset + PAGE_SIZE_64,	\
				      FALSE);				\
	}								\
	MACRO_END

#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT		64
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX	1024
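
/*
 * Illustrative example: vm_pageout_scan() batches page-queue work and
 * only drops the queue lock once "delayed_unlock" exceeds
 * VM_PAGEOUT_DELAYED_UNLOCK_LIMIT (64) plus vm_upl_wait_for_pages,
 * never exceeding VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX (1024).  So with
 * 200 threads waiting on UPL pages the effective batch size is 264.
 */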
#define FCS_IDLE		0
#define FCS_DELAYED		1
#define FCS_DEADLOCK_DETECTED	2

struct flow_control {
	int		state;
	mach_timespec_t	ts;
};

uint32_t vm_pageout_considered_page = 0;
uint32_t vm_page_filecache_min = 0;

#define ANONS_GRABBED_LIMIT	2
/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 * It returns with both vm_page_queue_free_lock and vm_page_queue_lock
 * held and vm_page_free_wanted == 0.
 */
void
vm_pageout_scan(void)
{
	unsigned int	loop_count = 0;
	unsigned int	inactive_burst_count = 0;
	unsigned int	active_burst_count = 0;
	unsigned int	reactivated_this_call;
	unsigned int	reactivate_limit;
	vm_page_t	local_freeq = NULL;
	int		local_freed = 0;
	int		delayed_unlock = 0;
	int		delayed_unlock_limit = 0;
	int		refmod_state = 0;
	int		vm_pageout_deadlock_target = 0;
	struct vm_pageout_queue *iq;
	struct vm_pageout_queue *eq;
	struct vm_speculative_age_q *sq;
	struct flow_control	flow_control = { 0, { 0, 0 } };
	boolean_t	inactive_throttled = FALSE;
	boolean_t	try_failed;
	mach_timespec_t	ts;
	clock_sec_t	sec;
	clock_nsec_t	nsec;
	unsigned int	msecs = 0;
	vm_object_t	object = NULL;
	vm_object_t	last_object_tried;
	uint32_t	catch_up_count = 0;
	uint32_t	inactive_reclaim_run;
	boolean_t	forced_reclaim;
	boolean_t	exceeded_burst_throttle;
	boolean_t	grab_anonymous = FALSE;
	boolean_t	force_anonymous = FALSE;
	int		anons_grabbed = 0;
	int		page_prev_state = 0;
	int		cache_evict_throttle = 0;
	uint32_t	vm_pageout_inactive_external_forced_reactivate_limit = 0;
	int		force_purge = 0;

#if VM_PRESSURE_EVENTS
	vm_pressure_level_t pressure_level;
#endif /* VM_PRESSURE_EVENTS */

	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
		       vm_pageout_speculative_clean, vm_pageout_inactive_clean,
		       vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);

	flow_control.state = FCS_IDLE;
	iq = &vm_pageout_queue_internal;
	eq = &vm_pageout_queue_external;
	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);

	vm_page_lock_queues();
	delayed_unlock = 1;	/* must be nonzero if Qs are locked, 0 if unlocked */

	/*
	 * Calculate the max number of referenced pages on the inactive
	 * queue that we will reactivate.
	 */
	reactivated_this_call = 0;
	reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
						    vm_page_inactive_count);
	inactive_reclaim_run = 0;

	vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;

	/*
	 * We want to gradually dribble pages from the active queue
	 * to the inactive queue.  If we let the inactive queue get
	 * very small, and then suddenly dump many pages into it,
	 * those pages won't get a sufficient chance to be referenced
	 * before we start taking them from the inactive queue.
	 *
	 * We must limit the rate at which we send pages to the pagers
	 * so that we don't tie up too many pages in the I/O queues.
	 * We implement a throttling mechanism using the laundry count
	 * to limit the number of pages outstanding to the default
	 * and external pagers.  We can bypass the throttles and look
	 * for clean pages if the pageout queues don't drain in a timely
	 * fashion since this may indicate that the pageout paths are
	 * stalled waiting for memory, which only we can provide.
	 */

Restart:
	assert(delayed_unlock != 0);
	/*
	 * Recalculate vm_page_inactivate_target.
	 */
	vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
							  vm_page_inactive_count +
							  vm_page_speculative_count);

	vm_page_anonymous_min = vm_page_inactive_target / 20;

	/*
	 * don't want to wake the pageout_scan thread up every time we fall below
	 * the targets... set a low water mark at 0.25% below the target
	 */
	vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);

	if (vm_page_speculative_percentage > 50)
		vm_page_speculative_percentage = 50;
	else if (vm_page_speculative_percentage <= 0)
		vm_page_speculative_percentage = 1;

	vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
								vm_page_inactive_count);

	last_object_tried = NULL;

	if ((vm_page_inactive_count + vm_page_speculative_count) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count))
		catch_up_count = vm_page_inactive_count + vm_page_speculative_count;

	for (;;) {
		vm_page_t m;

		DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);

		if (delayed_unlock == 0) {
			vm_page_lock_queues();
			delayed_unlock = 1;
		}
		if (vm_upl_wait_for_pages < 0)
			vm_upl_wait_for_pages = 0;

		delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;

		if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX)
			delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;

		/*
		 * Move pages from active to inactive if we're below the target
		 */
		/* if we are trying to make clean, we need to make sure we actually have inactive - mj */
		if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
			goto done_moving_active_pages;

		if (object != NULL) {
			vm_object_unlock(object);
			object = NULL;
		}
		vm_pageout_scan_wants_object = VM_OBJECT_NULL;

		/*
		 * Don't sweep through active queue more than the throttle
		 * which should be kept relatively low
		 */
		active_burst_count = MIN(vm_pageout_burst_active_throttle, vm_page_active_count);

		VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_START,
			       vm_pageout_inactive, vm_pageout_inactive_used, vm_page_free_count, local_freed);

		VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_NONE,
			       vm_pageout_speculative_clean, vm_pageout_inactive_clean,
			       vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
		memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_START);
		while (!queue_empty(&vm_page_queue_active) && active_burst_count--) {

			vm_pageout_active++;

			m = (vm_page_t) queue_first(&vm_page_queue_active);

			assert(m->active && !m->inactive);
			assert(!m->laundry);
			assert(m->object != kernel_object);
			assert(m->phys_page != vm_page_guard_addr);

			DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);

			/*
			 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
			 *
			 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
			 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
			 * new reference happens. If no further references happen on the page after that remote TLB flushes
			 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
			 * by pageout_scan, which is just fine since the last reference would have happened quite far
			 * in the past (TLB caches don't hang around for very long), and of course could just as easily
			 * have happened before we moved the page
			 */
			pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);

			/*
			 * The page might be absent or busy,
			 * but vm_page_deactivate can handle that.
			 * FALSE indicates that we don't want a H/W clear reference
			 */
			vm_page_deactivate_internal(m, FALSE);

			if (delayed_unlock++ > delayed_unlock_limit) {

				vm_page_unlock_queues();

				VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
					       vm_page_free_count, local_freed, delayed_unlock_limit, 1);

				vm_page_free_list(local_freeq, TRUE);

				VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
					       vm_page_free_count, 0, 0, 1);

				vm_page_lock_queues();

				lck_mtx_yield(&vm_page_queue_lock);

				delayed_unlock = 1;

				/*
				 * continue the while loop processing
				 * the active queue... need to hold
				 * the page queues lock
				 */
			}
		}

		VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_END,
			       vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count, vm_page_inactive_target);
		memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_END);

		/**********************************************************************
		 * above this point we're playing with the active queue
		 * below this point we're playing with the throttling mechanisms
		 * and the inactive queue
		 **********************************************************************/

done_moving_active_pages:
		if (vm_page_free_count + local_freed >= vm_page_free_target) {
			if (object != NULL) {
				vm_object_unlock(object);
				object = NULL;
			}
			vm_pageout_scan_wants_object = VM_OBJECT_NULL;

			vm_page_unlock_queues();

			VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
				       vm_page_free_count, local_freed, delayed_unlock_limit, 2);

			vm_page_free_list(local_freeq, TRUE);

			VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
				       vm_page_free_count, local_freed, 0, 2);

			vm_page_lock_queues();

			/*
			 * make sure the pageout I/O threads are running
			 * throttled in case there are still requests
			 * in the laundry... since we have met our targets
			 * we don't need the laundry to be cleaned in a timely
			 * fashion... so let's avoid interfering with foreground
			 * activity
			 */
			vm_pageout_adjust_io_throttles(iq, eq, TRUE);

			/*
			 * recalculate vm_page_inactivate_target
			 */
			vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
									  vm_page_inactive_count +
									  vm_page_speculative_count);
			if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
			    !queue_empty(&vm_page_queue_active)) {
				/*
				 * inactive target still not met... keep going
				 * until we get the queues balanced...
				 */
				continue;
			}
			lck_mtx_lock(&vm_page_queue_free_lock);

			if ((vm_page_free_count >= vm_page_free_target) &&
			    (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
				/*
				 * done - we have met our target *and*
				 * there is no one waiting for a page.
				 */
return_from_scan:
				assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);

				VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
					       vm_pageout_inactive, vm_pageout_inactive_used, 0, 0);
				VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
					       vm_pageout_speculative_clean, vm_pageout_inactive_clean,
					       vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);

				return;
			}
			lck_mtx_unlock(&vm_page_queue_free_lock);
		}
		/*
		 * Before anything, we check if we have any ripe volatile
		 * objects around. If so, try to purge the first object.
		 * If the purge fails, fall through to reclaim a page instead.
		 * If the purge succeeds, go back to the top and reevaluate
		 * the new memory situation.
		 */
		assert(available_for_purge >= 0);
		force_purge = 0; /* no force-purging */

#if VM_PRESSURE_EVENTS
		pressure_level = memorystatus_vm_pressure_level;

		if (pressure_level > kVMPressureNormal) {

			if (pressure_level >= kVMPressureCritical) {
				force_purge = memorystatus_purge_on_critical;
			} else if (pressure_level >= kVMPressureUrgent) {
				force_purge = memorystatus_purge_on_urgent;
			} else if (pressure_level >= kVMPressureWarning) {
				force_purge = memorystatus_purge_on_warning;
			}
		}
#endif /* VM_PRESSURE_EVENTS */

		if (available_for_purge || force_purge) {

			if (object != NULL) {
				vm_object_unlock(object);
				object = NULL;
			}

			memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);

			VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
			if (vm_purgeable_object_purge_one(force_purge, C_DONT_BLOCK)) {

				VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
				memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
				continue;
			}
			VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
			memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
		}
		if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
			/*
			 * try to pull pages from the aging bins...
			 * see vm_page.h for an explanation of how
			 * this mechanism works
			 */
			struct vm_speculative_age_q	*aq;
			mach_timespec_t	ts_fully_aged;
			boolean_t	can_steal = FALSE;
			int		num_scanned_queues;

			aq = &vm_page_queue_speculative[speculative_steal_index];

			num_scanned_queues = 0;
			while (queue_empty(&aq->age_q) &&
			       num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {

				speculative_steal_index++;

				if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
					speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;

				aq = &vm_page_queue_speculative[speculative_steal_index];
			}

			if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
				/*
				 * XXX We've scanned all the speculative
				 * queues but still haven't found one
				 * that is not empty, even though
				 * vm_page_speculative_count is not 0.
				 *
				 * report the anomaly...
				 */
				printf("vm_pageout_scan: "
				       "all speculative queues empty "
				       "but count=%d.  Re-adjusting.\n",
				       vm_page_speculative_count);
				if (vm_page_speculative_count > vm_page_speculative_count_drift_max)
					vm_page_speculative_count_drift_max = vm_page_speculative_count;
				vm_page_speculative_count_drifts++;

				Debugger("vm_pageout_scan: no speculative pages");

				vm_page_speculative_count = 0;
				/* ... and continue */
			}

			if (vm_page_speculative_count > vm_page_speculative_target)
				can_steal = TRUE;
			else {
				ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) / 1000;
				ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) % 1000)
					* 1000 * NSEC_PER_USEC;

				ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);

				clock_get_system_nanotime(&sec, &nsec);
				ts.tv_sec = (unsigned int) sec;
				ts.tv_nsec = nsec;

				if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
					can_steal = TRUE;
			}
			if (can_steal == TRUE)
				vm_page_speculate_ageit(aq);
		}
		if (queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
			int	pages_evicted;

			if (object != NULL) {
				vm_object_unlock(object);
				object = NULL;
			}
			pages_evicted = vm_object_cache_evict(100, 10);

			if (pages_evicted) {

				vm_pageout_cache_evicted += pages_evicted;

				VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
					       vm_page_free_count, pages_evicted, vm_pageout_cache_evicted, 0);
				memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);

				/*
				 * we just freed up to 100 pages,
				 * so go back to the top of the main loop
				 * and re-evaluate the memory situation
				 */
				continue;
			} else
				cache_evict_throttle = 100;
		}
		if (cache_evict_throttle)
			cache_evict_throttle--;

		/*
		 * don't let the filecache_min fall below 33% of available memory...
		 *
		 * on systems w/o the compressor/swapper, the filecache is always
		 * a very large percentage of the AVAILABLE_NON_COMPRESSED_MEMORY
		 * since most (if not all) of the anonymous pages are in the
		 * throttled queue (which isn't counted as available) which
		 * effectively disables this filter
		 */
		vm_page_filecache_min = (AVAILABLE_NON_COMPRESSED_MEMORY / 3);

		exceeded_burst_throttle = FALSE;
		/*
		 * Sometimes we have to pause:
		 *	1) No inactive pages - nothing to do.
		 *	2) Loop control - no acceptable pages found on the inactive queue
		 *	   within the last vm_pageout_burst_inactive_throttle iterations
		 *	3) Flow control - default pageout queue is full
		 */
		if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_anonymous) && queue_empty(&sq->age_q)) {
			vm_pageout_scan_empty_throttle++;
			msecs = vm_pageout_empty_wait;
			goto vm_pageout_scan_delay;

		} else if (inactive_burst_count >=
			   MIN(vm_pageout_burst_inactive_throttle,
			       (vm_page_inactive_count +
				vm_page_speculative_count))) {
			vm_pageout_scan_burst_throttle++;
			msecs = vm_pageout_burst_wait;

			exceeded_burst_throttle = TRUE;
			goto vm_pageout_scan_delay;

		} else if (vm_page_free_count > (vm_page_free_reserved / 4) &&
			   VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()) {
			vm_pageout_scan_swap_throttle++;
			msecs = vm_pageout_swap_wait;
			goto vm_pageout_scan_delay;

		} else if (VM_PAGE_Q_THROTTLED(iq) &&
			   VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
			switch (flow_control.state) {

			case FCS_IDLE:
				if ((vm_page_free_count + local_freed) < vm_page_free_target) {

					if (vm_page_pageable_external_count > vm_page_filecache_min && !queue_empty(&vm_page_queue_inactive)) {
						anons_grabbed = ANONS_GRABBED_LIMIT;
						goto consider_inactive;
					}
					if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) && vm_page_active_count)
						continue;
				}
reset_deadlock_timer:
				ts.tv_sec = vm_pageout_deadlock_wait / 1000;
				ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
				clock_get_system_nanotime(&sec, &nsec);
				flow_control.ts.tv_sec = (unsigned int) sec;
				flow_control.ts.tv_nsec = nsec;
				ADD_MACH_TIMESPEC(&flow_control.ts, &ts);

				flow_control.state = FCS_DELAYED;
				msecs = vm_pageout_deadlock_wait;

				break;

			case FCS_DELAYED:
				clock_get_system_nanotime(&sec, &nsec);
				ts.tv_sec = (unsigned int) sec;

				if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
					/*
					 * the pageout thread for the default pager is potentially
					 * deadlocked since the
					 * default pager queue has been throttled for more than the
					 * allowable time... we need to move some clean pages or dirty
					 * pages belonging to the external pagers if they aren't throttled
					 * vm_page_free_wanted represents the number of threads currently
					 * blocked waiting for pages... we'll move one page for each of
					 * these plus a fixed amount to break the logjam... once we're done
					 * moving this number of pages, we'll re-enter the FCS_DELAYED state
					 * with a new timeout target since we have no way of knowing
					 * whether we've broken the deadlock except through observation
					 * of the queue associated with the default pager... we need to
					 * stop moving pages and allow the system to run to see what
					 * state it settles into.
					 */
					vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
					vm_pageout_scan_deadlock_detected++;
					flow_control.state = FCS_DEADLOCK_DETECTED;
					thread_wakeup((event_t) &vm_pageout_garbage_collect);
					goto consider_inactive;
				}
				/*
				 * just resniff instead of trying
				 * to compute a new delay time... we're going to be
				 * awakened immediately upon a laundry completion,
				 * so we won't wait any longer than necessary
				 */
				msecs = vm_pageout_idle_wait;
				break;

			case FCS_DEADLOCK_DETECTED:
				if (vm_pageout_deadlock_target)
					goto consider_inactive;
				goto reset_deadlock_timer;
			}
vm_pageout_scan_delay:
			if (object != NULL) {
				vm_object_unlock(object);
				object = NULL;
			}
			vm_pageout_scan_wants_object = VM_OBJECT_NULL;

			vm_page_unlock_queues();

			VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
				       vm_page_free_count, local_freed, delayed_unlock_limit, 3);

			vm_page_free_list(local_freeq, TRUE);

			VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
				       vm_page_free_count, local_freed, 0, 3);

			if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
				vm_consider_waking_compactor_swapper();

			vm_page_lock_queues();

			if (flow_control.state == FCS_DELAYED &&
			    !VM_PAGE_Q_THROTTLED(iq)) {
				flow_control.state = FCS_IDLE;
				goto consider_inactive;
			}

			if (vm_page_free_count >= vm_page_free_target) {
				/*
				 * we're here because
				 *  1) someone else freed up some pages while we had
				 *     the queues unlocked above
				 *  and we've hit one of the 3 conditions that
				 *  cause us to pause the pageout scan thread
				 *
				 * since we already have enough free pages,
				 * let's avoid stalling and return normally
				 *
				 * before we return, make sure the pageout I/O threads
				 * are running throttled in case there are still requests
				 * in the laundry... since we have enough free pages
				 * we don't need the laundry to be cleaned in a timely
				 * fashion... so let's avoid interfering with foreground
				 * activity
				 *
				 * we don't want to hold vm_page_queue_free_lock when
				 * calling vm_pageout_adjust_io_throttles (since it
				 * may cause other locks to be taken), we do the initial
				 * check outside of the lock.  Once we take the lock,
				 * we recheck the condition since it may have changed.
				 * if it has, no problem, we will make the threads
				 * non-throttled before actually blocking
				 */
				vm_pageout_adjust_io_throttles(iq, eq, TRUE);

				lck_mtx_lock(&vm_page_queue_free_lock);

				if (vm_page_free_count >= vm_page_free_target &&
				    (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
					goto return_from_scan;
				}
				lck_mtx_unlock(&vm_page_queue_free_lock);
			}
1848 if ((vm_page_free_count
+ vm_page_cleaned_count
) < vm_page_free_target
) {
1850 * we're most likely about to block due to one of
1851 * the 3 conditions that cause vm_pageout_scan to
1852 * not be able to make forward progress w/r
1853 * to providing new pages to the free queue,
1854 * so unthrottle the I/O threads in case we
1855 * have laundry to be cleaned... it needs
1856 * to be completed ASAP.
1858 * even if we don't block, we want the io threads
1859 * running unthrottled since the sum of free +
1860 * clean pages is still under our free target
1862 vm_pageout_adjust_io_throttles(iq
, eq
, FALSE
);
1864 if (vm_page_cleaned_count
> 0 && exceeded_burst_throttle
== FALSE
) {
1866 * if we get here we're below our free target and
1867 * we're stalling due to a full laundry queue or
1868 * we don't have any inactive pages other then
1869 * those in the clean queue...
1870 * however, we have pages on the clean queue that
1871 * can be moved to the free queue, so let's not
1872 * stall the pageout scan
1874 flow_control
.state
= FCS_IDLE
;
1875 goto consider_inactive
;
1877 VM_CHECK_MEMORYSTATUS
;
            if (flow_control.state != FCS_IDLE)
                vm_pageout_scan_throttle++;
            iq->pgo_throttled = TRUE;

            if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
                vm_consider_waking_compactor_swapper();

            assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
            counter(c_vm_pageout_scan_block++);

            vm_page_unlock_queues();

            assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);

            VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
                           iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
            memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);

            thread_block(THREAD_CONTINUE_NULL);

            VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
                           iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
            memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);

            vm_page_lock_queues();

            iq->pgo_throttled = FALSE;
            if (loop_count >= vm_page_inactive_count)
                loop_count = 0;
            inactive_burst_count = 0;

        flow_control.state = FCS_IDLE;
consider_inactive:
        vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
                                                                    vm_pageout_inactive_external_forced_reactivate_limit);
        inactive_burst_count++;
        vm_pageout_inactive++;
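        /*
         * Overview (added for clarity, summarizing the checks that follow):
         * victims are chosen roughly in this order -- untouched speculative
         * pages, then pages already sitting on the clean queue, then
         * file-backed inactive pages, and finally anonymous pages, with
         * anonymous grabs rate-limited by ANONS_GRABBED_LIMIT and biased
         * by vm_page_filecache_min / force_anonymous.
         */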
        if (VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
            assert(vm_page_throttled_count == 0);
            assert(queue_empty(&vm_page_queue_throttled));
        }
        /*
         * The most eligible pages are ones we paged in speculatively,
         * but which have not yet been touched.
         */
        if (!queue_empty(&sq->age_q) && force_anonymous == FALSE) {
            m = (vm_page_t) queue_first(&sq->age_q);

            page_prev_state = PAGE_STATE_SPECULATIVE;
        }
        /*
         * Try a clean-queue inactive page.
         */
        if (!queue_empty(&vm_page_queue_cleaned)) {
            m = (vm_page_t) queue_first(&vm_page_queue_cleaned);

            page_prev_state = PAGE_STATE_CLEAN;
        }

        grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);

        if (vm_page_pageable_external_count < vm_page_filecache_min || force_anonymous == TRUE) {
            grab_anonymous = TRUE;
        }

        if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || queue_empty(&vm_page_queue_anonymous)) {

            if ( !queue_empty(&vm_page_queue_inactive) ) {
                m = (vm_page_t) queue_first(&vm_page_queue_inactive);

                page_prev_state = PAGE_STATE_INACTIVE;

                if (vm_page_pageable_external_count < vm_page_filecache_min) {
                    if ((++reactivated_this_call % 100))
                        goto must_activate_page;
                    /*
                     * steal 1% of the file backed pages even if
                     * we are under the limit that has been set
                     * for a healthy filecache
                     */
                }
            }
        }
        if ( !queue_empty(&vm_page_queue_anonymous) ) {
            m = (vm_page_t) queue_first(&vm_page_queue_anonymous);

            page_prev_state = PAGE_STATE_ANONYMOUS;
        }
        /*
         * if we've gotten here, we have no victim page.
         * if making clean, free the local freed list and return.
         * if making free, check to see if we've finished balancing the queues
         * yet, if we haven't just continue, else panic
         */
        vm_page_unlock_queues();

        if (object != NULL) {
            vm_object_unlock(object);
            object = NULL;
        }
        vm_pageout_scan_wants_object = VM_OBJECT_NULL;

        VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
                       vm_page_free_count, local_freed, delayed_unlock_limit, 5);

        vm_page_free_list(local_freeq, TRUE);

        VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
                       vm_page_free_count, local_freed, 0, 5);

        vm_page_lock_queues();

        force_anonymous = FALSE;

        if ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target)
            goto Restart;

        if (!queue_empty(&sq->age_q))
            goto Restart;

        panic("vm_pageout: no victim");

        force_anonymous = FALSE;
        /*
         * we just found this page on one of our queues...
         * it can't also be on the pageout queue, so safe
         * to call VM_PAGE_QUEUES_REMOVE
         */
        assert(!m->pageout_queue);

        VM_PAGE_QUEUES_REMOVE(m);

        assert(!m->laundry);
        assert(!m->private);
        assert(!m->fictitious);
        assert(m->object != kernel_object);
        assert(m->phys_page != vm_page_guard_addr);

        if (page_prev_state != PAGE_STATE_SPECULATIVE)
            vm_pageout_stats[vm_pageout_stat_now].considered++;

        DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
2059 * check to see if we currently are working
2060 * with the same object... if so, we've
2061 * already got the lock
2063 if (m
->object
!= object
) {
2065 * the object associated with candidate page is
2066 * different from the one we were just working
2067 * with... dump the lock if we still own it
2069 if (object
!= NULL
) {
2070 vm_object_unlock(object
);
2072 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
* Try to lock object; since we've already got the
2076 * page queues lock, we can only 'try' for this one.
2077 * if the 'try' fails, we need to do a mutex_pause
2078 * to allow the owner of the object lock a chance to
2079 * run... otherwise, we're likely to trip over this
2080 * object in the same state as we work our way through
2081 * the queue... clumps of pages associated with the same
2082 * object are fairly typical on the inactive and active queues
2084 if (!vm_object_lock_try_scan(m
->object
)) {
2085 vm_page_t m_want
= NULL
;
2087 vm_pageout_inactive_nolock
++;
2089 if (page_prev_state
== PAGE_STATE_CLEAN
)
2090 vm_pageout_cleaned_nolock
++;
2092 if (page_prev_state
== PAGE_STATE_SPECULATIVE
)
2093 page_prev_state
= PAGE_STATE_INACTIVE_FIRST
;
2095 pmap_clear_reference(m
->phys_page
);
2096 m
->reference
= FALSE
;
            /*
             * m->object must be stable since we hold the page queues lock...
             * we can update the scan_collisions field sans the object lock
             * since it is a separate field and this is the only spot that does
             * a read-modify-write operation and it is never executed concurrently...
             * we can asynchronously set this field to 0 when creating a UPL, so it
             * is possible for the value to be a bit non-deterministic, but that's ok
             * since it's only used as a hint
             */
2107 m
->object
->scan_collisions
++;
2109 if ( !queue_empty(&sq
->age_q
) )
2110 m_want
= (vm_page_t
) queue_first(&sq
->age_q
);
2111 else if ( !queue_empty(&vm_page_queue_cleaned
))
2112 m_want
= (vm_page_t
) queue_first(&vm_page_queue_cleaned
);
2113 else if (anons_grabbed
>= ANONS_GRABBED_LIMIT
|| queue_empty(&vm_page_queue_anonymous
))
2114 m_want
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
2115 else if ( !queue_empty(&vm_page_queue_anonymous
))
2116 m_want
= (vm_page_t
) queue_first(&vm_page_queue_anonymous
);
2119 * this is the next object we're going to be interested in
2120 * try to make sure its available after the mutex_yield
2124 vm_pageout_scan_wants_object
= m_want
->object
;
2127 * force us to dump any collected free pages
2128 * and to pause before moving on
2135 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
2143 if (m
->encrypted_cleaning
) {
2146 * if this page has already been picked up as
2147 * part of a page-out cluster, it will be busy
2148 * because it is being encrypted (see
2149 * vm_object_upl_request()). But we still
2150 * want to demote it from "clean-in-place"
2151 * (aka "adjacent") to "clean-and-free" (aka
2152 * "target"), so let's ignore its "busy" bit
2153 * here and proceed to check for "cleaning" a
2154 * little bit below...
2157 * A "busy" page should still be left alone for
2158 * most purposes, so we have to be very careful
2159 * not to process that page too much.
2161 assert(m
->cleaning
);
2162 goto consider_inactive_page
;
2166 * Somebody is already playing with this page.
2167 * Put it back on the appropriate queue
2170 vm_pageout_inactive_busy
++;
2172 if (page_prev_state
== PAGE_STATE_CLEAN
)
2173 vm_pageout_cleaned_busy
++;
2176 switch (page_prev_state
) {
2178 case PAGE_STATE_SPECULATIVE
:
2179 vm_page_speculate(m
, FALSE
);
2182 case PAGE_STATE_ANONYMOUS
:
2183 case PAGE_STATE_CLEAN
:
2184 case PAGE_STATE_INACTIVE
:
2185 VM_PAGE_ENQUEUE_INACTIVE(m
, FALSE
);
2188 case PAGE_STATE_INACTIVE_FIRST
:
2189 VM_PAGE_ENQUEUE_INACTIVE(m
, TRUE
);
2192 goto done_with_inactivepage
;
2197 * If it's absent, in error or the object is no longer alive,
2198 * we can reclaim the page... in the no longer alive case,
2199 * there are 2 states the page can be in that preclude us
2200 * from reclaiming it - busy or cleaning - that we've already
2203 if (m
->absent
|| m
->error
|| !object
->alive
) {
2206 vm_pageout_inactive_absent
++;
2207 else if (!object
->alive
)
2208 vm_pageout_inactive_notalive
++;
2210 vm_pageout_inactive_error
++;
2212 if (vm_pageout_deadlock_target
) {
2213 vm_pageout_scan_inactive_throttle_success
++;
2214 vm_pageout_deadlock_target
--;
2217 DTRACE_VM2(dfree
, int, 1, (uint64_t *), NULL
);
2219 if (object
->internal
) {
2220 DTRACE_VM2(anonfree
, int, 1, (uint64_t *), NULL
);
2222 DTRACE_VM2(fsfree
, int, 1, (uint64_t *), NULL
);
2224 assert(!m
->cleaning
);
2225 assert(!m
->laundry
);
2230 * remove page from object here since we're already
2231 * behind the object lock... defer the rest of the work
2232 * we'd normally do in vm_page_free_prepare_object
2233 * until 'vm_page_free_list' is called
2236 vm_page_remove(m
, TRUE
);
2238 assert(m
->pageq
.next
== NULL
&&
2239 m
->pageq
.prev
== NULL
);
2240 m
->pageq
.next
= (queue_entry_t
)local_freeq
;
2244 if (page_prev_state
== PAGE_STATE_SPECULATIVE
)
2245 vm_pageout_freed_from_speculative
++;
2246 else if (page_prev_state
== PAGE_STATE_CLEAN
)
2247 vm_pageout_freed_from_cleaned
++;
2249 vm_pageout_freed_from_inactive_clean
++;
2251 if (page_prev_state
!= PAGE_STATE_SPECULATIVE
)
2252 vm_pageout_stats
[vm_pageout_stat_now
].reclaimed
++;
2254 inactive_burst_count
= 0;
2255 goto done_with_inactivepage
;
2258 * If the object is empty, the page must be reclaimed even
2260 * If the page belongs to a volatile object, we stick it back
2263 if (object
->copy
== VM_OBJECT_NULL
) {
2264 if (object
->purgable
== VM_PURGABLE_EMPTY
) {
2265 if (m
->pmapped
== TRUE
) {
2266 /* unmap the page */
2267 refmod_state
= pmap_disconnect(m
->phys_page
);
2268 if (refmod_state
& VM_MEM_MODIFIED
) {
2269 SET_PAGE_DIRTY(m
, FALSE
);
2272 if (m
->dirty
|| m
->precious
) {
2273 /* we saved the cost of cleaning this page ! */
2274 vm_page_purged_count
++;
2279 if (COMPRESSED_PAGER_IS_ACTIVE
) {
2281 * With the VM compressor, the cost of
2282 * reclaiming a page is much lower (no I/O),
2283 * so if we find a "volatile" page, it's better
2284 * to let it get compressed rather than letting
2285 * it occupy a full page until it gets purged.
2286 * So no need to check for "volatile" here.
2288 } else if (object
->purgable
== VM_PURGABLE_VOLATILE
) {
2290 * Avoid cleaning a "volatile" page which might
2294 /* if it's wired, we can't put it on our queue */
2295 assert(!VM_PAGE_WIRED(m
));
2297 /* just stick it back on! */
2298 reactivated_this_call
++;
2300 if (page_prev_state
== PAGE_STATE_CLEAN
)
2301 vm_pageout_cleaned_volatile_reactivated
++;
2303 goto reactivate_page
;
2307 consider_inactive_page
:
2311 * A "busy" page should always be left alone, except...
2313 if (m
->cleaning
&& m
->encrypted_cleaning
) {
2316 * We could get here with a "busy" page
2317 * if it's being encrypted during a
2318 * "clean-in-place" operation. We'll deal
2319 * with it right away by testing if it has been
2320 * referenced and either reactivating it or
2321 * promoting it from "clean-in-place" to
2325 panic("\"busy\" page considered for pageout\n");
2330 * If it's being used, reactivate.
2331 * (Fictitious pages are either busy or absent.)
2332 * First, update the reference and dirty bits
2333 * to make sure the page is unreferenced.
2337 if (m
->reference
== FALSE
&& m
->pmapped
== TRUE
) {
2338 refmod_state
= pmap_get_refmod(m
->phys_page
);
2340 if (refmod_state
& VM_MEM_REFERENCED
)
2341 m
->reference
= TRUE
;
2342 if (refmod_state
& VM_MEM_MODIFIED
) {
2343 SET_PAGE_DIRTY(m
, FALSE
);
2348 * if (m->cleaning && !m->pageout)
2349 * If already cleaning this page in place and it hasn't
2350 * been recently referenced, just pull off the queue.
2351 * We can leave the page mapped, and upl_commit_range
2352 * will put it on the clean queue.
2354 * note: if m->encrypted_cleaning == TRUE, then
2355 * m->cleaning == TRUE
2356 * and we'll handle it here
2358 * if (m->pageout && !m->cleaning)
2359 * an msync INVALIDATE is in progress...
2360 * this page has been marked for destruction
2361 * after it has been cleaned,
2362 * but not yet gathered into a UPL
2363 * where 'cleaning' will be set...
2364 * just leave it off the paging queues
* if (m->pageout && m->cleaning)
2367 * an msync INVALIDATE is in progress
2368 * and the UPL has already gathered this page...
2369 * just leave it off the paging queues
2373 * page with m->pageout and still on the queues means that an
2374 * MS_INVALIDATE is in progress on this page... leave it alone
2377 goto done_with_inactivepage
;
2380 /* if cleaning, reactivate if referenced. otherwise, just pull off queue */
2382 if (m
->reference
== TRUE
) {
2383 reactivated_this_call
++;
2384 goto reactivate_page
;
2386 goto done_with_inactivepage
;
2390 if (m
->reference
|| m
->dirty
) {
2391 /* deal with a rogue "reusable" page */
2392 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m
);
2397 (m
->xpmapped
&& !object
->internal
&& (vm_page_xpmapped_external_count
< (vm_page_external_count
/ 4))))) {
2399 * The page we pulled off the inactive list has
2400 * been referenced. It is possible for other
2401 * processors to be touching pages faster than we
2402 * can clear the referenced bit and traverse the
2403 * inactive queue, so we limit the number of
2406 if (++reactivated_this_call
>= reactivate_limit
) {
2407 vm_pageout_reactivation_limit_exceeded
++;
2408 } else if (catch_up_count
) {
2409 vm_pageout_catch_ups
++;
2410 } else if (++inactive_reclaim_run
>= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM
) {
2411 vm_pageout_inactive_force_reclaim
++;
2415 if (page_prev_state
== PAGE_STATE_CLEAN
)
2416 vm_pageout_cleaned_reference_reactivated
++;
2419 if ( !object
->internal
&& object
->pager
!= MEMORY_OBJECT_NULL
&&
2420 vnode_pager_get_isinuse(object
->pager
, &isinuse
) == KERN_SUCCESS
&& !isinuse
) {
* no explicit mappings of this object exist
2423 * and it's not open via the filesystem
2425 vm_page_deactivate(m
);
2426 vm_pageout_inactive_deactivated
++;
2430 * The page was/is being used, so put back on active list.
2432 vm_page_activate(m
);
2433 VM_STAT_INCR(reactivations
);
2434 inactive_burst_count
= 0;
2437 if (page_prev_state
== PAGE_STATE_CLEAN
)
2438 vm_pageout_cleaned_reactivated
++;
2440 vm_pageout_inactive_used
++;
2442 goto done_with_inactivepage
;
2445 * Make sure we call pmap_get_refmod() if it
2446 * wasn't already called just above, to update
2449 if ((refmod_state
== -1) && !m
->dirty
&& m
->pmapped
) {
2450 refmod_state
= pmap_get_refmod(m
->phys_page
);
2451 if (refmod_state
& VM_MEM_MODIFIED
) {
2452 SET_PAGE_DIRTY(m
, FALSE
);
2455 forced_reclaim
= TRUE
;
2457 forced_reclaim
= FALSE
;
2461 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
2462 object
, m
->offset
, m
, 0,0);
2465 * we've got a candidate page to steal...
2467 * m->dirty is up to date courtesy of the
2468 * preceding check for m->reference... if
2469 * we get here, then m->reference had to be
2470 * FALSE (or possibly "reactivate_limit" was
2471 * exceeded), but in either case we called
2472 * pmap_get_refmod() and updated both
2473 * m->reference and m->dirty
* if it's dirty or precious we need to
* see if the target queue is throttled...
* if it is, we need to skip over it by moving it back
* to the end of the inactive queue
2481 inactive_throttled
= FALSE
;
2483 if (m
->dirty
|| m
->precious
) {
2484 if (object
->internal
) {
2485 if (VM_PAGE_Q_THROTTLED(iq
))
2486 inactive_throttled
= TRUE
;
2487 } else if (VM_PAGE_Q_THROTTLED(eq
)) {
2488 inactive_throttled
= TRUE
;
2492 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default
) &&
2493 object
->internal
&& m
->dirty
&&
2494 (object
->purgable
== VM_PURGABLE_DENY
||
2495 object
->purgable
== VM_PURGABLE_NONVOLATILE
||
2496 object
->purgable
== VM_PURGABLE_VOLATILE
)) {
2497 queue_enter(&vm_page_queue_throttled
, m
,
2499 m
->throttled
= TRUE
;
2500 vm_page_throttled_count
++;
2502 vm_pageout_scan_reclaimed_throttled
++;
2504 inactive_burst_count
= 0;
2505 goto done_with_inactivepage
;
2507 if (inactive_throttled
== TRUE
) {
2509 if (object
->internal
== FALSE
) {
2511 * we need to break up the following potential deadlock case...
2512 * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
2513 * b) The thread doing the writing is waiting for pages while holding the truncate lock
2514 * c) Most of the pages in the inactive queue belong to this file.
2516 * we are potentially in this deadlock because...
2517 * a) the external pageout queue is throttled
2518 * b) we're done with the active queue and moved on to the inactive queue
2519 * c) we've got a dirty external page
2521 * since we don't know the reason for the external pageout queue being throttled we
2522 * must suspect that we are deadlocked, so move the current page onto the active queue
2523 * in an effort to cause a page from the active queue to 'age' to the inactive queue
2525 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
2526 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
2527 * pool the next time we select a victim page... if we can make enough new free pages,
2528 * the deadlock will break, the external pageout queue will empty and it will no longer
* if we have jetsam configured, keep a count of the pages reactivated this way so
2532 * that we can try to find clean pages in the active/inactive queues before
2533 * deciding to jetsam a process
2535 vm_pageout_scan_inactive_throttled_external
++;
2537 queue_enter(&vm_page_queue_active
, m
, vm_page_t
, pageq
);
2539 vm_page_active_count
++;
2540 vm_page_pageable_external_count
++;
2542 vm_pageout_adjust_io_throttles(iq
, eq
, FALSE
);
2544 #if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
2545 vm_pageout_inactive_external_forced_reactivate_limit
--;
2547 if (vm_pageout_inactive_external_forced_reactivate_limit
<= 0) {
2548 vm_pageout_inactive_external_forced_reactivate_limit
= vm_page_active_count
+ vm_page_inactive_count
;
2550 * Possible deadlock scenario so request jetsam action
2553 vm_object_unlock(object
);
2554 object
= VM_OBJECT_NULL
;
2555 vm_page_unlock_queues();
2557 VM_DEBUG_EVENT(vm_pageout_jetsam
, VM_PAGEOUT_JETSAM
, DBG_FUNC_START
,
2558 vm_page_active_count
, vm_page_inactive_count
, vm_page_free_count
, vm_page_free_count
);
2560 /* Kill first suitable process */
2561 if (memorystatus_kill_on_VM_page_shortage(FALSE
) == FALSE
) {
2562 panic("vm_pageout_scan: Jetsam request failed\n");
2565 VM_DEBUG_EVENT(vm_pageout_jetsam
, VM_PAGEOUT_JETSAM
, DBG_FUNC_END
, 0, 0, 0, 0);
2567 vm_pageout_inactive_external_forced_jetsam_count
++;
2568 vm_page_lock_queues();
2571 #else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
2572 force_anonymous
= TRUE
;
2574 inactive_burst_count
= 0;
2575 goto done_with_inactivepage
;
2577 if (page_prev_state
== PAGE_STATE_SPECULATIVE
)
2578 page_prev_state
= PAGE_STATE_INACTIVE
;
2580 vm_pageout_scan_inactive_throttled_internal
++;
2587 * we've got a page that we can steal...
2588 * eliminate all mappings and make sure
2589 * we have the up-to-date modified state
2591 * if we need to do a pmap_disconnect then we
2592 * need to re-evaluate m->dirty since the pmap_disconnect
2593 * provides the true state atomically... the
2594 * page was still mapped up to the pmap_disconnect
2595 * and may have been dirtied at the last microsecond
2597 * Note that if 'pmapped' is FALSE then the page is not
2598 * and has not been in any map, so there is no point calling
2599 * pmap_disconnect(). m->dirty could have been set in anticipation
2600 * of likely usage of the page.
2602 if (m
->pmapped
== TRUE
) {
2604 if (DEFAULT_PAGER_IS_ACTIVE
|| DEFAULT_FREEZER_IS_ACTIVE
|| DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE
|| object
->internal
== FALSE
) {
                /*
                 * Don't count this page as going into the compressor if any of these are true:
                 * 1) We have the dynamic pager i.e. no compressed pager
                 * 2) Freezer enabled device with a freezer file to hold the app data i.e. no compressed pager
                 * 3) Freezer enabled device with compressed pager backend (exclusive use) i.e. most of the VM system
                 *    (including vm_pageout_scan) has no knowledge of the compressor
                 * 4) This page belongs to a file and hence will not be sent into the compressor
                 */
2614 refmod_state
= pmap_disconnect_options(m
->phys_page
, 0, NULL
);
2616 refmod_state
= pmap_disconnect_options(m
->phys_page
, PMAP_OPTIONS_COMPRESSOR
, NULL
);
2619 if (refmod_state
& VM_MEM_MODIFIED
) {
2620 SET_PAGE_DIRTY(m
, FALSE
);
2624 * reset our count of pages that have been reclaimed
2625 * since the last page was 'stolen'
2627 inactive_reclaim_run
= 0;
2630 * If it's clean and not precious, we can free the page.
2632 if (!m
->dirty
&& !m
->precious
) {
2634 if (page_prev_state
== PAGE_STATE_SPECULATIVE
)
2635 vm_pageout_speculative_clean
++;
2637 if (page_prev_state
== PAGE_STATE_ANONYMOUS
)
2638 vm_pageout_inactive_anonymous
++;
2639 else if (page_prev_state
== PAGE_STATE_CLEAN
)
2640 vm_pageout_cleaned_reclaimed
++;
2642 vm_pageout_inactive_clean
++;
2646 * OK, at this point we have found a page we are going to free.
2648 #if CONFIG_PHANTOM_CACHE
2649 if (!object
->internal
)
2650 vm_phantom_cache_add_ghost(m
);
2656 * The page may have been dirtied since the last check
2657 * for a throttled target queue (which may have been skipped
2658 * if the page was clean then). With the dirty page
2659 * disconnected here, we can make one final check.
2661 if (object
->internal
) {
2662 if (VM_PAGE_Q_THROTTLED(iq
))
2663 inactive_throttled
= TRUE
;
2664 } else if (VM_PAGE_Q_THROTTLED(eq
)) {
2665 inactive_throttled
= TRUE
;
2668 if (inactive_throttled
== TRUE
)
2669 goto throttle_inactive
;
2671 #if VM_PRESSURE_EVENTS
2675 * If Jetsam is enabled, then the sending
2676 * of memory pressure notifications is handled
2677 * from the same thread that takes care of high-water
2678 * and other jetsams i.e. the memorystatus_thread.
2681 #else /* CONFIG_JETSAM */
2683 vm_pressure_response();
2685 #endif /* CONFIG_JETSAM */
2686 #endif /* VM_PRESSURE_EVENTS */
2689 * do NOT set the pageout bit!
2690 * sure, we might need free pages, but this page is going to take time to become free
2691 * anyway, so we may as well put it on the clean queue first and take it from there later
2692 * if necessary. that way, we'll ensure we don't free up too much. -mj
2694 vm_pageout_cluster(m
, FALSE
);
2696 if (page_prev_state
== PAGE_STATE_ANONYMOUS
)
2697 vm_pageout_inactive_anonymous
++;
2698 if (object
->internal
)
2699 vm_pageout_inactive_dirty_internal
++;
2701 vm_pageout_inactive_dirty_external
++;
2704 done_with_inactivepage
:
2706 if (delayed_unlock
++ > delayed_unlock_limit
|| try_failed
== TRUE
) {
2707 boolean_t need_delay
= TRUE
;
2709 if (object
!= NULL
) {
2710 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
2711 vm_object_unlock(object
);
2714 vm_page_unlock_queues();
2718 VM_DEBUG_EVENT(vm_pageout_freelist
, VM_PAGEOUT_FREELIST
, DBG_FUNC_START
,
2719 vm_page_free_count
, local_freed
, delayed_unlock_limit
, 4);
2721 vm_page_free_list(local_freeq
, TRUE
);
2723 VM_DEBUG_EVENT(vm_pageout_freelist
, VM_PAGEOUT_FREELIST
, DBG_FUNC_END
,
2724 vm_page_free_count
, local_freed
, 0, 4);
2730 if (COMPRESSED_PAGER_IS_ACTIVE
|| DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE
) {
2731 vm_consider_waking_compactor_swapper();
2734 vm_page_lock_queues();
2736 if (need_delay
== TRUE
)
2737 lck_mtx_yield(&vm_page_queue_lock
);
        vm_pageout_considered_page++;

        /*
         * back to top of pageout scan loop
         */
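/*
 * Overview (added for clarity): vm_pageout_scan pauses only when it hits
 * one of the "3 conditions" referred to in the comments above -- roughly,
 * the laundry queue it needs is throttled, the burst of inactive pages
 * examined without progress exceeded its limit, or no eligible victim
 * page could be found.  Otherwise it loops back to consider_inactive and
 * keeps reclaiming pages until vm_page_free_count reaches
 * vm_page_free_target.
 */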
int vm_page_free_count_init;

void
vm_page_free_reserve(
    int pages)
{
    int free_after_reserve;

    if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {

        if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT))
            vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
        else
            vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);

    } else {

        if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT)
            vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
        else
            vm_page_free_reserved += pages;
    }
    free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;

    vm_page_free_min = vm_page_free_reserved +
        VM_PAGE_FREE_MIN(free_after_reserve);

    if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
        vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;

    vm_page_free_target = vm_page_free_reserved +
        VM_PAGE_FREE_TARGET(free_after_reserve);

    if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
        vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;

    if (vm_page_free_target < vm_page_free_min + 5)
        vm_page_free_target = vm_page_free_min + 5;

    vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
}
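/*
 * Worked example (illustrative only; the real numbers depend on the
 * VM_PAGE_FREE_* tuning macros and on vm_page_free_count_init at boot):
 * with a reserve of, say, 100 pages and 100,000 pages left after the
 * reserve, vm_page_free_min and vm_page_free_target become the reserve
 * plus the fractions computed by VM_PAGE_FREE_MIN() and
 * VM_PAGE_FREE_TARGET(), each clipped to its *_LIMIT, and
 * vm_page_throttle_limit ends up at two thirds of vm_page_free_target.
 */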
/*
 *	vm_pageout is the high level pageout daemon.
 */

void
vm_pageout_continue(void)
{
    DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
    vm_pageout_scan_event_counter++;

    vm_pageout_scan();
    /*
     * we hold both the vm_page_queue_free_lock
     * and the vm_page_queues_lock at this point
     */
    assert(vm_page_free_wanted == 0);
    assert(vm_page_free_wanted_privileged == 0);
    assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);

    lck_mtx_unlock(&vm_page_queue_free_lock);
    vm_page_unlock_queues();

    counter(c_vm_pageout_block++);
    thread_block((thread_continue_t)vm_pageout_continue);
    /*NOTREACHED*/
}
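/*
 * Note (added for clarity): vm_pageout_continue() never returns in the
 * usual sense -- it passes itself to thread_block() as a continuation,
 * so the pageout thread resumes at the top of this function whenever a
 * thread_wakeup() is issued on &vm_page_free_wanted.  The I/O thread
 * continuations below use the same pattern.
 */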
2819 #ifdef FAKE_DEADLOCK
2821 #define FAKE_COUNT 5000
2823 int internal_count
= 0;
2824 int fake_deadlock
= 0;
2829 vm_pageout_iothread_continue(struct vm_pageout_queue
*q
)
2833 vm_object_offset_t offset
;
2834 memory_object_t pager
;
2835 thread_t self
= current_thread();
2837 if ((vm_pageout_internal_iothread
!= THREAD_NULL
)
2838 && (self
== vm_pageout_external_iothread
)
2839 && (self
->options
& TH_OPT_VMPRIV
))
2840 self
->options
&= ~TH_OPT_VMPRIV
;
2842 vm_page_lockspin_queues();
2844 while ( !queue_empty(&q
->pgo_pending
) ) {
2847 queue_remove_first(&q
->pgo_pending
, m
, vm_page_t
, pageq
);
2848 if (m
->object
->object_slid
) {
2849 panic("slid page %p not allowed on this path\n", m
);
2852 m
->pageout_queue
= FALSE
;
2853 m
->pageq
.next
= NULL
;
2854 m
->pageq
.prev
= NULL
;
2857 * grab a snapshot of the object and offset this
2858 * page is tabled in so that we can relookup this
2859 * page after we've taken the object lock - these
2860 * fields are stable while we hold the page queues lock
2861 * but as soon as we drop it, there is nothing to keep
2862 * this page in this object... we hold an activity_in_progress
2863 * on this object which will keep it from terminating
2868 vm_page_unlock_queues();
2870 #ifdef FAKE_DEADLOCK
2871 if (q
== &vm_pageout_queue_internal
) {
2877 if ((internal_count
== FAKE_COUNT
)) {
2879 pg_count
= vm_page_free_count
+ vm_page_free_reserved
;
2881 if (kmem_alloc(kernel_map
, &addr
, PAGE_SIZE
* pg_count
) == KERN_SUCCESS
) {
2882 kmem_free(kernel_map
, addr
, PAGE_SIZE
* pg_count
);
2889 vm_object_lock(object
);
2891 m
= vm_page_lookup(object
, offset
);
2894 m
->busy
|| m
->cleaning
|| m
->pageout_queue
|| !m
->laundry
) {
2896 * it's either the same page that someone else has
2897 * started cleaning (or it's finished cleaning or
2898 * been put back on the pageout queue), or
2899 * the page has been freed or we have found a
2900 * new page at this offset... in all of these cases
2901 * we merely need to release the activity_in_progress
2902 * we took when we put the page on the pageout queue
2904 vm_object_activity_end(object
);
2905 vm_object_unlock(object
);
2907 vm_page_lockspin_queues();
2910 if (!object
->pager_initialized
) {
2913 * If there is no memory object for the page, create
2914 * one and hand it to the default pager.
2917 if (!object
->pager_initialized
)
2918 vm_object_collapse(object
,
2919 (vm_object_offset_t
) 0,
2921 if (!object
->pager_initialized
)
2922 vm_object_pager_create(object
);
2923 if (!object
->pager_initialized
) {
2925 * Still no pager for the object.
2926 * Reactivate the page.
2928 * Should only happen if there is no
2933 vm_page_lockspin_queues();
2935 vm_pageout_throttle_up(m
);
2936 vm_page_activate(m
);
2937 vm_pageout_dirty_no_pager
++;
2939 vm_page_unlock_queues();
2942 * And we are done with it.
2944 vm_object_activity_end(object
);
2945 vm_object_unlock(object
);
2947 vm_page_lockspin_queues();
2951 pager
= object
->pager
;
2953 if (pager
== MEMORY_OBJECT_NULL
) {
2955 * This pager has been destroyed by either
2956 * memory_object_destroy or vm_object_destroy, and
2957 * so there is nowhere for the page to go.
2961 * Just free the page... VM_PAGE_FREE takes
2962 * care of cleaning up all the state...
2963 * including doing the vm_pageout_throttle_up
2967 vm_page_lockspin_queues();
2969 vm_pageout_throttle_up(m
);
2970 vm_page_activate(m
);
2972 vm_page_unlock_queues();
2975 * And we are done with it.
2978 vm_object_activity_end(object
);
2979 vm_object_unlock(object
);
2981 vm_page_lockspin_queues();
2986 * we don't hold the page queue lock
2987 * so this check isn't safe to make
2992 * give back the activity_in_progress reference we
2993 * took when we queued up this page and replace it
2994 * it with a paging_in_progress reference that will
2995 * also hold the paging offset from changing and
2996 * prevent the object from terminating
2998 vm_object_activity_end(object
);
2999 vm_object_paging_begin(object
);
3000 vm_object_unlock(object
);
3003 * Send the data to the pager.
3004 * any pageout clustering happens there
3006 memory_object_data_return(pager
,
3007 m
->offset
+ object
->paging_offset
,
3015 vm_object_lock(object
);
3016 vm_object_paging_end(object
);
3017 vm_object_unlock(object
);
3019 vm_pageout_io_throttle();
3021 vm_page_lockspin_queues();
3023 q
->pgo_busy
= FALSE
;
3026 assert_wait((event_t
) &q
->pgo_pending
, THREAD_UNINT
);
3027 vm_page_unlock_queues();
3029 thread_block_parameter((thread_continue_t
)vm_pageout_iothread_continue
, (void *) q
);
3035 vm_pageout_iothread_external_continue(struct vm_pageout_queue
*q
)
3039 vm_object_offset_t offset
;
3040 memory_object_t pager
;
3043 if (vm_pageout_internal_iothread
!= THREAD_NULL
)
3044 current_thread()->options
&= ~TH_OPT_VMPRIV
;
3046 vm_page_lockspin_queues();
3048 while ( !queue_empty(&q
->pgo_pending
) ) {
3051 queue_remove_first(&q
->pgo_pending
, m
, vm_page_t
, pageq
);
3052 if (m
->object
->object_slid
) {
3053 panic("slid page %p not allowed on this path\n", m
);
3056 m
->pageout_queue
= FALSE
;
3057 m
->pageq
.next
= NULL
;
3058 m
->pageq
.prev
= NULL
;
3061 * grab a snapshot of the object and offset this
3062 * page is tabled in so that we can relookup this
3063 * page after we've taken the object lock - these
3064 * fields are stable while we hold the page queues lock
3065 * but as soon as we drop it, there is nothing to keep
3066 * this page in this object... we hold an activity_in_progress
3067 * on this object which will keep it from terminating
3072 vm_page_unlock_queues();
3074 vm_object_lock(object
);
3076 m
= vm_page_lookup(object
, offset
);
3079 m
->busy
|| m
->cleaning
|| m
->pageout_queue
|| !m
->laundry
) {
3081 * it's either the same page that someone else has
3082 * started cleaning (or it's finished cleaning or
3083 * been put back on the pageout queue), or
3084 * the page has been freed or we have found a
3085 * new page at this offset... in all of these cases
3086 * we merely need to release the activity_in_progress
3087 * we took when we put the page on the pageout queue
3089 vm_object_activity_end(object
);
3090 vm_object_unlock(object
);
3092 vm_page_lockspin_queues();
3095 pager
= object
->pager
;
3097 if (pager
== MEMORY_OBJECT_NULL
) {
3099 * This pager has been destroyed by either
3100 * memory_object_destroy or vm_object_destroy, and
3101 * so there is nowhere for the page to go.
3105 * Just free the page... VM_PAGE_FREE takes
3106 * care of cleaning up all the state...
3107 * including doing the vm_pageout_throttle_up
3111 vm_page_lockspin_queues();
3113 vm_pageout_throttle_up(m
);
3114 vm_page_activate(m
);
3116 vm_page_unlock_queues();
3119 * And we are done with it.
3122 vm_object_activity_end(object
);
3123 vm_object_unlock(object
);
3125 vm_page_lockspin_queues();
3130 * we don't hold the page queue lock
3131 * so this check isn't safe to make
3136 * give back the activity_in_progress reference we
3137 * took when we queued up this page and replace it
* with a paging_in_progress reference that will
3139 * also hold the paging offset from changing and
3140 * prevent the object from terminating
3142 vm_object_activity_end(object
);
3143 vm_object_paging_begin(object
);
3144 vm_object_unlock(object
);
3147 * Send the data to the pager.
3148 * any pageout clustering happens there
3150 memory_object_data_return(pager
,
3151 m
->offset
+ object
->paging_offset
,
3159 vm_object_lock(object
);
3160 vm_object_paging_end(object
);
3161 vm_object_unlock(object
);
3163 vm_pageout_io_throttle();
3165 vm_page_lockspin_queues();
3167 q
->pgo_busy
= FALSE
;
3170 assert_wait((event_t
) &q
->pgo_pending
, THREAD_UNINT
);
3171 vm_page_unlock_queues();
3173 thread_block_parameter((thread_continue_t
)vm_pageout_iothread_external_continue
, (void *) q
);
3178 uint32_t vm_compressor_failed
;
3181 vm_pageout_iothread_internal_continue(struct cq
*cq
)
3183 struct vm_pageout_queue
*q
;
3186 memory_object_t pager
;
3187 boolean_t pgo_draining
;
3190 vm_page_t local_freeq
= NULL
;
3191 int local_freed
= 0;
3192 int local_batch_size
;
3193 kern_return_t retval
;
3194 int compressed_count_delta
;
3197 KERNEL_DEBUG(0xe040000c | DBG_FUNC_END
, 0, 0, 0, 0, 0);
3200 local_batch_size
= q
->pgo_maxlaundry
/ (vm_compressor_thread_count
* 4);
3207 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START
, 0, 0, 0, 0, 0);
3209 vm_page_lock_queues();
3211 KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END
, 0, 0, 0, 0, 0);
3213 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START
, 0, 0, 0, 0, 0);
3215 while ( !queue_empty(&q
->pgo_pending
) && local_cnt
< local_batch_size
) {
3217 queue_remove_first(&q
->pgo_pending
, m
, vm_page_t
, pageq
);
3221 m
->pageout_queue
= FALSE
;
3222 m
->pageq
.prev
= NULL
;
3224 m
->pageq
.next
= (queue_entry_t
)local_q
;
3228 if (local_q
== NULL
)
3233 if ((pgo_draining
= q
->pgo_draining
) == FALSE
)
3234 vm_pageout_throttle_up_batch(q
, local_cnt
);
3236 vm_page_unlock_queues();
3238 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END
, 0, 0, 0, 0, 0);
3243 local_q
= (vm_page_t
)m
->pageq
.next
;
3244 m
->pageq
.next
= NULL
;
3246 if (m
->object
->object_slid
) {
3247 panic("slid page %p not allowed on this path\n", m
);
3251 pager
= object
->pager
;
3253 if (!object
->pager_initialized
|| pager
== MEMORY_OBJECT_NULL
) {
3255 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START
, object
, pager
, 0, 0, 0);
3257 vm_object_lock(object
);
3260 * If there is no memory object for the page, create
3261 * one and hand it to the compression pager.
3264 if (!object
->pager_initialized
)
3265 vm_object_collapse(object
, (vm_object_offset_t
) 0, TRUE
);
3266 if (!object
->pager_initialized
)
3267 vm_object_compressor_pager_create(object
);
3269 if (!object
->pager_initialized
) {
3271 * Still no pager for the object.
3272 * Reactivate the page.
3274 * Should only happen if there is no
3279 PAGE_WAKEUP_DONE(m
);
3281 vm_page_lockspin_queues();
3282 vm_page_activate(m
);
3283 vm_pageout_dirty_no_pager
++;
3284 vm_page_unlock_queues();
3287 * And we are done with it.
3289 vm_object_activity_end(object
);
3290 vm_object_unlock(object
);
3294 pager
= object
->pager
;
3296 if (pager
== MEMORY_OBJECT_NULL
) {
3298 * This pager has been destroyed by either
3299 * memory_object_destroy or vm_object_destroy, and
3300 * so there is nowhere for the page to go.
3304 * Just free the page... VM_PAGE_FREE takes
3305 * care of cleaning up all the state...
3306 * including doing the vm_pageout_throttle_up
3311 PAGE_WAKEUP_DONE(m
);
3313 vm_page_lockspin_queues();
3314 vm_page_activate(m
);
3315 vm_page_unlock_queues();
3318 * And we are done with it.
3321 vm_object_activity_end(object
);
3322 vm_object_unlock(object
);
3326 vm_object_unlock(object
);
3328 KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END
, object
, pager
, 0, 0, 0);
3330 while (vm_page_free_count
< (vm_page_free_reserved
- COMPRESSOR_FREE_RESERVED_LIMIT
)) {
3331 kern_return_t wait_result
;
3332 int need_wakeup
= 0;
3335 vm_page_free_list(local_freeq
, TRUE
);
3342 lck_mtx_lock_spin(&vm_page_queue_free_lock
);
3344 if (vm_page_free_count
< (vm_page_free_reserved
- COMPRESSOR_FREE_RESERVED_LIMIT
)) {
3346 if (vm_page_free_wanted_privileged
++ == 0)
3348 wait_result
= assert_wait((event_t
)&vm_page_free_wanted_privileged
, THREAD_UNINT
);
3350 lck_mtx_unlock(&vm_page_queue_free_lock
);
3353 thread_wakeup((event_t
)&vm_page_free_wanted
);
3355 if (wait_result
== THREAD_WAITING
)
3356 thread_block(THREAD_CONTINUE_NULL
);
3358 lck_mtx_unlock(&vm_page_queue_free_lock
);
3361 assert(object
->activity_in_progress
> 0);
3363 retval
= vm_compressor_pager_put(
3365 m
->offset
+ object
->paging_offset
,
3369 &compressed_count_delta
);
3371 vm_object_lock(object
);
3372 assert(object
->activity_in_progress
> 0);
3374 assert(m
->object
== object
);
3376 vm_compressor_pager_count(pager
,
3377 compressed_count_delta
,
3378 FALSE
, /* shared_lock */
3384 if (retval
== KERN_SUCCESS
) {
3386 * If the object is purgeable, its owner's
3387 * purgeable ledgers will be updated in
3388 * vm_page_remove() but the page still
3389 * contributes to the owner's memory footprint,
3390 * so account for it as such.
3392 if (object
->purgable
!= VM_PURGABLE_DENY
&&
3393 object
->vo_purgeable_owner
!= NULL
) {
3394 /* one more compressed purgeable page */
3395 vm_purgeable_compressed_update(object
,
3399 vm_page_compressions_failing
= FALSE
;
3401 VM_STAT_INCR(compressions
);
3404 vm_page_remove(m
, TRUE
);
3405 vm_object_activity_end(object
);
3406 vm_object_unlock(object
);
3408 m
->pageq
.next
= (queue_entry_t
)local_freeq
;
3413 PAGE_WAKEUP_DONE(m
);
3415 vm_page_lockspin_queues();
3417 vm_page_activate(m
);
3418 vm_compressor_failed
++;
3420 vm_page_compressions_failing
= TRUE
;
3422 vm_page_unlock_queues();
3424 vm_object_activity_end(object
);
3425 vm_object_unlock(object
);
3429 vm_page_free_list(local_freeq
, TRUE
);
3434 if (pgo_draining
== TRUE
) {
3435 vm_page_lockspin_queues();
3436 vm_pageout_throttle_up_batch(q
, local_cnt
);
3437 vm_page_unlock_queues();
3440 KERNEL_DEBUG(0xe040000c | DBG_FUNC_START
, 0, 0, 0, 0, 0);
3443 * queue lock is held and our q is empty
3445 q
->pgo_busy
= FALSE
;
3448 assert_wait((event_t
) &q
->pgo_pending
, THREAD_UNINT
);
3449 vm_page_unlock_queues();
3451 KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END
, 0, 0, 0, 0, 0);
3453 thread_block_parameter((thread_continue_t
)vm_pageout_iothread_internal_continue
, (void *) cq
);
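/*
 * Overview (added for clarity) of the compressor I/O path above: pages
 * are pulled off pgo_pending in batches of roughly
 * pgo_maxlaundry / (vm_compressor_thread_count * 4), handed to
 * vm_compressor_pager_put(), and then either moved onto the local free
 * list on success or reactivated (and counted in vm_compressor_failed)
 * on failure.  If vm_page_free_count drops below the privileged reserve
 * (vm_page_free_reserved - COMPRESSOR_FREE_RESERVED_LIMIT), the thread
 * waits on vm_page_free_wanted_privileged before continuing.
 */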
static void
vm_pageout_adjust_io_throttles(struct vm_pageout_queue *iq, struct vm_pageout_queue *eq, boolean_t req_lowpriority)
{
    int         policy;
    boolean_t   set_iq = FALSE;
    boolean_t   set_eq = FALSE;

    if (hibernate_cleaning_in_progress == TRUE)
        req_lowpriority = FALSE;

    if ((DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) && iq->pgo_inited == TRUE && iq->pgo_lowpriority != req_lowpriority)
        set_iq = TRUE;

    if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority)
        set_eq = TRUE;

    if (set_iq == TRUE || set_eq == TRUE) {

        vm_page_unlock_queues();

        if (req_lowpriority == TRUE) {
            policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
            DTRACE_VM(laundrythrottle);
        } else {
            policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
            DTRACE_VM(laundryunthrottle);
        }
        if (set_iq == TRUE) {
            proc_set_task_policy_thread(kernel_task, iq->pgo_tid, TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);

            iq->pgo_lowpriority = req_lowpriority;
        }
        if (set_eq == TRUE) {
            proc_set_task_policy_thread(kernel_task, eq->pgo_tid, TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);

            eq->pgo_lowpriority = req_lowpriority;
        }
        vm_page_lock_queues();
    }
}
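/*
 * Note (added for clarity): vm_pageout_adjust_io_throttles() switches the
 * pageout threads' I/O policy between THROTTLE_LEVEL_PAGEOUT_THROTTLED
 * and THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED via
 * proc_set_task_policy_thread(); vm_pageout_scan passes TRUE when free
 * pages are plentiful and FALSE when it is about to stall waiting on the
 * laundry.
 */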
static void
vm_pageout_iothread_external(void)
{
    thread_t    self = current_thread();

    self->options |= TH_OPT_VMPRIV;

    DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);

    proc_set_task_policy_thread(kernel_task, self->thread_id, TASK_POLICY_EXTERNAL,
                                TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);

    vm_page_lock_queues();

    vm_pageout_queue_external.pgo_tid = self->thread_id;
    vm_pageout_queue_external.pgo_lowpriority = TRUE;
    vm_pageout_queue_external.pgo_inited = TRUE;

    vm_page_unlock_queues();

    if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
        vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
    else
        vm_pageout_iothread_continue(&vm_pageout_queue_external);

    /*NOTREACHED*/
}
3531 vm_pageout_iothread_internal(struct cq
*cq
)
3533 thread_t self
= current_thread();
3535 self
->options
|= TH_OPT_VMPRIV
;
3537 if (DEFAULT_PAGER_IS_ACTIVE
|| DEFAULT_FREEZER_IS_ACTIVE
) {
3538 DTRACE_VM2(laundrythrottle
, int, 1, (uint64_t *), NULL
);
3540 proc_set_task_policy_thread(kernel_task
, self
->thread_id
, TASK_POLICY_EXTERNAL
,
3541 TASK_POLICY_IO
, THROTTLE_LEVEL_PAGEOUT_THROTTLED
);
3543 vm_page_lock_queues();
3545 vm_pageout_queue_internal
.pgo_tid
= self
->thread_id
;
3546 vm_pageout_queue_internal
.pgo_lowpriority
= TRUE
;
3547 vm_pageout_queue_internal
.pgo_inited
= TRUE
;
3549 vm_page_unlock_queues();
3551 if (COMPRESSED_PAGER_IS_ACTIVE
|| DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE
) {
3552 cq
->q
= &vm_pageout_queue_internal
;
3553 cq
->current_chead
= NULL
;
3554 cq
->scratch_buf
= kalloc(COMPRESSOR_SCRATCH_BUF_SIZE
);
3556 vm_pageout_iothread_internal_continue(cq
);
3558 vm_pageout_iothread_continue(&vm_pageout_queue_internal
);
kern_return_t
vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
{
    if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
        return KERN_SUCCESS;
    } else {
        return KERN_FAILURE; /* Already set */
    }
}
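#if 0
/*
 * Illustrative usage sketch (not compiled; my_buffer_cache_collect and
 * my_buffer_cache_init are hypothetical names): a buffer-cache client
 * would register its cleanup callback exactly once; a second
 * registration fails because OSCompareAndSwapPtr() only succeeds while
 * consider_buffer_cache_collect is still NULL.
 */
static boolean_t my_buffer_cache_collect(int force);	/* hypothetical callback */

static void
my_buffer_cache_init(void)
{
	if (vm_set_buffer_cleanup_callout(my_buffer_cache_collect) != KERN_SUCCESS) {
		/* someone else already owns the callout */
	}
}
#endif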
3573 extern boolean_t memorystatus_manual_testing_on
;
3574 extern unsigned int memorystatus_level
;
3577 #if VM_PRESSURE_EVENTS
3579 boolean_t vm_pressure_events_enabled
= FALSE
;
3582 vm_pressure_response(void)
3585 vm_pressure_level_t old_level
= kVMPressureNormal
;
3588 uint64_t available_memory
= 0;
3590 if (vm_pressure_events_enabled
== FALSE
)
3594 available_memory
= (((uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY
) * 100);
3597 memorystatus_level
= (unsigned int) (available_memory
/ atop_64(max_mem
));
3599 if (memorystatus_manual_testing_on
) {
3603 old_level
= memorystatus_vm_pressure_level
;
3605 switch (memorystatus_vm_pressure_level
) {
3607 case kVMPressureNormal
:
3609 if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
3610 new_level
= kVMPressureCritical
;
3611 } else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
3612 new_level
= kVMPressureWarning
;
3617 case kVMPressureWarning
:
3618 case kVMPressureUrgent
:
3620 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
3621 new_level
= kVMPressureNormal
;
3622 } else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
3623 new_level
= kVMPressureCritical
;
3628 case kVMPressureCritical
:
3630 if (VM_PRESSURE_WARNING_TO_NORMAL()) {
3631 new_level
= kVMPressureNormal
;
3632 } else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
3633 new_level
= kVMPressureWarning
;
3642 if (new_level
!= -1) {
3643 memorystatus_vm_pressure_level
= (vm_pressure_level_t
) new_level
;
3645 if ((memorystatus_vm_pressure_level
!= kVMPressureNormal
) || (old_level
!= new_level
)) {
3646 if (vm_pressure_thread_running
== FALSE
) {
3647 thread_wakeup(&vm_pressure_thread
);
3650 if (old_level
!= new_level
) {
3651 thread_wakeup(&vm_pressure_changed
);
3657 #endif /* VM_PRESSURE_EVENTS */
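/*
 * Overview (added for clarity): vm_pressure_response() recomputes
 * memorystatus_level as the percentage of non-compressed memory still
 * available and moves memorystatus_vm_pressure_level between
 * kVMPressureNormal, kVMPressureWarning and kVMPressureCritical based on
 * the VM_PRESSURE_*_TO_*() predicates; a change of level (or any
 * non-normal level) wakes vm_pressure_thread and any waiters blocked on
 * vm_pressure_changed in mach_vm_pressure_level_monitor().
 */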
3660 mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure
, __unused
unsigned int *pressure_level
) {
3662 #if !VM_PRESSURE_EVENTS
3664 return KERN_FAILURE
;
3666 #else /* VM_PRESSURE_EVENTS */
3668 kern_return_t kr
= KERN_SUCCESS
;
3670 if (pressure_level
!= NULL
) {
3672 vm_pressure_level_t old_level
= memorystatus_vm_pressure_level
;
3674 if (wait_for_pressure
== TRUE
) {
3675 wait_result_t wr
= 0;
3677 while (old_level
== *pressure_level
) {
3678 wr
= assert_wait((event_t
) &vm_pressure_changed
,
3679 THREAD_INTERRUPTIBLE
);
3680 if (wr
== THREAD_WAITING
) {
3681 wr
= thread_block(THREAD_CONTINUE_NULL
);
3683 if (wr
== THREAD_INTERRUPTED
) {
3684 return KERN_ABORTED
;
3686 if (wr
== THREAD_AWAKENED
) {
3688 old_level
= memorystatus_vm_pressure_level
;
3690 if (old_level
!= *pressure_level
) {
3697 *pressure_level
= old_level
;
3700 kr
= KERN_INVALID_ARGUMENT
;
3704 #endif /* VM_PRESSURE_EVENTS */
3707 #if VM_PRESSURE_EVENTS
3709 vm_pressure_thread(void) {
3710 static boolean_t thread_initialized
= FALSE
;
3712 if (thread_initialized
== TRUE
) {
3713 vm_pressure_thread_running
= TRUE
;
3714 consider_vm_pressure_events();
3715 vm_pressure_thread_running
= FALSE
;
3718 thread_initialized
= TRUE
;
3719 assert_wait((event_t
) &vm_pressure_thread
, THREAD_UNINT
);
3720 thread_block((thread_continue_t
)vm_pressure_thread
);
3722 #endif /* VM_PRESSURE_EVENTS */
uint32_t vm_pageout_considered_page_last = 0;

/*
 * called once per-second via "compute_averages"
 */
void
compute_pageout_gc_throttle()
{
    if (vm_pageout_considered_page != vm_pageout_considered_page_last) {

        vm_pageout_considered_page_last = vm_pageout_considered_page;

        thread_wakeup((event_t) &vm_pageout_garbage_collect);
    }
}
static void
vm_pageout_garbage_collect(int collect)
{
    if (collect) {
        boolean_t buf_large_zfree = FALSE;
        boolean_t first_try = TRUE;

        consider_machine_collect();

        do {
            if (consider_buffer_cache_collect != NULL) {
                buf_large_zfree = (*consider_buffer_cache_collect)(0);
            }
            if (first_try == TRUE || buf_large_zfree == TRUE) {
                /*
                 * consider_zone_gc should be last, because the other operations
                 * might return memory to zones.
                 */
                consider_zone_gc(buf_large_zfree);
            }
            first_try = FALSE;

        } while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);

        consider_machine_adjust();
    }
    assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);

    thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
    /*NOTREACHED*/
}
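/*
 * Note (added for clarity): the garbage-collect thread keeps calling
 * consider_zone_gc() for as long as the buffer-cache callout reports
 * that it freed memory from large zones and vm_page_free_count remains
 * below vm_page_free_target.  It is woken at most once per second by
 * compute_pageout_gc_throttle() above, and only when vm_pageout_scan
 * has actually considered pages since the last check.
 */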
void vm_pageout_reinit_tuneables(void);

void
vm_pageout_reinit_tuneables(void)
{
    vm_compressor_minorcompact_threshold_divisor = 18;
    vm_compressor_majorcompact_threshold_divisor = 22;
    vm_compressor_unthrottle_threshold_divisor = 32;
}
3790 #if VM_PAGE_BUCKETS_CHECK
3791 #if VM_PAGE_FAKE_BUCKETS
3792 extern vm_map_offset_t vm_page_fake_buckets_start
, vm_page_fake_buckets_end
;
3793 #endif /* VM_PAGE_FAKE_BUCKETS */
3794 #endif /* VM_PAGE_BUCKETS_CHECK */
3796 #define FBDP_TEST_COLLAPSE_COMPRESSOR 0
3797 #if FBDP_TEST_COLLAPSE_COMPRESSOR
3798 extern boolean_t vm_object_collapse_compressor_allowed
;
3799 #include <IOKit/IOLib.h>
3800 #endif /* FBDP_TEST_COLLAPSE_COMPRESSOR */
3802 #define FBDP_TEST_WIRE_AND_EXTRACT 0
3803 #if FBDP_TEST_WIRE_AND_EXTRACT
3804 extern ledger_template_t task_ledger_template
;
3805 #include <mach/mach_vm.h>
3806 extern ppnum_t
vm_map_get_phys_page(vm_map_t map
,
3807 vm_offset_t offset
);
3808 #endif /* FBDP_TEST_WIRE_AND_EXTRACT */
3813 thread_t self
= current_thread();
3815 kern_return_t result
;
3819 * Set thread privileges.
3823 self
->priority
= BASEPRI_PREEMPT
- 1;
3824 set_sched_pri(self
, self
->priority
);
3825 thread_unlock(self
);
3827 if (!self
->reserved_stack
)
3828 self
->reserved_stack
= self
->kernel_stack
;
3833 * Initialize some paging parameters.
3836 if (vm_pageout_swap_wait
== 0)
3837 vm_pageout_swap_wait
= VM_PAGEOUT_SWAP_WAIT
;
3839 if (vm_pageout_idle_wait
== 0)
3840 vm_pageout_idle_wait
= VM_PAGEOUT_IDLE_WAIT
;
3842 if (vm_pageout_burst_wait
== 0)
3843 vm_pageout_burst_wait
= VM_PAGEOUT_BURST_WAIT
;
3845 if (vm_pageout_empty_wait
== 0)
3846 vm_pageout_empty_wait
= VM_PAGEOUT_EMPTY_WAIT
;
3848 if (vm_pageout_deadlock_wait
== 0)
3849 vm_pageout_deadlock_wait
= VM_PAGEOUT_DEADLOCK_WAIT
;
3851 if (vm_pageout_deadlock_relief
== 0)
3852 vm_pageout_deadlock_relief
= VM_PAGEOUT_DEADLOCK_RELIEF
;
3854 if (vm_pageout_inactive_relief
== 0)
3855 vm_pageout_inactive_relief
= VM_PAGEOUT_INACTIVE_RELIEF
;
3857 if (vm_pageout_burst_active_throttle
== 0)
3858 vm_pageout_burst_active_throttle
= VM_PAGEOUT_BURST_ACTIVE_THROTTLE
;
3860 if (vm_pageout_burst_inactive_throttle
== 0)
3861 vm_pageout_burst_inactive_throttle
= VM_PAGEOUT_BURST_INACTIVE_THROTTLE
;
3864 * Set kernel task to low backing store privileged
3867 task_lock(kernel_task
);
3868 kernel_task
->priv_flags
|= VM_BACKING_STORE_PRIV
;
3869 task_unlock(kernel_task
);
3871 vm_page_free_count_init
= vm_page_free_count
;
3874 * even if we've already called vm_page_free_reserve
3875 * call it again here to insure that the targets are
3876 * accurately calculated (it uses vm_page_free_count_init)
3877 * calling it with an arg of 0 will not change the reserve
3878 * but will re-calculate free_min and free_target
3880 if (vm_page_free_reserved
< VM_PAGE_FREE_RESERVED(processor_count
)) {
3881 vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count
)) - vm_page_free_reserved
);
3883 vm_page_free_reserve(0);
3886 queue_init(&vm_pageout_queue_external
.pgo_pending
);
3887 vm_pageout_queue_external
.pgo_maxlaundry
= VM_PAGE_LAUNDRY_MAX
;
3888 vm_pageout_queue_external
.pgo_laundry
= 0;
3889 vm_pageout_queue_external
.pgo_idle
= FALSE
;
3890 vm_pageout_queue_external
.pgo_busy
= FALSE
;
3891 vm_pageout_queue_external
.pgo_throttled
= FALSE
;
3892 vm_pageout_queue_external
.pgo_draining
= FALSE
;
3893 vm_pageout_queue_external
.pgo_lowpriority
= FALSE
;
3894 vm_pageout_queue_external
.pgo_tid
= -1;
3895 vm_pageout_queue_external
.pgo_inited
= FALSE
;
3898 queue_init(&vm_pageout_queue_internal
.pgo_pending
);
3899 vm_pageout_queue_internal
.pgo_maxlaundry
= 0;
3900 vm_pageout_queue_internal
.pgo_laundry
= 0;
3901 vm_pageout_queue_internal
.pgo_idle
= FALSE
;
3902 vm_pageout_queue_internal
.pgo_busy
= FALSE
;
3903 vm_pageout_queue_internal
.pgo_throttled
= FALSE
;
3904 vm_pageout_queue_internal
.pgo_draining
= FALSE
;
3905 vm_pageout_queue_internal
.pgo_lowpriority
= FALSE
;
3906 vm_pageout_queue_internal
.pgo_tid
= -1;
3907 vm_pageout_queue_internal
.pgo_inited
= FALSE
;
3909 /* internal pageout thread started when default pager registered first time */
3910 /* external pageout and garbage collection threads started here */
3912 result
= kernel_thread_start_priority((thread_continue_t
)vm_pageout_iothread_external
, NULL
,
3913 BASEPRI_PREEMPT
- 1,
3914 &vm_pageout_external_iothread
);
3915 if (result
!= KERN_SUCCESS
)
3916 panic("vm_pageout_iothread_external: create failed");
3918 thread_deallocate(vm_pageout_external_iothread
);
3920 result
= kernel_thread_start_priority((thread_continue_t
)vm_pageout_garbage_collect
, NULL
,
3923 if (result
!= KERN_SUCCESS
)
3924 panic("vm_pageout_garbage_collect: create failed");
3926 thread_deallocate(thread
);
3928 #if VM_PRESSURE_EVENTS
3929 result
= kernel_thread_start_priority((thread_continue_t
)vm_pressure_thread
, NULL
,
3933 if (result
!= KERN_SUCCESS
)
3934 panic("vm_pressure_thread: create failed");
3936 thread_deallocate(thread
);
3939 vm_object_reaper_init();
3941 if (COMPRESSED_PAGER_IS_ACTIVE
|| DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE
)
3942 vm_compressor_pager_init();
3944 #if VM_PRESSURE_EVENTS
3945 vm_pressure_events_enabled
= TRUE
;
3946 #endif /* VM_PRESSURE_EVENTS */
3948 #if CONFIG_PHANTOM_CACHE
3949 vm_phantom_cache_init();
3951 #if VM_PAGE_BUCKETS_CHECK
3952 #if VM_PAGE_FAKE_BUCKETS
3953 printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
3954 (uint64_t) vm_page_fake_buckets_start
,
3955 (uint64_t) vm_page_fake_buckets_end
);
3956 pmap_protect(kernel_pmap
,
3957 vm_page_fake_buckets_start
,
3958 vm_page_fake_buckets_end
,
3960 // *(char *) vm_page_fake_buckets_start = 'x'; /* panic! */
3961 #endif /* VM_PAGE_FAKE_BUCKETS */
3962 #endif /* VM_PAGE_BUCKETS_CHECK */
3964 #if VM_OBJECT_TRACKING
3965 vm_object_tracking_init();
3966 #endif /* VM_OBJECT_TRACKING */
3969 #if FBDP_TEST_COLLAPSE_COMPRESSOR
3970 vm_object_size_t backing_size
, top_size
;
3971 vm_object_t backing_object
, top_object
;
3972 vm_map_offset_t backing_offset
, top_offset
;
3973 unsigned char *backing_address
, *top_address
;
3976 printf("FBDP_TEST_COLLAPSE_COMPRESSOR:\n");
3978 /* create backing object */
3979 backing_size
= 15 * PAGE_SIZE
;
3980 backing_object
= vm_object_allocate(backing_size
);
3981 assert(backing_object
!= VM_OBJECT_NULL
);
3982 printf("FBDP_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
3984 /* map backing object */
3986 kr
= vm_map_enter(kernel_map
, &backing_offset
, backing_size
, 0,
3987 VM_FLAGS_ANYWHERE
, backing_object
, 0, FALSE
,
3988 VM_PROT_DEFAULT
, VM_PROT_DEFAULT
, VM_INHERIT_DEFAULT
);
3989 assert(kr
== KERN_SUCCESS
);
3990 backing_address
= (unsigned char *) backing_offset
;
3991 printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
3992 "mapped backing object %p at 0x%llx\n",
3993 backing_object
, (uint64_t) backing_offset
);
3994 /* populate with pages to be compressed in backing object */
3995 backing_address
[0x1*PAGE_SIZE
] = 0xB1;
3996 backing_address
[0x4*PAGE_SIZE
] = 0xB4;
3997 backing_address
[0x7*PAGE_SIZE
] = 0xB7;
3998 backing_address
[0xa*PAGE_SIZE
] = 0xBA;
3999 backing_address
[0xd*PAGE_SIZE] = 0xBD;
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
	       "populated pages to be compressed in "
	       "backing_object %p\n", backing_object);

	/* compress backing object */
	vm_object_pageout(backing_object);
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
	       backing_object);
	/* wait for all the pages to be gone */
	while (*(volatile int *)&backing_object->resident_page_count != 0)
		;
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
	       backing_object);

	/* populate with pages to be resident in backing object */
	backing_address[0x0*PAGE_SIZE] = 0xB0;
	backing_address[0x3*PAGE_SIZE] = 0xB3;
	backing_address[0x6*PAGE_SIZE] = 0xB6;
	backing_address[0x9*PAGE_SIZE] = 0xB9;
	backing_address[0xc*PAGE_SIZE] = 0xBC;
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
	       "populated pages to be resident in "
	       "backing_object %p\n", backing_object);
	/* leave the other pages absent */

	/* mess with the paging_offset of the backing_object */
	assert(backing_object->paging_offset == 0);
	backing_object->paging_offset = 0x3000;

	/* create top object */
	top_size = 9 * PAGE_SIZE;
	top_object = vm_object_allocate(top_size);
	assert(top_object != VM_OBJECT_NULL);
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
	       top_object);

	/* map top object */
	kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
			  VM_FLAGS_ANYWHERE, top_object, 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	top_address = (unsigned char *) top_offset;
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
	       "mapped top object %p at 0x%llx\n",
	       top_object, (uint64_t) top_offset);

	/* populate with pages to be compressed in top object */
	top_address[0x3*PAGE_SIZE] = 0xA3;
	top_address[0x4*PAGE_SIZE] = 0xA4;
	top_address[0x5*PAGE_SIZE] = 0xA5;
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
	       "populated pages to be compressed in "
	       "top_object %p\n", top_object);

	/* compress top object */
	vm_object_pageout(top_object);
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
	       top_object);
	/* wait for all the pages to be gone */
	while (top_object->resident_page_count != 0)
		;
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
	       top_object);

	/* populate with pages to be resident in top object */
	top_address[0x0*PAGE_SIZE] = 0xA0;
	top_address[0x1*PAGE_SIZE] = 0xA1;
	top_address[0x2*PAGE_SIZE] = 0xA2;
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
	       "populated pages to be resident in "
	       "top_object %p\n", top_object);
	/* leave the other pages absent */

	/* link the 2 objects */
	vm_object_reference(backing_object);
	top_object->shadow = backing_object;
	top_object->vo_shadow_offset = 0x3000;
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
	       top_object, backing_object);

	/* unmap backing object */
	vm_map_remove(kernel_map,
		      backing_offset,
		      backing_offset + backing_size,
		      VM_MAP_NO_FLAGS);
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
	       "unmapped backing_object %p [0x%llx:0x%llx]\n",
	       backing_object,
	       (uint64_t) backing_offset,
	       (uint64_t) (backing_offset + backing_size));

	/* collapse */
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
	vm_object_lock(top_object);
	vm_object_collapse(top_object, 0, FALSE);
	vm_object_unlock(top_object);
	printf("FBDP_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);

	/* did it work? */
	if (top_object->shadow != VM_OBJECT_NULL) {
		printf("FBDP_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
		printf("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		if (vm_object_collapse_compressor_allowed) {
			panic("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		}
	} else {
		/* check the contents of the mapping */
		unsigned char expect[9] =
			{ 0xA0, 0xA1, 0xA2,	/* resident in top */
			  0xA3, 0xA4, 0xA5,	/* compressed in top */
			  0xB9,	/* resident in backing + shadow_offset */
			  0xBD,	/* compressed in backing + shadow_offset + paging_offset */
			  0x00 };		/* absent in both */
		unsigned char actual[9];
		unsigned int i, errors;

		errors = 0;
		for (i = 0; i < sizeof (actual); i++) {
			actual[i] = (unsigned char) top_address[i*PAGE_SIZE];
			if (actual[i] != expect[i]) {
				errors++;
			}
		}
		printf("FBDP_TEST_COLLAPSE_COMPRESSOR: "
		       "actual [%x %x %x %x %x %x %x %x %x] "
		       "expect [%x %x %x %x %x %x %x %x %x] "
		       "%d errors\n",
		       actual[0], actual[1], actual[2], actual[3],
		       actual[4], actual[5], actual[6], actual[7],
		       actual[8],
		       expect[0], expect[1], expect[2], expect[3],
		       expect[4], expect[5], expect[6], expect[7],
		       expect[8],
		       errors);
		if (errors) {
			panic("FBDP_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		} else {
			printf("FBDP_TEST_COLLAPSE_COMPRESSOR: PASS\n");
		}
	}
#endif /* FBDP_TEST_COLLAPSE_COMPRESSOR */
#if FBDP_TEST_WIRE_AND_EXTRACT
	ledger_t		ledger;
	vm_map_t		user_map, wire_map;
	mach_vm_address_t	user_addr, wire_addr;
	mach_vm_size_t		user_size, wire_size;
	mach_vm_offset_t	cur_offset;
	vm_prot_t		cur_prot, max_prot;
	ppnum_t			user_ppnum, wire_ppnum;

	ledger = ledger_instantiate(task_ledger_template,
				    LEDGER_CREATE_ACTIVE_ENTRIES);
	user_map = vm_map_create(pmap_create(ledger, 0, TRUE),
				 0x100000000ULL,
				 0x200000000ULL,
				 TRUE);
	wire_map = vm_map_create(NULL,
				 0x100000000ULL,
				 0x200000000ULL,
				 TRUE);
	user_size = 0x10000;
	kr = mach_vm_allocate(user_map,
			      &user_addr,
			      user_size,
			      VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);
	wire_size = user_size;
	kr = mach_vm_remap(wire_map,
			   &wire_addr,
			   wire_size,
			   0,
			   VM_FLAGS_ANYWHERE,
			   user_map,
			   user_addr,
			   FALSE,
			   &cur_prot,
			   &max_prot,
			   VM_INHERIT_NONE);
	assert(kr == KERN_SUCCESS);
	for (cur_offset = 0;
	     cur_offset < wire_size;
	     cur_offset += PAGE_SIZE) {
		kr = vm_map_wire_and_extract(wire_map,
					     wire_addr + cur_offset,
					     VM_PROT_DEFAULT,
					     TRUE,
					     &wire_ppnum);
		assert(kr == KERN_SUCCESS);
		user_ppnum = vm_map_get_phys_page(user_map,
						  user_addr + cur_offset);
		printf("FBDP_TEST_WIRE_AND_EXTRACT: kr=0x%x "
		       "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
		       kr,
		       user_map, user_addr + cur_offset, user_ppnum,
		       wire_map, wire_addr + cur_offset, wire_ppnum);
		if (kr != KERN_SUCCESS ||
		    wire_ppnum != user_ppnum) {
			panic("FBDP_TEST_WIRE_AND_EXTRACT: FAIL\n");
		}
	}
	cur_offset -= PAGE_SIZE;
	kr = vm_map_wire_and_extract(wire_map,
				     wire_addr + cur_offset,
				     VM_PROT_DEFAULT,
				     TRUE,
				     &wire_ppnum);
	assert(kr == KERN_SUCCESS);
	printf("FBDP_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
	       "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
	       kr,
	       user_map, user_addr + cur_offset, user_ppnum,
	       wire_map, wire_addr + cur_offset, wire_ppnum);
	if (kr != KERN_SUCCESS ||
	    wire_ppnum != user_ppnum) {
		panic("FBDP_TEST_WIRE_AND_EXTRACT: FAIL\n");
	}

	printf("FBDP_TEST_WIRE_AND_EXTRACT: PASS\n");
#endif /* FBDP_TEST_WIRE_AND_EXTRACT */
	vm_pageout_continue();

	/*
	 * The vm_pageout_continue() call above never returns, so the code below is never
	 * executed.  We take advantage of this to declare several DTrace VM related probe
	 * points that our kernel doesn't have an analog for.  These are probe points that
	 * exist in Solaris and are in the DTrace documentation, so people may have written
	 * scripts that use them.  Declaring the probe points here means their scripts will
	 * compile and execute which we want for portability of the scripts, but since this
	 * section of code is never reached, the probe points will simply never fire.  Yes,
	 * this is basically a hack.  The problem is the DTrace probe points were chosen with
	 * Solaris specific VM events in mind, not portability to different VM implementations.
	 */

	DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
	/*NOTREACHED*/
}
#define MAX_COMRPESSOR_THREAD_COUNT	8

struct cq ciq[MAX_COMRPESSOR_THREAD_COUNT];

int vm_compressor_thread_count = 2;

kern_return_t
vm_pageout_internal_start(void)
{
	kern_return_t	result;
	int		i;
	host_basic_info_data_t hinfo;

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

		assert(hinfo.max_cpus > 0);

		if (vm_compressor_thread_count >= hinfo.max_cpus)
			vm_compressor_thread_count = hinfo.max_cpus - 1;
		if (vm_compressor_thread_count <= 0)
			vm_compressor_thread_count = 1;
		else if (vm_compressor_thread_count > MAX_COMRPESSOR_THREAD_COUNT)
			vm_compressor_thread_count = MAX_COMRPESSOR_THREAD_COUNT;

		vm_pageout_queue_internal.pgo_maxlaundry = (vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
	} else {
		vm_compressor_thread_count = 1;
		vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
	}

	for (i = 0; i < vm_compressor_thread_count; i++) {
		result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
		if (result == KERN_SUCCESS)
			thread_deallocate(vm_pageout_internal_iothread);
		else
			break;
	}
	return result;
}
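
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * thread-count clamping performed above, isolated as a pure function so the
 * bounds are easy to see.  Only MAX_COMRPESSOR_THREAD_COUNT from above is
 * assumed.
 */
#if 0	/* example only, not compiled */
static int
vm_compressor_clamp_thread_count(int requested, int max_cpus)
{
	/* never dedicate every CPU to compression */
	if (requested >= max_cpus)
		requested = max_cpus - 1;
	/* always run at least one thread... */
	if (requested <= 0)
		requested = 1;
	/* ...and never more than the statically sized ciq[] table */
	else if (requested > MAX_COMRPESSOR_THREAD_COUNT)
		requested = MAX_COMRPESSOR_THREAD_COUNT;
	return requested;
}
#endif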
#if CONFIG_IOSCHED
/*
 * To support I/O Expedite for compressed files we mark the upls with special flags.
 * The way decmpfs works is that we create a big upl which marks all the pages needed to
 * represent the compressed file as busy. We tag this upl with the flag UPL_DECMP_REQ. Decmpfs
 * then issues smaller I/Os for compressed I/Os, deflates them and puts the data into the pages
 * being held in the big original UPL. We mark each of these smaller UPLs with the flag
 * UPL_DECMP_REAL_IO. Any outstanding real I/O UPL is tracked by the big req upl using the
 * decmp_io_upl field (in the upl structure). This link is protected in the forward direction
 * by the req upl lock (the reverse link doesn't need synch. since we never inspect this link
 * unless the real I/O upl is being destroyed).
 */

static void
upl_set_decmp_info(upl_t upl, upl_t src_upl)
{
	assert((src_upl->flags & UPL_DECMP_REQ) != 0);

	upl_lock(src_upl);
	if (src_upl->decmp_io_upl) {
		/*
		 * If there is already an alive real I/O UPL, ignore this new UPL.
		 * This case should rarely happen and even if it does, it just means
		 * that we might issue a spurious expedite which the driver is expected
		 * to handle.
		 */
		upl_unlock(src_upl);
		return;
	}
	src_upl->decmp_io_upl = (void *)upl;
	src_upl->ref_count++;

	upl->flags |= UPL_DECMP_REAL_IO;
	upl->decmp_io_upl = (void *)src_upl;
	upl_unlock(src_upl);
}
#endif /* CONFIG_IOSCHED */
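
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * linkage invariant that upl_set_decmp_info() establishes and upl_destroy()
 * tears down.  Field and flag names are the ones used above.
 */
#if 0	/* example only, not compiled */
static boolean_t
upl_decmp_link_is_consistent(upl_t req_upl, upl_t io_upl)
{
	/* the decmpfs request UPL is tagged and points at the real-I/O UPL */
	if (!(req_upl->flags & UPL_DECMP_REQ) ||
	    req_upl->decmp_io_upl != (void *)io_upl)
		return FALSE;
	/* the real-I/O UPL points back; it also holds a ref on the request UPL */
	return (io_upl->flags & UPL_DECMP_REAL_IO) &&
	       io_upl->decmp_io_upl == (void *)req_upl;
}
#endif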
#if UPL_DEBUG
int	upl_debug_enabled = 1;
#else
int	upl_debug_enabled = 0;
#endif

static upl_t
upl_create(int type, int flags, upl_size_t size)
{
	upl_t		upl;
	vm_size_t	page_field_size = 0;
	int		upl_flags = 0;
	vm_size_t	upl_size = sizeof(struct upl);

	size = round_page_32(size);

	if (type & UPL_CREATE_LITE) {
		page_field_size = (atop(size) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;

		upl_flags |= UPL_LITE;
	}
	if (type & UPL_CREATE_INTERNAL) {
		upl_size += sizeof(struct upl_page_info) * atop(size);

		upl_flags |= UPL_INTERNAL;
	}
	upl = (upl_t)kalloc(upl_size + page_field_size);

	if (page_field_size)
		bzero((char *)upl + upl_size, page_field_size);

	upl->flags = upl_flags | flags;
	upl->src_object = NULL;
	upl->kaddr = (vm_offset_t)0;
	upl->size = 0;
	upl->map_object = NULL;
	upl->ref_count = 1;
	upl->ext_ref_count = 0;
	upl->highest_page = 0;
	upl_lock_init(upl);
	upl->vector_upl = NULL;
#if CONFIG_IOSCHED
	if (type & UPL_CREATE_IO_TRACKING) {
		upl->upl_priority = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
	}

	upl->upl_reprio_info = 0;
	upl->decmp_io_upl = 0;
	if ((type & UPL_CREATE_INTERNAL) && (type & UPL_CREATE_EXPEDITE_SUP)) {
		/* Only support expedite on internal UPLs */
		thread_t	curthread = current_thread();
		upl->upl_reprio_info = (uint64_t *)kalloc(sizeof(uint64_t) * atop(size));
		bzero(upl->upl_reprio_info, (sizeof(uint64_t) * atop(size)));
		upl->flags |= UPL_EXPEDITE_SUPPORTED;
		if (curthread->decmp_upl != NULL)
			upl_set_decmp_info(upl, curthread->decmp_upl);
	}
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	if ((type & UPL_CREATE_IO_TRACKING) || upl_debug_enabled) {
		upl->upl_creator = current_thread();
		upl->uplq.next = 0;
		upl->uplq.prev = 0;
		upl->flags |= UPL_TRACKED_BY_OBJECT;
	}
#endif

	upl->ubc_alias1 = 0;
	upl->ubc_alias2 = 0;

#if UPL_DEBUG
	upl->upl_commit_index = 0;
	bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));

	(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif /* UPL_DEBUG */

	return(upl);
}
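
/*
 * Layout note (descriptive only, not part of the original comments): for an
 * UPL_CREATE_INTERNAL | UPL_CREATE_LITE request, the single kalloc() above is
 * carved up as
 *
 *	[ struct upl | upl_page_info_t[atop(size)] | lite bitmap ]
 *
 * which is exactly how vm_object_upl_request() and upl_commit_range() later
 * recover the user_page_list and lite_list pointers from a upl_t.
 */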
static void
upl_destroy(upl_t upl)
{
	int	page_field_size;  /* bit field in word size buf */
	int	size;

	if (upl->ext_ref_count) {
		panic("upl(%p) ext_ref_count", upl);
	}

#if CONFIG_IOSCHED
	if ((upl->flags & UPL_DECMP_REAL_IO) && upl->decmp_io_upl) {
		upl_t	src_upl;

		src_upl = upl->decmp_io_upl;
		assert((src_upl->flags & UPL_DECMP_REQ) != 0);
		upl_lock(src_upl);
		src_upl->decmp_io_upl = NULL;
		upl_unlock(src_upl);
		upl_deallocate(src_upl);
	}
#endif /* CONFIG_IOSCHED */

#if CONFIG_IOSCHED || UPL_DEBUG
	if ((upl->flags & UPL_TRACKED_BY_OBJECT) && !(upl->flags & UPL_VECTOR)) {
		vm_object_t	object;

		if (upl->flags & UPL_SHADOWED) {
			object = upl->map_object->shadow;
		} else {
			object = upl->map_object;
		}

		vm_object_lock(object);
		queue_remove(&object->uplq, upl, upl_t, uplq);
		vm_object_activity_end(object);
		vm_object_collapse(object, 0, TRUE);
		vm_object_unlock(object);
	}
#endif
	/*
	 * drop a reference on the map_object whether or
	 * not a pageout object is inserted
	 */
	if (upl->flags & UPL_SHADOWED)
		vm_object_deallocate(upl->map_object);

	if (upl->flags & UPL_DEVICE_MEMORY)
		size = PAGE_SIZE;
	else
		size = upl->size;
	page_field_size = 0;

	if (upl->flags & UPL_LITE) {
		page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
	}
	upl_lock_destroy(upl);
	upl->vector_upl = (vector_upl_t) 0xfeedbeef;

#if CONFIG_IOSCHED
	if (upl->flags & UPL_EXPEDITE_SUPPORTED)
		kfree(upl->upl_reprio_info, sizeof(uint64_t) * (size/PAGE_SIZE));
#endif

	if (upl->flags & UPL_INTERNAL) {
		kfree(upl,
		      sizeof(struct upl) +
		      (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
		      + page_field_size);
	} else {
		kfree(upl, sizeof(struct upl) + page_field_size);
	}
}
void
upl_deallocate(upl_t upl)
{
	upl_lock(upl);
	if (--upl->ref_count == 0) {
		if (vector_upl_is_valid(upl))
			vector_upl_deallocate(upl);
		upl_unlock(upl);
		upl_destroy(upl);
	} else {
		upl_unlock(upl);
	}
}
#if CONFIG_IOSCHED
void
upl_mark_decmp(upl_t upl)
{
	if (upl->flags & UPL_TRACKED_BY_OBJECT) {
		upl->flags |= UPL_DECMP_REQ;
		upl->upl_creator->decmp_upl = (void *)upl;
	}
}

void
upl_unmark_decmp(upl_t upl)
{
	if (upl && (upl->flags & UPL_DECMP_REQ)) {
		upl->upl_creator->decmp_upl = NULL;
	}
}
#endif /* CONFIG_IOSCHED */
#define VM_PAGE_Q_BACKING_UP(q)		\
        ((q)->pgo_laundry >= (((q)->pgo_maxlaundry * 8) / 10))

boolean_t must_throttle_writes(void);

boolean_t
must_throttle_writes()
{
	if (VM_PAGE_Q_BACKING_UP(&vm_pageout_queue_external) &&
	    vm_page_pageable_external_count > (AVAILABLE_NON_COMPRESSED_MEMORY * 6) / 10)
		return (TRUE);

	return (FALSE);
}
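
/*
 * Worked example (descriptive only): with the 8/10 threshold above, a queue
 * whose pgo_maxlaundry is 120 is considered to be backing up once pgo_laundry
 * reaches 96; writes are then throttled only if, in addition, the pageable
 * external page count exceeds 6/10 of AVAILABLE_NON_COMPRESSED_MEMORY.
 */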
#if DEVELOPMENT || DEBUG
/*
 * Statistics about UPL enforcement of copy-on-write obligations.
 */
unsigned long upl_cow = 0;
unsigned long upl_cow_again = 0;
unsigned long upl_cow_pages = 0;
unsigned long upl_cow_again_pages = 0;

unsigned long iopl_cow = 0;
unsigned long iopl_cow_pages = 0;
#endif
/*
 *	Routine:	vm_object_upl_request
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		A page list structure, listing the physical pages
 *		will be returned upon request.
 *		This function is called by the file system or any other
 *		supplier of backing store to a pager.
 *		IMPORTANT NOTE: The caller must still respect the relationship
 *		between the vm_object and its backing memory object.  The
 *		caller MUST NOT substitute changes in the backing file
 *		without first doing a memory_object_lock_request on the
 *		target range unless it is known that the pages are not
 *		shared with another entity at the pager level.
 *	Copy_in_to:
 *		if a page list structure is present
 *		return the mapped physical pages, where a
 *		page is not present, return a non-initialized
 *		one.  If the no_sync bit is turned on, don't
 *		call the pager unlock to synchronize with other
 *		possible copies of the page. Leave pages busy
 *		in the original object, if a page list structure
 *		was specified.  When a commit of the page list
 *		pages is done, the dirty bit will be set for each one.
 *	Copy_out_from:
 *		If a page list structure is present, return
 *		all mapped pages.  Where a page does not exist
 *		map a zero filled one.  Leave pages busy in
 *		the original object.  If a page list structure
 *		is not specified, this call is a no-op.
 *
 *		Note:  access of default pager objects has a rather interesting
 *		twist.  The caller of this routine, presumably the file system
 *		page cache handling code, will never actually make a request
 *		against a default pager backed object.  Only the default
 *		pager will make requests on backing store related vm_objects.
 *		In this way the default pager can maintain the relationship
 *		between backing store files (abstract memory objects) and
 *		the vm_objects (cache objects) they support.
 */
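
/*
 * Illustrative call sketch (not part of the original source): a typical
 * internal "lite" UPL request against the first 16 pages of an object,
 * followed by the usual commit/deallocate sequence.  The surrounding
 * variables are hypothetical; the calls and flags are the ones defined in
 * this file and in the UPL headers.
 */
#if 0	/* example only, not compiled */
	upl_t		upl;
	upl_page_info_t	*pl;
	unsigned int	page_list_count = 0;
	kern_return_t	kr;

	kr = vm_object_upl_request(object,
				   0,			/* offset */
				   16 * PAGE_SIZE,	/* size */
				   &upl,
				   NULL,		/* page list is kept internal */
				   &page_list_count,
				   UPL_SET_INTERNAL | UPL_SET_LITE | UPL_COPYOUT_FROM);
	if (kr == KERN_SUCCESS) {
		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
		/* ... inspect pl[i].phys_addr, pl[i].dirty, ... */
		upl_commit(upl, pl, page_list_count);
		upl_deallocate(upl);
	}
#endif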
__private_extern__ kern_return_t
vm_object_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_page_t		dst_page = VM_PAGE_NULL;
	vm_object_offset_t	dst_offset;
	upl_size_t		xfer_size;
	unsigned int		size_in_pages;
	boolean_t		dirty;
	boolean_t		hw_dirty;
	upl_t			upl = NULL;
	unsigned int		entry;
#if MACH_CLUSTER_STATS
	boolean_t		encountered_lrp = FALSE;
#endif
	vm_page_t		alias_page = NULL;
	int			refmod_state = 0;
	wpl_array_t		lite_list = NULL;
	vm_object_t		last_copy_object;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	int			dw_count;
	int			dw_limit;
	int			io_tracking_flag = 0;

	if (cntrl_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}
	if ( (!object->internal) && (object->paging_offset != 0) )
		panic("vm_object_upl_request: external object with non-zero paging offset\n");
	if (object->phys_contiguous)
		panic("vm_object_upl_request: contiguous object specified\n");

	if (size > MAX_UPL_SIZE_BYTES)
		size = MAX_UPL_SIZE_BYTES;

	if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
		*page_list_count = MAX_UPL_SIZE_BYTES >> PAGE_SHIFT;

#if CONFIG_IOSCHED || UPL_DEBUG
	if (object->io_tracking || upl_debug_enabled)
		io_tracking_flag |= UPL_CREATE_IO_TRACKING;
#endif
#if CONFIG_IOSCHED
	if (object->io_tracking)
		io_tracking_flag |= UPL_CREATE_EXPEDITE_SUP;
#endif

	if (cntrl_flags & UPL_SET_INTERNAL) {
		if (cntrl_flags & UPL_SET_LITE) {

			upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);

			user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
			lite_list = (wpl_array_t)
					(((uintptr_t)user_page_list) +
					((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
			if (size == 0) {
				user_page_list = NULL;
				lite_list = NULL;
			}
		} else {
			upl = upl_create(UPL_CREATE_INTERNAL | io_tracking_flag, 0, size);

			user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
			if (size == 0) {
				user_page_list = NULL;
			}
		}
	} else {
		if (cntrl_flags & UPL_SET_LITE) {

			upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE | io_tracking_flag, 0, size);

			lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
			if (size == 0) {
				lite_list = NULL;
			}
		} else {
			upl = upl_create(UPL_CREATE_EXTERNAL | io_tracking_flag, 0, size);
		}
	}
	*upl_ptr = upl;

	if (user_page_list)
		user_page_list[0].device = FALSE;

	if (cntrl_flags & UPL_SET_LITE) {
		upl->map_object = object;
	} else {
		upl->map_object = vm_object_allocate(size);
		/*
		 * No neeed to lock the new object: nobody else knows
		 * about it yet, so it's all ours so far.
		 */
		upl->map_object->shadow = object;
		upl->map_object->pageout = TRUE;
		upl->map_object->can_persist = FALSE;
		upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
		upl->map_object->vo_shadow_offset = offset;
		upl->map_object->wimg_bits = object->wimg_bits;

		VM_PAGE_GRAB_FICTITIOUS(alias_page);

		upl->flags |= UPL_SHADOWED;
	}
	/*
	 * Just mark the UPL as "encrypted" here.
	 * We'll actually encrypt the pages later,
	 * in upl_encrypt(), when the caller has
	 * selected which pages need to go to swap.
	 */
	if (cntrl_flags & UPL_ENCRYPT)
		upl->flags |= UPL_ENCRYPTED;

	if (cntrl_flags & UPL_FOR_PAGEOUT)
		upl->flags |= UPL_PAGEOUT;

	vm_object_lock(object);
	vm_object_activity_begin(object);

	/*
	 * we can lock in the paging_offset once paging_in_progress is set
	 */
	upl->size = size;
	upl->offset = offset + object->paging_offset;

#if CONFIG_IOSCHED || UPL_DEBUG
	if (object->io_tracking || upl_debug_enabled) {
		vm_object_activity_begin(object);
		queue_enter(&object->uplq, upl, upl_t, uplq);
	}
#endif
	if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
		/*
		 * Honor copy-on-write obligations
		 *
		 * The caller is gathering these pages and
		 * might modify their contents.  We need to
		 * make sure that the copy object has its own
		 * private copies of these pages before we let
		 * the caller modify them.
		 */
		vm_object_update(object,
				 offset,
				 size,
				 NULL,
				 NULL,
				 FALSE,	/* should_return */
				 MEMORY_OBJECT_COPY_SYNC,
				 VM_PROT_NO_CHANGE);
#if DEVELOPMENT || DEBUG
		upl_cow++;
		upl_cow_pages += size >> PAGE_SHIFT;
#endif
	}
	/*
	 * remember which copy object we synchronized with
	 */
	last_copy_object = object->copy;
	entry = 0;

	xfer_size = size;
	dst_offset = offset;
	size_in_pages = size / PAGE_SIZE;

	dwp = &dw_array[0];
	dw_count = 0;
	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
	    object->resident_page_count < ((MAX_UPL_SIZE_BYTES * 2) >> PAGE_SHIFT))
		object->scan_collisions = 0;

	if ((cntrl_flags & UPL_WILL_MODIFY) && must_throttle_writes() == TRUE) {
		boolean_t	isSSD = FALSE;

		vnode_pager_get_isSSD(object->pager, &isSSD);
		vm_object_unlock(object);

		OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);

		if (isSSD == TRUE)
			delay(1000 * size_in_pages);
		else
			delay(5000 * size_in_pages);
		OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

		vm_object_lock(object);
	}
	while (xfer_size) {

		dwp->dw_mask = 0;

		if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
			vm_object_unlock(object);
			VM_PAGE_GRAB_FICTITIOUS(alias_page);
			vm_object_lock(object);
		}
		if (cntrl_flags & UPL_COPYOUT_FROM) {
			upl->flags |= UPL_PAGE_SYNC_DONE;

			if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
				dst_page->fictitious ||
				dst_page->absent ||
				dst_page->error ||
				dst_page->cleaning ||
				(VM_PAGE_WIRED(dst_page))) {

				if (user_page_list)
					user_page_list[entry].phys_addr = 0;

				goto try_next_page;
			}
			/*
			 * grab this up front...
			 * a high percentange of the time we're going to
			 * need the hardware modification state a bit later
			 * anyway... so we can eliminate an extra call into
			 * the pmap layer by grabbing it here and recording it
			 */
			if (dst_page->pmapped)
				refmod_state = pmap_get_refmod(dst_page->phys_page);
			else
				refmod_state = 0;

			if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
				/*
				 * page is on inactive list and referenced...
				 * reactivate it now... this gets it out of the
				 * way of vm_pageout_scan which would have to
				 * reactivate it upon tripping over it
				 */
				dwp->dw_mask |= DW_vm_page_activate;
			}
			if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
				/*
				 * we're only asking for DIRTY pages to be returned
				 */
				if (dst_page->laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
					/*
					 * if we were the page stolen by vm_pageout_scan to be
					 * cleaned (as opposed to a buddy being clustered in
					 * or this request is not being driven by a PAGEOUT cluster
					 * then we only need to check for the page being dirty or
					 * precious to decide whether to return it
					 */
					if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
						goto check_busy;
					goto dont_return;
				}
				/*
				 * this is a request for a PAGEOUT cluster and this page
				 * is merely along for the ride as a 'buddy'... not only
				 * does it have to be dirty to be returned, but it also
				 * can't have been referenced recently...
				 */
				if ( (hibernate_cleaning_in_progress == TRUE ||
				      (!((refmod_state & VM_MEM_REFERENCED) || dst_page->reference) || dst_page->throttled)) &&
				      ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
					goto check_busy;
				}
dont_return:
				/*
				 * if we reach here, we're not to return
				 * the page... go on to the next one
				 */
				if (dst_page->laundry == TRUE) {
					/*
					 * if we get here, the page is not 'cleaning' (filtered out above).
					 * since it has been referenced, remove it from the laundry
					 * so we don't pay the cost of an I/O to clean a page
					 * we're just going to take back
					 */
					vm_page_lockspin_queues();

					vm_pageout_steal_laundry(dst_page, TRUE);
					vm_page_activate(dst_page);

					vm_page_unlock_queues();
				}
				if (user_page_list)
					user_page_list[entry].phys_addr = 0;

				goto try_next_page;
			}
check_busy:
			if (dst_page->busy) {
				if (cntrl_flags & UPL_NOBLOCK) {
					if (user_page_list)
						user_page_list[entry].phys_addr = 0;

					goto try_next_page;
				}
				/*
				 * someone else is playing with the
				 * page.  We will have to wait.
				 */
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);

				continue;
			}
			/*
			 * The caller is gathering this page and might
			 * access its contents later on.  Decrypt the
			 * page before adding it to the UPL, so that
			 * the caller never sees encrypted data.
			 */
			if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
				int	was_busy;

				/*
				 * save the current state of busy
				 * mark page as busy while decrypt
				 * is in progress since it will drop
				 * the object lock...
				 */
				was_busy = dst_page->busy;
				dst_page->busy = TRUE;

				vm_page_decrypt(dst_page, 0);
				vm_page_decrypt_for_upl_counter++;
				/*
				 * restore to original busy state
				 */
				dst_page->busy = was_busy;
			}
			if (dst_page->pageout_queue == TRUE) {

				vm_page_lockspin_queues();

				if (dst_page->pageout_queue == TRUE) {
					/*
					 * we've buddied up a page for a clustered pageout
					 * that has already been moved to the pageout
					 * queue by pageout_scan... we need to remove
					 * it from the queue and drop the laundry count
					 * on that queue
					 */
					vm_pageout_throttle_up(dst_page);
				}
				vm_page_unlock_queues();
			}
#if MACH_CLUSTER_STATS
			/*
			 * pageout statistics gathering.  count
			 * all the pages we will page out that
			 * were not counted in the initial
			 * vm_pageout_scan work
			 */
			if (dst_page->pageout)
				encountered_lrp = TRUE;
			if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious))) {
				if (encountered_lrp)
					CLUSTER_STAT(pages_at_higher_offsets++;)
				else
					CLUSTER_STAT(pages_at_lower_offsets++;)
			}
#endif
			hw_dirty = refmod_state & VM_MEM_MODIFIED;
			dirty = hw_dirty ? TRUE : dst_page->dirty;

			if (dst_page->phys_page > upl->highest_page)
				upl->highest_page = dst_page->phys_page;

			if (cntrl_flags & UPL_SET_LITE) {
				unsigned int	pg_num;

				pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
				assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
				lite_list[pg_num>>5] |= 1 << (pg_num & 31);

				if (hw_dirty)
					pmap_clear_modify(dst_page->phys_page);

				/*
				 * Mark original page as cleaning
				 * in place.
				 */
				dst_page->cleaning = TRUE;
				dst_page->precious = FALSE;
			} else {
				/*
				 * use pageclean setup, it is more
				 * convenient even for the pageout
				 * cases here
				 */
				vm_object_lock(upl->map_object);
				vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
				vm_object_unlock(upl->map_object);

				alias_page->absent = FALSE;
				alias_page = NULL;
			}
#if MACH_PAGEMAP
			/*
			 * Record that this page has been
			 * written out
			 */
			vm_external_state_set(object->existence_map, dst_page->offset);
#endif  /*MACH_PAGEMAP*/
			if (dirty) {
				SET_PAGE_DIRTY(dst_page, FALSE);
			} else {
				dst_page->dirty = FALSE;
			}

			if (!dirty)
				dst_page->precious = TRUE;

			if ( (cntrl_flags & UPL_ENCRYPT) ) {
				/*
				 * We want to deny access to the target page
				 * because its contents are about to be
				 * encrypted and the user would be very
				 * confused to see encrypted data instead
				 * of their data.
				 * We also set "encrypted_cleaning" to allow
				 * vm_pageout_scan() to demote that page
				 * from "adjacent/clean-in-place" to
				 * "target/clean-and-free" if it bumps into
				 * this page during its scanning while we're
				 * still processing this cluster.
				 */
				dst_page->busy = TRUE;
				dst_page->encrypted_cleaning = TRUE;
			}
			if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
				if ( !VM_PAGE_WIRED(dst_page))
					dst_page->pageout = TRUE;
			}
		} else {
			if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
				/*
				 * Honor copy-on-write obligations
				 *
				 * The copy object has changed since we
				 * last synchronized for copy-on-write.
				 * Another copy object might have been
				 * inserted while we released the object's
				 * lock.  Since someone could have seen the
				 * original contents of the remaining pages
				 * through that new object, we have to
				 * synchronize with it again for the remaining
				 * pages only.  The previous pages are "busy"
				 * so they can not be seen through the new
				 * mapping.  The new mapping will see our
				 * upcoming changes for those previous pages,
				 * but that's OK since they couldn't see what
				 * was there before.  It's just a race anyway
				 * and there's no guarantee of consistency or
				 * atomicity.  We just don't want new mappings
				 * to see both the *before* and *after* pages.
				 */
				if (object->copy != VM_OBJECT_NULL) {
					vm_object_update(
						object,
						dst_offset,/* current offset */
						xfer_size, /* remaining size */
						NULL,
						NULL,
						FALSE,	   /* should_return */
						MEMORY_OBJECT_COPY_SYNC,
						VM_PROT_NO_CHANGE);

#if DEVELOPMENT || DEBUG
					upl_cow_again++;
					upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
#endif
				}
				/*
				 * remember the copy object we synced with
				 */
				last_copy_object = object->copy;
			}
			dst_page = vm_page_lookup(object, dst_offset);

			if (dst_page != VM_PAGE_NULL) {

				if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
					/*
					 * skip over pages already present in the cache
					 */
					if (user_page_list)
						user_page_list[entry].phys_addr = 0;

					goto try_next_page;
				}
				if (dst_page->fictitious) {
					panic("need corner case for fictitious page");
				}

				if (dst_page->busy || dst_page->cleaning) {
					/*
					 * someone else is playing with the
					 * page.  We will have to wait.
					 */
					PAGE_SLEEP(object, dst_page, THREAD_UNINT);

					continue;
				}
				if (dst_page->laundry) {
					dst_page->pageout = FALSE;

					vm_pageout_steal_laundry(dst_page, FALSE);
				}
			} else {
				if (object->private) {
					/*
					 * This is a nasty wrinkle for users
					 * of upl who encounter device or
					 * private memory however, it is
					 * unavoidable, only a fault can
					 * resolve the actual backing
					 * physical page by asking the
					 * backing device.
					 */
					if (user_page_list)
						user_page_list[entry].phys_addr = 0;

					goto try_next_page;
				}
				if (object->scan_collisions) {
					/*
					 * the pageout_scan thread is trying to steal
					 * pages from this object, but has run into our
					 * lock... grab 2 pages from the head of the object...
					 * the first is freed on behalf of pageout_scan, the
					 * 2nd is for our own use... we use vm_object_page_grab
					 * in both cases to avoid taking pages from the free
					 * list since we are under memory pressure and our
					 * lock on this object is getting in the way of
					 * relieving it
					 */
					dst_page = vm_object_page_grab(object);

					if (dst_page != VM_PAGE_NULL)
						vm_page_release(dst_page);

					dst_page = vm_object_page_grab(object);
				}
				if (dst_page == VM_PAGE_NULL) {
					/*
					 * need to allocate a page
					 */
					dst_page = vm_page_grab();
				}
				if (dst_page == VM_PAGE_NULL) {
					if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
						/*
						 * we don't want to stall waiting for pages to come onto the free list
						 * while we're already holding absent pages in this UPL
						 * the caller will deal with the empty slots
						 */
						if (user_page_list)
							user_page_list[entry].phys_addr = 0;

						goto try_next_page;
					}
					/*
					 * no pages available... wait
					 * then try again for the same
					 * offset...
					 */
					vm_object_unlock(object);

					OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);

					VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);

					VM_PAGE_WAIT();
					OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

					VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);

					vm_object_lock(object);

					continue;
				}
				vm_page_insert(dst_page, object, dst_offset);

				dst_page->absent = TRUE;
				dst_page->busy = FALSE;

				if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
					/*
					 * if UPL_RET_ONLY_ABSENT was specified,
					 * than we're definitely setting up a
					 * upl for a clustered read/pagein
					 * operation... mark the pages as clustered
					 * so upl_commit_range can put them on the
					 * speculative list
					 */
					dst_page->clustered = TRUE;

					if ( !(cntrl_flags & UPL_FILE_IO))
						VM_STAT_INCR(pageins);
				}
			}
			if (cntrl_flags & UPL_ENCRYPT) {
				/*
				 * The page is going to be encrypted when we
				 * get it from the pager, so mark it so.
				 */
				dst_page->encrypted = TRUE;
			} else {
				/*
				 * Otherwise, the page will not contain
				 * encrypted data.
				 */
				dst_page->encrypted = FALSE;
			}
			dst_page->overwriting = TRUE;

			if (dst_page->pmapped) {
				if ( !(cntrl_flags & UPL_FILE_IO))
					/*
					 * eliminate all mappings from the
					 * original object and its prodigy
					 */
					refmod_state = pmap_disconnect(dst_page->phys_page);
				else
					refmod_state = pmap_get_refmod(dst_page->phys_page);
			} else
				refmod_state = 0;

			hw_dirty = refmod_state & VM_MEM_MODIFIED;
			dirty = hw_dirty ? TRUE : dst_page->dirty;

			if (cntrl_flags & UPL_SET_LITE) {
				unsigned int	pg_num;

				pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
				assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
				lite_list[pg_num>>5] |= 1 << (pg_num & 31);

				if (hw_dirty)
					pmap_clear_modify(dst_page->phys_page);

				/*
				 * Mark original page as cleaning
				 * in place.
				 */
				dst_page->cleaning = TRUE;
				dst_page->precious = FALSE;
			} else {
				/*
				 * use pageclean setup, it is more
				 * convenient even for the pageout
				 * cases here
				 */
				vm_object_lock(upl->map_object);
				vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
				vm_object_unlock(upl->map_object);

				alias_page->absent = FALSE;
				alias_page = NULL;
			}
			if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
				upl->flags &= ~UPL_CLEAR_DIRTY;
				upl->flags |= UPL_SET_DIRTY;
				dirty = TRUE;
				upl->flags |= UPL_SET_DIRTY;
			} else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
				/*
				 * clean in place for read implies
				 * that a write will be done on all
				 * the pages that are dirty before
				 * a upl commit is done.  The caller
				 * is obligated to preserve the
				 * contents of all pages marked dirty
				 */
				upl->flags |= UPL_CLEAR_DIRTY;
			}
			dst_page->dirty = dirty;

			if (!dirty)
				dst_page->precious = TRUE;

			if ( !VM_PAGE_WIRED(dst_page)) {
				/*
				 * deny access to the target page while
				 * it is being worked on
				 */
				dst_page->busy = TRUE;
			} else
				dwp->dw_mask |= DW_vm_page_wire;

			/*
			 * We might be about to satisfy a fault which has been
			 * requested. So no need for the "restart" bit.
			 */
			dst_page->restart = FALSE;
			if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
				/*
				 * expect the page to be used
				 */
				dwp->dw_mask |= DW_set_reference;
			}
			if (cntrl_flags & UPL_PRECIOUS) {
				if (dst_page->object->internal) {
					SET_PAGE_DIRTY(dst_page, FALSE);
					dst_page->precious = FALSE;
				} else {
					dst_page->precious = TRUE;
				}
			} else {
				dst_page->precious = FALSE;
			}
		}
		if (dst_page->busy)
			upl->flags |= UPL_HAS_BUSY;

		if (dst_page->phys_page > upl->highest_page)
			upl->highest_page = dst_page->phys_page;
		if (user_page_list) {
			user_page_list[entry].phys_addr = dst_page->phys_page;
			user_page_list[entry].pageout	= dst_page->pageout;
			user_page_list[entry].absent	= dst_page->absent;
			user_page_list[entry].dirty	= dst_page->dirty;
			user_page_list[entry].precious	= dst_page->precious;
			user_page_list[entry].device	= FALSE;
			user_page_list[entry].needed	= FALSE;
			if (dst_page->clustered == TRUE)
				user_page_list[entry].speculative = dst_page->speculative;
			else
				user_page_list[entry].speculative = FALSE;
			user_page_list[entry].cs_validated = dst_page->cs_validated;
			user_page_list[entry].cs_tainted = dst_page->cs_tainted;
			user_page_list[entry].cs_nx = dst_page->cs_nx;
		}
		/*
		 * if UPL_RET_ONLY_ABSENT is set, then
		 * we are working with a fresh page and we've
		 * just set the clustered flag on it to
		 * indicate that it was drug in as part of a
		 * speculative cluster... so leave it alone
		 */
		if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
			/*
			 * someone is explicitly grabbing this page...
			 * update clustered and speculative state
			 */
			if (dst_page->clustered)
				VM_PAGE_CONSUME_CLUSTERED(dst_page);
		}
try_next_page:
		if (dwp->dw_mask) {
			if (dwp->dw_mask & DW_vm_page_activate)
				VM_STAT_INCR(reactivations);

			VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);

			if (dw_count >= dw_limit) {
				vm_page_do_delayed_work(object, &dw_array[0], dw_count);

				dwp = &dw_array[0];
				dw_count = 0;
			}
		}
		entry++;
		dst_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;
	}
	if (dw_count)
		vm_page_do_delayed_work(object, &dw_array[0], dw_count);

	if (alias_page != NULL) {
		VM_PAGE_FREE(alias_page);
	}

	if (page_list_count != NULL) {
		if (upl->flags & UPL_INTERNAL)
			*page_list_count = 0;
		else if (*page_list_count > entry)
			*page_list_count = entry;
	}
	vm_object_unlock(object);

	return KERN_SUCCESS;
}
/* JMM - Backward compatability for now */
kern_return_t
vm_fault_list_request(	/* forward */
	memory_object_control_t	control,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_t		**user_page_list_ptr,
	unsigned int		page_list_count,
	int			cntrl_flags);
kern_return_t
vm_fault_list_request(
	memory_object_control_t	control,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_t		**user_page_list_ptr,
	unsigned int		page_list_count,
	int			cntrl_flags)
{
	unsigned int		local_list_count;
	upl_page_info_t		*user_page_list;
	kern_return_t		kr;

	if((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)
		return KERN_INVALID_ARGUMENT;

	if (user_page_list_ptr != NULL) {
		local_list_count = page_list_count;
		user_page_list = *user_page_list_ptr;
	} else {
		local_list_count = 0;
		user_page_list = NULL;
	}
	kr = memory_object_upl_request(control,
				       offset,
				       size,
				       upl_ptr,
				       user_page_list,
				       &local_list_count,
				       cntrl_flags);

	if(kr != KERN_SUCCESS)
		return kr;

	if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
		*user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
	}

	return KERN_SUCCESS;
}
/*
 *	Routine:	vm_object_super_upl_request
 *	Purpose:
 *		Cause the population of a portion of a vm_object
 *		in much the same way as memory_object_upl_request.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		However, the region may be expanded up to the super
 *		cluster size provided.
 */

__private_extern__ kern_return_t
vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
		return KERN_FAILURE;

	assert(object->paging_in_progress);
	offset = offset - object->paging_offset;

	if (super_cluster > size) {

		vm_object_offset_t	base_offset;
		upl_size_t		super_size;
		vm_object_size_t	super_size_64;

		base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
		super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
		super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
		super_size = (upl_size_t) super_size_64;
		assert(super_size == super_size_64);

		if (offset > (base_offset + super_size)) {
			panic("vm_object_super_upl_request: Missed target pageout"
			      " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
			      offset, base_offset, super_size, super_cluster,
			      size, object->paging_offset);
		}
		/*
		 * apparently there is a case where the vm requests a
		 * page to be written out who's offset is beyond the
		 * object size
		 */
		if ((offset + size) > (base_offset + super_size)) {
			super_size_64 = (offset + size) - base_offset;
			super_size = (upl_size_t) super_size_64;
			assert(super_size == super_size_64);
		}

		offset = base_offset;
		size = super_size;
	}
	return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
}
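
/*
 * Worked example (descriptive only): with a 64KB super_cluster, a 4KB request
 * at offset 0x11000 yields base_offset 0x10000 and super_size 0x10000.  A
 * request that straddles the next boundary (e.g. 8KB at 0x1F000) doubles
 * super_size to 0x20000, which is then clipped to the end of the object and,
 * if need be, grown again so the original request is always covered.
 */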
kern_return_t
vm_map_create_upl(
	vm_map_t		map,
	vm_map_address_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	int			*flags)
{
	vm_map_entry_t	entry;
	int		caller_flags;
	int		force_data_sync;
	int		sync_cow_data;
	vm_object_t	local_object;
	vm_map_offset_t	local_offset;
	vm_map_offset_t	local_start;
	kern_return_t	ret;

	caller_flags = *flags;

	if (caller_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}
	force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
	sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);

	if (upl == NULL)
		return KERN_INVALID_ARGUMENT;

REDISCOVER_ENTRY:
	vm_map_lock_read(map);

	if (vm_map_lookup_entry(map, offset, &entry)) {

		if ((entry->vme_end - offset) < *upl_size) {
			*upl_size = (upl_size_t) (entry->vme_end - offset);
			assert(*upl_size == entry->vme_end - offset);
		}

		if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
			*flags = 0;

			if ( !entry->is_sub_map && entry->object.vm_object != VM_OBJECT_NULL) {
				if (entry->object.vm_object->private)
					*flags = UPL_DEV_MEMORY;

				if (entry->object.vm_object->phys_contiguous)
					*flags |= UPL_PHYS_CONTIG;
			}
			vm_map_unlock_read(map);

			return KERN_SUCCESS;
		}

		if (entry->is_sub_map) {
			vm_map_t	submap;

			submap = entry->object.sub_map;
			local_start = entry->vme_start;
			local_offset = entry->offset;

			vm_map_reference(submap);
			vm_map_unlock_read(map);

			ret = vm_map_create_upl(submap,
						local_offset + (offset - local_start),
						upl_size, upl, page_list, count, flags);
			vm_map_deallocate(submap);

			return ret;
		}

		if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
			if (*upl_size > MAX_UPL_SIZE_BYTES)
				*upl_size = MAX_UPL_SIZE_BYTES;
		}
		/*
		 *      Create an object if necessary.
		 */
		if (entry->object.vm_object == VM_OBJECT_NULL) {

			if (vm_map_lock_read_to_write(map))
				goto REDISCOVER_ENTRY;

			entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
			entry->offset = 0;

			vm_map_lock_write_to_read(map);
		}
		if (!(caller_flags & UPL_COPYOUT_FROM)) {
			if (!(entry->protection & VM_PROT_WRITE)) {
				vm_map_unlock_read(map);
				return KERN_PROTECTION_FAILURE;
			}

			local_object = entry->object.vm_object;
			if (vm_map_entry_should_cow_for_true_share(entry) &&
			    local_object->vo_size > *upl_size &&
			    *upl_size != 0) {
				vm_prot_t	prot;

				/*
				 * Set up the targeted range for copy-on-write to avoid
				 * applying true_share/copy_delay to the entire object.
				 */

				if (vm_map_lock_read_to_write(map)) {
					goto REDISCOVER_ENTRY;
				}

				vm_map_clip_start(map,
						  entry,
						  vm_map_trunc_page(offset,
								    VM_MAP_PAGE_MASK(map)));
				vm_map_clip_end(map,
						entry,
						vm_map_round_page(offset + *upl_size,
								  VM_MAP_PAGE_MASK(map)));
				if ((entry->vme_end - offset) < *upl_size) {
					*upl_size = (upl_size_t) (entry->vme_end - offset);
					assert(*upl_size == entry->vme_end - offset);
				}

				prot = entry->protection & ~VM_PROT_WRITE;
				if (override_nx(map, entry->alias) && prot)
					prot |= VM_PROT_EXECUTE;
				vm_object_pmap_protect(local_object,
						       entry->offset,
						       entry->vme_end - entry->vme_start,
						       ((entry->is_shared || map->mapped_in_other_pmaps)
							? PMAP_NULL
							: map->pmap),
						       entry->vme_start,
						       prot);
				entry->needs_copy = TRUE;

				vm_map_lock_write_to_read(map);
			}

			if (entry->needs_copy) {
				/*
				 * Honor copy-on-write for COPY_SYMMETRIC
				 * strategy.
				 */
				vm_map_t		local_map;
				vm_object_t		object;
				vm_object_offset_t	new_offset;
				vm_prot_t		prot;
				boolean_t		wired;
				vm_map_version_t	version;
				vm_map_t		real_map;
				vm_prot_t		fault_type;

				local_map = map;

				if (caller_flags & UPL_COPYOUT_FROM) {
					fault_type = VM_PROT_READ | VM_PROT_COPY;
					vm_counters.create_upl_extra_cow++;
					vm_counters.create_upl_extra_cow_pages += (entry->vme_end - entry->vme_start) / PAGE_SIZE;
				} else {
					fault_type = VM_PROT_WRITE;
				}
				if (vm_map_lookup_locked(&local_map,
							 offset, fault_type,
							 OBJECT_LOCK_EXCLUSIVE,
							 &version, &object,
							 &new_offset, &prot, &wired,
							 NULL,
							 &real_map) != KERN_SUCCESS) {
					if (fault_type == VM_PROT_WRITE) {
						vm_counters.create_upl_lookup_failure_write++;
					} else {
						vm_counters.create_upl_lookup_failure_copy++;
					}
					vm_map_unlock_read(local_map);
					return KERN_FAILURE;
				}
				if (real_map != map)
					vm_map_unlock(real_map);
				vm_map_unlock_read(local_map);

				vm_object_unlock(object);

				goto REDISCOVER_ENTRY;
			}
		}
		if (sync_cow_data) {
			if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
				local_object = entry->object.vm_object;
				local_start = entry->vme_start;
				local_offset = entry->offset;

				vm_object_reference(local_object);
				vm_map_unlock_read(map);

				if (local_object->shadow && local_object->copy) {
					vm_object_lock_request(
						local_object->shadow,
						(vm_object_offset_t)
						((offset - local_start) +
						 local_offset) +
						local_object->vo_shadow_offset,
						*upl_size, FALSE,
						MEMORY_OBJECT_DATA_SYNC,
						VM_PROT_NO_CHANGE);
				}
				sync_cow_data = FALSE;
				vm_object_deallocate(local_object);

				goto REDISCOVER_ENTRY;
			}
		}
		if (force_data_sync) {
			local_object = entry->object.vm_object;
			local_start = entry->vme_start;
			local_offset = entry->offset;

			vm_object_reference(local_object);
			vm_map_unlock_read(map);

			vm_object_lock_request(
				local_object,
				(vm_object_offset_t)
				((offset - local_start) + local_offset),
				(vm_object_size_t)*upl_size, FALSE,
				MEMORY_OBJECT_DATA_SYNC,
				VM_PROT_NO_CHANGE);

			force_data_sync = FALSE;
			vm_object_deallocate(local_object);

			goto REDISCOVER_ENTRY;
		}
		if (entry->object.vm_object->private)
			*flags = UPL_DEV_MEMORY;
		else
			*flags = 0;

		if (entry->object.vm_object->phys_contiguous)
			*flags |= UPL_PHYS_CONTIG;

		local_object = entry->object.vm_object;
		local_offset = entry->offset;
		local_start = entry->vme_start;

		vm_object_reference(local_object);
		vm_map_unlock_read(map);

		ret = vm_object_iopl_request(local_object,
					     (vm_object_offset_t) ((offset - local_start) + local_offset),
					     *upl_size,
					     upl,
					     page_list,
					     count,
					     caller_flags);
		vm_object_deallocate(local_object);

		return(ret);
	}
	vm_map_unlock_read(map);

	return(KERN_FAILURE);
}
/*
 * Internal routine to enter a UPL into a VM map.
 *
 * JMM - This should just be doable through the standard
 * vm_map_enter() API.
 */
kern_return_t
vm_map_enter_upl(
	vm_map_t		map,
	upl_t			upl,
	vm_map_offset_t		*dst_addr)
{
	vm_map_size_t		size;
	vm_object_offset_t	offset;
	vm_map_offset_t		addr;
	vm_page_t		m;
	kern_return_t		kr;
	int			isVectorUPL = 0, curr_upl=0;
	upl_t			vector_upl = NULL;
	vm_offset_t		vector_upl_dst_addr = 0;
	vm_map_t		vector_upl_submap = NULL;
	upl_offset_t		subupl_offset = 0;
	upl_size_t		subupl_size = 0;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if((isVectorUPL = vector_upl_is_valid(upl))) {
		int mapped=0,valid_upls=0;
		vector_upl = upl;

		upl_lock(vector_upl);
		for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
			upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
			if(upl == NULL)
				continue;
			valid_upls++;
			if (UPL_PAGE_LIST_MAPPED & upl->flags)
				mapped++;
		}

		if(mapped) {
			if(mapped != valid_upls)
				panic("Only %d of the %d sub-upls within the Vector UPL are alread mapped\n", mapped, valid_upls);
			else {
				upl_unlock(vector_upl);
				return KERN_FAILURE;
			}
		}

		kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
		if( kr != KERN_SUCCESS )
			panic("Vector UPL submap allocation failed\n");
		map = vector_upl_submap;
		vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
		curr_upl=0;
	}
	else
		upl_lock(upl);

process_upl_to_enter:
	if(isVectorUPL){
		if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
			*dst_addr = vector_upl_dst_addr;
			upl_unlock(vector_upl);
			return KERN_SUCCESS;
		}
		upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
		if(upl == NULL)
			goto process_upl_to_enter;

		vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
		*dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
	} else {
		/*
		 * check to see if already mapped
		 */
		if (UPL_PAGE_LIST_MAPPED & upl->flags) {
			upl_unlock(upl);
			return KERN_FAILURE;
		}
	}
	if ((!(upl->flags & UPL_SHADOWED)) &&
	    ((upl->flags & UPL_HAS_BUSY) ||
	     !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {

		vm_object_t		object;
		vm_page_t		alias_page;
		vm_object_offset_t	new_offset;
		unsigned int		pg_num;
		wpl_array_t		lite_list;

		if (upl->flags & UPL_INTERNAL) {
			lite_list = (wpl_array_t)
				((((uintptr_t)upl) + sizeof(struct upl))
				 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
		} else {
			lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
		}
		object = upl->map_object;
		upl->map_object = vm_object_allocate(upl->size);

		vm_object_lock(upl->map_object);

		upl->map_object->shadow = object;
		upl->map_object->pageout = TRUE;
		upl->map_object->can_persist = FALSE;
		upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
		upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset;
		upl->map_object->wimg_bits = object->wimg_bits;
		offset = upl->map_object->vo_shadow_offset;
		new_offset = 0;
		size = upl->size;

		upl->flags |= UPL_SHADOWED;

		while (size) {
			pg_num = (unsigned int) (new_offset / PAGE_SIZE);
			assert(pg_num == new_offset / PAGE_SIZE);

			if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {

				VM_PAGE_GRAB_FICTITIOUS(alias_page);

				vm_object_lock(object);

				m = vm_page_lookup(object, offset);
				if (m == VM_PAGE_NULL) {
					panic("vm_upl_map: page missing\n");
				}

				/*
				 * Convert the fictitious page to a private
				 * shadow of the real page.
				 */
				assert(alias_page->fictitious);
				alias_page->fictitious = FALSE;
				alias_page->private = TRUE;
				alias_page->pageout = TRUE;
				/*
				 * since m is a page in the upl it must
				 * already be wired or BUSY, so it's
				 * safe to assign the underlying physical
				 * page to the alias
				 */
				alias_page->phys_page = m->phys_page;

				vm_object_unlock(object);

				vm_page_lockspin_queues();
				vm_page_wire(alias_page);
				vm_page_unlock_queues();

				/*
				 * The virtual page ("m") has to be wired in some way
				 * here or its physical page ("m->phys_page") could
				 * be recycled at any time.
				 * Assuming this is enforced by the caller, we can't
				 * get an encrypted page here.  Since the encryption
				 * key depends on the VM page's "pager" object and
				 * the "paging_offset", we couldn't handle 2 pageable
				 * VM pages (with different pagers and paging_offsets)
				 * sharing the same physical page:  we could end up
				 * encrypting with one key (via one VM page) and
				 * decrypting with another key (via the alias VM page).
				 */
				ASSERT_PAGE_DECRYPTED(m);

				vm_page_insert(alias_page, upl->map_object, new_offset);

				assert(!alias_page->wanted);
				alias_page->busy = FALSE;
				alias_page->absent = FALSE;
			}
			size -= PAGE_SIZE;
			offset += PAGE_SIZE_64;
			new_offset += PAGE_SIZE_64;
		}
		vm_object_unlock(upl->map_object);
	}
	if (upl->flags & UPL_SHADOWED)
		offset = 0;
	else
		offset = upl->offset - upl->map_object->paging_offset;

	size = upl->size;

	vm_object_reference(upl->map_object);

	if(!isVectorUPL) {
		*dst_addr = 0;
		/*
		 * NEED A UPL_MAP ALIAS
		 */
		kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
				  VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
				  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

		if (kr != KERN_SUCCESS) {
			upl_unlock(upl);
			return(kr);
		}
	}
	else {
		kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
				  VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
				  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
		if(kr)
			panic("vm_map_enter failed for a Vector UPL\n");
	}
	vm_object_lock(upl->map_object);

	for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
		m = vm_page_lookup(upl->map_object, offset);

		if (m) {
			m->pmapped = TRUE;

			/* CODE SIGNING ENFORCEMENT: page has been wpmapped,
			 * but only in kernel space. If this was on a user map,
			 * we'd have to set the wpmapped bit. */
			/* m->wpmapped = TRUE; */
			assert(map->pmap == kernel_pmap);

			PMAP_ENTER(map->pmap, addr, m, VM_PROT_DEFAULT, VM_PROT_NONE, 0, TRUE);
		}
		offset += PAGE_SIZE_64;
	}
	vm_object_unlock(upl->map_object);

	/*
	 * hold a reference for the mapping
	 */
	upl->ref_count++;
	upl->flags |= UPL_PAGE_LIST_MAPPED;
	upl->kaddr = (vm_offset_t) *dst_addr;
	assert(upl->kaddr == *dst_addr);

	if(isVectorUPL)
		goto process_upl_to_enter;

	upl_unlock(upl);

	return KERN_SUCCESS;
}
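
/*
 * Note (descriptive only): the ref_count taken above under "hold a reference
 * for the mapping" is the same reference that vm_map_remove_upl() drops with
 * its "removing mapping ref" decrement, so a mapped UPL always holds one
 * reference beyond the one its creator owns.
 */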
/*
 * Internal routine to remove a UPL mapping from a VM map.
 *
 * XXX - This should just be doable through a standard
 * vm_map_remove() operation.  Otherwise, implicit clean-up
 * of the target map won't be able to correctly remove
 * these (and release the reference on the UPL).  Having
 * to do this means we can't map these into user-space
 * maps yet.
 */
kern_return_t
vm_map_remove_upl(
	vm_map_t	map,
	upl_t		upl)
{
	vm_address_t	addr;
	upl_size_t	size;
	int		isVectorUPL = 0, curr_upl = 0;
	upl_t		vector_upl = NULL;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if((isVectorUPL = vector_upl_is_valid(upl))) {
		int	unmapped=0, valid_upls=0;
		vector_upl = upl;
		upl_lock(vector_upl);
		for(curr_upl=0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
			upl = vector_upl_subupl_byindex(vector_upl, curr_upl );
			if(upl == NULL)
				continue;
			valid_upls++;
			if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
				unmapped++;
		}

		if(unmapped) {
			if(unmapped != valid_upls)
				panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
			else {
				upl_unlock(vector_upl);
				return KERN_FAILURE;
			}
		}
		curr_upl=0;
	}
	else
		upl_lock(upl);

process_upl_to_remove:
	if(isVectorUPL) {
		if(curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
			vm_map_t v_upl_submap;
			vm_offset_t v_upl_submap_dst_addr;
			vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);

			vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
			vm_map_deallocate(v_upl_submap);
			upl_unlock(vector_upl);
			return KERN_SUCCESS;
		}

		upl = vector_upl_subupl_byindex(vector_upl, curr_upl++ );
		if(upl == NULL)
			goto process_upl_to_remove;
	}

	if (upl->flags & UPL_PAGE_LIST_MAPPED) {
		addr = upl->kaddr;
		size = upl->size;

		assert(upl->ref_count > 1);
		upl->ref_count--;		/* removing mapping ref */

		upl->flags &= ~UPL_PAGE_LIST_MAPPED;
		upl->kaddr = (vm_offset_t) 0;

		if(!isVectorUPL) {
			upl_unlock(upl);

			vm_map_remove(
				map,
				vm_map_trunc_page(addr,
						  VM_MAP_PAGE_MASK(map)),
				vm_map_round_page(addr + size,
						  VM_MAP_PAGE_MASK(map)),
				VM_MAP_NO_FLAGS);

			return KERN_SUCCESS;
		}
		else {
			/*
			 * If it's a Vectored UPL, we'll be removing the entire
			 * submap anyways, so no need to remove individual UPL
			 * element mappings from within the submap
			 */
			goto process_upl_to_remove;
		}
	}
	upl_unlock(upl);

	return KERN_FAILURE;
}
6141 upl_offset_t offset
,
6144 upl_page_info_t
*page_list
,
6145 mach_msg_type_number_t count
,
6148 upl_size_t xfer_size
, subupl_size
= size
;
6149 vm_object_t shadow_object
;
6151 vm_object_offset_t target_offset
;
6152 upl_offset_t subupl_offset
= offset
;
6154 wpl_array_t lite_list
;
6156 int clear_refmod
= 0;
6157 int pgpgout_count
= 0;
6158 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
6159 struct vm_page_delayed_work
*dwp
;
6162 int isVectorUPL
= 0;
6163 upl_t vector_upl
= NULL
;
6164 boolean_t should_be_throttled
= FALSE
;
6166 vm_page_t nxt_page
= VM_PAGE_NULL
;
6167 int fast_path_possible
= 0;
6168 int fast_path_full_commit
= 0;
6169 int throttle_page
= 0;
6170 int unwired_count
= 0;
6171 int local_queue_count
= 0;
6172 queue_head_t local_queue
;
6176 if (upl
== UPL_NULL
)
6177 return KERN_INVALID_ARGUMENT
;
6182 if((isVectorUPL
= vector_upl_is_valid(upl
))) {
6184 upl_lock(vector_upl
);
6189 process_upl_to_commit
:
6193 offset
= subupl_offset
;
6195 upl_unlock(vector_upl
);
6196 return KERN_SUCCESS
;
6198 upl
= vector_upl_subupl_byoffset(vector_upl
, &offset
, &size
);
6200 upl_unlock(vector_upl
);
6201 return KERN_FAILURE
;
6203 page_list
= UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl
);
6204 subupl_size
-= size
;
6205 subupl_offset
+= size
;
6209 if (upl
->upl_commit_index
< UPL_DEBUG_COMMIT_RECORDS
) {
6210 (void) OSBacktrace(&upl
->upl_commit_records
[upl
->upl_commit_index
].c_retaddr
[0], UPL_DEBUG_STACK_FRAMES
);
6212 upl
->upl_commit_records
[upl
->upl_commit_index
].c_beg
= offset
;
6213 upl
->upl_commit_records
[upl
->upl_commit_index
].c_end
= (offset
+ size
);
6215 upl
->upl_commit_index
++;
6218 if (upl
->flags
& UPL_DEVICE_MEMORY
)
6220 else if ((offset
+ size
) <= upl
->size
)
6226 upl_unlock(vector_upl
);
6228 return KERN_FAILURE
;
6230 if (upl
->flags
& UPL_SET_DIRTY
)
6231 flags
|= UPL_COMMIT_SET_DIRTY
;
6232 if (upl
->flags
& UPL_CLEAR_DIRTY
)
6233 flags
|= UPL_COMMIT_CLEAR_DIRTY
;
6235 if (upl
->flags
& UPL_INTERNAL
)
6236 lite_list
= (wpl_array_t
) ((((uintptr_t)upl
) + sizeof(struct upl
))
6237 + ((upl
->size
/PAGE_SIZE
) * sizeof(upl_page_info_t
)));
6239 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
6241 object
= upl
->map_object
;
6243 if (upl
->flags
& UPL_SHADOWED
) {
6244 vm_object_lock(object
);
6245 shadow_object
= object
->shadow
;
6247 shadow_object
= object
;
6249 entry
= offset
/PAGE_SIZE
;
6250 target_offset
= (vm_object_offset_t
)offset
;
6252 if (upl
->flags
& UPL_KERNEL_OBJECT
)
6253 vm_object_lock_shared(shadow_object
);
6255 vm_object_lock(shadow_object
);
6257 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
6258 assert(shadow_object
->blocked_access
);
6259 shadow_object
->blocked_access
= FALSE
;
6260 vm_object_wakeup(object
, VM_OBJECT_EVENT_UNBLOCKED
);
6263 if (shadow_object
->code_signed
) {
6266 * If the object is code-signed, do not let this UPL tell
6267 * us if the pages are valid or not. Let the pages be
6268 * validated by VM the normal way (when they get mapped or
6271 flags
&= ~UPL_COMMIT_CS_VALIDATED
;
6275 * No page list to get the code-signing info from !?
6277 flags
&= ~UPL_COMMIT_CS_VALIDATED
;
6279 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default
) && shadow_object
->internal
)
6280 should_be_throttled
= TRUE
;
6284 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
6286 if ((upl
->flags
& UPL_IO_WIRE
) &&
6287 !(flags
& UPL_COMMIT_FREE_ABSENT
) &&
6289 shadow_object
->purgable
!= VM_PURGABLE_VOLATILE
&&
6290 shadow_object
->purgable
!= VM_PURGABLE_EMPTY
) {
6292 if (!queue_empty(&shadow_object
->memq
)) {
6293 queue_init(&local_queue
);
6294 if (size
== shadow_object
->vo_size
) {
6295 nxt_page
= (vm_page_t
)queue_first(&shadow_object
->memq
);
6296 fast_path_full_commit
= 1;
6298 fast_path_possible
= 1;
6300 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default
) && shadow_object
->internal
&&
6301 (shadow_object
->purgable
== VM_PURGABLE_DENY
||
6302 shadow_object
->purgable
== VM_PURGABLE_NONVOLATILE
||
6303 shadow_object
->purgable
== VM_PURGABLE_VOLATILE
)) {
6317 if (upl
->flags
& UPL_LITE
) {
6318 unsigned int pg_num
;
6320 if (nxt_page
!= VM_PAGE_NULL
) {
6322 nxt_page
= (vm_page_t
)queue_next(&nxt_page
->listq
);
6323 target_offset
= m
->offset
;
6325 pg_num
= (unsigned int) (target_offset
/PAGE_SIZE
);
6326 assert(pg_num
== target_offset
/PAGE_SIZE
);
6328 if (lite_list
[pg_num
>>5] & (1 << (pg_num
& 31))) {
6329 lite_list
[pg_num
>>5] &= ~(1 << (pg_num
& 31));
6331 if (!(upl
->flags
& UPL_KERNEL_OBJECT
) && m
== VM_PAGE_NULL
)
6332 m
= vm_page_lookup(shadow_object
, target_offset
+ (upl
->offset
- shadow_object
->paging_offset
));
6336 if (upl
->flags
& UPL_SHADOWED
) {
6337 if ((t
= vm_page_lookup(object
, target_offset
)) != VM_PAGE_NULL
) {
6343 if (!(upl
->flags
& UPL_KERNEL_OBJECT
) && m
== VM_PAGE_NULL
)
6344 m
= vm_page_lookup(shadow_object
, target_offset
+ object
->vo_shadow_offset
);
6347 if (m
== VM_PAGE_NULL
)
6348 goto commit_next_page
;
6350 if (m
->compressor
) {
6353 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
6354 goto commit_next_page
;
6357 if (flags
& UPL_COMMIT_CS_VALIDATED
) {
6360 * Set the code signing bits according to
6361 * what the UPL says they should be.
6363 m
->cs_validated
= page_list
[entry
].cs_validated
;
6364 m
->cs_tainted
= page_list
[entry
].cs_tainted
;
6365 m
->cs_nx
= page_list
[entry
].cs_nx
;
6367 if (flags
& UPL_COMMIT_WRITTEN_BY_KERNEL
)
6368 m
->written_by_kernel
= TRUE
;
6370 if (upl
->flags
& UPL_IO_WIRE
) {
6373 page_list
[entry
].phys_addr
= 0;
6375 if (flags
& UPL_COMMIT_SET_DIRTY
) {
6376 SET_PAGE_DIRTY(m
, FALSE
);
6377 } else if (flags
& UPL_COMMIT_CLEAR_DIRTY
) {
6380 if (! (flags
& UPL_COMMIT_CS_VALIDATED
) &&
6381 m
->cs_validated
&& !m
->cs_tainted
) {
6384 * This page is no longer dirty
6385 * but could have been modified,
6386 * so it will need to be
6390 panic("upl_commit_range(%p): page %p was slid\n",
6394 m
->cs_validated
= FALSE
;
6395 #if DEVELOPMENT || DEBUG
6396 vm_cs_validated_resets
++;
6398 pmap_disconnect(m
->phys_page
);
6400 clear_refmod
|= VM_MEM_MODIFIED
;
6402 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
6404 * We blocked access to the pages in this UPL.
6405 * Clear the "busy" bit and wake up any waiter
6408 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
6410 if (fast_path_possible
) {
6411 assert(m
->object
->purgable
!= VM_PURGABLE_EMPTY
);
6412 assert(m
->object
->purgable
!= VM_PURGABLE_VOLATILE
);
6414 assert(m
->wire_count
== 0);
6418 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
6420 if (m
->wire_count
== 0)
6421 panic("wire_count == 0, m = %p, obj = %p\n", m
, shadow_object
);
6424 * XXX FBDP need to update some other
6425 * counters here (purgeable_wired_count)
6428 assert(m
->wire_count
);
6431 if (m
->wire_count
== 0)
6434 if (m
->wire_count
== 0) {
6435 queue_enter(&local_queue
, m
, vm_page_t
, pageq
);
6436 local_queue_count
++;
6438 if (throttle_page
) {
6439 m
->throttled
= TRUE
;
6441 if (flags
& UPL_COMMIT_INACTIVATE
)
6448 if (flags
& UPL_COMMIT_INACTIVATE
) {
6449 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
6450 clear_refmod
|= VM_MEM_REFERENCED
;
6453 if (flags
& UPL_COMMIT_FREE_ABSENT
)
6454 dwp
->dw_mask
|= DW_vm_page_free
;
6457 dwp
->dw_mask
|= (DW_clear_busy
| DW_PAGE_WAKEUP
);
6459 if ( !(dwp
->dw_mask
& DW_vm_page_deactivate_internal
))
6460 dwp
->dw_mask
|= DW_vm_page_activate
;
6463 dwp
->dw_mask
|= DW_vm_page_unwire
;
6465 goto commit_next_page
;
6467 assert(!m
->compressor
);
6470 page_list
[entry
].phys_addr
= 0;
6473 * make sure to clear the hardware
6474 * modify or reference bits before
6475 * releasing the BUSY bit on this page
6476 * otherwise we risk losing a legitimate
6479 if (flags
& UPL_COMMIT_CLEAR_DIRTY
) {
6482 clear_refmod
|= VM_MEM_MODIFIED
;
6485 dwp
->dw_mask
|= DW_vm_pageout_throttle_up
;
6487 if (VM_PAGE_WIRED(m
))
6490 if (! (flags
& UPL_COMMIT_CS_VALIDATED
) &&
6491 m
->cs_validated
&& !m
->cs_tainted
) {
6494 * This page is no longer dirty
6495 * but could have been modified,
6496 * so it will need to be
6500 panic("upl_commit_range(%p): page %p was slid\n",
6504 m
->cs_validated
= FALSE
;
6505 #if DEVELOPMENT || DEBUG
6506 vm_cs_validated_resets
++;
6508 pmap_disconnect(m
->phys_page
);
6510 if (m
->overwriting
) {
6512 * the (COPY_OUT_FROM == FALSE) request_page_list case
6515 #if CONFIG_PHANTOM_CACHE
6516 if (m
->absent
&& !m
->object
->internal
)
6517 dwp
->dw_mask
|= DW_vm_phantom_cache_update
;
6521 dwp
->dw_mask
|= DW_clear_busy
;
6524 * alternate (COPY_OUT_FROM == FALSE) page_list case
6525 * Occurs when the original page was wired
6526 * at the time of the list request
6528 assert(VM_PAGE_WIRED(m
));
6530 dwp
->dw_mask
|= DW_vm_page_unwire
; /* reactivates */
6532 m
->overwriting
= FALSE
;
		if (m->encrypted_cleaning == TRUE) {
			m->encrypted_cleaning = FALSE;

			dwp->dw_mask |= DW_clear_busy | DW_PAGE_WAKEUP;
		}
		m->cleaning = FALSE;

		/*
		 * With the clean queue enabled, UPL_PAGEOUT should
		 * no longer set the pageout bit.  Its pages now go
		 * to the clean queue.
		 */
		assert(!(flags & UPL_PAGEOUT));

#if MACH_CLUSTER_STATS
		if (m->wanted) vm_pageout_target_collisions++;
#endif
		if ((flags & UPL_COMMIT_SET_DIRTY) ||
		    (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED))) {
			/*
			 * page was re-dirtied after we started
			 * the pageout... reactivate it since
			 * we don't know whether the on-disk
			 * copy matches what is now in memory
			 */
			SET_PAGE_DIRTY(m, FALSE);

			dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;
6565 if (upl
->flags
& UPL_PAGEOUT
) {
6566 CLUSTER_STAT(vm_pageout_target_page_dirtied
++;)
6567 VM_STAT_INCR(reactivations
);
6568 DTRACE_VM2(pgrec
, int, 1, (uint64_t *), NULL
);
6572 * page has been successfully cleaned
6573 * go ahead and free it for other use
6575 if (m
->object
->internal
) {
6576 DTRACE_VM2(anonpgout
, int, 1, (uint64_t *), NULL
);
6578 DTRACE_VM2(fspgout
, int, 1, (uint64_t *), NULL
);
6583 dwp
->dw_mask
|= DW_vm_page_free
;
6585 goto commit_next_page
;
6587 #if MACH_CLUSTER_STATS
6589 m
->dirty
= pmap_is_modified(m
->phys_page
);
6591 if (m
->dirty
) vm_pageout_cluster_dirtied
++;
6592 else vm_pageout_cluster_cleaned
++;
6593 if (m
->wanted
) vm_pageout_cluster_collisions
++;
6596 * It is a part of the semantic of COPYOUT_FROM
6597 * UPLs that a commit implies cache sync
6598 * between the vm page and the backing store
6599 * this can be used to strip the precious bit
6602 if ((upl
->flags
& UPL_PAGE_SYNC_DONE
) || (flags
& UPL_COMMIT_CLEAR_PRECIOUS
))
6603 m
->precious
= FALSE
;
6605 if (flags
& UPL_COMMIT_SET_DIRTY
) {
6606 SET_PAGE_DIRTY(m
, FALSE
);
6611 /* with the clean queue on, move *all* cleaned pages to the clean queue */
6612 if (hibernate_cleaning_in_progress
== FALSE
&& !m
->dirty
&& (upl
->flags
& UPL_PAGEOUT
)) {
6615 VM_STAT_INCR(pageouts
);
6616 DTRACE_VM2(pgout
, int, 1, (uint64_t *), NULL
);
6618 dwp
->dw_mask
|= DW_enqueue_cleaned
;
6619 vm_pageout_enqueued_cleaned_from_inactive_dirty
++;
6620 } else if (should_be_throttled
== TRUE
&& !m
->active
&& !m
->inactive
&& !m
->speculative
&& !m
->throttled
) {
6622 * page coming back in from being 'frozen'...
6623 * it was dirty before it was frozen, so keep it so
6624 * the vm_page_activate will notice that it really belongs
6625 * on the throttle queue and put it there
6627 SET_PAGE_DIRTY(m
, FALSE
);
6628 dwp
->dw_mask
|= DW_vm_page_activate
;
6631 if ((flags
& UPL_COMMIT_INACTIVATE
) && !m
->clustered
&& !m
->speculative
) {
6632 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
6633 clear_refmod
|= VM_MEM_REFERENCED
;
6634 } else if (!m
->active
&& !m
->inactive
&& !m
->speculative
) {
6636 if (m
->clustered
|| (flags
& UPL_COMMIT_SPECULATE
))
6637 dwp
->dw_mask
|= DW_vm_page_speculate
;
6638 else if (m
->reference
)
6639 dwp
->dw_mask
|= DW_vm_page_activate
;
6641 dwp
->dw_mask
|= DW_vm_page_deactivate_internal
;
6642 clear_refmod
|= VM_MEM_REFERENCED
;
		if (upl->flags & UPL_ACCESS_BLOCKED) {
			/*
			 * We blocked access to the pages in this UPL.
			 * Clear the "busy" bit on this page before we
			 * wake up any waiter.
			 */
			dwp->dw_mask |= DW_clear_busy;
		}
		/*
		 * Wakeup any thread waiting for the page to be un-cleaning.
		 */
		dwp->dw_mask |= DW_PAGE_WAKEUP;

commit_next_page:
		if (clear_refmod)
			pmap_clear_refmod(m->phys_page, clear_refmod);

		target_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;
		if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
			VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

			if (dw_count >= dw_limit) {
				vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
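				/*
				 * Illustrative note (not in the original source): the delayed
				 * work entries batch per-page queue manipulations so the
				 * page-queues lock is taken once per dw_limit pages instead of
				 * once per page.  The pattern is roughly:
				 *
				 *	VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);  // queue the work
				 *	if (dw_count >= dw_limit) {                  // batch is full
				 *		vm_page_do_delayed_work(...);        // flush under one lock
				 *		dwp = &dw_array[0];                  // assumed reset step
				 *		dw_count = 0;
				 *	}
				 */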
			}
		} else {
			if (dwp->dw_mask & DW_clear_busy)
				m->busy = FALSE;

			if (dwp->dw_mask & DW_PAGE_WAKEUP)
				PAGE_WAKEUP(m);
		}
	}
	if (dw_count)
		vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);
	if (fast_path_possible) {

		assert(shadow_object->purgable != VM_PURGABLE_VOLATILE);
		assert(shadow_object->purgable != VM_PURGABLE_EMPTY);

		if (local_queue_count || unwired_count) {

			if (local_queue_count) {
				vm_page_t	first_local, last_local;
				vm_page_t	first_target;
				queue_head_t	*target_queue;

				if (throttle_page)
					target_queue = &vm_page_queue_throttled;
				else {
					if (flags & UPL_COMMIT_INACTIVATE) {
						if (shadow_object->internal)
							target_queue = &vm_page_queue_anonymous;
						else
							target_queue = &vm_page_queue_inactive;
					} else
						target_queue = &vm_page_queue_active;
				}
				/*
				 * Transfer the entire local queue to a regular LRU page queue.
				 */
				first_local = (vm_page_t) queue_first(&local_queue);
				last_local = (vm_page_t) queue_last(&local_queue);
				vm_page_lockspin_queues();

				first_target = (vm_page_t) queue_first(target_queue);

				if (queue_empty(target_queue))
					queue_last(target_queue) = (queue_entry_t) last_local;
				else
					queue_prev(&first_target->pageq) = (queue_entry_t) last_local;

				queue_first(target_queue) = (queue_entry_t) first_local;
				queue_prev(&first_local->pageq) = (queue_entry_t) target_queue;
				queue_next(&last_local->pageq) = (queue_entry_t) first_target;
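				/*
				 * Illustrative note (not in the original source): the
				 * assignments above splice the whole local_queue chain onto
				 * the head of the target queue in O(1).  After the splice
				 * the circular doubly-linked queue reads, schematically:
				 *
				 *	head -> first_local ... last_local -> old_first ... -> head
				 *
				 * i.e. the old first element's prev pointer (or the queue's
				 * last pointer, when the target was empty) now points at
				 * last_local, the queue's first pointer at first_local, and
				 * the two ends of the local chain are stitched to the queue
				 * head and the old first element.
				 */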
				/*
				 * Adjust the global page counts.
				 */
				if (throttle_page) {
					vm_page_throttled_count += local_queue_count;
				} else {
					if (flags & UPL_COMMIT_INACTIVATE) {
						if (shadow_object->internal)
							vm_page_anonymous_count += local_queue_count;
						vm_page_inactive_count += local_queue_count;

						token_new_pagecount += local_queue_count;
					} else
						vm_page_active_count += local_queue_count;

					if (shadow_object->internal)
						vm_page_pageable_internal_count += local_queue_count;
					else
						vm_page_pageable_external_count += local_queue_count;
				}
			} else
				vm_page_lockspin_queues();

			if (unwired_count) {
				vm_page_wire_count -= unwired_count;
				VM_CHECK_MEMORYSTATUS;
			}
			vm_page_unlock_queues();

			shadow_object->wired_page_count -= unwired_count;
	if (upl->flags & UPL_DEVICE_MEMORY) {
		occupied = 0;
	} else if (upl->flags & UPL_LITE) {
		int	pg_num;
		int	i;

		occupied = 0;

		if (!fast_path_full_commit) {
			pg_num = upl->size/PAGE_SIZE;
			pg_num = (pg_num + 31) >> 5;
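			/*
			 * Illustrative note (not in the original source): pg_num is first
			 * the number of pages in the UPL and is then rounded up to the
			 * number of 32-bit words needed to hold one bit per page:
			 *
			 *	words = (pages + 31) >> 5;	// ceil(pages / 32)
			 *
			 * e.g. 1..32 pages -> 1 word, 33..64 pages -> 2 words.
			 */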
			for (i = 0; i < pg_num; i++) {
				if (lite_list[i] != 0) {
					occupied = 1;
					break;
				}
			}
		}
	} else {
		if (queue_empty(&upl->map_object->memq))
			occupied = 0;
	}
	if (occupied == 0) {
		/*
		 * If this UPL element belongs to a Vector UPL and is
		 * empty, then this is the right function to deallocate
		 * it.  So go ahead and set the *empty variable.  The flag
		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view,
		 * should be considered relevant for the Vector UPL and not
		 * the internal UPLs.
		 */
		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
			*empty = TRUE;
		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
			/*
			 * this is not a paging object
			 * so we need to drop the paging reference
			 * that was taken when we created the UPL
			 * against this object
			 */
			vm_object_activity_end(shadow_object);
			vm_object_collapse(shadow_object, 0, TRUE);
		} else {
			/*
			 * we donated the paging reference to
			 * the map object... vm_pageout_object_terminate
			 * will drop this reference
			 */
		}
	}
	vm_object_unlock(shadow_object);
	if (object != shadow_object)
		vm_object_unlock(object);

	/*
	 * If we completed our operations on an UPL that is
	 * part of a Vectored UPL and if empty is TRUE, then
	 * we should go ahead and deallocate this UPL element.
	 * Then we check if this was the last of the UPL elements
	 * within that Vectored UPL.  If so, set empty to TRUE
	 * so that in ubc_upl_commit_range or ubc_upl_commit, we
	 * can go ahead and deallocate the Vector UPL too.
	 */
	*empty = vector_upl_set_subupl(vector_upl, upl, 0);
	upl_deallocate(upl);

	goto process_upl_to_commit;

	if (pgpgout_count) {
		DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
	}
	return KERN_SUCCESS;
6850 upl_offset_t offset
,
6855 upl_page_info_t
*user_page_list
= NULL
;
6856 upl_size_t xfer_size
, subupl_size
= size
;
6857 vm_object_t shadow_object
;
6859 vm_object_offset_t target_offset
;
6860 upl_offset_t subupl_offset
= offset
;
6862 wpl_array_t lite_list
;
6864 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
6865 struct vm_page_delayed_work
*dwp
;
6868 int isVectorUPL
= 0;
6869 upl_t vector_upl
= NULL
;
6873 if (upl
== UPL_NULL
)
6874 return KERN_INVALID_ARGUMENT
;
6876 if ( (upl
->flags
& UPL_IO_WIRE
) && !(error
& UPL_ABORT_DUMP_PAGES
) )
6877 return upl_commit_range(upl
, offset
, size
, UPL_COMMIT_FREE_ABSENT
, NULL
, 0, empty
);
6879 if((isVectorUPL
= vector_upl_is_valid(upl
))) {
6881 upl_lock(vector_upl
);
6886 process_upl_to_abort
:
6889 offset
= subupl_offset
;
6891 upl_unlock(vector_upl
);
6892 return KERN_SUCCESS
;
6894 upl
= vector_upl_subupl_byoffset(vector_upl
, &offset
, &size
);
6896 upl_unlock(vector_upl
);
6897 return KERN_FAILURE
;
6899 subupl_size
-= size
;
6900 subupl_offset
+= size
;
6906 if (upl
->upl_commit_index
< UPL_DEBUG_COMMIT_RECORDS
) {
6907 (void) OSBacktrace(&upl
->upl_commit_records
[upl
->upl_commit_index
].c_retaddr
[0], UPL_DEBUG_STACK_FRAMES
);
6909 upl
->upl_commit_records
[upl
->upl_commit_index
].c_beg
= offset
;
6910 upl
->upl_commit_records
[upl
->upl_commit_index
].c_end
= (offset
+ size
);
6911 upl
->upl_commit_records
[upl
->upl_commit_index
].c_aborted
= 1;
6913 upl
->upl_commit_index
++;
6916 if (upl
->flags
& UPL_DEVICE_MEMORY
)
6918 else if ((offset
+ size
) <= upl
->size
)
6924 upl_unlock(vector_upl
);
6927 return KERN_FAILURE
;
6929 if (upl
->flags
& UPL_INTERNAL
) {
6930 lite_list
= (wpl_array_t
)
6931 ((((uintptr_t)upl
) + sizeof(struct upl
))
6932 + ((upl
->size
/PAGE_SIZE
) * sizeof(upl_page_info_t
)));
6934 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
6936 lite_list
= (wpl_array_t
)
6937 (((uintptr_t)upl
) + sizeof(struct upl
));
6939 object
= upl
->map_object
;
6941 if (upl
->flags
& UPL_SHADOWED
) {
6942 vm_object_lock(object
);
6943 shadow_object
= object
->shadow
;
6945 shadow_object
= object
;
6947 entry
= offset
/PAGE_SIZE
;
6948 target_offset
= (vm_object_offset_t
)offset
;
6950 if (upl
->flags
& UPL_KERNEL_OBJECT
)
6951 vm_object_lock_shared(shadow_object
);
6953 vm_object_lock(shadow_object
);
6955 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
6956 assert(shadow_object
->blocked_access
);
6957 shadow_object
->blocked_access
= FALSE
;
6958 vm_object_wakeup(object
, VM_OBJECT_EVENT_UNBLOCKED
);
6963 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
6965 if ((error
& UPL_ABORT_DUMP_PAGES
) && (upl
->flags
& UPL_KERNEL_OBJECT
))
6966 panic("upl_abort_range: kernel_object being DUMPED");
6970 unsigned int pg_num
;
6973 pg_num
= (unsigned int) (target_offset
/PAGE_SIZE
);
6974 assert(pg_num
== target_offset
/PAGE_SIZE
);
6979 needed
= user_page_list
[pg_num
].needed
;
6984 if (upl
->flags
& UPL_LITE
) {
6986 if (lite_list
[pg_num
>>5] & (1 << (pg_num
& 31))) {
6987 lite_list
[pg_num
>>5] &= ~(1 << (pg_num
& 31));
6989 if ( !(upl
->flags
& UPL_KERNEL_OBJECT
))
6990 m
= vm_page_lookup(shadow_object
, target_offset
+
6991 (upl
->offset
- shadow_object
->paging_offset
));
6994 if (upl
->flags
& UPL_SHADOWED
) {
6995 if ((t
= vm_page_lookup(object
, target_offset
)) != VM_PAGE_NULL
) {
7000 if (m
== VM_PAGE_NULL
)
7001 m
= vm_page_lookup(shadow_object
, target_offset
+ object
->vo_shadow_offset
);
7004 if ((upl
->flags
& UPL_KERNEL_OBJECT
))
7005 goto abort_next_page
;
7007 if (m
!= VM_PAGE_NULL
) {
7009 assert(!m
->compressor
);
7012 boolean_t must_free
= TRUE
;
7015 * COPYOUT = FALSE case
7016 * check for error conditions which must
7017 * be passed back to the pages customer
7019 if (error
& UPL_ABORT_RESTART
) {
7024 } else if (error
& UPL_ABORT_UNAVAILABLE
) {
7028 } else if (error
& UPL_ABORT_ERROR
) {
7035 if (m
->clustered
&& needed
== FALSE
) {
7037 * This page was a part of a speculative
7038 * read-ahead initiated by the kernel
7039 * itself. No one is expecting this
7040 * page and no one will clean up its
7041 * error state if it ever becomes valid
7043 * We have to free it here.
7050 * If the page was already encrypted,
7051 * we don't really need to decrypt it
7052 * now. It will get decrypted later,
7053 * on demand, as soon as someone needs
7054 * to access its contents.
7057 m
->cleaning
= FALSE
;
7058 m
->encrypted_cleaning
= FALSE
;
				if (m->overwriting && !m->busy) {
					/*
					 * this shouldn't happen since
					 * this is an 'absent' page, but
					 * it doesn't hurt to check for
					 * the 'alternate' method of
					 * stabilizing the page...
					 * we will mark 'busy' to be cleared
					 * in the following code which will
					 * take care of the primary stabilization
					 * method (i.e. setting 'busy' to TRUE)
					 */
					dwp->dw_mask |= DW_vm_page_unwire;
				}
				m->overwriting = FALSE;

				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);

				if (must_free == TRUE)
					dwp->dw_mask |= DW_vm_page_free;
				else
					dwp->dw_mask |= DW_vm_page_activate;
7084 * Handle the trusted pager throttle.
7087 dwp
->dw_mask
|= DW_vm_pageout_throttle_up
;
7089 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7091 * We blocked access to the pages in this UPL.
7092 * Clear the "busy" bit and wake up any waiter
7095 dwp
->dw_mask
|= DW_clear_busy
;
			if (m->overwriting) {
				if (m->busy)
					dwp->dw_mask |= DW_clear_busy;
				else {
					/*
					 * deal with the 'alternate' method
					 * of stabilizing the page...
					 * we will either free the page
					 * or mark 'busy' to be cleared
					 * in the following code which will
					 * take care of the primary stabilization
					 * method (i.e. setting 'busy' to TRUE)
					 */
					dwp->dw_mask |= DW_vm_page_unwire;
				}
				m->overwriting = FALSE;
			}
			if (m->encrypted_cleaning == TRUE) {
				m->encrypted_cleaning = FALSE;

				dwp->dw_mask |= DW_clear_busy;
			}
			m->cleaning = FALSE;
#if	MACH_PAGEMAP
			vm_external_state_clr(m->object->existence_map, m->offset);
#endif	/* MACH_PAGEMAP */
			if (error & UPL_ABORT_DUMP_PAGES) {
				pmap_disconnect(m->phys_page);

				dwp->dw_mask |= DW_vm_page_free;
			} else {
				if (!(dwp->dw_mask & DW_vm_page_unwire)) {
					if (error & UPL_ABORT_REFERENCE) {
						/*
						 * we've been told to explicitly
						 * reference this page... for
						 * file I/O, this is done by
						 * implementing an LRU on the inactive q
						 */
						dwp->dw_mask |= DW_vm_page_lru;

					} else if (!m->active && !m->inactive && !m->speculative)
						dwp->dw_mask |= DW_vm_page_deactivate_internal;
				}
				dwp->dw_mask |= DW_PAGE_WAKEUP;
			}
7147 target_offset
+= PAGE_SIZE_64
;
7148 xfer_size
-= PAGE_SIZE
;
7152 if (dwp
->dw_mask
& ~(DW_clear_busy
| DW_PAGE_WAKEUP
)) {
7153 VM_PAGE_ADD_DELAYED_WORK(dwp
, m
, dw_count
);
7155 if (dw_count
>= dw_limit
) {
7156 vm_page_do_delayed_work(shadow_object
, &dw_array
[0], dw_count
);
7162 if (dwp
->dw_mask
& DW_clear_busy
)
7165 if (dwp
->dw_mask
& DW_PAGE_WAKEUP
)
7171 vm_page_do_delayed_work(shadow_object
, &dw_array
[0], dw_count
);
7175 if (upl
->flags
& UPL_DEVICE_MEMORY
) {
7177 } else if (upl
->flags
& UPL_LITE
) {
7181 pg_num
= upl
->size
/PAGE_SIZE
;
7182 pg_num
= (pg_num
+ 31) >> 5;
7185 for (i
= 0; i
< pg_num
; i
++) {
7186 if (lite_list
[i
] != 0) {
7192 if (queue_empty(&upl
->map_object
->memq
))
7195 if (occupied
== 0) {
7197 * If this UPL element belongs to a Vector UPL and is
7198 * empty, then this is the right function to deallocate
7199 * it. So go ahead set the *empty variable. The flag
7200 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
7201 * should be considered relevant for the Vector UPL and
7202 * not the internal UPLs.
7204 if ((upl
->flags
& UPL_COMMIT_NOTIFY_EMPTY
) || isVectorUPL
)
	if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
		/*
		 * this is not a paging object
		 * so we need to drop the paging reference
		 * that was taken when we created the UPL
		 * against this object
		 */
		vm_object_activity_end(shadow_object);
		vm_object_collapse(shadow_object, 0, TRUE);
	} else {
		/*
		 * we donated the paging reference to
		 * the map object... vm_pageout_object_terminate
		 * will drop this reference
		 */
	}
	vm_object_unlock(shadow_object);
	if (object != shadow_object)
		vm_object_unlock(object);

	/*
	 * If we completed our operations on an UPL that is
	 * part of a Vectored UPL and if empty is TRUE, then
	 * we should go ahead and deallocate this UPL element.
	 * Then we check if this was the last of the UPL elements
	 * within that Vectored UPL.  If so, set empty to TRUE
	 * so that in ubc_upl_abort_range or ubc_upl_abort, we
	 * can go ahead and deallocate the Vector UPL too.
	 */
	if (*empty == TRUE) {
		*empty = vector_upl_set_subupl(vector_upl, upl, 0);
		upl_deallocate(upl);
	}
	goto process_upl_to_abort;

	return KERN_SUCCESS;
}

kern_return_t
upl_abort(
	upl_t	upl,
	int	error)
{
	boolean_t	empty;

	return upl_abort_range(upl, 0, upl->size, error, &empty);
}


/* an option on commit should be wire */
kern_return_t
upl_commit(
	upl_t			upl,
	upl_page_info_t		*page_list,
	mach_msg_type_number_t	count)
{
	boolean_t	empty;

	return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
}
void
iopl_valid_data(
	upl_t	upl)
{
	vm_object_t		object;
	vm_object_offset_t	offset;
	vm_page_t		m, nxt_page = VM_PAGE_NULL;
	upl_size_t		size;
	int			wired_count = 0;

	if (upl == NULL)
		panic("iopl_valid_data: NULL upl");
	if (vector_upl_is_valid(upl))
		panic("iopl_valid_data: vector upl");
	if ((upl->flags & (UPL_DEVICE_MEMORY|UPL_SHADOWED|UPL_ACCESS_BLOCKED|UPL_IO_WIRE|UPL_INTERNAL)) != UPL_IO_WIRE)
		panic("iopl_valid_data: unsupported upl, flags = %x", upl->flags);

	object = upl->map_object;

	if (object == kernel_object || object == compressor_object)
		panic("iopl_valid_data: object == kernel or compressor");

	if (object->purgable == VM_PURGABLE_VOLATILE)
		panic("iopl_valid_data: object == VM_PURGABLE_VOLATILE");

	size = upl->size;

	vm_object_lock(object);

	if (object->vo_size == size && object->resident_page_count == (size / PAGE_SIZE))
		nxt_page = (vm_page_t)queue_first(&object->memq);
	else
		offset = 0 + upl->offset - object->paging_offset;

	while (size) {

		if (nxt_page != VM_PAGE_NULL) {
			m = nxt_page;
			nxt_page = (vm_page_t)queue_next(&nxt_page->listq);
		} else {
			m = vm_page_lookup(object, offset);
			offset += PAGE_SIZE;
		}
		if (m == VM_PAGE_NULL)
			panic("iopl_valid_data: missing expected page at offset %lx", (long)offset);

		if (m->busy) {
			if (!m->absent)
				panic("iopl_valid_data: busy page w/o absent");

			if (m->pageq.next || m->pageq.prev)
				panic("iopl_valid_data: busy+absent page on page queue");

			PAGE_WAKEUP_DONE(m);
		}
		size -= PAGE_SIZE;
	}
	object->wired_page_count += wired_count;

	vm_page_lockspin_queues();
	vm_page_wire_count += wired_count;
	vm_page_unlock_queues();

	vm_object_unlock(object);
}
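/*
 * Illustrative note (not in the original source): iopl_valid_data() appears to
 * walk the pages backing an UPL_IO_WIRE UPL once the I/O has filled them with
 * valid data, clearing their "busy"/"absent" state, and it batches the wiring
 * accounting -- the per-object wired_page_count and the global
 * vm_page_wire_count are each bumped once by wired_count rather than once per
 * page, so the page-queues lock is only taken a single time.
 */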
void
vm_object_set_pmap_cache_attr(
	vm_object_t		object,
	upl_page_info_array_t	user_page_list,
	unsigned int		num_pages,
	boolean_t		batch_pmap_op)
{
	unsigned int	cache_attr = 0;

	cache_attr = object->wimg_bits & VM_WIMG_MASK;
	assert(user_page_list);
	if (cache_attr != VM_WIMG_USE_DEFAULT) {
		PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
	}
}

unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
7369 vm_object_iopl_request(
7371 vm_object_offset_t offset
,
7374 upl_page_info_array_t user_page_list
,
7375 unsigned int *page_list_count
,
7379 vm_object_offset_t dst_offset
;
7380 upl_size_t xfer_size
;
7383 wpl_array_t lite_list
= NULL
;
7384 int no_zero_fill
= FALSE
;
7385 unsigned int size_in_pages
;
7389 struct vm_object_fault_info fault_info
;
7390 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
7391 struct vm_page_delayed_work
*dwp
;
7395 boolean_t caller_lookup
;
7396 int io_tracking_flag
= 0;
7399 boolean_t set_cache_attr_needed
= FALSE
;
7400 boolean_t free_wired_pages
= FALSE
;
7401 int fast_path_possible
= 0;
7404 if (cntrl_flags
& ~UPL_VALID_FLAGS
) {
7406 * For forward compatibility's sake,
7407 * reject any unknown flag.
7409 return KERN_INVALID_VALUE
;
7411 if (vm_lopage_needed
== FALSE
)
7412 cntrl_flags
&= ~UPL_NEED_32BIT_ADDR
;
7414 if (cntrl_flags
& UPL_NEED_32BIT_ADDR
) {
7415 if ( (cntrl_flags
& (UPL_SET_IO_WIRE
| UPL_SET_LITE
)) != (UPL_SET_IO_WIRE
| UPL_SET_LITE
))
7416 return KERN_INVALID_VALUE
;
7418 if (object
->phys_contiguous
) {
7419 if ((offset
+ object
->vo_shadow_offset
) >= (vm_object_offset_t
)max_valid_dma_address
)
7420 return KERN_INVALID_ADDRESS
;
7422 if (((offset
+ object
->vo_shadow_offset
) + size
) >= (vm_object_offset_t
)max_valid_dma_address
)
7423 return KERN_INVALID_ADDRESS
;
7427 if (cntrl_flags
& UPL_ENCRYPT
) {
7430 * The paging path doesn't use this interface,
7431 * so we don't support the UPL_ENCRYPT flag
7432 * here. We won't encrypt the pages.
7434 assert(! (cntrl_flags
& UPL_ENCRYPT
));
7436 if (cntrl_flags
& (UPL_NOZEROFILL
| UPL_NOZEROFILLIO
))
7437 no_zero_fill
= TRUE
;
7439 if (cntrl_flags
& UPL_COPYOUT_FROM
)
7440 prot
= VM_PROT_READ
;
7442 prot
= VM_PROT_READ
| VM_PROT_WRITE
;
7444 if ((!object
->internal
) && (object
->paging_offset
!= 0))
7445 panic("vm_object_iopl_request: external object with non-zero paging offset\n");
7447 #if CONFIG_IOSCHED || UPL_DEBUG
7448 if ((object
->io_tracking
&& object
!= kernel_object
) || upl_debug_enabled
)
7449 io_tracking_flag
|= UPL_CREATE_IO_TRACKING
;
7453 if (object
->io_tracking
) {
7454 /* Check if we're dealing with the kernel object. We do not support expedite on kernel object UPLs */
7455 if (object
!= kernel_object
)
7456 io_tracking_flag
|= UPL_CREATE_EXPEDITE_SUP
;
7460 if (object
->phys_contiguous
)
7465 if (cntrl_flags
& UPL_SET_INTERNAL
) {
7466 upl
= upl_create(UPL_CREATE_INTERNAL
| UPL_CREATE_LITE
| io_tracking_flag
, UPL_IO_WIRE
, psize
);
7468 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
7469 lite_list
= (wpl_array_t
) (((uintptr_t)user_page_list
) +
7470 ((psize
/ PAGE_SIZE
) * sizeof(upl_page_info_t
)));
7472 user_page_list
= NULL
;
7476 upl
= upl_create(UPL_CREATE_LITE
| io_tracking_flag
, UPL_IO_WIRE
, psize
);
7478 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
7484 user_page_list
[0].device
= FALSE
;
7487 upl
->map_object
= object
;
7490 size_in_pages
= size
/ PAGE_SIZE
;
7492 if (object
== kernel_object
&&
7493 !(cntrl_flags
& (UPL_NEED_32BIT_ADDR
| UPL_BLOCK_ACCESS
))) {
7494 upl
->flags
|= UPL_KERNEL_OBJECT
;
7496 vm_object_lock(object
);
7498 vm_object_lock_shared(object
);
7501 vm_object_lock(object
);
7502 vm_object_activity_begin(object
);
7505 * paging in progress also protects the paging_offset
7507 upl
->offset
= offset
+ object
->paging_offset
;
	if (cntrl_flags & UPL_BLOCK_ACCESS) {
		/*
		 * The user requested that access to the pages in this UPL
		 * be blocked until the UPL is committed or aborted.
		 */
		upl->flags |= UPL_ACCESS_BLOCKED;
	}

	if (!(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS)) &&
	    object->purgable != VM_PURGABLE_VOLATILE &&
	    object->purgable != VM_PURGABLE_EMPTY &&
	    object->copy == NULL &&
	    size == object->vo_size &&
	    object->resident_page_count == 0 &&
	    object->shadow == NULL &&
	    object->pager == NULL)
	{
		fast_path_possible = 1;
		set_cache_attr_needed = TRUE;
	}
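	/*
	 * Illustrative note (not in the original source): the fast path is only
	 * taken for a "pristine" object -- nothing resident, no pager, no shadow,
	 * no copy object, and a request covering the whole object -- so freshly
	 * grabbed pages can be inserted and wired directly, without consulting a
	 * pager or pushing copy-on-write copies, and the wire counts can be
	 * updated in one batch at the end.
	 */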
7531 #if CONFIG_IOSCHED || UPL_DEBUG
7532 if (upl
->flags
& UPL_TRACKED_BY_OBJECT
) {
7533 vm_object_activity_begin(object
);
7534 queue_enter(&object
->uplq
, upl
, upl_t
, uplq
);
7538 if (object
->phys_contiguous
) {
7540 if (upl
->flags
& UPL_ACCESS_BLOCKED
) {
7541 assert(!object
->blocked_access
);
7542 object
->blocked_access
= TRUE
;
7545 vm_object_unlock(object
);
7548 * don't need any shadow mappings for this one
7549 * since it is already I/O memory
7551 upl
->flags
|= UPL_DEVICE_MEMORY
;
7553 upl
->highest_page
= (ppnum_t
) ((offset
+ object
->vo_shadow_offset
+ size
- 1)>>PAGE_SHIFT
);
7555 if (user_page_list
) {
7556 user_page_list
[0].phys_addr
= (ppnum_t
) ((offset
+ object
->vo_shadow_offset
)>>PAGE_SHIFT
);
7557 user_page_list
[0].device
= TRUE
;
7559 if (page_list_count
!= NULL
) {
7560 if (upl
->flags
& UPL_INTERNAL
)
7561 *page_list_count
= 0;
7563 *page_list_count
= 1;
7565 return KERN_SUCCESS
;
7567 if (object
!= kernel_object
&& object
!= compressor_object
) {
7569 * Protect user space from future COW operations
7571 #if VM_OBJECT_TRACKING_OP_TRUESHARE
7572 if (!object
->true_share
&&
7573 vm_object_tracking_inited
) {
7574 void *bt
[VM_OBJECT_TRACKING_BTDEPTH
];
7577 num
= OSBacktrace(bt
,
7578 VM_OBJECT_TRACKING_BTDEPTH
);
7579 btlog_add_entry(vm_object_tracking_btlog
,
7581 VM_OBJECT_TRACKING_OP_TRUESHARE
,
7585 #endif /* VM_OBJECT_TRACKING_OP_TRUESHARE */
7587 object
->true_share
= TRUE
;
7589 if (object
->copy_strategy
== MEMORY_OBJECT_COPY_SYMMETRIC
)
7590 object
->copy_strategy
= MEMORY_OBJECT_COPY_DELAY
;
7593 if (!(cntrl_flags
& UPL_COPYOUT_FROM
) &&
7594 object
->copy
!= VM_OBJECT_NULL
) {
7596 * Honor copy-on-write obligations
7598 * The caller is gathering these pages and
7599 * might modify their contents. We need to
7600 * make sure that the copy object has its own
7601 * private copies of these pages before we let
7602 * the caller modify them.
7604 * NOTE: someone else could map the original object
7605 * after we've done this copy-on-write here, and they
7606 * could then see an inconsistent picture of the memory
7607 * while it's being modified via the UPL. To prevent this,
7608 * we would have to block access to these pages until the
7609 * UPL is released. We could use the UPL_BLOCK_ACCESS
7610 * code path for that...
7612 vm_object_update(object
,
7617 FALSE
, /* should_return */
7618 MEMORY_OBJECT_COPY_SYNC
,
7620 #if DEVELOPMENT || DEBUG
7622 iopl_cow_pages
+= size
>> PAGE_SHIFT
;
7625 if (cntrl_flags
& UPL_SET_INTERRUPTIBLE
)
7626 interruptible
= THREAD_ABORTSAFE
;
7628 interruptible
= THREAD_UNINT
;
7633 dst_offset
= offset
;
7636 if (fast_path_possible
) {
7637 int wired_count
= 0;
7641 while ( (dst_page
= vm_page_grab()) == VM_PAGE_NULL
) {
7642 OSAddAtomic(size_in_pages
, &vm_upl_wait_for_pages
);
7644 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
7646 if (vm_page_wait(interruptible
) == FALSE
) {
7650 OSAddAtomic(-size_in_pages
, &vm_upl_wait_for_pages
);
7652 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, -1);
7655 vm_page_lockspin_queues();
7656 vm_page_wire_count
+= wired_count
;
7657 vm_page_unlock_queues();
7659 free_wired_pages
= TRUE
;
7661 ret
= MACH_SEND_INTERRUPTED
;
7665 OSAddAtomic(-size_in_pages
, &vm_upl_wait_for_pages
);
7667 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
7669 if (no_zero_fill
== FALSE
)
7670 vm_page_zero_fill(dst_page
);
7672 dst_page
->absent
= TRUE
;
7674 dst_page
->reference
= TRUE
;
7676 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
7677 SET_PAGE_DIRTY(dst_page
, FALSE
);
7679 if (dst_page
->absent
== FALSE
) {
7680 assert(object
->purgable
!= VM_PURGABLE_VOLATILE
);
7681 assert(object
->purgable
!= VM_PURGABLE_EMPTY
);
7682 dst_page
->wire_count
++;
7685 PAGE_WAKEUP_DONE(dst_page
);
7687 vm_page_insert_internal(dst_page
, object
, dst_offset
, FALSE
, TRUE
, TRUE
);
7689 lite_list
[entry
>>5] |= 1 << (entry
& 31);
7691 if (dst_page
->phys_page
> upl
->highest_page
)
7692 upl
->highest_page
= dst_page
->phys_page
;
7694 if (user_page_list
) {
7695 user_page_list
[entry
].phys_addr
= dst_page
->phys_page
;
7696 user_page_list
[entry
].absent
= dst_page
->absent
;
7697 user_page_list
[entry
].dirty
= dst_page
->dirty
;
7698 user_page_list
[entry
].precious
= FALSE
;
7699 user_page_list
[entry
].pageout
= FALSE
;
7700 user_page_list
[entry
].device
= FALSE
;
7701 user_page_list
[entry
].needed
= FALSE
;
7702 user_page_list
[entry
].speculative
= FALSE
;
7703 user_page_list
[entry
].cs_validated
= FALSE
;
7704 user_page_list
[entry
].cs_tainted
= FALSE
;
			dst_offset += PAGE_SIZE_64;
			xfer_size -= PAGE_SIZE;
		}
		if (wired_count) {
			vm_page_lockspin_queues();
			vm_page_wire_count += wired_count;
			vm_page_unlock_queues();
		}
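		/*
		 * Illustrative note (not in the original source): the global
		 * vm_page_wire_count is adjusted once here, under a single
		 * page-queues spinlock acquisition, rather than once per page
		 * wired in the loop above; wired_count accumulated the
		 * per-page increments.
		 */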
7719 fault_info
.behavior
= VM_BEHAVIOR_SEQUENTIAL
;
7720 fault_info
.user_tag
= 0;
7721 fault_info
.lo_offset
= offset
;
7722 fault_info
.hi_offset
= offset
+ xfer_size
;
7723 fault_info
.no_cache
= FALSE
;
7724 fault_info
.stealth
= FALSE
;
7725 fault_info
.io_sync
= FALSE
;
7726 fault_info
.cs_bypass
= FALSE
;
7727 fault_info
.mark_zf_absent
= TRUE
;
7728 fault_info
.interruptible
= interruptible
;
7729 fault_info
.batch_pmap_op
= TRUE
;
7732 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
7735 vm_fault_return_t result
;
7736 unsigned int pg_num
;
7740 dst_page
= vm_page_lookup(object
, dst_offset
);
7744 * If the page is encrypted, we need to decrypt it,
7745 * so force a soft page fault.
7747 if (dst_page
== VM_PAGE_NULL
||
7749 dst_page
->encrypted
||
7751 dst_page
->restart
||
7753 dst_page
->fictitious
) {
7755 if (object
== kernel_object
)
7756 panic("vm_object_iopl_request: missing/bad page in kernel object\n");
7757 if (object
== compressor_object
)
7758 panic("vm_object_iopl_request: missing/bad page in compressor object\n");
7760 if (cntrl_flags
& UPL_REQUEST_NO_FAULT
) {
7761 ret
= KERN_MEMORY_ERROR
;
7764 set_cache_attr_needed
= TRUE
;
7767 * We just looked up the page and the result remains valid
7768 * until the object lock is release, so send it to
7769 * vm_fault_page() (as "dst_page"), to avoid having to
7770 * look it up again there.
7772 caller_lookup
= TRUE
;
7776 kern_return_t error_code
;
7778 fault_info
.cluster_size
= xfer_size
;
7780 vm_object_paging_begin(object
);
7782 result
= vm_fault_page(object
, dst_offset
,
7783 prot
| VM_PROT_WRITE
, FALSE
,
7785 &prot
, &dst_page
, &top_page
,
7787 &error_code
, no_zero_fill
,
7788 FALSE
, &fault_info
);
7790 /* our lookup is no longer valid at this point */
7791 caller_lookup
= FALSE
;
7795 case VM_FAULT_SUCCESS
:
7797 if ( !dst_page
->absent
) {
7798 PAGE_WAKEUP_DONE(dst_page
);
7801 * we only get back an absent page if we
7802 * requested that it not be zero-filled
7803 * because we are about to fill it via I/O
7805 * absent pages should be left BUSY
7806 * to prevent them from being faulted
7807 * into an address space before we've
7808 * had a chance to complete the I/O on
7809 * them since they may contain info that
7810 * shouldn't be seen by the faulting task
7814 * Release paging references and
7815 * top-level placeholder page, if any.
7817 if (top_page
!= VM_PAGE_NULL
) {
7818 vm_object_t local_object
;
7820 local_object
= top_page
->object
;
7822 if (top_page
->object
!= dst_page
->object
) {
7823 vm_object_lock(local_object
);
7824 VM_PAGE_FREE(top_page
);
7825 vm_object_paging_end(local_object
);
7826 vm_object_unlock(local_object
);
7828 VM_PAGE_FREE(top_page
);
7829 vm_object_paging_end(local_object
);
7832 vm_object_paging_end(object
);
7835 case VM_FAULT_RETRY
:
7836 vm_object_lock(object
);
7839 case VM_FAULT_MEMORY_SHORTAGE
:
7840 OSAddAtomic(size_in_pages
, &vm_upl_wait_for_pages
);
7842 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
7844 if (vm_page_wait(interruptible
)) {
7845 OSAddAtomic(-size_in_pages
, &vm_upl_wait_for_pages
);
7847 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
7848 vm_object_lock(object
);
7852 OSAddAtomic(-size_in_pages
, &vm_upl_wait_for_pages
);
7854 VM_DEBUG_EVENT(vm_iopl_page_wait
, VM_IOPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, -1);
7858 case VM_FAULT_INTERRUPTED
:
7859 error_code
= MACH_SEND_INTERRUPTED
;
7860 case VM_FAULT_MEMORY_ERROR
:
7862 ret
= (error_code
? error_code
: KERN_MEMORY_ERROR
);
7864 vm_object_lock(object
);
7867 case VM_FAULT_SUCCESS_NO_VM_PAGE
:
7868 /* success but no page: fail */
7869 vm_object_paging_end(object
);
7870 vm_object_unlock(object
);
7874 panic("vm_object_iopl_request: unexpected error"
7875 " 0x%x from vm_fault_page()\n", result
);
7877 } while (result
!= VM_FAULT_SUCCESS
);
7880 if (upl
->flags
& UPL_KERNEL_OBJECT
)
7881 goto record_phys_addr
;
7883 if (dst_page
->compressor
) {
7884 dst_page
->busy
= TRUE
;
7885 goto record_phys_addr
;
7888 if (dst_page
->cleaning
) {
7890 * Someone else is cleaning this page in place.
7891 * In theory, we should be able to proceed and use this
7892 * page but they'll probably end up clearing the "busy"
7893 * bit on it in upl_commit_range() but they didn't set
7894 * it, so they would clear our "busy" bit and open
7895 * us to race conditions.
7896 * We'd better wait for the cleaning to complete and
7899 vm_object_iopl_request_sleep_for_cleaning
++;
7900 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
7903 if (dst_page
->laundry
) {
7904 dst_page
->pageout
= FALSE
;
7906 vm_pageout_steal_laundry(dst_page
, FALSE
);
7908 if ( (cntrl_flags
& UPL_NEED_32BIT_ADDR
) &&
7909 dst_page
->phys_page
>= (max_valid_dma_address
>> PAGE_SHIFT
) ) {
7914 * support devices that can't DMA above 32 bits
7915 * by substituting pages from a pool of low address
7916 * memory for any pages we find above the 4G mark
7917 * can't substitute if the page is already wired because
7918 * we don't know whether that physical address has been
7919 * handed out to some other 64 bit capable DMA device to use
7921 if (VM_PAGE_WIRED(dst_page
)) {
7922 ret
= KERN_PROTECTION_FAILURE
;
			low_page = vm_page_grablo();

			if (low_page == VM_PAGE_NULL) {
				ret = KERN_RESOURCE_SHORTAGE;
				goto return_err;
			}
			/*
			 * from here until the vm_page_replace completes
			 * we mustn't drop the object lock... we don't
			 * want anyone refaulting this page in and using
			 * it after we disconnect it... we want the fault
			 * to find the new page being substituted.
			 */
			if (dst_page->pmapped)
				refmod = pmap_disconnect(dst_page->phys_page);
7943 if (!dst_page
->absent
)
7944 vm_page_copy(dst_page
, low_page
);
7946 low_page
->reference
= dst_page
->reference
;
7947 low_page
->dirty
= dst_page
->dirty
;
7948 low_page
->absent
= dst_page
->absent
;
7950 if (refmod
& VM_MEM_REFERENCED
)
7951 low_page
->reference
= TRUE
;
7952 if (refmod
& VM_MEM_MODIFIED
) {
7953 SET_PAGE_DIRTY(low_page
, FALSE
);
7956 vm_page_replace(low_page
, object
, dst_offset
);
7958 dst_page
= low_page
;
7960 * vm_page_grablo returned the page marked
7961 * BUSY... we don't need a PAGE_WAKEUP_DONE
7962 * here, because we've never dropped the object lock
7964 if ( !dst_page
->absent
)
7965 dst_page
->busy
= FALSE
;
7967 if ( !dst_page
->busy
)
7968 dwp
->dw_mask
|= DW_vm_page_wire
;
7970 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
7972 * Mark the page "busy" to block any future page fault
7973 * on this page in addition to wiring it.
7974 * We'll also remove the mapping
7975 * of all these pages before leaving this routine.
7977 assert(!dst_page
->fictitious
);
7978 dst_page
->busy
= TRUE
;
7981 * expect the page to be used
7982 * page queues lock must be held to set 'reference'
7984 dwp
->dw_mask
|= DW_set_reference
;
7986 if (!(cntrl_flags
& UPL_COPYOUT_FROM
)) {
7987 SET_PAGE_DIRTY(dst_page
, TRUE
);
7989 if ((cntrl_flags
& UPL_REQUEST_FORCE_COHERENCY
) && dst_page
->written_by_kernel
== TRUE
) {
7990 pmap_sync_page_attributes_phys(dst_page
->phys_page
);
7991 dst_page
->written_by_kernel
= FALSE
;
7996 upl
->flags
|= UPL_HAS_BUSY
;
7998 pg_num
= (unsigned int) ((dst_offset
-offset
)/PAGE_SIZE
);
7999 assert(pg_num
== (dst_offset
-offset
)/PAGE_SIZE
);
8000 lite_list
[pg_num
>>5] |= 1 << (pg_num
& 31);
8002 if (dst_page
->phys_page
> upl
->highest_page
)
8003 upl
->highest_page
= dst_page
->phys_page
;
8005 if (user_page_list
) {
8006 user_page_list
[entry
].phys_addr
= dst_page
->phys_page
;
8007 user_page_list
[entry
].pageout
= dst_page
->pageout
;
8008 user_page_list
[entry
].absent
= dst_page
->absent
;
8009 user_page_list
[entry
].dirty
= dst_page
->dirty
;
8010 user_page_list
[entry
].precious
= dst_page
->precious
;
8011 user_page_list
[entry
].device
= FALSE
;
8012 user_page_list
[entry
].needed
= FALSE
;
8013 if (dst_page
->clustered
== TRUE
)
8014 user_page_list
[entry
].speculative
= dst_page
->speculative
;
8016 user_page_list
[entry
].speculative
= FALSE
;
8017 user_page_list
[entry
].cs_validated
= dst_page
->cs_validated
;
8018 user_page_list
[entry
].cs_tainted
= dst_page
->cs_tainted
;
8019 user_page_list
[entry
].cs_nx
= dst_page
->cs_nx
;
8021 if (object
!= kernel_object
&& object
!= compressor_object
) {
8023 * someone is explicitly grabbing this page...
8024 * update clustered and speculative state
8027 if (dst_page
->clustered
)
8028 VM_PAGE_CONSUME_CLUSTERED(dst_page
);
8031 dst_offset
+= PAGE_SIZE_64
;
8032 xfer_size
-= PAGE_SIZE
;
8036 VM_PAGE_ADD_DELAYED_WORK(dwp
, dst_page
, dw_count
);
8038 if (dw_count
>= dw_limit
) {
8039 vm_page_do_delayed_work(object
, &dw_array
[0], dw_count
);
8047 vm_page_do_delayed_work(object
, &dw_array
[0], dw_count
);
8050 if (user_page_list
&& set_cache_attr_needed
== TRUE
)
8051 vm_object_set_pmap_cache_attr(object
, user_page_list
, entry
, TRUE
);
8053 if (page_list_count
!= NULL
) {
8054 if (upl
->flags
& UPL_INTERNAL
)
8055 *page_list_count
= 0;
8056 else if (*page_list_count
> entry
)
8057 *page_list_count
= entry
;
8059 vm_object_unlock(object
);
8061 if (cntrl_flags
& UPL_BLOCK_ACCESS
) {
8063 * We've marked all the pages "busy" so that future
8064 * page faults will block.
8065 * Now remove the mapping for these pages, so that they
8066 * can't be accessed without causing a page fault.
8068 vm_object_pmap_protect(object
, offset
, (vm_object_size_t
)size
,
8069 PMAP_NULL
, 0, VM_PROT_NONE
);
8070 assert(!object
->blocked_access
);
8071 object
->blocked_access
= TRUE
;
8073 return KERN_SUCCESS
;
8078 for (; offset
< dst_offset
; offset
+= PAGE_SIZE
) {
8079 boolean_t need_unwire
;
8081 dst_page
= vm_page_lookup(object
, offset
);
8083 if (dst_page
== VM_PAGE_NULL
)
8084 panic("vm_object_iopl_request: Wired page missing. \n");
8087 * if we've already processed this page in an earlier
8088 * dw_do_work, we need to undo the wiring... we will
8089 * leave the dirty and reference bits on if they
8090 * were set, since we don't have a good way of knowing
8091 * what the previous state was and we won't get here
8092 * under any normal circumstances... we will always
8093 * clear BUSY and wakeup any waiters via vm_page_free
8094 * or PAGE_WAKEUP_DONE
8099 if (dw_array
[dw_index
].dw_m
== dst_page
) {
8101 * still in the deferred work list
8102 * which means we haven't yet called
8103 * vm_page_wire on this page
8105 need_unwire
= FALSE
;
8111 vm_page_lock_queues();
8113 if (dst_page
->absent
|| free_wired_pages
== TRUE
) {
8114 vm_page_free(dst_page
);
8116 need_unwire
= FALSE
;
8118 if (need_unwire
== TRUE
)
8119 vm_page_unwire(dst_page
, TRUE
);
8121 PAGE_WAKEUP_DONE(dst_page
);
8123 vm_page_unlock_queues();
8125 if (need_unwire
== TRUE
)
8126 VM_STAT_INCR(reactivations
);
8131 if (! (upl
->flags
& UPL_KERNEL_OBJECT
)) {
8132 vm_object_activity_end(object
);
8133 vm_object_collapse(object
, 0, TRUE
);
8135 vm_object_unlock(object
);
8146 kern_return_t retval
;
8147 boolean_t upls_locked
;
8148 vm_object_t object1
, object2
;
8150 if (upl1
== UPL_NULL
|| upl2
== UPL_NULL
|| upl1
== upl2
|| ((upl1
->flags
& UPL_VECTOR
)==UPL_VECTOR
) || ((upl2
->flags
& UPL_VECTOR
)==UPL_VECTOR
)) {
8151 return KERN_INVALID_ARGUMENT
;
8154 upls_locked
= FALSE
;
8157 * Since we need to lock both UPLs at the same time,
8158 * avoid deadlocks by always taking locks in the same order.
8167 upls_locked
= TRUE
; /* the UPLs will need to be unlocked */
8169 object1
= upl1
->map_object
;
8170 object2
= upl2
->map_object
;
8172 if (upl1
->offset
!= 0 || upl2
->offset
!= 0 ||
8173 upl1
->size
!= upl2
->size
) {
8175 * We deal only with full objects, not subsets.
8176 * That's because we exchange the entire backing store info
8177 * for the objects: pager, resident pages, etc... We can't do
8180 retval
= KERN_INVALID_VALUE
;
8185 * Tranpose the VM objects' backing store.
8187 retval
= vm_object_transpose(object1
, object2
,
8188 (vm_object_size_t
) upl1
->size
);
8190 if (retval
== KERN_SUCCESS
) {
8192 * Make each UPL point to the correct VM object, i.e. the
8193 * object holding the pages that the UPL refers to...
8195 #if CONFIG_IOSCHED || UPL_DEBUG
8196 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)) {
8197 vm_object_lock(object1
);
8198 vm_object_lock(object2
);
8200 if (upl1
->flags
& UPL_TRACKED_BY_OBJECT
)
8201 queue_remove(&object1
->uplq
, upl1
, upl_t
, uplq
);
8202 if (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)
8203 queue_remove(&object2
->uplq
, upl2
, upl_t
, uplq
);
8205 upl1
->map_object
= object2
;
8206 upl2
->map_object
= object1
;
8208 #if CONFIG_IOSCHED || UPL_DEBUG
8209 if (upl1
->flags
& UPL_TRACKED_BY_OBJECT
)
8210 queue_enter(&object2
->uplq
, upl1
, upl_t
, uplq
);
8211 if (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)
8212 queue_enter(&object1
->uplq
, upl2
, upl_t
, uplq
);
8213 if ((upl1
->flags
& UPL_TRACKED_BY_OBJECT
) || (upl2
->flags
& UPL_TRACKED_BY_OBJECT
)) {
8214 vm_object_unlock(object2
);
8215 vm_object_unlock(object1
);
8227 upls_locked
= FALSE
;
void
upl_range_needed(
	upl_t	upl,
	int	index,
	int	count)
{
	upl_page_info_t	*user_page_list;
	int		size_in_pages;

	if ( !(upl->flags & UPL_INTERNAL) || count <= 0)
		return;

	size_in_pages = upl->size / PAGE_SIZE;

	user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));

	while (count-- && index < size_in_pages)
		user_page_list[index++].needed = TRUE;
}
/*
 * Rationale: the user might have some encrypted data on disk (via
 * FileVault or any other mechanism).  That data is then decrypted in
 * memory, which is safe as long as the machine is secure.  But that
 * decrypted data in memory could be paged out to disk by the default
 * pager.  The data would then be stored on disk in clear (not encrypted)
 * and it could be accessed by anyone who gets physical access to the
 * disk (if the laptop or the disk gets stolen for example).  This weakens
 * the security offered by FileVault.
 *
 * Solution: the default pager will optionally request that all the
 * pages it gathers for pageout be encrypted, via the UPL interfaces,
 * before it sends this UPL to disk via the vnode_pageout() path.
 *
 * To avoid disrupting the VM LRU algorithms, we want to keep the
 * clean-in-place mechanisms, which allow us to send some extra pages to
 * swap (clustering) without actually removing them from the user's
 * address space.  We don't want the user to unknowingly access encrypted
 * data, so we have to actually remove the encrypted pages from the page
 * table.  When the user accesses the data, the hardware will fail to
 * locate the virtual page in its page table and will trigger a page
 * fault.  We can then decrypt the page and enter it in the page table
 * again.  Whenever we allow the user to access the contents of a page,
 * we have to make sure it's not encrypted.
 */
/*
 * Reserve of virtual addresses in the kernel address space.
 * We need to map the physical pages in the kernel, so that we
 * can call the encryption/decryption routines with a kernel
 * virtual address.  We keep this pool of pre-allocated kernel
 * virtual addresses so that we don't have to scan the kernel's
 * virtual address space each time we need to encrypt or decrypt
 * a page.
 * It would be nice to be able to encrypt and decrypt in physical
 * mode but that might not always be more efficient...
 */
decl_simple_lock_data(,vm_paging_lock)
#define VM_PAGING_NUM_PAGES	64
vm_map_offset_t	vm_paging_base_address = 0;
boolean_t	vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
int		vm_paging_max_index = 0;
int		vm_paging_page_waiter = 0;
int		vm_paging_page_waiter_total = 0;
unsigned long	vm_paging_no_kernel_page = 0;
unsigned long	vm_paging_objects_mapped = 0;
unsigned long	vm_paging_pages_mapped = 0;
unsigned long	vm_paging_objects_mapped_slow = 0;
unsigned long	vm_paging_pages_mapped_slow = 0;
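/*
 * Illustrative note (not in the original source): the pool is a contiguous
 * VM_PAGING_NUM_PAGES-page window starting at vm_paging_base_address, with a
 * parallel in-use flag per slot.  Translating between a slot index and its
 * kernel virtual address is just:
 *
 *	addr = vm_paging_base_address + (i * PAGE_SIZE);
 *	i    = (addr - vm_paging_base_address) >> PAGE_SHIFT;
 *
 * which is what vm_paging_map_object() and vm_paging_unmap_object() rely on.
 */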
void
vm_paging_map_init(void)
{
	kern_return_t	kr;
	vm_map_offset_t	page_map_offset;
	vm_map_entry_t	map_entry;

	assert(vm_paging_base_address == 0);

	/*
	 * Initialize our pool of pre-allocated kernel
	 * virtual addresses.
	 */
	page_map_offset = 0;
	kr = vm_map_find_space(kernel_map,
			       &page_map_offset,
			       VM_PAGING_NUM_PAGES * PAGE_SIZE,
			       0,
			       0,
			       &map_entry);
	if (kr != KERN_SUCCESS) {
		panic("vm_paging_map_init: kernel_map full\n");
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = page_map_offset;
	map_entry->protection = VM_PROT_NONE;
	map_entry->max_protection = VM_PROT_NONE;
	map_entry->permanent = TRUE;
	vm_object_reference(kernel_object);
	vm_map_unlock(kernel_map);

	assert(vm_paging_base_address == 0);
	vm_paging_base_address = page_map_offset;
}
8347 * vm_paging_map_object:
8348 * Maps part of a VM object's pages in the kernel
8349 * virtual address space, using the pre-allocated
8350 * kernel virtual addresses, if possible.
8352 * The VM object is locked. This lock will get
8353 * dropped and re-acquired though, so the caller
8354 * must make sure the VM object is kept alive
8355 * (by holding a VM map that has a reference
8356 * on it, for example, or taking an extra reference).
8357 * The page should also be kept busy to prevent
8358 * it from being reclaimed.
8361 vm_paging_map_object(
8364 vm_object_offset_t offset
,
8365 vm_prot_t protection
,
8366 boolean_t can_unlock_object
,
8367 vm_map_size_t
*size
, /* IN/OUT */
8368 vm_map_offset_t
*address
, /* OUT */
8369 boolean_t
*need_unmap
) /* OUT */
8372 vm_map_offset_t page_map_offset
;
8373 vm_map_size_t map_size
;
8374 vm_object_offset_t object_offset
;
	if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
		/* use permanent 1-to-1 kernel mapping of physical memory ? */

		*address = (vm_map_offset_t)
			PHYSMAP_PTOV((pmap_paddr_t)page->phys_page <<
				     PAGE_SHIFT);
		*need_unmap = FALSE;
		return KERN_SUCCESS;
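		/*
		 * Illustrative note (not in the original source): on platforms with
		 * a permanent 1-to-1 "physmap" of physical memory, a single page
		 * never needs a temporary mapping at all -- the kernel virtual
		 * address comes straight from the physical page number:
		 *
		 *	vaddr = PHYSMAP_PTOV((pmap_paddr_t)page->phys_page << PAGE_SHIFT);
		 *
		 * so no map entry is consumed and *need_unmap stays FALSE.
		 */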
8386 #warn "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..."
8391 * Use one of the pre-allocated kernel virtual addresses
8392 * and just enter the VM page in the kernel address space
8393 * at that virtual address.
8395 simple_lock(&vm_paging_lock
);
8398 * Try and find an available kernel virtual address
8399 * from our pre-allocated pool.
8401 page_map_offset
= 0;
8403 for (i
= 0; i
< VM_PAGING_NUM_PAGES
; i
++) {
8404 if (vm_paging_page_inuse
[i
] == FALSE
) {
8406 vm_paging_base_address
+
8411 if (page_map_offset
!= 0) {
8412 /* found a space to map our page ! */
8416 if (can_unlock_object
) {
8418 * If we can afford to unlock the VM object,
8419 * let's take the slow path now...
8424 * We can't afford to unlock the VM object, so
8425 * let's wait for a space to become available...
8427 vm_paging_page_waiter_total
++;
8428 vm_paging_page_waiter
++;
8429 kr
= assert_wait((event_t
)&vm_paging_page_waiter
, THREAD_UNINT
);
8430 if (kr
== THREAD_WAITING
) {
8431 simple_unlock(&vm_paging_lock
);
8432 kr
= thread_block(THREAD_CONTINUE_NULL
);
8433 simple_lock(&vm_paging_lock
);
8435 vm_paging_page_waiter
--;
8436 /* ... and try again */
8439 if (page_map_offset
!= 0) {
8441 * We found a kernel virtual address;
8442 * map the physical page to that virtual address.
8444 if (i
> vm_paging_max_index
) {
8445 vm_paging_max_index
= i
;
8447 vm_paging_page_inuse
[i
] = TRUE
;
8448 simple_unlock(&vm_paging_lock
);
8450 page
->pmapped
= TRUE
;
8453 * Keep the VM object locked over the PMAP_ENTER
8454 * and the actual use of the page by the kernel,
8455 * or this pmap mapping might get undone by a
8456 * vm_object_pmap_protect() call...
8458 PMAP_ENTER(kernel_pmap
,
8465 vm_paging_objects_mapped
++;
8466 vm_paging_pages_mapped
++;
8467 *address
= page_map_offset
;
8470 /* all done and mapped, ready to use ! */
8471 return KERN_SUCCESS
;
8475 * We ran out of pre-allocated kernel virtual
8476 * addresses. Just map the page in the kernel
8477 * the slow and regular way.
8479 vm_paging_no_kernel_page
++;
8480 simple_unlock(&vm_paging_lock
);
8483 if (! can_unlock_object
) {
8486 *need_unmap
= FALSE
;
8487 return KERN_NOT_SUPPORTED
;
8490 object_offset
= vm_object_trunc_page(offset
);
8491 map_size
= vm_map_round_page(*size
,
8492 VM_MAP_PAGE_MASK(kernel_map
));
8495 * Try and map the required range of the object
8499 vm_object_reference_locked(object
); /* for the map entry */
8500 vm_object_unlock(object
);
8502 kr
= vm_map_enter(kernel_map
,
8513 if (kr
!= KERN_SUCCESS
) {
8516 *need_unmap
= FALSE
;
8517 vm_object_deallocate(object
); /* for the map entry */
8518 vm_object_lock(object
);
8525 * Enter the mapped pages in the page table now.
8527 vm_object_lock(object
);
8529 * VM object must be kept locked from before PMAP_ENTER()
8530 * until after the kernel is done accessing the page(s).
8531 * Otherwise, the pmap mappings in the kernel could be
8532 * undone by a call to vm_object_pmap_protect().
8535 for (page_map_offset
= 0;
8537 map_size
-= PAGE_SIZE_64
, page_map_offset
+= PAGE_SIZE_64
) {
8539 page
= vm_page_lookup(object
, offset
+ page_map_offset
);
8540 if (page
== VM_PAGE_NULL
) {
8541 printf("vm_paging_map_object: no page !?");
8542 vm_object_unlock(object
);
8543 kr
= vm_map_remove(kernel_map
, *address
, *size
,
8545 assert(kr
== KERN_SUCCESS
);
8548 *need_unmap
= FALSE
;
8549 vm_object_lock(object
);
8550 return KERN_MEMORY_ERROR
;
8552 page
->pmapped
= TRUE
;
8554 //assert(pmap_verify_free(page->phys_page));
8555 PMAP_ENTER(kernel_pmap
,
8556 *address
+ page_map_offset
,
8564 vm_paging_objects_mapped_slow
++;
8565 vm_paging_pages_mapped_slow
+= (unsigned long) (map_size
/ PAGE_SIZE_64
);
8569 return KERN_SUCCESS
;
8574 * vm_paging_unmap_object:
8575 * Unmaps part of a VM object's pages from the kernel
8576 * virtual address space.
8578 * The VM object is locked. This lock will get
8579 * dropped and re-acquired though.
8582 vm_paging_unmap_object(
8584 vm_map_offset_t start
,
8585 vm_map_offset_t end
)
8590 if ((vm_paging_base_address
== 0) ||
8591 (start
< vm_paging_base_address
) ||
8592 (end
> (vm_paging_base_address
8593 + (VM_PAGING_NUM_PAGES
* PAGE_SIZE
)))) {
8595 * We didn't use our pre-allocated pool of
8596 * kernel virtual address. Deallocate the
8599 if (object
!= VM_OBJECT_NULL
) {
8600 vm_object_unlock(object
);
8602 kr
= vm_map_remove(kernel_map
, start
, end
, VM_MAP_NO_FLAGS
);
8603 if (object
!= VM_OBJECT_NULL
) {
8604 vm_object_lock(object
);
8606 assert(kr
== KERN_SUCCESS
);
8609 * We used a kernel virtual address from our
8610 * pre-allocated pool. Put it back in the pool
8613 assert(end
- start
== PAGE_SIZE
);
8614 i
= (int) ((start
- vm_paging_base_address
) >> PAGE_SHIFT
);
8615 assert(i
>= 0 && i
< VM_PAGING_NUM_PAGES
);
8617 /* undo the pmap mapping */
8618 pmap_remove(kernel_pmap
, start
, end
);
8620 simple_lock(&vm_paging_lock
);
8621 vm_paging_page_inuse
[i
] = FALSE
;
8622 if (vm_paging_page_waiter
) {
8623 thread_wakeup(&vm_paging_page_waiter
);
8625 simple_unlock(&vm_paging_lock
);
8632 * "iv" is the "initial vector". Ideally, we want to
8633 * have a different one for each page we encrypt, so that
8634 * crackers can't find encryption patterns too easily.
8636 #define SWAP_CRYPT_AES_KEY_SIZE 128 /* XXX 192 and 256 don't work ! */
8637 boolean_t swap_crypt_ctx_initialized
= FALSE
;
8638 uint32_t swap_crypt_key
[8]; /* big enough for a 256 key */
8639 aes_ctx swap_crypt_ctx
;
8640 const unsigned char swap_crypt_null_iv
[AES_BLOCK_SIZE
] = {0xa, };
8643 boolean_t swap_crypt_ctx_tested
= FALSE
;
8644 unsigned char swap_crypt_test_page_ref
[4096] __attribute__((aligned(4096)));
8645 unsigned char swap_crypt_test_page_encrypt
[4096] __attribute__((aligned(4096)));
8646 unsigned char swap_crypt_test_page_decrypt
[4096] __attribute__((aligned(4096)));
/*
 * Initialize the encryption context: key and key size.
 */
void swap_crypt_ctx_initialize(void); /* forward */
void
swap_crypt_ctx_initialize(void)
{
	unsigned int	i;

	/*
	 * No need for locking to protect swap_crypt_ctx_initialized
	 * because the first use of encryption will come from the
	 * pageout thread (we won't pagein before there's been a pageout)
	 * and there's only one pageout thread.
	 */
	if (swap_crypt_ctx_initialized == FALSE) {
		for (i = 0;
		     i < (sizeof (swap_crypt_key) /
			  sizeof (swap_crypt_key[0]));
		     i++) {
			swap_crypt_key[i] = random();
		}
		aes_encrypt_key((const unsigned char *) swap_crypt_key,
				SWAP_CRYPT_AES_KEY_SIZE,
				&swap_crypt_ctx.encrypt);
		aes_decrypt_key((const unsigned char *) swap_crypt_key,
				SWAP_CRYPT_AES_KEY_SIZE,
				&swap_crypt_ctx.decrypt);
		swap_crypt_ctx_initialized = TRUE;
	}

#if DEBUG
	/*
	 * Validate the encryption algorithms.
	 */
	if (swap_crypt_ctx_tested == FALSE) {
		/* initialize */
		for (i = 0; i < 4096; i++) {
			swap_crypt_test_page_ref[i] = (char) i;
		}
		/* encrypt */
		aes_encrypt_cbc(swap_crypt_test_page_ref,
				swap_crypt_null_iv,
				PAGE_SIZE / AES_BLOCK_SIZE,
				swap_crypt_test_page_encrypt,
				&swap_crypt_ctx.encrypt);
		/* decrypt */
		aes_decrypt_cbc(swap_crypt_test_page_encrypt,
				swap_crypt_null_iv,
				PAGE_SIZE / AES_BLOCK_SIZE,
				swap_crypt_test_page_decrypt,
				&swap_crypt_ctx.decrypt);
		/* compare result with original */
		for (i = 0; i < 4096; i++) {
			if (swap_crypt_test_page_decrypt[i] !=
			    swap_crypt_test_page_ref[i]) {
				panic("encryption test failed");
			}
		}

		/* encrypt again */
		aes_encrypt_cbc(swap_crypt_test_page_decrypt,
				swap_crypt_null_iv,
				PAGE_SIZE / AES_BLOCK_SIZE,
				swap_crypt_test_page_decrypt,
				&swap_crypt_ctx.encrypt);
		/* decrypt in place */
		aes_decrypt_cbc(swap_crypt_test_page_decrypt,
				swap_crypt_null_iv,
				PAGE_SIZE / AES_BLOCK_SIZE,
				swap_crypt_test_page_decrypt,
				&swap_crypt_ctx.decrypt);
		for (i = 0; i < 4096; i++) {
			if (swap_crypt_test_page_decrypt[i] !=
			    swap_crypt_test_page_ref[i]) {
				panic("in place encryption test failed");
			}
		}

		swap_crypt_ctx_tested = TRUE;
	}
#endif /* DEBUG */
}
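
/*
 * The DEBUG-only self-test above exercises both a separate-buffer and an
 * in-place round trip through AES-CBC before the context is trusted; any
 * mismatch against the reference page is treated as fatal (panic) rather
 * than a recoverable error, since paging would otherwise silently corrupt
 * user memory.
 */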
/*
 * vm_page_encrypt:
 *	Encrypt the given page, for secure paging.
 *	The page might already be mapped at kernel virtual
 *	address "kernel_mapping_offset".  Otherwise, we need
 *	to map it.
 *
 * Context:
 *	The page's object is locked, but this lock will be released
 *	and re-acquired.
 *	The page is busy and not accessible by users (not entered in any pmap).
 */
void
vm_page_encrypt(
	vm_page_t	page,
	vm_map_offset_t	kernel_mapping_offset)
{
	kern_return_t	kr;
	vm_map_size_t	kernel_mapping_size;
	boolean_t	kernel_mapping_needs_unmap;
	vm_offset_t	kernel_vaddr;
	union {
		unsigned char	aes_iv[AES_BLOCK_SIZE];
		struct {
			memory_object_t		pager_object;
			vm_object_offset_t	paging_offset;
		} vm;
	} encrypt_iv;

	if (! vm_pages_encrypted) {
		vm_pages_encrypted = TRUE;
	}

	assert(page->busy);

	if (page->encrypted) {
		/*
		 * Already encrypted: no need to do it again.
		 */
		vm_page_encrypt_already_encrypted_counter++;
		return;
	}
	assert(page->dirty || page->precious);

	ASSERT_PAGE_DECRYPTED(page);

	/*
	 * Take a paging-in-progress reference to keep the object
	 * alive even if we have to unlock it (in vm_paging_map_object()
	 * for example)...
	 */
	vm_object_paging_begin(page->object);

	if (kernel_mapping_offset == 0) {
		/*
		 * The page hasn't already been mapped in kernel space
		 * by the caller.  Map it now, so that we can access
		 * its contents and encrypt them.
		 */
		kernel_mapping_size = PAGE_SIZE;
		kernel_mapping_needs_unmap = FALSE;
		kr = vm_paging_map_object(page,
					  page->object,
					  page->offset,
					  VM_PROT_READ | VM_PROT_WRITE,
					  FALSE,
					  &kernel_mapping_size,
					  &kernel_mapping_offset,
					  &kernel_mapping_needs_unmap);
		if (kr != KERN_SUCCESS) {
			panic("vm_page_encrypt: "
			      "could not map page in kernel: 0x%x\n",
			      kr);
		}
	} else {
		kernel_mapping_size = 0;
		kernel_mapping_needs_unmap = FALSE;
	}
	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);

	if (swap_crypt_ctx_initialized == FALSE) {
		swap_crypt_ctx_initialize();
	}
	assert(swap_crypt_ctx_initialized);

	/*
	 * Prepare an "initial vector" for the encryption.
	 * We use the "pager" and the "paging_offset" for that
	 * page to obfuscate the encrypted data a bit more and
	 * prevent crackers from finding patterns that they could
	 * use to break the key.
	 */
	bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
	encrypt_iv.vm.pager_object = page->object->pager;
	encrypt_iv.vm.paging_offset =
		page->object->paging_offset + page->offset;

	/* encrypt the "initial vector" */
	aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
			swap_crypt_null_iv,
			1,
			&encrypt_iv.aes_iv[0],
			&swap_crypt_ctx.encrypt);

	/*
	 * Encrypt the page.
	 */
	aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
			&encrypt_iv.aes_iv[0],
			PAGE_SIZE / AES_BLOCK_SIZE,
			(unsigned char *) kernel_vaddr,
			&swap_crypt_ctx.encrypt);

	vm_page_encrypt_counter++;

	/*
	 * Unmap the page from the kernel's address space,
	 * if we had to map it ourselves.  Otherwise, let
	 * the caller undo the mapping if needed.
	 */
	if (kernel_mapping_needs_unmap) {
		vm_paging_unmap_object(page->object,
				       kernel_mapping_offset,
				       kernel_mapping_offset + kernel_mapping_size);
	}

	/*
	 * Clear the "reference" and "modified" bits.
	 * This should clean up any impact the encryption had
	 * on them.
	 * The page was kept busy and disconnected from all pmaps,
	 * so it can't have been referenced or modified from user
	 * space.
	 * The software bits will be reset later after the I/O
	 * has completed (in upl_commit_range()).
	 */
	pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);

	page->encrypted = TRUE;

	vm_object_paging_end(page->object);
}
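
/*
 * Rough sketch of the per-page IV derivation used above (this restates the
 * code itself, it is not a separate specification):
 *
 *	iv_plain = { pager_object pointer, paging_offset of the page }
 *	iv       = AES_CBC_encrypt(iv_plain, key = swap_crypt_key,
 *	                           iv = swap_crypt_null_iv, 1 block)
 *
 * so two pages backed by the same pager at different offsets never share an
 * IV, even though they share the single boot-time key.
 */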
/*
 * vm_page_decrypt:
 *	Decrypt the given page.
 *	The page might already be mapped at kernel virtual
 *	address "kernel_mapping_offset".  Otherwise, we need
 *	to map it.
 *
 * Context:
 *	The page's VM object is locked but will be unlocked and relocked.
 *	The page is busy and not accessible by users (not entered in any pmap).
 */
void
vm_page_decrypt(
	vm_page_t	page,
	vm_map_offset_t	kernel_mapping_offset)
{
	kern_return_t	kr;
	vm_map_size_t	kernel_mapping_size;
	vm_offset_t	kernel_vaddr;
	boolean_t	kernel_mapping_needs_unmap;
	union {
		unsigned char	aes_iv[AES_BLOCK_SIZE];
		struct {
			memory_object_t		pager_object;
			vm_object_offset_t	paging_offset;
		} vm;
	} decrypt_iv;
	boolean_t	was_dirty;

	assert(page->busy);
	assert(page->encrypted);

	was_dirty = page->dirty;

	/*
	 * Take a paging-in-progress reference to keep the object
	 * alive even if we have to unlock it (in vm_paging_map_object()
	 * for example)...
	 */
	vm_object_paging_begin(page->object);

	if (kernel_mapping_offset == 0) {
		/*
		 * The page hasn't already been mapped in kernel space
		 * by the caller.  Map it now, so that we can access
		 * its contents and decrypt them.
		 */
		kernel_mapping_size = PAGE_SIZE;
		kernel_mapping_needs_unmap = FALSE;
		kr = vm_paging_map_object(page,
					  page->object,
					  page->offset,
					  VM_PROT_READ | VM_PROT_WRITE,
					  FALSE,
					  &kernel_mapping_size,
					  &kernel_mapping_offset,
					  &kernel_mapping_needs_unmap);
		if (kr != KERN_SUCCESS) {
			panic("vm_page_decrypt: "
			      "could not map page in kernel: 0x%x\n",
			      kr);
		}
	} else {
		kernel_mapping_size = 0;
		kernel_mapping_needs_unmap = FALSE;
	}
	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);

	assert(swap_crypt_ctx_initialized);

	/*
	 * Prepare an "initial vector" for the decryption.
	 * It has to be the same as the "initial vector" we
	 * used to encrypt that page.
	 */
	bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
	decrypt_iv.vm.pager_object = page->object->pager;
	decrypt_iv.vm.paging_offset =
		page->object->paging_offset + page->offset;

	/* encrypt the "initial vector" */
	aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
			swap_crypt_null_iv,
			1,
			&decrypt_iv.aes_iv[0],
			&swap_crypt_ctx.encrypt);

	/*
	 * Decrypt the page.
	 */
	aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
			&decrypt_iv.aes_iv[0],
			PAGE_SIZE / AES_BLOCK_SIZE,
			(unsigned char *) kernel_vaddr,
			&swap_crypt_ctx.decrypt);
	vm_page_decrypt_counter++;

	/*
	 * Unmap the page from the kernel's address space,
	 * if we had to map it ourselves.  Otherwise, let
	 * the caller undo the mapping if needed.
	 */
	if (kernel_mapping_needs_unmap) {
		vm_paging_unmap_object(page->object,
				       kernel_vaddr,
				       kernel_vaddr + PAGE_SIZE);
	}

	if (was_dirty) {
		/*
		 * The pager did not specify that the page would be
		 * clean when it got paged in, so let's not clean it here
		 * either.
		 */
	} else {
		/*
		 * After decryption, the page is actually still clean.
		 * It was encrypted as part of paging, which "cleans"
		 * the "dirty" pages.
		 * No one could access it after it was encrypted
		 * and the decryption doesn't count.
		 */
		page->dirty = FALSE;
		assert (page->cs_validated == FALSE);
		pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
	}
	page->encrypted = FALSE;

	/*
	 * We've just modified the page's contents via the data cache and part
	 * of the new contents might still be in the cache and not yet in RAM.
	 * Since the page is now available and might get gathered in a UPL to
	 * be part of a DMA transfer from a driver that expects the memory to
	 * be coherent at this point, we have to flush the data cache.
	 */
	pmap_sync_page_attributes_phys(page->phys_page);
	/*
	 * Since the page is not mapped yet, some code might assume that it
	 * doesn't need to invalidate the instruction cache when writing to
	 * that page.  That code relies on "pmapped" being FALSE, so that the
	 * caches get synchronized when the page is first mapped.
	 */
	assert(pmap_verify_free(page->phys_page));
	page->pmapped = FALSE;
	page->wpmapped = FALSE;

	vm_object_paging_end(page->object);
}
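
/*
 * Dirty-bit handling above, summarized from the code itself: a page that
 * was dirty when it came in keeps its dirty bit, but a page that was clean
 * before decryption is explicitly marked clean again, because the only
 * "modification" it saw was the paging machinery's own encrypt/decrypt
 * round trip, which must not trigger an extra pageout later.
 */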
#if DEVELOPMENT || DEBUG
unsigned long upl_encrypt_upls = 0;
unsigned long upl_encrypt_pages = 0;
#endif

/*
 * upl_encrypt:
 *	Encrypts all the pages in the UPL, within the specified range.
 */
void
upl_encrypt(
	upl_t		upl,
	upl_offset_t	crypt_offset,
	upl_size_t	crypt_size)
{
	upl_size_t		upl_size, subupl_size = crypt_size;
	upl_offset_t		offset_in_upl, subupl_offset = crypt_offset;
	vm_object_t		upl_object;
	vm_object_offset_t	upl_offset;
	vm_page_t		page;
	vm_object_t		shadow_object;
	vm_object_offset_t	shadow_offset;
	vm_object_offset_t	paging_offset;
	vm_object_offset_t	base_offset;
	int			isVectorUPL = 0;
	upl_t			vector_upl = NULL;

	if((isVectorUPL = vector_upl_is_valid(upl)))
		vector_upl = upl;

process_upl_to_encrypt:
	if(isVectorUPL) {
		crypt_size = subupl_size;
		crypt_offset = subupl_offset;
		upl = vector_upl_subupl_byoffset(vector_upl, &crypt_offset, &crypt_size);
		if(upl == NULL)
			panic("upl_encrypt: Accessing a sub-upl that doesn't exist\n");
		subupl_size -= crypt_size;
		subupl_offset += crypt_size;
	}

#if DEVELOPMENT || DEBUG
	upl_encrypt_upls++;
	upl_encrypt_pages += crypt_size / PAGE_SIZE;
#endif
	upl_object = upl->map_object;
	upl_offset = upl->offset;
	upl_size = upl->size;

	vm_object_lock(upl_object);

	/*
	 * Find the VM object that contains the actual pages.
	 */
	if (upl_object->pageout) {
		shadow_object = upl_object->shadow;
		/*
		 * The offset in the shadow object is actually also
		 * accounted for in upl->offset.  It possibly shouldn't be
		 * this way, but for now don't account for it twice.
		 */
		shadow_offset = 0;
		assert(upl_object->paging_offset == 0);	/* XXX ? */
		vm_object_lock(shadow_object);
	} else {
		shadow_object = upl_object;
		shadow_offset = 0;
	}

	paging_offset = shadow_object->paging_offset;
	vm_object_paging_begin(shadow_object);

	if (shadow_object != upl_object)
		vm_object_unlock(upl_object);

	base_offset = shadow_offset;
	base_offset += upl_offset;
	base_offset += crypt_offset;
	base_offset -= paging_offset;

	assert(crypt_offset + crypt_size <= upl_size);

	for (offset_in_upl = 0;
	     offset_in_upl < crypt_size;
	     offset_in_upl += PAGE_SIZE) {
		page = vm_page_lookup(shadow_object,
				      base_offset + offset_in_upl);
		if (page == VM_PAGE_NULL) {
			panic("upl_encrypt: "
			      "no page for (obj=%p,off=0x%llx+0x%x)!\n",
			      shadow_object,
			      base_offset,
			      offset_in_upl);
		}
		/*
		 * Disconnect the page from all pmaps, so that nobody can
		 * access it while it's encrypted.  After that point, all
		 * accesses to this page will cause a page fault and block
		 * while the page is busy being encrypted.  After the
		 * encryption completes, any access will cause a
		 * page fault and the page gets decrypted at that time.
		 */
		pmap_disconnect(page->phys_page);
		vm_page_encrypt(page, 0);

		if (vm_object_lock_avoid(shadow_object)) {
			/*
			 * Give vm_pageout_scan() a chance to convert more
			 * pages from "clean-in-place" to "clean-and-free",
			 * if it's interested in the same pages we selected
			 * in this cluster.
			 */
			vm_object_unlock(shadow_object);
			mutex_pause(2);
			vm_object_lock(shadow_object);
		}
	}

	vm_object_paging_end(shadow_object);
	vm_object_unlock(shadow_object);

	if(isVectorUPL && subupl_size)
		goto process_upl_to_encrypt;
}

#else /* ENCRYPTED_SWAP */
void
upl_encrypt(
	__unused upl_t		upl,
	__unused upl_offset_t	crypt_offset,
	__unused upl_size_t	crypt_size)
{
}

void
vm_page_encrypt(
	__unused vm_page_t		page,
	__unused vm_map_offset_t	kernel_mapping_offset)
{
}

void
vm_page_decrypt(
	__unused vm_page_t		page,
	__unused vm_map_offset_t	kernel_mapping_offset)
{
}

#endif /* ENCRYPTED_SWAP */
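
/*
 * Flow note for upl_encrypt(), restating the loop above: for a vectored
 * UPL the routine repeatedly pulls the sub-UPL covering the next
 * (offset, size) chunk via vector_upl_subupl_byoffset(), encrypts each
 * resident page after disconnecting it from all pmaps, and loops via
 * "process_upl_to_encrypt" until the requested range is exhausted.  When
 * ENCRYPTED_SWAP is not configured, the three entry points compile to
 * empty stubs so callers need no conditional code of their own.
 */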
/*
 * page->object must be locked
 */
void
vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
{
	if (!queues_locked) {
		vm_page_lockspin_queues();
	}

	/*
	 * need to drop the laundry count...
	 * we may also need to remove it
	 * from the I/O paging queue...
	 * vm_pageout_throttle_up handles both cases
	 *
	 * the laundry and pageout_queue flags are cleared...
	 */
	vm_pageout_throttle_up(page);

	vm_page_steal_pageout_page++;

	if (!queues_locked) {
		vm_page_unlock_queues();
	}
}
upl_t
vector_upl_create(vm_offset_t upl_offset)
{
	int	vector_upl_size = sizeof(struct _vector_upl);
	int	i = 0;
	upl_t	upl;
	vector_upl_t vector_upl = (vector_upl_t)kalloc(vector_upl_size);

	upl = upl_create(0, UPL_VECTOR, 0);
	upl->vector_upl = vector_upl;
	upl->offset = upl_offset;
	vector_upl->size = 0;
	vector_upl->offset = upl_offset;
	vector_upl->invalid_upls = 0;
	vector_upl->num_upls = 0;
	vector_upl->pagelist = NULL;

	for(i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
		vector_upl->upl_iostates[i].size = 0;
		vector_upl->upl_iostates[i].offset = 0;
	}
	return upl;
}
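
/*
 * Typical lifecycle of a vectored UPL, as a sketch pieced together from the
 * helpers defined below (the real call sites live elsewhere, e.g. in the
 * cluster I/O code, so the exact ordering there may differ):
 *
 *	upl_t v = vector_upl_create(offset);
 *	vector_upl_set_subupl(v, subupl, io_size);   // repeat per sub-UPL
 *	vector_upl_set_iostate(v, subupl, off, len);
 *	vector_upl_set_pagelist(v);
 *	... I/O is issued against the sub-UPLs ...
 *	vector_upl_set_subupl(v, subupl, 0);         // mark a sub-UPL done
 *	vector_upl_deallocate(v);                    // once all are invalid
 */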
void
vector_upl_deallocate(upl_t upl)
{
	if(upl) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			if(vector_upl->invalid_upls != vector_upl->num_upls)
				panic("Deallocating non-empty Vectored UPL\n");
			kfree(vector_upl->pagelist, (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
			vector_upl->invalid_upls = 0;
			vector_upl->num_upls = 0;
			vector_upl->pagelist = NULL;
			vector_upl->size = 0;
			vector_upl->offset = 0;
			kfree(vector_upl, sizeof(struct _vector_upl));
			vector_upl = (vector_upl_t)0xfeedfeed;
		}
		else
			panic("vector_upl_deallocate was passed a non-vectored upl\n");
	}
	else
		panic("vector_upl_deallocate was passed a NULL upl\n");
}
boolean_t
vector_upl_is_valid(upl_t upl)
{
	if(upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef)
			return FALSE;
		else
			return TRUE;
	}
	return FALSE;
}
boolean_t
vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			if(subupl) {
				if(io_size) {
					if(io_size < PAGE_SIZE)
						io_size = PAGE_SIZE;
					subupl->vector_upl = (void*)vector_upl;
					vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
					vector_upl->size += io_size;
					upl->size += io_size;
				}
				else {
					uint32_t i = 0, invalid_upls = 0;
					for(i = 0; i < vector_upl->num_upls; i++) {
						if(vector_upl->upl_elems[i] == subupl)
							break;
					}
					if(i == vector_upl->num_upls)
						panic("Trying to remove sub-upl when none exists");

					vector_upl->upl_elems[i] = NULL;
					invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1);
					if(invalid_upls == vector_upl->num_upls)
						return TRUE;
					else
						return FALSE;
				}
			}
			else
				panic("vector_upl_set_subupl was passed a NULL upl element\n");
		}
		else
			panic("vector_upl_set_subupl was passed a non-vectored upl\n");
	}
	else
		panic("vector_upl_set_subupl was passed a NULL upl\n");

	return FALSE;
}
void
vector_upl_set_pagelist(upl_t upl)
{
	if(vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0;

			vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE));

			for(i = 0; i < vector_upl->num_upls; i++) {
				cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE;
				bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
				pagelist_size += cur_upl_pagelist_size;
				if(vector_upl->upl_elems[i]->highest_page > upl->highest_page)
					upl->highest_page = vector_upl->upl_elems[i]->highest_page;
			}
			assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) );
		}
		else
			panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
	}
	else
		panic("vector_upl_set_pagelist was passed a NULL upl\n");
}
upl_t
vector_upl_subupl_byindex(upl_t upl, uint32_t index)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			if(index < vector_upl->num_upls)
				return vector_upl->upl_elems[index];
		}
		else
			panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
	}
	return NULL;
}
upl_t
vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
{
	if(vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			upl_t subupl = NULL;
			vector_upl_iostates_t subupl_state;

			for(i = 0; i < vector_upl->num_upls; i++) {
				subupl = vector_upl->upl_elems[i];
				subupl_state = vector_upl->upl_iostates[i];
				if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
					/* We could have been passed an offset/size pair that belongs
					 * to a UPL element that has already been committed/aborted.
					 * If so, return NULL.
					 */
					if(subupl == NULL)
						return NULL;
					if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
						*upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
						if(*upl_size > subupl_state.size)
							*upl_size = subupl_state.size;
					}
					if(*upl_offset >= subupl_state.offset)
						*upl_offset -= subupl_state.offset;
					else if(i)
						panic("Vector UPL offset miscalculation\n");
					return subupl;
				}
			}
		}
		else
			panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
	}
	return NULL;
}
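
/*
 * What the clamping above does, in plain terms (derived from the code, not
 * from a separate spec): the caller passes an offset/size relative to the
 * whole vectored UPL; the routine finds the first sub-UPL whose recorded
 * iostate covers that offset, shrinks *upl_size so it does not run past
 * that sub-UPL's iostate, and rebases *upl_offset so it becomes relative
 * to the sub-UPL before returning it.
 */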
void
vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
{
	*v_upl_submap = NULL;

	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			*v_upl_submap = vector_upl->submap;
			*submap_dst_addr = vector_upl->submap_dst_addr;
		}
		else
			panic("vector_upl_get_submap was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_get_submap was passed a null UPL\n");
}
void
vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			vector_upl->submap = submap;
			vector_upl->submap_dst_addr = submap_dst_addr;
		}
		else
			panic("vector_upl_set_submap was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_set_submap was passed a NULL UPL\n");
}
void
vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
{
	if(vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			for(i = 0; i < vector_upl->num_upls; i++) {
				if(vector_upl->upl_elems[i] == subupl)
					break;
			}

			if(i == vector_upl->num_upls)
				panic("setting sub-upl iostate when none exists");

			vector_upl->upl_iostates[i].offset = offset;
			if(size < PAGE_SIZE)
				size = PAGE_SIZE;
			vector_upl->upl_iostates[i].size = size;
		}
		else
			panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_set_iostate was passed a NULL UPL\n");
}
void
vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
{
	if(vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			for(i = 0; i < vector_upl->num_upls; i++) {
				if(vector_upl->upl_elems[i] == subupl)
					break;
			}

			if(i == vector_upl->num_upls)
				panic("getting sub-upl iostate when none exists");

			*offset = vector_upl->upl_iostates[i].offset;
			*size = vector_upl->upl_iostates[i].size;
		}
		else
			panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_get_iostate was passed a NULL UPL\n");
}
void
vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			if(index < vector_upl->num_upls) {
				*offset = vector_upl->upl_iostates[index].offset;
				*size = vector_upl->upl_iostates[index].size;
			}
			else
				*offset = *size = 0;
		}
		else
			panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
}
upl_page_info_t *
upl_get_internal_vectorupl_pagelist(upl_t upl)
{
	return ((vector_upl_t)(upl->vector_upl))->pagelist;
}

void *
upl_get_internal_vectorupl(upl_t upl)
{
	return upl->vector_upl;
}

vm_size_t
upl_get_internal_pagelist_offset(void)
{
	return sizeof(struct upl);
}
void
upl_clear_dirty(
	upl_t		upl,
	boolean_t	value)
{
	if (value) {
		upl->flags |= UPL_CLEAR_DIRTY;
	} else {
		upl->flags &= ~UPL_CLEAR_DIRTY;
	}
}

void
upl_set_referenced(
	upl_t		upl,
	boolean_t	value)
{
	upl_lock(upl);
	if (value) {
		upl->ext_ref_count++;
	} else {
		if (!upl->ext_ref_count) {
			panic("upl_set_referenced not %p\n", upl);
		}
		upl->ext_ref_count--;
	}
	upl_unlock(upl);
}
#if CONFIG_IOSCHED
void
upl_set_blkno(upl_t upl, vm_offset_t upl_offset, int io_size, int64_t blkno)
{
	int i, j;

	if ((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0)
		return;

	assert(upl->upl_reprio_info != 0);
	for(i = (int)(upl_offset / PAGE_SIZE), j = 0; j < io_size; i++, j += PAGE_SIZE) {
		UPL_SET_REPRIO_INFO(upl, i, blkno, io_size);
	}
}
#endif
boolean_t
vm_page_is_slideable(vm_page_t m)
{
	boolean_t result = FALSE;
	vm_shared_region_slide_info_t si;

	vm_object_lock_assert_held(m->object);

	/* make sure our page belongs to the one object allowed to do this */
	if (!m->object->object_slid) {
		goto done;
	}

	si = m->object->vo_slide_info;
	if (si == NULL) {
		goto done;
	}

	if(!m->slid && (si->start <= m->offset && si->end > m->offset)) {
		result = TRUE;
	}

done:
	return result;
}
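
/*
 * In other words (summarizing the checks above, not adding new policy): a
 * page is "slideable" only if it belongs to the single VM object marked
 * object_slid, that object carries slide info, the page's offset falls in
 * the [start, end) range of that info, and the page has not been slid yet.
 */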
int vm_page_slide_counter = 0;
int vm_page_slide_errors = 0;

kern_return_t
vm_page_slide(
	vm_page_t	page,
	vm_map_offset_t	kernel_mapping_offset)
{
	kern_return_t	kr;
	vm_map_size_t	kernel_mapping_size;
	boolean_t	kernel_mapping_needs_unmap;
	vm_offset_t	kernel_vaddr;
	uint32_t	pageIndex = 0;

	assert(!page->slid);
	assert(page->object->object_slid);
	vm_object_lock_assert_exclusive(page->object);

	if (page->error)
		return KERN_FAILURE;

	/*
	 * Take a paging-in-progress reference to keep the object
	 * alive even if we have to unlock it (in vm_paging_map_object()
	 * for example)...
	 */
	vm_object_paging_begin(page->object);

	if (kernel_mapping_offset == 0) {
		/*
		 * The page hasn't already been mapped in kernel space
		 * by the caller.  Map it now, so that we can access
		 * its contents and slide them.
		 */
		kernel_mapping_size = PAGE_SIZE;
		kernel_mapping_needs_unmap = FALSE;
		kr = vm_paging_map_object(page,
					  page->object,
					  page->offset,
					  VM_PROT_READ | VM_PROT_WRITE,
					  FALSE,
					  &kernel_mapping_size,
					  &kernel_mapping_offset,
					  &kernel_mapping_needs_unmap);
		if (kr != KERN_SUCCESS) {
			panic("vm_page_slide: "
			      "could not map page in kernel: 0x%x\n",
			      kr);
		}
	} else {
		kernel_mapping_size = 0;
		kernel_mapping_needs_unmap = FALSE;
	}
	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);

	/*
	 * Slide the pointers on the page.
	 */

	/*assert that slide_file_info.start/end are page-aligned?*/

	assert(!page->slid);
	assert(page->object->object_slid);

	/* on some platforms this is an extern int, on others it's a cpp macro */
	__unreachable_ok_push
	/* TODO: Consider this */
	if (!TEST_PAGE_SIZE_4K) {
		for (int i = 0; i < 4; i++) {
			pageIndex = (uint32_t)((page->offset - page->object->vo_slide_info->start)/0x1000);
			kr = vm_shared_region_slide_page(page->object->vo_slide_info, kernel_vaddr + (0x1000*i), pageIndex + i);
		}
	} else {
		pageIndex = (uint32_t)((page->offset - page->object->vo_slide_info->start)/PAGE_SIZE);
		kr = vm_shared_region_slide_page(page->object->vo_slide_info, kernel_vaddr, pageIndex);
	}
	__unreachable_ok_pop

	vm_page_slide_counter++;

	/*
	 * Unmap the page from the kernel's address space,
	 */
	if (kernel_mapping_needs_unmap) {
		vm_paging_unmap_object(page->object,
				       kernel_vaddr,
				       kernel_vaddr + PAGE_SIZE);
	}

	page->dirty = FALSE;
	pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);

	if (kr != KERN_SUCCESS || cs_debug > 1) {
		printf("vm_page_slide(%p): "
		       "obj %p off 0x%llx mobj %p moff 0x%llx\n",
		       page,
		       page->object, page->offset,
		       page->object->pager,
		       page->offset + page->object->paging_offset);
	}

	if (kr == KERN_SUCCESS) {
		page->slid = TRUE;
	} else {
		page->error = TRUE;
		vm_page_slide_errors++;
	}

	vm_object_paging_end(page->object);

	return kr;
}
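
/*
 * Error handling note, restating the tail of vm_page_slide(): a failed
 * slide marks the page with page->error and bumps vm_page_slide_errors
 * instead of panicking, presumably so the fault path that requested the
 * slide can detect the failure; beyond that, only the counters and the
 * optional cs_debug printf record what happened.
 */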
void inline memoryshot(unsigned int event, unsigned int control)
{
	if (vm_debug_events) {
		KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
					vm_page_active_count, vm_page_inactive_count,
					vm_page_free_count, vm_page_speculative_count,
					vm_page_throttled_count);
	}
}
boolean_t
upl_device_page(upl_page_info_t *upl)
{
	return(UPL_DEVICE_PAGE(upl));
}

boolean_t
upl_page_present(upl_page_info_t *upl, int index)
{
	return(UPL_PAGE_PRESENT(upl, index));
}

boolean_t
upl_speculative_page(upl_page_info_t *upl, int index)
{
	return(UPL_SPECULATIVE_PAGE(upl, index));
}

boolean_t
upl_dirty_page(upl_page_info_t *upl, int index)
{
	return(UPL_DIRTY_PAGE(upl, index));
}

boolean_t
upl_valid_page(upl_page_info_t *upl, int index)
{
	return(UPL_VALID_PAGE(upl, index));
}

ppnum_t
upl_phys_page(upl_page_info_t *upl, int index)
{
	return(UPL_PHYS_PAGE(upl, index));
}
#if MACH_BSD
void
vm_countdirtypages(void)
{
	vm_page_t m;
	int dpages;
	int pgopages;
	int precpages;

	dpages = 0;
	pgopages = 0;
	precpages = 0;

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	do {
		if (m == (vm_page_t)0) break;

		if(m->dirty) dpages++;
		if(m->pageout) pgopages++;
		if(m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;

	} while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_throttled);
	do {
		if (m == (vm_page_t)0) break;

		dpages++;
		assert(m->dirty);
		assert(!m->pageout);
		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;

	} while (!queue_end(&vm_page_queue_throttled, (queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
	do {
		if (m == (vm_page_t)0) break;

		if(m->dirty) dpages++;
		if(m->pageout) pgopages++;
		if(m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;

	} while (!queue_end(&vm_page_queue_anonymous, (queue_entry_t) m));
	vm_page_unlock_queues();

	printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);

	dpages = 0;
	pgopages = 0;
	precpages = 0;

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_active);

	do {
		if(m == (vm_page_t)0) break;
		if(m->dirty) dpages++;
		if(m->pageout) pgopages++;
		if(m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if(m == (vm_page_t)0) break;

	} while (!queue_end(&vm_page_queue_active, (queue_entry_t) m));
	vm_page_unlock_queues();

	printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);

}
#endif /* MACH_BSD */
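
/*
 * vm_countdirtypages() is purely diagnostic: it walks the inactive,
 * throttled, anonymous and active queues under the page-queues lock and
 * prints dirty / pageout / precious counts to the console ("IN Q" for the
 * first three queues combined, "AC Q" for the active queue).  It only
 * prints; it does not feed back into any pageout decision.
 */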
ppnum_t
upl_get_highest_page(
	upl_t	upl)
{
	return upl->highest_page;
}

upl_size_t
upl_get_size(
	upl_t	upl)
{
	return upl->size;
}

#if UPL_DEBUG
kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
	upl->ubc_alias1 = alias1;
	upl->ubc_alias2 = alias2;
	return KERN_SUCCESS;
}
int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
{
	if(al)
		*al = upl->ubc_alias1;
	if(al2)
		*al2 = upl->ubc_alias2;
	return KERN_SUCCESS;
}
#endif /* UPL_DEBUG */
#if VM_PRESSURE_EVENTS
/*
 * Upward trajectory.
 */
extern boolean_t vm_compressor_low_on_space(void);

boolean_t
VM_PRESSURE_NORMAL_TO_WARNING(void) {

	if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPLESS) {

		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_pressure) {
			/* No frozen processes to kill */
			if (memorystatus_frozen_count == 0) {
				/* Not enough suspended processes available. */
				if (memorystatus_suspended_count < MEMORYSTATUS_SUSPENDED_THRESHOLD) {
					return TRUE;
				}
			}
		}
		return FALSE;

	} else {
		return ((AVAILABLE_NON_COMPRESSED_MEMORY < VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) ? 1 : 0);
	}
}

boolean_t
VM_PRESSURE_WARNING_TO_CRITICAL(void) {

	if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPLESS) {
		/* Available pages below our threshold */
		if (memorystatus_available_pages < memorystatus_available_pages_critical) {
			return TRUE;
		}
		return FALSE;
	} else {
		return (vm_compressor_low_on_space() || (AVAILABLE_NON_COMPRESSED_MEMORY < ((12 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0);
	}
}

/*
 * Downward trajectory.
 */
boolean_t
VM_PRESSURE_WARNING_TO_NORMAL(void) {

	if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPLESS) {
		/* Available pages above our threshold */
		unsigned int target_threshold = memorystatus_available_pages_pressure + ((15 * memorystatus_available_pages_pressure) / 100);
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return ((AVAILABLE_NON_COMPRESSED_MEMORY > ((12 * VM_PAGE_COMPRESSOR_COMPACT_THRESHOLD) / 10)) ? 1 : 0);
	}
}

boolean_t
VM_PRESSURE_CRITICAL_TO_WARNING(void) {

	if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_SWAPLESS) {
		/* Available pages above our threshold */
		unsigned int target_threshold = memorystatus_available_pages_critical + ((15 * memorystatus_available_pages_critical) / 100);
		if (memorystatus_available_pages > target_threshold) {
			return TRUE;
		}
		return FALSE;
	} else {
		return ((AVAILABLE_NON_COMPRESSED_MEMORY > ((14 * VM_PAGE_COMPRESSOR_SWAP_UNTHROTTLE_THRESHOLD) / 10)) ? 1 : 0);
	}
}
#endif /* VM_PRESSURE_EVENTS */
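
/*
 * Hysteresis summary, read directly off the thresholds above: with the
 * compressor in use, NORMAL->WARNING trips when non-compressed memory
 * falls below the compact threshold but WARNING->NORMAL requires 120% of
 * it, and WARNING->CRITICAL trips below 120% of the swap-unthrottle
 * threshold while CRITICAL->WARNING requires 140%; with the default pager
 * active, the downward transitions instead demand 15% headroom above the
 * memorystatus pressure/critical page counts.  The asymmetric margins
 * presumably keep the pressure level from flapping when availability
 * hovers near a threshold.
 */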