/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	vm/vm_pageout.c
 * Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	The proverbial page-out daemon.
 */
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>
#include <advisory_pageout.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <machine/vm_tuning.h>
#include <machine/commpage.h>

#include <vm/vm_compressor_pager.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h> /* must be last */
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>

#include <libkern/crypto/aes.h>
extern u_int32_t random(void);	/* from <libkern/libkern.h> */

#include <libkern/OSDebug.h>
extern vm_pressure_level_t memorystatus_vm_pressure_level;
int memorystatus_purge_on_warning = 2;
int memorystatus_purge_on_urgent = 5;
int memorystatus_purge_on_critical = 8;

#if VM_PRESSURE_EVENTS
void vm_pressure_response(void);
boolean_t vm_pressure_thread_running = FALSE;
extern void consider_vm_pressure_events(void);

boolean_t vm_pressure_changed = FALSE;
#endif /* VM_PRESSURE_EVENTS */
#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE   /* maximum iterations of the active queue to move pages to inactive */
#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE  100
#endif /* VM_PAGEOUT_BURST_ACTIVE_THROTTLE */

#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE  /* maximum iterations of the inactive queue w/o stealing/cleaning a page */
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE 4096
#endif /* VM_PAGEOUT_BURST_INACTIVE_THROTTLE */

#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF 100	/* number of pages to move to break deadlock */
#endif /* VM_PAGEOUT_DEADLOCK_RELIEF */

#ifndef VM_PAGEOUT_INACTIVE_RELIEF
#define VM_PAGEOUT_INACTIVE_RELIEF 50	/* minimum number of pages to move to the inactive q */
#endif /* VM_PAGEOUT_INACTIVE_RELIEF */

#ifndef	VM_PAGE_LAUNDRY_MAX
#define	VM_PAGE_LAUNDRY_MAX	128UL	/* maximum pageouts on a given pageout queue */
#endif	/* VM_PAGE_LAUNDRY_MAX */

#ifndef	VM_PAGEOUT_BURST_WAIT
#define	VM_PAGEOUT_BURST_WAIT	30	/* milliseconds */
#endif	/* VM_PAGEOUT_BURST_WAIT */

#ifndef	VM_PAGEOUT_EMPTY_WAIT
#define	VM_PAGEOUT_EMPTY_WAIT	200	/* milliseconds */
#endif	/* VM_PAGEOUT_EMPTY_WAIT */

#ifndef	VM_PAGEOUT_DEADLOCK_WAIT
#define	VM_PAGEOUT_DEADLOCK_WAIT	300	/* milliseconds */
#endif	/* VM_PAGEOUT_DEADLOCK_WAIT */

#ifndef	VM_PAGEOUT_IDLE_WAIT
#define	VM_PAGEOUT_IDLE_WAIT	10	/* milliseconds */
#endif	/* VM_PAGEOUT_IDLE_WAIT */

#ifndef	VM_PAGEOUT_SWAP_WAIT
#define	VM_PAGEOUT_SWAP_WAIT	50	/* milliseconds */
#endif	/* VM_PAGEOUT_SWAP_WAIT */

#ifndef VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED
#define VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED	1000	/* maximum pages considered before we issue a pressure event */
#endif /* VM_PAGEOUT_PRESSURE_PAGES_CONSIDERED */

#ifndef VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS
#define VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS	5	/* seconds */
#endif /* VM_PAGEOUT_PRESSURE_EVENT_MONITOR_SECS */

unsigned int	vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
unsigned int	vm_page_speculative_percentage = 5;

#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_page_speculative_percentage))
#endif /* VM_PAGE_SPECULATIVE_TARGET */
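
/*
 * Worked example (editor's sketch, derived only from the macro above and the
 * default vm_page_speculative_percentage of 5): for 100000 active+inactive
 * pages, VM_PAGE_SPECULATIVE_TARGET(100000) evaluates to
 * 100000 * 1 / (100 / 5) = 100000 / 20 = 5000 speculative pages.
 * Because of integer division, a percentage that does not divide 100 evenly
 * (e.g. 3) yields 100 / 3 == 33, i.e. roughly total/33 rather than exactly 3%.
 */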

#ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
#define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
#endif /* VM_PAGE_INACTIVE_HEALTHY_LIMIT */

/*
 *	To obtain a reasonable LRU approximation, the inactive queue
 *	needs to be large enough to give pages on it a chance to be
 *	referenced a second time.  This macro defines the fraction
 *	of active+inactive pages that should be inactive.
 *	The pageout daemon uses it to update vm_page_inactive_target.
 *
 *	If vm_page_free_count falls below vm_page_free_target and
 *	vm_page_inactive_count is below vm_page_inactive_target,
 *	then the pageout daemon starts running.
 */

#ifndef	VM_PAGE_INACTIVE_TARGET
#define	VM_PAGE_INACTIVE_TARGET(avail)	((avail) * 1 / 2)
#endif	/* VM_PAGE_INACTIVE_TARGET */
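
/*
 * Worked example (editor's sketch, based only on the macro above): with
 * 300000 pageable pages (active + inactive), VM_PAGE_INACTIVE_TARGET(300000)
 * is 150000, i.e. the daemon tries to keep about half of the pageable pages
 * on the inactive queue so they get a second chance to be referenced before
 * being reclaimed.
 */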

/*
 *	Once the pageout daemon starts running, it keeps going
 *	until vm_page_free_count meets or exceeds vm_page_free_target.
 */

#ifndef	VM_PAGE_FREE_TARGET
#define	VM_PAGE_FREE_TARGET(free)	(15 + (free) / 80)
#endif	/* VM_PAGE_FREE_TARGET */

/*
 *	The pageout daemon always starts running once vm_page_free_count
 *	falls below vm_page_free_min.
 */

#ifndef	VM_PAGE_FREE_MIN
#define	VM_PAGE_FREE_MIN(free)	(10 + (free) / 100)
#endif	/* VM_PAGE_FREE_MIN */

#define VM_PAGE_FREE_RESERVED_LIMIT	100
#define VM_PAGE_FREE_MIN_LIMIT		1500
#define VM_PAGE_FREE_TARGET_LIMIT	2000

/*
 *	When vm_page_free_count falls below vm_page_free_reserved,
 *	only vm-privileged threads can allocate pages.  vm-privilege
 *	allows the pageout daemon and default pager (and any other
 *	associated threads needed for default pageout) to continue
 *	operation by dipping into the reserved pool of pages.
 */

#ifndef	VM_PAGE_FREE_RESERVED
#define	VM_PAGE_FREE_RESERVED(n)	\
	((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif	/* VM_PAGE_FREE_RESERVED */
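
/*
 * Worked example (editor's sketch, assuming the default VM_PAGE_LAUNDRY_MAX
 * of 128UL defined earlier in this file): VM_PAGE_FREE_RESERVED(n) evaluates
 * to (6 * 128) + n = 768 + n pages, so with n == 0 the reserved pool that
 * only vm-privileged threads may dip into is 768 pages.
 */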

/*
 *	When we dequeue pages from the inactive list, they are
 *	reactivated (ie, put back on the active queue) if referenced.
 *	However, it is possible to starve the free list if other
 *	processors are referencing pages faster than we can turn off
 *	the referenced bit.  So we limit the number of reactivations
 *	we will make per call of vm_pageout_scan().
 */
#define VM_PAGE_REACTIVATE_LIMIT_MAX 20000
#ifndef	VM_PAGE_REACTIVATE_LIMIT
#define	VM_PAGE_REACTIVATE_LIMIT(avail)	(MAX((avail) * 1 / 20, VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif	/* VM_PAGE_REACTIVATE_LIMIT */
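
/*
 * Worked example (editor's sketch, using only VM_PAGE_REACTIVATE_LIMIT as
 * defined above): for 1000000 available pages the limit is
 * MAX(1000000 / 20, 20000) = MAX(50000, 20000) = 50000 reactivations per
 * vm_pageout_scan() call.  Because the macro uses MAX, the value named
 * VM_PAGE_REACTIVATE_LIMIT_MAX in fact acts as a floor, not a ceiling.
 */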
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM	100

extern boolean_t hibernate_cleaning_in_progress;

/*
 * Exported variable used to broadcast the activation of the pageout scan
 * Working Set uses this to throttle its use of pmap removes.  In this
 * way, code which runs within memory in an uncontested context does
 * not keep encountering soft faults.
 */

unsigned int	vm_pageout_scan_event_counter = 0;

/*
 * Forward declarations for internal routines.
 */
struct cq {
	struct vm_pageout_queue *q;
};

#if VM_PRESSURE_EVENTS
void vm_pressure_thread(void);
#endif

static void vm_pageout_garbage_collect(int);
static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(struct cq *cq);
static void vm_pageout_adjust_io_throttles(struct vm_pageout_queue *, struct vm_pageout_queue *, boolean_t);

extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);

static thread_t	vm_pageout_external_iothread = THREAD_NULL;
static thread_t	vm_pageout_internal_iothread = THREAD_NULL;

unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;

unsigned int vm_pageout_swap_wait = 0;
unsigned int vm_pageout_idle_wait = 0;		/* milliseconds */
unsigned int vm_pageout_empty_wait = 0;		/* milliseconds */
unsigned int vm_pageout_burst_wait = 0;		/* milliseconds */
unsigned int vm_pageout_deadlock_wait = 0;	/* milliseconds */
unsigned int vm_pageout_deadlock_relief = 0;
unsigned int vm_pageout_inactive_relief = 0;
unsigned int vm_pageout_burst_active_throttle = 0;
unsigned int vm_pageout_burst_inactive_throttle = 0;

int vm_upl_wait_for_pages = 0;

/*
 *	These variables record the pageout daemon's actions:
 *	how many pages it looks at and what happens to those pages.
 *	No locking needed because only one thread modifies the variables.
 */

unsigned int vm_pageout_active = 0;		/* debugging */
unsigned int vm_pageout_active_busy = 0;	/* debugging */
unsigned int vm_pageout_inactive = 0;		/* debugging */
unsigned int vm_pageout_inactive_throttled = 0;	/* debugging */
unsigned int vm_pageout_inactive_forced = 0;	/* debugging */
unsigned int vm_pageout_inactive_nolock = 0;	/* debugging */
unsigned int vm_pageout_inactive_avoid = 0;	/* debugging */
unsigned int vm_pageout_inactive_busy = 0;	/* debugging */
unsigned int vm_pageout_inactive_error = 0;	/* debugging */
unsigned int vm_pageout_inactive_absent = 0;	/* debugging */
unsigned int vm_pageout_inactive_notalive = 0;	/* debugging */
unsigned int vm_pageout_inactive_used = 0;	/* debugging */
unsigned int vm_pageout_cache_evicted = 0;	/* debugging */
unsigned int vm_pageout_inactive_clean = 0;	/* debugging */
unsigned int vm_pageout_speculative_clean = 0;	/* debugging */

unsigned int vm_pageout_freed_from_cleaned = 0;
unsigned int vm_pageout_freed_from_speculative = 0;
unsigned int vm_pageout_freed_from_inactive_clean = 0;

unsigned int vm_pageout_enqueued_cleaned_from_inactive_clean = 0;
unsigned int vm_pageout_enqueued_cleaned_from_inactive_dirty = 0;

unsigned int vm_pageout_cleaned_reclaimed = 0;		/* debugging; how many cleaned pages are reclaimed by the pageout scan */
unsigned int vm_pageout_cleaned_reactivated = 0;	/* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
unsigned int vm_pageout_cleaned_reference_reactivated = 0;
unsigned int vm_pageout_cleaned_volatile_reactivated = 0;
unsigned int vm_pageout_cleaned_fault_reactivated = 0;
unsigned int vm_pageout_cleaned_commit_reactivated = 0;	/* debugging; how many cleaned pages are found to be referenced on commit (and are therefore reactivated) */
unsigned int vm_pageout_cleaned_busy = 0;
unsigned int vm_pageout_cleaned_nolock = 0;

unsigned int vm_pageout_inactive_dirty_internal = 0;	/* debugging */
unsigned int vm_pageout_inactive_dirty_external = 0;	/* debugging */
unsigned int vm_pageout_inactive_deactivated = 0;	/* debugging */
unsigned int vm_pageout_inactive_anonymous = 0;		/* debugging */
unsigned int vm_pageout_dirty_no_pager = 0;		/* debugging */
unsigned int vm_pageout_purged_objects = 0;		/* debugging */
unsigned int vm_stat_discard = 0;		/* debugging */
unsigned int vm_stat_discard_sent = 0;		/* debugging */
unsigned int vm_stat_discard_failure = 0;	/* debugging */
unsigned int vm_stat_discard_throttle = 0;	/* debugging */
unsigned int vm_pageout_reactivation_limit_exceeded = 0;	/* debugging */
unsigned int vm_pageout_catch_ups = 0;				/* debugging */
unsigned int vm_pageout_inactive_force_reclaim = 0;	/* debugging */

unsigned int vm_pageout_scan_reclaimed_throttled = 0;
unsigned int vm_pageout_scan_active_throttled = 0;
unsigned int vm_pageout_scan_inactive_throttled_internal = 0;
unsigned int vm_pageout_scan_inactive_throttled_external = 0;
unsigned int vm_pageout_scan_throttle = 0;			/* debugging */
unsigned int vm_pageout_scan_burst_throttle = 0;		/* debugging */
unsigned int vm_pageout_scan_empty_throttle = 0;		/* debugging */
unsigned int vm_pageout_scan_swap_throttle = 0;			/* debugging */
unsigned int vm_pageout_scan_deadlock_detected = 0;		/* debugging */
unsigned int vm_pageout_scan_active_throttle_success = 0;	/* debugging */
unsigned int vm_pageout_scan_inactive_throttle_success = 0;	/* debugging */
unsigned int vm_pageout_inactive_external_forced_jetsam_count = 0;	/* debugging */
unsigned int vm_page_speculative_count_drifts = 0;
unsigned int vm_page_speculative_count_drift_max = 0;

/*
 * Backing store throttle when BS is exhausted
 */
unsigned int	vm_backing_store_low = 0;

unsigned int vm_pageout_out_of_line = 0;
unsigned int vm_pageout_in_place = 0;

unsigned int vm_page_steal_pageout_page = 0;

/*
 * counters and statistics...
 */
unsigned long vm_page_decrypt_counter = 0;
unsigned long vm_page_decrypt_for_upl_counter = 0;
unsigned long vm_page_encrypt_counter = 0;
unsigned long vm_page_encrypt_abort_counter = 0;
unsigned long vm_page_encrypt_already_encrypted_counter = 0;
boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */

struct	vm_pageout_queue vm_pageout_queue_internal;
struct	vm_pageout_queue vm_pageout_queue_external;

unsigned int vm_page_speculative_target = 0;

vm_object_t	vm_pageout_scan_wants_object = VM_OBJECT_NULL;

boolean_t (* volatile consider_buffer_cache_collect)(int) = NULL;

#if DEVELOPMENT || DEBUG
unsigned long vm_cs_validated_resets = 0;
#endif

int	vm_debug_events = 0;

#if CONFIG_MEMORYSTATUS
extern boolean_t memorystatus_idle_exit_from_VM(void);
extern boolean_t memorystatus_kill_on_VM_page_shortage(boolean_t async);
extern void memorystatus_on_pageout_scan_end(void);
#endif

boolean_t	vm_page_compressions_failing = FALSE;

/*
 *	Routine:	vm_backing_store_disable
 *	Purpose:
 *		Suspend non-privileged threads wishing to extend
 *		backing store when we are low on backing store
 *		(Synchronized by caller)
 */
void
vm_backing_store_disable(
	boolean_t	disable)
{
	if (disable) {
		vm_backing_store_low = 1;
	} else {
		if (vm_backing_store_low) {
			vm_backing_store_low = 0;
			thread_wakeup((event_t) &vm_backing_store_low);
		}
	}
}

#if	MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
unsigned long vm_pageout_cluster_collisions = 0;
unsigned long vm_pageout_cluster_clusters = 0;
unsigned long vm_pageout_cluster_conversions = 0;
unsigned long vm_pageout_target_collisions = 0;
unsigned long vm_pageout_target_page_dirtied = 0;
unsigned long vm_pageout_target_page_freed = 0;
#define CLUSTER_STAT(clause)	clause
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */
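
/*
 * Illustration (editor's note, derived from the conditional definitions just
 * above): a statement such as
 *
 *	CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
 *
 * compiles to the increment when MACH_CLUSTER_STATS is configured, and to
 * nothing at all otherwise, so the cluster-statistics bookkeeping elsewhere
 * in this file costs nothing in a non-stats build.
 */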

/*
 *	Routine:	vm_pageout_object_terminate
 *	Purpose:
 *		Destroy the pageout_object, and perform all of the
 *		required cleanup actions.
 *
 *		The object must be locked, and will be returned locked.
 */
void
vm_pageout_object_terminate(
	vm_object_t	object)
{
	vm_object_t	shadow_object;

	/*
	 * Deal with the deallocation (last reference) of a pageout object
	 * (used for cleaning-in-place) by dropping the paging references/
	 * freeing pages in the original object.
	 */

	assert(object->pageout);
	shadow_object = object->shadow;
	vm_object_lock(shadow_object);

	while (!queue_empty(&object->memq)) {
		vm_page_t		p, m;
		vm_object_offset_t	offset;

		p = (vm_page_t) queue_first(&object->memq);

		assert(!p->cleaning);

		m = vm_page_lookup(shadow_object,
				   offset + object->vo_shadow_offset);

		if (m == VM_PAGE_NULL)
			continue;

		assert((m->dirty) || (m->precious) ||
		       (m->busy && m->cleaning));

		/*
		 * Handle the trusted pager throttle.
		 * Also decrement the burst throttle (if external).
		 */
		vm_page_lock_queues();
		if (m->pageout_queue)
			vm_pageout_throttle_up(m);

		/*
		 * Handle the "target" page(s). These pages are to be freed if
		 * successfully cleaned. Target pages are always busy, and are
		 * wired exactly once. The initial target pages are not mapped,
		 * (so cannot be referenced or modified) but converted target
		 * pages may have been modified between the selection as an
		 * adjacent page and conversion to a target.
		 */
		assert(m->wire_count == 1);

		m->encrypted_cleaning = FALSE;

#if MACH_CLUSTER_STATS
		if (m->wanted) vm_pageout_target_collisions++;
#endif
		/*
		 * Revoke all access to the page. Since the object is
		 * locked, and the page is busy, this prevents the page
		 * from being dirtied after the pmap_disconnect() call
		 * returns.
		 *
		 * Since the page is left "dirty" but "not modified", we
		 * can detect whether the page was redirtied during
		 * pageout by checking the modify state.
		 */
		if (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED) {
			SET_PAGE_DIRTY(m, FALSE);
		}

		CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
		vm_page_unwire(m, TRUE);	/* reactivates */
		VM_STAT_INCR(reactivations);

		CLUSTER_STAT(vm_pageout_target_page_freed++;)
		vm_page_free(m);		/* clears busy, etc. */

		vm_page_unlock_queues();

		/*
		 * Handle the "adjacent" pages. These pages were cleaned in
		 * place, and should be left alone.
		 * If prep_pin_count is nonzero, then someone is using the
		 * page, so make it active.
		 */
		if (!m->active && !m->inactive && !m->throttled && !m->private) {
			vm_page_deactivate(m);
		}
		if (m->overwriting) {
			/*
			 * the (COPY_OUT_FROM == FALSE) request_page_list case
			 */

			/*
			 * We do not re-set m->dirty !
			 * The page was busy so no extraneous activity
			 * could have occurred. COPY_INTO is a read into the
			 * new pages. CLEAN_IN_PLACE does actually write
			 * out the pages but handling outside of this code
			 * will take care of resetting dirty. We clear the
			 * modify however for the Programmed I/O case.
			 */
			pmap_clear_modify(m->phys_page);

			/*
			 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
			 * Occurs when the original page was wired
			 * at the time of the list request
			 */
			assert(VM_PAGE_WIRED(m));
			vm_page_unwire(m, TRUE);	/* reactivates */

			m->overwriting = FALSE;
		}

		/*
		 * Set the dirty state according to whether or not the page was
		 * modified during the pageout. Note that we purposefully do
		 * NOT call pmap_clear_modify since the page is still mapped.
		 * If the page were to be dirtied between the 2 calls, this
		 * fact would be lost. This code is only necessary to
		 * maintain statistics, since the pmap module is always
		 * consulted if m->dirty is false.
		 */
#if MACH_CLUSTER_STATS
		m->dirty = pmap_is_modified(m->phys_page);

		if (m->dirty)	vm_pageout_cluster_dirtied++;
		else		vm_pageout_cluster_cleaned++;
		if (m->wanted)	vm_pageout_cluster_collisions++;
#endif
		if (m->encrypted_cleaning == TRUE) {
			m->encrypted_cleaning = FALSE;
		}

		/*
		 * Wakeup any thread waiting for the page to be un-cleaning.
		 */
		vm_page_unlock_queues();
	}
	/*
	 * Account for the paging reference taken in vm_paging_object_allocate.
	 */
	vm_object_activity_end(shadow_object);
	vm_object_unlock(shadow_object);

	assert(object->ref_count == 0);
	assert(object->paging_in_progress == 0);
	assert(object->activity_in_progress == 0);
	assert(object->resident_page_count == 0);
}

/*
 *	Routine:	vm_pageclean_setup
 *
 *	Purpose:	setup a page to be cleaned (made non-dirty), but not
 *			necessarily flushed from the VM page cache.
 *			This is accomplished by cleaning in place.
 *
 *			The page must not be busy, and new_object
 */
void
vm_pageclean_setup(
	vm_page_t		m,
	vm_page_t		new_m,
	vm_object_t		new_object,
	vm_object_offset_t	new_offset)
{
	assert(!m->cleaning);

	XPR(XPR_VM_PAGEOUT,
	    "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
	    m->object, m->offset, m,
	    new_m, new_offset);

	pmap_clear_modify(m->phys_page);

	/*
	 * Mark original page as cleaning in place.
	 */
	SET_PAGE_DIRTY(m, FALSE);

	/*
	 * Convert the fictitious page to a private shadow of
	 * the real page.
	 */
	assert(new_m->fictitious);
	assert(new_m->phys_page == vm_page_fictitious_addr);
	new_m->fictitious = FALSE;
	new_m->private = TRUE;
	new_m->pageout = TRUE;
	new_m->phys_page = m->phys_page;

	vm_page_lockspin_queues();
	vm_page_unlock_queues();

	vm_page_insert(new_m, new_object, new_offset);
	assert(!new_m->wanted);
}

/*
 *	Routine:	vm_pageout_initialize_page
 *	Purpose:
 *		Causes the specified page to be initialized in
 *		the appropriate memory object. This routine is used to push
 *		pages into a copy-object when they are modified in the
 *		original object.
 *
 *		The page is moved to a temporary object and paged out.
 *
 *	In/out conditions:
 *		The page in question must not be on any pageout queues.
 *		The object to which it belongs must be locked.
 *		The page must be busy, but not hold a paging reference.
 *
 *	Implementation:
 *		Move this page to a completely new object.
 */
void
vm_pageout_initialize_page(
	vm_page_t	m)
{
	vm_object_t		object = m->object;
	vm_object_offset_t	paging_offset;
	memory_object_t		pager;

	XPR(XPR_VM_PAGEOUT,
	    "vm_pageout_initialize_page, page 0x%X\n",
	    m, 0, 0, 0, 0);

	/*
	 * Verify that we really want to clean this page
	 */

	/*
	 * Create a paging reference to let us play with the object.
	 */
	paging_offset = m->offset + object->paging_offset;

	if (m->absent || m->error || m->restart || (!m->dirty && !m->precious)) {
		panic("reservation without pageout?"); /* alan */

		vm_object_unlock(object);
		return;
	}

	/*
	 * If there's no pager, then we can't clean the page. This should
	 * never happen since this should be a copy object and therefore not
	 * an external object, so the pager should always be there.
	 */

	pager = object->pager;

	if (pager == MEMORY_OBJECT_NULL) {
		panic("missing pager for copy object");
		return;
	}

	/*
	 * set the page for future call to vm_fault_list_request
	 */
	pmap_clear_modify(m->phys_page);
	SET_PAGE_DIRTY(m, FALSE);

	/*
	 * keep the object from collapsing or terminating
	 */
	vm_object_paging_begin(object);
	vm_object_unlock(object);

	/*
	 * Write the data to its pager.
	 * Note that the data is passed by naming the new object,
	 * not a virtual address; the pager interface has been
	 * manipulated to use the "internal memory" data type.
	 * [The object reference from its allocation is donated
	 * to the eventual recipient.]
	 */
	memory_object_data_initialize(pager, paging_offset, PAGE_SIZE);

	vm_object_lock(object);
	vm_object_paging_end(object);
}

#if	MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES	16
struct {
	unsigned long pages_in_cluster;
	unsigned long pages_at_higher_offsets;
	unsigned long pages_at_lower_offsets;
} cluster_stats[MAXCLUSTERPAGES];
#endif	/* MACH_CLUSTER_STATS */

/*
 * vm_pageout_cluster:
 *
 * Given a page, queue it to the appropriate I/O thread,
 * which will page it out and attempt to clean adjacent pages
 * in the same operation.
 *
 * The object and queues must be locked. We will take a
 * paging reference to prevent deallocation or collapse when we
 * release the object lock back at the call site. The I/O thread
 * is responsible for consuming this reference
 *
 * The page must not be on any pageout queue.
 */
void
vm_pageout_cluster(vm_page_t m, boolean_t pageout)
{
	vm_object_t	object = m->object;
	struct		vm_pageout_queue *q;

	XPR(XPR_VM_PAGEOUT,
	    "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
	    object, m->offset, m, 0, 0);

	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	vm_object_lock_assert_exclusive(object);

	/*
	 * Only a certain kind of page is appreciated here.
	 */
	assert((m->dirty || m->precious) && (!VM_PAGE_WIRED(m)));
	assert(!m->cleaning && !m->pageout && !m->laundry);
#ifndef CONFIG_FREEZE
	assert(!m->inactive && !m->active);
	assert(!m->throttled);
#endif /* CONFIG_FREEZE */

	/*
	 * protect the object from collapse or termination
	 */
	vm_object_activity_begin(object);

	m->pageout = pageout;

	if (object->internal == TRUE) {
		if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
			m->busy = TRUE;

		q = &vm_pageout_queue_internal;
	} else
		q = &vm_pageout_queue_external;

	/*
	 * pgo_laundry count is tied to the laundry bit
	 */
	m->laundry = TRUE;
	q->pgo_laundry++;

	m->pageout_queue = TRUE;
	queue_enter(&q->pgo_pending, m, vm_page_t, pageq);

	if (q->pgo_idle == TRUE) {
		q->pgo_idle = FALSE;
		thread_wakeup((event_t) &q->pgo_pending);
	}
}

unsigned long vm_pageout_throttle_up_count = 0;

/*
 * A page is back from laundry or we are stealing it back from
 * the laundering state. See if there are some pages waiting to
 * go to laundry and if we can let some of them go now.
 *
 * Object and page queues must be locked.
 */
void
vm_pageout_throttle_up(
	vm_page_t	m)
{
	struct vm_pageout_queue *q;

	assert(m->object != VM_OBJECT_NULL);
	assert(m->object != kernel_object);

	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	vm_object_lock_assert_exclusive(m->object);

	vm_pageout_throttle_up_count++;

	if (m->object->internal == TRUE)
		q = &vm_pageout_queue_internal;
	else
		q = &vm_pageout_queue_external;

	if (m->pageout_queue == TRUE) {

		queue_remove(&q->pgo_pending, m, vm_page_t, pageq);
		m->pageout_queue = FALSE;

		m->pageq.next = NULL;
		m->pageq.prev = NULL;

		vm_object_activity_end(m->object);
	}
	if (m->laundry == TRUE) {

		m->laundry = FALSE;
		q->pgo_laundry--;

		if (q->pgo_throttled == TRUE) {
			q->pgo_throttled = FALSE;
			thread_wakeup((event_t) &q->pgo_laundry);
		}
		if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
			q->pgo_draining = FALSE;
			thread_wakeup((event_t) (&q->pgo_laundry+1));
		}
	}
}

void
vm_pageout_throttle_up_batch(
	struct vm_pageout_queue *q,
	int		batch_cnt)
{
	lck_mtx_assert(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	vm_pageout_throttle_up_count += batch_cnt;

	q->pgo_laundry -= batch_cnt;

	if (q->pgo_throttled == TRUE) {
		q->pgo_throttled = FALSE;
		thread_wakeup((event_t) &q->pgo_laundry);
	}
	if (q->pgo_draining == TRUE && q->pgo_laundry == 0) {
		q->pgo_draining = FALSE;
		thread_wakeup((event_t) (&q->pgo_laundry+1));
	}
}

/*
 * VM memory pressure monitoring.
 *
 * vm_pageout_scan() keeps track of the number of pages it considers and
 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
 *
 * compute_memory_pressure() is called every second from compute_averages()
 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
 * of reclaimed pages in a new vm_pageout_stat[] bucket.
 *
 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
 * The caller provides the number of seconds ("nsecs") worth of statistics
 * it wants, up to 30 seconds.
 * It computes the number of pages reclaimed in the past "nsecs" seconds and
 * also returns the number of pages the system still needs to reclaim at this
 * moment in time.
 */
#define VM_PAGEOUT_STAT_SIZE	31
struct vm_pageout_stat {
	unsigned int considered;
	unsigned int reclaimed;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0}, };
unsigned int vm_pageout_stat_now = 0;
unsigned int vm_memory_pressure = 0;

#define VM_PAGEOUT_STAT_BEFORE(i) \
	(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
	(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
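
/*
 * Illustration (editor's sketch of the ring arithmetic above, no new state
 * assumed): with VM_PAGEOUT_STAT_SIZE == 31,
 *	VM_PAGEOUT_STAT_BEFORE(0)  == 30	and
 *	VM_PAGEOUT_STAT_AFTER(30)  == 0,
 * so vm_pageout_stats[] behaves as a circular buffer of 31 one-second
 * buckets: compute_memory_pressure() advances "now" once per second and
 * clears the slot it is about to reuse, giving roughly 30 seconds of history.
 */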

#if VM_PAGE_BUCKETS_CHECK
int vm_page_buckets_check_interval = 10; /* in seconds */
#endif /* VM_PAGE_BUCKETS_CHECK */

/*
 * Called from compute_averages().
 */
void
compute_memory_pressure(
	__unused void *arg)
{
	unsigned int vm_pageout_next;

#if VM_PAGE_BUCKETS_CHECK
	/* check the consistency of VM page buckets at regular interval */
	static int counter = 0;
	if ((++counter % vm_page_buckets_check_interval) == 0) {
		vm_page_buckets_check();
	}
#endif /* VM_PAGE_BUCKETS_CHECK */

	vm_memory_pressure =
		vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;

	commpage_set_memory_pressure( vm_memory_pressure );

	/* move "now" forward */
	vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
	vm_pageout_stats[vm_pageout_next].considered = 0;
	vm_pageout_stats[vm_pageout_next].reclaimed = 0;
	vm_pageout_stat_now = vm_pageout_next;
}

/*
 * mach_vm_ctl_page_free_wanted() is called indirectly, via
 * mach_vm_pressure_monitor(), when taking a stackshot. Therefore,
 * it must be safe in the restricted stackshot context. Locks and/or
 * blocking are not allowable.
 */
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
	unsigned int page_free_target, page_free_count, page_free_wanted;

	page_free_target = vm_page_free_target;
	page_free_count = vm_page_free_count;
	if (page_free_target > page_free_count) {
		page_free_wanted = page_free_target - page_free_count;
	} else {
		page_free_wanted = 0;
	}

	return page_free_wanted;
}
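
/*
 * Worked example (editor's sketch, using only the logic above): if
 * vm_page_free_target is 2000 pages and vm_page_free_count is currently
 * 1500, mach_vm_ctl_page_free_wanted() reports 500; once the free count
 * meets or exceeds the target it reports 0.
 */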

/*
 * mach_vm_pressure_monitor() is called when taking a stackshot, with
 * wait_for_pressure FALSE, so that code path must remain safe in the
 * restricted stackshot context. No blocking or locks are allowable.
 * on that code path.
 */

kern_return_t
mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p)
{
	wait_result_t	wr;
	unsigned int	vm_pageout_then, vm_pageout_now;
	unsigned int	pages_reclaimed;

	/*
	 * We don't take the vm_page_queue_lock here because we don't want
	 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
	 * thread when it's trying to reclaim memory. We don't need fully
	 * accurate monitoring anyway...
	 */

	if (wait_for_pressure) {
		/* wait until there's memory pressure */
		while (vm_page_free_count >= vm_page_free_target) {
			wr = assert_wait((event_t) &vm_page_free_wanted,
					 THREAD_INTERRUPTIBLE);
			if (wr == THREAD_WAITING) {
				wr = thread_block(THREAD_CONTINUE_NULL);
			}
			if (wr == THREAD_INTERRUPTED) {
				return KERN_ABORTED;
			}
			if (wr == THREAD_AWAKENED) {
				/*
				 * The memory pressure might have already
				 * been relieved but let's not block again
				 * and let's report that there was memory
				 * pressure at some point.
				 */
				break;
			}
		}
	}

	/* provide the number of pages the system wants to reclaim */
	if (pages_wanted_p != NULL) {
		*pages_wanted_p = mach_vm_ctl_page_free_wanted();
	}

	if (pages_reclaimed_p == NULL) {
		return KERN_SUCCESS;
	}

	/* provide number of pages reclaimed in the last "nsecs_monitored" */
	do {
		vm_pageout_now = vm_pageout_stat_now;
		pages_reclaimed = 0;
		for (vm_pageout_then =
			     VM_PAGEOUT_STAT_BEFORE(vm_pageout_now);
		     vm_pageout_then != vm_pageout_now &&
			     nsecs_monitored-- != 0;
		     vm_pageout_then =
			     VM_PAGEOUT_STAT_BEFORE(vm_pageout_then)) {
			pages_reclaimed += vm_pageout_stats[vm_pageout_then].reclaimed;
		}
	} while (vm_pageout_now != vm_pageout_stat_now);
	*pages_reclaimed_p = pages_reclaimed;

	return KERN_SUCCESS;
}
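
/*
 * Usage sketch (editor's note; hypothetical caller, shown only to make the
 * parameter roles above concrete):
 *
 *	unsigned int reclaimed, wanted;
 *	kern_return_t kr;
 *
 *	kr = mach_vm_pressure_monitor(FALSE,	// don't block waiting for pressure
 *				      10,	// look at the last 10 seconds of stats
 *				      &reclaimed, &wanted);
 *
 * With wait_for_pressure == FALSE (the stackshot path described above) the
 * call never blocks; "reclaimed" sums the last 10 vm_pageout_stats[] buckets
 * and "wanted" is mach_vm_ctl_page_free_wanted() at the time of the call.
 */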

/*
 * function in BSD to apply I/O throttle to the pageout thread
 */
extern void vm_pageout_io_throttle(void);

#if LATENCY_JETSAM
boolean_t	jlp_init = FALSE;
uint64_t	jlp_time = 0, jlp_current = 0;
struct vm_page	jetsam_latency_page[NUM_OF_JETSAM_LATENCY_TOKENS];
unsigned int	latency_jetsam_wakeup = 0;
#endif /* LATENCY_JETSAM */

/*
 * Page States: Used below to maintain the page state
 * before it's removed from its Q. This saved state
 * helps us do the right accounting in certain cases
 */
#define PAGE_STATE_SPECULATIVE		1
#define PAGE_STATE_ANONYMOUS		2
#define PAGE_STATE_INACTIVE		3
#define PAGE_STATE_INACTIVE_FIRST	4
#define PAGE_STATE_CLEAN		5

#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m)				\
	/*								\
	 * If a "reusable" page somehow made it back into		\
	 * the active queue, it's been re-used and is not		\
	 * quite re-usable.						\
	 * If the VM object was "all_reusable", consider it		\
	 * as "all re-used" instead of converting it to			\
	 * "partially re-used", which could be expensive.		\
	 */								\
	if ((m)->reusable ||						\
	    (m)->object->all_reusable) {				\
		vm_object_reuse_pages((m)->object,			\
				      (m)->offset,			\
				      (m)->offset + PAGE_SIZE_64,	\
				      FALSE);				\
	}

#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT		64
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX	1024

#define	FCS_IDLE		0
#define FCS_DELAYED		1
#define FCS_DEADLOCK_DETECTED	2

struct flow_control {
	int		state;
	mach_timespec_t	ts;
};

uint32_t vm_pageout_considered_page = 0;
uint32_t vm_page_filecache_min = 0;

#define VM_PAGE_FILECACHE_MIN	50000
#define ANONS_GRABBED_LIMIT	2
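
/*
 * Editor's note on the two knobs above, as they are used later in
 * vm_pageout_scan(): vm_page_filecache_min is the number of file-backed
 * pages the scan tries to preserve before it prefers anonymous pages, and
 * ANONS_GRABBED_LIMIT (2) caps how many anonymous pages are taken in a row
 * before a file-backed inactive page is considered again.
 */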

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *	It returns with both vm_page_queue_free_lock and vm_page_queue_lock
 *	held and vm_page_free_wanted == 0.
 */
void
vm_pageout_scan(void)
{
    unsigned int loop_count = 0;
    unsigned int inactive_burst_count = 0;
    unsigned int active_burst_count = 0;
    unsigned int reactivated_this_call;
    unsigned int reactivate_limit;
    vm_page_t   local_freeq = NULL;
    int         local_freed = 0;
    int         delayed_unlock_limit = 0;
    int         refmod_state = 0;
    int         vm_pageout_deadlock_target = 0;
    struct vm_pageout_queue *iq;
    struct vm_pageout_queue *eq;
    struct vm_speculative_age_q *sq;
    struct flow_control flow_control = { 0, { 0, 0 } };
    boolean_t   inactive_throttled = FALSE;
    boolean_t   try_failed;
    unsigned int msecs = 0;
    vm_object_t last_object_tried;
    uint32_t    catch_up_count = 0;
    uint32_t    inactive_reclaim_run;
    boolean_t   forced_reclaim;
    boolean_t   exceeded_burst_throttle;
    boolean_t   grab_anonymous = FALSE;
    boolean_t   force_anonymous = FALSE;
    int         anons_grabbed = 0;
    int         page_prev_state = 0;
    int         cache_evict_throttle = 0;
    uint32_t    vm_pageout_inactive_external_forced_reactivate_limit = 0;
    vm_pressure_level_t pressure_level;

    VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
                   vm_pageout_speculative_clean, vm_pageout_inactive_clean,
                   vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);

#if LATENCY_JETSAM
    if (jlp_init == FALSE) {
        int i = 0;
        vm_page_t jlp;

        for (; i < NUM_OF_JETSAM_LATENCY_TOKENS; i++) {
            jlp = &jetsam_latency_page[i];
            jlp->fictitious = TRUE;
        }
        jlp = &jetsam_latency_page[0];
        queue_enter(&vm_page_queue_active, jlp, vm_page_t, pageq);

        jlp->offset = mach_absolute_time();
        jlp_time = jlp->offset;
        jlp_init = TRUE;
    }
#endif /* LATENCY_JETSAM */

    flow_control.state = FCS_IDLE;
    iq = &vm_pageout_queue_internal;
    eq = &vm_pageout_queue_external;
    sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

    XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);

    vm_page_lock_queues();
    delayed_unlock = 1;	/* must be nonzero if Qs are locked, 0 if unlocked */

    /*
     * Calculate the max number of referenced pages on the inactive
     * queue that we will reactivate.
     */
    reactivated_this_call = 0;
    reactivate_limit = VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count +
                                                vm_page_inactive_count);
    inactive_reclaim_run = 0;

    vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;

    /*
     * We want to gradually dribble pages from the active queue
     * to the inactive queue.  If we let the inactive queue get
     * very small, and then suddenly dump many pages into it,
     * those pages won't get a sufficient chance to be referenced
     * before we start taking them from the inactive queue.
     *
     * We must limit the rate at which we send pages to the pagers
     * so that we don't tie up too many pages in the I/O queues.
     * We implement a throttling mechanism using the laundry count
     * to limit the number of pages outstanding to the default
     * and external pagers.  We can bypass the throttles and look
     * for clean pages if the pageout queues don't drain in a timely
     * fashion since this may indicate that the pageout paths are
     * stalled waiting for memory, which only we can provide.
     */

    assert(delayed_unlock!=0);

    /*
     * Recalculate vm_page_inactivate_target.
     */
    vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
                                                      vm_page_inactive_count +
                                                      vm_page_speculative_count);

    vm_page_anonymous_min = vm_page_inactive_target / 20;

    /*
     * don't want to wake the pageout_scan thread up every time we fall below
     * the targets... set a low water mark at 0.25% below the target
     */
    vm_page_inactive_min = vm_page_inactive_target - (vm_page_inactive_target / 400);

    if (vm_page_speculative_percentage > 50)
        vm_page_speculative_percentage = 50;
    else if (vm_page_speculative_percentage <= 0)
        vm_page_speculative_percentage = 1;

    vm_page_speculative_target = VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count +
                                                            vm_page_inactive_count);

    last_object_tried = NULL;

    if ((vm_page_inactive_count + vm_page_speculative_count) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count))
        catch_up_count = vm_page_inactive_count + vm_page_speculative_count;

        DTRACE_VM2(rev, int, 1, (uint64_t *), NULL);

        if (delayed_unlock == 0) {
            vm_page_lock_queues();
            delayed_unlock = 1;
        }
        if (vm_upl_wait_for_pages < 0)
            vm_upl_wait_for_pages = 0;

        delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT + vm_upl_wait_for_pages;

        if (delayed_unlock_limit > VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX)
            delayed_unlock_limit = VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX;

        /*
         * Move pages from active to inactive if we're below the target
         */
        /* if we are trying to make clean, we need to make sure we actually have inactive - mj */
        if ((vm_page_inactive_count + vm_page_speculative_count) >= vm_page_inactive_target)
            goto done_moving_active_pages;

        if (object != NULL) {
            vm_object_unlock(object);
            object = NULL;
        }
        vm_pageout_scan_wants_object = VM_OBJECT_NULL;

        /*
         * Don't sweep through active queue more than the throttle
         * which should be kept relatively low
         */
        active_burst_count = MIN(vm_pageout_burst_active_throttle, vm_page_active_count);

        VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_START,
                       vm_pageout_inactive, vm_pageout_inactive_used, vm_page_free_count, local_freed);

        VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_NONE,
                       vm_pageout_speculative_clean, vm_pageout_inactive_clean,
                       vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
        memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_START);

        while (!queue_empty(&vm_page_queue_active) && active_burst_count--) {

            vm_pageout_active++;

            m = (vm_page_t) queue_first(&vm_page_queue_active);

            assert(m->active && !m->inactive);
            assert(!m->laundry);
            assert(m->object != kernel_object);
            assert(m->phys_page != vm_page_guard_addr);

            DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);

#if LATENCY_JETSAM
            if (m->fictitious) {
                const uint32_t FREE_TARGET_MULTIPLIER = 2;

                uint64_t now = mach_absolute_time();
                uint64_t delta = now - m->offset;
                clock_sec_t jl_secs = 0;
                clock_usec_t jl_usecs = 0;
                boolean_t issue_jetsam = FALSE;

                absolutetime_to_microtime(delta, &jl_secs, &jl_usecs);
                jl_usecs += jl_secs * USEC_PER_SEC;

                /* Jetsam only if the token hasn't aged sufficiently and the free count is close to the target (avoiding spurious triggers) */
                if ((jl_usecs <= JETSAM_AGE_NOTIFY_CRITICAL) && (vm_page_free_count < (FREE_TARGET_MULTIPLIER * vm_page_free_target))) {
                    issue_jetsam = TRUE;
                }

                VM_DEBUG_EVENT(vm_pageout_page_token, VM_PAGEOUT_PAGE_TOKEN, DBG_FUNC_NONE,
                               vm_page_active_count, vm_page_inactive_count, vm_page_free_count, jl_usecs);

                queue_remove(&vm_page_queue_active, m, vm_page_t, pageq);
                queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);

                if (issue_jetsam) {
                    vm_page_unlock_queues();

                    vm_page_free_list(local_freeq, TRUE);

                    VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
                                   vm_page_active_count, vm_page_inactive_count, vm_page_free_count, 0);

                    assert_wait_timeout(&latency_jetsam_wakeup, THREAD_INTERRUPTIBLE, 10 /* msecs */, 1000*NSEC_PER_USEC);
                    /* Kill the top process asynchronously */
                    memorystatus_kill_on_VM_page_shortage(TRUE);
                    thread_block(THREAD_CONTINUE_NULL);

                    VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, 0, 0, 0, 0);

                    vm_page_lock_queues();
                }
            } else {
#endif /* LATENCY_JETSAM */
                /*
                 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
                 *
                 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
                 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
                 * new reference happens. If no further references happen on the page after that remote TLB flushes
                 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
                 * by pageout_scan, which is just fine since the last reference would have happened quite far
                 * in the past (TLB caches don't hang around for very long), and of course could just as easily
                 * have happened before we moved the page
                 */
                pmap_clear_refmod_options(m->phys_page, VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);

                /*
                 * The page might be absent or busy,
                 * but vm_page_deactivate can handle that.
                 * FALSE indicates that we don't want a H/W clear reference
                 */
                vm_page_deactivate_internal(m, FALSE);

                if (delayed_unlock++ > delayed_unlock_limit) {

                    if (local_freeq) {
                        vm_page_unlock_queues();

                        VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
                                       vm_page_free_count, local_freed, delayed_unlock_limit, 1);

                        vm_page_free_list(local_freeq, TRUE);

                        VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
                                       vm_page_free_count, 0, 0, 1);

                        local_freeq = NULL;
                        local_freed = 0;
                        vm_page_lock_queues();
                    } else
                        lck_mtx_yield(&vm_page_queue_lock);

                    delayed_unlock = 1;

                    /*
                     * continue the while loop processing
                     * the active queue... need to hold
                     * the page queues lock
                     */
                }
#if LATENCY_JETSAM
            }
#endif /* LATENCY_JETSAM */
        }

        VM_DEBUG_EVENT(vm_pageout_balance, VM_PAGEOUT_BALANCE, DBG_FUNC_END,
                       vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count, vm_page_inactive_target);
        memoryshot(VM_PAGEOUT_BALANCE, DBG_FUNC_END);

        /**********************************************************************
         * above this point we're playing with the active queue
         * below this point we're playing with the throttling mechanisms
         * and the inactive queue
         **********************************************************************/

done_moving_active_pages:

        if (vm_page_free_count + local_freed >= vm_page_free_target) {
            if (object != NULL) {
                vm_object_unlock(object);
                object = NULL;
            }
            vm_pageout_scan_wants_object = VM_OBJECT_NULL;

            vm_page_unlock_queues();

            if (local_freeq) {

                VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
                               vm_page_free_count, local_freed, delayed_unlock_limit, 2);

                vm_page_free_list(local_freeq, TRUE);

                VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
                               vm_page_free_count, local_freed, 0, 2);

                local_freeq = NULL;
                local_freed = 0;
            }
            vm_page_lock_queues();

            /*
             * make sure the pageout I/O threads are running
             * throttled in case there are still requests
             * in the laundry... since we have met our targets
             * we don't need the laundry to be cleaned in a timely
             * fashion... so let's avoid interfering with foreground
             * activity
             */
            vm_pageout_adjust_io_throttles(iq, eq, TRUE);

            /*
             * recalculate vm_page_inactivate_target
             */
            vm_page_inactive_target = VM_PAGE_INACTIVE_TARGET(vm_page_active_count +
                                                              vm_page_inactive_count +
                                                              vm_page_speculative_count);
            if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) &&
                !queue_empty(&vm_page_queue_active)) {
                /*
                 * inactive target still not met... keep going
                 * until we get the queues balanced...
                 */
                continue;
            }
            lck_mtx_lock(&vm_page_queue_free_lock);

            if ((vm_page_free_count >= vm_page_free_target) &&
                (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
                /*
                 * done - we have met our target *and*
                 * there is no one waiting for a page.
                 */
return_from_scan:
                assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);

                VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_NONE,
                               vm_pageout_inactive, vm_pageout_inactive_used, 0, 0);
                VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
                               vm_pageout_speculative_clean, vm_pageout_inactive_clean,
                               vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);

                return;
            }
            lck_mtx_unlock(&vm_page_queue_free_lock);
        }

        /*
         * Before anything, we check if we have any ripe volatile
         * objects around. If so, try to purge the first object.
         * If the purge fails, fall through to reclaim a page instead.
         * If the purge succeeds, go back to the top and re-evaluate
         * the new memory situation.
         */
        pressure_level = memorystatus_vm_pressure_level;
        assert (available_for_purge>=0);

        if (available_for_purge
            || pressure_level > kVMPressureNormal
            ) {
            int force_purge;

            if (object != NULL) {
                vm_object_unlock(object);
                object = NULL;
            }

            VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
            memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_START);

            force_purge = 0; /* no force-purging */
            if (pressure_level >= kVMPressureCritical) {
                force_purge = memorystatus_purge_on_critical;
            } else if (pressure_level >= kVMPressureUrgent) {
                force_purge = memorystatus_purge_on_urgent;
            } else if (pressure_level >= kVMPressureWarning) {
                force_purge = memorystatus_purge_on_warning;
            }

            if (vm_purgeable_object_purge_one(force_purge)) {

                VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
                memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
                continue;
            }
            VM_DEBUG_EVENT(vm_pageout_purgeone, VM_PAGEOUT_PURGEONE, DBG_FUNC_END, 0, 0, 0, -1);
            memoryshot(VM_PAGEOUT_PURGEONE, DBG_FUNC_END);
        }
        if (queue_empty(&sq->age_q) && vm_page_speculative_count) {
            /*
             * try to pull pages from the aging bins...
             * see vm_page.h for an explanation of how
             * this mechanism works
             */
            struct vm_speculative_age_q	*aq;
            mach_timespec_t	ts_fully_aged;
            boolean_t	can_steal = FALSE;
            int	num_scanned_queues;

            aq = &vm_page_queue_speculative[speculative_steal_index];

            num_scanned_queues = 0;
            while (queue_empty(&aq->age_q) &&
                   num_scanned_queues++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q) {

                speculative_steal_index++;

                if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q)
                    speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;

                aq = &vm_page_queue_speculative[speculative_steal_index];
            }

            if (num_scanned_queues == VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1) {
                /*
                 * XXX We've scanned all the speculative
                 * queues but still haven't found one
                 * that is not empty, even though
                 * vm_page_speculative_count is not 0.
                 *
                 * report the anomaly...
                 */
                printf("vm_pageout_scan: "
                       "all speculative queues empty "
                       "but count=%d.  Re-adjusting.\n",
                       vm_page_speculative_count);
                if (vm_page_speculative_count > vm_page_speculative_count_drift_max)
                    vm_page_speculative_count_drift_max = vm_page_speculative_count;
                vm_page_speculative_count_drifts++;

                Debugger("vm_pageout_scan: no speculative pages");

                vm_page_speculative_count = 0;
                /* ... and continue */
            }

            if (vm_page_speculative_count > vm_page_speculative_target)
                can_steal = TRUE;
            else {
                ts_fully_aged.tv_sec = (VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) / 1000;
                ts_fully_aged.tv_nsec = ((VM_PAGE_MAX_SPECULATIVE_AGE_Q * vm_page_speculative_q_age_ms) % 1000)
                    * 1000 * NSEC_PER_USEC;

                ADD_MACH_TIMESPEC(&ts_fully_aged, &aq->age_ts);

                clock_get_system_nanotime(&sec, &nsec);
                ts.tv_sec = (unsigned int) sec;
                ts.tv_nsec = nsec;

                if (CMP_MACH_TIMESPEC(&ts, &ts_fully_aged) >= 0)
                    can_steal = TRUE;
            }
            if (can_steal == TRUE)
                vm_page_speculate_ageit(aq);
        }
        if (queue_empty(&sq->age_q) && cache_evict_throttle == 0) {
            int	pages_evicted;

            if (object != NULL) {
                vm_object_unlock(object);
                object = NULL;
            }
            pages_evicted = vm_object_cache_evict(100, 10);

            if (pages_evicted) {

                vm_pageout_cache_evicted += pages_evicted;

                VM_DEBUG_EVENT(vm_pageout_cache_evict, VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE,
                               vm_page_free_count, pages_evicted, vm_pageout_cache_evicted, 0);
                memoryshot(VM_PAGEOUT_CACHE_EVICT, DBG_FUNC_NONE);

                /*
                 * we just freed up to 100 pages,
                 * so go back to the top of the main loop
                 * and re-evaluate the memory situation
                 */
                continue;
            } else
                cache_evict_throttle = 100;
        }
        if (cache_evict_throttle)
            cache_evict_throttle--;

        exceeded_burst_throttle = FALSE;
        /*
         * Sometimes we have to pause:
         *	1) No inactive pages - nothing to do.
         *	2) Loop control - no acceptable pages found on the inactive queue
         *	   within the last vm_pageout_burst_inactive_throttle iterations
         *	3) Flow control - default pageout queue is full
         */
        if (queue_empty(&vm_page_queue_inactive) && queue_empty(&vm_page_queue_anonymous) && queue_empty(&sq->age_q)) {
            vm_pageout_scan_empty_throttle++;
            msecs = vm_pageout_empty_wait;
            goto vm_pageout_scan_delay;

        } else if (inactive_burst_count >=
                   MIN(vm_pageout_burst_inactive_throttle,
                       (vm_page_inactive_count +
                        vm_page_speculative_count))) {
            vm_pageout_scan_burst_throttle++;
            msecs = vm_pageout_burst_wait;

            exceeded_burst_throttle = TRUE;
            goto vm_pageout_scan_delay;

        } else if (vm_page_free_count > (vm_page_free_reserved / 4) &&
                   VM_PAGEOUT_SCAN_NEEDS_TO_THROTTLE()) {
            vm_pageout_scan_swap_throttle++;
            msecs = vm_pageout_swap_wait;
            goto vm_pageout_scan_delay;

        } else if (VM_PAGE_Q_THROTTLED(iq) &&
                   VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {

            switch (flow_control.state) {

            case FCS_IDLE:
                if ((vm_page_free_count + local_freed) < vm_page_free_target) {

                    if (vm_page_pageable_external_count > vm_page_filecache_min && !queue_empty(&vm_page_queue_inactive)) {
                        anons_grabbed = ANONS_GRABBED_LIMIT;
                        goto consider_inactive;
                    }
                    if (((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target) && vm_page_active_count)
                        continue;
                }
reset_deadlock_timer:
                ts.tv_sec = vm_pageout_deadlock_wait / 1000;
                ts.tv_nsec = (vm_pageout_deadlock_wait % 1000) * 1000 * NSEC_PER_USEC;
                clock_get_system_nanotime(&sec, &nsec);
                flow_control.ts.tv_sec = (unsigned int) sec;
                flow_control.ts.tv_nsec = nsec;
                ADD_MACH_TIMESPEC(&flow_control.ts, &ts);

                flow_control.state = FCS_DELAYED;
                msecs = vm_pageout_deadlock_wait;

                break;

            case FCS_DELAYED:
                clock_get_system_nanotime(&sec, &nsec);
                ts.tv_sec = (unsigned int) sec;
                ts.tv_nsec = nsec;

                if (CMP_MACH_TIMESPEC(&ts, &flow_control.ts) >= 0) {
                    /*
                     * the pageout thread for the default pager is potentially
                     * deadlocked since the
                     * default pager queue has been throttled for more than the
                     * allowable time... we need to move some clean pages or dirty
                     * pages belonging to the external pagers if they aren't throttled
                     * vm_page_free_wanted represents the number of threads currently
                     * blocked waiting for pages... we'll move one page for each of
                     * these plus a fixed amount to break the logjam... once we're done
                     * moving this number of pages, we'll re-enter the FCS_DELAYED state
                     * with a new timeout target since we have no way of knowing
                     * whether we've broken the deadlock except through observation
                     * of the queue associated with the default pager... we need to
                     * stop moving pages and allow the system to run to see what
                     * state it settles into.
                     */
                    vm_pageout_deadlock_target = vm_pageout_deadlock_relief + vm_page_free_wanted + vm_page_free_wanted_privileged;
                    vm_pageout_scan_deadlock_detected++;
                    flow_control.state = FCS_DEADLOCK_DETECTED;
                    thread_wakeup((event_t) &vm_pageout_garbage_collect);
                    goto consider_inactive;
                }
                /*
                 * just resniff instead of trying
                 * to compute a new delay time... we're going to be
                 * awakened immediately upon a laundry completion,
                 * so we won't wait any longer than necessary
                 */
                msecs = vm_pageout_idle_wait;
                break;

            case FCS_DEADLOCK_DETECTED:
                if (vm_pageout_deadlock_target)
                    goto consider_inactive;
                goto reset_deadlock_timer;

            }
vm_pageout_scan_delay:
            if (object != NULL) {
                vm_object_unlock(object);
                object = NULL;
            }
            vm_pageout_scan_wants_object = VM_OBJECT_NULL;

            vm_page_unlock_queues();

            if (local_freeq) {

                VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
                               vm_page_free_count, local_freed, delayed_unlock_limit, 3);

                vm_page_free_list(local_freeq, TRUE);

                VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
                               vm_page_free_count, local_freed, 0, 3);

                local_freeq = NULL;
                local_freed = 0;
            }
            vm_page_lock_queues();

            if (flow_control.state == FCS_DELAYED &&
                !VM_PAGE_Q_THROTTLED(iq)) {
                flow_control.state = FCS_IDLE;
                goto consider_inactive;
            }

            if (vm_page_free_count >= vm_page_free_target) {
                /*
                 * we're here because
                 *  1) someone else freed up some pages while we had
                 *     the queues unlocked above
                 *  and we've hit one of the 3 conditions that
                 *  cause us to pause the pageout scan thread
                 *
                 *  since we already have enough free pages,
                 *  let's avoid stalling and return normally
                 *
                 *  before we return, make sure the pageout I/O threads
                 *  are running throttled in case there are still requests
                 *  in the laundry... since we have enough free pages
                 *  we don't need the laundry to be cleaned in a timely
                 *  fashion... so let's avoid interfering with foreground
                 *  activity
                 *
                 *  we don't want to hold vm_page_queue_free_lock when
                 *  calling vm_pageout_adjust_io_throttles (since it
                 *  may cause other locks to be taken), we do the initial
                 *  check outside of the lock.  Once we take the lock,
                 *  we recheck the condition since it may have changed.
                 *  if it has, no problem, we will make the threads
                 *  non-throttled before actually blocking
                 */
                vm_pageout_adjust_io_throttles(iq, eq, TRUE);
            }
            lck_mtx_lock(&vm_page_queue_free_lock);

            if (vm_page_free_count >= vm_page_free_target &&
                (vm_page_free_wanted == 0) && (vm_page_free_wanted_privileged == 0)) {
                goto return_from_scan;
            }
            lck_mtx_unlock(&vm_page_queue_free_lock);

            if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target) {
                /*
                 * we're most likely about to block due to one of
                 * the 3 conditions that cause vm_pageout_scan to
                 * not be able to make forward progress w/r
                 * to providing new pages to the free queue,
                 * so unthrottle the I/O threads in case we
                 * have laundry to be cleaned... it needs
                 * to be completed ASAP.
                 *
                 * even if we don't block, we want the io threads
                 * running unthrottled since the sum of free +
                 * clean pages is still under our free target
                 */
                vm_pageout_adjust_io_throttles(iq, eq, FALSE);
            }
            if (vm_page_cleaned_count > 0 && exceeded_burst_throttle == FALSE) {
                /*
                 * if we get here we're below our free target and
                 * we're stalling due to a full laundry queue or
                 * we don't have any inactive pages other than
                 * those in the clean queue...
                 * however, we have pages on the clean queue that
                 * can be moved to the free queue, so let's not
                 * stall the pageout scan
                 */
                flow_control.state = FCS_IDLE;
                goto consider_inactive;
            }
            VM_CHECK_MEMORYSTATUS;
		if (flow_control.state != FCS_IDLE)
			vm_pageout_scan_throttle++;
		iq->pgo_throttled = TRUE;

		if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
			vm_consider_waking_compactor_swapper();

		assert_wait_timeout((event_t) &iq->pgo_laundry, THREAD_INTERRUPTIBLE, msecs, 1000*NSEC_PER_USEC);
		counter(c_vm_pageout_scan_block++);

		vm_page_unlock_queues();

		assert(vm_pageout_scan_wants_object == VM_OBJECT_NULL);

		VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START,
			       iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
		memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_START);

		thread_block(THREAD_CONTINUE_NULL);

		VM_DEBUG_EVENT(vm_pageout_thread_block, VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END,
			       iq->pgo_laundry, iq->pgo_maxlaundry, msecs, 0);
		memoryshot(VM_PAGEOUT_THREAD_BLOCK, DBG_FUNC_END);

		vm_page_lock_queues();

		iq->pgo_throttled = FALSE;

		if (loop_count >= vm_page_inactive_count)
			loop_count = 0;
		inactive_burst_count = 0;

		goto Restart;
		/*NOTREACHED*/
	}

	flow_control.state = FCS_IDLE;
consider_inactive:
	vm_pageout_inactive_external_forced_reactivate_limit = MIN((vm_page_active_count + vm_page_inactive_count),
								    vm_pageout_inactive_external_forced_reactivate_limit);

	inactive_burst_count++;
	vm_pageout_inactive++;
	if (VM_DYNAMIC_PAGING_ENABLED(memory_manager_default)) {
		assert(vm_page_throttled_count == 0);
		assert(queue_empty(&vm_page_queue_throttled));
	}
	/*
	 * The most eligible pages are ones we paged in speculatively,
	 * but which have not yet been touched.
	 */
	if (!queue_empty(&sq->age_q) ) {
		m = (vm_page_t) queue_first(&sq->age_q);

		page_prev_state = PAGE_STATE_SPECULATIVE;

		goto found_page;
	}
	/*
	 * Try a clean-queue inactive page.
	 */
	if (!queue_empty(&vm_page_queue_cleaned)) {
		m = (vm_page_t) queue_first(&vm_page_queue_cleaned);

		page_prev_state = PAGE_STATE_CLEAN;

		goto found_page;
	}

	grab_anonymous = (vm_page_anonymous_count > vm_page_anonymous_min);

	if (vm_page_pageable_external_count < vm_page_filecache_min || force_anonymous == TRUE) {
		grab_anonymous = TRUE;
	}

	if (grab_anonymous == TRUE && vm_compression_available() == FALSE)
		grab_anonymous = FALSE;

	if (grab_anonymous == FALSE || anons_grabbed >= ANONS_GRABBED_LIMIT || queue_empty(&vm_page_queue_anonymous)) {

		if ( !queue_empty(&vm_page_queue_inactive) ) {
			m = (vm_page_t) queue_first(&vm_page_queue_inactive);

			page_prev_state = PAGE_STATE_INACTIVE;
			anons_grabbed = 0;

			goto found_page;
		}
	}
	if ( !queue_empty(&vm_page_queue_anonymous) ) {
		m = (vm_page_t) queue_first(&vm_page_queue_anonymous);

		page_prev_state = PAGE_STATE_ANONYMOUS;
		anons_grabbed++;

		goto found_page;
	}
	/*
	 * if we've gotten here, we have no victim page.
	 * if making clean, free the local freed list and return.
	 * if making free, check to see if we've finished balancing the queues
	 * yet, if we haven't just continue, else panic
	 */
	vm_page_unlock_queues();

	if (object != NULL) {
		vm_object_unlock(object);
		object = NULL;
	}
	vm_pageout_scan_wants_object = VM_OBJECT_NULL;

	if (local_freeq) {
		VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
			       vm_page_free_count, local_freed, delayed_unlock_limit, 5);

		vm_page_free_list(local_freeq, TRUE);

		VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
			       vm_page_free_count, local_freed, 0, 5);

		local_freeq = NULL;
		local_freed = 0;
	}
	vm_page_lock_queues();

	if ((vm_page_inactive_count + vm_page_speculative_count) < vm_page_inactive_target)
		goto Restart;

	panic("vm_pageout: no victim");

	/* NOTREACHED */
found_page:
	force_anonymous = FALSE;
	/*
	 * we just found this page on one of our queues...
	 * it can't also be on the pageout queue, so safe
	 * to call VM_PAGE_QUEUES_REMOVE
	 */
	assert(!m->pageout_queue);

	VM_PAGE_QUEUES_REMOVE(m);

	assert(!m->laundry);
	assert(!m->private);
	assert(!m->fictitious);
	assert(m->object != kernel_object);
	assert(m->phys_page != vm_page_guard_addr);

	if (page_prev_state != PAGE_STATE_SPECULATIVE)
		vm_pageout_stats[vm_pageout_stat_now].considered++;

	DTRACE_VM2(scan, int, 1, (uint64_t *), NULL);
	/*
	 * check to see if we currently are working
	 * with the same object... if so, we've
	 * already got the lock
	 */
	if (m->object != object) {
		/*
		 * the object associated with candidate page is
		 * different from the one we were just working
		 * with... dump the lock if we still own it
		 */
		if (object != NULL) {
			vm_object_unlock(object);
			object = NULL;
			vm_pageout_scan_wants_object = VM_OBJECT_NULL;
		}
		/*
		 * Try to lock object; since we've already got the
		 * page queues lock, we can only 'try' for this one.
		 * if the 'try' fails, we need to do a mutex_pause
		 * to allow the owner of the object lock a chance to
		 * run... otherwise, we're likely to trip over this
		 * object in the same state as we work our way through
		 * the queue... clumps of pages associated with the same
		 * object are fairly typical on the inactive and active queues
		 */
		if (!vm_object_lock_try_scan(m->object)) {
			vm_page_t m_want = NULL;

			vm_pageout_inactive_nolock++;

			if (page_prev_state == PAGE_STATE_CLEAN)
				vm_pageout_cleaned_nolock++;

			if (page_prev_state == PAGE_STATE_SPECULATIVE)
				page_prev_state = PAGE_STATE_INACTIVE_FIRST;

			pmap_clear_reference(m->phys_page);
			m->reference = FALSE;

			/*
			 * m->object must be stable since we hold the page queues lock...
			 * we can update the scan_collisions field sans the object lock
			 * since it is a separate field and this is the only spot that does
			 * a read-modify-write operation and it is never executed concurrently...
			 * we can asynchronously set this field to 0 when creating a UPL, so it
			 * is possible for the value to be a bit non-deterministic, but that's ok
			 * since it's only used as a hint
			 */
			m->object->scan_collisions++;

			if ( !queue_empty(&sq->age_q) )
				m_want = (vm_page_t) queue_first(&sq->age_q);
			else if ( !queue_empty(&vm_page_queue_cleaned))
				m_want = (vm_page_t) queue_first(&vm_page_queue_cleaned);
			else if (anons_grabbed >= ANONS_GRABBED_LIMIT || queue_empty(&vm_page_queue_anonymous))
				m_want = (vm_page_t) queue_first(&vm_page_queue_inactive);
			else if ( !queue_empty(&vm_page_queue_anonymous))
				m_want = (vm_page_t) queue_first(&vm_page_queue_anonymous);

			/*
			 * this is the next object we're going to be interested in
			 * try to make sure its available after the mutex_yield
			 * returns control
			 */
			if (m_want)
				vm_pageout_scan_wants_object = m_want->object;

			/*
			 * force us to dump any collected free pages
			 * and to pause before moving on
			 */
			try_failed = TRUE;

			goto requeue_page;
		}
		object = m->object;
		vm_pageout_scan_wants_object = VM_OBJECT_NULL;

		try_failed = FALSE;
	}
	if (m->busy) {
		if (m->encrypted_cleaning) {
			/*
			 * if this page has already been picked up as
			 * part of a page-out cluster, it will be busy
			 * because it is being encrypted (see
			 * vm_object_upl_request()).  But we still
			 * want to demote it from "clean-in-place"
			 * (aka "adjacent") to "clean-and-free" (aka
			 * "target"), so let's ignore its "busy" bit
			 * here and proceed to check for "cleaning" a
			 * little bit below...
			 *
			 * A "busy" page should still be left alone for
			 * most purposes, so we have to be very careful
			 * not to process that page too much.
			 */
			assert(m->cleaning);
			goto consider_inactive_page;
		}

		/*
		 * Somebody is already playing with this page.
		 * Put it back on the appropriate queue
		 */
		vm_pageout_inactive_busy++;

		if (page_prev_state == PAGE_STATE_CLEAN)
			vm_pageout_cleaned_busy++;

requeue_page:
		switch (page_prev_state) {

		case PAGE_STATE_SPECULATIVE:
			vm_page_speculate(m, FALSE);
			break;

		case PAGE_STATE_ANONYMOUS:
		case PAGE_STATE_CLEAN:
		case PAGE_STATE_INACTIVE:
			VM_PAGE_ENQUEUE_INACTIVE(m, FALSE);
			break;

		case PAGE_STATE_INACTIVE_FIRST:
			VM_PAGE_ENQUEUE_INACTIVE(m, TRUE);
			break;
		}
		goto done_with_inactivepage;
	}
	/*
	 * If it's absent, in error or the object is no longer alive,
	 * we can reclaim the page... in the no longer alive case,
	 * there are 2 states the page can be in that preclude us
	 * from reclaiming it - busy or cleaning - that we've already
	 * dealt with
	 */
	if (m->absent || m->error || !object->alive) {

		if (m->absent)
			vm_pageout_inactive_absent++;
		else if (!object->alive)
			vm_pageout_inactive_notalive++;
		else
			vm_pageout_inactive_error++;
reclaim_page:
		if (vm_pageout_deadlock_target) {
			vm_pageout_scan_inactive_throttle_success++;
			vm_pageout_deadlock_target--;
		}

		DTRACE_VM2(dfree, int, 1, (uint64_t *), NULL);

		if (object->internal) {
			DTRACE_VM2(anonfree, int, 1, (uint64_t *), NULL);
		} else {
			DTRACE_VM2(fsfree, int, 1, (uint64_t *), NULL);
		}
		assert(!m->cleaning);
		assert(!m->laundry);

		/*
		 * remove page from object here since we're already
		 * behind the object lock... defer the rest of the work
		 * we'd normally do in vm_page_free_prepare_object
		 * until 'vm_page_free_list' is called
		 */
		vm_page_remove(m, TRUE);

		assert(m->pageq.next == NULL &&
		       m->pageq.prev == NULL);
		m->pageq.next = (queue_entry_t)local_freeq;
		local_freeq = m;
		local_freed++;

		if (page_prev_state == PAGE_STATE_SPECULATIVE)
			vm_pageout_freed_from_speculative++;
		else if (page_prev_state == PAGE_STATE_CLEAN)
			vm_pageout_freed_from_cleaned++;
		else
			vm_pageout_freed_from_inactive_clean++;

		if (page_prev_state != PAGE_STATE_SPECULATIVE)
			vm_pageout_stats[vm_pageout_stat_now].reclaimed++;

		goto done_with_inactivepage;
	}
	/*
	 * If the object is empty, the page must be reclaimed even
	 * if dirty or used.
	 * If the page belongs to a volatile object, we stick it back
	 * on.
	 */
	if (object->copy == VM_OBJECT_NULL) {
		if (object->purgable == VM_PURGABLE_EMPTY) {
			if (m->pmapped == TRUE) {
				/* unmap the page */
				refmod_state = pmap_disconnect(m->phys_page);
				if (refmod_state & VM_MEM_MODIFIED) {
					SET_PAGE_DIRTY(m, FALSE);
				}
			}
			if (m->dirty || m->precious) {
				/* we saved the cost of cleaning this page ! */
				vm_page_purged_count++;
			}
			goto reclaim_page;
		}

		if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
			/*
			 * With the VM compressor, the cost of
			 * reclaiming a page is much lower (no I/O),
			 * so if we find a "volatile" page, it's better
			 * to let it get compressed rather than letting
			 * it occupy a full page until it gets purged.
			 * So no need to check for "volatile" here.
			 */
		} else if (object->purgable == VM_PURGABLE_VOLATILE) {
			/*
			 * Avoid cleaning a "volatile" page which might
			 * be purged soon.
			 */

			/* if it's wired, we can't put it on our queue */
			assert(!VM_PAGE_WIRED(m));

			/* just stick it back on! */
			reactivated_this_call++;

			if (page_prev_state == PAGE_STATE_CLEAN)
				vm_pageout_cleaned_volatile_reactivated++;

			goto reactivate_page;
		}
	}

consider_inactive_page:
	if (m->busy) {
		/*
		 * A "busy" page should always be left alone, except...
		 */
		if (m->cleaning && m->encrypted_cleaning) {
			/*
			 * We could get here with a "busy" page
			 * if it's being encrypted during a
			 * "clean-in-place" operation.  We'll deal
			 * with it right away by testing if it has been
			 * referenced and either reactivating it or
			 * promoting it from "clean-in-place" to
			 * "clean-and-free".
			 */
		} else {
			panic("\"busy\" page considered for pageout\n");
		}
	}

	/*
	 * If it's being used, reactivate.
	 * (Fictitious pages are either busy or absent.)
	 * First, update the reference and dirty bits
	 * to make sure the page is unreferenced.
	 */
	refmod_state = -1;

	if (m->reference == FALSE && m->pmapped == TRUE) {
		refmod_state = pmap_get_refmod(m->phys_page);

		if (refmod_state & VM_MEM_REFERENCED)
			m->reference = TRUE;
		if (refmod_state & VM_MEM_MODIFIED) {
			SET_PAGE_DIRTY(m, FALSE);
		}
	}

	/*
	 * if (m->cleaning && !m->pageout)
	 *	If already cleaning this page in place and it hasn't
	 *	been recently referenced, just pull off the queue.
	 *	We can leave the page mapped, and upl_commit_range
	 *	will put it on the clean queue.
	 *
	 *	note: if m->encrypted_cleaning == TRUE, then
	 *		m->cleaning == TRUE
	 *	and we'll handle it here
	 *
	 * if (m->pageout && !m->cleaning)
	 *	an msync INVALIDATE is in progress...
	 *	this page has been marked for destruction
	 *	after it has been cleaned,
	 *	but not yet gathered into a UPL
	 *	where 'cleaning' will be set...
	 *	just leave it off the paging queues
	 *
	 * if (m->pageout && m->cleaning)
	 *	an msync INVALIDATE is in progress
	 *	and the UPL has already gathered this page...
	 *	just leave it off the paging queues
	 */

	/*
	 * page with m->pageout and still on the queues means that an
	 * MS_INVALIDATE is in progress on this page... leave it alone
	 */
	if (m->pageout) {
		goto done_with_inactivepage;
	}

	/* if cleaning, reactivate if referenced.  otherwise, just pull off queue */
	if (m->cleaning) {
		if (m->reference == TRUE) {
			reactivated_this_call++;
			goto reactivate_page;
		}
		goto done_with_inactivepage;
	}
	if (m->reference || m->dirty) {
		/* deal with a rogue "reusable" page */
		VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m);
	}

	if (m->reference && !m->no_cache) {
		/*
		 * The page we pulled off the inactive list has
		 * been referenced.  It is possible for other
		 * processors to be touching pages faster than we
		 * can clear the referenced bit and traverse the
		 * inactive queue, so we limit the number of
		 * reactivations.
		 */
		if (++reactivated_this_call >= reactivate_limit) {
			vm_pageout_reactivation_limit_exceeded++;
		} else if (catch_up_count) {
			vm_pageout_catch_ups++;
		} else if (++inactive_reclaim_run >= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM) {
			vm_pageout_inactive_force_reclaim++;
		} else {
			uint32_t isinuse;

			if (page_prev_state == PAGE_STATE_CLEAN)
				vm_pageout_cleaned_reference_reactivated++;

reactivate_page:
			if ( !object->internal && object->pager != MEMORY_OBJECT_NULL &&
			     vnode_pager_get_isinuse(object->pager, &isinuse) == KERN_SUCCESS && !isinuse) {
				/*
				 * no explicit mappings of this object exist
				 * and it's not open via the filesystem
				 */
				vm_page_deactivate(m);
				vm_pageout_inactive_deactivated++;
			} else {
				/*
				 * The page was/is being used, so put back on active list.
				 */
				vm_page_activate(m);
				VM_STAT_INCR(reactivations);
			}

			if (page_prev_state == PAGE_STATE_CLEAN)
				vm_pageout_cleaned_reactivated++;

			vm_pageout_inactive_used++;

			goto done_with_inactivepage;
		}
		/*
		 * Make sure we call pmap_get_refmod() if it
		 * wasn't already called just above, to update
		 * the dirty bit.
		 */
		if ((refmod_state == -1) && !m->dirty && m->pmapped) {
			refmod_state = pmap_get_refmod(m->phys_page);
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}
		forced_reclaim = TRUE;
	} else {
		forced_reclaim = FALSE;
	}
2488 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
2489 object
, m
->offset
, m
, 0,0);
2492 * we've got a candidate page to steal...
2494 * m->dirty is up to date courtesy of the
2495 * preceding check for m->reference... if
2496 * we get here, then m->reference had to be
2497 * FALSE (or possibly "reactivate_limit" was
2498 * exceeded), but in either case we called
2499 * pmap_get_refmod() and updated both
2500 * m->reference and m->dirty
2502 * if it's dirty or precious we need to
2503 * see if the target queue is throtttled
2504 * it if is, we need to skip over it by moving it back
2505 * to the end of the inactive queue
2508 inactive_throttled
= FALSE
;
2510 if (m
->dirty
|| m
->precious
) {
2511 if (object
->internal
) {
2512 if (VM_PAGE_Q_THROTTLED(iq
))
2513 inactive_throttled
= TRUE
;
2514 } else if (VM_PAGE_Q_THROTTLED(eq
)) {
2515 inactive_throttled
= TRUE
;
2519 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default
) &&
2520 object
->internal
&& m
->dirty
&&
2521 (object
->purgable
== VM_PURGABLE_DENY
||
2522 object
->purgable
== VM_PURGABLE_NONVOLATILE
||
2523 object
->purgable
== VM_PURGABLE_VOLATILE
)) {
2524 queue_enter(&vm_page_queue_throttled
, m
,
2526 m
->throttled
= TRUE
;
2527 vm_page_throttled_count
++;
2529 vm_pageout_scan_reclaimed_throttled
++;
2531 goto done_with_inactivepage
;
	if (inactive_throttled == TRUE) {

		if (object->internal == FALSE) {
			/*
			 * we need to break up the following potential deadlock case...
			 *  a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
			 *  b) The thread doing the writing is waiting for pages while holding the truncate lock
			 *  c) Most of the pages in the inactive queue belong to this file.
			 *
			 * we are potentially in this deadlock because...
			 *  a) the external pageout queue is throttled
			 *  b) we're done with the active queue and moved on to the inactive queue
			 *  c) we've got a dirty external page
			 *
			 * since we don't know the reason for the external pageout queue being throttled we
			 * must suspect that we are deadlocked, so move the current page onto the active queue
			 * in an effort to cause a page from the active queue to 'age' to the inactive queue
			 *
			 * if we don't have jetsam configured (i.e. we have a dynamic pager), set
			 * 'force_anonymous' to TRUE to cause us to grab a page from the cleaned/anonymous
			 * pool the next time we select a victim page... if we can make enough new free pages,
			 * the deadlock will break, the external pageout queue will empty and it will no longer
			 * be throttled
			 *
			 * if we have jetsam configured, keep a count of the pages reactivated this way so
			 * that we can try to find clean pages in the active/inactive queues before
			 * deciding to jetsam a process
			 */
			vm_pageout_scan_inactive_throttled_external++;

			queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
			m->active = TRUE;
			vm_page_active_count++;
			if (m->object->internal) {
				vm_page_pageable_internal_count++;
			} else {
				vm_page_pageable_external_count++;
			}

			vm_pageout_adjust_io_throttles(iq, eq, FALSE);

#if CONFIG_MEMORYSTATUS && CONFIG_JETSAM
			vm_pageout_inactive_external_forced_reactivate_limit--;

			if (vm_pageout_inactive_external_forced_reactivate_limit <= 0) {
				vm_pageout_inactive_external_forced_reactivate_limit = vm_page_active_count + vm_page_inactive_count;
				/*
				 * Possible deadlock scenario so request jetsam action
				 */
				vm_object_unlock(object);
				object = VM_OBJECT_NULL;
				vm_page_unlock_queues();

				VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_START,
					       vm_page_active_count, vm_page_inactive_count, vm_page_free_count, vm_page_free_count);

				/* Kill first suitable process */
				if (memorystatus_kill_on_VM_page_shortage(FALSE) == FALSE) {
					panic("vm_pageout_scan: Jetsam request failed\n");
				}

				VM_DEBUG_EVENT(vm_pageout_jetsam, VM_PAGEOUT_JETSAM, DBG_FUNC_END, 0, 0, 0, 0);

				vm_pageout_inactive_external_forced_jetsam_count++;
				vm_page_lock_queues();
			}
#else /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
			force_anonymous = TRUE;
#endif /* CONFIG_MEMORYSTATUS && CONFIG_JETSAM */
			goto done_with_inactivepage;
		} else {
			if (page_prev_state == PAGE_STATE_SPECULATIVE)
				page_prev_state = PAGE_STATE_INACTIVE;

			vm_pageout_scan_inactive_throttled_internal++;

			goto requeue_page;
		}
	}
	/*
	 * we've got a page that we can steal...
	 * eliminate all mappings and make sure
	 * we have the up-to-date modified state
	 *
	 * if we need to do a pmap_disconnect then we
	 * need to re-evaluate m->dirty since the pmap_disconnect
	 * provides the true state atomically... the
	 * page was still mapped up to the pmap_disconnect
	 * and may have been dirtied at the last microsecond
	 *
	 * Note that if 'pmapped' is FALSE then the page is not
	 * and has not been in any map, so there is no point calling
	 * pmap_disconnect().  m->dirty could have been set in anticipation
	 * of likely usage of the page.
	 */
	if (m->pmapped == TRUE) {

		if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE || object->internal == FALSE) {
			refmod_state = pmap_disconnect_options(m->phys_page, 0, NULL);
		} else {
			refmod_state = pmap_disconnect_options(m->phys_page, PMAP_OPTIONS_COMPRESSOR, NULL);
		}
		if (refmod_state & VM_MEM_MODIFIED) {
			SET_PAGE_DIRTY(m, FALSE);
		}
	}
	/*
	 * reset our count of pages that have been reclaimed
	 * since the last page was 'stolen'
	 */
	inactive_reclaim_run = 0;

	/*
	 * If it's clean and not precious, we can free the page.
	 */
	if (!m->dirty && !m->precious) {

		if (page_prev_state == PAGE_STATE_SPECULATIVE)
			vm_pageout_speculative_clean++;
		else {
			if (page_prev_state == PAGE_STATE_ANONYMOUS)
				vm_pageout_inactive_anonymous++;
			else if (page_prev_state == PAGE_STATE_CLEAN)
				vm_pageout_cleaned_reclaimed++;

			if (m->was_dirty) {
				/* page on clean queue used to be dirty; we should increment the vm_stat pageout count here */
				VM_STAT_INCR(pageouts);
				DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
			}
			vm_pageout_inactive_clean++;
		}
		/*
		 * OK, at this point we have found a page we are going to free.
		 */
		goto reclaim_page;
	}

	/*
	 * The page may have been dirtied since the last check
	 * for a throttled target queue (which may have been skipped
	 * if the page was clean then).  With the dirty page
	 * disconnected here, we can make one final check.
	 */
	if (object->internal) {
		if (VM_PAGE_Q_THROTTLED(iq))
			inactive_throttled = TRUE;
	} else if (VM_PAGE_Q_THROTTLED(eq)) {
		inactive_throttled = TRUE;
	}

	if (inactive_throttled == TRUE)
		goto throttle_inactive;

#if VM_PRESSURE_EVENTS
	vm_pressure_response();
#endif /* VM_PRESSURE_EVENTS */

	/*
	 * do NOT set the pageout bit!
	 * sure, we might need free pages, but this page is going to take time to become free
	 * anyway, so we may as well put it on the clean queue first and take it from there later
	 * if necessary.  that way, we'll ensure we don't free up too much. -mj
	 */
	vm_pageout_cluster(m, FALSE);

	if (page_prev_state == PAGE_STATE_ANONYMOUS)
		vm_pageout_inactive_anonymous++;
	if (object->internal)
		vm_pageout_inactive_dirty_internal++;
	else
		vm_pageout_inactive_dirty_external++;

done_with_inactivepage:
	inactive_burst_count = 0;
	if (delayed_unlock++ > delayed_unlock_limit || try_failed == TRUE) {

		if (object != NULL) {
			vm_pageout_scan_wants_object = VM_OBJECT_NULL;
			vm_object_unlock(object);
			object = NULL;
		}
		vm_page_unlock_queues();

		if (local_freeq) {

			VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
				       vm_page_free_count, local_freed, delayed_unlock_limit, 4);

			vm_page_free_list(local_freeq, TRUE);

			VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
				       vm_page_free_count, local_freed, 0, 4);

			local_freeq = NULL;
			local_freed = 0;
		}
		vm_page_lock_queues();
		delayed_unlock = 1;
	} else {
		lck_mtx_yield(&vm_page_queue_lock);
	}
	vm_pageout_considered_page++;

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
		vm_consider_waking_compactor_swapper();

	/*
	 * back to top of pageout scan loop
	 */
	}
}
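
/*
 * Illustrative sketch (added for readability, not part of the original
 * source): the flow-control logic in the scan above can be read as a small
 * state machine.  Assuming the states behave as the code shows:
 *
 *	FCS_IDLE              -- scanning normally
 *	FCS_DELAYED           -- the laundry queue is throttled; wait "msecs"
 *	                         for a laundry completion, or for the deadlock
 *	                         timer to expire
 *	FCS_DEADLOCK_DETECTED -- move vm_pageout_deadlock_target pages without
 *	                         waiting, then re-arm the deadlock timer
 *
 *	switch (flow_control.state) {
 *	case FCS_IDLE:              reset the deadlock timer, keep scanning
 *	case FCS_DELAYED:           wait, or escalate to FCS_DEADLOCK_DETECTED
 *	case FCS_DEADLOCK_DETECTED: drain the target, then reset the timer
 *	}
 */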

int vm_page_free_count_init;

void
vm_page_free_reserve(
	int pages)
{
	int	free_after_reserve;

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {

		if ((vm_page_free_reserved + pages + COMPRESSOR_FREE_RESERVED_LIMIT) >= (VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT))
			vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT + COMPRESSOR_FREE_RESERVED_LIMIT;
		else
			vm_page_free_reserved += (pages + COMPRESSOR_FREE_RESERVED_LIMIT);

	} else {
		if ((vm_page_free_reserved + pages) >= VM_PAGE_FREE_RESERVED_LIMIT)
			vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;
		else
			vm_page_free_reserved += pages;
	}
	free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;

	vm_page_free_min = vm_page_free_reserved +
		VM_PAGE_FREE_MIN(free_after_reserve);

	if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
		vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;

	vm_page_free_target = vm_page_free_reserved +
		VM_PAGE_FREE_TARGET(free_after_reserve);

	if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
		vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;

	if (vm_page_free_target < vm_page_free_min + 5)
		vm_page_free_target = vm_page_free_min + 5;

	vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
	vm_page_creation_throttle = vm_page_free_target * 3;
}
/*
 *	vm_pageout is the high level pageout daemon.
 */

void
vm_pageout_continue(void)
{
	DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
	vm_pageout_scan_event_counter++;

	vm_pageout_scan();
	/*
	 * we hold both the vm_page_queue_free_lock
	 * and the vm_page_queues_lock at this point
	 */
	assert(vm_page_free_wanted == 0);
	assert(vm_page_free_wanted_privileged == 0);
	assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);

	lck_mtx_unlock(&vm_page_queue_free_lock);
	vm_page_unlock_queues();

	counter(c_vm_pageout_block++);
	thread_block((thread_continue_t)vm_pageout_continue);
	/*NOTREACHED*/
}
#ifdef FAKE_DEADLOCK

#define FAKE_COUNT	5000

int internal_count = 0;
int fake_deadlock = 0;

#endif

static void
vm_pageout_iothread_continue(struct vm_pageout_queue *q)
{
	vm_page_t	m = NULL;
	vm_object_t	object;
	vm_object_offset_t offset;
	memory_object_t	pager;
	thread_t	self = current_thread();
	if ((vm_pageout_internal_iothread != THREAD_NULL)
	    && (self == vm_pageout_external_iothread)
	    && (self->options & TH_OPT_VMPRIV))
		self->options &= ~TH_OPT_VMPRIV;

	vm_page_lockspin_queues();

	while ( !queue_empty(&q->pgo_pending) ) {

		q->pgo_busy = TRUE;
		queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
		if (m->object->object_slid) {
			panic("slid page %p not allowed on this path\n", m);
		}
		m->pageout_queue = FALSE;
		m->pageq.next = NULL;
		m->pageq.prev = NULL;

		/*
		 * grab a snapshot of the object and offset this
		 * page is tabled in so that we can relookup this
		 * page after we've taken the object lock - these
		 * fields are stable while we hold the page queues lock
		 * but as soon as we drop it, there is nothing to keep
		 * this page in this object... we hold an activity_in_progress
		 * on this object which will keep it from terminating
		 */
		object = m->object;
		offset = m->offset;

		vm_page_unlock_queues();

#ifdef FAKE_DEADLOCK
		if (q == &vm_pageout_queue_internal) {
			vm_offset_t addr;
			int	pg_count;

			internal_count++;

			if ((internal_count == FAKE_COUNT)) {

				pg_count = vm_page_free_count + vm_page_free_reserved;

				if (kmem_alloc(kernel_map, &addr, PAGE_SIZE * pg_count) == KERN_SUCCESS) {
					kmem_free(kernel_map, addr, PAGE_SIZE * pg_count);
				}
				internal_count = 0;
			}
		}
#endif
		vm_object_lock(object);

		m = vm_page_lookup(object, offset);

		if (m == NULL ||
		    m->busy || m->cleaning || m->pageout_queue || !m->laundry) {
			/*
			 * it's either the same page that someone else has
			 * started cleaning (or it's finished cleaning or
			 * been put back on the pageout queue), or
			 * the page has been freed or we have found a
			 * new page at this offset... in all of these cases
			 * we merely need to release the activity_in_progress
			 * we took when we put the page on the pageout queue
			 */
			vm_object_activity_end(object);
			vm_object_unlock(object);

			vm_page_lockspin_queues();
			continue;
		}
		if (!object->pager_initialized) {

			/*
			 * If there is no memory object for the page, create
			 * one and hand it to the default pager.
			 */

			if (!object->pager_initialized)
				vm_object_collapse(object,
						   (vm_object_offset_t) 0,
						   TRUE);
			if (!object->pager_initialized)
				vm_object_pager_create(object);
			if (!object->pager_initialized) {
				/*
				 * Still no pager for the object.
				 * Reactivate the page.
				 *
				 * Should only happen if there is no
				 * default pager.
				 */
				vm_page_lockspin_queues();

				vm_pageout_throttle_up(m);
				vm_page_activate(m);
				vm_pageout_dirty_no_pager++;

				vm_page_unlock_queues();

				/*
				 * And we are done with it.
				 */
				vm_object_activity_end(object);
				vm_object_unlock(object);

				vm_page_lockspin_queues();
				continue;
			}
		}
		pager = object->pager;

		if (pager == MEMORY_OBJECT_NULL) {
			/*
			 * This pager has been destroyed by either
			 * memory_object_destroy or vm_object_destroy, and
			 * so there is nowhere for the page to go.
			 */
			if (m->pageout) {
				/*
				 * Just free the page... VM_PAGE_FREE takes
				 * care of cleaning up all the state...
				 * including doing the vm_pageout_throttle_up
				 */
				VM_PAGE_FREE(m);
			} else {
				vm_page_lockspin_queues();

				vm_pageout_throttle_up(m);
				vm_page_activate(m);

				vm_page_unlock_queues();

				/*
				 * And we are done with it.
				 */
			}
			vm_object_activity_end(object);
			vm_object_unlock(object);

			vm_page_lockspin_queues();
			continue;
		}
#if 0
		/*
		 * we don't hold the page queue lock
		 * so this check isn't safe to make
		 */
		VM_PAGE_CHECK(m);
#endif
		/*
		 * give back the activity_in_progress reference we
		 * took when we queued up this page and replace it
		 * with a paging_in_progress reference that will
		 * also hold the paging offset from changing and
		 * prevent the object from terminating
		 */
		vm_object_activity_end(object);
		vm_object_paging_begin(object);
		vm_object_unlock(object);

		/*
		 * Send the data to the pager.
		 * any pageout clustering happens there
		 */
		memory_object_data_return(pager,
					  m->offset + object->paging_offset,
					  PAGE_SIZE,
					  NULL,
					  NULL,
					  FALSE,
					  FALSE,
					  0);

		vm_object_lock(object);
		vm_object_paging_end(object);
		vm_object_unlock(object);

		vm_pageout_io_throttle();

		vm_page_lockspin_queues();
	}
	q->pgo_busy = FALSE;
	q->pgo_idle = TRUE;

	assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
	vm_page_unlock_queues();

	thread_block_parameter((thread_continue_t)vm_pageout_iothread_continue, (void *) q);
	/*NOTREACHED*/
}
static void
vm_pageout_iothread_external_continue(struct vm_pageout_queue *q)
{
	vm_page_t	m = NULL;
	vm_object_t	object;
	vm_object_offset_t offset;
	memory_object_t	pager;

	if (vm_pageout_internal_iothread != THREAD_NULL)
		current_thread()->options &= ~TH_OPT_VMPRIV;

	vm_page_lockspin_queues();

	while ( !queue_empty(&q->pgo_pending) ) {

		q->pgo_busy = TRUE;
		queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);
		if (m->object->object_slid) {
			panic("slid page %p not allowed on this path\n", m);
		}
		m->pageout_queue = FALSE;
		m->pageq.next = NULL;
		m->pageq.prev = NULL;

		/*
		 * grab a snapshot of the object and offset this
		 * page is tabled in so that we can relookup this
		 * page after we've taken the object lock - these
		 * fields are stable while we hold the page queues lock
		 * but as soon as we drop it, there is nothing to keep
		 * this page in this object... we hold an activity_in_progress
		 * on this object which will keep it from terminating
		 */
		object = m->object;
		offset = m->offset;

		vm_page_unlock_queues();

		vm_object_lock(object);

		m = vm_page_lookup(object, offset);

		if (m == NULL ||
		    m->busy || m->cleaning || m->pageout_queue || !m->laundry) {
			/*
			 * it's either the same page that someone else has
			 * started cleaning (or it's finished cleaning or
			 * been put back on the pageout queue), or
			 * the page has been freed or we have found a
			 * new page at this offset... in all of these cases
			 * we merely need to release the activity_in_progress
			 * we took when we put the page on the pageout queue
			 */
			vm_object_activity_end(object);
			vm_object_unlock(object);

			vm_page_lockspin_queues();
			continue;
		}
		pager = object->pager;

		if (pager == MEMORY_OBJECT_NULL) {
			/*
			 * This pager has been destroyed by either
			 * memory_object_destroy or vm_object_destroy, and
			 * so there is nowhere for the page to go.
			 */
			if (m->pageout) {
				/*
				 * Just free the page... VM_PAGE_FREE takes
				 * care of cleaning up all the state...
				 * including doing the vm_pageout_throttle_up
				 */
				VM_PAGE_FREE(m);
			} else {
				vm_page_lockspin_queues();

				vm_pageout_throttle_up(m);
				vm_page_activate(m);

				vm_page_unlock_queues();

				/*
				 * And we are done with it.
				 */
			}
			vm_object_activity_end(object);
			vm_object_unlock(object);

			vm_page_lockspin_queues();
			continue;
		}
#if 0
		/*
		 * we don't hold the page queue lock
		 * so this check isn't safe to make
		 */
		VM_PAGE_CHECK(m);
#endif
		/*
		 * give back the activity_in_progress reference we
		 * took when we queued up this page and replace it
		 * with a paging_in_progress reference that will
		 * also hold the paging offset from changing and
		 * prevent the object from terminating
		 */
		vm_object_activity_end(object);
		vm_object_paging_begin(object);
		vm_object_unlock(object);

		/*
		 * Send the data to the pager.
		 * any pageout clustering happens there
		 */
		memory_object_data_return(pager,
					  m->offset + object->paging_offset,
					  PAGE_SIZE,
					  NULL,
					  NULL,
					  FALSE,
					  FALSE,
					  0);

		vm_object_lock(object);
		vm_object_paging_end(object);
		vm_object_unlock(object);

		vm_pageout_io_throttle();

		vm_page_lockspin_queues();
	}
	q->pgo_busy = FALSE;
	q->pgo_idle = TRUE;

	assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
	vm_page_unlock_queues();

	thread_block_parameter((thread_continue_t)vm_pageout_iothread_external_continue, (void *) q);
	/*NOTREACHED*/
}
uint32_t vm_compressor_failed;

static void
vm_pageout_iothread_internal_continue(struct cq *cq)
{
	struct vm_pageout_queue *q;
	vm_page_t	m = NULL;
	vm_object_t	object;
	memory_object_t	pager;
	boolean_t	pgo_draining;
	vm_page_t	local_q;
	int		local_cnt;
	vm_page_t	local_freeq = NULL;
	int		local_freed = 0;
	int		local_batch_size;
	kern_return_t	retval;

	KERNEL_DEBUG(0xe040000c | DBG_FUNC_END, 0, 0, 0, 0, 0);

	q = cq->q;
	local_batch_size = q->pgo_maxlaundry / (vm_compressor_thread_count * 4);

	while (TRUE) {

		local_cnt = 0;
		local_q = NULL;

		KERNEL_DEBUG(0xe0400014 | DBG_FUNC_START, 0, 0, 0, 0, 0);

		vm_page_lock_queues();

		KERNEL_DEBUG(0xe0400014 | DBG_FUNC_END, 0, 0, 0, 0, 0);

		KERNEL_DEBUG(0xe0400018 | DBG_FUNC_START, 0, 0, 0, 0, 0);

		while ( !queue_empty(&q->pgo_pending) && local_cnt < local_batch_size) {

			queue_remove_first(&q->pgo_pending, m, vm_page_t, pageq);

			m->pageout_queue = FALSE;
			m->pageq.prev = NULL;

			m->pageq.next = (queue_entry_t)local_q;
			local_q = m;
			local_cnt++;
		}
		if (local_q == NULL)
			break;

		q->pgo_busy = TRUE;

		if ((pgo_draining = q->pgo_draining) == FALSE)
			vm_pageout_throttle_up_batch(q, local_cnt);

		vm_page_unlock_queues();

		KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);

		while (local_q) {

			m = local_q;
			local_q = (vm_page_t)m->pageq.next;
			m->pageq.next = NULL;

			if (m->object->object_slid) {
				panic("slid page %p not allowed on this path\n", m);
			}

			object = m->object;
			pager = object->pager;

			if (!object->pager_initialized || pager == MEMORY_OBJECT_NULL) {

				KERNEL_DEBUG(0xe0400010 | DBG_FUNC_START, object, pager, 0, 0, 0);

				vm_object_lock(object);

				/*
				 * If there is no memory object for the page, create
				 * one and hand it to the compression pager.
				 */

				if (!object->pager_initialized)
					vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
				if (!object->pager_initialized)
					vm_object_compressor_pager_create(object);

				if (!object->pager_initialized) {
					/*
					 * Still no pager for the object.
					 * Reactivate the page.
					 *
					 * Should only happen if there is no
					 * compression pager.
					 */
					PAGE_WAKEUP_DONE(m);

					vm_page_lockspin_queues();
					vm_page_activate(m);
					vm_pageout_dirty_no_pager++;
					vm_page_unlock_queues();

					/*
					 * And we are done with it.
					 */
					vm_object_activity_end(object);
					vm_object_unlock(object);

					continue;
				}
				pager = object->pager;

				if (pager == MEMORY_OBJECT_NULL) {
					/*
					 * This pager has been destroyed by either
					 * memory_object_destroy or vm_object_destroy, and
					 * so there is nowhere for the page to go.
					 */
					if (m->pageout) {
						/*
						 * Just free the page... VM_PAGE_FREE takes
						 * care of cleaning up all the state...
						 * including doing the vm_pageout_throttle_up
						 */
						VM_PAGE_FREE(m);
					} else {
						PAGE_WAKEUP_DONE(m);

						vm_page_lockspin_queues();
						vm_page_activate(m);
						vm_page_unlock_queues();

						/*
						 * And we are done with it.
						 */
					}
					vm_object_activity_end(object);
					vm_object_unlock(object);

					continue;
				}
				vm_object_unlock(object);

				KERNEL_DEBUG(0xe0400010 | DBG_FUNC_END, object, pager, 0, 0, 0);
			}
			while (vm_page_free_count < (vm_page_free_reserved - COMPRESSOR_FREE_RESERVED_LIMIT)) {
				kern_return_t	wait_result;
				int		need_wakeup = 0;

				if (local_freeq) {
					vm_page_free_list(local_freeq, TRUE);

					local_freeq = NULL;
					local_freed = 0;

					continue;
				}
				lck_mtx_lock_spin(&vm_page_queue_free_lock);

				if (vm_page_free_count < (vm_page_free_reserved - COMPRESSOR_FREE_RESERVED_LIMIT)) {

					if (vm_page_free_wanted_privileged++ == 0)
						need_wakeup = 1;
					wait_result = assert_wait((event_t)&vm_page_free_wanted_privileged, THREAD_UNINT);

					lck_mtx_unlock(&vm_page_queue_free_lock);

					if (need_wakeup)
						thread_wakeup((event_t)&vm_page_free_wanted);

					if (wait_result == THREAD_WAITING)
						thread_block(THREAD_CONTINUE_NULL);
				} else
					lck_mtx_unlock(&vm_page_queue_free_lock);
			}
			retval = vm_compressor_pager_put(pager, m->offset + object->paging_offset, m->phys_page, &cq->current_chead, cq->scratch_buf);

			vm_object_lock(object);

			if (retval == KERN_SUCCESS) {

				vm_page_compressions_failing = FALSE;

				VM_STAT_INCR(compressions);

				vm_page_remove(m, TRUE);
				vm_object_activity_end(object);
				vm_object_unlock(object);

				m->pageq.next = (queue_entry_t)local_freeq;
				local_freeq = m;
				local_freed++;
			} else {
				PAGE_WAKEUP_DONE(m);

				vm_page_lockspin_queues();

				vm_page_activate(m);
				vm_compressor_failed++;

				vm_page_compressions_failing = TRUE;

				vm_page_unlock_queues();

				vm_object_activity_end(object);
				vm_object_unlock(object);
			}
		}
		if (local_freeq) {
			vm_page_free_list(local_freeq, TRUE);

			local_freeq = NULL;
			local_freed = 0;
		}
		if (pgo_draining == TRUE) {
			vm_page_lockspin_queues();
			vm_pageout_throttle_up_batch(q, local_cnt);
			vm_page_unlock_queues();
		}
	}
	KERNEL_DEBUG(0xe040000c | DBG_FUNC_START, 0, 0, 0, 0, 0);

	/*
	 * queue lock is held and our q is empty
	 */
	q->pgo_busy = FALSE;
	q->pgo_idle = TRUE;

	assert_wait((event_t) &q->pgo_pending, THREAD_UNINT);
	vm_page_unlock_queues();

	KERNEL_DEBUG(0xe0400018 | DBG_FUNC_END, 0, 0, 0, 0, 0);

	thread_block_parameter((thread_continue_t)vm_pageout_iothread_internal_continue, (void *) cq);
	/*NOTREACHED*/
}
static void
vm_pageout_adjust_io_throttles(struct vm_pageout_queue *iq, struct vm_pageout_queue *eq, boolean_t req_lowpriority)
{
	int		policy;
	boolean_t	set_iq = FALSE;
	boolean_t	set_eq = FALSE;

	if (hibernate_cleaning_in_progress == TRUE)
		req_lowpriority = FALSE;

	if ((DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) && iq->pgo_inited == TRUE && iq->pgo_lowpriority != req_lowpriority)
		set_iq = TRUE;

	if (eq->pgo_inited == TRUE && eq->pgo_lowpriority != req_lowpriority)
		set_eq = TRUE;

	if (set_iq == TRUE || set_eq == TRUE) {

		vm_page_unlock_queues();

		if (req_lowpriority == TRUE) {
			policy = THROTTLE_LEVEL_PAGEOUT_THROTTLED;
			DTRACE_VM(laundrythrottle);
		} else {
			policy = THROTTLE_LEVEL_PAGEOUT_UNTHROTTLED;
			DTRACE_VM(laundryunthrottle);
		}
		if (set_iq == TRUE) {
			proc_set_task_policy_thread(kernel_task, iq->pgo_tid, TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);

			iq->pgo_lowpriority = req_lowpriority;
		}
		if (set_eq == TRUE) {
			proc_set_task_policy_thread(kernel_task, eq->pgo_tid, TASK_POLICY_EXTERNAL, TASK_POLICY_IO, policy);

			eq->pgo_lowpriority = req_lowpriority;
		}
		vm_page_lock_queues();
	}
}
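
/*
 * Usage sketch (drawn from the callers visible elsewhere in this file, not
 * an additional API): vm_pageout_scan() flips both queues between the
 * throttled and unthrottled I/O policies depending on how far the free list
 * is from its target, roughly:
 *
 *	if (vm_page_free_count >= vm_page_free_target)
 *		vm_pageout_adjust_io_throttles(iq, eq, TRUE);	// low-priority I/O
 *	else if ((vm_page_free_count + vm_page_cleaned_count) < vm_page_free_target)
 *		vm_pageout_adjust_io_throttles(iq, eq, FALSE);	// normal-priority I/O
 *
 * The page queues lock is held on entry; the routine drops and retakes it
 * around the proc_set_task_policy_thread() calls.
 */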
static void
vm_pageout_iothread_external(void)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);

	proc_set_task_policy_thread(kernel_task, self->thread_id, TASK_POLICY_EXTERNAL,
				    TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);

	vm_page_lock_queues();

	vm_pageout_queue_external.pgo_tid = self->thread_id;
	vm_pageout_queue_external.pgo_lowpriority = TRUE;
	vm_pageout_queue_external.pgo_inited = TRUE;

	vm_page_unlock_queues();

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
		vm_pageout_iothread_external_continue(&vm_pageout_queue_external);
	else
		vm_pageout_iothread_continue(&vm_pageout_queue_external);
	/*NOTREACHED*/
}
static void
vm_pageout_iothread_internal(struct cq *cq)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	if (DEFAULT_PAGER_IS_ACTIVE || DEFAULT_FREEZER_IS_ACTIVE) {
		DTRACE_VM2(laundrythrottle, int, 1, (uint64_t *), NULL);

		proc_set_task_policy_thread(kernel_task, self->thread_id, TASK_POLICY_EXTERNAL,
					    TASK_POLICY_IO, THROTTLE_LEVEL_PAGEOUT_THROTTLED);
	}
	vm_page_lock_queues();

	vm_pageout_queue_internal.pgo_tid = self->thread_id;
	vm_pageout_queue_internal.pgo_lowpriority = TRUE;
	vm_pageout_queue_internal.pgo_inited = TRUE;

	vm_page_unlock_queues();

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		cq->q = &vm_pageout_queue_internal;
		cq->current_chead = NULL;
		cq->scratch_buf = kalloc(COMPRESSOR_SCRATCH_BUF_SIZE);

		vm_pageout_iothread_internal_continue(cq);
	} else
		vm_pageout_iothread_continue(&vm_pageout_queue_internal);
	/*NOTREACHED*/
}
kern_return_t
vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
{
	if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE; /* Already set */
	}
}
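
/*
 * Usage sketch (hypothetical caller; the function name below is invented
 * purely for illustration): a buffer-cache layer registers its collection
 * routine once at startup, and only the first registration wins:
 *
 *	static boolean_t my_buffer_cache_collect(int force);	// hypothetical
 *
 *	if (vm_set_buffer_cleanup_callout(my_buffer_cache_collect) != KERN_SUCCESS) {
 *		// somebody else already registered a callout
 *	}
 *
 * vm_pageout_garbage_collect() below then invokes the registered callout
 * with an argument of 0 when it is trying to recover memory.
 */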

extern boolean_t memorystatus_manual_testing_on;
extern unsigned int memorystatus_level;

#if VM_PRESSURE_EVENTS
void
vm_pressure_response(void)
{
	vm_pressure_level_t	old_level = kVMPressureNormal;
	int			new_level = -1;

	uint64_t		available_memory = (((uint64_t) AVAILABLE_NON_COMPRESSED_MEMORY) * 100);

	memorystatus_level = (unsigned int) (available_memory / atop_64(max_mem));

	if (memorystatus_manual_testing_on) {
		return;
	}

	old_level = memorystatus_vm_pressure_level;

	switch (memorystatus_vm_pressure_level) {

	case kVMPressureNormal:
		if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
			new_level = kVMPressureCritical;
		} else if (VM_PRESSURE_NORMAL_TO_WARNING()) {
			new_level = kVMPressureWarning;
		}
		break;

	case kVMPressureWarning:
	case kVMPressureUrgent:
		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
			new_level = kVMPressureNormal;
		} else if (VM_PRESSURE_WARNING_TO_CRITICAL()) {
			new_level = kVMPressureCritical;
		}
		break;

	case kVMPressureCritical:
		if (VM_PRESSURE_WARNING_TO_NORMAL()) {
			new_level = kVMPressureNormal;
		} else if (VM_PRESSURE_CRITICAL_TO_WARNING()) {
			new_level = kVMPressureWarning;
		}
		break;
	}

	if (new_level != -1) {
		memorystatus_vm_pressure_level = (vm_pressure_level_t) new_level;

		if (old_level != new_level) {
			if (vm_pressure_thread_running == FALSE) {
				thread_wakeup(&vm_pressure_thread);
			}
			thread_wakeup(&vm_pressure_changed);
		}
	}
}
#endif /* VM_PRESSURE_EVENTS */
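
/*
 * The transitions above amount to the following table (a reading aid only,
 * no additional behavior):
 *
 *	current level              may move to
 *	-------------              -----------
 *	kVMPressureNormal          Warning or Critical
 *	kVMPressureWarning/Urgent  Normal or Critical
 *	kVMPressureCritical        Normal or Warning
 *
 * A change only wakes vm_pressure_thread (and vm_pressure_changed waiters)
 * when the new level actually differs from the previous one.
 */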

kern_return_t
mach_vm_pressure_level_monitor(__unused boolean_t wait_for_pressure, __unused unsigned int *pressure_level) {

#if !VM_PRESSURE_EVENTS

	return KERN_FAILURE;

#else /* VM_PRESSURE_EVENTS */

	kern_return_t	kr = KERN_SUCCESS;

	if (pressure_level != NULL) {

		vm_pressure_level_t	old_level = memorystatus_vm_pressure_level;

		if (wait_for_pressure == TRUE) {
			wait_result_t		wr = 0;

			while (old_level == *pressure_level) {
				wr = assert_wait((event_t) &vm_pressure_changed,
						 THREAD_INTERRUPTIBLE);
				if (wr == THREAD_WAITING) {
					wr = thread_block(THREAD_CONTINUE_NULL);
				}
				if (wr == THREAD_INTERRUPTED) {
					return KERN_ABORTED;
				}
				if (wr == THREAD_AWAKENED) {

					old_level = memorystatus_vm_pressure_level;

					if (old_level != *pressure_level) {
						break;
					}
				}
			}
		}

		*pressure_level = old_level;
	} else {
		kr = KERN_INVALID_ARGUMENT;
	}

	return kr;
#endif /* VM_PRESSURE_EVENTS */
}
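
/*
 * Usage sketch (illustrative only, not an additional interface): a caller
 * can poll the current level, or block until it moves away from a level it
 * already knows about:
 *
 *	unsigned int level = kVMPressureNormal;
 *
 *	// non-blocking query of the current level
 *	(void) mach_vm_pressure_level_monitor(FALSE, &level);
 *
 *	// block until the level changes from 'level'
 *	if (mach_vm_pressure_level_monitor(TRUE, &level) == KERN_ABORTED) {
 *		// the wait was interrupted
 *	}
 */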

#if VM_PRESSURE_EVENTS
void
vm_pressure_thread(void) {
	static boolean_t set_up_thread = FALSE;

	if (set_up_thread) {
		vm_pressure_thread_running = TRUE;
		consider_vm_pressure_events();
		vm_pressure_thread_running = FALSE;
	}

	set_up_thread = TRUE;
	assert_wait((event_t) &vm_pressure_thread, THREAD_UNINT);
	thread_block((thread_continue_t)vm_pressure_thread);
}
#endif /* VM_PRESSURE_EVENTS */

uint32_t vm_pageout_considered_page_last = 0;

/*
 * called once per-second via "compute_averages"
 */
void
compute_pageout_gc_throttle()
{
	if (vm_pageout_considered_page != vm_pageout_considered_page_last) {

		vm_pageout_considered_page_last = vm_pageout_considered_page;

		thread_wakeup((event_t) &vm_pageout_garbage_collect);
	}
}

static void
vm_pageout_garbage_collect(int collect)
{
	if (collect) {
		boolean_t buf_large_zfree = FALSE;
		boolean_t first_try = TRUE;

		consider_machine_collect();

		do {
			if (consider_buffer_cache_collect != NULL) {
				buf_large_zfree = (*consider_buffer_cache_collect)(0);
			}
			if (first_try == TRUE || buf_large_zfree == TRUE) {
				/*
				 * consider_zone_gc should be last, because the other operations
				 * might return memory to zones.
				 */
				consider_zone_gc(buf_large_zfree);
			}
			first_try = FALSE;

		} while (buf_large_zfree == TRUE && vm_page_free_count < vm_page_free_target);

		consider_machine_adjust();
	}
	assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);

	thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
	/*NOTREACHED*/
}

#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
extern vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */

void
vm_pageout(void)
{
	thread_t	self = current_thread();
	thread_t	thread;
	kern_return_t	result;
	spl_t		s;

	/*
	 * Set thread privileges.
	 */
	s = splsched();
	thread_lock(self);

	self->priority = BASEPRI_PREEMPT - 1;
	set_sched_pri(self, self->priority);
	thread_unlock(self);

	if (!self->reserved_stack)
		self->reserved_stack = self->kernel_stack;

	splx(s);

	/*
	 * Initialize some paging parameters.
	 */

	if (vm_pageout_swap_wait == 0)
		vm_pageout_swap_wait = VM_PAGEOUT_SWAP_WAIT;

	if (vm_pageout_idle_wait == 0)
		vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;

	if (vm_pageout_burst_wait == 0)
		vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;

	if (vm_pageout_empty_wait == 0)
		vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;

	if (vm_pageout_deadlock_wait == 0)
		vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;

	if (vm_pageout_deadlock_relief == 0)
		vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;

	if (vm_pageout_inactive_relief == 0)
		vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;

	if (vm_pageout_burst_active_throttle == 0)
		vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;

	if (vm_pageout_burst_inactive_throttle == 0)
		vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;

	vm_page_filecache_min = (uint32_t) (max_mem / PAGE_SIZE) / 20;
	if (vm_page_filecache_min < VM_PAGE_FILECACHE_MIN)
		vm_page_filecache_min = VM_PAGE_FILECACHE_MIN;

	/*
	 * Set kernel task to low backing store privileged
	 */
	task_lock(kernel_task);
	kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(kernel_task);

	vm_page_free_count_init = vm_page_free_count;

	/*
	 * even if we've already called vm_page_free_reserve
	 * call it again here to insure that the targets are
	 * accurately calculated (it uses vm_page_free_count_init)
	 * calling it with an arg of 0 will not change the reserve
	 * but will re-calculate free_min and free_target
	 */
	if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
		vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
	} else
		vm_page_free_reserve(0);

	queue_init(&vm_pageout_queue_external.pgo_pending);
	vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
	vm_pageout_queue_external.pgo_laundry = 0;
	vm_pageout_queue_external.pgo_idle = FALSE;
	vm_pageout_queue_external.pgo_busy = FALSE;
	vm_pageout_queue_external.pgo_throttled = FALSE;
	vm_pageout_queue_external.pgo_draining = FALSE;
	vm_pageout_queue_external.pgo_lowpriority = FALSE;
	vm_pageout_queue_external.pgo_tid = -1;
	vm_pageout_queue_external.pgo_inited = FALSE;

	queue_init(&vm_pageout_queue_internal.pgo_pending);
	vm_pageout_queue_internal.pgo_maxlaundry = 0;
	vm_pageout_queue_internal.pgo_laundry = 0;
	vm_pageout_queue_internal.pgo_idle = FALSE;
	vm_pageout_queue_internal.pgo_busy = FALSE;
	vm_pageout_queue_internal.pgo_throttled = FALSE;
	vm_pageout_queue_internal.pgo_draining = FALSE;
	vm_pageout_queue_internal.pgo_lowpriority = FALSE;
	vm_pageout_queue_internal.pgo_tid = -1;
	vm_pageout_queue_internal.pgo_inited = FALSE;

	/* internal pageout thread started when default pager registered first time */
	/* external pageout and garbage collection threads started here */

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
					      BASEPRI_PREEMPT - 1,
					      &vm_pageout_external_iothread);
	if (result != KERN_SUCCESS)
		panic("vm_pageout_iothread_external: create failed");

	thread_deallocate(vm_pageout_external_iothread);

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
					      BASEPRI_DEFAULT,
					      &thread);
	if (result != KERN_SUCCESS)
		panic("vm_pageout_garbage_collect: create failed");

	thread_deallocate(thread);

#if VM_PRESSURE_EVENTS
	result = kernel_thread_start_priority((thread_continue_t)vm_pressure_thread, NULL,
					      BASEPRI_DEFAULT,
					      &thread);

	if (result != KERN_SUCCESS)
		panic("vm_pressure_thread: create failed");

	thread_deallocate(thread);
#endif

	vm_object_reaper_init();

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE)
		vm_compressor_pager_init();

#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
	printf("**** DEBUG: protecting fake buckets [0x%llx:0x%llx]\n",
	       vm_page_fake_buckets_start, vm_page_fake_buckets_end);
	pmap_protect(kernel_pmap,
		     vm_page_fake_buckets_start,
		     vm_page_fake_buckets_end,
		     VM_PROT_READ);
//	*(char *) vm_page_fake_buckets_start = 'x';	/* panic! */
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */
	vm_pageout_continue();

	/*
	 * Unreachable code below.
	 *
	 * The vm_pageout_continue() call above never returns, so the code below is never
	 * executed.  We take advantage of this to declare several DTrace VM related probe
	 * points that our kernel doesn't have an analog for.  These are probe points that
	 * exist in Solaris and are in the DTrace documentation, so people may have written
	 * scripts that use them.  Declaring the probe points here means their scripts will
	 * compile and execute which we want for portability of the scripts, but since this
	 * section of code is never reached, the probe points will simply never fire.  Yes,
	 * this is basically a hack.  The problem is the DTrace probe points were chosen with
	 * Solaris specific VM events in mind, not portability to different VM implementations.
	 */

	DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
	/*NOTREACHED*/
}

#define MAX_COMRPESSOR_THREAD_COUNT	8

struct cq ciq[MAX_COMRPESSOR_THREAD_COUNT];

int vm_compressor_thread_count = 2;

kern_return_t
vm_pageout_internal_start(void)
{
	kern_return_t	result;
	int		i;
	host_basic_info_data_t hinfo;

	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;

		host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);

		assert(hinfo.max_cpus > 0);

		if (vm_compressor_thread_count >= hinfo.max_cpus)
			vm_compressor_thread_count = hinfo.max_cpus - 1;
		if (vm_compressor_thread_count <= 0)
			vm_compressor_thread_count = 1;
		else if (vm_compressor_thread_count > MAX_COMRPESSOR_THREAD_COUNT)
			vm_compressor_thread_count = MAX_COMRPESSOR_THREAD_COUNT;

		vm_pageout_queue_internal.pgo_maxlaundry = (vm_compressor_thread_count * 4) * VM_PAGE_LAUNDRY_MAX;
	} else {
		vm_compressor_thread_count = 1;
		vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
	}

	for (i = 0; i < vm_compressor_thread_count; i++) {

		result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, (void *)&ciq[i], BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
		if (result == KERN_SUCCESS)
			thread_deallocate(vm_pageout_internal_iothread);
		else
			break;
	}
	return result;
}

static upl_t
upl_create(int type, int flags, upl_size_t size)
{
	upl_t	upl;
	vm_size_t	page_field_size = 0;
	int	upl_flags = 0;
	vm_size_t	upl_size = sizeof(struct upl);

	size = round_page_32(size);

	if (type & UPL_CREATE_LITE) {
		page_field_size = (atop(size) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;

		upl_flags |= UPL_LITE;
	}
	if (type & UPL_CREATE_INTERNAL) {
		upl_size += sizeof(struct upl_page_info) * atop(size);

		upl_flags |= UPL_INTERNAL;
	}
	upl = (upl_t)kalloc(upl_size + page_field_size);

	if (page_field_size)
		bzero((char *)upl + upl_size, page_field_size);

	upl->flags = upl_flags | flags;
	upl->src_object = NULL;
	upl->kaddr = (vm_offset_t)0;
	upl->size = 0;
	upl->map_object = NULL;
	upl->ref_count = 1;
	upl->ext_ref_count = 0;
	upl->highest_page = 0;
	upl_lock_init(upl);
	upl->vector_upl = NULL;
#if UPL_DEBUG
	upl->ubc_alias1 = 0;
	upl->ubc_alias2 = 0;

	upl->upl_creator = current_thread();
	upl->upl_commit_index = 0;
	bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));

	(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif /* UPL_DEBUG */

	return(upl);
}
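
/*
 * Sizing example (illustrative): for a 1 MB request created with
 * UPL_CREATE_INTERNAL | UPL_CREATE_LITE on 4 KB pages, atop(size) is 256, so
 *
 *	page_field_size = ((256 + 7) >> 3) rounded up to a 4-byte multiple -> 32 bytes
 *	upl_size        = sizeof(struct upl) + 256 * sizeof(struct upl_page_info)
 *
 * and the single kalloc() above covers the upl header, the page-info array
 * and the lite bitmap, with the bitmap zeroed explicitly.
 */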

static void
upl_destroy(upl_t upl)
{
	int	page_field_size;  /* bit field in word size buf */
	int	size;

	if (upl->ext_ref_count) {
		panic("upl(%p) ext_ref_count", upl);
	}

#if UPL_DEBUG
	if ( !(upl->flags & UPL_VECTOR)) {
		vm_object_t	object;

		if (upl->flags & UPL_SHADOWED) {
			object = upl->map_object->shadow;
		} else {
			object = upl->map_object;
		}
		vm_object_lock(object);
		queue_remove(&object->uplq, upl, upl_t, uplq);
		vm_object_activity_end(object);
		vm_object_collapse(object, 0, TRUE);
		vm_object_unlock(object);
	}
#endif /* UPL_DEBUG */
	/*
	 * drop a reference on the map_object whether or
	 * not a pageout object is inserted
	 */
	if (upl->flags & UPL_SHADOWED)
		vm_object_deallocate(upl->map_object);

	if (upl->flags & UPL_DEVICE_MEMORY)
		size = PAGE_SIZE;
	else
		size = upl->size;
	page_field_size = 0;

	if (upl->flags & UPL_LITE) {
		page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
	}
	upl_lock_destroy(upl);
	upl->vector_upl = (vector_upl_t) 0xfeedbeef;

	if (upl->flags & UPL_INTERNAL) {
		kfree(upl,
		      sizeof(struct upl) +
		      (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
		      + page_field_size);
	} else {
		kfree(upl, sizeof(struct upl) + page_field_size);
	}
}

void
upl_deallocate(upl_t upl)
{
	if (--upl->ref_count == 0) {
		if (vector_upl_is_valid(upl))
			vector_upl_deallocate(upl);
		upl_destroy(upl);
	}
}

#if DEVELOPMENT || DEBUG
/*
 * Statistics about UPL enforcement of copy-on-write obligations.
 */
unsigned long upl_cow = 0;
unsigned long upl_cow_again = 0;
unsigned long upl_cow_pages = 0;
unsigned long upl_cow_again_pages = 0;

unsigned long iopl_cow = 0;
unsigned long iopl_cow_pages = 0;
#endif
/*
 *  Routine:    vm_object_upl_request
 *  Purpose:
 *      Cause the population of a portion of a vm_object.
 *      Depending on the nature of the request, the pages
 *      returned may contain valid data or be uninitialized.
 *      A page list structure, listing the physical pages
 *      will be returned upon request.
 *      This function is called by the file system or any other
 *      supplier of backing store to a pager.
 *      IMPORTANT NOTE: The caller must still respect the relationship
 *      between the vm_object and its backing memory object.  The
 *      caller MUST NOT substitute changes in the backing file
 *      without first doing a memory_object_lock_request on the
 *      target range unless it is known that the pages are not
 *      shared with another entity at the pager level.
 *  Copy_in_to:
 *      if a page list structure is present
 *      return the mapped physical pages, where a
 *      page is not present, return a non-initialized
 *      one.  If the no_sync bit is turned on, don't
 *      call the pager unlock to synchronize with other
 *      possible copies of the page.  Leave pages busy
 *      in the original object, if a page list structure
 *      was specified.  When a commit of the page list
 *      pages is done, the dirty bit will be set for each one.
 *  Copy_out_from:
 *      If a page list structure is present, return
 *      all mapped pages.  Where a page does not exist
 *      map a zero filled one.  Leave pages busy in
 *      the original object.  If a page list structure
 *      is not specified, this call is a no-op.
 *
 *  Note:   access of default pager objects has a rather interesting
 *      twist.  The caller of this routine, presumably the file system
 *      page cache handling code, will never actually make a request
 *      against a default pager backed object.  Only the default
 *      pager will make requests on backing store related vm_objects.
 *      In this way the default pager can maintain the relationship
 *      between backing store files (abstract memory objects) and
 *      the vm_objects (cache objects) they support.
 */
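/*
 * Layout note for the pointer arithmetic used below: an INTERNAL UPL is
 * a single allocation containing the struct upl itself, immediately
 * followed by the upl_page_info_t array (one entry per page) and, for a
 * LITE UPL, by the wpl_array_t bitmap that tracks which pages the UPL
 * actually holds.
 */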
__private_extern__ kern_return_t
vm_object_upl_request(
    vm_object_t             object,
    vm_object_offset_t      offset,
    upl_size_t              size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     cntrl_flags)
{
    vm_page_t               dst_page = VM_PAGE_NULL;
    vm_object_offset_t      dst_offset;
    upl_size_t              xfer_size;
    unsigned int            size_in_pages;
    boolean_t               dirty;
    boolean_t               hw_dirty;
    upl_t                   upl = NULL;
    unsigned int            entry;
#if MACH_CLUSTER_STATS
    boolean_t               encountered_lrp = FALSE;
#endif
    vm_page_t               alias_page = NULL;
    int                     refmod_state = 0;
    wpl_array_t             lite_list = NULL;
    vm_object_t             last_copy_object;
    struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
    struct vm_page_delayed_work *dwp;
    int                     dw_count;
    int                     dw_limit;

    if (cntrl_flags & ~UPL_VALID_FLAGS) {
        /*
         * For forward compatibility's sake,
         * reject any unknown flag.
         */
        return KERN_INVALID_VALUE;
    }
    if ( (!object->internal) && (object->paging_offset != 0) )
        panic("vm_object_upl_request: external object with non-zero paging offset\n");
    if (object->phys_contiguous)
        panic("vm_object_upl_request: contiguous object specified\n");

    if ((size / PAGE_SIZE) > MAX_UPL_SIZE)
        size = MAX_UPL_SIZE * PAGE_SIZE;

    if ( (cntrl_flags & UPL_SET_INTERNAL) && page_list_count != NULL)
        *page_list_count = MAX_UPL_SIZE;
    if (cntrl_flags & UPL_SET_INTERNAL) {
        if (cntrl_flags & UPL_SET_LITE) {

            upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, 0, size);

            user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
            lite_list = (wpl_array_t)
                (((uintptr_t)user_page_list) +
                 ((size/PAGE_SIZE) * sizeof(upl_page_info_t)));
            if (size == 0) {
                user_page_list = NULL;
                lite_list = NULL;
            }
        } else {
            upl = upl_create(UPL_CREATE_INTERNAL, 0, size);

            user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
            if (size == 0) {
                user_page_list = NULL;
            }
        }
    } else {
        if (cntrl_flags & UPL_SET_LITE) {

            upl = upl_create(UPL_CREATE_EXTERNAL | UPL_CREATE_LITE, 0, size);

            lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
        } else {
            upl = upl_create(UPL_CREATE_EXTERNAL, 0, size);
        }
    }
    if (user_page_list)
        user_page_list[0].device = FALSE;
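/*
 * A LITE request maps the pages in place, so the original object doubles
 * as the map_object; otherwise a throw-away shadow object is set up below
 * and the UPL is marked UPL_SHADOWED so that the extra object reference
 * and the fictitious alias pages are reclaimed at destroy time.
 */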
    if (cntrl_flags & UPL_SET_LITE) {
        upl->map_object = object;
    } else {
        upl->map_object = vm_object_allocate(size);
        /*
         * No need to lock the new object: nobody else knows
         * about it yet, so it's all ours so far.
         */
        upl->map_object->shadow = object;
        upl->map_object->pageout = TRUE;
        upl->map_object->can_persist = FALSE;
        upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
        upl->map_object->vo_shadow_offset = offset;
        upl->map_object->wimg_bits = object->wimg_bits;

        VM_PAGE_GRAB_FICTITIOUS(alias_page);

        upl->flags |= UPL_SHADOWED;
    }
    /*
     * Just mark the UPL as "encrypted" here.
     * We'll actually encrypt the pages later,
     * in upl_encrypt(), when the caller has
     * selected which pages need to go to swap.
     */
    if (cntrl_flags & UPL_ENCRYPT)
        upl->flags |= UPL_ENCRYPTED;

    if (cntrl_flags & UPL_FOR_PAGEOUT)
        upl->flags |= UPL_PAGEOUT;

    vm_object_lock(object);
    vm_object_activity_begin(object);

    /*
     * we can lock in the paging_offset once paging_in_progress is set
     */
    upl->offset = offset + object->paging_offset;

#if UPL_DEBUG
    vm_object_activity_begin(object);
    queue_enter(&object->uplq, upl, upl_t, uplq);
#endif /* UPL_DEBUG */
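/*
 * At this point the object is locked and paging activity has been
 * started, so upl->offset (the object offset biased by paging_offset)
 * stays valid for the lifetime of this UPL.
 */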
    if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != VM_OBJECT_NULL) {
        /*
         * Honor copy-on-write obligations
         *
         * The caller is gathering these pages and
         * might modify their contents.  We need to
         * make sure that the copy object has its own
         * private copies of these pages before we let
         * the caller modify them.
         */
        vm_object_update(object,
                         offset,
                         size,
                         NULL,
                         NULL,
                         FALSE,         /* should_return */
                         MEMORY_OBJECT_COPY_SYNC,
                         VM_PROT_NO_CHANGE);
#if DEVELOPMENT || DEBUG
        upl_cow++;
        upl_cow_pages += size >> PAGE_SHIFT;
#endif
        /*
         * remember which copy object we synchronized with
         */
        last_copy_object = object->copy;
    }
    xfer_size = size;
    dst_offset = offset;
    size_in_pages = size / PAGE_SIZE;

    dwp = &dw_array[0];
    dw_count = 0;
    dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

    if (vm_page_free_count > (vm_page_free_target + size_in_pages) ||
        object->resident_page_count < (MAX_UPL_SIZE * 2))
        object->scan_collisions = 0;

    while (xfer_size) {

        dwp->dw_mask = 0;

        if ((alias_page == NULL) && !(cntrl_flags & UPL_SET_LITE)) {
            vm_object_unlock(object);
            VM_PAGE_GRAB_FICTITIOUS(alias_page);
            vm_object_lock(object);
        }
        if (cntrl_flags & UPL_COPYOUT_FROM) {
            upl->flags |= UPL_PAGE_SYNC_DONE;

            if ( ((dst_page = vm_page_lookup(object, dst_offset)) == VM_PAGE_NULL) ||
                 dst_page->fictitious ||
                 dst_page->cleaning ||
                 (VM_PAGE_WIRED(dst_page))) {

                if (user_page_list)
                    user_page_list[entry].phys_addr = 0;

                goto try_next_page;
            }
            /*
             * grab this up front...
             * a high percentage of the time we're going to
             * need the hardware modification state a bit later
             * anyway... so we can eliminate an extra call into
             * the pmap layer by grabbing it here and recording it
             */
            if (dst_page->pmapped)
                refmod_state = pmap_get_refmod(dst_page->phys_page);
            else
                refmod_state = 0;

            if ( (refmod_state & VM_MEM_REFERENCED) && dst_page->inactive ) {
                /*
                 * page is on inactive list and referenced...
                 * reactivate it now... this gets it out of the
                 * way of vm_pageout_scan which would have to
                 * reactivate it upon tripping over it
                 */
                dwp->dw_mask |= DW_vm_page_activate;
            }
            if (cntrl_flags & UPL_RET_ONLY_DIRTY) {
                /*
                 * we're only asking for DIRTY pages to be returned
                 */
                if (dst_page->laundry || !(cntrl_flags & UPL_FOR_PAGEOUT)) {
                    /*
                     * if we were the page stolen by vm_pageout_scan to be
                     * cleaned (as opposed to a buddy being clustered in,
                     * or this request is not being driven by a PAGEOUT cluster)
                     * then we only need to check for the page being dirty or
                     * precious to decide whether to return it
                     */
                    if (dst_page->dirty || dst_page->precious || (refmod_state & VM_MEM_MODIFIED))
                        goto check_busy;
                    goto dont_return;
                }
                /*
                 * this is a request for a PAGEOUT cluster and this page
                 * is merely along for the ride as a 'buddy'... not only
                 * does it have to be dirty to be returned, but it also
                 * can't have been referenced recently...
                 */
                if ( (hibernate_cleaning_in_progress == TRUE ||
                      (!((refmod_state & VM_MEM_REFERENCED) || dst_page->reference) || dst_page->throttled)) &&
                     ((refmod_state & VM_MEM_MODIFIED) || dst_page->dirty || dst_page->precious) ) {
                    goto check_busy;
                }
dont_return:
                /*
                 * if we reach here, we're not to return
                 * the page... go on to the next one
                 */
                if (dst_page->laundry == TRUE) {
                    /*
                     * if we get here, the page is not 'cleaning' (filtered out above).
                     * since it has been referenced, remove it from the laundry
                     * so we don't pay the cost of an I/O to clean a page
                     * we're just going to take back
                     */
                    vm_page_lockspin_queues();

                    vm_pageout_steal_laundry(dst_page, TRUE);
                    vm_page_activate(dst_page);

                    vm_page_unlock_queues();
                }
                if (user_page_list)
                    user_page_list[entry].phys_addr = 0;

                goto try_next_page;
            }
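/*
 * Pages rejected by the filtering above are reported with a phys_addr
 * of 0 in the caller's page list; that is how COPYOUT_FROM callers tell
 * skipped slots apart from pages that were actually gathered.
 */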
check_busy:
            if (dst_page->busy) {
                if (cntrl_flags & UPL_NOBLOCK) {
                    if (user_page_list)
                        user_page_list[entry].phys_addr = 0;

                    goto try_next_page;
                }
                /*
                 * someone else is playing with the
                 * page.  We will have to wait.
                 */
                PAGE_SLEEP(object, dst_page, THREAD_UNINT);

                continue;
            }
            /*
             * The caller is gathering this page and might
             * access its contents later on.  Decrypt the
             * page before adding it to the UPL, so that
             * the caller never sees encrypted data.
             */
            if (! (cntrl_flags & UPL_ENCRYPT) && dst_page->encrypted) {
                int was_busy;

                /*
                 * save the current state of busy
                 * mark page as busy while decrypt
                 * is in progress since it will drop
                 * the object lock...
                 */
                was_busy = dst_page->busy;
                dst_page->busy = TRUE;

                vm_page_decrypt(dst_page, 0);
                vm_page_decrypt_for_upl_counter++;
                /*
                 * restore to original busy state
                 */
                dst_page->busy = was_busy;
            }
            if (dst_page->pageout_queue == TRUE) {

                vm_page_lockspin_queues();

                if (dst_page->pageout_queue == TRUE) {
                    /*
                     * we've buddied up a page for a clustered pageout
                     * that has already been moved to the pageout
                     * queue by pageout_scan... we need to remove
                     * it from the queue and drop the laundry count
                     */
                    vm_pageout_throttle_up(dst_page);
                }
                vm_page_unlock_queues();
            }
#if MACH_CLUSTER_STATS
            /*
             * pageout statistics gathering.  count
             * all the pages we will page out that
             * were not counted in the initial
             * vm_pageout_scan work
             */
            if (dst_page->pageout)
                encountered_lrp = TRUE;
            if ((dst_page->dirty || (dst_page->object->internal && dst_page->precious))) {
                if (encountered_lrp)
                    CLUSTER_STAT(pages_at_higher_offsets++;)
                else
                    CLUSTER_STAT(pages_at_lower_offsets++;)
            }
#endif
            hw_dirty = refmod_state & VM_MEM_MODIFIED;
            dirty = hw_dirty ? TRUE : dst_page->dirty;

            if (dst_page->phys_page > upl->highest_page)
                upl->highest_page = dst_page->phys_page;

            if (cntrl_flags & UPL_SET_LITE) {
                unsigned int pg_num;

                pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
                assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
                lite_list[pg_num>>5] |= 1 << (pg_num & 31);

                if (hw_dirty)
                    pmap_clear_modify(dst_page->phys_page);

                /*
                 * Mark original page as cleaning
                 * in place.
                 */
                dst_page->cleaning = TRUE;
                dst_page->precious = FALSE;
            } else {
                /*
                 * use pageclean setup, it is more
                 * convenient even for the pageout
                 * cases here
                 */
                vm_object_lock(upl->map_object);
                vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
                vm_object_unlock(upl->map_object);

                alias_page->absent = FALSE;
                alias_page = NULL;
            }
#if MACH_PAGEMAP
            /*
             * Record that this page has been
             * written out
             */
            vm_external_state_set(object->existence_map, dst_page->offset);
#endif  /*MACH_PAGEMAP*/
            if (dirty) {
                SET_PAGE_DIRTY(dst_page, FALSE);
            } else {
                dst_page->dirty = FALSE;
            }

            if (!dirty)
                dst_page->precious = TRUE;

            if ( (cntrl_flags & UPL_ENCRYPT) ) {
                /*
                 * We want to deny access to the target page
                 * because its contents are about to be
                 * encrypted and the user would be very
                 * confused to see encrypted data instead
                 * of their data.
                 * We also set "encrypted_cleaning" to allow
                 * vm_pageout_scan() to demote that page
                 * from "adjacent/clean-in-place" to
                 * "target/clean-and-free" if it bumps into
                 * this page during its scanning while we're
                 * still processing this cluster.
                 */
                dst_page->busy = TRUE;
                dst_page->encrypted_cleaning = TRUE;
            }
            if ( !(cntrl_flags & UPL_CLEAN_IN_PLACE) ) {
                if ( !VM_PAGE_WIRED(dst_page))
                    dst_page->pageout = TRUE;
            }
        } else {
            if ((cntrl_flags & UPL_WILL_MODIFY) && object->copy != last_copy_object) {
                /*
                 * Honor copy-on-write obligations
                 *
                 * The copy object has changed since we
                 * last synchronized for copy-on-write.
                 * Another copy object might have been
                 * inserted while we released the object's
                 * lock.  Since someone could have seen the
                 * original contents of the remaining pages
                 * through that new object, we have to
                 * synchronize with it again for the remaining
                 * pages only.  The previous pages are "busy"
                 * so they can not be seen through the new
                 * mapping.  The new mapping will see our
                 * upcoming changes for those previous pages,
                 * but that's OK since they couldn't see what
                 * was there before.  It's just a race anyway
                 * and there's no guarantee of consistency or
                 * atomicity.  We just don't want new mappings
                 * to see both the *before* and *after* pages.
                 */
                if (object->copy != VM_OBJECT_NULL) {
                    vm_object_update(
                        object,
                        dst_offset,     /* current offset */
                        xfer_size,      /* remaining size */
                        NULL,
                        NULL,
                        FALSE,          /* should_return */
                        MEMORY_OBJECT_COPY_SYNC,
                        VM_PROT_NO_CHANGE);

#if DEVELOPMENT || DEBUG
                    upl_cow_again++;
                    upl_cow_again_pages += xfer_size >> PAGE_SHIFT;
#endif
                }
                /*
                 * remember the copy object we synced with
                 */
                last_copy_object = object->copy;
            }
            dst_page = vm_page_lookup(object, dst_offset);

            if (dst_page != VM_PAGE_NULL) {

                if ((cntrl_flags & UPL_RET_ONLY_ABSENT)) {
                    /*
                     * skip over pages already present in the cache
                     */
                    if (user_page_list)
                        user_page_list[entry].phys_addr = 0;

                    goto try_next_page;
                }
                if (dst_page->fictitious) {
                    panic("need corner case for fictitious page");
                }
                if (dst_page->busy || dst_page->cleaning) {
                    /*
                     * someone else is playing with the
                     * page.  We will have to wait.
                     */
                    PAGE_SLEEP(object, dst_page, THREAD_UNINT);

                    continue;
                }
                if (dst_page->laundry) {
                    dst_page->pageout = FALSE;

                    vm_pageout_steal_laundry(dst_page, FALSE);
                }
            } else {
                if (object->private) {
                    /*
                     * This is a nasty wrinkle for users
                     * of upl who encounter device or
                     * private memory however, it is
                     * unavoidable, only a fault can
                     * resolve the actual backing
                     * physical page by asking the
                     * backing device.
                     */
                    if (user_page_list)
                        user_page_list[entry].phys_addr = 0;

                    goto try_next_page;
                }
                if (object->scan_collisions) {
                    /*
                     * the pageout_scan thread is trying to steal
                     * pages from this object, but has run into our
                     * lock... grab 2 pages from the head of the object...
                     * the first is freed on behalf of pageout_scan, the
                     * 2nd is for our own use... we use vm_object_page_grab
                     * in both cases to avoid taking pages from the free
                     * list since we are under memory pressure and our
                     * lock on this object is getting in the way of
                     * vm_pageout_scan
                     */
                    dst_page = vm_object_page_grab(object);

                    if (dst_page != VM_PAGE_NULL)
                        vm_page_release(dst_page);

                    dst_page = vm_object_page_grab(object);
                }
                if (dst_page == VM_PAGE_NULL) {
                    /*
                     * need to allocate a page
                     */
                    dst_page = vm_page_grab();
                }
                if (dst_page == VM_PAGE_NULL) {
                    if ( (cntrl_flags & (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) == (UPL_RET_ONLY_ABSENT | UPL_NOBLOCK)) {
                        /*
                         * we don't want to stall waiting for pages to come onto the free list
                         * while we're already holding absent pages in this UPL
                         * the caller will deal with the empty slots
                         */
                        if (user_page_list)
                            user_page_list[entry].phys_addr = 0;

                        goto try_next_page;
                    }
                    /*
                     * no pages available... wait
                     * then try again for the same
                     * offset...
                     */
                    vm_object_unlock(object);

                    OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);

                    VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);

                    OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

                    VM_DEBUG_EVENT(vm_upl_page_wait, VM_UPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);

                    vm_object_lock(object);

                    continue;
                }
                vm_page_insert(dst_page, object, dst_offset);

                dst_page->absent = TRUE;
                dst_page->busy = FALSE;

                if (cntrl_flags & UPL_RET_ONLY_ABSENT) {
                    /*
                     * if UPL_RET_ONLY_ABSENT was specified,
                     * then we're definitely setting up a
                     * upl for a clustered read/pagein
                     * operation... mark the pages as clustered
                     * so upl_commit_range can put them on the
                     * speculative queue
                     */
                    dst_page->clustered = TRUE;
                }
            }
            if (cntrl_flags & UPL_ENCRYPT) {
                /*
                 * The page is going to be encrypted when we
                 * get it from the pager, so mark it so.
                 */
                dst_page->encrypted = TRUE;
            } else {
                /*
                 * Otherwise, the page will not contain
                 * encrypted data.
                 */
                dst_page->encrypted = FALSE;
            }
            dst_page->overwriting = TRUE;

            if (dst_page->pmapped) {
                if ( !(cntrl_flags & UPL_FILE_IO))
                    /*
                     * eliminate all mappings from the
                     * original object and its progeny
                     */
                    refmod_state = pmap_disconnect(dst_page->phys_page);
                else
                    refmod_state = pmap_get_refmod(dst_page->phys_page);
            } else
                refmod_state = 0;

            hw_dirty = refmod_state & VM_MEM_MODIFIED;
            dirty = hw_dirty ? TRUE : dst_page->dirty;

            if (cntrl_flags & UPL_SET_LITE) {
                unsigned int pg_num;

                pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
                assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
                lite_list[pg_num>>5] |= 1 << (pg_num & 31);

                if (hw_dirty)
                    pmap_clear_modify(dst_page->phys_page);

                /*
                 * Mark original page as cleaning
                 * in place.
                 */
                dst_page->cleaning = TRUE;
                dst_page->precious = FALSE;
            } else {
                /*
                 * use pageclean setup, it is more
                 * convenient even for the pageout
                 * cases here
                 */
                vm_object_lock(upl->map_object);
                vm_pageclean_setup(dst_page, alias_page, upl->map_object, size - xfer_size);
                vm_object_unlock(upl->map_object);

                alias_page->absent = FALSE;
                alias_page = NULL;
            }
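/*
 * The lite_list updates above pack one bit per page: page N of the UPL
 * lives in 32-bit word N>>5 at bit position N&31, so e.g. page 40 sets
 * bit 8 of lite_list[1].
 */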
            if (cntrl_flags & UPL_REQUEST_SET_DIRTY) {
                upl->flags &= ~UPL_CLEAR_DIRTY;
                upl->flags |= UPL_SET_DIRTY;
                dirty = TRUE;
                upl->flags |= UPL_SET_DIRTY;
            } else if (cntrl_flags & UPL_CLEAN_IN_PLACE) {
                /*
                 * clean in place for read implies
                 * that a write will be done on all
                 * the pages that are dirty before
                 * a upl commit is done.  The caller
                 * is obligated to preserve the
                 * contents of all pages marked dirty
                 */
                upl->flags |= UPL_CLEAR_DIRTY;
            }
            dst_page->dirty = dirty;

            if (!dirty)
                dst_page->precious = TRUE;

            if ( !VM_PAGE_WIRED(dst_page)) {
                /*
                 * deny access to the target page while
                 * it is being worked on
                 */
                dst_page->busy = TRUE;
            } else
                dwp->dw_mask |= DW_vm_page_wire;

            /*
             * We might be about to satisfy a fault which has been
             * requested.  So no need for the "restart" bit.
             */
            dst_page->restart = FALSE;
            if (!dst_page->absent && !(cntrl_flags & UPL_WILL_MODIFY)) {
                /*
                 * expect the page to be used
                 */
                dwp->dw_mask |= DW_set_reference;
            }
            if (cntrl_flags & UPL_PRECIOUS) {
                if (dst_page->object->internal) {
                    SET_PAGE_DIRTY(dst_page, FALSE);
                    dst_page->precious = FALSE;
                } else {
                    dst_page->precious = TRUE;
                }
            } else {
                dst_page->precious = FALSE;
            }
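            /*
             * Note that the dwp->dw_mask bits accumulated in this loop are
             * deferred work requests: they are batched into dw_array and
             * applied in groups of up to dw_limit pages by
             * vm_page_do_delayed_work(), so the page queues are locked once
             * per batch rather than once per page.
             */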
        }
        if (dst_page->busy)
            upl->flags |= UPL_HAS_BUSY;

        if (dst_page->phys_page > upl->highest_page)
            upl->highest_page = dst_page->phys_page;

        if (user_page_list) {
            user_page_list[entry].phys_addr = dst_page->phys_page;
            user_page_list[entry].pageout   = dst_page->pageout;
            user_page_list[entry].absent    = dst_page->absent;
            user_page_list[entry].dirty     = dst_page->dirty;
            user_page_list[entry].precious  = dst_page->precious;
            user_page_list[entry].device    = FALSE;
            user_page_list[entry].needed    = FALSE;
            if (dst_page->clustered == TRUE)
                user_page_list[entry].speculative = dst_page->speculative;
            else
                user_page_list[entry].speculative = FALSE;
            user_page_list[entry].cs_validated = dst_page->cs_validated;
            user_page_list[entry].cs_tainted = dst_page->cs_tainted;
        }
        /*
         * if UPL_RET_ONLY_ABSENT is set, then
         * we are working with a fresh page and we've
         * just set the clustered flag on it to
         * indicate that it was drug in as part of a
         * speculative cluster... so leave it alone
         */
        if ( !(cntrl_flags & UPL_RET_ONLY_ABSENT)) {
            /*
             * someone is explicitly grabbing this page...
             * update clustered and speculative state
             */
            VM_PAGE_CONSUME_CLUSTERED(dst_page);
        }
try_next_page:
        if (dwp->dw_mask) {
            if (dwp->dw_mask & DW_vm_page_activate)
                VM_STAT_INCR(reactivations);

            VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);

            if (dw_count >= dw_limit) {
                vm_page_do_delayed_work(object, &dw_array[0], dw_count);

                dwp = &dw_array[0];
                dw_count = 0;
            }
        }
        entry++;
        dst_offset += PAGE_SIZE_64;
        xfer_size -= PAGE_SIZE;
    }
    if (dw_count)
        vm_page_do_delayed_work(object, &dw_array[0], dw_count);

    if (alias_page != NULL) {
        VM_PAGE_FREE(alias_page);
    }

    if (page_list_count != NULL) {
        if (upl->flags & UPL_INTERNAL)
            *page_list_count = 0;
        else if (*page_list_count > entry)
            *page_list_count = entry;
    }
    vm_object_unlock(object);

    return KERN_SUCCESS;
}
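/*
 * vm_fault_list_request() below is a thin backward-compatibility
 * wrapper: it forwards to memory_object_upl_request() and, for internal
 * UPLs, hands the caller the page list embedded in the UPL itself.
 */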
/* JMM - Backward compatibility for now */
kern_return_t
vm_fault_list_request(  /* forward */
    memory_object_control_t control,
    vm_object_offset_t      offset,
    upl_size_t              size,
    upl_t                   *upl_ptr,
    upl_page_info_t         **user_page_list_ptr,
    unsigned int            page_list_count,
    int                     cntrl_flags);
kern_return_t
vm_fault_list_request(
    memory_object_control_t control,
    vm_object_offset_t      offset,
    upl_size_t              size,
    upl_t                   *upl_ptr,
    upl_page_info_t         **user_page_list_ptr,
    unsigned int            page_list_count,
    int                     cntrl_flags)
{
    unsigned int        local_list_count;
    upl_page_info_t     *user_page_list;
    kern_return_t       kr;

    if ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR)
        return KERN_INVALID_ARGUMENT;

    if (user_page_list_ptr != NULL) {
        local_list_count = page_list_count;
        user_page_list = *user_page_list_ptr;
    } else {
        local_list_count = 0;
        user_page_list = NULL;
    }
    kr = memory_object_upl_request(control,
                                   offset,
                                   size,
                                   upl_ptr,
                                   user_page_list,
                                   &local_list_count,
                                   cntrl_flags);

    if (kr != KERN_SUCCESS)
        return kr;

    if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
        *user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
    }

    return KERN_SUCCESS;
}
/*
 *  Routine:    vm_object_super_upl_request
 *  Purpose:
 *      Cause the population of a portion of a vm_object
 *      in much the same way as memory_object_upl_request.
 *      Depending on the nature of the request, the pages
 *      returned may contain valid data or be uninitialized.
 *      However, the region may be expanded up to the super
 *      cluster size provided.
 */
__private_extern__ kern_return_t
vm_object_super_upl_request(
    vm_object_t             object,
    vm_object_offset_t      offset,
    upl_size_t              size,
    upl_size_t              super_cluster,
    upl_t                   *upl,
    upl_page_info_t         *user_page_list,
    unsigned int            *page_list_count,
    int                     cntrl_flags)
{
    if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR) == UPL_VECTOR))
        return KERN_FAILURE;

    assert(object->paging_in_progress);
    offset = offset - object->paging_offset;

    if (super_cluster > size) {

        vm_object_offset_t  base_offset;
        upl_size_t          super_size;
        vm_object_size_t    super_size_64;

        base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
        super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
        super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
        super_size = (upl_size_t) super_size_64;
        assert(super_size == super_size_64);

        if (offset > (base_offset + super_size)) {
            panic("vm_object_super_upl_request: Missed target pageout"
                  " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
                  offset, base_offset, super_size, super_cluster,
                  size, object->paging_offset);
        }
        /*
         * apparently there is a case where the vm requests a
         * page to be written out whose offset is beyond the
         * object's size
         */
        if ((offset + size) > (base_offset + super_size)) {
            super_size_64 = (offset + size) - base_offset;
            super_size = (upl_size_t) super_size_64;
            assert(super_size == super_size_64);
        }

        offset = base_offset;
        size = super_size;
    }
    return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
}
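/*
 * Worked example for the expansion above: with a 1MB super_cluster, a
 * request at offset 0x180000 of size 0x100000 straddles a cluster
 * boundary, so base_offset becomes 0x100000 and super_size doubles to
 * 2MB (clipped back to the object size if it would run past the end).
 */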
kern_return_t
vm_map_create_upl(
    vm_map_t                map,
    vm_map_address_t        offset,
    upl_size_t              *upl_size,
    upl_t                   *upl,
    upl_page_info_array_t   page_list,
    unsigned int            *count,
    int                     *flags)
{
    vm_map_entry_t      entry;
    int                 caller_flags;
    int                 force_data_sync;
    int                 sync_cow_data;
    vm_object_t         local_object;
    vm_map_offset_t     local_offset;
    vm_map_offset_t     local_start;
    kern_return_t       ret;

    caller_flags = *flags;

    if (caller_flags & ~UPL_VALID_FLAGS) {
        /*
         * For forward compatibility's sake,
         * reject any unknown flag.
         */
        return KERN_INVALID_VALUE;
    }
    force_data_sync = (caller_flags & UPL_FORCE_DATA_SYNC);
    sync_cow_data = !(caller_flags & UPL_COPYOUT_FROM);

    if (upl == NULL)
        return KERN_INVALID_ARGUMENT;

REDISCOVER_ENTRY:
    vm_map_lock_read(map);

    if (vm_map_lookup_entry(map, offset, &entry)) {

        if ((entry->vme_end - offset) < *upl_size) {
            *upl_size = (upl_size_t) (entry->vme_end - offset);
            assert(*upl_size == entry->vme_end - offset);
        }

        if (caller_flags & UPL_QUERY_OBJECT_TYPE) {
            *flags = 0;

            if ( !entry->is_sub_map && entry->object.vm_object != VM_OBJECT_NULL) {
                if (entry->object.vm_object->private)
                    *flags = UPL_DEV_MEMORY;

                if (entry->object.vm_object->phys_contiguous)
                    *flags |= UPL_PHYS_CONTIG;
            }
            vm_map_unlock_read(map);

            return KERN_SUCCESS;
        }

        if (entry->is_sub_map) {
            vm_map_t    submap;

            submap = entry->object.sub_map;
            local_start = entry->vme_start;
            local_offset = entry->offset;

            vm_map_reference(submap);
            vm_map_unlock_read(map);

            ret = vm_map_create_upl(submap,
                                    local_offset + (offset - local_start),
                                    upl_size, upl, page_list, count, flags);
            vm_map_deallocate(submap);

            return ret;
        }

        if (entry->object.vm_object == VM_OBJECT_NULL || !entry->object.vm_object->phys_contiguous) {
            if ((*upl_size/PAGE_SIZE) > MAX_UPL_SIZE)
                *upl_size = MAX_UPL_SIZE * PAGE_SIZE;
        }
        /*
         * Create an object if necessary.
         */
        if (entry->object.vm_object == VM_OBJECT_NULL) {

            if (vm_map_lock_read_to_write(map))
                goto REDISCOVER_ENTRY;

            entry->object.vm_object = vm_object_allocate((vm_size_t)(entry->vme_end - entry->vme_start));
            entry->offset = 0;

            vm_map_lock_write_to_read(map);
        }
        if (!(caller_flags & UPL_COPYOUT_FROM)) {
            if (!(entry->protection & VM_PROT_WRITE)) {
                vm_map_unlock_read(map);
                return KERN_PROTECTION_FAILURE;
            }

            local_object = entry->object.vm_object;
            if (vm_map_entry_should_cow_for_true_share(entry) &&
                local_object->vo_size > *upl_size &&
                *upl_size != 0) {
                vm_prot_t   prot;

                /*
                 * Set up the targeted range for copy-on-write to avoid
                 * applying true_share/copy_delay to the entire object.
                 */

                if (vm_map_lock_read_to_write(map)) {
                    goto REDISCOVER_ENTRY;
                }

                vm_map_clip_start(map,
                                  entry,
                                  vm_map_trunc_page(offset,
                                                    VM_MAP_PAGE_MASK(map)));
                vm_map_clip_end(map,
                                entry,
                                vm_map_round_page(offset + *upl_size,
                                                  VM_MAP_PAGE_MASK(map)));
                prot = entry->protection & ~VM_PROT_WRITE;
                if (override_nx(map, entry->alias) && prot)
                    prot |= VM_PROT_EXECUTE;
                vm_object_pmap_protect(local_object,
                                       entry->offset,
                                       entry->vme_end - entry->vme_start,
                                       ((entry->is_shared || map->mapped_in_other_pmaps)
                                        ? PMAP_NULL
                                        : map->pmap),
                                       entry->vme_start,
                                       prot);
                entry->needs_copy = TRUE;

                vm_map_lock_write_to_read(map);
            }

            if (entry->needs_copy) {
                /*
                 * Honor copy-on-write for COPY_SYMMETRIC
                 * strategy.
                 */
                vm_map_t                local_map;
                vm_object_t             object;
                vm_object_offset_t      new_offset;
                vm_prot_t               prot;
                boolean_t               wired;
                vm_map_version_t        version;
                vm_map_t                real_map;

                local_map = map;

                if (vm_map_lookup_locked(&local_map,
                                         offset, VM_PROT_WRITE,
                                         OBJECT_LOCK_EXCLUSIVE,
                                         &version, &object,
                                         &new_offset, &prot, &wired,
                                         NULL,
                                         &real_map) != KERN_SUCCESS) {
                    vm_map_unlock_read(local_map);
                    return KERN_FAILURE;
                }
                if (real_map != map)
                    vm_map_unlock(real_map);
                vm_map_unlock_read(local_map);

                vm_object_unlock(object);

                goto REDISCOVER_ENTRY;
            }
        }
        if (sync_cow_data) {
            if (entry->object.vm_object->shadow || entry->object.vm_object->copy) {
                local_object = entry->object.vm_object;
                local_start = entry->vme_start;
                local_offset = entry->offset;

                vm_object_reference(local_object);
                vm_map_unlock_read(map);

                if (local_object->shadow && local_object->copy) {
                    vm_object_lock_request(
                        local_object->shadow,
                        (vm_object_offset_t)
                        ((offset - local_start) +
                         local_offset) +
                        local_object->vo_shadow_offset,
                        (vm_object_size_t) *upl_size, FALSE,
                        MEMORY_OBJECT_DATA_SYNC,
                        VM_PROT_NO_CHANGE);
                }
                sync_cow_data = FALSE;
                vm_object_deallocate(local_object);

                goto REDISCOVER_ENTRY;
            }
        }
        if (force_data_sync) {
            local_object = entry->object.vm_object;
            local_start = entry->vme_start;
            local_offset = entry->offset;

            vm_object_reference(local_object);
            vm_map_unlock_read(map);

            vm_object_lock_request(
                local_object,
                (vm_object_offset_t)
                ((offset - local_start) + local_offset),
                (vm_object_size_t)*upl_size, FALSE,
                MEMORY_OBJECT_DATA_SYNC,
                VM_PROT_NO_CHANGE);

            force_data_sync = FALSE;
            vm_object_deallocate(local_object);

            goto REDISCOVER_ENTRY;
        }
        if (entry->object.vm_object->private)
            *flags = UPL_DEV_MEMORY;
        else
            *flags = 0;

        if (entry->object.vm_object->phys_contiguous)
            *flags |= UPL_PHYS_CONTIG;

        local_object = entry->object.vm_object;
        local_offset = entry->offset;
        local_start = entry->vme_start;

        vm_object_reference(local_object);
        vm_map_unlock_read(map);

        ret = vm_object_iopl_request(local_object,
                                     (vm_object_offset_t) ((offset - local_start) + local_offset),
                                     *upl_size,
                                     upl,
                                     page_list,
                                     count,
                                     caller_flags);
        vm_object_deallocate(local_object);

        return ret;
    }
    vm_map_unlock_read(map);

    return KERN_FAILURE;
}
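/*
 * Note that vm_map_create_upl() above restarts through REDISCOVER_ENTRY
 * whenever it has to drop or upgrade the map lock (object creation,
 * copy-on-write setup, cache syncs), since the map entry may have
 * changed underneath it in the meantime.
 */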
/*
 * Internal routine to enter a UPL into a VM map.
 *
 * JMM - This should just be doable through the standard
 * vm_map_enter() API.
 */
kern_return_t
vm_upl_map(
    vm_map_t        map,
    upl_t           upl,
    vm_map_offset_t *dst_addr)
{
    vm_map_size_t       size;
    vm_object_offset_t  offset;
    vm_map_offset_t     addr;
    vm_page_t           m;
    kern_return_t       kr;
    int                 isVectorUPL = 0, curr_upl = 0;
    upl_t               vector_upl = NULL;
    vm_offset_t         vector_upl_dst_addr = 0;
    vm_map_t            vector_upl_submap = NULL;
    upl_offset_t        subupl_offset = 0;
    upl_size_t          subupl_size = 0;

    if (upl == UPL_NULL)
        return KERN_INVALID_ARGUMENT;

    if ((isVectorUPL = vector_upl_is_valid(upl))) {
        int mapped = 0, valid_upls = 0;
        vector_upl = upl;

        upl_lock(vector_upl);
        for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
            upl = vector_upl_subupl_byindex(vector_upl, curr_upl);
            if (upl == NULL)
                continue;
            valid_upls++;
            if (UPL_PAGE_LIST_MAPPED & upl->flags)
                mapped++;
        }

        if (mapped) {
            if (mapped != valid_upls)
                panic("Only %d of the %d sub-upls within the Vector UPL are already mapped\n", mapped, valid_upls);
            else {
                upl_unlock(vector_upl);
                return KERN_FAILURE;
            }
        }

        kr = kmem_suballoc(map, &vector_upl_dst_addr, vector_upl->size, FALSE, VM_FLAGS_ANYWHERE, &vector_upl_submap);
        if (kr != KERN_SUCCESS)
            panic("Vector UPL submap allocation failed\n");
        map = vector_upl_submap;
        vector_upl_set_submap(vector_upl, vector_upl_submap, vector_upl_dst_addr);
        curr_upl = 0;
    } else
        upl_lock(upl);

process_upl_to_enter:
    if (isVectorUPL) {
        if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
            *dst_addr = vector_upl_dst_addr;
            upl_unlock(vector_upl);
            return KERN_SUCCESS;
        }
        upl = vector_upl_subupl_byindex(vector_upl, curr_upl++);
        if (upl == NULL)
            goto process_upl_to_enter;

        vector_upl_get_iostate(vector_upl, upl, &subupl_offset, &subupl_size);
        *dst_addr = (vm_map_offset_t)(vector_upl_dst_addr + (vm_map_offset_t)subupl_offset);
    } else {
        /*
         * check to see if already mapped
         */
        if (UPL_PAGE_LIST_MAPPED & upl->flags) {
            upl_unlock(upl);
            return KERN_FAILURE;
        }
    }
    if ((!(upl->flags & UPL_SHADOWED)) &&
        ((upl->flags & UPL_HAS_BUSY) ||
         !((upl->flags & (UPL_DEVICE_MEMORY | UPL_IO_WIRE)) || (upl->map_object->phys_contiguous)))) {

        vm_object_t         object;
        vm_page_t           alias_page;
        vm_object_offset_t  new_offset;
        unsigned int        pg_num;
        wpl_array_t         lite_list;

        if (upl->flags & UPL_INTERNAL) {
            lite_list = (wpl_array_t)
                ((((uintptr_t)upl) + sizeof(struct upl))
                 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
        } else {
            lite_list = (wpl_array_t)(((uintptr_t)upl) + sizeof(struct upl));
        }
        object = upl->map_object;
        upl->map_object = vm_object_allocate(upl->size);

        vm_object_lock(upl->map_object);

        upl->map_object->shadow = object;
        upl->map_object->pageout = TRUE;
        upl->map_object->can_persist = FALSE;
        upl->map_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
        upl->map_object->vo_shadow_offset = upl->offset - object->paging_offset;
        upl->map_object->wimg_bits = object->wimg_bits;
        offset = upl->map_object->vo_shadow_offset;
        new_offset = 0;
        size = upl->size;

        upl->flags |= UPL_SHADOWED;

        while (size) {
            pg_num = (unsigned int) (new_offset / PAGE_SIZE);
            assert(pg_num == new_offset / PAGE_SIZE);

            if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {

                VM_PAGE_GRAB_FICTITIOUS(alias_page);

                vm_object_lock(object);

                m = vm_page_lookup(object, offset);
                if (m == VM_PAGE_NULL) {
                    panic("vm_upl_map: page missing\n");
                }

                /*
                 * Convert the fictitious page to a private
                 * shadow of the real page.
                 */
                assert(alias_page->fictitious);
                alias_page->fictitious = FALSE;
                alias_page->private = TRUE;
                alias_page->pageout = TRUE;
                /*
                 * since m is a page in the upl it must
                 * already be wired or BUSY, so it's
                 * safe to assign the underlying physical
                 * page to the alias
                 */
                alias_page->phys_page = m->phys_page;

                vm_object_unlock(object);

                vm_page_lockspin_queues();
                vm_page_wire(alias_page);
                vm_page_unlock_queues();

                /*
                 * ENCRYPTED SWAP:
                 * The virtual page ("m") has to be wired in some way
                 * here or its physical page ("m->phys_page") could
                 * be recycled at any time.
                 * Assuming this is enforced by the caller, we can't
                 * get an encrypted page here.  Since the encryption
                 * key depends on the VM page's "pager" object and
                 * the "paging_offset", we couldn't handle 2 pageable
                 * VM pages (with different pagers and paging_offsets)
                 * sharing the same physical page:  we could end up
                 * encrypting with one key (via one VM page) and
                 * decrypting with another key (via the alias VM page).
                 */
                ASSERT_PAGE_DECRYPTED(m);

                vm_page_insert(alias_page, upl->map_object, new_offset);

                assert(!alias_page->wanted);
                alias_page->busy = FALSE;
                alias_page->absent = FALSE;
            }
            size -= PAGE_SIZE;
            offset += PAGE_SIZE_64;
            new_offset += PAGE_SIZE_64;
        }
        vm_object_unlock(upl->map_object);
    }
    if (upl->flags & UPL_SHADOWED)
        offset = 0;
    else
        offset = upl->offset - upl->map_object->paging_offset;

    size = upl->size;

    vm_object_reference(upl->map_object);

    if (!isVectorUPL) {
        *dst_addr = 0;
        /*
         * NEED A UPL_MAP ALIAS
         */
        kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
                          VM_FLAGS_ANYWHERE, upl->map_object, offset, FALSE,
                          VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

        if (kr != KERN_SUCCESS) {
            upl_unlock(upl);
            return(kr);
        }
    } else {
        kr = vm_map_enter(map, dst_addr, (vm_map_size_t)size, (vm_map_offset_t) 0,
                          VM_FLAGS_FIXED, upl->map_object, offset, FALSE,
                          VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
        if (kr)
            panic("vm_map_enter failed for a Vector UPL\n");
    }
    vm_object_lock(upl->map_object);

    for (addr = *dst_addr; size > 0; size -= PAGE_SIZE, addr += PAGE_SIZE) {
        m = vm_page_lookup(upl->map_object, offset);

        if (m) {
            m->pmapped = TRUE;

            /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
             * but only in kernel space. If this was on a user map,
             * we'd have to set the wpmapped bit. */
            /* m->wpmapped = TRUE; */
            assert(map == kernel_map);

            PMAP_ENTER(map->pmap, addr, m, VM_PROT_ALL, VM_PROT_NONE, 0, TRUE);
        }
        offset += PAGE_SIZE_64;
    }
    vm_object_unlock(upl->map_object);

    /*
     * hold a reference for the mapping
     */
    upl->ref_count++;
    upl->flags |= UPL_PAGE_LIST_MAPPED;
    upl->kaddr = (vm_offset_t) *dst_addr;
    assert(upl->kaddr == *dst_addr);

    if (isVectorUPL)
        goto process_upl_to_enter;

    upl_unlock(upl);

    return KERN_SUCCESS;
}
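/*
 * On success vm_upl_map() leaves the UPL flagged UPL_PAGE_LIST_MAPPED
 * with upl->kaddr holding the kernel virtual address of the mapping;
 * vm_upl_unmap() below is the inverse and clears both before removing
 * the mapping from the map.
 */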
/*
 * Internal routine to remove a UPL mapping from a VM map.
 *
 * XXX - This should just be doable through a standard
 * vm_map_remove() operation.  Otherwise, implicit clean-up
 * of the target map won't be able to correctly remove
 * these (and release the reference on the UPL).  Having
 * to do this means we can't map these into user-space
 * maps yet.
 */
kern_return_t
vm_upl_unmap(
    vm_map_t    map,
    upl_t       upl)
{
    vm_address_t    addr;
    upl_size_t      size;
    int             isVectorUPL = 0, curr_upl = 0;
    upl_t           vector_upl = NULL;

    if (upl == UPL_NULL)
        return KERN_INVALID_ARGUMENT;

    if ((isVectorUPL = vector_upl_is_valid(upl))) {
        int unmapped = 0, valid_upls = 0;
        vector_upl = upl;

        upl_lock(vector_upl);
        for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
            upl = vector_upl_subupl_byindex(vector_upl, curr_upl);
            if (upl == NULL)
                continue;
            valid_upls++;
            if (!(UPL_PAGE_LIST_MAPPED & upl->flags))
                unmapped++;
        }

        if (unmapped) {
            if (unmapped != valid_upls)
                panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);
            else {
                upl_unlock(vector_upl);
                return KERN_FAILURE;
            }
        }
        curr_upl = 0;
    } else
        upl_lock(upl);

process_upl_to_remove:
    if (isVectorUPL) {
        if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
            vm_map_t    v_upl_submap;
            vm_offset_t v_upl_submap_dst_addr;
            vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);

            vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
            vm_map_deallocate(v_upl_submap);
            upl_unlock(vector_upl);
            return KERN_SUCCESS;
        }

        upl = vector_upl_subupl_byindex(vector_upl, curr_upl++);
        if (upl == NULL)
            goto process_upl_to_remove;
    }

    if (upl->flags & UPL_PAGE_LIST_MAPPED) {
        addr = upl->kaddr;
        size = upl->size;

        assert(upl->ref_count > 1);
        upl->ref_count--;               /* removing mapping ref */

        upl->flags &= ~UPL_PAGE_LIST_MAPPED;
        upl->kaddr = (vm_offset_t) 0;

        if (!isVectorUPL) {
            upl_unlock(upl);

            vm_map_remove(
                map,
                vm_map_trunc_page(addr,
                                  VM_MAP_PAGE_MASK(map)),
                vm_map_round_page(addr + size,
                                  VM_MAP_PAGE_MASK(map)),
                VM_MAP_NO_FLAGS);

            return KERN_SUCCESS;
        } else {
            /*
             * If it's a Vectored UPL, we'll be removing the entire
             * submap anyways, so no need to remove individual UPL
             * element mappings from within the submap
             */
            goto process_upl_to_remove;
        }
    }
    upl_unlock(upl);

    return KERN_FAILURE;
}
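/*
 * upl_commit_range() below walks the committed span page by page: each
 * page's bit is cleared from the lite list, dirty and code-signing state
 * are re-applied as directed by the commit flags, and the page is queued
 * for whatever delayed work (free, activate, deactivate, wakeup) the
 * outcome calls for.  Vector UPLs are committed one sub-UPL at a time
 * via process_upl_to_commit.
 */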
extern int panic_on_cs_killed;

kern_return_t
upl_commit_range(
    upl_t                   upl,
    upl_offset_t            offset,
    upl_size_t              size,
    int                     flags,
    upl_page_info_t         *page_list,
    mach_msg_type_number_t  count,
    boolean_t               *empty)
{
    upl_size_t              xfer_size, subupl_size = size;
    vm_object_t             shadow_object;
    vm_object_t             object;
    vm_object_offset_t      target_offset;
    upl_offset_t            subupl_offset = offset;
    unsigned int            entry;
    wpl_array_t             lite_list;
    int                     occupied;
    int                     clear_refmod = 0;
    int                     pgpgout_count = 0;
    struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
    struct vm_page_delayed_work *dwp;
    int                     dw_count;
    int                     dw_limit;
    int                     isVectorUPL = 0;
    upl_t                   vector_upl = NULL;
    boolean_t               should_be_throttled = FALSE;

    *empty = FALSE;

    if (upl == UPL_NULL)
        return KERN_INVALID_ARGUMENT;

    if (count == 0)
        page_list = NULL;

    if ((isVectorUPL = vector_upl_is_valid(upl))) {
        vector_upl = upl;
        upl_lock(vector_upl);
    } else
        upl_lock(upl);

process_upl_to_commit:

    if (isVectorUPL) {
        size = subupl_size;
        offset = subupl_offset;
        if (size == 0) {
            upl_unlock(vector_upl);
            return KERN_SUCCESS;
        }
        upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
        if (upl == NULL) {
            upl_unlock(vector_upl);
            return KERN_FAILURE;
        }
        page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
        subupl_size -= size;
        subupl_offset += size;
    }

#if UPL_DEBUG
    if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
        (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);

        upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
        upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);

        upl->upl_commit_index++;
    }
#endif
    if (upl->flags & UPL_DEVICE_MEMORY)
        xfer_size = 0;
    else if ((offset + size) <= upl->size)
        xfer_size = size;
    else {
        if (!isVectorUPL)
            upl_unlock(upl);
        else
            upl_unlock(vector_upl);

        return KERN_FAILURE;
    }
    if (upl->flags & UPL_SET_DIRTY)
        flags |= UPL_COMMIT_SET_DIRTY;
    if (upl->flags & UPL_CLEAR_DIRTY)
        flags |= UPL_COMMIT_CLEAR_DIRTY;

    if (upl->flags & UPL_INTERNAL)
        lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
                                   + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
    else
        lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));

    object = upl->map_object;

    if (upl->flags & UPL_SHADOWED) {
        vm_object_lock(object);
        shadow_object = object->shadow;
    } else {
        shadow_object = object;
    }
    entry = offset/PAGE_SIZE;
    target_offset = (vm_object_offset_t)offset;

    if (upl->flags & UPL_KERNEL_OBJECT)
        vm_object_lock_shared(shadow_object);
    else
        vm_object_lock(shadow_object);

    if (upl->flags & UPL_ACCESS_BLOCKED) {
        assert(shadow_object->blocked_access);
        shadow_object->blocked_access = FALSE;
        vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
    }

    if (shadow_object->code_signed) {
        /*
         * If the object is code-signed, do not let this UPL tell
         * us if the pages are valid or not.  Let the pages be
         * validated by VM the normal way (when they get mapped or
         * copied).
         */
        flags &= ~UPL_COMMIT_CS_VALIDATED;
    }
    if (! page_list) {
        /*
         * No page list to get the code-signing info from !?
         */
        flags &= ~UPL_COMMIT_CS_VALIDATED;
    }
    if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && shadow_object->internal)
        should_be_throttled = TRUE;

    dwp = &dw_array[0];
    dw_count = 0;
    dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

    while (xfer_size) {
        vm_page_t   t, m;

        dwp->dw_mask = 0;
        clear_refmod = 0;

        m = VM_PAGE_NULL;
        if (upl->flags & UPL_LITE) {
            unsigned int    pg_num;

            pg_num = (unsigned int) (target_offset/PAGE_SIZE);
            assert(pg_num == target_offset/PAGE_SIZE);

            if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
                lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));

                if (!(upl->flags & UPL_KERNEL_OBJECT))
                    m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
            }
        }
        if (upl->flags & UPL_SHADOWED) {
            if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {

                t->pageout = FALSE;

                VM_PAGE_FREE(t);

                if (m == VM_PAGE_NULL)
                    m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
            }
        }
        if ((upl->flags & UPL_KERNEL_OBJECT) || m == VM_PAGE_NULL)
            goto commit_next_page;

        if (m->compressor) {
            assert(m->busy);

            dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
            goto commit_next_page;
        }

        if (flags & UPL_COMMIT_CS_VALIDATED) {
            /*
             * Set the code signing bits according to
             * what the UPL says they should be.
             */
            m->cs_validated = page_list[entry].cs_validated;
            m->cs_tainted = page_list[entry].cs_tainted;
        }
        if (flags & UPL_COMMIT_WRITTEN_BY_KERNEL)
            m->written_by_kernel = TRUE;

        if (upl->flags & UPL_IO_WIRE) {

            if (page_list)
                page_list[entry].phys_addr = 0;

            if (flags & UPL_COMMIT_SET_DIRTY) {
                SET_PAGE_DIRTY(m, FALSE);
            } else if (flags & UPL_COMMIT_CLEAR_DIRTY) {
                m->dirty = FALSE;

                if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
                    m->cs_validated && !m->cs_tainted) {
                    /*
                     * This page is no longer dirty
                     * but could have been modified,
                     * so it will need to be
                     * re-validated.
                     */
                    if (panic_on_cs_killed &&
                        m->slid) {
                        panic("upl_commit_range(%p): page %p was slid\n",
                              upl, m);
                    }
                    assert(!m->slid);
                    m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
                    vm_cs_validated_resets++;
#endif
                    pmap_disconnect(m->phys_page);
                }
                clear_refmod |= VM_MEM_MODIFIED;
            }
            if (flags & UPL_COMMIT_INACTIVATE) {
                dwp->dw_mask |= DW_vm_page_deactivate_internal;
                clear_refmod |= VM_MEM_REFERENCED;
            }
            if (upl->flags & UPL_ACCESS_BLOCKED) {
                /*
                 * We blocked access to the pages in this UPL.
                 * Clear the "busy" bit and wake up any waiter
                 * for this page.
                 */
                dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
            }
            if (m->absent) {
                if (flags & UPL_COMMIT_FREE_ABSENT)
                    dwp->dw_mask |= DW_vm_page_free;
                else {
                    m->absent = FALSE;
                    dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);

                    if ( !(dwp->dw_mask & DW_vm_page_deactivate_internal))
                        dwp->dw_mask |= DW_vm_page_activate;
                }
            } else
                dwp->dw_mask |= DW_vm_page_unwire;

            goto commit_next_page;
        }
        assert(!m->compressor);

        if (page_list)
            page_list[entry].phys_addr = 0;

        /*
         * make sure to clear the hardware
         * modify or reference bits before
         * releasing the BUSY bit on this page
         * otherwise we risk losing a legitimate
         * change of state
         */
        if (flags & UPL_COMMIT_CLEAR_DIRTY) {
            m->dirty = FALSE;

            clear_refmod |= VM_MEM_MODIFIED;
        }
        if (m->laundry)
            dwp->dw_mask |= DW_vm_pageout_throttle_up;

        if (VM_PAGE_WIRED(m))
            m->pageout = FALSE;

        if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
            m->cs_validated && !m->cs_tainted) {
            /*
             * This page is no longer dirty
             * but could have been modified,
             * so it will need to be
             * re-validated.
             */
            if (panic_on_cs_killed &&
                m->slid) {
                panic("upl_commit_range(%p): page %p was slid\n",
                      upl, m);
            }
            assert(!m->slid);
            m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
            vm_cs_validated_resets++;
#endif
            pmap_disconnect(m->phys_page);
        }
        if (m->overwriting) {
            /*
             * the (COPY_OUT_FROM == FALSE) request_page_list case
             */
            if (m->busy) {
                m->absent = FALSE;

                dwp->dw_mask |= DW_clear_busy;
            } else {
                /*
                 * alternate (COPY_OUT_FROM == FALSE) page_list case
                 * Occurs when the original page was wired
                 * at the time of the list request
                 */
                assert(VM_PAGE_WIRED(m));

                dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */
            }
            m->overwriting = FALSE;
        }
        if (m->encrypted_cleaning == TRUE) {
            m->encrypted_cleaning = FALSE;

            dwp->dw_mask |= DW_clear_busy | DW_PAGE_WAKEUP;
        }
        m->cleaning = FALSE;

        if (m->pageout) {
            /*
             * With the clean queue enabled, UPL_PAGEOUT should
             * no longer set the pageout bit.  Its pages now go
             * to the clean queue.
             */
            assert(!(flags & UPL_PAGEOUT));

            m->pageout = FALSE;
#if MACH_CLUSTER_STATS
            if (m->wanted) vm_pageout_target_collisions++;
#endif
            if ((flags & UPL_COMMIT_SET_DIRTY) ||
                (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED))) {
                /*
                 * page was re-dirtied after we started
                 * the pageout... reactivate it since
                 * we don't know whether the on-disk
                 * copy matches what is now in memory
                 */
                SET_PAGE_DIRTY(m, FALSE);

                dwp->dw_mask |= DW_vm_page_activate | DW_PAGE_WAKEUP;

                if (upl->flags & UPL_PAGEOUT) {
                    CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
                    VM_STAT_INCR(reactivations);
                    DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
                }
            } else {
                /*
                 * page has been successfully cleaned
                 * go ahead and free it for other use
                 */
                if (m->object->internal) {
                    DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
                } else {
                    DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
                }
                m->dirty = FALSE;
                m->busy = TRUE;

                dwp->dw_mask |= DW_vm_page_free;
            }
            goto commit_next_page;
        }
#if MACH_CLUSTER_STATS
        if (m->wpmapped)
            m->dirty = pmap_is_modified(m->phys_page);

        if (m->dirty)   vm_pageout_cluster_dirtied++;
        else            vm_pageout_cluster_cleaned++;
        if (m->wanted)  vm_pageout_cluster_collisions++;
#endif
        /*
         * It is a part of the semantic of COPYOUT_FROM
         * UPLs that a commit implies cache sync
         * between the vm page and the backing store
         * this can be used to strip the precious bit
         * as well as the dirty bit
         */
        if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
            m->precious = FALSE;

        if (flags & UPL_COMMIT_SET_DIRTY) {
            SET_PAGE_DIRTY(m, FALSE);
        } else {
            m->dirty = FALSE;
        }

        /* with the clean queue on, move *all* cleaned pages to the clean queue */
        if (hibernate_cleaning_in_progress == FALSE && !m->dirty && (upl->flags & UPL_PAGEOUT)) {
            pgpgout_count++;

            /* this page used to be dirty; now it's on the clean queue. */
            m->was_dirty = TRUE;

            dwp->dw_mask |= DW_enqueue_cleaned;
            vm_pageout_enqueued_cleaned_from_inactive_dirty++;
        } else if (should_be_throttled == TRUE && !m->active && !m->inactive && !m->speculative && !m->throttled) {
            /*
             * page coming back in from being 'frozen'...
             * it was dirty before it was frozen, so keep it so
             * the vm_page_activate will notice that it really belongs
             * on the throttle queue and put it there
             */
            SET_PAGE_DIRTY(m, FALSE);
            dwp->dw_mask |= DW_vm_page_activate;

        } else {
            if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
                dwp->dw_mask |= DW_vm_page_deactivate_internal;
                clear_refmod |= VM_MEM_REFERENCED;
            } else if (!m->active && !m->inactive && !m->speculative) {

                if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
                    dwp->dw_mask |= DW_vm_page_speculate;
                else if (m->reference)
                    dwp->dw_mask |= DW_vm_page_activate;
                else {
                    dwp->dw_mask |= DW_vm_page_deactivate_internal;
                    clear_refmod |= VM_MEM_REFERENCED;
                }
            }
        }
        if (upl->flags & UPL_ACCESS_BLOCKED) {
            /*
             * We blocked access to the pages in this UPL.
             * Clear the "busy" bit on this page before we
             * wake up any waiter.
             */
            dwp->dw_mask |= DW_clear_busy;
        }
        /*
         * Wakeup any thread waiting for the page to be un-cleaning.
         */
        dwp->dw_mask |= DW_PAGE_WAKEUP;

commit_next_page:
        if (clear_refmod)
            pmap_clear_refmod(m->phys_page, clear_refmod);

        target_offset += PAGE_SIZE_64;
        xfer_size -= PAGE_SIZE;
        entry++;

        if (dwp->dw_mask) {
            if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
                VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

                if (dw_count >= dw_limit) {
                    vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);

                    dwp = &dw_array[0];
                    dw_count = 0;
                }
            } else {
                if (dwp->dw_mask & DW_clear_busy)
                    m->busy = FALSE;

                if (dwp->dw_mask & DW_PAGE_WAKEUP)
                    PAGE_WAKEUP(m);
            }
        }
    }
    if (dw_count)
        vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);

    occupied = 1;

    if (upl->flags & UPL_DEVICE_MEMORY) {
        occupied = 0;
    } else if (upl->flags & UPL_LITE) {
        int pg_num;
        int i;

        pg_num = upl->size/PAGE_SIZE;
        pg_num = (pg_num + 31) >> 5;
        occupied = 0;

        for (i = 0; i < pg_num; i++) {
            if (lite_list[i] != 0) {
                occupied = 1;
                break;
            }
        }
    } else {
        if (queue_empty(&upl->map_object->memq))
            occupied = 0;
    }
    if (occupied == 0) {
        /*
         * If this UPL element belongs to a Vector UPL and is
         * empty, then this is the right function to deallocate
         * it. So go ahead set the *empty variable. The flag
         * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
         * should be considered relevant for the Vector UPL and not
         * the internal UPLs.
         */
        if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
            *empty = TRUE;

        if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
            /*
             * this is not a paging object
             * so we need to drop the paging reference
             * that was taken when we created the UPL
             * against this object
             */
            vm_object_activity_end(shadow_object);
            vm_object_collapse(shadow_object, 0, TRUE);
        } else {
            /*
             * we donated the paging reference to
             * the map object... vm_pageout_object_terminate
             * will drop this reference
             */
        }
    }
    vm_object_unlock(shadow_object);
    if (object != shadow_object)
        vm_object_unlock(object);

    if (!isVectorUPL)
        upl_unlock(upl);
    else {
        /*
         * If we completed our operations on an UPL that is
         * part of a Vectored UPL and if empty is TRUE, then
         * we should go ahead and deallocate this UPL element.
         * Then we check if this was the last of the UPL elements
         * within that Vectored UPL. If so, set empty to TRUE
         * so that in ubc_upl_commit_range or ubc_upl_commit, we
         * can go ahead and deallocate the Vector UPL too.
         */
        if (*empty == TRUE) {
            *empty = vector_upl_set_subupl(vector_upl, upl, 0);
            upl_deallocate(upl);
        }
        goto process_upl_to_commit;
    }

    if (pgpgout_count) {
        DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
    }

    return KERN_SUCCESS;
}
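/*
 * upl_abort_range() below mirrors upl_commit_range() for the failure
 * path: absent pages created for the request are freed or flagged
 * according to the UPL_ABORT_RESTART / UNAVAILABLE / ERROR code
 * supplied, wired pages are unwired, and the busy bits taken at setup
 * time are cleared so that any waiters can make progress.
 */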
kern_return_t
upl_abort_range(
    upl_t           upl,
    upl_offset_t    offset,
    upl_size_t      size,
    int             error,
    boolean_t       *empty)
{
    upl_page_info_t     *user_page_list = NULL;
    upl_size_t          xfer_size, subupl_size = size;
    vm_object_t         shadow_object;
    vm_object_t         object;
    vm_object_offset_t  target_offset;
    upl_offset_t        subupl_offset = offset;
    unsigned int        entry;
    wpl_array_t         lite_list;
    int                 occupied;
    struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
    struct vm_page_delayed_work *dwp;
    int                 dw_count;
    int                 dw_limit;
    int                 isVectorUPL = 0;
    upl_t               vector_upl = NULL;

    *empty = FALSE;

    if (upl == UPL_NULL)
        return KERN_INVALID_ARGUMENT;

    if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
        return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);

    if ((isVectorUPL = vector_upl_is_valid(upl))) {
        vector_upl = upl;
        upl_lock(vector_upl);
    } else
        upl_lock(upl);

process_upl_to_abort:
    if (isVectorUPL) {
        size = subupl_size;
        offset = subupl_offset;
        if (size == 0) {
            upl_unlock(vector_upl);
            return KERN_SUCCESS;
        }
        upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);
        if (upl == NULL) {
            upl_unlock(vector_upl);
            return KERN_FAILURE;
        }
        subupl_size -= size;
        subupl_offset += size;
    }

    *empty = FALSE;

#if UPL_DEBUG
    if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
        (void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);

        upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
        upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
        upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;

        upl->upl_commit_index++;
    }
#endif
    if (upl->flags & UPL_DEVICE_MEMORY)
        xfer_size = 0;
    else if ((offset + size) <= upl->size)
        xfer_size = size;
    else {
        if (!isVectorUPL)
            upl_unlock(upl);
        else
            upl_unlock(vector_upl);

        return KERN_FAILURE;
    }
    if (upl->flags & UPL_INTERNAL) {
        lite_list = (wpl_array_t)
            ((((uintptr_t)upl) + sizeof(struct upl))
             + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));

        user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
    } else {
        lite_list = (wpl_array_t)
            (((uintptr_t)upl) + sizeof(struct upl));
    }
    object = upl->map_object;

    if (upl->flags & UPL_SHADOWED) {
        vm_object_lock(object);
        shadow_object = object->shadow;
    } else {
        shadow_object = object;
    }
    entry = offset/PAGE_SIZE;
    target_offset = (vm_object_offset_t)offset;

    if (upl->flags & UPL_KERNEL_OBJECT)
        vm_object_lock_shared(shadow_object);
    else
        vm_object_lock(shadow_object);

    if (upl->flags & UPL_ACCESS_BLOCKED) {
        assert(shadow_object->blocked_access);
        shadow_object->blocked_access = FALSE;
        vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
    }

    dwp = &dw_array[0];
    dw_count = 0;
    dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

    if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
        panic("upl_abort_range: kernel_object being DUMPED");
    while (xfer_size) {
        vm_page_t       t, m;
        unsigned int    pg_num;
        boolean_t       needed;

        pg_num = (unsigned int) (target_offset/PAGE_SIZE);
        assert(pg_num == target_offset/PAGE_SIZE);

        needed = FALSE;

        if (user_page_list)
            needed = user_page_list[pg_num].needed;

        dwp->dw_mask = 0;
        m = VM_PAGE_NULL;

        if (upl->flags & UPL_LITE) {

            if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
                lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));

                if ( !(upl->flags & UPL_KERNEL_OBJECT))
                    m = vm_page_lookup(shadow_object, target_offset +
                                       (upl->offset - shadow_object->paging_offset));
            }
        }
        if (upl->flags & UPL_SHADOWED) {
            if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {
                t->pageout = FALSE;

                VM_PAGE_FREE(t);

                if (m == VM_PAGE_NULL)
                    m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
            }
        }
        if ((upl->flags & UPL_KERNEL_OBJECT))
            goto abort_next_page;

        if (m != VM_PAGE_NULL) {

            assert(!m->compressor);

            if (m->absent) {
                boolean_t must_free = TRUE;

                /*
                 * COPYOUT = FALSE case
                 * check for error conditions which must
                 * be passed back to the pages customer
                 */
                if (error & UPL_ABORT_RESTART) {
                    m->restart = TRUE;
                    m->absent = FALSE;
                    m->unusual = TRUE;
                    must_free = FALSE;
                } else if (error & UPL_ABORT_UNAVAILABLE) {
                    m->restart = FALSE;
                    m->unusual = TRUE;
                    must_free = FALSE;
                } else if (error & UPL_ABORT_ERROR) {
                    m->restart = FALSE;
                    m->absent = FALSE;
                    m->error = TRUE;
                    m->unusual = TRUE;
                    must_free = FALSE;
                }
                if (m->clustered && needed == FALSE) {
                    /*
                     * This page was a part of a speculative
                     * read-ahead initiated by the kernel
                     * itself.  No one is expecting this
                     * page and no one will clean up its
                     * error state if it ever becomes valid
                     * in the future.
                     * We have to free it here.
                     */
                    must_free = TRUE;
                }

                /*
                 * If the page was already encrypted,
                 * we don't really need to decrypt it
                 * now.  It will get decrypted later,
                 * on demand, as soon as someone needs
                 * to access its contents.
                 */

                m->cleaning = FALSE;
                m->encrypted_cleaning = FALSE;

                if (m->overwriting && !m->busy) {
                    /*
                     * this shouldn't happen since
                     * this is an 'absent' page, but
                     * it doesn't hurt to check for
                     * the 'alternate' method of
                     * stabilizing the page...
                     * we will mark 'busy' to be cleared
                     * in the following code which will
                     * take care of the primary stabilization
                     * method (i.e. setting 'busy' to TRUE)
                     */
                    dwp->dw_mask |= DW_vm_page_unwire;
                }
                m->overwriting = FALSE;

                dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);

                if (must_free == TRUE)
                    dwp->dw_mask |= DW_vm_page_free;
                else
                    dwp->dw_mask |= DW_vm_page_activate;
            } else {
                /*
                 * Handle the trusted pager throttle.
                 */
                if (m->laundry)
                    dwp->dw_mask |= DW_vm_pageout_throttle_up;

                if (upl->flags & UPL_ACCESS_BLOCKED) {
                    /*
                     * We blocked access to the pages in this UPL.
                     * Clear the "busy" bit and wake up any waiter
                     * for this page.
                     */
                    dwp->dw_mask |= DW_clear_busy;
                }
                if (m->overwriting) {
                    if (m->busy)
                        dwp->dw_mask |= DW_clear_busy;
                    else {
                        /*
                         * deal with the 'alternate' method
                         * of stabilizing the page...
                         * we will either free the page
                         * or mark 'busy' to be cleared
                         * in the following code which will
                         * take care of the primary stabilization
                         * method (i.e. setting 'busy' to TRUE)
                         */
                        dwp->dw_mask |= DW_vm_page_unwire;
                    }
                    m->overwriting = FALSE;
                }
                if (m->encrypted_cleaning == TRUE) {
                    m->encrypted_cleaning = FALSE;

                    dwp->dw_mask |= DW_clear_busy;
                }
                m->cleaning = FALSE;
#if MACH_PAGEMAP
                vm_external_state_clr(m->object->existence_map, m->offset);
#endif  /* MACH_PAGEMAP */
                if (error & UPL_ABORT_DUMP_PAGES) {
                    pmap_disconnect(m->phys_page);

                    dwp->dw_mask |= DW_vm_page_free;
                } else {
                    if (!(dwp->dw_mask & DW_vm_page_unwire)) {
                        if (error & UPL_ABORT_REFERENCE) {
                            /*
                             * we've been told to explicitly
                             * reference this page... for
                             * file I/O, this is done by
                             * implementing an LRU on the inactive q
                             */
                            dwp->dw_mask |= DW_vm_page_lru;

                        } else if (!m->active && !m->inactive && !m->speculative)
                            dwp->dw_mask |= DW_vm_page_deactivate_internal;
                    }
                    dwp->dw_mask |= DW_PAGE_WAKEUP;
                }
            }
        }
abort_next_page:
        target_offset += PAGE_SIZE_64;
        xfer_size -= PAGE_SIZE;
        entry++;
        if (dwp->dw_mask) {
            if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
                VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

                if (dw_count >= dw_limit) {
                    vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);

                    dwp = &dw_array[0];
                    dw_count = 0;
                }
            } else {
                if (dwp->dw_mask & DW_clear_busy)
                    m->busy = FALSE;

                if (dwp->dw_mask & DW_PAGE_WAKEUP)
                    PAGE_WAKEUP(m);
            }
        }
    }
    if (dw_count)
        vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);

    occupied = 1;

    if (upl->flags & UPL_DEVICE_MEMORY) {
        occupied = 0;
    } else if (upl->flags & UPL_LITE) {
        int pg_num;
        int i;

        pg_num = upl->size/PAGE_SIZE;
        pg_num = (pg_num + 31) >> 5;
        occupied = 0;

        for (i = 0; i < pg_num; i++) {
            if (lite_list[i] != 0) {
                occupied = 1;
                break;
            }
        }
    } else {
        if (queue_empty(&upl->map_object->memq))
            occupied = 0;
    }
    if (occupied == 0) {
        /*
         * If this UPL element belongs to a Vector UPL and is
         * empty, then this is the right function to deallocate
         * it. So go ahead set the *empty variable. The flag
         * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
         * should be considered relevant for the Vector UPL and
         * not the internal UPLs.
         */
        if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)
            *empty = TRUE;

        if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
            /*
             * this is not a paging object
             * so we need to drop the paging reference
             * that was taken when we created the UPL
             * against this object
             */
            vm_object_activity_end(shadow_object);
            vm_object_collapse(shadow_object, 0, TRUE);
        } else {
            /*
             * we donated the paging reference to
             * the map object... vm_pageout_object_terminate
             * will drop this reference
             */
        }
    }
    vm_object_unlock(shadow_object);
    if (object != shadow_object)
        vm_object_unlock(object);

    if (!isVectorUPL)
        upl_unlock(upl);
    else {
        /*
         * If we completed our operations on an UPL that is
         * part of a Vectored UPL and if empty is TRUE, then
         * we should go ahead and deallocate this UPL element.
         * Then we check if this was the last of the UPL elements
         * within that Vectored UPL. If so, set empty to TRUE
         * so that in ubc_upl_abort_range or ubc_upl_abort, we
         * can go ahead and deallocate the Vector UPL too.
         */
        if (*empty == TRUE) {
            *empty = vector_upl_set_subupl(vector_upl, upl, 0);
            upl_deallocate(upl);
        }
        goto process_upl_to_abort;
    }

    return KERN_SUCCESS;
}
6608 return upl_abort_range(upl
, 0, upl
->size
, error
, &empty
);
/* an option on commit should be wire */
kern_return_t
upl_commit(
	upl_t			upl,
	upl_page_info_t		*page_list,
	mach_msg_type_number_t	count)
{
	boolean_t	empty;

	return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
}
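
/*
 * Illustrative sketch (not part of the build): the commit/abort paths above
 * track which pages a "lite" UPL covers with a bitmap of 32-bit words, one
 * bit per page, using the "(pg_num + 31) >> 5" rounding and the
 * "lite_list[pg_num >> 5] |= 1 << (pg_num & 31)" indexing seen earlier.
 * The helper names below are hypothetical; this is a self-contained
 * miniature of that bookkeeping, excluded from compilation.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static inline uint32_t
lite_list_word_count(uint32_t page_count)
{
	/* round the page count up to a whole number of 32-bit words */
	return (page_count + 31) >> 5;
}

static inline void
lite_list_mark(uint32_t *lite_list, uint32_t pg_num)
{
	/* record that page 'pg_num' is covered by this UPL */
	lite_list[pg_num >> 5] |= 1U << (pg_num & 31);
}

static inline bool
lite_list_is_empty(const uint32_t *lite_list, uint32_t page_count)
{
	/* mirrors the "occupied" scan performed at commit/abort time */
	for (uint32_t i = 0; i < lite_list_word_count(page_count); i++) {
		if (lite_list[i] != 0)
			return false;
	}
	return true;
}
#endif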
void
vm_object_set_pmap_cache_attr(
		vm_object_t		object,
		upl_page_info_array_t	user_page_list,
		unsigned int		num_pages,
		boolean_t		batch_pmap_op)
{
	unsigned int	cache_attr = 0;

	cache_attr = object->wimg_bits & VM_WIMG_MASK;
	assert(user_page_list);
	if (cache_attr != VM_WIMG_USE_DEFAULT) {
		PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, cache_attr, num_pages, batch_pmap_op);
	}
}

unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;
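
/*
 * Illustrative sketch (not part of the build): vm_object_iopl_request()
 * below carves its per-page bookkeeping out of a single allocation -- the
 * UPL header, then the page-info array, then the lite-list bitmap -- using
 * plain pointer arithmetic from the header's address.  The miniature below
 * shows the same layout with hypothetical names and userspace allocation.
 */
#if 0
#include <stdint.h>
#include <stdlib.h>

struct mini_upl {
	uint32_t	flags;
	uint32_t	size;		/* bytes covered by this UPL */
	/* page-info array and lite-list bitmap follow in the same allocation */
};

struct mini_page_info {
	uint64_t	phys_addr;
	uint8_t		dirty;
	uint8_t		absent;
};

static struct mini_upl *
mini_upl_create(uint32_t size, uint32_t page_size)
{
	uint32_t	pages = size / page_size;
	size_t		bytes;
	struct mini_upl	*upl;

	bytes = sizeof(struct mini_upl)
	    + pages * sizeof(struct mini_page_info)		/* user_page_list */
	    + (((pages + 31) >> 5) * sizeof(uint32_t));		/* lite_list bitmap */

	upl = calloc(1, bytes);
	if (upl != NULL)
		upl->size = size;
	return upl;
}

static struct mini_page_info *
mini_upl_page_list(struct mini_upl *upl)
{
	/* same arithmetic as "(uintptr_t)upl + sizeof(struct upl)" above */
	return (struct mini_page_info *)((uintptr_t)upl + sizeof(struct mini_upl));
}

static uint32_t *
mini_upl_lite_list(struct mini_upl *upl, uint32_t page_size)
{
	struct mini_page_info *pl = mini_upl_page_list(upl);

	return (uint32_t *)((uintptr_t)pl +
	    (upl->size / page_size) * sizeof(struct mini_page_info));
}
#endif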
kern_return_t
vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_object_offset_t	dst_offset;
	upl_size_t		xfer_size;
	wpl_array_t		lite_list = NULL;
	int			no_zero_fill = FALSE;
	unsigned int		size_in_pages;
	struct vm_object_fault_info	fault_info;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	boolean_t		caller_lookup;

	if (cntrl_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}
	if (vm_lopage_needed == FALSE)
		cntrl_flags &= ~UPL_NEED_32BIT_ADDR;

	if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
		if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
			return KERN_INVALID_VALUE;

		if (object->phys_contiguous) {
			if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
				return KERN_INVALID_ADDRESS;

			if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
				return KERN_INVALID_ADDRESS;
		}
	}

	if (cntrl_flags & UPL_ENCRYPT) {
		/*
		 * The paging path doesn't use this interface,
		 * so we don't support the UPL_ENCRYPT flag
		 * here.  We won't encrypt the pages.
		 */
		assert(! (cntrl_flags & UPL_ENCRYPT));
	}
	if (cntrl_flags & (UPL_NOZEROFILL | UPL_NOZEROFILLIO))
		no_zero_fill = TRUE;

	if (cntrl_flags & UPL_COPYOUT_FROM)
		prot = VM_PROT_READ;
	else
		prot = VM_PROT_READ | VM_PROT_WRITE;

	if (((size/PAGE_SIZE) > MAX_UPL_SIZE) && !object->phys_contiguous)
		size = MAX_UPL_SIZE * PAGE_SIZE;

	if (cntrl_flags & UPL_SET_INTERNAL) {
		if (page_list_count != NULL)
			*page_list_count = MAX_UPL_SIZE;
	}
	if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
	    ((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
		return KERN_INVALID_ARGUMENT;

	if ((!object->internal) && (object->paging_offset != 0))
		panic("vm_object_iopl_request: external object with non-zero paging offset\n");

	if (object->phys_contiguous)
		psize = PAGE_SIZE;
	else
		psize = size;

	if (cntrl_flags & UPL_SET_INTERNAL) {
		upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);

		user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
		lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
					   ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));
		if (size == 0) {
			user_page_list = NULL;
			lite_list = NULL;
		}
	} else {
		upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);

		lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
	}
	if (user_page_list)
		user_page_list[0].device = FALSE;
	*upl_ptr = upl;

	upl->map_object = object;
	upl->size = size;

	size_in_pages = size / PAGE_SIZE;

	if (object == kernel_object &&
	    !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
		upl->flags |= UPL_KERNEL_OBJECT;
#if UPL_DEBUG
		vm_object_lock(object);
#else
		vm_object_lock_shared(object);
#endif
	} else {
		vm_object_lock(object);
		vm_object_activity_begin(object);
	}
	/*
	 * paging in progress also protects the paging_offset
	 */
	upl->offset = offset + object->paging_offset;

	if (cntrl_flags & UPL_BLOCK_ACCESS) {
		/*
		 * The user requested that access to the pages in this UPL
		 * be blocked until the UPL is committed or aborted.
		 */
		upl->flags |= UPL_ACCESS_BLOCKED;
	}

	if (object->phys_contiguous) {
#if UPL_DEBUG
		vm_object_activity_begin(object);
		queue_enter(&object->uplq, upl, upl_t, uplq);
#endif /* UPL_DEBUG */

		if (upl->flags & UPL_ACCESS_BLOCKED) {
			assert(!object->blocked_access);
			object->blocked_access = TRUE;
		}

		vm_object_unlock(object);

		/*
		 * don't need any shadow mappings for this one
		 * since it is already I/O memory
		 */
		upl->flags |= UPL_DEVICE_MEMORY;

		upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1)>>PAGE_SHIFT);

		if (user_page_list) {
			user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset)>>PAGE_SHIFT);
			user_page_list[0].device = TRUE;
		}
		if (page_list_count != NULL) {
			if (upl->flags & UPL_INTERNAL)
				*page_list_count = 0;
			else
				*page_list_count = 1;
		}
		return KERN_SUCCESS;
	}
	if (object != kernel_object && object != compressor_object) {
		/*
		 * Protect user space from future COW operations
		 */
		object->true_share = TRUE;

		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}

#if UPL_DEBUG
	vm_object_activity_begin(object);
	queue_enter(&object->uplq, upl, upl_t, uplq);
#endif /* UPL_DEBUG */

	if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
	    object->copy != VM_OBJECT_NULL) {
		/*
		 * Honor copy-on-write obligations
		 *
		 * The caller is gathering these pages and
		 * might modify their contents.  We need to
		 * make sure that the copy object has its own
		 * private copies of these pages before we let
		 * the caller modify them.
		 *
		 * NOTE: someone else could map the original object
		 * after we've done this copy-on-write here, and they
		 * could then see an inconsistent picture of the memory
		 * while it's being modified via the UPL.  To prevent this,
		 * we would have to block access to these pages until the
		 * UPL is released.  We could use the UPL_BLOCK_ACCESS
		 * code path for that...
		 */
		vm_object_update(object,
				 /* ... */
				 FALSE,	/* should_return */
				 MEMORY_OBJECT_COPY_SYNC,
				 /* ... */);
#if DEVELOPMENT || DEBUG
		iopl_cow_pages += size >> PAGE_SHIFT;
#endif
	}

	dst_offset = offset;

	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info.user_tag = 0;
	fault_info.lo_offset = offset;
	fault_info.hi_offset = offset + xfer_size;
	fault_info.no_cache = FALSE;
	fault_info.stealth = FALSE;
	fault_info.io_sync = FALSE;
	fault_info.cs_bypass = FALSE;
	fault_info.mark_zf_absent = (0 == (cntrl_flags & UPL_NOZEROFILLIO));

	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	while (xfer_size) {
		vm_fault_return_t	result;
		unsigned int		pg_num;

		dst_page = vm_page_lookup(object, dst_offset);

		/*
		 * If the page is encrypted, we need to decrypt it,
		 * so force a soft page fault.
		 */
		if (dst_page == VM_PAGE_NULL ||
		    dst_page->encrypted ||
		    dst_page->restart ||
		    dst_page->fictitious) {

			if (object == kernel_object)
				panic("vm_object_iopl_request: missing/bad page in kernel object\n");
			if (object == compressor_object)
				panic("vm_object_iopl_request: missing/bad page in compressor object\n");

			if (cntrl_flags & UPL_REQUEST_NO_FAULT) {
				ret = KERN_MEMORY_ERROR;
				goto return_err;
			}
			/*
			 * We just looked up the page and the result remains valid
			 * until the object lock is released, so send it to
			 * vm_fault_page() (as "dst_page"), to avoid having to
			 * look it up again there.
			 */
			caller_lookup = TRUE;

			do {
				vm_page_t	top_page;
				kern_return_t	error_code;
				int		interruptible;

				if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
					interruptible = THREAD_ABORTSAFE;
				else
					interruptible = THREAD_UNINT;

				fault_info.interruptible = interruptible;
				fault_info.cluster_size = xfer_size;
				fault_info.batch_pmap_op = TRUE;

				vm_object_paging_begin(object);

				result = vm_fault_page(object, dst_offset,
						       prot | VM_PROT_WRITE, FALSE,
						       caller_lookup,
						       &prot, &dst_page, &top_page,
						       &error_code, no_zero_fill,
						       FALSE, &fault_info);

				/* our lookup is no longer valid at this point */
				caller_lookup = FALSE;

				switch (result) {

				case VM_FAULT_SUCCESS:

					if ( !dst_page->absent) {
						PAGE_WAKEUP_DONE(dst_page);
					} else {
						/*
						 * we only get back an absent page if we
						 * requested that it not be zero-filled
						 * because we are about to fill it via I/O
						 *
						 * absent pages should be left BUSY
						 * to prevent them from being faulted
						 * into an address space before we've
						 * had a chance to complete the I/O on
						 * them since they may contain info that
						 * shouldn't be seen by the faulting task
						 */
					}
					/*
					 * Release paging references and
					 * top-level placeholder page, if any.
					 */
					if (top_page != VM_PAGE_NULL) {
						vm_object_t	local_object;

						local_object = top_page->object;

						if (top_page->object != dst_page->object) {
							vm_object_lock(local_object);
							VM_PAGE_FREE(top_page);
							vm_object_paging_end(local_object);
							vm_object_unlock(local_object);
						} else {
							VM_PAGE_FREE(top_page);
							vm_object_paging_end(local_object);
						}
					}
					vm_object_paging_end(object);
					break;

				case VM_FAULT_RETRY:
					vm_object_lock(object);
					break;

				case VM_FAULT_MEMORY_SHORTAGE:
					OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);

					VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);

					if (vm_page_wait(interruptible)) {
						OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

						VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);

						vm_object_lock(object);

						break;
					}
					OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

					VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);

					/* fall thru */

				case VM_FAULT_INTERRUPTED:
					error_code = MACH_SEND_INTERRUPTED;
				case VM_FAULT_MEMORY_ERROR:
				memory_error:
					ret = (error_code ? error_code : KERN_MEMORY_ERROR);

					vm_object_lock(object);
					goto return_err;

				case VM_FAULT_SUCCESS_NO_VM_PAGE:
					/* success but no page: fail */
					vm_object_paging_end(object);
					vm_object_unlock(object);
					goto memory_error;

				default:
					panic("vm_object_iopl_request: unexpected error"
					      " 0x%x from vm_fault_page()\n", result);
				}
			} while (result != VM_FAULT_SUCCESS);
		}
		if (upl->flags & UPL_KERNEL_OBJECT)
			goto record_phys_addr;

		if (dst_page->compressor) {
			dst_page->busy = TRUE;
			goto record_phys_addr;
		}

		if (dst_page->cleaning) {
			/*
			 * Someone else is cleaning this page in place.
			 * In theory, we should be able to proceed and use this
			 * page but they'll probably end up clearing the "busy"
			 * bit on it in upl_commit_range() but they didn't set
			 * it, so they would clear our "busy" bit and open
			 * us to race conditions.
			 * We'd better wait for the cleaning to complete and
			 * then try again.
			 */
			vm_object_iopl_request_sleep_for_cleaning++;
			PAGE_SLEEP(object, dst_page, THREAD_UNINT);
			continue;
		}
		if (dst_page->laundry) {
			dst_page->pageout = FALSE;

			vm_pageout_steal_laundry(dst_page, FALSE);
		}
		if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
		     dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
			vm_page_t	low_page;
			int		refmod;

			/*
			 * support devices that can't DMA above 32 bits
			 * by substituting pages from a pool of low address
			 * memory for any pages we find above the 4G mark
			 * can't substitute if the page is already wired because
			 * we don't know whether that physical address has been
			 * handed out to some other 64 bit capable DMA device to use
			 */
			if (VM_PAGE_WIRED(dst_page)) {
				ret = KERN_PROTECTION_FAILURE;
				goto return_err;
			}
			low_page = vm_page_grablo();

			if (low_page == VM_PAGE_NULL) {
				ret = KERN_RESOURCE_SHORTAGE;
				goto return_err;
			}
			/*
			 * from here until the vm_page_replace completes
			 * we mustn't drop the object lock... we don't
			 * want anyone refaulting this page in and using
			 * it after we disconnect it... we want the fault
			 * to find the new page being substituted.
			 */
			if (dst_page->pmapped)
				refmod = pmap_disconnect(dst_page->phys_page);
			else
				refmod = 0;

			if (!dst_page->absent)
				vm_page_copy(dst_page, low_page);

			low_page->reference = dst_page->reference;
			low_page->dirty     = dst_page->dirty;
			low_page->absent    = dst_page->absent;

			if (refmod & VM_MEM_REFERENCED)
				low_page->reference = TRUE;
			if (refmod & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(low_page, FALSE);
			}

			vm_page_replace(low_page, object, dst_offset);

			dst_page = low_page;
			/*
			 * vm_page_grablo returned the page marked
			 * BUSY... we don't need a PAGE_WAKEUP_DONE
			 * here, because we've never dropped the object lock
			 */
			if ( !dst_page->absent)
				dst_page->busy = FALSE;
		}
		if ( !dst_page->busy)
			dwp->dw_mask |= DW_vm_page_wire;

		if (cntrl_flags & UPL_BLOCK_ACCESS) {
			/*
			 * Mark the page "busy" to block any future page fault
			 * on this page in addition to wiring it.
			 * We'll also remove the mapping
			 * of all these pages before leaving this routine.
			 */
			assert(!dst_page->fictitious);
			dst_page->busy = TRUE;
		}
		/*
		 * expect the page to be used
		 * page queues lock must be held to set 'reference'
		 */
		dwp->dw_mask |= DW_set_reference;

		if (!(cntrl_flags & UPL_COPYOUT_FROM)) {
			SET_PAGE_DIRTY(dst_page, TRUE);
		}
		if ((cntrl_flags & UPL_REQUEST_FORCE_COHERENCY) && dst_page->written_by_kernel == TRUE) {
			pmap_sync_page_attributes_phys(dst_page->phys_page);
			dst_page->written_by_kernel = FALSE;
		}

record_phys_addr:
		if (dst_page->busy)
			upl->flags |= UPL_HAS_BUSY;

		pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
		assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
		lite_list[pg_num>>5] |= 1 << (pg_num & 31);

		if (dst_page->phys_page > upl->highest_page)
			upl->highest_page = dst_page->phys_page;

		if (user_page_list) {
			user_page_list[entry].phys_addr	= dst_page->phys_page;
			user_page_list[entry].pageout	= dst_page->pageout;
			user_page_list[entry].absent	= dst_page->absent;
			user_page_list[entry].dirty	= dst_page->dirty;
			user_page_list[entry].precious	= dst_page->precious;
			user_page_list[entry].device	= FALSE;
			user_page_list[entry].needed	= FALSE;
			if (dst_page->clustered == TRUE)
				user_page_list[entry].speculative = dst_page->speculative;
			else
				user_page_list[entry].speculative = FALSE;
			user_page_list[entry].cs_validated = dst_page->cs_validated;
			user_page_list[entry].cs_tainted = dst_page->cs_tainted;
		}
		if (object != kernel_object && object != compressor_object) {
			/*
			 * someone is explicitly grabbing this page...
			 * update clustered and speculative state
			 */
			VM_PAGE_CONSUME_CLUSTERED(dst_page);
		}
		entry++;
		dst_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;

		if (dwp->dw_mask) {
			VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);

			if (dw_count >= dw_limit) {
				vm_page_do_delayed_work(object, &dw_array[0], dw_count);

				dwp = &dw_array[0];
				dw_count = 0;
			}
		}
	}
	if (dw_count)
		vm_page_do_delayed_work(object, &dw_array[0], dw_count);

	vm_object_set_pmap_cache_attr(object, user_page_list, entry, TRUE);

	if (page_list_count != NULL) {
		if (upl->flags & UPL_INTERNAL)
			*page_list_count = 0;
		else if (*page_list_count > entry)
			*page_list_count = entry;
	}
	vm_object_unlock(object);

	if (cntrl_flags & UPL_BLOCK_ACCESS) {
		/*
		 * We've marked all the pages "busy" so that future
		 * page faults will block.
		 * Now remove the mapping for these pages, so that they
		 * can't be accessed without causing a page fault.
		 */
		vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
				       PMAP_NULL, 0, VM_PROT_NONE);
		assert(!object->blocked_access);
		object->blocked_access = TRUE;
	}
	return KERN_SUCCESS;
return_err:
	for (; offset < dst_offset; offset += PAGE_SIZE) {
		boolean_t	need_unwire;

		dst_page = vm_page_lookup(object, offset);

		if (dst_page == VM_PAGE_NULL)
			panic("vm_object_iopl_request: Wired page missing. \n");

		/*
		 * if we've already processed this page in an earlier
		 * dw_do_work, we need to undo the wiring... we will
		 * leave the dirty and reference bits on if they
		 * were set, since we don't have a good way of knowing
		 * what the previous state was and we won't get here
		 * under any normal circumstances...  we will always
		 * clear BUSY and wakeup any waiters via vm_page_free
		 * or PAGE_WAKEUP_DONE
		 */
		need_unwire = TRUE;

		if (dw_count) {
			if (dw_array[dw_index].dw_m == dst_page) {
				/*
				 * still in the deferred work list
				 * which means we haven't yet called
				 * vm_page_wire on this page
				 */
				need_unwire = FALSE;

				dw_index++;
				dw_count--;
			}
		}
		vm_page_lock_queues();

		if (dst_page->absent) {
			vm_page_free(dst_page);

			need_unwire = FALSE;
		} else {
			if (need_unwire == TRUE)
				vm_page_unwire(dst_page, TRUE);

			PAGE_WAKEUP_DONE(dst_page);
		}
		vm_page_unlock_queues();

		if (need_unwire == TRUE)
			VM_STAT_INCR(reactivations);
	}

	if (! (upl->flags & UPL_KERNEL_OBJECT)) {
		vm_object_activity_end(object);
		vm_object_collapse(object, 0, TRUE);
	}
	vm_object_unlock(object);
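
/*
 * Illustrative sketch (not part of the build): the do/while around
 * vm_fault_page() above keeps retrying until the fault succeeds, backing
 * off when memory is short and unwinding on interruption or hard errors.
 * The self-contained miniature below shows that retry shape with
 * hypothetical enum values and caller-supplied callbacks.
 */
#if 0
typedef enum {
	FAULT_SUCCESS,
	FAULT_RETRY,
	FAULT_SHORTAGE,
	FAULT_INTERRUPTED,
	FAULT_ERROR
} fault_result_t;

/* returns 0 once the page is resident, -1 if the caller must unwind */
static int
fault_until_success(fault_result_t (*fault_fn)(void *),
		    int (*page_wait_fn)(void),
		    void *ctx)
{
	fault_result_t result;

	do {
		result = fault_fn(ctx);

		switch (result) {
		case FAULT_SUCCESS:
			break;			/* page is resident; go use it */
		case FAULT_RETRY:
			break;			/* object changed under us; just retry */
		case FAULT_SHORTAGE:
			if (!page_wait_fn())	/* wait for free memory, then retry */
				return -1;
			break;
		case FAULT_INTERRUPTED:
		case FAULT_ERROR:
		default:
			return -1;		/* caller unwinds whatever it wired so far */
		}
	} while (result != FAULT_SUCCESS);

	return 0;
}
#endif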
kern_return_t
upl_transpose(
	upl_t	upl1,
	upl_t	upl2)
{
	kern_return_t	retval;
	boolean_t	upls_locked;
	vm_object_t	object1, object2;

	if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2 || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR) || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) {
		return KERN_INVALID_ARGUMENT;
	}

	upls_locked = FALSE;

	/*
	 * Since we need to lock both UPLs at the same time,
	 * avoid deadlocks by always taking locks in the same order.
	 */
	upls_locked = TRUE;	/* the UPLs will need to be unlocked */

	object1 = upl1->map_object;
	object2 = upl2->map_object;

	if (upl1->offset != 0 || upl2->offset != 0 ||
	    upl1->size != upl2->size) {
		/*
		 * We deal only with full objects, not subsets.
		 * That's because we exchange the entire backing store info
		 * for the objects: pager, resident pages, etc...  We can't do
		 * it partially.
		 */
		retval = KERN_INVALID_VALUE;
		goto done;
	}

	/*
	 * Transpose the VM objects' backing store.
	 */
	retval = vm_object_transpose(object1, object2,
				     (vm_object_size_t) upl1->size);

	if (retval == KERN_SUCCESS) {
		/*
		 * Make each UPL point to the correct VM object, i.e. the
		 * object holding the pages that the UPL refers to...
		 */
		queue_remove(&object1->uplq, upl1, upl_t, uplq);
		queue_remove(&object2->uplq, upl2, upl_t, uplq);

		upl1->map_object = object2;
		upl2->map_object = object1;

		queue_enter(&object1->uplq, upl2, upl_t, uplq);
		queue_enter(&object2->uplq, upl1, upl_t, uplq);
	}

done:
	upls_locked = FALSE;

	return retval;
}
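
/*
 * Illustrative sketch (not part of the build): upl_transpose() above swaps
 * which VM object each UPL points at and re-links each UPL onto the other
 * object's list.  The self-contained miniature below shows the same
 * "swap the back-pointers and fix both membership lists" shape with
 * hypothetical types and a plain singly linked list.
 */
#if 0
#include <stddef.h>

struct mini_obj;

struct mini_upl {
	struct mini_obj	*map_object;
	struct mini_upl	*next;		/* link in map_object's list */
};

struct mini_obj {
	struct mini_upl	*upl_list;
};

static void
mini_list_remove(struct mini_obj *obj, struct mini_upl *upl)
{
	struct mini_upl **pp;

	for (pp = &obj->upl_list; *pp != NULL; pp = &(*pp)->next) {
		if (*pp == upl) {
			*pp = upl->next;
			upl->next = NULL;
			return;
		}
	}
}

static void
mini_list_insert(struct mini_obj *obj, struct mini_upl *upl)
{
	upl->next = obj->upl_list;
	obj->upl_list = upl;
}

static void
mini_transpose(struct mini_upl *upl1, struct mini_upl *upl2)
{
	struct mini_obj *obj1 = upl1->map_object;
	struct mini_obj *obj2 = upl2->map_object;

	mini_list_remove(obj1, upl1);
	mini_list_remove(obj2, upl2);

	upl1->map_object = obj2;	/* each UPL now names the object that */
	upl2->map_object = obj1;	/* really holds its pages */

	mini_list_insert(obj1, upl2);
	mini_list_insert(obj2, upl1);
}
#endif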
void
upl_range_needed(
	upl_t	upl,
	int	index,
	int	count)
{
	upl_page_info_t	*user_page_list;
	int		size_in_pages;

	if ( !(upl->flags & UPL_INTERNAL) || count <= 0)
		return;

	size_in_pages = upl->size / PAGE_SIZE;

	user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));

	while (count-- && index < size_in_pages)
		user_page_list[index++].needed = TRUE;
}
/*
 * ENCRYPTED SWAP:
 *
 * Rationale:  the user might have some encrypted data on disk (via
 * FileVault or any other mechanism).  That data is then decrypted in
 * memory, which is safe as long as the machine is secure.  But that
 * decrypted data in memory could be paged out to disk by the default
 * pager.  The data would then be stored on disk in clear (not encrypted)
 * and it could be accessed by anyone who gets physical access to the
 * disk (if the laptop or the disk gets stolen for example).  This weakens
 * the security offered by FileVault.
 *
 * Solution:  the default pager will optionally request that all the
 * pages it gathers for pageout be encrypted, via the UPL interfaces,
 * before it sends this UPL to disk via the vnode_pageout() path.
 *
 * Notes:
 *
 * To avoid disrupting the VM LRU algorithms, we want to keep the
 * clean-in-place mechanisms, which allow us to send some extra pages to
 * swap (clustering) without actually removing them from the user's
 * address space.  We don't want the user to unknowingly access encrypted
 * data, so we have to actually remove the encrypted pages from the page
 * table.  When the user accesses the data, the hardware will fail to
 * locate the virtual page in its page table and will trigger a page
 * fault.  We can then decrypt the page and enter it in the page table
 * again.  Whenever we allow the user to access the contents of a page,
 * we have to make sure it's not encrypted.
 */

/*
 * ENCRYPTED SWAP:
 * Reserve of virtual addresses in the kernel address space.
 * We need to map the physical pages in the kernel, so that we
 * can call the encryption/decryption routines with a kernel
 * virtual address.  We keep this pool of pre-allocated kernel
 * virtual addresses so that we don't have to scan the kernel's
 * virtual address space each time we need to encrypt or decrypt
 * a physical page.
 * It would be nice to be able to encrypt and decrypt in physical
 * mode but that might not always be more efficient...
 */
decl_simple_lock_data(,vm_paging_lock)
#define VM_PAGING_NUM_PAGES	64
vm_map_offset_t	vm_paging_base_address = 0;
boolean_t	vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
int		vm_paging_max_index = 0;
int		vm_paging_page_waiter = 0;
int		vm_paging_page_waiter_total = 0;
unsigned long	vm_paging_no_kernel_page = 0;
unsigned long	vm_paging_objects_mapped = 0;
unsigned long	vm_paging_pages_mapped = 0;
unsigned long	vm_paging_objects_mapped_slow = 0;
unsigned long	vm_paging_pages_mapped_slow = 0;
void
vm_paging_map_init(void)
{
	kern_return_t	kr;
	vm_map_offset_t	page_map_offset;
	vm_map_entry_t	map_entry;

	assert(vm_paging_base_address == 0);

	/*
	 * Initialize our pool of pre-allocated kernel
	 * virtual addresses.
	 */
	page_map_offset = 0;
	kr = vm_map_find_space(kernel_map,
			       &page_map_offset,
			       VM_PAGING_NUM_PAGES * PAGE_SIZE,
			       0,
			       0,
			       &map_entry);
	if (kr != KERN_SUCCESS) {
		panic("vm_paging_map_init: kernel_map full\n");
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = page_map_offset;
	map_entry->protection = VM_PROT_NONE;
	map_entry->max_protection = VM_PROT_NONE;
	map_entry->permanent = TRUE;
	vm_object_reference(kernel_object);
	vm_map_unlock(kernel_map);

	assert(vm_paging_base_address == 0);
	vm_paging_base_address = page_map_offset;
}

/*
 * ENCRYPTED SWAP:
 * vm_paging_map_object:
 *	Maps part of a VM object's pages in the kernel
 *	virtual address space, using the pre-allocated
 *	kernel virtual addresses, if possible.
 * Context:
 *	The VM object is locked.  This lock will get
 *	dropped and re-acquired though, so the caller
 *	must make sure the VM object is kept alive
 *	(by holding a VM map that has a reference
 *	on it, for example, or taking an extra reference).
 *	The page should also be kept busy to prevent
 *	it from being reclaimed.
 */
kern_return_t
vm_paging_map_object(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_prot_t		protection,
	boolean_t		can_unlock_object,
	vm_map_size_t		*size,		/* IN/OUT */
	vm_map_offset_t		*address,	/* OUT */
	boolean_t		*need_unmap)	/* OUT */
{
	kern_return_t		kr;
	vm_map_offset_t		page_map_offset;
	vm_map_size_t		map_size;
	vm_object_offset_t	object_offset;
	int			i;

	if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
		/* use permanent 1-to-1 kernel mapping of physical memory ? */
#if __x86_64__
		*address = (vm_map_offset_t)
			PHYSMAP_PTOV((pmap_paddr_t)page->phys_page <<
				     PAGE_SHIFT);
		*need_unmap = FALSE;
		return KERN_SUCCESS;
#else
#warn "vm_paging_map_object: no 1-to-1 kernel mapping of physical memory..."
#endif

		/*
		 * Use one of the pre-allocated kernel virtual addresses
		 * and just enter the VM page in the kernel address space
		 * at that virtual address.
		 */
		simple_lock(&vm_paging_lock);

		/*
		 * Try and find an available kernel virtual address
		 * from our pre-allocated pool.
		 */
		page_map_offset = 0;
		for (;;) {
			for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
				if (vm_paging_page_inuse[i] == FALSE) {
					page_map_offset =
						vm_paging_base_address +
						(i * PAGE_SIZE);
					break;
				}
			}
			if (page_map_offset != 0) {
				/* found a space to map our page ! */
				break;
			}

			if (can_unlock_object) {
				/*
				 * If we can afford to unlock the VM object,
				 * let's take the slow path now...
				 */
				break;
			}
			/*
			 * We can't afford to unlock the VM object, so
			 * let's wait for a space to become available...
			 */
			vm_paging_page_waiter_total++;
			vm_paging_page_waiter++;
			thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
						       &vm_paging_lock,
						       THREAD_UNINT);
			vm_paging_page_waiter--;
			/* ... and try again */
		}

		if (page_map_offset != 0) {
			/*
			 * We found a kernel virtual address;
			 * map the physical page to that virtual address.
			 */
			if (i > vm_paging_max_index) {
				vm_paging_max_index = i;
			}
			vm_paging_page_inuse[i] = TRUE;
			simple_unlock(&vm_paging_lock);

			page->pmapped = TRUE;

			/*
			 * Keep the VM object locked over the PMAP_ENTER
			 * and the actual use of the page by the kernel,
			 * or this pmap mapping might get undone by a
			 * vm_object_pmap_protect() call...
			 */
			PMAP_ENTER(kernel_pmap, page_map_offset, page, protection, /* ... */);

			vm_paging_objects_mapped++;
			vm_paging_pages_mapped++;
			*address = page_map_offset;
			*need_unmap = TRUE;

			/* all done and mapped, ready to use ! */
			return KERN_SUCCESS;
		}

		/*
		 * We ran out of pre-allocated kernel virtual
		 * addresses.  Just map the page in the kernel
		 * the slow and regular way.
		 */
		vm_paging_no_kernel_page++;
		simple_unlock(&vm_paging_lock);
	}

	if (! can_unlock_object) {
		*need_unmap = FALSE;
		return KERN_NOT_SUPPORTED;
	}

	object_offset = vm_object_trunc_page(offset);
	map_size = vm_map_round_page(*size,
				     VM_MAP_PAGE_MASK(kernel_map));

	/*
	 * Try and map the required range of the object
	 * in the kernel_map
	 */
	vm_object_reference_locked(object);	/* for the map entry */
	vm_object_unlock(object);

	kr = vm_map_enter(kernel_map, /* ... */);
	if (kr != KERN_SUCCESS) {
		*need_unmap = FALSE;
		vm_object_deallocate(object);	/* for the map entry */
		vm_object_lock(object);
		return kr;
	}

	/*
	 * Enter the mapped pages in the page table now.
	 */
	vm_object_lock(object);
	/*
	 * VM object must be kept locked from before PMAP_ENTER()
	 * until after the kernel is done accessing the page(s).
	 * Otherwise, the pmap mappings in the kernel could be
	 * undone by a call to vm_object_pmap_protect().
	 */
	for (page_map_offset = 0;
	     map_size != 0;
	     map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {

		page = vm_page_lookup(object, offset + page_map_offset);
		if (page == VM_PAGE_NULL) {
			printf("vm_paging_map_object: no page !?");
			vm_object_unlock(object);
			kr = vm_map_remove(kernel_map, *address, *size,
					   VM_MAP_NO_FLAGS);
			assert(kr == KERN_SUCCESS);
			*need_unmap = FALSE;
			vm_object_lock(object);
			return KERN_MEMORY_ERROR;
		}
		page->pmapped = TRUE;

		//assert(pmap_verify_free(page->phys_page));
		PMAP_ENTER(kernel_pmap, *address + page_map_offset, page, protection, /* ... */);
	}

	vm_paging_objects_mapped_slow++;
	vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);

	*need_unmap = TRUE;

	return KERN_SUCCESS;
}
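
/*
 * Illustrative sketch (not part of the build): the fast path of
 * vm_paging_map_object() above hands out one of a fixed number of
 * pre-allocated page-sized kernel-VA slots tracked by an in-use flag per
 * slot.  The self-contained miniature below shows that fixed-slot pool;
 * locking and the waiter wakeup are omitted and the names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

#define POOL_NUM_PAGES	64
#define POOL_PAGE_SIZE	4096ULL

static uint64_t	pool_base_address;
static bool	pool_slot_inuse[POOL_NUM_PAGES];

/* returns the slot's virtual address, or 0 when the pool is exhausted */
static uint64_t
pool_grab_slot(void)
{
	for (int i = 0; i < POOL_NUM_PAGES; i++) {
		if (!pool_slot_inuse[i]) {
			pool_slot_inuse[i] = true;
			return pool_base_address + (uint64_t)i * POOL_PAGE_SIZE;
		}
	}
	return 0;	/* caller falls back to the slow mapping path */
}

static void
pool_release_slot(uint64_t addr)
{
	int i = (int)((addr - pool_base_address) / POOL_PAGE_SIZE);

	pool_slot_inuse[i] = false;	/* the real code also wakes any waiter */
}
#endif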
/*
 * ENCRYPTED SWAP:
 * vm_paging_unmap_object:
 *	Unmaps part of a VM object's pages from the kernel
 *	virtual address space.
 * Context:
 *	The VM object is locked.  This lock will get
 *	dropped and re-acquired though.
 */
void
vm_paging_unmap_object(
	vm_object_t	object,
	vm_map_offset_t	start,
	vm_map_offset_t	end)
{
	kern_return_t	kr;
	int		i;

	if ((vm_paging_base_address == 0) ||
	    (start < vm_paging_base_address) ||
	    (end > (vm_paging_base_address
		    + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
		/*
		 * We didn't use our pre-allocated pool of
		 * kernel virtual address.  Deallocate the
		 * virtual memory.
		 */
		if (object != VM_OBJECT_NULL) {
			vm_object_unlock(object);
		}
		kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
		if (object != VM_OBJECT_NULL) {
			vm_object_lock(object);
		}
		assert(kr == KERN_SUCCESS);
	} else {
		/*
		 * We used a kernel virtual address from our
		 * pre-allocated pool.  Put it back in the pool
		 * for next time.
		 */
		assert(end - start == PAGE_SIZE);
		i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
		assert(i >= 0 && i < VM_PAGING_NUM_PAGES);

		/* undo the pmap mapping */
		pmap_remove(kernel_pmap, start, end);

		simple_lock(&vm_paging_lock);
		vm_paging_page_inuse[i] = FALSE;
		if (vm_paging_page_waiter) {
			thread_wakeup(&vm_paging_page_waiter);
		}
		simple_unlock(&vm_paging_lock);
	}
}
7768 * "iv" is the "initial vector". Ideally, we want to
7769 * have a different one for each page we encrypt, so that
7770 * crackers can't find encryption patterns too easily.
7772 #define SWAP_CRYPT_AES_KEY_SIZE 128 /* XXX 192 and 256 don't work ! */
7773 boolean_t swap_crypt_ctx_initialized
= FALSE
;
7774 uint32_t swap_crypt_key
[8]; /* big enough for a 256 key */
7775 aes_ctx swap_crypt_ctx
;
7776 const unsigned char swap_crypt_null_iv
[AES_BLOCK_SIZE
] = {0xa, };
7779 boolean_t swap_crypt_ctx_tested
= FALSE
;
7780 unsigned char swap_crypt_test_page_ref
[4096] __attribute__((aligned(4096)));
7781 unsigned char swap_crypt_test_page_encrypt
[4096] __attribute__((aligned(4096)));
7782 unsigned char swap_crypt_test_page_decrypt
[4096] __attribute__((aligned(4096)));
7786 * Initialize the encryption context: key and key size.
7788 void swap_crypt_ctx_initialize(void); /* forward */
7790 swap_crypt_ctx_initialize(void)
7795 * No need for locking to protect swap_crypt_ctx_initialized
7796 * because the first use of encryption will come from the
7797 * pageout thread (we won't pagein before there's been a pageout)
7798 * and there's only one pageout thread.
7800 if (swap_crypt_ctx_initialized
== FALSE
) {
7802 i
< (sizeof (swap_crypt_key
) /
7803 sizeof (swap_crypt_key
[0]));
7805 swap_crypt_key
[i
] = random();
7807 aes_encrypt_key((const unsigned char *) swap_crypt_key
,
7808 SWAP_CRYPT_AES_KEY_SIZE
,
7809 &swap_crypt_ctx
.encrypt
);
7810 aes_decrypt_key((const unsigned char *) swap_crypt_key
,
7811 SWAP_CRYPT_AES_KEY_SIZE
,
7812 &swap_crypt_ctx
.decrypt
);
7813 swap_crypt_ctx_initialized
= TRUE
;
7818 * Validate the encryption algorithms.
7820 if (swap_crypt_ctx_tested
== FALSE
) {
7822 for (i
= 0; i
< 4096; i
++) {
7823 swap_crypt_test_page_ref
[i
] = (char) i
;
7826 aes_encrypt_cbc(swap_crypt_test_page_ref
,
7828 PAGE_SIZE
/ AES_BLOCK_SIZE
,
7829 swap_crypt_test_page_encrypt
,
7830 &swap_crypt_ctx
.encrypt
);
7832 aes_decrypt_cbc(swap_crypt_test_page_encrypt
,
7834 PAGE_SIZE
/ AES_BLOCK_SIZE
,
7835 swap_crypt_test_page_decrypt
,
7836 &swap_crypt_ctx
.decrypt
);
7837 /* compare result with original */
7838 for (i
= 0; i
< 4096; i
++) {
7839 if (swap_crypt_test_page_decrypt
[i
] !=
7840 swap_crypt_test_page_ref
[i
]) {
7841 panic("encryption test failed");
7846 aes_encrypt_cbc(swap_crypt_test_page_decrypt
,
7848 PAGE_SIZE
/ AES_BLOCK_SIZE
,
7849 swap_crypt_test_page_decrypt
,
7850 &swap_crypt_ctx
.encrypt
);
7851 /* decrypt in place */
7852 aes_decrypt_cbc(swap_crypt_test_page_decrypt
,
7854 PAGE_SIZE
/ AES_BLOCK_SIZE
,
7855 swap_crypt_test_page_decrypt
,
7856 &swap_crypt_ctx
.decrypt
);
7857 for (i
= 0; i
< 4096; i
++) {
7858 if (swap_crypt_test_page_decrypt
[i
] !=
7859 swap_crypt_test_page_ref
[i
]) {
7860 panic("in place encryption test failed");
7864 swap_crypt_ctx_tested
= TRUE
;
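
/*
 * Illustrative sketch (not part of the build): swap_crypt_ctx_initialize()
 * above validates the cipher with a round-trip test -- fill a reference
 * page, transform it out-of-place, invert it, compare, then repeat the
 * check in place.  The self-contained miniature below shows the same
 * pattern with a toy XOR transform standing in for AES so it needs no
 * crypto library; names are hypothetical.
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define TEST_PAGE_SIZE	4096

static void
toy_transform(const uint8_t *in, uint8_t *out, size_t len, uint8_t key)
{
	for (size_t i = 0; i < len; i++)
		out[i] = in[i] ^ key;	/* XOR is its own inverse */
}

static void
crypto_self_test(void)
{
	static uint8_t ref[TEST_PAGE_SIZE], enc[TEST_PAGE_SIZE], dec[TEST_PAGE_SIZE];
	const uint8_t key = 0x5a;

	for (size_t i = 0; i < TEST_PAGE_SIZE; i++)
		ref[i] = (uint8_t)i;

	/* out-of-place round trip */
	toy_transform(ref, enc, TEST_PAGE_SIZE, key);
	toy_transform(enc, dec, TEST_PAGE_SIZE, key);
	assert(memcmp(ref, dec, TEST_PAGE_SIZE) == 0);

	/* in-place round trip, as the kernel also validates */
	toy_transform(dec, dec, TEST_PAGE_SIZE, key);
	toy_transform(dec, dec, TEST_PAGE_SIZE, key);
	assert(memcmp(ref, dec, TEST_PAGE_SIZE) == 0);
}
#endif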
/*
 * ENCRYPTED SWAP:
 * vm_page_encrypt:
 *	Encrypt the given page, for secure paging.
 *	The page might already be mapped at kernel virtual
 *	address "kernel_mapping_offset".  Otherwise, we need
 *	to map it.
 * Context:
 *	The page's object is locked, but this lock will be released
 *	and re-acquired.
 *	The page is busy and not accessible by users (not entered in any pmap).
 */
void
vm_page_encrypt(
	vm_page_t	page,
	vm_map_offset_t	kernel_mapping_offset)
{
	kern_return_t		kr;
	vm_map_size_t		kernel_mapping_size;
	boolean_t		kernel_mapping_needs_unmap;
	vm_offset_t		kernel_vaddr;
	union {
		unsigned char	aes_iv[AES_BLOCK_SIZE];
		struct {
			memory_object_t		pager_object;
			vm_object_offset_t	paging_offset;
		} vm;
	} encrypt_iv;

	if (! vm_pages_encrypted) {
		vm_pages_encrypted = TRUE;
	}

	if (page->encrypted) {
		/*
		 * Already encrypted: no need to do it again.
		 */
		vm_page_encrypt_already_encrypted_counter++;
		return;
	}
	assert(page->dirty || page->precious);

	ASSERT_PAGE_DECRYPTED(page);

	/*
	 * Take a paging-in-progress reference to keep the object
	 * alive even if we have to unlock it (in vm_paging_map_object()
	 * for example)...
	 */
	vm_object_paging_begin(page->object);

	if (kernel_mapping_offset == 0) {
		/*
		 * The page hasn't already been mapped in kernel space
		 * by the caller.  Map it now, so that we can access
		 * its contents and encrypt them.
		 */
		kernel_mapping_size = PAGE_SIZE;
		kernel_mapping_needs_unmap = FALSE;
		kr = vm_paging_map_object(page,
					  page->object,
					  page->offset,
					  VM_PROT_READ | VM_PROT_WRITE,
					  FALSE,
					  &kernel_mapping_size,
					  &kernel_mapping_offset,
					  &kernel_mapping_needs_unmap);
		if (kr != KERN_SUCCESS) {
			panic("vm_page_encrypt: "
			      "could not map page in kernel: 0x%x\n", kr);
		}
	} else {
		kernel_mapping_size = 0;
		kernel_mapping_needs_unmap = FALSE;
	}
	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);

	if (swap_crypt_ctx_initialized == FALSE) {
		swap_crypt_ctx_initialize();
	}
	assert(swap_crypt_ctx_initialized);

	/*
	 * Prepare an "initial vector" for the encryption.
	 * We use the "pager" and the "paging_offset" for that
	 * page to obfuscate the encrypted data a bit more and
	 * prevent crackers from finding patterns that they could
	 * use to break the key.
	 */
	bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
	encrypt_iv.vm.pager_object = page->object->pager;
	encrypt_iv.vm.paging_offset =
		page->object->paging_offset + page->offset;

	/* encrypt the "initial vector" */
	aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
			swap_crypt_null_iv,
			1,
			&encrypt_iv.aes_iv[0],
			&swap_crypt_ctx.encrypt);

	/*
	 * Encrypt the page.
	 */
	aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
			&encrypt_iv.aes_iv[0],
			PAGE_SIZE / AES_BLOCK_SIZE,
			(unsigned char *) kernel_vaddr,
			&swap_crypt_ctx.encrypt);

	vm_page_encrypt_counter++;

	/*
	 * Unmap the page from the kernel's address space,
	 * if we had to map it ourselves.  Otherwise, let
	 * the caller undo the mapping if needed.
	 */
	if (kernel_mapping_needs_unmap) {
		vm_paging_unmap_object(page->object,
				       kernel_mapping_offset,
				       kernel_mapping_offset + kernel_mapping_size);
	}

	/*
	 * Clear the "reference" and "modified" bits.
	 * This should clean up any impact the encryption had
	 * on them.
	 * The page was kept busy and disconnected from all pmaps,
	 * so it can't have been referenced or modified from user
	 * space.
	 * The software bits will be reset later after the I/O
	 * has completed (in upl_commit_range()).
	 */
	pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);

	page->encrypted = TRUE;

	vm_object_paging_end(page->object);
}
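
/*
 * Illustrative sketch (not part of the build): the per-page IV above starts
 * from a zeroed 16-byte block, folds in the backing pager and the page's
 * paging offset, and (in the real code) runs that block through AES once so
 * the IV that reaches the page encryption is not predictable.  The cipher
 * call is elided below and the names are hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define IV_SIZE	16

struct page_identity {
	const void	*pager_object;	/* which pager backs the page */
	uint64_t	paging_offset;	/* where the page lives in that pager */
};

static void
derive_page_iv(const struct page_identity *id, uint8_t iv[IV_SIZE])
{
	memset(iv, 0, IV_SIZE);
	memcpy(iv, &id->pager_object, sizeof(id->pager_object));
	memcpy(iv + sizeof(id->pager_object), &id->paging_offset,
	    sizeof(id->paging_offset));
	/* real code: encrypt 'iv' once with the swap key before using it */
}
#endif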
/*
 * ENCRYPTED SWAP:
 * vm_page_decrypt:
 *	Decrypt the given page.
 *	The page might already be mapped at kernel virtual
 *	address "kernel_mapping_offset".  Otherwise, we need
 *	to map it.
 * Context:
 *	The page's VM object is locked but will be unlocked and relocked.
 *	The page is busy and not accessible by users (not entered in any pmap).
 */
void
vm_page_decrypt(
	vm_page_t	page,
	vm_map_offset_t	kernel_mapping_offset)
{
	kern_return_t		kr;
	vm_map_size_t		kernel_mapping_size;
	vm_offset_t		kernel_vaddr;
	boolean_t		kernel_mapping_needs_unmap;
	union {
		unsigned char	aes_iv[AES_BLOCK_SIZE];
		struct {
			memory_object_t		pager_object;
			vm_object_offset_t	paging_offset;
		} vm;
	} decrypt_iv;
	boolean_t		was_dirty;

	assert(page->encrypted);

	was_dirty = page->dirty;

	/*
	 * Take a paging-in-progress reference to keep the object
	 * alive even if we have to unlock it (in vm_paging_map_object()
	 * for example)...
	 */
	vm_object_paging_begin(page->object);

	if (kernel_mapping_offset == 0) {
		/*
		 * The page hasn't already been mapped in kernel space
		 * by the caller.  Map it now, so that we can access
		 * its contents and decrypt them.
		 */
		kernel_mapping_size = PAGE_SIZE;
		kernel_mapping_needs_unmap = FALSE;
		kr = vm_paging_map_object(page,
					  page->object,
					  page->offset,
					  VM_PROT_READ | VM_PROT_WRITE,
					  FALSE,
					  &kernel_mapping_size,
					  &kernel_mapping_offset,
					  &kernel_mapping_needs_unmap);
		if (kr != KERN_SUCCESS) {
			panic("vm_page_decrypt: "
			      "could not map page in kernel: 0x%x\n", kr);
		}
	} else {
		kernel_mapping_size = 0;
		kernel_mapping_needs_unmap = FALSE;
	}
	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);

	assert(swap_crypt_ctx_initialized);

	/*
	 * Prepare an "initial vector" for the decryption.
	 * It has to be the same as the "initial vector" we
	 * used to encrypt that page.
	 */
	bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
	decrypt_iv.vm.pager_object = page->object->pager;
	decrypt_iv.vm.paging_offset =
		page->object->paging_offset + page->offset;

	/* encrypt the "initial vector" */
	aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
			swap_crypt_null_iv,
			1,
			&decrypt_iv.aes_iv[0],
			&swap_crypt_ctx.encrypt);

	/*
	 * Decrypt the page.
	 */
	aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
			&decrypt_iv.aes_iv[0],
			PAGE_SIZE / AES_BLOCK_SIZE,
			(unsigned char *) kernel_vaddr,
			&swap_crypt_ctx.decrypt);
	vm_page_decrypt_counter++;

	/*
	 * Unmap the page from the kernel's address space,
	 * if we had to map it ourselves.  Otherwise, let
	 * the caller undo the mapping if needed.
	 */
	if (kernel_mapping_needs_unmap) {
		vm_paging_unmap_object(page->object,
				       kernel_vaddr,
				       kernel_vaddr + PAGE_SIZE);
	}

	if (was_dirty) {
		/*
		 * The pager did not specify that the page would be
		 * clean when it got paged in, so let's not clean it here
		 * either.
		 */
	} else {
		/*
		 * After decryption, the page is actually still clean.
		 * It was encrypted as part of paging, which "cleans"
		 * the "dirty" pages.
		 * No one could access it after it was encrypted
		 * and the decryption doesn't count.
		 */
		page->dirty = FALSE;
		assert (page->cs_validated == FALSE);
		pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
	}
	page->encrypted = FALSE;

	/*
	 * We've just modified the page's contents via the data cache and part
	 * of the new contents might still be in the cache and not yet in RAM.
	 * Since the page is now available and might get gathered in a UPL to
	 * be part of a DMA transfer from a driver that expects the memory to
	 * be coherent at this point, we have to flush the data cache.
	 */
	pmap_sync_page_attributes_phys(page->phys_page);
	/*
	 * Since the page is not mapped yet, some code might assume that it
	 * doesn't need to invalidate the instruction cache when writing to
	 * that page.  That code relies on "pmapped" being FALSE, so that the
	 * caches get synchronized when the page is first mapped.
	 */
	assert(pmap_verify_free(page->phys_page));
	page->pmapped = FALSE;
	page->wpmapped = FALSE;

	vm_object_paging_end(page->object);
}
#if DEVELOPMENT || DEBUG
unsigned long upl_encrypt_upls = 0;
unsigned long upl_encrypt_pages = 0;
#endif

/*
 * ENCRYPTED SWAP:
 *
 * upl_encrypt:
 *	Encrypts all the pages in the UPL, within the specified range.
 */
void
upl_encrypt(
	upl_t		upl,
	upl_offset_t	crypt_offset,
	upl_size_t	crypt_size)
{
	upl_size_t		upl_size, subupl_size=crypt_size;
	upl_offset_t		offset_in_upl, subupl_offset=crypt_offset;
	vm_object_t		upl_object;
	vm_object_offset_t	upl_offset;
	vm_page_t		page;
	vm_object_t		shadow_object;
	vm_object_offset_t	shadow_offset;
	vm_object_offset_t	paging_offset;
	vm_object_offset_t	base_offset;
	int			isVectorUPL = 0;
	upl_t			vector_upl = NULL;

	if((isVectorUPL = vector_upl_is_valid(upl)))
		vector_upl = upl;

process_upl_to_encrypt:
	if(isVectorUPL) {
		crypt_size = subupl_size;
		crypt_offset = subupl_offset;
		upl = vector_upl_subupl_byoffset(vector_upl, &crypt_offset, &crypt_size);
		if(upl == NULL)
			panic("upl_encrypt: Accessing a sub-upl that doesn't exist\n");
		subupl_size -= crypt_size;
		subupl_offset += crypt_size;
	}

#if DEVELOPMENT || DEBUG
	upl_encrypt_upls++;
	upl_encrypt_pages += crypt_size / PAGE_SIZE;
#endif
	upl_object = upl->map_object;
	upl_offset = upl->offset;
	upl_size = upl->size;

	vm_object_lock(upl_object);

	/*
	 * Find the VM object that contains the actual pages.
	 */
	if (upl_object->pageout) {
		shadow_object = upl_object->shadow;
		/*
		 * The offset in the shadow object is actually also
		 * accounted for in upl->offset.  It possibly shouldn't be
		 * this way, but for now don't account for it twice.
		 */
		shadow_offset = 0;
		assert(upl_object->paging_offset == 0);	/* XXX ? */
		vm_object_lock(shadow_object);
	} else {
		shadow_object = upl_object;
		shadow_offset = 0;
	}

	paging_offset = shadow_object->paging_offset;
	vm_object_paging_begin(shadow_object);

	if (shadow_object != upl_object)
		vm_object_unlock(upl_object);

	base_offset = shadow_offset;
	base_offset += upl_offset;
	base_offset += crypt_offset;
	base_offset -= paging_offset;

	assert(crypt_offset + crypt_size <= upl_size);

	for (offset_in_upl = 0;
	     offset_in_upl < crypt_size;
	     offset_in_upl += PAGE_SIZE) {
		page = vm_page_lookup(shadow_object,
				      base_offset + offset_in_upl);
		if (page == VM_PAGE_NULL) {
			panic("upl_encrypt: "
			      "no page for (obj=%p,off=0x%llx+0x%x)!\n",
			      /* ... */);
		}
		/*
		 * Disconnect the page from all pmaps, so that nobody can
		 * access it while it's encrypted.  After that point, all
		 * accesses to this page will cause a page fault and block
		 * while the page is busy being encrypted.  After the
		 * encryption completes, any access will cause a
		 * page fault and the page gets decrypted at that time.
		 */
		pmap_disconnect(page->phys_page);
		vm_page_encrypt(page, 0);

		if (vm_object_lock_avoid(shadow_object)) {
			/*
			 * Give vm_pageout_scan() a chance to convert more
			 * pages from "clean-in-place" to "clean-and-free",
			 * if it's interested in the same pages we selected
			 * in this UPL.
			 */
			vm_object_unlock(shadow_object);
			vm_object_lock(shadow_object);
		}
	}

	vm_object_paging_end(shadow_object);
	vm_object_unlock(shadow_object);

	if(isVectorUPL && subupl_size)
		goto process_upl_to_encrypt;
}

#else /* ENCRYPTED_SWAP */
void
upl_encrypt(
	__unused upl_t			upl,
	__unused upl_offset_t		crypt_offset,
	__unused upl_size_t		crypt_size)
{
}

void
vm_page_encrypt(
	__unused vm_page_t		page,
	__unused vm_map_offset_t	kernel_mapping_offset)
{
}

void
vm_page_decrypt(
	__unused vm_page_t		page,
	__unused vm_map_offset_t	kernel_mapping_offset)
{
}

#endif /* ENCRYPTED_SWAP */

/*
 * page->object must be locked
 */
void
vm_pageout_steal_laundry(vm_page_t page, boolean_t queues_locked)
{
	if (!queues_locked) {
		vm_page_lockspin_queues();
	}

	/*
	 * need to drop the laundry count...
	 * we may also need to remove it
	 * from the I/O paging queue...
	 * vm_pageout_throttle_up handles both cases
	 *
	 * the laundry and pageout_queue flags are cleared...
	 */
	vm_pageout_throttle_up(page);

	vm_page_steal_pageout_page++;

	if (!queues_locked) {
		vm_page_unlock_queues();
	}
}
upl_t
vector_upl_create(vm_offset_t upl_offset)
{
	int		vector_upl_size = sizeof(struct _vector_upl);
	int		i = 0;
	upl_t		upl;
	vector_upl_t	vector_upl = (vector_upl_t)kalloc(vector_upl_size);

	upl = upl_create(0,UPL_VECTOR,0);
	upl->vector_upl = vector_upl;
	upl->offset = upl_offset;
	vector_upl->size = 0;
	vector_upl->offset = upl_offset;
	vector_upl->invalid_upls = 0;
	vector_upl->num_upls = 0;
	vector_upl->pagelist = NULL;

	for(i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
		vector_upl->upl_iostates[i].size = 0;
		vector_upl->upl_iostates[i].offset = 0;
	}
	return upl;
}

void
vector_upl_deallocate(upl_t upl)
{
	if(upl) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			if(vector_upl->invalid_upls != vector_upl->num_upls)
				panic("Deallocating non-empty Vectored UPL\n");
			kfree(vector_upl->pagelist,(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
			vector_upl->invalid_upls = 0;
			vector_upl->num_upls = 0;
			vector_upl->pagelist = NULL;
			vector_upl->size = 0;
			vector_upl->offset = 0;
			kfree(vector_upl, sizeof(struct _vector_upl));
			vector_upl = (vector_upl_t)0xfeedfeed;
		}
		else
			panic("vector_upl_deallocate was passed a non-vectored upl\n");
	}
	else
		panic("vector_upl_deallocate was passed a NULL upl\n");
}

boolean_t
vector_upl_is_valid(upl_t upl)
{
	if(upl && ((upl->flags & UPL_VECTOR)==UPL_VECTOR)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl == NULL || vector_upl == (vector_upl_t)0xfeedfeed || vector_upl == (vector_upl_t)0xfeedbeef)
			return FALSE;
		else
			return TRUE;
	}
	return FALSE;
}

boolean_t
vector_upl_set_subupl(upl_t upl,upl_t subupl, uint32_t io_size)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			if(subupl) {
				if(io_size) {
					if(io_size < PAGE_SIZE)
						io_size = PAGE_SIZE;
					subupl->vector_upl = (void*)vector_upl;
					vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
					vector_upl->size += io_size;
					upl->size += io_size;
				}
				else {
					uint32_t i=0,invalid_upls=0;
					for(i = 0; i < vector_upl->num_upls; i++) {
						if(vector_upl->upl_elems[i] == subupl)
							break;
					}
					if(i == vector_upl->num_upls)
						panic("Trying to remove sub-upl when none exists");

					vector_upl->upl_elems[i] = NULL;
					invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1);
					if(invalid_upls == vector_upl->num_upls)
						return TRUE;
					else
						return FALSE;
				}
			}
			else
				panic("vector_upl_set_subupl was passed a NULL upl element\n");
		}
		else
			panic("vector_upl_set_subupl was passed a non-vectored upl\n");
	}
	else
		panic("vector_upl_set_subupl was passed a NULL upl\n");

	return FALSE;
}

void
vector_upl_set_pagelist(upl_t upl)
{
	if(vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			vm_offset_t pagelist_size=0, cur_upl_pagelist_size=0;

			vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE));

			for(i=0; i < vector_upl->num_upls; i++) {
				cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE;
				bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
				pagelist_size += cur_upl_pagelist_size;
				if(vector_upl->upl_elems[i]->highest_page > upl->highest_page)
					upl->highest_page = vector_upl->upl_elems[i]->highest_page;
			}
			assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) );
		}
		else
			panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
	}
	else
		panic("vector_upl_set_pagelist was passed a NULL upl\n");
}

upl_t
vector_upl_subupl_byindex(upl_t upl, uint32_t index)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			if(index < vector_upl->num_upls)
				return vector_upl->upl_elems[index];
		}
		else
			panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
	}
	return NULL;
}

upl_t
vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
{
	if(vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			upl_t subupl = NULL;
			vector_upl_iostates_t subupl_state;

			for(i=0; i < vector_upl->num_upls; i++) {
				subupl = vector_upl->upl_elems[i];
				subupl_state = vector_upl->upl_iostates[i];
				if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
					/* We could have been passed an offset/size pair that belongs
					 * to an UPL element that has already been committed/aborted.
					 * If so, return NULL.
					 */
					if(subupl == NULL)
						return NULL;
					if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
						*upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
						if(*upl_size > subupl_state.size)
							*upl_size = subupl_state.size;
					}
					if(*upl_offset >= subupl_state.offset)
						*upl_offset -= subupl_state.offset;
					else if(i)
						panic("Vector UPL offset miscalculation\n");
					return subupl;
				}
			}
		}
		else
			panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
	}
	return NULL;
}
void
vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
{
	*v_upl_submap = NULL;

	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			*v_upl_submap = vector_upl->submap;
			*submap_dst_addr = vector_upl->submap_dst_addr;
		}
		else
			panic("vector_upl_get_submap was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_get_submap was passed a null UPL\n");
}

void
vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			vector_upl->submap = submap;
			vector_upl->submap_dst_addr = submap_dst_addr;
		}
		else
			panic("vector_upl_set_submap was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_set_submap was passed a NULL UPL\n");
}

void
vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
{
	if(vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			for(i = 0; i < vector_upl->num_upls; i++) {
				if(vector_upl->upl_elems[i] == subupl)
					break;
			}

			if(i == vector_upl->num_upls)
				panic("setting sub-upl iostate when none exists");

			vector_upl->upl_iostates[i].offset = offset;
			if(size < PAGE_SIZE)
				size = PAGE_SIZE;
			vector_upl->upl_iostates[i].size = size;
		}
		else
			panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_set_iostate was passed a NULL UPL\n");
}

void
vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
{
	if(vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			for(i = 0; i < vector_upl->num_upls; i++) {
				if(vector_upl->upl_elems[i] == subupl)
					break;
			}

			if(i == vector_upl->num_upls)
				panic("getting sub-upl iostate when none exists");

			*offset = vector_upl->upl_iostates[i].offset;
			*size = vector_upl->upl_iostates[i].size;
		}
		else
			panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_get_iostate was passed a NULL UPL\n");
}

void
vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			if(index < vector_upl->num_upls) {
				*offset = vector_upl->upl_iostates[index].offset;
				*size = vector_upl->upl_iostates[index].size;
			}
			else
				*offset = *size = 0;
		}
		else
			panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
}

upl_page_info_t *
upl_get_internal_vectorupl_pagelist(upl_t upl)
{
	return ((vector_upl_t)(upl->vector_upl))->pagelist;
}

void *
upl_get_internal_vectorupl(upl_t upl)
{
	return upl->vector_upl;
}

vm_size_t
upl_get_internal_pagelist_offset(void)
{
	return sizeof(struct upl);
}

void
upl_clear_dirty(
	upl_t		upl,
	boolean_t	value)
{
	if (value) {
		upl->flags |= UPL_CLEAR_DIRTY;
	} else {
		upl->flags &= ~UPL_CLEAR_DIRTY;
	}
}

void
upl_set_referenced(
	upl_t		upl,
	boolean_t	value)
{
	if (value) {
		upl->ext_ref_count++;
	} else {
		if (!upl->ext_ref_count) {
			panic("upl_set_referenced not %p\n", upl);
		}
		upl->ext_ref_count--;
	}
}

boolean_t
vm_page_is_slideable(vm_page_t m)
{
	boolean_t	result = FALSE;
	vm_shared_region_slide_info_t si;

	vm_object_lock_assert_held(m->object);

	/* make sure our page belongs to the one object allowed to do this */
	if (!m->object->object_slid) {
		goto done;
	}

	si = m->object->vo_slide_info;

	if(!m->slid && (si->start <= m->offset && si->end > m->offset)) {
		result = TRUE;
	}

done:
	return result;
}

int vm_page_slide_counter = 0;
int vm_page_slide_errors = 0;

kern_return_t
vm_page_slide(
	vm_page_t	page,
	vm_map_offset_t	kernel_mapping_offset)
{
	kern_return_t		kr;
	vm_map_size_t		kernel_mapping_size;
	boolean_t		kernel_mapping_needs_unmap;
	vm_offset_t		kernel_vaddr;
	uint32_t		pageIndex = 0;

	assert(!page->slid);
	assert(page->object->object_slid);
	vm_object_lock_assert_exclusive(page->object);

	if (page->error)
		return KERN_FAILURE;

	/*
	 * Take a paging-in-progress reference to keep the object
	 * alive even if we have to unlock it (in vm_paging_map_object()
	 * for example)...
	 */
	vm_object_paging_begin(page->object);

	if (kernel_mapping_offset == 0) {
		/*
		 * The page hasn't already been mapped in kernel space
		 * by the caller.  Map it now, so that we can access
		 * its contents and decrypt them.
		 */
		kernel_mapping_size = PAGE_SIZE;
		kernel_mapping_needs_unmap = FALSE;
		kr = vm_paging_map_object(page,
					  page->object,
					  page->offset,
					  VM_PROT_READ | VM_PROT_WRITE,
					  FALSE,
					  &kernel_mapping_size,
					  &kernel_mapping_offset,
					  &kernel_mapping_needs_unmap);
		if (kr != KERN_SUCCESS) {
			panic("vm_page_slide: "
			      "could not map page in kernel: 0x%x\n", kr);
		}
	} else {
		kernel_mapping_size = 0;
		kernel_mapping_needs_unmap = FALSE;
	}
	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);

	/*
	 * Slide the pointers on the page.
	 */

	/*assert that slide_file_info.start/end are page-aligned?*/

	assert(!page->slid);
	assert(page->object->object_slid);

	pageIndex = (uint32_t)((page->offset - page->object->vo_slide_info->start)/PAGE_SIZE);
	kr = vm_shared_region_slide_page(page->object->vo_slide_info, kernel_vaddr, pageIndex);
	vm_page_slide_counter++;

	/*
	 * Unmap the page from the kernel's address space,
	 */
	if (kernel_mapping_needs_unmap) {
		vm_paging_unmap_object(page->object,
				       kernel_vaddr,
				       kernel_vaddr + PAGE_SIZE);
	}

	page->dirty = FALSE;
	pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);

	if (kr != KERN_SUCCESS || cs_debug > 1) {
		printf("vm_page_slide(%p): "
		       "obj %p off 0x%llx mobj %p moff 0x%llx\n",
		       page,
		       page->object, page->offset,
		       page->object->pager,
		       page->offset + page->object->paging_offset);
	}

	if (kr == KERN_SUCCESS) {
		page->slid = TRUE;
	} else {
		vm_page_slide_errors++;
	}

	vm_object_paging_end(page->object);

	return kr;
}

void inline memoryshot(unsigned int event, unsigned int control)
{
	if (vm_debug_events) {
		KERNEL_DEBUG_CONSTANT1((MACHDBG_CODE(DBG_MACH_VM_PRESSURE, event)) | control,
				       vm_page_active_count, vm_page_inactive_count,
				       vm_page_free_count, vm_page_speculative_count,
				       vm_page_throttled_count);
	}
}

boolean_t
upl_device_page(upl_page_info_t *upl)
{
	return(UPL_DEVICE_PAGE(upl));
}
boolean_t
upl_page_present(upl_page_info_t *upl, int index)
{
	return(UPL_PAGE_PRESENT(upl, index));
}
boolean_t
upl_speculative_page(upl_page_info_t *upl, int index)
{
	return(UPL_SPECULATIVE_PAGE(upl, index));
}
boolean_t
upl_dirty_page(upl_page_info_t *upl, int index)
{
	return(UPL_DIRTY_PAGE(upl, index));
}
boolean_t
upl_valid_page(upl_page_info_t *upl, int index)
{
	return(UPL_VALID_PAGE(upl, index));
}
ppnum_t
upl_phys_page(upl_page_info_t *upl, int index)
{
	return(UPL_PHYS_PAGE(upl, index));
}

#if MACH_BSD
void
vm_countdirtypages(void)
{
	vm_page_t	m;
	int		dpages;
	int		pgopages;
	int		precpages;

	dpages = 0;
	pgopages = 0;
	precpages = 0;

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	do {
		if (m ==(vm_page_t )0) break;

		if(m->dirty) dpages++;
		if(m->pageout) pgopages++;
		if(m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m ==(vm_page_t )0) break;

	} while (!queue_end(&vm_page_queue_inactive,(queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_throttled);
	do {
		if (m ==(vm_page_t )0) break;

		assert(!m->pageout);
		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m ==(vm_page_t )0) break;

	} while (!queue_end(&vm_page_queue_throttled,(queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_anonymous);
	do {
		if (m ==(vm_page_t )0) break;

		if(m->dirty) dpages++;
		if(m->pageout) pgopages++;
		if(m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m ==(vm_page_t )0) break;

	} while (!queue_end(&vm_page_queue_anonymous,(queue_entry_t) m));
	vm_page_unlock_queues();

	printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);

	dpages = 0;
	pgopages = 0;
	precpages = 0;

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_active);

	do {
		if(m == (vm_page_t )0) break;
		if(m->dirty) dpages++;
		if(m->pageout) pgopages++;
		if(m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if(m == (vm_page_t )0) break;

	} while (!queue_end(&vm_page_queue_active,(queue_entry_t) m));
	vm_page_unlock_queues();

	printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
}
#endif /* MACH_BSD */

ppnum_t upl_get_highest_page(
			     upl_t		upl)
{
	return upl->highest_page;
}

upl_size_t upl_get_size(
			     upl_t		upl)
{
	return upl->size;
}

#if UPL_DEBUG
kern_return_t  upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
	upl->ubc_alias1 = alias1;
	upl->ubc_alias2 = alias2;
	return KERN_SUCCESS;
}
int  upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
{
	if(al)
		*al = upl->ubc_alias1;
	if(al2)
		*al2 = upl->ubc_alias2;
	return KERN_SUCCESS;
}
#endif /* UPL_DEBUG */
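
/*
 * Illustrative sketch (not part of the build): vm_countdirtypages() above
 * walks each page queue and tallies dirty, pageout and precious pages.
 * The self-contained miniature below shows the same walk-and-tally shape
 * over a plain singly linked list with hypothetical types.
 */
#if 0
#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct mini_page {
	bool			dirty;
	bool			pageout;
	bool			precious;
	struct mini_page	*next;
};

static void
count_queue(const struct mini_page *head, const char *name)
{
	int dpages = 0, pgopages = 0, precpages = 0;

	for (const struct mini_page *m = head; m != NULL; m = m->next) {
		if (m->dirty)    dpages++;
		if (m->pageout)  pgopages++;
		if (m->precious) precpages++;
	}
	printf("%s: %d : %d : %d\n", name, dpages, pgopages, precpages);
}
#endif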