/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_pageout.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	The proverbial page-out daemon.
 */
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>
#include <advisory_pageout.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/host_statistics.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/sched.h>
#include <kern/thread.h>
#include <kern/kalloc.h>

#include <machine/vm_tuning.h>
#include <machine/commpage.h>

#include <sys/kern_memorystatus.h>

#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h> /* must be last */
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_shared_region.h>

#include <../bsd/crypto/aes/aes.h>

extern u_int32_t random(void);	/* from <libkern/libkern.h> */

#include <libkern/OSDebug.h>

extern void consider_pressure_events(void);
#ifndef VM_PAGEOUT_BURST_ACTIVE_THROTTLE	/* maximum iterations of the active queue to move pages to inactive */
#define VM_PAGEOUT_BURST_ACTIVE_THROTTLE	100
#endif

#ifndef VM_PAGEOUT_BURST_INACTIVE_THROTTLE	/* maximum iterations of the inactive queue w/o stealing/cleaning a page */
#ifdef	CONFIG_EMBEDDED
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE	1024
#else
#define VM_PAGEOUT_BURST_INACTIVE_THROTTLE	4096
#endif
#endif

#ifndef VM_PAGEOUT_DEADLOCK_RELIEF
#define VM_PAGEOUT_DEADLOCK_RELIEF	100	/* number of pages to move to break deadlock */
#endif

#ifndef VM_PAGEOUT_INACTIVE_RELIEF
#define VM_PAGEOUT_INACTIVE_RELIEF	50	/* minimum number of pages to move to the inactive q */
#endif

#ifndef VM_PAGE_LAUNDRY_MAX
#define VM_PAGE_LAUNDRY_MAX	128UL	/* maximum pageouts on a given pageout queue */
#endif	/* VM_PAGE_LAUNDRY_MAX */

#ifndef VM_PAGEOUT_BURST_WAIT
#define VM_PAGEOUT_BURST_WAIT	30	/* milliseconds */
#endif	/* VM_PAGEOUT_BURST_WAIT */

#ifndef VM_PAGEOUT_EMPTY_WAIT
#define VM_PAGEOUT_EMPTY_WAIT	200	/* milliseconds */
#endif	/* VM_PAGEOUT_EMPTY_WAIT */

#ifndef VM_PAGEOUT_DEADLOCK_WAIT
#define VM_PAGEOUT_DEADLOCK_WAIT	300	/* milliseconds */
#endif	/* VM_PAGEOUT_DEADLOCK_WAIT */

#ifndef VM_PAGEOUT_IDLE_WAIT
#define VM_PAGEOUT_IDLE_WAIT	10	/* milliseconds */
#endif	/* VM_PAGEOUT_IDLE_WAIT */
unsigned int	vm_page_speculative_q_age_ms = VM_PAGE_SPECULATIVE_Q_AGE_MS;
unsigned int	vm_page_speculative_percentage = 5;

#ifndef VM_PAGE_SPECULATIVE_TARGET
#define VM_PAGE_SPECULATIVE_TARGET(total) ((total) * 1 / (100 / vm_page_speculative_percentage))
#endif	/* VM_PAGE_SPECULATIVE_TARGET */

#ifndef VM_PAGE_INACTIVE_HEALTHY_LIMIT
#define VM_PAGE_INACTIVE_HEALTHY_LIMIT(total) ((total) * 1 / 200)
#endif	/* VM_PAGE_INACTIVE_HEALTHY_LIMIT */
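
/*
 * Worked example (editor's illustration, not part of the original source):
 * with vm_page_speculative_percentage at its default of 5, a pool of
 * 1,000,000 active+inactive pages yields a speculative target of
 * 1000000 * 1 / (100 / 5) = 1000000 / 20 = 50,000 pages, i.e. 5%.
 * Because the divisor is the integer quotient (100 / percentage), values
 * that do not divide 100 evenly round to a coarser step (e.g. 33 behaves
 * like 33.3%, since 100 / 33 == 3); vm_pageout_scan() also clamps the
 * percentage to the range [1, 50] before this macro is evaluated.
 */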
/*
 * To obtain a reasonable LRU approximation, the inactive queue
 * needs to be large enough to give pages on it a chance to be
 * referenced a second time.  This macro defines the fraction
 * of active+inactive pages that should be inactive.
 * The pageout daemon uses it to update vm_page_inactive_target.
 *
 * If vm_page_free_count falls below vm_page_free_target and
 * vm_page_inactive_count is below vm_page_inactive_target,
 * then the pageout daemon starts running.
 */

#ifndef	VM_PAGE_INACTIVE_TARGET
#define	VM_PAGE_INACTIVE_TARGET(avail)	((avail) * 1 / 3)
#endif	/* VM_PAGE_INACTIVE_TARGET */
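
/*
 * Illustrative sketch (editor's addition, not in the original source): with
 * 900,000 pages of available memory the inactive target is
 * 900000 / 3 = 300,000 pages.  vm_pageout_scan() additionally keeps a
 * low-water mark 0.25% below the target
 * (vm_page_inactive_min = target - target / 400 = 299,250 here) so the
 * daemon is not re-awakened by marginal dips below the target.
 */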
/*
 * Once the pageout daemon starts running, it keeps going
 * until vm_page_free_count meets or exceeds vm_page_free_target.
 */

#ifndef	VM_PAGE_FREE_TARGET
#ifdef	CONFIG_EMBEDDED
#define	VM_PAGE_FREE_TARGET(free)	(15 + (free) / 100)
#else
#define	VM_PAGE_FREE_TARGET(free)	(15 + (free) / 80)
#endif
#endif	/* VM_PAGE_FREE_TARGET */

/*
 * The pageout daemon always starts running once vm_page_free_count
 * falls below vm_page_free_min.
 */

#ifndef	VM_PAGE_FREE_MIN
#ifdef	CONFIG_EMBEDDED
#define	VM_PAGE_FREE_MIN(free)		(10 + (free) / 200)
#else
#define	VM_PAGE_FREE_MIN(free)		(10 + (free) / 100)
#endif
#endif	/* VM_PAGE_FREE_MIN */

#define	VM_PAGE_FREE_RESERVED_LIMIT	100
#define	VM_PAGE_FREE_MIN_LIMIT		1500
#define	VM_PAGE_FREE_TARGET_LIMIT	2000
/*
 * When vm_page_free_count falls below vm_page_free_reserved,
 * only vm-privileged threads can allocate pages.  vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
 * operation by dipping into the reserved pool of pages.
 */

#ifndef	VM_PAGE_FREE_RESERVED
#define	VM_PAGE_FREE_RESERVED(n)	\
	((unsigned) (6 * VM_PAGE_LAUNDRY_MAX) + (n))
#endif	/* VM_PAGE_FREE_RESERVED */
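
/*
 * Worked example (editor's illustration, assuming the defaults above): with
 * VM_PAGE_LAUNDRY_MAX == 128UL the reserve is 6 * 128 + n = 768 + n pages,
 * where n is presumably a per-machine count supplied by the caller (e.g. a
 * CPU count); n = 4 would keep 772 pages back for vm-privileged threads so
 * the pageout machinery itself cannot be starved of pages while it is
 * trying to free them.
 */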
/*
 * When we dequeue pages from the inactive list, they are
 * reactivated (ie, put back on the active queue) if referenced.
 * However, it is possible to starve the free list if other
 * processors are referencing pages faster than we can turn off
 * the referenced bit.  So we limit the number of reactivations
 * we will make per call of vm_pageout_scan().
 */
#define	VM_PAGE_REACTIVATE_LIMIT_MAX	20000
#ifndef	VM_PAGE_REACTIVATE_LIMIT
#ifdef	CONFIG_EMBEDDED
#define	VM_PAGE_REACTIVATE_LIMIT(avail)	(VM_PAGE_INACTIVE_TARGET(avail) / 2)
#else
#define	VM_PAGE_REACTIVATE_LIMIT(avail)	(MAX((avail) * 1 / 20,VM_PAGE_REACTIVATE_LIMIT_MAX))
#endif
#endif	/* VM_PAGE_REACTIVATE_LIMIT */
#define VM_PAGEOUT_INACTIVE_FORCE_RECLAIM	100
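
/*
 * Illustrative note (editor's addition): on the non-embedded path the MAX()
 * means the per-scan reactivation budget never drops below
 * VM_PAGE_REACTIVATE_LIMIT_MAX (20,000); with avail = 1,000,000 pages the
 * 5% term (50,000) wins, while on a 200,000-page configuration the 20,000
 * floor applies.
 */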
/*
 * Exported variable used to broadcast the activation of the pageout scan
 * Working Set uses this to throttle its use of pmap removes.  In this
 * way, code which runs within memory in an uncontested context does
 * not keep encountering soft faults.
 */
unsigned int	vm_pageout_scan_event_counter = 0;
/*
 * Forward declarations for internal routines.
 */

static void vm_pageout_garbage_collect(int);
static void vm_pageout_iothread_continue(struct vm_pageout_queue *);
static void vm_pageout_iothread_external(void);
static void vm_pageout_iothread_internal(void);

extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);

static thread_t	vm_pageout_external_iothread = THREAD_NULL;
static thread_t	vm_pageout_internal_iothread = THREAD_NULL;
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;

unsigned int vm_pageout_idle_wait = 0;		/* milliseconds */
unsigned int vm_pageout_empty_wait = 0;		/* milliseconds */
unsigned int vm_pageout_burst_wait = 0;		/* milliseconds */
unsigned int vm_pageout_deadlock_wait = 0;	/* milliseconds */
unsigned int vm_pageout_deadlock_relief = 0;
unsigned int vm_pageout_inactive_relief = 0;
unsigned int vm_pageout_burst_active_throttle = 0;
unsigned int vm_pageout_burst_inactive_throttle = 0;

int	vm_upl_wait_for_pages = 0;

/*
 * Protection against zero fill flushing live working sets derived
 * from existing backing store and files
 */
unsigned int vm_accellerate_zf_pageout_trigger = 400;
unsigned int zf_queue_min_count = 100;
unsigned int vm_zf_queue_count = 0;

uint64_t vm_zf_count __attribute__((aligned(8))) = 0;
/*
 * These variables record the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the variables.
 */

unsigned int vm_pageout_active = 0;		/* debugging */
unsigned int vm_pageout_active_busy = 0;	/* debugging */
unsigned int vm_pageout_inactive = 0;		/* debugging */
unsigned int vm_pageout_inactive_throttled = 0;	/* debugging */
unsigned int vm_pageout_inactive_forced = 0;	/* debugging */
unsigned int vm_pageout_inactive_nolock = 0;	/* debugging */
unsigned int vm_pageout_inactive_avoid = 0;	/* debugging */
unsigned int vm_pageout_inactive_busy = 0;	/* debugging */
unsigned int vm_pageout_inactive_error = 0;	/* debugging */
unsigned int vm_pageout_inactive_absent = 0;	/* debugging */
unsigned int vm_pageout_inactive_notalive = 0;	/* debugging */
unsigned int vm_pageout_inactive_used = 0;	/* debugging */
unsigned int vm_pageout_cache_evicted = 0;	/* debugging */
unsigned int vm_pageout_inactive_clean = 0;	/* debugging */
unsigned int vm_pageout_speculative_clean = 0;	/* debugging */
unsigned int vm_pageout_inactive_dirty_internal = 0;	/* debugging */
unsigned int vm_pageout_inactive_dirty_external = 0;	/* debugging */
unsigned int vm_pageout_inactive_deactivated = 0;	/* debugging */
unsigned int vm_pageout_inactive_zf = 0;	/* debugging */
unsigned int vm_pageout_dirty_no_pager = 0;	/* debugging */
unsigned int vm_pageout_purged_objects = 0;	/* debugging */
unsigned int vm_stat_discard = 0;		/* debugging */
unsigned int vm_stat_discard_sent = 0;		/* debugging */
unsigned int vm_stat_discard_failure = 0;	/* debugging */
unsigned int vm_stat_discard_throttle = 0;	/* debugging */
unsigned int vm_pageout_reactivation_limit_exceeded = 0;	/* debugging */
unsigned int vm_pageout_catch_ups = 0;				/* debugging */
unsigned int vm_pageout_inactive_force_reclaim = 0;	/* debugging */

unsigned int vm_pageout_scan_reclaimed_throttled = 0;
unsigned int vm_pageout_scan_active_throttled = 0;
unsigned int vm_pageout_scan_inactive_throttled_internal = 0;
unsigned int vm_pageout_scan_inactive_throttled_external = 0;
unsigned int vm_pageout_scan_throttle = 0;			/* debugging */
unsigned int vm_pageout_scan_throttle_aborted = 0;		/* debugging */
unsigned int vm_pageout_scan_burst_throttle = 0;		/* debugging */
unsigned int vm_pageout_scan_empty_throttle = 0;		/* debugging */
unsigned int vm_pageout_scan_deadlock_detected = 0;		/* debugging */
unsigned int vm_pageout_scan_active_throttle_success = 0;	/* debugging */
unsigned int vm_pageout_scan_inactive_throttle_success = 0;	/* debugging */
unsigned int vm_pageout_inactive_external_forced_reactivate_count = 0;	/* debugging */
unsigned int vm_page_speculative_count_drifts = 0;
unsigned int vm_page_speculative_count_drift_max = 0;
/*
 * Backing store throttle when BS is exhausted
 */
unsigned int	vm_backing_store_low = 0;

unsigned int vm_pageout_out_of_line = 0;
unsigned int vm_pageout_in_place = 0;

unsigned int vm_page_steal_pageout_page = 0;

/*
 * counters and statistics...
 */
unsigned long vm_page_decrypt_counter = 0;
unsigned long vm_page_decrypt_for_upl_counter = 0;
unsigned long vm_page_encrypt_counter = 0;
unsigned long vm_page_encrypt_abort_counter = 0;
unsigned long vm_page_encrypt_already_encrypted_counter = 0;
boolean_t vm_pages_encrypted = FALSE; /* are there encrypted pages ? */

struct	vm_pageout_queue vm_pageout_queue_internal;
struct	vm_pageout_queue vm_pageout_queue_external;

unsigned int vm_page_speculative_target = 0;

vm_object_t	vm_pageout_scan_wants_object = VM_OBJECT_NULL;

boolean_t (* volatile consider_buffer_cache_collect)(int) = NULL;

#if DEVELOPMENT || DEBUG
unsigned long vm_cs_validated_resets = 0;
#endif

int	vm_debug_events = 0;
/*
 *	Routine:	vm_backing_store_disable
 *	Purpose:
 *		Suspend non-privileged threads wishing to extend
 *		backing store when we are low on backing store
 *		(Synchronized by caller)
 */
void
vm_backing_store_disable(
	boolean_t	disable)
{
	if (disable) {
		vm_backing_store_low = 1;
	} else {
		if (vm_backing_store_low) {
			vm_backing_store_low = 0;
			thread_wakeup((event_t) &vm_backing_store_low);
		}
	}
}
#if MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
unsigned long vm_pageout_cluster_collisions = 0;
unsigned long vm_pageout_cluster_clusters = 0;
unsigned long vm_pageout_cluster_conversions = 0;
unsigned long vm_pageout_target_collisions = 0;
unsigned long vm_pageout_target_page_dirtied = 0;
unsigned long vm_pageout_target_page_freed = 0;
#define CLUSTER_STAT(clause)	clause
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */
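
/*
 * Usage sketch (editor's illustration): statements wrapped in CLUSTER_STAT()
 * compile to themselves when MACH_CLUSTER_STATS is configured and vanish
 * otherwise, so a counter bump such as
 *	CLUSTER_STAT(vm_pageout_cluster_conversions++);
 * costs nothing in a non-stats build.
 */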
/*
 *	Routine:	vm_pageout_object_terminate
 *	Purpose:
 *		Destroy the pageout_object, and perform all of the
 *		required cleanup actions.
 *
 *	In/Out conditions:
 *		The object must be locked, and will be returned locked.
 */
void
vm_pageout_object_terminate(
	vm_object_t	object)
{
	vm_object_t	shadow_object;

	/*
	 * Deal with the deallocation (last reference) of a pageout object
	 * (used for cleaning-in-place) by dropping the paging references/
	 * freeing pages in the original object.
	 */
	assert(object->pageout);
	shadow_object = object->shadow;
	vm_object_lock(shadow_object);
450 while (!queue_empty(&object
->memq
)) {
452 vm_object_offset_t offset
;
454 p
= (vm_page_t
) queue_first(&object
->memq
);
459 assert(!p
->cleaning
);
465 m
= vm_page_lookup(shadow_object
,
466 offset
+ object
->vo_shadow_offset
);
468 if(m
== VM_PAGE_NULL
)
471 /* used as a trigger on upl_commit etc to recognize the */
472 /* pageout daemon's subseqent desire to pageout a cleaning */
473 /* page. When the bit is on the upl commit code will */
474 /* respect the pageout bit in the target page over the */
475 /* caller's page list indication */
476 m
->dump_cleaning
= FALSE
;
478 assert((m
->dirty
) || (m
->precious
) ||
479 (m
->busy
&& m
->cleaning
));
482 * Handle the trusted pager throttle.
483 * Also decrement the burst throttle (if external).
485 vm_page_lock_queues();
487 vm_pageout_throttle_up(m
);
491 * Handle the "target" page(s). These pages are to be freed if
492 * successfully cleaned. Target pages are always busy, and are
493 * wired exactly once. The initial target pages are not mapped,
494 * (so cannot be referenced or modified) but converted target
495 * pages may have been modified between the selection as an
496 * adjacent page and conversion to a target.
500 assert(m
->wire_count
== 1);
502 m
->encrypted_cleaning
= FALSE
;
504 #if MACH_CLUSTER_STATS
505 if (m
->wanted
) vm_pageout_target_collisions
++;
508 * Revoke all access to the page. Since the object is
509 * locked, and the page is busy, this prevents the page
510 * from being dirtied after the pmap_disconnect() call
513 * Since the page is left "dirty" but "not modifed", we
514 * can detect whether the page was redirtied during
515 * pageout by checking the modify state.
517 if (pmap_disconnect(m
->phys_page
) & VM_MEM_MODIFIED
)
523 CLUSTER_STAT(vm_pageout_target_page_dirtied
++;)
524 vm_page_unwire(m
, TRUE
); /* reactivates */
525 VM_STAT_INCR(reactivations
);
528 CLUSTER_STAT(vm_pageout_target_page_freed
++;)
529 vm_page_free(m
);/* clears busy, etc. */
531 vm_page_unlock_queues();
535 * Handle the "adjacent" pages. These pages were cleaned in
536 * place, and should be left alone.
537 * If prep_pin_count is nonzero, then someone is using the
538 * page, so make it active.
540 if (!m
->active
&& !m
->inactive
&& !m
->throttled
&& !m
->private) {
544 vm_page_deactivate(m
);
546 if (m
->overwriting
) {
548 * the (COPY_OUT_FROM == FALSE) request_page_list case
552 * We do not re-set m->dirty !
553 * The page was busy so no extraneous activity
554 * could have occurred. COPY_INTO is a read into the
555 * new pages. CLEAN_IN_PLACE does actually write
556 * out the pages but handling outside of this code
557 * will take care of resetting dirty. We clear the
558 * modify however for the Programmed I/O case.
560 pmap_clear_modify(m
->phys_page
);
566 * alternate (COPY_OUT_FROM == FALSE) request_page_list case
567 * Occurs when the original page was wired
568 * at the time of the list request
570 assert(VM_PAGE_WIRED(m
));
571 vm_page_unwire(m
, TRUE
); /* reactivates */
573 m
->overwriting
= FALSE
;
576 * Set the dirty state according to whether or not the page was
577 * modified during the pageout. Note that we purposefully do
578 * NOT call pmap_clear_modify since the page is still mapped.
579 * If the page were to be dirtied between the 2 calls, this
580 * this fact would be lost. This code is only necessary to
581 * maintain statistics, since the pmap module is always
582 * consulted if m->dirty is false.
584 #if MACH_CLUSTER_STATS
585 m
->dirty
= pmap_is_modified(m
->phys_page
);
587 if (m
->dirty
) vm_pageout_cluster_dirtied
++;
588 else vm_pageout_cluster_cleaned
++;
589 if (m
->wanted
) vm_pageout_cluster_collisions
++;
594 if (m
->encrypted_cleaning
== TRUE
) {
595 m
->encrypted_cleaning
= FALSE
;
601 * Wakeup any thread waiting for the page to be un-cleaning.
604 vm_page_unlock_queues();
607 * Account for the paging reference taken in vm_paging_object_allocate.
609 vm_object_activity_end(shadow_object
);
610 vm_object_unlock(shadow_object
);
612 assert(object
->ref_count
== 0);
613 assert(object
->paging_in_progress
== 0);
614 assert(object
->activity_in_progress
== 0);
615 assert(object
->resident_page_count
== 0);
620 * Routine: vm_pageclean_setup
622 * Purpose: setup a page to be cleaned (made non-dirty), but not
623 * necessarily flushed from the VM page cache.
624 * This is accomplished by cleaning in place.
626 * The page must not be busy, and new_object
634 vm_object_t new_object
,
635 vm_object_offset_t new_offset
)
639 assert(!m
->cleaning
);
643 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
644 m
->object
, m
->offset
, m
,
647 pmap_clear_modify(m
->phys_page
);
650 * Mark original page as cleaning in place.
657 * Convert the fictitious page to a private shadow of
660 assert(new_m
->fictitious
);
661 assert(new_m
->phys_page
== vm_page_fictitious_addr
);
662 new_m
->fictitious
= FALSE
;
663 new_m
->private = TRUE
;
664 new_m
->pageout
= TRUE
;
665 new_m
->phys_page
= m
->phys_page
;
667 vm_page_lockspin_queues();
669 vm_page_unlock_queues();
671 vm_page_insert(new_m
, new_object
, new_offset
);
672 assert(!new_m
->wanted
);
677 * Routine: vm_pageout_initialize_page
679 * Causes the specified page to be initialized in
680 * the appropriate memory object. This routine is used to push
681 * pages into a copy-object when they are modified in the
684 * The page is moved to a temporary object and paged out.
687 * The page in question must not be on any pageout queues.
688 * The object to which it belongs must be locked.
689 * The page must be busy, but not hold a paging reference.
692 * Move this page to a completely new object.
695 vm_pageout_initialize_page(
699 vm_object_offset_t paging_offset
;
700 vm_page_t holding_page
;
701 memory_object_t pager
;
704 "vm_pageout_initialize_page, page 0x%X\n",
709 * Verify that we really want to clean this page
716 * Create a paging reference to let us play with the object.
719 paging_offset
= m
->offset
+ object
->paging_offset
;
721 if (m
->absent
|| m
->error
|| m
->restart
|| (!m
->dirty
&& !m
->precious
)) {
723 panic("reservation without pageout?"); /* alan */
724 vm_object_unlock(object
);
730 * If there's no pager, then we can't clean the page. This should
731 * never happen since this should be a copy object and therefore not
732 * an external object, so the pager should always be there.
735 pager
= object
->pager
;
737 if (pager
== MEMORY_OBJECT_NULL
) {
739 panic("missing pager for copy object");
743 /* set the page for future call to vm_fault_list_request */
744 vm_object_paging_begin(object
);
747 pmap_clear_modify(m
->phys_page
);
750 m
->list_req_pending
= TRUE
;
754 vm_page_lockspin_queues();
756 vm_page_unlock_queues();
758 vm_object_unlock(object
);
761 * Write the data to its pager.
762 * Note that the data is passed by naming the new object,
763 * not a virtual address; the pager interface has been
764 * manipulated to use the "internal memory" data type.
765 * [The object reference from its allocation is donated
766 * to the eventual recipient.]
768 memory_object_data_initialize(pager
, paging_offset
, PAGE_SIZE
);
770 vm_object_lock(object
);
771 vm_object_paging_end(object
);
774 #if MACH_CLUSTER_STATS
775 #define MAXCLUSTERPAGES 16
777 unsigned long pages_in_cluster
;
778 unsigned long pages_at_higher_offsets
;
779 unsigned long pages_at_lower_offsets
;
780 } cluster_stats
[MAXCLUSTERPAGES
];
781 #endif /* MACH_CLUSTER_STATS */
/*
 * vm_pageout_cluster:
 *
 * Given a page, queue it to the appropriate I/O thread,
 * which will page it out and attempt to clean adjacent pages
 * in the same operation.
 *
 * The page must be busy, and the object and queues locked.  We will take a
 * paging reference to prevent deallocation or collapse when we
 * release the object lock back at the call site.  The I/O thread
 * is responsible for consuming this reference.
 *
 * The page must not be on any pageout queue.
 */
void
vm_pageout_cluster(vm_page_t m)
{
	vm_object_t	object = m->object;
	struct		vm_pageout_queue *q;
807 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
808 object
, m
->offset
, m
, 0, 0);
812 lck_mtx_assert(&vm_page_queue_lock
, LCK_MTX_ASSERT_OWNED
);
814 vm_object_lock_assert_exclusive(object
);
817 * Only a certain kind of page is appreciated here.
819 assert(m
->busy
&& (m
->dirty
|| m
->precious
) && (!VM_PAGE_WIRED(m
)));
820 assert(!m
->cleaning
&& !m
->pageout
);
821 #ifndef CONFIG_FREEZE
822 assert(!m
->inactive
&& !m
->active
);
823 assert(!m
->throttled
);
827 * protect the object from collapse -
828 * locking in the object's paging_offset.
830 vm_object_paging_begin(object
);
833 * set the page for future call to vm_fault_list_request
834 * page should already be marked busy
837 m
->list_req_pending
= TRUE
;
841 if (object
->internal
== TRUE
)
842 q
= &vm_pageout_queue_internal
;
844 q
= &vm_pageout_queue_external
;
847 * pgo_laundry count is tied to the laundry bit
852 m
->pageout_queue
= TRUE
;
853 queue_enter(&q
->pgo_pending
, m
, vm_page_t
, pageq
);
855 if (q
->pgo_idle
== TRUE
) {
857 thread_wakeup((event_t
) &q
->pgo_pending
);
unsigned long vm_pageout_throttle_up_count = 0;

/*
 * A page is back from laundry or we are stealing it back from
 * the laundering state.  See if there are some pages waiting to
 * go to laundry and if we can let some of them go now.
 *
 * Object and page queues must be locked.
 */
void
vm_pageout_throttle_up(
	vm_page_t	m)
{
	struct vm_pageout_queue *q;
879 assert(m
->object
!= VM_OBJECT_NULL
);
880 assert(m
->object
!= kernel_object
);
882 vm_pageout_throttle_up_count
++;
884 if (m
->object
->internal
== TRUE
)
885 q
= &vm_pageout_queue_internal
;
887 q
= &vm_pageout_queue_external
;
889 if (m
->pageout_queue
== TRUE
) {
891 queue_remove(&q
->pgo_pending
, m
, vm_page_t
, pageq
);
892 m
->pageout_queue
= FALSE
;
894 m
->pageq
.next
= NULL
;
895 m
->pageq
.prev
= NULL
;
897 vm_object_paging_end(m
->object
);
900 if ( m
->laundry
== TRUE
) {
905 if (q
->pgo_throttled
== TRUE
) {
906 q
->pgo_throttled
= FALSE
;
907 thread_wakeup((event_t
) &q
->pgo_laundry
);
909 if (q
->pgo_draining
== TRUE
&& q
->pgo_laundry
== 0) {
910 q
->pgo_draining
= FALSE
;
911 thread_wakeup((event_t
) (&q
->pgo_laundry
+1));
/*
 * VM memory pressure monitoring.
 *
 * vm_pageout_scan() keeps track of the number of pages it considers and
 * reclaims, in the currently active vm_pageout_stat[vm_pageout_stat_now].
 *
 * compute_memory_pressure() is called every second from compute_averages()
 * and moves "vm_pageout_stat_now" forward, to start accumulating the number
 * of reclaimed pages in a new vm_pageout_stat[] bucket.
 *
 * mach_vm_pressure_monitor() collects past statistics about memory pressure.
 * The caller provides the number of seconds ("nsecs") worth of statistics
 * it wants, up to 30 seconds.
 * It computes the number of pages reclaimed in the past "nsecs" seconds and
 * also returns the number of pages the system still needs to reclaim at this
 * moment.
 */
#define VM_PAGEOUT_STAT_SIZE	31
struct vm_pageout_stat {
	unsigned int considered;
	unsigned int reclaimed;
} vm_pageout_stats[VM_PAGEOUT_STAT_SIZE] = {{0,0}, };
unsigned int vm_pageout_stat_now = 0;
unsigned int vm_memory_pressure = 0;

#define VM_PAGEOUT_STAT_BEFORE(i) \
	(((i) == 0) ? VM_PAGEOUT_STAT_SIZE - 1 : (i) - 1)
#define VM_PAGEOUT_STAT_AFTER(i) \
	(((i) == VM_PAGEOUT_STAT_SIZE - 1) ? 0 : (i) + 1)
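
/*
 * Illustrative note (editor's addition): vm_pageout_stats[] is a ring of
 * VM_PAGEOUT_STAT_SIZE (31) one-second buckets, giving roughly 30 seconds
 * of history.  VM_PAGEOUT_STAT_BEFORE() and VM_PAGEOUT_STAT_AFTER() step
 * through it with wraparound, e.g. VM_PAGEOUT_STAT_BEFORE(0) == 30 and
 * VM_PAGEOUT_STAT_AFTER(30) == 0.
 */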
/*
 * Called from compute_averages().
 */
void
compute_memory_pressure(
	__unused void *arg)
{
	unsigned int vm_pageout_next;

	vm_memory_pressure =
		vm_pageout_stats[VM_PAGEOUT_STAT_BEFORE(vm_pageout_stat_now)].reclaimed;

	commpage_set_memory_pressure( vm_memory_pressure );

	/* move "now" forward */
	vm_pageout_next = VM_PAGEOUT_STAT_AFTER(vm_pageout_stat_now);
	vm_pageout_stats[vm_pageout_next].considered = 0;
	vm_pageout_stats[vm_pageout_next].reclaimed = 0;
	vm_pageout_stat_now = vm_pageout_next;
}
unsigned int
mach_vm_ctl_page_free_wanted(void)
{
	unsigned int page_free_target, page_free_count, page_free_wanted;

	page_free_target = vm_page_free_target;
	page_free_count = vm_page_free_count;
	if (page_free_target > page_free_count) {
		page_free_wanted = page_free_target - page_free_count;
	} else {
		page_free_wanted = 0;
	}

	return page_free_wanted;
}
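
/*
 * Worked example (editor's illustration): if vm_page_free_target is 2,000
 * pages and vm_page_free_count has dropped to 1,500, the system still wants
 * 500 pages; once the free count meets or exceeds the target the function
 * reports 0.  mach_vm_pressure_monitor() hands this value back to callers
 * via *pages_wanted_p.
 */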
985 mach_vm_pressure_monitor(
986 boolean_t wait_for_pressure
,
987 unsigned int nsecs_monitored
,
988 unsigned int *pages_reclaimed_p
,
989 unsigned int *pages_wanted_p
)
992 unsigned int vm_pageout_then
, vm_pageout_now
;
993 unsigned int pages_reclaimed
;
996 * We don't take the vm_page_queue_lock here because we don't want
997 * vm_pressure_monitor() to get in the way of the vm_pageout_scan()
998 * thread when it's trying to reclaim memory. We don't need fully
999 * accurate monitoring anyway...
1002 if (wait_for_pressure
) {
1003 /* wait until there's memory pressure */
1004 while (vm_page_free_count
>= vm_page_free_target
) {
1005 wr
= assert_wait((event_t
) &vm_page_free_wanted
,
1006 THREAD_INTERRUPTIBLE
);
1007 if (wr
== THREAD_WAITING
) {
1008 wr
= thread_block(THREAD_CONTINUE_NULL
);
1010 if (wr
== THREAD_INTERRUPTED
) {
1011 return KERN_ABORTED
;
1013 if (wr
== THREAD_AWAKENED
) {
1015 * The memory pressure might have already
1016 * been relieved but let's not block again
1017 * and let's report that there was memory
1018 * pressure at some point.
1025 /* provide the number of pages the system wants to reclaim */
1026 if (pages_wanted_p
!= NULL
) {
1027 *pages_wanted_p
= mach_vm_ctl_page_free_wanted();
1030 if (pages_reclaimed_p
== NULL
) {
1031 return KERN_SUCCESS
;
1034 /* provide number of pages reclaimed in the last "nsecs_monitored" */
1036 vm_pageout_now
= vm_pageout_stat_now
;
1037 pages_reclaimed
= 0;
1038 for (vm_pageout_then
=
1039 VM_PAGEOUT_STAT_BEFORE(vm_pageout_now
);
1040 vm_pageout_then
!= vm_pageout_now
&&
1041 nsecs_monitored
-- != 0;
1043 VM_PAGEOUT_STAT_BEFORE(vm_pageout_then
)) {
1044 pages_reclaimed
+= vm_pageout_stats
[vm_pageout_then
].reclaimed
;
1046 } while (vm_pageout_now
!= vm_pageout_stat_now
);
1047 *pages_reclaimed_p
= pages_reclaimed
;
1049 return KERN_SUCCESS
;
/* Page States: Used below to maintain the page state
   before it's removed from its Q. This saved state
   helps us do the right accounting in certain cases
*/
#define PAGE_STATE_SPECULATIVE		1
#define PAGE_STATE_ZEROFILL		2
#define PAGE_STATE_INACTIVE		3
#define PAGE_STATE_INACTIVE_FIRST	4
#define VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m)			\
	MACRO_BEGIN						\
	/*							\
	 * If a "reusable" page somehow made it back into	\
	 * the active queue, it's been re-used and is not	\
	 * quite re-usable.					\
	 * If the VM object was "all_reusable", consider it	\
	 * as "all re-used" instead of converting it to		\
	 * "partially re-used", which could be expensive.	\
	 */							\
	if ((m)->reusable ||					\
	    (m)->object->all_reusable) {			\
		vm_object_reuse_pages((m)->object,		\
				      (m)->offset,		\
				      (m)->offset + PAGE_SIZE_64,	\
				      FALSE);			\
	}							\
	MACRO_END
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT		128
#define VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX	1024

#define	FCS_IDLE		0
#define FCS_DELAYED		1
#define FCS_DEADLOCK_DETECTED	2

struct flow_control {
	int		state;
	mach_timespec_t	ts;
};
/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *	It returns with vm_page_queue_free_lock held and
 *	vm_page_free_wanted == 0.
 */
void
vm_pageout_scan(void)
{
	unsigned int loop_count = 0;
	unsigned int inactive_burst_count = 0;
	unsigned int active_burst_count = 0;
	unsigned int reactivated_this_call;
	unsigned int reactivate_limit;
	vm_page_t   local_freeq = NULL;
	int         local_freed = 0;
	int         delayed_unlock_limit = 0;
	int         refmod_state = 0;
	int	vm_pageout_deadlock_target = 0;
	struct	vm_pageout_queue *iq;
	struct	vm_pageout_queue *eq;
	struct	vm_speculative_age_q *sq;
	struct  flow_control	flow_control = { 0, { 0, 0 } };
	boolean_t inactive_throttled = FALSE;
	boolean_t try_failed;
	unsigned int msecs = 0;
	vm_object_t	last_object_tried;
	uint64_t	zf_run_count;
	uint32_t	catch_up_count = 0;
	uint32_t	inactive_reclaim_run;
	boolean_t	forced_reclaim;
	int		page_prev_state = 0;
	int		cache_evict_throttle = 0;
	uint32_t	vm_pageout_inactive_external_forced_reactivate_limit = 0;
1133 VM_DEBUG_EVENT(vm_pageout_scan
, VM_PAGEOUT_SCAN
, DBG_FUNC_START
,
1134 vm_pageout_speculative_clean
, vm_pageout_inactive_clean
,
1135 vm_pageout_inactive_dirty_internal
, vm_pageout_inactive_dirty_external
);
1137 flow_control
.state
= FCS_IDLE
;
1138 iq
= &vm_pageout_queue_internal
;
1139 eq
= &vm_pageout_queue_external
;
1140 sq
= &vm_page_queue_speculative
[VM_PAGE_SPECULATIVE_AGED_Q
];
1143 XPR(XPR_VM_PAGEOUT
, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
1146 vm_page_lock_queues();
1147 delayed_unlock
= 1; /* must be nonzero if Qs are locked, 0 if unlocked */
1150 * Calculate the max number of referenced pages on the inactive
1151 * queue that we will reactivate.
1153 reactivated_this_call
= 0;
1154 reactivate_limit
= VM_PAGE_REACTIVATE_LIMIT(vm_page_active_count
+
1155 vm_page_inactive_count
);
1156 inactive_reclaim_run
= 0;
1160 * We want to gradually dribble pages from the active queue
1161 * to the inactive queue. If we let the inactive queue get
1162 * very small, and then suddenly dump many pages into it,
1163 * those pages won't get a sufficient chance to be referenced
1164 * before we start taking them from the inactive queue.
1166 * We must limit the rate at which we send pages to the pagers
1167 * so that we don't tie up too many pages in the I/O queues.
1168 * We implement a throttling mechanism using the laundry count
1169 * to limit the number of pages outstanding to the default
1170 * and external pagers. We can bypass the throttles and look
1171 * for clean pages if the pageout queues don't drain in a timely
1172 * fashion since this may indicate that the pageout paths are
1173 * stalled waiting for memory, which only we can provide.
1178 assert(delayed_unlock
!=0);
1181 * A page is "zero-filled" if it was not paged in from somewhere,
1182 * and it belongs to an object at least VM_ZF_OBJECT_SIZE_THRESHOLD big.
1183 * Recalculate the zero-filled page ratio. We use this to apportion
1184 * victimized pages between the normal and zero-filled inactive
1185 * queues according to their relative abundance in memory. Thus if a task
1186 * is flooding memory with zf pages, we begin to hunt them down.
1187 * It would be better to throttle greedy tasks at a higher level,
1188 * but at the moment mach vm cannot do this.
1191 uint64_t total
= vm_page_active_count
+ vm_page_inactive_count
;
1192 uint64_t normal
= total
- vm_zf_count
;
1194 /* zf_ratio is the number of zf pages we victimize per normal page */
1196 if (vm_zf_count
< vm_accellerate_zf_pageout_trigger
)
1198 else if ((vm_zf_count
<= normal
) || (normal
== 0))
1201 zf_ratio
= vm_zf_count
/ normal
;
1207 * Recalculate vm_page_inactivate_target.
1209 vm_page_inactive_target
= VM_PAGE_INACTIVE_TARGET(vm_page_active_count
+
1210 vm_page_inactive_count
+
1211 vm_page_speculative_count
);
1213 * don't want to wake the pageout_scan thread up everytime we fall below
1214 * the targets... set a low water mark at 0.25% below the target
1216 vm_page_inactive_min
= vm_page_inactive_target
- (vm_page_inactive_target
/ 400);
1218 if (vm_page_speculative_percentage
> 50)
1219 vm_page_speculative_percentage
= 50;
1220 else if (vm_page_speculative_percentage
<= 0)
1221 vm_page_speculative_percentage
= 1;
1223 vm_page_speculative_target
= VM_PAGE_SPECULATIVE_TARGET(vm_page_active_count
+
1224 vm_page_inactive_count
);
1226 vm_pageout_inactive_external_forced_reactivate_limit
= vm_page_active_count
+ vm_page_inactive_count
;
1229 last_object_tried
= NULL
;
1232 if ((vm_page_inactive_count
+ vm_page_speculative_count
) < VM_PAGE_INACTIVE_HEALTHY_LIMIT(vm_page_active_count
))
1233 catch_up_count
= vm_page_inactive_count
+ vm_page_speculative_count
;
1240 DTRACE_VM2(rev
, int, 1, (uint64_t *), NULL
);
1242 if (delayed_unlock
== 0) {
1243 vm_page_lock_queues();
1246 if (vm_upl_wait_for_pages
< 0)
1247 vm_upl_wait_for_pages
= 0;
1249 delayed_unlock_limit
= VM_PAGEOUT_DELAYED_UNLOCK_LIMIT
+ vm_upl_wait_for_pages
;
1251 if (delayed_unlock_limit
> VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX
)
1252 delayed_unlock_limit
= VM_PAGEOUT_DELAYED_UNLOCK_LIMIT_MAX
;
1255 * Move pages from active to inactive if we're below the target
1257 if ((vm_page_inactive_count
+ vm_page_speculative_count
) >= vm_page_inactive_target
)
1258 goto done_moving_active_pages
;
1260 if (object
!= NULL
) {
1261 vm_object_unlock(object
);
1263 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
1266 * Don't sweep through active queue more than the throttle
1267 * which should be kept relatively low
1269 active_burst_count
= MIN(vm_pageout_burst_active_throttle
,
1270 vm_page_active_count
);
1272 VM_DEBUG_EVENT(vm_pageout_balance
, VM_PAGEOUT_BALANCE
, DBG_FUNC_START
,
1273 vm_pageout_inactive
, vm_pageout_inactive_used
, vm_page_free_count
, local_freed
);
1275 VM_DEBUG_EVENT(vm_pageout_balance
, VM_PAGEOUT_BALANCE
, DBG_FUNC_NONE
,
1276 vm_pageout_speculative_clean
, vm_pageout_inactive_clean
,
1277 vm_pageout_inactive_dirty_internal
, vm_pageout_inactive_dirty_external
);
1279 while (!queue_empty(&vm_page_queue_active
) && active_burst_count
--) {
1281 vm_pageout_active
++;
1283 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
1285 assert(m
->active
&& !m
->inactive
);
1286 assert(!m
->laundry
);
1287 assert(m
->object
!= kernel_object
);
1288 assert(m
->phys_page
!= vm_page_guard_addr
);
1290 DTRACE_VM2(scan
, int, 1, (uint64_t *), NULL
);
1293 * The page might be absent or busy,
1294 * but vm_page_deactivate can handle that.
1296 vm_page_deactivate(m
);
1298 if (delayed_unlock
++ > delayed_unlock_limit
) {
1301 vm_page_unlock_queues();
1303 VM_DEBUG_EVENT(vm_pageout_freelist
, VM_PAGEOUT_FREELIST
, DBG_FUNC_START
,
1304 vm_page_free_count
, local_freed
, delayed_unlock_limit
, 1);
1306 vm_page_free_list(local_freeq
, TRUE
);
1308 VM_DEBUG_EVENT(vm_pageout_freelist
, VM_PAGEOUT_FREELIST
, DBG_FUNC_END
,
1309 vm_page_free_count
, 0, 0, 1);
1313 vm_page_lock_queues();
1315 lck_mtx_yield(&vm_page_queue_lock
);
1320 * continue the while loop processing
1321 * the active queue... need to hold
1322 * the page queues lock
1327 VM_DEBUG_EVENT(vm_pageout_balance
, VM_PAGEOUT_BALANCE
, DBG_FUNC_END
,
1328 vm_page_active_count
, vm_page_inactive_count
, vm_page_speculative_count
, vm_page_inactive_target
);
1331 /**********************************************************************
1332 * above this point we're playing with the active queue
1333 * below this point we're playing with the throttling mechanisms
1334 * and the inactive queue
1335 **********************************************************************/
1337 done_moving_active_pages
:
1339 if (vm_page_free_count
+ local_freed
>= vm_page_free_target
) {
1340 if (object
!= NULL
) {
1341 vm_object_unlock(object
);
1344 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
1347 vm_page_unlock_queues();
1349 VM_DEBUG_EVENT(vm_pageout_freelist
, VM_PAGEOUT_FREELIST
, DBG_FUNC_START
,
1350 vm_page_free_count
, local_freed
, delayed_unlock_limit
, 2);
1352 vm_page_free_list(local_freeq
, TRUE
);
1354 VM_DEBUG_EVENT(vm_pageout_freelist
, VM_PAGEOUT_FREELIST
, DBG_FUNC_END
,
1355 vm_page_free_count
, local_freed
, 0, 2);
1359 vm_page_lock_queues();
1362 * recalculate vm_page_inactivate_target
1364 vm_page_inactive_target
= VM_PAGE_INACTIVE_TARGET(vm_page_active_count
+
1365 vm_page_inactive_count
+
1366 vm_page_speculative_count
);
1367 #ifndef CONFIG_EMBEDDED
1368 if (((vm_page_inactive_count
+ vm_page_speculative_count
) < vm_page_inactive_target
) &&
1369 !queue_empty(&vm_page_queue_active
)) {
1371 * inactive target still not met... keep going
1372 * until we get the queues balanced...
1377 lck_mtx_lock(&vm_page_queue_free_lock
);
1379 if ((vm_page_free_count
>= vm_page_free_target
) &&
1380 (vm_page_free_wanted
== 0) && (vm_page_free_wanted_privileged
== 0)) {
1382 * done - we have met our target *and*
1383 * there is no one waiting for a page.
1385 vm_page_unlock_queues();
1387 thread_wakeup((event_t
) &vm_pageout_garbage_collect
);
1389 assert(vm_pageout_scan_wants_object
== VM_OBJECT_NULL
);
1391 VM_DEBUG_EVENT(vm_pageout_scan
, VM_PAGEOUT_SCAN
, DBG_FUNC_NONE
,
1392 vm_pageout_inactive
, vm_pageout_inactive_used
, 0, 0);
1393 VM_DEBUG_EVENT(vm_pageout_scan
, VM_PAGEOUT_SCAN
, DBG_FUNC_END
,
1394 vm_pageout_speculative_clean
, vm_pageout_inactive_clean
,
1395 vm_pageout_inactive_dirty_internal
, vm_pageout_inactive_dirty_external
);
1399 lck_mtx_unlock(&vm_page_queue_free_lock
);
1403 * Before anything, we check if we have any ripe volatile
1404 * objects around. If so, try to purge the first object.
1405 * If the purge fails, fall through to reclaim a page instead.
1406 * If the purge succeeds, go back to the top and reevalute
1407 * the new memory situation.
1409 assert (available_for_purge
>=0);
1410 if (available_for_purge
)
1412 if (object
!= NULL
) {
1413 vm_object_unlock(object
);
1417 VM_DEBUG_EVENT(vm_pageout_purgeone
, VM_PAGEOUT_PURGEONE
, DBG_FUNC_START
, vm_page_free_count
, 0, 0, 0);
1419 if (TRUE
== vm_purgeable_object_purge_one()) {
1421 VM_DEBUG_EVENT(vm_pageout_purgeone
, VM_PAGEOUT_PURGEONE
, DBG_FUNC_END
, vm_page_free_count
, 0, 0, 0);
1425 VM_DEBUG_EVENT(vm_pageout_purgeone
, VM_PAGEOUT_PURGEONE
, DBG_FUNC_END
, 0, 0, 0, -1);
1427 if (queue_empty(&sq
->age_q
) && vm_page_speculative_count
) {
1429 * try to pull pages from the aging bins...
1430 * see vm_page.h for an explanation of how
1431 * this mechanism works
1433 struct vm_speculative_age_q
*aq
;
1434 mach_timespec_t ts_fully_aged
;
1435 boolean_t can_steal
= FALSE
;
1436 int num_scanned_queues
;
1438 aq
= &vm_page_queue_speculative
[speculative_steal_index
];
1440 num_scanned_queues
= 0;
1441 while (queue_empty(&aq
->age_q
) &&
1442 num_scanned_queues
++ != VM_PAGE_MAX_SPECULATIVE_AGE_Q
) {
1444 speculative_steal_index
++;
1446 if (speculative_steal_index
> VM_PAGE_MAX_SPECULATIVE_AGE_Q
)
1447 speculative_steal_index
= VM_PAGE_MIN_SPECULATIVE_AGE_Q
;
1449 aq
= &vm_page_queue_speculative
[speculative_steal_index
];
1452 if (num_scanned_queues
== VM_PAGE_MAX_SPECULATIVE_AGE_Q
+ 1) {
1454 * XXX We've scanned all the speculative
1455 * queues but still haven't found one
1456 * that is not empty, even though
1457 * vm_page_speculative_count is not 0.
1459 * report the anomaly...
1461 printf("vm_pageout_scan: "
1462 "all speculative queues empty "
1463 "but count=%d. Re-adjusting.\n",
1464 vm_page_speculative_count
);
1465 if (vm_page_speculative_count
> vm_page_speculative_count_drift_max
)
1466 vm_page_speculative_count_drift_max
= vm_page_speculative_count
;
1467 vm_page_speculative_count_drifts
++;
1469 Debugger("vm_pageout_scan: no speculative pages");
1472 vm_page_speculative_count
= 0;
1473 /* ... and continue */
1477 if (vm_page_speculative_count
> vm_page_speculative_target
)
1480 ts_fully_aged
.tv_sec
= (VM_PAGE_MAX_SPECULATIVE_AGE_Q
* vm_page_speculative_q_age_ms
) / 1000;
1481 ts_fully_aged
.tv_nsec
= ((VM_PAGE_MAX_SPECULATIVE_AGE_Q
* vm_page_speculative_q_age_ms
) % 1000)
1482 * 1000 * NSEC_PER_USEC
;
1484 ADD_MACH_TIMESPEC(&ts_fully_aged
, &aq
->age_ts
);
1488 clock_get_system_nanotime(&sec
, &nsec
);
1489 ts
.tv_sec
= (unsigned int) sec
;
1492 if (CMP_MACH_TIMESPEC(&ts
, &ts_fully_aged
) >= 0)
1495 if (can_steal
== TRUE
)
1496 vm_page_speculate_ageit(aq
);
1498 if (queue_empty(&sq
->age_q
) && cache_evict_throttle
== 0) {
1501 if (object
!= NULL
) {
1502 vm_object_unlock(object
);
1505 pages_evicted
= vm_object_cache_evict(100, 10);
1507 if (pages_evicted
) {
1509 vm_pageout_cache_evicted
+= pages_evicted
;
1511 VM_DEBUG_EVENT(vm_pageout_cache_evict
, VM_PAGEOUT_CACHE_EVICT
, DBG_FUNC_NONE
,
1512 vm_page_free_count
, pages_evicted
, vm_pageout_cache_evicted
, 0);
1515 * we just freed up to 100 pages,
1516 * so go back to the top of the main loop
1517 * and re-evaulate the memory situation
1521 cache_evict_throttle
= 100;
1523 if (cache_evict_throttle
)
1524 cache_evict_throttle
--;
1528 * Sometimes we have to pause:
1529 * 1) No inactive pages - nothing to do.
1530 * 2) Flow control - default pageout queue is full
1531 * 3) Loop control - no acceptable pages found on the inactive queue
1532 * within the last vm_pageout_burst_inactive_throttle iterations
1534 if (queue_empty(&vm_page_queue_inactive
) && queue_empty(&vm_page_queue_zf
) && queue_empty(&sq
->age_q
)) {
1535 vm_pageout_scan_empty_throttle
++;
1536 msecs
= vm_pageout_empty_wait
;
1537 goto vm_pageout_scan_delay
;
1539 } else if (inactive_burst_count
>=
1540 MIN(vm_pageout_burst_inactive_throttle
,
1541 (vm_page_inactive_count
+
1542 vm_page_speculative_count
))) {
1543 vm_pageout_scan_burst_throttle
++;
1544 msecs
= vm_pageout_burst_wait
;
1545 goto vm_pageout_scan_delay
;
1547 } else if (VM_PAGE_Q_THROTTLED(iq
) &&
1548 VM_DYNAMIC_PAGING_ENABLED(memory_manager_default
)) {
1552 switch (flow_control
.state
) {
1555 reset_deadlock_timer
:
1556 ts
.tv_sec
= vm_pageout_deadlock_wait
/ 1000;
1557 ts
.tv_nsec
= (vm_pageout_deadlock_wait
% 1000) * 1000 * NSEC_PER_USEC
;
1558 clock_get_system_nanotime(&sec
, &nsec
);
1559 flow_control
.ts
.tv_sec
= (unsigned int) sec
;
1560 flow_control
.ts
.tv_nsec
= nsec
;
1561 ADD_MACH_TIMESPEC(&flow_control
.ts
, &ts
);
1563 flow_control
.state
= FCS_DELAYED
;
1564 msecs
= vm_pageout_deadlock_wait
;
1569 clock_get_system_nanotime(&sec
, &nsec
);
1570 ts
.tv_sec
= (unsigned int) sec
;
1573 if (CMP_MACH_TIMESPEC(&ts
, &flow_control
.ts
) >= 0) {
1575 * the pageout thread for the default pager is potentially
1576 * deadlocked since the
1577 * default pager queue has been throttled for more than the
1578 * allowable time... we need to move some clean pages or dirty
1579 * pages belonging to the external pagers if they aren't throttled
1580 * vm_page_free_wanted represents the number of threads currently
1581 * blocked waiting for pages... we'll move one page for each of
1582 * these plus a fixed amount to break the logjam... once we're done
1583 * moving this number of pages, we'll re-enter the FSC_DELAYED state
1584 * with a new timeout target since we have no way of knowing
1585 * whether we've broken the deadlock except through observation
1586 * of the queue associated with the default pager... we need to
1587 * stop moving pages and allow the system to run to see what
1588 * state it settles into.
1590 vm_pageout_deadlock_target
= vm_pageout_deadlock_relief
+ vm_page_free_wanted
+ vm_page_free_wanted_privileged
;
1591 vm_pageout_scan_deadlock_detected
++;
1592 flow_control
.state
= FCS_DEADLOCK_DETECTED
;
1594 thread_wakeup((event_t
) &vm_pageout_garbage_collect
);
1595 goto consider_inactive
;
1598 * just resniff instead of trying
1599 * to compute a new delay time... we're going to be
1600 * awakened immediately upon a laundry completion,
1601 * so we won't wait any longer than necessary
1603 msecs
= vm_pageout_idle_wait
;
1606 case FCS_DEADLOCK_DETECTED
:
1607 if (vm_pageout_deadlock_target
)
1608 goto consider_inactive
;
1609 goto reset_deadlock_timer
;
1612 vm_pageout_scan_throttle
++;
1613 iq
->pgo_throttled
= TRUE
;
1614 vm_pageout_scan_delay
:
1615 if (object
!= NULL
) {
1616 vm_object_unlock(object
);
1619 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
1622 vm_page_unlock_queues();
1624 VM_DEBUG_EVENT(vm_pageout_freelist
, VM_PAGEOUT_FREELIST
, DBG_FUNC_START
,
1625 vm_page_free_count
, local_freed
, delayed_unlock_limit
, 3);
1627 vm_page_free_list(local_freeq
, TRUE
);
1629 VM_DEBUG_EVENT(vm_pageout_freelist
, VM_PAGEOUT_FREELIST
, DBG_FUNC_END
,
1630 vm_page_free_count
, local_freed
, 0, 3);
1634 vm_page_lock_queues();
1636 if (flow_control
.state
== FCS_DELAYED
&&
1637 !VM_PAGE_Q_THROTTLED(iq
)) {
1638 flow_control
.state
= FCS_IDLE
;
1639 vm_pageout_scan_throttle_aborted
++;
1640 goto consider_inactive
;
1644 VM_CHECK_MEMORYSTATUS
;
1646 assert_wait_timeout((event_t
) &iq
->pgo_laundry
, THREAD_INTERRUPTIBLE
, msecs
, 1000*NSEC_PER_USEC
);
1647 counter(c_vm_pageout_scan_block
++);
1649 vm_page_unlock_queues();
1651 assert(vm_pageout_scan_wants_object
== VM_OBJECT_NULL
);
1653 VM_DEBUG_EVENT(vm_pageout_thread_block
, VM_PAGEOUT_THREAD_BLOCK
, DBG_FUNC_START
,
1654 iq
->pgo_laundry
, iq
->pgo_maxlaundry
, msecs
, 0);
1656 thread_block(THREAD_CONTINUE_NULL
);
1658 VM_DEBUG_EVENT(vm_pageout_thread_block
, VM_PAGEOUT_THREAD_BLOCK
, DBG_FUNC_END
,
1659 iq
->pgo_laundry
, iq
->pgo_maxlaundry
, msecs
, 0);
1661 vm_page_lock_queues();
1664 iq
->pgo_throttled
= FALSE
;
1666 if (loop_count
>= vm_page_inactive_count
)
1668 inactive_burst_count
= 0;
1675 flow_control
.state
= FCS_IDLE
;
1677 vm_pageout_inactive_external_forced_reactivate_limit
= MIN((vm_page_active_count
+ vm_page_inactive_count
),
1678 vm_pageout_inactive_external_forced_reactivate_limit
);
1680 inactive_burst_count
++;
1681 vm_pageout_inactive
++;
1683 /* Choose a victim. */
1688 if (VM_DYNAMIC_PAGING_ENABLED(memory_manager_default
)) {
1689 assert(vm_page_throttled_count
== 0);
1690 assert(queue_empty(&vm_page_queue_throttled
));
1694 * The most eligible pages are ones we paged in speculatively,
1695 * but which have not yet been touched.
1697 if ( !queue_empty(&sq
->age_q
) ) {
1698 m
= (vm_page_t
) queue_first(&sq
->age_q
);
1700 page_prev_state
= PAGE_STATE_SPECULATIVE
;
1704 * Time for a zero-filled inactive page?
1706 if ( ((zf_run_count
< zf_ratio
) && vm_zf_queue_count
>= zf_queue_min_count
) ||
1707 queue_empty(&vm_page_queue_inactive
)) {
1708 if ( !queue_empty(&vm_page_queue_zf
) ) {
1709 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
1711 page_prev_state
= PAGE_STATE_ZEROFILL
;
1717 * It's either a normal inactive page or nothing.
1719 if ( !queue_empty(&vm_page_queue_inactive
) ) {
1720 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
1722 page_prev_state
= PAGE_STATE_INACTIVE
;
1727 panic("vm_pageout: no victim");
1729 VM_PAGE_QUEUES_REMOVE(m
);
1731 assert(!m
->laundry
);
1732 assert(!m
->private);
1733 assert(!m
->fictitious
);
1734 assert(m
->object
!= kernel_object
);
1735 assert(m
->phys_page
!= vm_page_guard_addr
);
1738 if (page_prev_state
!= PAGE_STATE_SPECULATIVE
)
1739 vm_pageout_stats
[vm_pageout_stat_now
].considered
++;
1741 DTRACE_VM2(scan
, int, 1, (uint64_t *), NULL
);
1744 * check to see if we currently are working
1745 * with the same object... if so, we've
1746 * already got the lock
1748 if (m
->object
!= object
) {
1750 * the object associated with candidate page is
1751 * different from the one we were just working
1752 * with... dump the lock if we still own it
1754 if (object
!= NULL
) {
1755 vm_object_unlock(object
);
1757 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
1760 * Try to lock object; since we've alread got the
1761 * page queues lock, we can only 'try' for this one.
1762 * if the 'try' fails, we need to do a mutex_pause
1763 * to allow the owner of the object lock a chance to
1764 * run... otherwise, we're likely to trip over this
1765 * object in the same state as we work our way through
1766 * the queue... clumps of pages associated with the same
1767 * object are fairly typical on the inactive and active queues
1769 if (!vm_object_lock_try_scan(m
->object
)) {
1770 vm_page_t m_want
= NULL
;
1772 vm_pageout_inactive_nolock
++;
1774 if (page_prev_state
== PAGE_STATE_SPECULATIVE
)
1775 page_prev_state
= PAGE_STATE_INACTIVE_FIRST
;
1777 pmap_clear_reference(m
->phys_page
);
1778 m
->reference
= FALSE
;
1781 * m->object must be stable since we hold the page queues lock...
1782 * we can update the scan_collisions field sans the object lock
1783 * since it is a separate field and this is the only spot that does
1784 * a read-modify-write operation and it is never executed concurrently...
1785 * we can asynchronously set this field to 0 when creating a UPL, so it
1786 * is possible for the value to be a bit non-determistic, but that's ok
1787 * since it's only used as a hint
1789 m
->object
->scan_collisions
++;
1791 if ( !queue_empty(&sq
->age_q
) )
1792 m_want
= (vm_page_t
) queue_first(&sq
->age_q
);
1793 else if ( ((zf_run_count
< zf_ratio
) && vm_zf_queue_count
>= zf_queue_min_count
) ||
1794 queue_empty(&vm_page_queue_inactive
)) {
1795 if ( !queue_empty(&vm_page_queue_zf
) )
1796 m_want
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
1797 } else if ( !queue_empty(&vm_page_queue_inactive
) ) {
1798 m_want
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
1801 * this is the next object we're going to be interested in
1802 * try to make sure its available after the mutex_yield
1806 vm_pageout_scan_wants_object
= m_want
->object
;
1809 * force us to dump any collected free pages
1810 * and to pause before moving on
1817 vm_pageout_scan_wants_object
= VM_OBJECT_NULL
;
1825 if (m
->encrypted_cleaning
) {
1828 * if this page has already been picked up as
1829 * part of a page-out cluster, it will be busy
1830 * because it is being encrypted (see
1831 * vm_object_upl_request()). But we still
1832 * want to demote it from "clean-in-place"
1833 * (aka "adjacent") to "clean-and-free" (aka
1834 * "target"), so let's ignore its "busy" bit
1835 * here and proceed to check for "cleaning" a
1836 * little bit below...
1839 * A "busy" page should still be left alone for
1840 * most purposes, so we have to be very careful
1841 * not to process that page too much.
1843 assert(m
->cleaning
);
1844 goto consider_inactive_page
;
1848 * Somebody is already playing with this page.
1849 * Put it back on the appropriate queue
1852 vm_pageout_inactive_busy
++;
1854 switch (page_prev_state
) {
1856 case PAGE_STATE_SPECULATIVE
:
1857 vm_page_speculate(m
, FALSE
);
1860 case PAGE_STATE_ZEROFILL
:
1861 m
->zero_fill
= TRUE
;
1863 * fall through to add in the
1866 case PAGE_STATE_INACTIVE
:
1867 VM_PAGE_ENQUEUE_INACTIVE(m
, FALSE
);
1870 case PAGE_STATE_INACTIVE_FIRST
:
1871 VM_PAGE_ENQUEUE_INACTIVE(m
, TRUE
);
1874 goto done_with_inactivepage
;
1879 * If it's absent, in error or the object is no longer alive,
1880 * we can reclaim the page... in the no longer alive case,
1881 * there are 2 states the page can be in that preclude us
1882 * from reclaiming it - busy or cleaning - that we've already
1885 if (m
->absent
|| m
->error
|| !object
->alive
) {
1888 vm_pageout_inactive_absent
++;
1889 else if (!object
->alive
)
1890 vm_pageout_inactive_notalive
++;
1892 vm_pageout_inactive_error
++;
1894 if (vm_pageout_deadlock_target
) {
1895 vm_pageout_scan_inactive_throttle_success
++;
1896 vm_pageout_deadlock_target
--;
1899 DTRACE_VM2(dfree
, int, 1, (uint64_t *), NULL
);
1901 if (object
->internal
) {
1902 DTRACE_VM2(anonfree
, int, 1, (uint64_t *), NULL
);
1904 DTRACE_VM2(fsfree
, int, 1, (uint64_t *), NULL
);
1906 vm_page_free_prepare_queues(m
);
1909 * remove page from object here since we're already
1910 * behind the object lock... defer the rest of the work
1911 * we'd normally do in vm_page_free_prepare_object
1912 * until 'vm_page_free_list' is called
1915 vm_page_remove(m
, TRUE
);
1917 assert(m
->pageq
.next
== NULL
&&
1918 m
->pageq
.prev
== NULL
);
1919 m
->pageq
.next
= (queue_entry_t
)local_freeq
;
1923 inactive_burst_count
= 0;
1925 if (page_prev_state
!= PAGE_STATE_SPECULATIVE
)
1926 vm_pageout_stats
[vm_pageout_stat_now
].reclaimed
++;
1928 goto done_with_inactivepage
;
1931 * If the object is empty, the page must be reclaimed even
1933 * If the page belongs to a volatile object, we stick it back
1936 if (object
->copy
== VM_OBJECT_NULL
) {
1937 if (object
->purgable
== VM_PURGABLE_EMPTY
) {
1939 if (m
->pmapped
== TRUE
) {
1940 /* unmap the page */
1941 refmod_state
= pmap_disconnect(m
->phys_page
);
1942 if (refmod_state
& VM_MEM_MODIFIED
) {
1946 if (m
->dirty
|| m
->precious
) {
1947 /* we saved the cost of cleaning this page ! */
1948 vm_page_purged_count
++;
1952 if (object
->purgable
== VM_PURGABLE_VOLATILE
) {
1953 /* if it's wired, we can't put it on our queue */
1954 assert(!VM_PAGE_WIRED(m
));
1956 /* just stick it back on! */
1957 reactivated_this_call
++;
1958 goto reactivate_page
;
1962 consider_inactive_page
:
1966 * A "busy" page should always be left alone, except...
1968 if (m
->cleaning
&& m
->encrypted_cleaning
) {
1971 * We could get here with a "busy" page
1972 * if it's being encrypted during a
1973 * "clean-in-place" operation. We'll deal
1974 * with it right away by testing if it has been
1975 * referenced and either reactivating it or
1976 * promoting it from "clean-in-place" to
1980 panic("\"busy\" page considered for pageout\n");
1985 * If it's being used, reactivate.
1986 * (Fictitious pages are either busy or absent.)
1987 * First, update the reference and dirty bits
1988 * to make sure the page is unreferenced.
1992 if (m
->reference
== FALSE
&& m
->pmapped
== TRUE
) {
1993 refmod_state
= pmap_get_refmod(m
->phys_page
);
1995 if (refmod_state
& VM_MEM_REFERENCED
)
1996 m
->reference
= TRUE
;
1997 if (refmod_state
& VM_MEM_MODIFIED
)
2002 * If already cleaning this page in place and it hasn't
2003 * been recently referenced, convert from
2004 * "adjacent" to "target". We can leave the page mapped,
2005 * and upl_commit_range will determine whether
2006 * to free or reactivate.
2008 * note: if m->encrypted_cleaning == TRUE, then
2009 * m->cleaning == TRUE
2010 * and we'll handle it here
2014 if (m
->reference
== TRUE
) {
2015 reactivated_this_call
++;
2016 goto reactivate_page
;
2020 m
->dump_cleaning
= TRUE
;
2023 CLUSTER_STAT(vm_pageout_cluster_conversions
++);
2025 inactive_burst_count
= 0;
2027 goto done_with_inactivepage
;
2030 if (m
->reference
|| m
->dirty
) {
2031 /* deal with a rogue "reusable" page */
2032 VM_PAGEOUT_SCAN_HANDLE_REUSABLE_PAGE(m
);
2035 if (m
->reference
&& !m
->no_cache
) {
2037 * The page we pulled off the inactive list has
2038 * been referenced. It is possible for other
2039 * processors to be touching pages faster than we
2040 * can clear the referenced bit and traverse the
2041 * inactive queue, so we limit the number of
2044 if (++reactivated_this_call
>= reactivate_limit
) {
2045 vm_pageout_reactivation_limit_exceeded
++;
2046 } else if (catch_up_count
) {
2047 vm_pageout_catch_ups
++;
2048 } else if (++inactive_reclaim_run
>= VM_PAGEOUT_INACTIVE_FORCE_RECLAIM
) {
2049 vm_pageout_inactive_force_reclaim
++;
2053 if ( !object
->internal
&& object
->pager
!= MEMORY_OBJECT_NULL
&&
2054 vnode_pager_get_isinuse(object
->pager
, &isinuse
) == KERN_SUCCESS
&& !isinuse
) {
 * no explicit mappings of this object exist
 * and it's not open via the filesystem
2059 vm_page_deactivate(m
);
2060 vm_pageout_inactive_deactivated
++;
2063 * The page was/is being used, so put back on active list.
2065 vm_page_activate(m
);
2066 VM_STAT_INCR(reactivations
);
2068 vm_pageout_inactive_used
++;
2069 inactive_burst_count
= 0;
2071 goto done_with_inactivepage
;
2074 * Make sure we call pmap_get_refmod() if it
2075 * wasn't already called just above, to update
2078 if ((refmod_state
== -1) && !m
->dirty
&& m
->pmapped
) {
2079 refmod_state
= pmap_get_refmod(m
->phys_page
);
2080 if (refmod_state
& VM_MEM_MODIFIED
)
2083 forced_reclaim
= TRUE
;
2085 forced_reclaim
= FALSE
;
2089 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
2090 object
, m
->offset
, m
, 0,0);
2093 * we've got a candidate page to steal...
2095 * m->dirty is up to date courtesy of the
2096 * preceding check for m->reference... if
2097 * we get here, then m->reference had to be
2098 * FALSE (or possibly "reactivate_limit" was
2099 * exceeded), but in either case we called
2100 * pmap_get_refmod() and updated both
2101 * m->reference and m->dirty
 * if it's dirty or precious we need to
 * see if the target queue is throttled...
 * if it is, we need to skip over it by moving it back
 * to the end of the inactive queue
2109 inactive_throttled
= FALSE
;
2111 if (m
->dirty
|| m
->precious
) {
2112 if (object
->internal
) {
2113 if (VM_PAGE_Q_THROTTLED(iq
))
2114 inactive_throttled
= TRUE
;
2115 } else if (VM_PAGE_Q_THROTTLED(eq
)) {
2116 inactive_throttled
= TRUE
;
2120 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default
) &&
2121 object
->internal
&& m
->dirty
&&
2122 (object
->purgable
== VM_PURGABLE_DENY
||
2123 object
->purgable
== VM_PURGABLE_NONVOLATILE
||
2124 object
->purgable
== VM_PURGABLE_VOLATILE
)) {
2125 queue_enter(&vm_page_queue_throttled
, m
,
2127 m
->throttled
= TRUE
;
2128 vm_page_throttled_count
++;
2130 vm_pageout_scan_reclaimed_throttled
++;
2132 goto done_with_inactivepage
;
2134 if (inactive_throttled
== TRUE
) {
2136 if (object
->internal
)
2137 vm_pageout_scan_inactive_throttled_internal
++;
2139 vm_pageout_scan_inactive_throttled_external
++;
2141 if (page_prev_state
== PAGE_STATE_SPECULATIVE
)
2142 page_prev_state
= PAGE_STATE_INACTIVE
;
2144 if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default
) && object
->internal
== FALSE
) {
2146 * a) The external pageout queue is throttled
2147 * b) We're done with the active queue and moved on to the inactive queue
2148 * c) We start noticing dirty pages and usually we would put them at the end of the inactive queue, but,
2149 * d) We don't have a default pager, and so,
2150 * e) We push these onto the active queue in an effort to cause a re-evaluation of the active queue
2151 * and get back some, possibly clean, pages.
2153 * We also keep a count of the pages of this kind, since, these will be a good indicator of us being in a deadlock
2154 * on systems without a dynamic pager, where:
2155 * a) The external pageout thread is stuck on the truncate lock for a file that is being extended i.e. written.
2156 * b) The thread doing the writing is waiting for pages while holding the truncate lock
2157 * c) Most of the pages in the inactive queue belong to this file.
2160 vm_page_activate(m
);
2161 vm_pageout_inactive_external_forced_reactivate_count
++;
2162 vm_pageout_inactive_external_forced_reactivate_limit
--;
2164 if (vm_pageout_inactive_external_forced_reactivate_limit
<= 0){
2165 vm_pageout_inactive_external_forced_reactivate_limit
= vm_page_active_count
+ vm_page_inactive_count
;
2168 * Possible deadlock scenario so request jetsam action
2171 vm_object_unlock(object
);
2172 object
= VM_OBJECT_NULL
;
2173 vm_page_unlock_queues();
2175 if (jetsam_kill_top_proc(TRUE
, kJetsamFlagsKilledVM
) < 0){
2176 panic("vm_pageout_scan: Jetsam request failed\n");
2179 vm_page_lock_queues();
2183 inactive_burst_count
= 0;
2184 goto done_with_inactivepage
;
2191 * we've got a page that we can steal...
2192 * eliminate all mappings and make sure
2193 * we have the up-to-date modified state
2194 * first take the page BUSY, so that no new
2195 * mappings can be made
2200 * if we need to do a pmap_disconnect then we
2201 * need to re-evaluate m->dirty since the pmap_disconnect
2202 * provides the true state atomically... the
2203 * page was still mapped up to the pmap_disconnect
2204 * and may have been dirtied at the last microsecond
2206 * we also check for the page being referenced 'late'
2207 * if it was, we first need to do a WAKEUP_DONE on it
2208 * since we already set m->busy = TRUE, before
2209 * going off to reactivate it
2211 * Note that if 'pmapped' is FALSE then the page is not
2212 * and has not been in any map, so there is no point calling
2213 * pmap_disconnect(). m->dirty and/or m->reference could
2214 * have been set in anticipation of likely usage of the page.
2216 if (m
->pmapped
== TRUE
) {
2217 refmod_state
= pmap_disconnect(m
->phys_page
);
2219 if (refmod_state
& VM_MEM_MODIFIED
)
2221 if (refmod_state
& VM_MEM_REFERENCED
) {
2223 /* If m->reference is already set, this page must have
2224 * already failed the reactivate_limit test, so don't
2225 * bump the counts twice.
2227 if ( ! m
->reference
) {
2228 m
->reference
= TRUE
;
2229 if (forced_reclaim
||
2230 ++reactivated_this_call
>= reactivate_limit
)
2231 vm_pageout_reactivation_limit_exceeded
++;
2233 PAGE_WAKEUP_DONE(m
);
2234 goto reactivate_page
;
	/*
	 * reset our count of pages that have been reclaimed
	 * since the last page was 'stolen'
	 */
	inactive_reclaim_run = 0;

	/*
	 * If it's clean and not precious, we can free the page.
	 */
	if (!m->dirty && !m->precious) {

		if (page_prev_state == PAGE_STATE_SPECULATIVE)
			vm_pageout_speculative_clean++;
		else {
			if (page_prev_state == PAGE_STATE_ZEROFILL)
				vm_pageout_inactive_zf++;
			vm_pageout_inactive_clean++;
		}
		goto reclaim_page;
	}

	/*
	 * The page may have been dirtied since the last check
	 * for a throttled target queue (which may have been skipped
	 * if the page was clean then).  With the dirty page
	 * disconnected here, we can make one final check.
	 */
	if (object->internal) {
		if (VM_PAGE_Q_THROTTLED(iq))
			inactive_throttled = TRUE;
	} else if (VM_PAGE_Q_THROTTLED(eq)) {
		inactive_throttled = TRUE;
	}

	if (inactive_throttled == TRUE) {
		/*
		 * we set busy before issuing the pmap_disconnect,
		 * so clear it and wakeup anyone that happened upon
		 * it in that state
		 */
		PAGE_WAKEUP_DONE(m);
		goto throttle_inactive;
	}

	vm_pageout_stats[vm_pageout_stat_now].reclaimed++;

	vm_pageout_cluster(m);

	if (page_prev_state == PAGE_STATE_ZEROFILL)
		vm_pageout_inactive_zf++;
	if (object->internal)
		vm_pageout_inactive_dirty_internal++;
	else
		vm_pageout_inactive_dirty_external++;

	inactive_burst_count = 0;
done_with_inactivepage:
	if (delayed_unlock++ > delayed_unlock_limit || try_failed == TRUE) {

		if (object != NULL) {
			vm_pageout_scan_wants_object = VM_OBJECT_NULL;
			vm_object_unlock(object);
			object = NULL;
		}
		if (local_freeq) {

			vm_page_unlock_queues();

			VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_START,
				       vm_page_free_count, local_freed, delayed_unlock_limit, 4);

			vm_page_free_list(local_freeq, TRUE);

			VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST, DBG_FUNC_END,
				       vm_page_free_count, local_freed, 0, 4);

			local_freeq = NULL;
			local_freed = 0;

			vm_page_lock_queues();
		} else
			lck_mtx_yield(&vm_page_queue_lock);

		delayed_unlock = 1;
	}
	/*
	 * back to top of pageout scan loop
	 */
	}
}
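
/*
 * Note on the scan loop above: reclaimed pages are collected on
 * 'local_freeq' and returned in batches through vm_page_free_list(), so
 * the page queues lock is dropped (or yielded) only once every
 * 'delayed_unlock_limit' iterations, or early when 'try_failed' indicates
 * that an object lock could not be taken.
 */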
int vm_page_free_count_init;

void
vm_page_free_reserve(
	int pages)
{
	int		free_after_reserve;

	vm_page_free_reserved += pages;

	if (vm_page_free_reserved > VM_PAGE_FREE_RESERVED_LIMIT)
		vm_page_free_reserved = VM_PAGE_FREE_RESERVED_LIMIT;

	free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;

	vm_page_free_min = vm_page_free_reserved +
		VM_PAGE_FREE_MIN(free_after_reserve);

	if (vm_page_free_min > VM_PAGE_FREE_MIN_LIMIT)
		vm_page_free_min = VM_PAGE_FREE_MIN_LIMIT;

	vm_page_free_target = vm_page_free_reserved +
		VM_PAGE_FREE_TARGET(free_after_reserve);

	if (vm_page_free_target > VM_PAGE_FREE_TARGET_LIMIT)
		vm_page_free_target = VM_PAGE_FREE_TARGET_LIMIT;

	if (vm_page_free_target < vm_page_free_min + 5)
		vm_page_free_target = vm_page_free_min + 5;

	vm_page_throttle_limit = vm_page_free_target - (vm_page_free_target / 3);
	vm_page_creation_throttle = vm_page_free_target / 2;
}
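
/*
 * Summary of the relationships established above (descriptive only, no
 * additional policy):
 *
 *	vm_page_free_min    = vm_page_free_reserved + VM_PAGE_FREE_MIN(free_after_reserve)
 *	vm_page_free_target = vm_page_free_reserved + VM_PAGE_FREE_TARGET(free_after_reserve)
 *
 * both capped by their respective *_LIMIT values, with free_target forced
 * to be at least free_min + 5; the throttle limit is 2/3 of free_target
 * and the creation throttle is 1/2 of free_target.
 */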
/*
 *	vm_pageout is the high level pageout daemon.
 */

void
vm_pageout_continue(void)
{
	DTRACE_VM2(pgrrun, int, 1, (uint64_t *), NULL);
	vm_pageout_scan_event_counter++;
	vm_pageout_scan();
	/* we hold vm_page_queue_free_lock now */
	assert(vm_page_free_wanted == 0);
	assert(vm_page_free_wanted_privileged == 0);
	assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
	lck_mtx_unlock(&vm_page_queue_free_lock);

	counter(c_vm_pageout_block++);
	thread_block((thread_continue_t)vm_pageout_continue);
	/*NOTREACHED*/
}
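
/*
 * Note: vm_pageout_continue() is installed as a thread continuation, so
 * thread_block() above does not preserve the stack; every wakeup on
 * &vm_page_free_wanted re-enters at the top of this function and runs
 * vm_pageout_scan() again.
 */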
2386 #ifdef FAKE_DEADLOCK
2388 #define FAKE_COUNT 5000
2390 int internal_count
= 0;
2391 int fake_deadlock
= 0;
2396 vm_pageout_iothread_continue(struct vm_pageout_queue
*q
)
2400 memory_object_t pager
;
2401 thread_t self
= current_thread();
2403 if ((vm_pageout_internal_iothread
!= THREAD_NULL
)
2404 && (self
== vm_pageout_external_iothread
)
2405 && (self
->options
& TH_OPT_VMPRIV
))
2406 self
->options
&= ~TH_OPT_VMPRIV
;
2408 vm_page_lockspin_queues();
2410 while ( !queue_empty(&q
->pgo_pending
) ) {
2413 queue_remove_first(&q
->pgo_pending
, m
, vm_page_t
, pageq
);
2414 if (m
->object
== slide_info
.slide_object
) {
2415 panic("slid page %p not allowed on this path\n", m
);
2418 m
->pageout_queue
= FALSE
;
2419 m
->pageq
.next
= NULL
;
2420 m
->pageq
.prev
= NULL
;
2421 vm_page_unlock_queues();
2423 #ifdef FAKE_DEADLOCK
2424 if (q
== &vm_pageout_queue_internal
) {
2430 if ((internal_count
== FAKE_COUNT
)) {
2432 pg_count
= vm_page_free_count
+ vm_page_free_reserved
;
2434 if (kmem_alloc(kernel_map
, &addr
, PAGE_SIZE
* pg_count
) == KERN_SUCCESS
) {
2435 kmem_free(kernel_map
, addr
, PAGE_SIZE
* pg_count
);
2444 vm_object_lock(object
);
2446 if (!object
->pager_initialized
) {
2449 * If there is no memory object for the page, create
2450 * one and hand it to the default pager.
2453 if (!object
->pager_initialized
)
2454 vm_object_collapse(object
,
2455 (vm_object_offset_t
) 0,
2457 if (!object
->pager_initialized
)
2458 vm_object_pager_create(object
);
2459 if (!object
->pager_initialized
) {
2461 * Still no pager for the object.
2462 * Reactivate the page.
2464 * Should only happen if there is no
2467 vm_page_lockspin_queues();
2469 vm_pageout_queue_steal(m
, TRUE
);
2470 vm_page_activate(m
);
2471 vm_pageout_dirty_no_pager
++;
2473 vm_page_unlock_queues();
2476 * And we are done with it.
2478 PAGE_WAKEUP_DONE(m
);
2480 vm_object_paging_end(object
);
2481 vm_object_unlock(object
);
2483 vm_page_lockspin_queues();
2487 pager
= object
->pager
;
2488 if (pager
== MEMORY_OBJECT_NULL
) {
2490 * This pager has been destroyed by either
2491 * memory_object_destroy or vm_object_destroy, and
2492 * so there is nowhere for the page to go.
2496 * Just free the page... VM_PAGE_FREE takes
2497 * care of cleaning up all the state...
2498 * including doing the vm_pageout_throttle_up
2502 vm_page_lockspin_queues();
2504 vm_pageout_queue_steal(m
, TRUE
);
2505 vm_page_activate(m
);
2507 vm_page_unlock_queues();
2510 * And we are done with it.
2512 PAGE_WAKEUP_DONE(m
);
2514 vm_object_paging_end(object
);
2515 vm_object_unlock(object
);
2517 vm_page_lockspin_queues();
2521 vm_object_unlock(object
);
2523 * we expect the paging_in_progress reference to have
2524 * already been taken on the object before it was added
2525 * to the appropriate pageout I/O queue... this will
2526 * keep the object from being terminated and/or the
2527 * paging_offset from changing until the I/O has
2528 * completed... therefore no need to lock the object to
2529 * pull the paging_offset from it.
2531 * Send the data to the pager.
2532 * any pageout clustering happens there
2534 memory_object_data_return(pager
,
2535 m
->offset
+ object
->paging_offset
,
2543 vm_object_lock(object
);
2544 vm_object_paging_end(object
);
2545 vm_object_unlock(object
);
2547 vm_page_lockspin_queues();
2549 assert_wait((event_t
) q
, THREAD_UNINT
);
2551 if (q
->pgo_throttled
== TRUE
&& !VM_PAGE_Q_THROTTLED(q
)) {
2552 q
->pgo_throttled
= FALSE
;
2553 thread_wakeup((event_t
) &q
->pgo_laundry
);
2555 if (q
->pgo_draining
== TRUE
&& q
->pgo_laundry
== 0) {
2556 q
->pgo_draining
= FALSE
;
2557 thread_wakeup((event_t
) (&q
->pgo_laundry
+1));
2559 q
->pgo_busy
= FALSE
;
2561 vm_page_unlock_queues();
2563 thread_block_parameter((thread_continue_t
)vm_pageout_iothread_continue
, (void *) &q
->pgo_pending
);
static void
vm_pageout_iothread_external(void)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	vm_pageout_iothread_continue(&vm_pageout_queue_external);
	/*NOTREACHED*/
}


static void
vm_pageout_iothread_internal(void)
{
	thread_t	self = current_thread();

	self->options |= TH_OPT_VMPRIV;

	vm_pageout_iothread_continue(&vm_pageout_queue_internal);
	/*NOTREACHED*/
}

kern_return_t
vm_set_buffer_cleanup_callout(boolean_t (*func)(int))
{
	if (OSCompareAndSwapPtr(NULL, func, (void * volatile *) &consider_buffer_cache_collect)) {
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE; /* Already set */
	}
}
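
#if 0
/*
 * Illustrative sketch only (not compiled): a subsystem such as the BSD
 * buffer cache would register its reclaim callback once at initialization.
 * The function names below are hypothetical placeholders.
 */
static boolean_t
example_buffer_cache_collect(__unused int flags)
{
	/* return TRUE if any large buffers were released back to their zones */
	return FALSE;
}

static void
example_register_cleanup_callout(void)
{
	/* only the first registration succeeds; later ones get KERN_FAILURE */
	(void) vm_set_buffer_cleanup_callout(example_buffer_cache_collect);
}
#endif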
void
vm_pageout_garbage_collect(int collect)
{
	if (collect) {
		boolean_t buf_large_zfree = FALSE;
		stack_collect();

		/*
		 * consider_zone_gc should be last, because the other operations
		 * might return memory to zones.
		 */
		consider_machine_collect();
		if (consider_buffer_cache_collect != NULL) {
			buf_large_zfree = (*consider_buffer_cache_collect)(0);
		}
		consider_zone_gc(buf_large_zfree);

		consider_machine_adjust();
		consider_pressure_events();
	}
	assert_wait((event_t) &vm_pageout_garbage_collect, THREAD_UNINT);

	thread_block_parameter((thread_continue_t) vm_pageout_garbage_collect, (void *)1);
	/*NOTREACHED*/
}
void
vm_pageout(void)
{
	thread_t	self = current_thread();
	thread_t	thread;
	kern_return_t	result;
	spl_t		s;

	/*
	 * Set thread privileges.
	 */
	s = splsched();
	thread_lock(self);
	self->priority = BASEPRI_PREEMPT - 1;
	set_sched_pri(self, self->priority);
	thread_unlock(self);
	splx(s);

	if (!self->reserved_stack)
		self->reserved_stack = self->kernel_stack;

	/*
	 *	Initialize some paging parameters.
	 */

	if (vm_pageout_idle_wait == 0)
		vm_pageout_idle_wait = VM_PAGEOUT_IDLE_WAIT;

	if (vm_pageout_burst_wait == 0)
		vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;

	if (vm_pageout_empty_wait == 0)
		vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;

	if (vm_pageout_deadlock_wait == 0)
		vm_pageout_deadlock_wait = VM_PAGEOUT_DEADLOCK_WAIT;

	if (vm_pageout_deadlock_relief == 0)
		vm_pageout_deadlock_relief = VM_PAGEOUT_DEADLOCK_RELIEF;

	if (vm_pageout_inactive_relief == 0)
		vm_pageout_inactive_relief = VM_PAGEOUT_INACTIVE_RELIEF;

	if (vm_pageout_burst_active_throttle == 0)
		vm_pageout_burst_active_throttle = VM_PAGEOUT_BURST_ACTIVE_THROTTLE;

	if (vm_pageout_burst_inactive_throttle == 0)
		vm_pageout_burst_inactive_throttle = VM_PAGEOUT_BURST_INACTIVE_THROTTLE;

	/*
	 * Set kernel task to low backing store privileged
	 * status
	 */
	task_lock(kernel_task);
	kernel_task->priv_flags |= VM_BACKING_STORE_PRIV;
	task_unlock(kernel_task);

	vm_page_free_count_init = vm_page_free_count;

	/*
	 * even if we've already called vm_page_free_reserve
	 * call it again here to insure that the targets are
	 * accurately calculated (it uses vm_page_free_count_init)
	 * calling it with an arg of 0 will not change the reserve
	 * but will re-calculate free_min and free_target
	 */
	if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED(processor_count)) {
		vm_page_free_reserve((VM_PAGE_FREE_RESERVED(processor_count)) - vm_page_free_reserved);
	} else
		vm_page_free_reserve(0);


	queue_init(&vm_pageout_queue_external.pgo_pending);
	vm_pageout_queue_external.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
	vm_pageout_queue_external.pgo_laundry = 0;
	vm_pageout_queue_external.pgo_idle = FALSE;
	vm_pageout_queue_external.pgo_busy = FALSE;
	vm_pageout_queue_external.pgo_throttled = FALSE;
	vm_pageout_queue_external.pgo_draining = FALSE;

	queue_init(&vm_pageout_queue_internal.pgo_pending);
	vm_pageout_queue_internal.pgo_maxlaundry = 0;
	vm_pageout_queue_internal.pgo_laundry = 0;
	vm_pageout_queue_internal.pgo_idle = FALSE;
	vm_pageout_queue_internal.pgo_busy = FALSE;
	vm_pageout_queue_internal.pgo_throttled = FALSE;
	vm_pageout_queue_internal.pgo_draining = FALSE;


	/* internal pageout thread started when default pager registered first time */
	/* external pageout and garbage collection threads started here */

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_external, NULL,
					      BASEPRI_PREEMPT - 1,
					      &vm_pageout_external_iothread);
	if (result != KERN_SUCCESS)
		panic("vm_pageout_iothread_external: create failed");

	thread_deallocate(vm_pageout_external_iothread);

	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_garbage_collect, NULL,
					      BASEPRI_DEFAULT,
					      &thread);
	if (result != KERN_SUCCESS)
		panic("vm_pageout_garbage_collect: create failed");

	thread_deallocate(thread);

	vm_object_reaper_init();


	vm_pageout_continue();

	/*
	 * Unreached code!
	 *
	 * The vm_pageout_continue() call above never returns, so the code below is never
	 * executed.  We take advantage of this to declare several DTrace VM related probe
	 * points that our kernel doesn't have an analog for.  These are probe points that
	 * exist in Solaris and are in the DTrace documentation, so people may have written
	 * scripts that use them.  Declaring the probe points here means their scripts will
	 * compile and execute which we want for portability of the scripts, but since this
	 * section of code is never reached, the probe points will simply never fire.  Yes,
	 * this is basically a hack.  The problem is the DTrace probe points were chosen with
	 * Solaris specific VM events in mind, not portability to different VM implementations.
	 */
	DTRACE_VM2(execfree, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(execpgout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(pgswapout, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapin, int, 1, (uint64_t *), NULL);
	DTRACE_VM2(swapout, int, 1, (uint64_t *), NULL);
	/*NOTREACHED*/
}
kern_return_t
vm_pageout_internal_start(void)
{
	kern_return_t result;

	vm_pageout_queue_internal.pgo_maxlaundry = VM_PAGE_LAUNDRY_MAX;
	result = kernel_thread_start_priority((thread_continue_t)vm_pageout_iothread_internal, NULL, BASEPRI_PREEMPT - 1, &vm_pageout_internal_iothread);
	if (result == KERN_SUCCESS)
		thread_deallocate(vm_pageout_internal_iothread);
	return result;
}
static upl_t
upl_create(int type, int flags, upl_size_t size)
{
	upl_t	upl;
	int	page_field_size = 0;
	int	upl_flags = 0;
	int	upl_size  = sizeof(struct upl);

	size = round_page_32(size);

	if (type & UPL_CREATE_LITE) {
		page_field_size = (atop(size) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;

		upl_flags |= UPL_LITE;
	}
	if (type & UPL_CREATE_INTERNAL) {
		upl_size += (int) sizeof(struct upl_page_info) * atop(size);

		upl_flags |= UPL_INTERNAL;
	}
	upl = (upl_t)kalloc(upl_size + page_field_size);

	if (page_field_size)
		bzero((char *)upl + upl_size, page_field_size);

	upl->flags = upl_flags | flags;
	upl->src_object = NULL;
	upl->kaddr = (vm_offset_t)0;
	upl->size = 0;
	upl->map_object = NULL;
	upl->ref_count = 1;
	upl->ext_ref_count = 0;
	upl->highest_page = 0;
	upl_lock_init(upl);
	upl->vector_upl = NULL;
#if UPL_DEBUG
	upl->ubc_alias1 = 0;
	upl->ubc_alias2 = 0;

	upl->upl_creator = current_thread();

	upl->upl_commit_index = 0;
	bzero(&upl->upl_commit_records[0], sizeof(upl->upl_commit_records));

	(void) OSBacktrace(&upl->upl_create_retaddr[0], UPL_DEBUG_STACK_FRAMES);
#endif /* UPL_DEBUG */

	return(upl);
}
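
/*
 * Allocation size illustration for upl_create() (example numbers, not a
 * new invariant): for a 1MB request (256 4K pages) created with
 * UPL_CREATE_INTERNAL | UPL_CREATE_LITE, the kalloc size works out to
 *
 *	sizeof(struct upl)
 *	  + 256 * sizeof(struct upl_page_info)	(internal page list)
 *	  + 32 bytes				(256-bit LITE bitmap, rounded
 *						 up to a 4-byte multiple)
 */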
static void
upl_destroy(upl_t upl)
{
	int	page_field_size;  /* bit field in word size buf */
	int	size;

	if (upl->ext_ref_count) {
		panic("upl(%p) ext_ref_count", upl);
	}

#if UPL_DEBUG
	{
		vm_object_t	object;

		if (upl->flags & UPL_SHADOWED) {
			object = upl->map_object->shadow;
		} else {
			object = upl->map_object;
		}
		vm_object_lock(object);
		queue_remove(&object->uplq, upl, upl_t, uplq);
		vm_object_unlock(object);
	}
#endif /* UPL_DEBUG */
	/*
	 * drop a reference on the map_object whether or
	 * not a pageout object is inserted
	 */
	if (upl->flags & UPL_SHADOWED)
		vm_object_deallocate(upl->map_object);

	if (upl->flags & UPL_DEVICE_MEMORY)
		size = PAGE_SIZE;
	else
		size = upl->size;
	page_field_size = 0;

	if (upl->flags & UPL_LITE) {
		page_field_size = ((size/PAGE_SIZE) + 7) >> 3;
		page_field_size = (page_field_size + 3) & 0xFFFFFFFC;
	}
	upl_lock_destroy(upl);
	upl->vector_upl = (vector_upl_t) 0xfeedbeef;
	if (upl->flags & UPL_INTERNAL) {
		kfree(upl,
		      sizeof(struct upl) +
		      (sizeof(struct upl_page_info) * (size/PAGE_SIZE))
		      + page_field_size);
	} else {
		kfree(upl, sizeof(struct upl) + page_field_size);
	}
}

void
upl_deallocate(upl_t upl)
{
	if (--upl->ref_count == 0) {
		if(vector_upl_is_valid(upl))
			vector_upl_deallocate(upl);
		upl_destroy(upl);
	}
}
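
/*
 * Note: upl_deallocate() simply drops a reference (the create reference is
 * taken in upl_create()); the UPL and any vector UPL bookkeeping are only
 * torn down via upl_destroy() when the count reaches zero.
 */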
#if DEVELOPMENT || DEBUG
/*
 * Statistics about UPL enforcement of copy-on-write obligations.
 */
unsigned long upl_cow = 0;
unsigned long upl_cow_again = 0;
unsigned long upl_cow_pages = 0;
unsigned long upl_cow_again_pages = 0;

unsigned long iopl_cow = 0;
unsigned long iopl_cow_pages = 0;
#endif /* DEVELOPMENT || DEBUG */
/*
 *	Routine:	vm_object_upl_request
 *	Purpose:
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		A page list structure, listing the physical pages
 *		will be returned upon request.
 *		This function is called by the file system or any other
 *		supplier of backing store to a pager.
 *		IMPORTANT NOTE: The caller must still respect the relationship
 *		between the vm_object and its backing memory object.  The
 *		caller MUST NOT substitute changes in the backing file
 *		without first doing a memory_object_lock_request on the
 *		target range unless it is known that the pages are not
 *		shared with another entity at the pager level.
 *		Copy_in_to:
 *			if a page list structure is present
 *			return the mapped physical pages, where a
 *			page is not present, return a non-initialized
 *			one.  If the no_sync bit is turned on, don't
 *			call the pager unlock to synchronize with other
 *			possible copies of the page.  Leave pages busy
 *			in the original object, if a page list structure
 *			was specified.  When a commit of the page list
 *			pages is done, the dirty bit will be set for each one.
 *		Copy_out_from:
 *			If a page list structure is present, return
 *			all mapped pages.  Where a page does not exist
 *			map a zero filled one.  Leave pages busy in
 *			the original object.  If a page list structure
 *			is not specified, this call is a no-op.
 *
 *		Note:  access of default pager objects has a rather interesting
 *		twist.  The caller of this routine, presumably the file system
 *		page cache handling code, will never actually make a request
 *		against a default pager backed object.  Only the default
 *		pager will make requests on backing store related vm_objects.
 *		In this way the default pager can maintain the relationship
 *		between backing store files (abstract memory objects) and
 *		the vm_objects (cache objects) they support.
 */
2953 __private_extern__ kern_return_t
2954 vm_object_upl_request(
2956 vm_object_offset_t offset
,
2959 upl_page_info_array_t user_page_list
,
2960 unsigned int *page_list_count
,
2963 vm_page_t dst_page
= VM_PAGE_NULL
;
2964 vm_object_offset_t dst_offset
;
2965 upl_size_t xfer_size
;
2966 unsigned int size_in_pages
;
2971 #if MACH_CLUSTER_STATS
2972 boolean_t encountered_lrp
= FALSE
;
2974 vm_page_t alias_page
= NULL
;
2975 int refmod_state
= 0;
2976 wpl_array_t lite_list
= NULL
;
2977 vm_object_t last_copy_object
;
2978 struct vm_page_delayed_work dw_array
[DEFAULT_DELAYED_WORK_LIMIT
];
2979 struct vm_page_delayed_work
*dwp
;
2983 if (cntrl_flags
& ~UPL_VALID_FLAGS
) {
2985 * For forward compatibility's sake,
2986 * reject any unknown flag.
2988 return KERN_INVALID_VALUE
;
2990 if ( (!object
->internal
) && (object
->paging_offset
!= 0) )
2991 panic("vm_object_upl_request: external object with non-zero paging offset\n");
2992 if (object
->phys_contiguous
)
2993 panic("vm_object_upl_request: contiguous object specified\n");
2996 if ((size
/ PAGE_SIZE
) > MAX_UPL_SIZE
)
2997 size
= MAX_UPL_SIZE
* PAGE_SIZE
;
2999 if ( (cntrl_flags
& UPL_SET_INTERNAL
) && page_list_count
!= NULL
)
3000 *page_list_count
= MAX_UPL_SIZE
;
3002 if (cntrl_flags
& UPL_SET_INTERNAL
) {
3003 if (cntrl_flags
& UPL_SET_LITE
) {
3005 upl
= upl_create(UPL_CREATE_INTERNAL
| UPL_CREATE_LITE
, 0, size
);
3007 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
3008 lite_list
= (wpl_array_t
)
3009 (((uintptr_t)user_page_list
) +
3010 ((size
/PAGE_SIZE
) * sizeof(upl_page_info_t
)));
3012 user_page_list
= NULL
;
3016 upl
= upl_create(UPL_CREATE_INTERNAL
, 0, size
);
3018 user_page_list
= (upl_page_info_t
*) (((uintptr_t)upl
) + sizeof(struct upl
));
3020 user_page_list
= NULL
;
3024 if (cntrl_flags
& UPL_SET_LITE
) {
3026 upl
= upl_create(UPL_CREATE_EXTERNAL
| UPL_CREATE_LITE
, 0, size
);
3028 lite_list
= (wpl_array_t
) (((uintptr_t)upl
) + sizeof(struct upl
));
3033 upl
= upl_create(UPL_CREATE_EXTERNAL
, 0, size
);
3039 user_page_list
[0].device
= FALSE
;
3041 if (cntrl_flags
& UPL_SET_LITE
) {
3042 upl
->map_object
= object
;
3044 upl
->map_object
= vm_object_allocate(size
);
 * No need to lock the new object: nobody else knows
 * about it yet, so it's all ours so far.
3049 upl
->map_object
->shadow
= object
;
3050 upl
->map_object
->pageout
= TRUE
;
3051 upl
->map_object
->can_persist
= FALSE
;
3052 upl
->map_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
3053 upl
->map_object
->vo_shadow_offset
= offset
;
3054 upl
->map_object
->wimg_bits
= object
->wimg_bits
;
3056 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
3058 upl
->flags
|= UPL_SHADOWED
;
3062 * Just mark the UPL as "encrypted" here.
3063 * We'll actually encrypt the pages later,
3064 * in upl_encrypt(), when the caller has
3065 * selected which pages need to go to swap.
3067 if (cntrl_flags
& UPL_ENCRYPT
)
3068 upl
->flags
|= UPL_ENCRYPTED
;
3070 if (cntrl_flags
& UPL_FOR_PAGEOUT
)
3071 upl
->flags
|= UPL_PAGEOUT
;
3073 vm_object_lock(object
);
3074 vm_object_activity_begin(object
);
3077 * we can lock in the paging_offset once paging_in_progress is set
3080 upl
->offset
= offset
+ object
->paging_offset
;
3083 queue_enter(&object
->uplq
, upl
, upl_t
, uplq
);
3084 #endif /* UPL_DEBUG */
3086 if ((cntrl_flags
& UPL_WILL_MODIFY
) && object
->copy
!= VM_OBJECT_NULL
) {
3088 * Honor copy-on-write obligations
3090 * The caller is gathering these pages and
3091 * might modify their contents. We need to
3092 * make sure that the copy object has its own
3093 * private copies of these pages before we let
3094 * the caller modify them.
3096 vm_object_update(object
,
3101 FALSE
, /* should_return */
3102 MEMORY_OBJECT_COPY_SYNC
,
3104 #if DEVELOPMENT || DEBUG
3106 upl_cow_pages
+= size
>> PAGE_SHIFT
;
3110 * remember which copy object we synchronized with
3112 last_copy_object
= object
->copy
;
3116 dst_offset
= offset
;
3117 size_in_pages
= size
/ PAGE_SIZE
;
3121 dw_limit
= DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT
);
3123 if (vm_page_free_count
> (vm_page_free_target
+ size_in_pages
) ||
3124 object
->resident_page_count
< (MAX_UPL_SIZE
* 2))
3125 object
->scan_collisions
= 0;
3131 if ((alias_page
== NULL
) && !(cntrl_flags
& UPL_SET_LITE
)) {
3132 vm_object_unlock(object
);
3133 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
3134 vm_object_lock(object
);
3136 if (cntrl_flags
& UPL_COPYOUT_FROM
) {
3137 upl
->flags
|= UPL_PAGE_SYNC_DONE
;
3139 if ( ((dst_page
= vm_page_lookup(object
, dst_offset
)) == VM_PAGE_NULL
) ||
3140 dst_page
->fictitious
||
3143 (VM_PAGE_WIRED(dst_page
) && !dst_page
->pageout
&& !dst_page
->list_req_pending
)) {
3146 user_page_list
[entry
].phys_addr
= 0;
3151 * grab this up front...
 * a high percentage of the time we're going to
3153 * need the hardware modification state a bit later
3154 * anyway... so we can eliminate an extra call into
3155 * the pmap layer by grabbing it here and recording it
3157 if (dst_page
->pmapped
)
3158 refmod_state
= pmap_get_refmod(dst_page
->phys_page
);
3162 if ( (refmod_state
& VM_MEM_REFERENCED
) && dst_page
->inactive
) {
3164 * page is on inactive list and referenced...
3165 * reactivate it now... this gets it out of the
3166 * way of vm_pageout_scan which would have to
3167 * reactivate it upon tripping over it
3169 dwp
->dw_mask
|= DW_vm_page_activate
;
3171 if (cntrl_flags
& UPL_RET_ONLY_DIRTY
) {
3173 * we're only asking for DIRTY pages to be returned
3175 if (dst_page
->list_req_pending
|| !(cntrl_flags
& UPL_FOR_PAGEOUT
)) {
3177 * if we were the page stolen by vm_pageout_scan to be
3178 * cleaned (as opposed to a buddy being clustered in
3179 * or this request is not being driven by a PAGEOUT cluster
3180 * then we only need to check for the page being dirty or
3181 * precious to decide whether to return it
3183 if (dst_page
->dirty
|| dst_page
->precious
|| (refmod_state
& VM_MEM_MODIFIED
))
3188 * this is a request for a PAGEOUT cluster and this page
3189 * is merely along for the ride as a 'buddy'... not only
3190 * does it have to be dirty to be returned, but it also
3191 * can't have been referenced recently... note that we've
3192 * already filtered above based on whether this page is
3193 * currently on the inactive queue or it meets the page
3194 * ticket (generation count) check
3196 if ( (cntrl_flags
& UPL_CLEAN_IN_PLACE
|| !(refmod_state
& VM_MEM_REFERENCED
) || dst_page
->throttled
) &&
3197 ((refmod_state
& VM_MEM_MODIFIED
) || dst_page
->dirty
|| dst_page
->precious
) ) {
3202 * if we reach here, we're not to return
3203 * the page... go on to the next one
3206 user_page_list
[entry
].phys_addr
= 0;
3211 if (dst_page
->busy
&& (!(dst_page
->list_req_pending
&& (dst_page
->pageout
|| dst_page
->cleaning
)))) {
3212 if (cntrl_flags
& UPL_NOBLOCK
) {
3214 user_page_list
[entry
].phys_addr
= 0;
3219 * someone else is playing with the
3220 * page. We will have to wait.
3222 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
3227 * Someone else already cleaning the page?
3229 if ((dst_page
->cleaning
|| dst_page
->absent
|| VM_PAGE_WIRED(dst_page
)) && !dst_page
->list_req_pending
) {
3231 user_page_list
[entry
].phys_addr
= 0;
3237 * The caller is gathering this page and might
3238 * access its contents later on. Decrypt the
3239 * page before adding it to the UPL, so that
3240 * the caller never sees encrypted data.
3242 if (! (cntrl_flags
& UPL_ENCRYPT
) && dst_page
->encrypted
) {
3246 * save the current state of busy
3247 * mark page as busy while decrypt
3248 * is in progress since it will drop
3249 * the object lock...
3251 was_busy
= dst_page
->busy
;
3252 dst_page
->busy
= TRUE
;
3254 vm_page_decrypt(dst_page
, 0);
3255 vm_page_decrypt_for_upl_counter
++;
3257 * restore to original busy state
3259 dst_page
->busy
= was_busy
;
3261 if (dst_page
->pageout_queue
== TRUE
) {
3263 vm_page_lockspin_queues();
3265 if (dst_page
->pageout_queue
== TRUE
) {
3267 * we've buddied up a page for a clustered pageout
3268 * that has already been moved to the pageout
3269 * queue by pageout_scan... we need to remove
3270 * it from the queue and drop the laundry count
3273 vm_pageout_throttle_up(dst_page
);
3275 vm_page_unlock_queues();
3277 #if MACH_CLUSTER_STATS
3279 * pageout statistics gathering. count
3280 * all the pages we will page out that
3281 * were not counted in the initial
3282 * vm_pageout_scan work
3284 if (dst_page
->list_req_pending
)
3285 encountered_lrp
= TRUE
;
3286 if ((dst_page
->dirty
|| (dst_page
->object
->internal
&& dst_page
->precious
)) && !dst_page
->list_req_pending
) {
3287 if (encountered_lrp
)
3288 CLUSTER_STAT(pages_at_higher_offsets
++;)
3290 CLUSTER_STAT(pages_at_lower_offsets
++;)
3294 * Turn off busy indication on pending
3295 * pageout. Note: we can only get here
3296 * in the request pending case.
3298 dst_page
->list_req_pending
= FALSE
;
3299 dst_page
->busy
= FALSE
;
3301 hw_dirty
= refmod_state
& VM_MEM_MODIFIED
;
3302 dirty
= hw_dirty
? TRUE
: dst_page
->dirty
;
3304 if (dst_page
->phys_page
> upl
->highest_page
)
3305 upl
->highest_page
= dst_page
->phys_page
;
3307 if (cntrl_flags
& UPL_SET_LITE
) {
3308 unsigned int pg_num
;
3310 pg_num
= (unsigned int) ((dst_offset
-offset
)/PAGE_SIZE
);
3311 assert(pg_num
== (dst_offset
-offset
)/PAGE_SIZE
);
3312 lite_list
[pg_num
>>5] |= 1 << (pg_num
& 31);
3315 pmap_clear_modify(dst_page
->phys_page
);
3318 * Mark original page as cleaning
3321 dst_page
->cleaning
= TRUE
;
3322 dst_page
->precious
= FALSE
;
3325 * use pageclean setup, it is more
3326 * convenient even for the pageout
3329 vm_object_lock(upl
->map_object
);
3330 vm_pageclean_setup(dst_page
, alias_page
, upl
->map_object
, size
- xfer_size
);
3331 vm_object_unlock(upl
->map_object
);
3333 alias_page
->absent
= FALSE
;
3338 * Record that this page has been
3341 vm_external_state_set(object
->existence_map
, dst_page
->offset
);
3342 #endif /*MACH_PAGEMAP*/
3343 dst_page
->dirty
= dirty
;
3346 dst_page
->precious
= TRUE
;
3348 if (dst_page
->pageout
)
3349 dst_page
->busy
= TRUE
;
3351 if ( (cntrl_flags
& UPL_ENCRYPT
) ) {
3354 * We want to deny access to the target page
3355 * because its contents are about to be
3356 * encrypted and the user would be very
3357 * confused to see encrypted data instead
3359 * We also set "encrypted_cleaning" to allow
3360 * vm_pageout_scan() to demote that page
3361 * from "adjacent/clean-in-place" to
3362 * "target/clean-and-free" if it bumps into
3363 * this page during its scanning while we're
3364 * still processing this cluster.
3366 dst_page
->busy
= TRUE
;
3367 dst_page
->encrypted_cleaning
= TRUE
;
3369 if ( !(cntrl_flags
& UPL_CLEAN_IN_PLACE
) ) {
3371 * deny access to the target page
3372 * while it is being worked on
3374 if ((!dst_page
->pageout
) && ( !VM_PAGE_WIRED(dst_page
))) {
3375 dst_page
->busy
= TRUE
;
3376 dst_page
->pageout
= TRUE
;
3378 dwp
->dw_mask
|= DW_vm_page_wire
;
3382 if ((cntrl_flags
& UPL_WILL_MODIFY
) && object
->copy
!= last_copy_object
) {
3384 * Honor copy-on-write obligations
3386 * The copy object has changed since we
3387 * last synchronized for copy-on-write.
3388 * Another copy object might have been
3389 * inserted while we released the object's
3390 * lock. Since someone could have seen the
3391 * original contents of the remaining pages
3392 * through that new object, we have to
3393 * synchronize with it again for the remaining
3394 * pages only. The previous pages are "busy"
3395 * so they can not be seen through the new
3396 * mapping. The new mapping will see our
3397 * upcoming changes for those previous pages,
3398 * but that's OK since they couldn't see what
3399 * was there before. It's just a race anyway
3400 * and there's no guarantee of consistency or
3401 * atomicity. We just don't want new mappings
3402 * to see both the *before* and *after* pages.
3404 if (object
->copy
!= VM_OBJECT_NULL
) {
3407 dst_offset
,/* current offset */
3408 xfer_size
, /* remaining size */
3411 FALSE
, /* should_return */
3412 MEMORY_OBJECT_COPY_SYNC
,
3415 #if DEVELOPMENT || DEBUG
3417 upl_cow_again_pages
+= xfer_size
>> PAGE_SHIFT
;
3421 * remember the copy object we synced with
3423 last_copy_object
= object
->copy
;
3425 dst_page
= vm_page_lookup(object
, dst_offset
);
3427 if (dst_page
!= VM_PAGE_NULL
) {
3429 if ((cntrl_flags
& UPL_RET_ONLY_ABSENT
)) {
3431 if ( !(dst_page
->absent
&& dst_page
->list_req_pending
) ) {
3433 * skip over pages already present in the cache
3436 user_page_list
[entry
].phys_addr
= 0;
3441 if ( !(dst_page
->list_req_pending
) ) {
3443 if (dst_page
->cleaning
) {
3445 * someone else is writing to the page... wait...
3447 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
3452 if (dst_page
->fictitious
&&
3453 dst_page
->phys_page
== vm_page_fictitious_addr
) {
3454 assert( !dst_page
->speculative
);
3456 * dump the fictitious page
3458 dst_page
->list_req_pending
= FALSE
;
3460 VM_PAGE_FREE(dst_page
);
3464 } else if (dst_page
->absent
) {
3466 * the default_pager case
3468 dst_page
->list_req_pending
= FALSE
;
3469 PAGE_WAKEUP_DONE(dst_page
);
3471 } else if (dst_page
->pageout
|| dst_page
->cleaning
) {
3473 * page was earmarked by vm_pageout_scan
3474 * to be cleaned and stolen... we're going
3475 * to take it back since we are not attempting
3476 * to read that page and we don't want to stall
3477 * waiting for it to be cleaned for 2 reasons...
3478 * 1 - no use paging it out and back in
 * 2 - if we stall, we may cause a deadlock in
 *     the FS trying to acquire its locks
3481 * on the VNOP_PAGEOUT path presuming that
3482 * those locks are already held on the read
3483 * path before trying to create this UPL
3485 * so undo all of the state that vm_pageout_scan
3488 vm_pageout_queue_steal(dst_page
, FALSE
);
3489 PAGE_WAKEUP_DONE(dst_page
);
3493 if (dst_page
== VM_PAGE_NULL
) {
3494 if (object
->private) {
3496 * This is a nasty wrinkle for users
3497 * of upl who encounter device or
3498 * private memory however, it is
3499 * unavoidable, only a fault can
3500 * resolve the actual backing
3501 * physical page by asking the
3505 user_page_list
[entry
].phys_addr
= 0;
3509 if (object
->scan_collisions
) {
3511 * the pageout_scan thread is trying to steal
3512 * pages from this object, but has run into our
3513 * lock... grab 2 pages from the head of the object...
3514 * the first is freed on behalf of pageout_scan, the
3515 * 2nd is for our own use... we use vm_object_page_grab
3516 * in both cases to avoid taking pages from the free
3517 * list since we are under memory pressure and our
3518 * lock on this object is getting in the way of
3521 dst_page
= vm_object_page_grab(object
);
3523 if (dst_page
!= VM_PAGE_NULL
)
3524 vm_page_release(dst_page
);
3526 dst_page
= vm_object_page_grab(object
);
3528 if (dst_page
== VM_PAGE_NULL
) {
3530 * need to allocate a page
3532 dst_page
= vm_page_grab();
3534 if (dst_page
== VM_PAGE_NULL
) {
3535 if ( (cntrl_flags
& (UPL_RET_ONLY_ABSENT
| UPL_NOBLOCK
)) == (UPL_RET_ONLY_ABSENT
| UPL_NOBLOCK
)) {
3537 * we don't want to stall waiting for pages to come onto the free list
3538 * while we're already holding absent pages in this UPL
3539 * the caller will deal with the empty slots
3542 user_page_list
[entry
].phys_addr
= 0;
3547 * no pages available... wait
3548 * then try again for the same
3551 vm_object_unlock(object
);
3553 OSAddAtomic(size_in_pages
, &vm_upl_wait_for_pages
);
3555 VM_DEBUG_EVENT(vm_upl_page_wait
, VM_UPL_PAGE_WAIT
, DBG_FUNC_START
, vm_upl_wait_for_pages
, 0, 0, 0);
3558 OSAddAtomic(-size_in_pages
, &vm_upl_wait_for_pages
);
3560 VM_DEBUG_EVENT(vm_upl_page_wait
, VM_UPL_PAGE_WAIT
, DBG_FUNC_END
, vm_upl_wait_for_pages
, 0, 0, 0);
3562 vm_object_lock(object
);
3566 vm_page_insert(dst_page
, object
, dst_offset
);
3568 dst_page
->absent
= TRUE
;
3569 dst_page
->busy
= FALSE
;
3571 if (cntrl_flags
& UPL_RET_ONLY_ABSENT
) {
3573 * if UPL_RET_ONLY_ABSENT was specified,
3574 * than we're definitely setting up a
3575 * upl for a clustered read/pagein
3576 * operation... mark the pages as clustered
3577 * so upl_commit_range can put them on the
3580 dst_page
->clustered
= TRUE
;
3583 if (dst_page
->fictitious
) {
3584 panic("need corner case for fictitious page");
3586 if (dst_page
->busy
) {
3588 * someone else is playing with the
3589 * page. We will have to wait.
3591 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
3598 if (cntrl_flags
& UPL_ENCRYPT
) {
3600 * The page is going to be encrypted when we
3601 * get it from the pager, so mark it so.
3603 dst_page
->encrypted
= TRUE
;
3606 * Otherwise, the page will not contain
3609 dst_page
->encrypted
= FALSE
;
3611 dst_page
->overwriting
= TRUE
;
3613 if (dst_page
->pmapped
) {
3614 if ( !(cntrl_flags
& UPL_FILE_IO
))
3616 * eliminate all mappings from the
 * original object and its progeny
3619 refmod_state
= pmap_disconnect(dst_page
->phys_page
);
3621 refmod_state
= pmap_get_refmod(dst_page
->phys_page
);
3625 hw_dirty
= refmod_state
& VM_MEM_MODIFIED
;
3626 dirty
= hw_dirty
? TRUE
: dst_page
->dirty
;
3628 if (cntrl_flags
& UPL_SET_LITE
) {
3629 unsigned int pg_num
;
3631 pg_num
= (unsigned int) ((dst_offset
-offset
)/PAGE_SIZE
);
3632 assert(pg_num
== (dst_offset
-offset
)/PAGE_SIZE
);
3633 lite_list
[pg_num
>>5] |= 1 << (pg_num
& 31);
3636 pmap_clear_modify(dst_page
->phys_page
);
3639 * Mark original page as cleaning
3642 dst_page
->cleaning
= TRUE
;
3643 dst_page
->precious
= FALSE
;
3646 * use pageclean setup, it is more
3647 * convenient even for the pageout
3650 vm_object_lock(upl
->map_object
);
3651 vm_pageclean_setup(dst_page
, alias_page
, upl
->map_object
, size
- xfer_size
);
3652 vm_object_unlock(upl
->map_object
);
3654 alias_page
->absent
= FALSE
;
3658 if (cntrl_flags
& UPL_REQUEST_SET_DIRTY
) {
3659 upl
->flags
&= ~UPL_CLEAR_DIRTY
;
3660 upl
->flags
|= UPL_SET_DIRTY
;
3662 upl
->flags
|= UPL_SET_DIRTY
;
3663 } else if (cntrl_flags
& UPL_CLEAN_IN_PLACE
) {
3665 * clean in place for read implies
3666 * that a write will be done on all
3667 * the pages that are dirty before
3668 * a upl commit is done. The caller
3669 * is obligated to preserve the
3670 * contents of all pages marked dirty
3672 upl
->flags
|= UPL_CLEAR_DIRTY
;
3674 dst_page
->dirty
= dirty
;
3677 dst_page
->precious
= TRUE
;
3679 if ( !VM_PAGE_WIRED(dst_page
)) {
3681 * deny access to the target page while
3682 * it is being worked on
3684 dst_page
->busy
= TRUE
;
3686 dwp
->dw_mask
|= DW_vm_page_wire
;
3689 * We might be about to satisfy a fault which has been
3690 * requested. So no need for the "restart" bit.
3692 dst_page
->restart
= FALSE
;
3693 if (!dst_page
->absent
&& !(cntrl_flags
& UPL_WILL_MODIFY
)) {
3695 * expect the page to be used
3697 dwp
->dw_mask
|= DW_set_reference
;
3699 if (cntrl_flags
& UPL_PRECIOUS
) {
3700 if (dst_page
->object
->internal
) {
3701 dst_page
->dirty
= TRUE
;
3702 dst_page
->precious
= FALSE
;
3704 dst_page
->precious
= TRUE
;
3707 dst_page
->precious
= FALSE
;
3711 upl
->flags
|= UPL_HAS_BUSY
;
3713 if (dst_page
->phys_page
> upl
->highest_page
)
3714 upl
->highest_page
= dst_page
->phys_page
;
3715 if (user_page_list
) {
3716 user_page_list
[entry
].phys_addr
= dst_page
->phys_page
;
3717 user_page_list
[entry
].pageout
= dst_page
->pageout
;
3718 user_page_list
[entry
].absent
= dst_page
->absent
;
3719 user_page_list
[entry
].dirty
= dst_page
->dirty
;
3720 user_page_list
[entry
].precious
= dst_page
->precious
;
3721 user_page_list
[entry
].device
= FALSE
;
3722 if (dst_page
->clustered
== TRUE
)
3723 user_page_list
[entry
].speculative
= dst_page
->speculative
;
3725 user_page_list
[entry
].speculative
= FALSE
;
3726 user_page_list
[entry
].cs_validated
= dst_page
->cs_validated
;
3727 user_page_list
[entry
].cs_tainted
= dst_page
->cs_tainted
;
3730 * if UPL_RET_ONLY_ABSENT is set, then
3731 * we are working with a fresh page and we've
3732 * just set the clustered flag on it to
3733 * indicate that it was drug in as part of a
3734 * speculative cluster... so leave it alone
3736 if ( !(cntrl_flags
& UPL_RET_ONLY_ABSENT
)) {
3738 * someone is explicitly grabbing this page...
3739 * update clustered and speculative state
3742 VM_PAGE_CONSUME_CLUSTERED(dst_page
);
3746 if (dwp
->dw_mask
& DW_vm_page_activate
)
3747 VM_STAT_INCR(reactivations
);
3749 VM_PAGE_ADD_DELAYED_WORK(dwp
, dst_page
, dw_count
);
3751 if (dw_count
>= dw_limit
) {
3752 vm_page_do_delayed_work(object
, &dw_array
[0], dw_count
);
3759 dst_offset
+= PAGE_SIZE_64
;
3760 xfer_size
-= PAGE_SIZE
;
3763 vm_page_do_delayed_work(object
, &dw_array
[0], dw_count
);
3765 if (alias_page
!= NULL
) {
3766 VM_PAGE_FREE(alias_page
);
3769 if (page_list_count
!= NULL
) {
3770 if (upl
->flags
& UPL_INTERNAL
)
3771 *page_list_count
= 0;
3772 else if (*page_list_count
> entry
)
3773 *page_list_count
= entry
;
3778 vm_object_unlock(object
);
3780 return KERN_SUCCESS
;
/* JMM - Backward compatibility for now */
kern_return_t
vm_fault_list_request(	/* forward */
	memory_object_control_t		control,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_t		**user_page_list_ptr,
	unsigned int		page_list_count,
	int			cntrl_flags);
kern_return_t
vm_fault_list_request(
	memory_object_control_t		control,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_t		**user_page_list_ptr,
	unsigned int		page_list_count,
	int			cntrl_flags)
{
	unsigned int		local_list_count;
	upl_page_info_t		*user_page_list;
	kern_return_t		kr;

	if((cntrl_flags & UPL_VECTOR)==UPL_VECTOR)
		return KERN_INVALID_ARGUMENT;

	if (user_page_list_ptr != NULL) {
		local_list_count = page_list_count;
		user_page_list = *user_page_list_ptr;
	} else {
		local_list_count = 0;
		user_page_list = NULL;
	}
	kr =  memory_object_upl_request(control,
				offset,
				size,
				upl_ptr,
				user_page_list,
				&local_list_count,
				cntrl_flags);

	if(kr != KERN_SUCCESS)
		return kr;

	if ((user_page_list_ptr != NULL) && (cntrl_flags & UPL_INTERNAL)) {
		*user_page_list_ptr = UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr);
	}

	return KERN_SUCCESS;
}
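
#if 0
/*
 * Illustrative sketch only (not compiled): one way a caller might use the
 * UPL request interface above to populate and inspect a range of a
 * vm_object.  'object', 'offset' and 'size' are assumed to come from the
 * caller, the helper name is hypothetical, and error handling is elided.
 */
static void
example_populate_range(vm_object_t object, vm_object_offset_t offset, upl_size_t size)
{
	upl_t			upl = NULL;
	upl_page_info_t		*pl;
	unsigned int		page_list_count = 0;

	if (vm_object_upl_request(object, offset, size,
				  &upl, NULL, &page_list_count,
				  UPL_SET_INTERNAL | UPL_SET_LITE | UPL_COPYOUT_FROM) != KERN_SUCCESS)
		return;

	/* for an internal UPL the page list lives inside the UPL itself */
	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	(void) pl;

	/* ... inspect pl[], then commit or abort the range ... */
	upl_abort(upl, 0);
	upl_deallocate(upl);
}
#endif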
/*
 *	Routine:	vm_object_super_upl_request
 *	Purpose:
 *		Cause the population of a portion of a vm_object
 *		in much the same way as memory_object_upl_request.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		However, the region may be expanded up to the super
 *		cluster size provided.
 */

__private_extern__ kern_return_t
vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	if (object->paging_offset > offset || ((cntrl_flags & UPL_VECTOR)==UPL_VECTOR))
		return KERN_FAILURE;

	assert(object->paging_in_progress);
	offset = offset - object->paging_offset;

	if (super_cluster > size) {

		vm_object_offset_t	base_offset;
		upl_size_t		super_size;
		vm_object_size_t	super_size_64;

		base_offset = (offset & ~((vm_object_offset_t) super_cluster - 1));
		super_size = (offset + size) > (base_offset + super_cluster) ? super_cluster<<1 : super_cluster;
		super_size_64 = ((base_offset + super_size) > object->vo_size) ? (object->vo_size - base_offset) : super_size;
		super_size = (upl_size_t) super_size_64;
		assert(super_size == super_size_64);

		if (offset > (base_offset + super_size)) {
			panic("vm_object_super_upl_request: Missed target pageout"
			      " %#llx,%#llx, %#x, %#x, %#x, %#llx\n",
			      offset, base_offset, super_size, super_cluster,
			      size, object->paging_offset);
		}
		/*
		 * apparently there is a case where the vm requests a
		 * page to be written out whose offset is beyond the
		 * object size
		 */
		if ((offset + size) > (base_offset + super_size)) {
			super_size_64 = (offset + size) - base_offset;
			super_size = (upl_size_t) super_size_64;
			assert(super_size == super_size_64);
		}

		offset = base_offset;
		size = super_size;
	}
	return vm_object_upl_request(object, offset, size, upl, user_page_list, page_list_count, cntrl_flags);
}
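
/*
 * Worked example of the expansion above (illustrative numbers only): with
 * super_cluster = 0x10000 (64KB) and a request for offset = 0x13000,
 * size = 0x2000:
 *
 *	base_offset   = 0x13000 & ~0xffffULL = 0x10000
 *	offset + size = 0x15000 <= base_offset + super_cluster = 0x20000,
 *	so super_size = 0x10000
 *
 * and vm_object_upl_request() is issued for 0x10000..0x20000, clipped at
 * the end of the object (vo_size) if necessary.
 */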
3904 vm_map_address_t offset
,
3905 upl_size_t
*upl_size
,
3907 upl_page_info_array_t page_list
,
3908 unsigned int *count
,
3911 vm_map_entry_t entry
;
3913 int force_data_sync
;
3915 vm_object_t local_object
;
3916 vm_map_offset_t local_offset
;
3917 vm_map_offset_t local_start
;
3920 caller_flags
= *flags
;
3922 if (caller_flags
& ~UPL_VALID_FLAGS
) {
3924 * For forward compatibility's sake,
3925 * reject any unknown flag.
3927 return KERN_INVALID_VALUE
;
3929 force_data_sync
= (caller_flags
& UPL_FORCE_DATA_SYNC
);
3930 sync_cow_data
= !(caller_flags
& UPL_COPYOUT_FROM
);
3933 return KERN_INVALID_ARGUMENT
;
3936 vm_map_lock_read(map
);
3938 if (vm_map_lookup_entry(map
, offset
, &entry
)) {
3940 if ((entry
->vme_end
- offset
) < *upl_size
) {
3941 *upl_size
= (upl_size_t
) (entry
->vme_end
- offset
);
3942 assert(*upl_size
== entry
->vme_end
- offset
);
3945 if (caller_flags
& UPL_QUERY_OBJECT_TYPE
) {
3948 if ( !entry
->is_sub_map
&& entry
->object
.vm_object
!= VM_OBJECT_NULL
) {
3949 if (entry
->object
.vm_object
->private)
3950 *flags
= UPL_DEV_MEMORY
;
3952 if (entry
->object
.vm_object
->phys_contiguous
)
3953 *flags
|= UPL_PHYS_CONTIG
;
3955 vm_map_unlock_read(map
);
3957 return KERN_SUCCESS
;
3960 if (entry
->is_sub_map
) {
3963 submap
= entry
->object
.sub_map
;
3964 local_start
= entry
->vme_start
;
3965 local_offset
= entry
->offset
;
3967 vm_map_reference(submap
);
3968 vm_map_unlock_read(map
);
3970 ret
= vm_map_create_upl(submap
,
3971 local_offset
+ (offset
- local_start
),
3972 upl_size
, upl
, page_list
, count
, flags
);
3973 vm_map_deallocate(submap
);
3978 if (entry
->object
.vm_object
== VM_OBJECT_NULL
|| !entry
->object
.vm_object
->phys_contiguous
) {
3979 if ((*upl_size
/PAGE_SIZE
) > MAX_UPL_SIZE
)
3980 *upl_size
= MAX_UPL_SIZE
* PAGE_SIZE
;
3984 * Create an object if necessary.
3986 if (entry
->object
.vm_object
== VM_OBJECT_NULL
) {
3988 if (vm_map_lock_read_to_write(map
))
3989 goto REDISCOVER_ENTRY
;
3991 entry
->object
.vm_object
= vm_object_allocate((vm_size_t
)(entry
->vme_end
- entry
->vme_start
));
3994 vm_map_lock_write_to_read(map
);
3996 if (!(caller_flags
& UPL_COPYOUT_FROM
)) {
3997 if (!(entry
->protection
& VM_PROT_WRITE
)) {
3998 vm_map_unlock_read(map
);
3999 return KERN_PROTECTION_FAILURE
;
4002 #if !CONFIG_EMBEDDED
4003 local_object
= entry
->object
.vm_object
;
4004 if (vm_map_entry_should_cow_for_true_share(entry
) &&
4005 local_object
->vo_size
> *upl_size
&&
4010 * Set up the targeted range for copy-on-write to avoid
4011 * applying true_share/copy_delay to the entire object.
4014 if (vm_map_lock_read_to_write(map
)) {
4015 goto REDISCOVER_ENTRY
;
4018 vm_map_clip_start(map
, entry
, vm_map_trunc_page(offset
));
4019 vm_map_clip_end(map
, entry
, vm_map_round_page(offset
+ *upl_size
));
4020 prot
= entry
->protection
& ~VM_PROT_WRITE
;
4021 if (override_nx(map
, entry
->alias
) && prot
)
4022 prot
|= VM_PROT_EXECUTE
;
4023 vm_object_pmap_protect(local_object
,
4025 entry
->vme_end
- entry
->vme_start
,
4026 ((entry
->is_shared
|| map
->mapped
)
4031 entry
->needs_copy
= TRUE
;
4033 vm_map_lock_write_to_read(map
);
4035 #endif /* !CONFIG_EMBEDDED */
4037 if (entry
->needs_copy
) {
4039 * Honor copy-on-write for COPY_SYMMETRIC
4044 vm_object_offset_t new_offset
;
4047 vm_map_version_t version
;
4052 if (vm_map_lookup_locked(&local_map
,
4053 offset
, VM_PROT_WRITE
,
4054 OBJECT_LOCK_EXCLUSIVE
,
4056 &new_offset
, &prot
, &wired
,
4058 &real_map
) != KERN_SUCCESS
) {
4059 vm_map_unlock_read(local_map
);
4060 return KERN_FAILURE
;
4062 if (real_map
!= map
)
4063 vm_map_unlock(real_map
);
4064 vm_map_unlock_read(local_map
);
4066 vm_object_unlock(object
);
4068 goto REDISCOVER_ENTRY
;
4071 if (sync_cow_data
) {
4072 if (entry
->object
.vm_object
->shadow
|| entry
->object
.vm_object
->copy
) {
4073 local_object
= entry
->object
.vm_object
;
4074 local_start
= entry
->vme_start
;
4075 local_offset
= entry
->offset
;
4077 vm_object_reference(local_object
);
4078 vm_map_unlock_read(map
);
4080 if (local_object
->shadow
&& local_object
->copy
) {
4081 vm_object_lock_request(
4082 local_object
->shadow
,
4083 (vm_object_offset_t
)
4084 ((offset
- local_start
) +
4086 local_object
->vo_shadow_offset
,
4088 MEMORY_OBJECT_DATA_SYNC
,
4091 sync_cow_data
= FALSE
;
4092 vm_object_deallocate(local_object
);
4094 goto REDISCOVER_ENTRY
;
4097 if (force_data_sync
) {
4098 local_object
= entry
->object
.vm_object
;
4099 local_start
= entry
->vme_start
;
4100 local_offset
= entry
->offset
;
4102 vm_object_reference(local_object
);
4103 vm_map_unlock_read(map
);
4105 vm_object_lock_request(
4107 (vm_object_offset_t
)
4108 ((offset
- local_start
) + local_offset
),
4109 (vm_object_size_t
)*upl_size
, FALSE
,
4110 MEMORY_OBJECT_DATA_SYNC
,
4113 force_data_sync
= FALSE
;
4114 vm_object_deallocate(local_object
);
4116 goto REDISCOVER_ENTRY
;
4118 if (entry
->object
.vm_object
->private)
4119 *flags
= UPL_DEV_MEMORY
;
4123 if (entry
->object
.vm_object
->phys_contiguous
)
4124 *flags
|= UPL_PHYS_CONTIG
;
4126 local_object
= entry
->object
.vm_object
;
4127 local_offset
= entry
->offset
;
4128 local_start
= entry
->vme_start
;
4130 vm_object_reference(local_object
);
4131 vm_map_unlock_read(map
);
4133 ret
= vm_object_iopl_request(local_object
,
4134 (vm_object_offset_t
) ((offset
- local_start
) + local_offset
),
4140 vm_object_deallocate(local_object
);
4144 vm_map_unlock_read(map
);
4146 return(KERN_FAILURE
);
4150 * Internal routine to enter a UPL into a VM map.
4152 * JMM - This should just be doable through the standard
4153 * vm_map_enter() API.
4159 vm_map_offset_t
*dst_addr
)
4162 vm_object_offset_t offset
;
4163 vm_map_offset_t addr
;
4166 int isVectorUPL
= 0, curr_upl
=0;
4167 upl_t vector_upl
= NULL
;
4168 vm_offset_t vector_upl_dst_addr
= 0;
4169 vm_map_t vector_upl_submap
= NULL
;
4170 upl_offset_t subupl_offset
= 0;
4171 upl_size_t subupl_size
= 0;
4173 if (upl
== UPL_NULL
)
4174 return KERN_INVALID_ARGUMENT
;
4176 if((isVectorUPL
= vector_upl_is_valid(upl
))) {
4177 int mapped
=0,valid_upls
=0;
4180 upl_lock(vector_upl
);
4181 for(curr_upl
=0; curr_upl
< MAX_VECTOR_UPL_ELEMENTS
; curr_upl
++) {
4182 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
);
4186 if (UPL_PAGE_LIST_MAPPED
& upl
->flags
)
4191 if(mapped
!= valid_upls
)
panic("Only %d of the %d sub-upls within the Vector UPL are already mapped\n", mapped, valid_upls);
4194 upl_unlock(vector_upl
);
4195 return KERN_FAILURE
;
4199 kr
= kmem_suballoc(map
, &vector_upl_dst_addr
, vector_upl
->size
, FALSE
, VM_FLAGS_ANYWHERE
, &vector_upl_submap
);
4200 if( kr
!= KERN_SUCCESS
)
4201 panic("Vector UPL submap allocation failed\n");
4202 map
= vector_upl_submap
;
4203 vector_upl_set_submap(vector_upl
, vector_upl_submap
, vector_upl_dst_addr
);
4209 process_upl_to_enter
:
4211 if(curr_upl
== MAX_VECTOR_UPL_ELEMENTS
) {
4212 *dst_addr
= vector_upl_dst_addr
;
4213 upl_unlock(vector_upl
);
4214 return KERN_SUCCESS
;
4216 upl
= vector_upl_subupl_byindex(vector_upl
, curr_upl
++ );
4218 goto process_upl_to_enter
;
4220 vector_upl_get_iostate(vector_upl
, upl
, &subupl_offset
, &subupl_size
);
4221 *dst_addr
= (vm_map_offset_t
)(vector_upl_dst_addr
+ (vm_map_offset_t
)subupl_offset
);
4224 * check to see if already mapped
4226 if (UPL_PAGE_LIST_MAPPED
& upl
->flags
) {
4228 return KERN_FAILURE
;
4231 if ((!(upl
->flags
& UPL_SHADOWED
)) &&
4232 ((upl
->flags
& UPL_HAS_BUSY
) ||
4233 !((upl
->flags
& (UPL_DEVICE_MEMORY
| UPL_IO_WIRE
)) || (upl
->map_object
->phys_contiguous
)))) {
4236 vm_page_t alias_page
;
4237 vm_object_offset_t new_offset
;
4238 unsigned int pg_num
;
4239 wpl_array_t lite_list
;
4241 if (upl
->flags
& UPL_INTERNAL
) {
4242 lite_list
= (wpl_array_t
)
4243 ((((uintptr_t)upl
) + sizeof(struct upl
))
4244 + ((upl
->size
/PAGE_SIZE
) * sizeof(upl_page_info_t
)));
4246 lite_list
= (wpl_array_t
)(((uintptr_t)upl
) + sizeof(struct upl
));
4248 object
= upl
->map_object
;
4249 upl
->map_object
= vm_object_allocate(upl
->size
);
4251 vm_object_lock(upl
->map_object
);
4253 upl
->map_object
->shadow
= object
;
4254 upl
->map_object
->pageout
= TRUE
;
4255 upl
->map_object
->can_persist
= FALSE
;
4256 upl
->map_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
4257 upl
->map_object
->vo_shadow_offset
= upl
->offset
- object
->paging_offset
;
4258 upl
->map_object
->wimg_bits
= object
->wimg_bits
;
4259 offset
= upl
->map_object
->vo_shadow_offset
;
4263 upl
->flags
|= UPL_SHADOWED
;
4266 pg_num
= (unsigned int) (new_offset
/ PAGE_SIZE
);
4267 assert(pg_num
== new_offset
/ PAGE_SIZE
);
4269 if (lite_list
[pg_num
>>5] & (1 << (pg_num
& 31))) {
4271 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
4273 vm_object_lock(object
);
4275 m
= vm_page_lookup(object
, offset
);
4276 if (m
== VM_PAGE_NULL
) {
4277 panic("vm_upl_map: page missing\n");
4281 * Convert the fictitious page to a private
4282 * shadow of the real page.
4284 assert(alias_page
->fictitious
);
4285 alias_page
->fictitious
= FALSE
;
4286 alias_page
->private = TRUE
;
4287 alias_page
->pageout
= TRUE
;
4289 * since m is a page in the upl it must
4290 * already be wired or BUSY, so it's
4291 * safe to assign the underlying physical
4294 alias_page
->phys_page
= m
->phys_page
;
4296 vm_object_unlock(object
);
4298 vm_page_lockspin_queues();
4299 vm_page_wire(alias_page
);
4300 vm_page_unlock_queues();
4304 * The virtual page ("m") has to be wired in some way
4305 * here or its physical page ("m->phys_page") could
4306 * be recycled at any time.
4307 * Assuming this is enforced by the caller, we can't
4308 * get an encrypted page here. Since the encryption
4309 * key depends on the VM page's "pager" object and
4310 * the "paging_offset", we couldn't handle 2 pageable
4311 * VM pages (with different pagers and paging_offsets)
4312 * sharing the same physical page: we could end up
4313 * encrypting with one key (via one VM page) and
4314 * decrypting with another key (via the alias VM page).
4316 ASSERT_PAGE_DECRYPTED(m
);
4318 vm_page_insert(alias_page
, upl
->map_object
, new_offset
);
4320 assert(!alias_page
->wanted
);
4321 alias_page
->busy
= FALSE
;
4322 alias_page
->absent
= FALSE
;
4325 offset
+= PAGE_SIZE_64
;
4326 new_offset
+= PAGE_SIZE_64
;
4328 vm_object_unlock(upl
->map_object
);
4330 if (upl
->flags
& UPL_SHADOWED
)
4333 offset
= upl
->offset
- upl
->map_object
->paging_offset
;
4337 vm_object_reference(upl
->map_object
);
4342 * NEED A UPL_MAP ALIAS
4344 kr
= vm_map_enter(map
, dst_addr
, (vm_map_size_t
)size
, (vm_map_offset_t
) 0,
4345 VM_FLAGS_ANYWHERE
, upl
->map_object
, offset
, FALSE
,
4346 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
4348 if (kr
!= KERN_SUCCESS
) {
4354 kr
= vm_map_enter(map
, dst_addr
, (vm_map_size_t
)size
, (vm_map_offset_t
) 0,
4355 VM_FLAGS_FIXED
, upl
->map_object
, offset
, FALSE
,
4356 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
4358 panic("vm_map_enter failed for a Vector UPL\n");
4360 vm_object_lock(upl
->map_object
);
4362 for (addr
= *dst_addr
; size
> 0; size
-= PAGE_SIZE
, addr
+= PAGE_SIZE
) {
4363 m
= vm_page_lookup(upl
->map_object
, offset
);
4368 /* CODE SIGNING ENFORCEMENT: page has been wpmapped,
4369 * but only in kernel space. If this was on a user map,
4370 * we'd have to set the wpmapped bit. */
4371 /* m->wpmapped = TRUE; */
4372 assert(map
==kernel_map
);
4374 PMAP_ENTER(map
->pmap
, addr
, m
, VM_PROT_ALL
, 0, TRUE
);
4376 offset
+= PAGE_SIZE_64
;
4378 vm_object_unlock(upl
->map_object
);
4381 * hold a reference for the mapping
4384 upl
->flags
|= UPL_PAGE_LIST_MAPPED
;
4385 upl
->kaddr
= (vm_offset_t
) *dst_addr
;
4386 assert(upl
->kaddr
== *dst_addr
);
4389 goto process_upl_to_enter
;
4393 return KERN_SUCCESS
;
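/*
 * A successful mapping leaves UPL_PAGE_LIST_MAPPED set in upl->flags and the
 * kernel virtual address recorded in upl->kaddr, with an extra reference held
 * for the mapping; vm_upl_unmap() below clears the flag, resets kaddr and
 * drops that reference when the mapping is torn down.
 */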
/*
 * Internal routine to remove a UPL mapping from a VM map.
 *
 * XXX - This should just be doable through a standard
 * vm_map_remove() operation.  Otherwise, implicit clean-up
 * of the target map won't be able to correctly remove
 * these (and release the reference on the UPL).  Having
 * to do this means we can't map these into user-space
 */
	int	isVectorUPL = 0, curr_upl = 0;
	upl_t	vector_upl = NULL;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if ((isVectorUPL = vector_upl_is_valid(upl))) {
		int unmapped = 0, valid_upls = 0;

		upl_lock(vector_upl);

		for (curr_upl = 0; curr_upl < MAX_VECTOR_UPL_ELEMENTS; curr_upl++) {
			upl = vector_upl_subupl_byindex(vector_upl, curr_upl);
			if (!(UPL_PAGE_LIST_MAPPED & upl->flags))

		if (unmapped != valid_upls)
			panic("%d of the %d sub-upls within the Vector UPL is/are not mapped\n", unmapped, valid_upls);

			upl_unlock(vector_upl);
			return KERN_FAILURE;

process_upl_to_remove:
		if (curr_upl == MAX_VECTOR_UPL_ELEMENTS) {
			vm_map_t	v_upl_submap;
			vm_offset_t	v_upl_submap_dst_addr;

			vector_upl_get_submap(vector_upl, &v_upl_submap, &v_upl_submap_dst_addr);

			vm_map_remove(map, v_upl_submap_dst_addr, v_upl_submap_dst_addr + vector_upl->size, VM_MAP_NO_FLAGS);
			vm_map_deallocate(v_upl_submap);
			upl_unlock(vector_upl);
			return KERN_SUCCESS;
		}
		upl = vector_upl_subupl_byindex(vector_upl, curr_upl++);

			goto process_upl_to_remove;

	if (upl->flags & UPL_PAGE_LIST_MAPPED) {

		assert(upl->ref_count > 1);
		upl->ref_count--;		/* removing mapping ref */

		upl->flags &= ~UPL_PAGE_LIST_MAPPED;
		upl->kaddr = (vm_offset_t) 0;

				      vm_map_trunc_page(addr),
				      vm_map_round_page(addr + size),

			return KERN_SUCCESS;

		/*
		 * If it's a Vectored UPL, we'll be removing the entire
		 * submap anyways, so no need to remove individual UPL
		 * element mappings from within the submap
		 */
		goto process_upl_to_remove;

	return KERN_FAILURE;
	upl_offset_t		offset,
	upl_page_info_t		*page_list,
	mach_msg_type_number_t	count,
	upl_size_t		xfer_size, subupl_size = size;
	vm_object_t		shadow_object;
	vm_object_offset_t	target_offset;
	upl_offset_t		subupl_offset = offset;
	wpl_array_t		lite_list;
	int			clear_refmod = 0;
	int			pgpgout_count = 0;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	int			isVectorUPL = 0;
	upl_t			vector_upl = NULL;
	boolean_t		should_be_throttled = FALSE;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if ((isVectorUPL = vector_upl_is_valid(upl))) {
		upl_lock(vector_upl);

process_upl_to_commit:

		offset = subupl_offset;

			upl_unlock(vector_upl);
			return KERN_SUCCESS;

		upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);

			upl_unlock(vector_upl);
			return KERN_FAILURE;

		page_list = UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(upl);
		subupl_size -= size;
		subupl_offset += size;

	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
		(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);

		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);

		upl->upl_commit_index++;
	}
	if (upl->flags & UPL_DEVICE_MEMORY)

	else if ((offset + size) <= upl->size)

			upl_unlock(vector_upl);

		return KERN_FAILURE;

	if (upl->flags & UPL_SET_DIRTY)
		flags |= UPL_COMMIT_SET_DIRTY;
	if (upl->flags & UPL_CLEAR_DIRTY)
		flags |= UPL_COMMIT_CLEAR_DIRTY;

	if (upl->flags & UPL_INTERNAL)
		lite_list = (wpl_array_t) ((((uintptr_t)upl) + sizeof(struct upl))
					   + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
	else
		lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));

	object = upl->map_object;

	if (upl->flags & UPL_SHADOWED) {
		vm_object_lock(object);
		shadow_object = object->shadow;
	} else
		shadow_object = object;

	entry = offset/PAGE_SIZE;
	target_offset = (vm_object_offset_t)offset;

	if (upl->flags & UPL_KERNEL_OBJECT)
		vm_object_lock_shared(shadow_object);
	else
		vm_object_lock(shadow_object);

	if (upl->flags & UPL_ACCESS_BLOCKED) {
		assert(shadow_object->blocked_access);
		shadow_object->blocked_access = FALSE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
	}
	if (shadow_object->code_signed) {
		/*
		 * If the object is code-signed, do not let this UPL tell
		 * us if the pages are valid or not.  Let the pages be
		 * validated by VM the normal way (when they get mapped or
		 */
		flags &= ~UPL_COMMIT_CS_VALIDATED;
	}
		/*
		 * No page list to get the code-signing info from !?
		 */
		flags &= ~UPL_COMMIT_CS_VALIDATED;

	if (!VM_DYNAMIC_PAGING_ENABLED(memory_manager_default) && shadow_object->internal)
		should_be_throttled = TRUE;

	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
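	/*
	 * The commit loop below walks the requested range one page at a time:
	 * for UPL_LITE UPLs each page has one bit in lite_list (word
	 * pg_num >> 5, bit pg_num & 31) which is cleared as the page is
	 * committed, and the per-page state changes are accumulated in
	 * dwp->dw_mask and flushed in batches of up to dw_limit entries via
	 * vm_page_do_delayed_work().
	 */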
		if (upl->flags & UPL_LITE) {
			unsigned int	pg_num;

			pg_num = (unsigned int) (target_offset/PAGE_SIZE);
			assert(pg_num == target_offset/PAGE_SIZE);

			if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
				lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));

				if (!(upl->flags & UPL_KERNEL_OBJECT))
					m = vm_page_lookup(shadow_object, target_offset + (upl->offset - shadow_object->paging_offset));
			}
		}
		if (upl->flags & UPL_SHADOWED) {
			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {

			if (m == VM_PAGE_NULL)
				m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
		}
		if ((upl->flags & UPL_KERNEL_OBJECT) || m == VM_PAGE_NULL)
			goto commit_next_page;

		if (flags & UPL_COMMIT_CS_VALIDATED) {
			/*
			 * Set the code signing bits according to
			 * what the UPL says they should be.
			 */
			m->cs_validated = page_list[entry].cs_validated;
			m->cs_tainted = page_list[entry].cs_tainted;
		}
		if (upl->flags & UPL_IO_WIRE) {

				page_list[entry].phys_addr = 0;

			if (flags & UPL_COMMIT_SET_DIRTY) {
			} else if (flags & UPL_COMMIT_CLEAR_DIRTY) {

				if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
				    m->cs_validated && !m->cs_tainted) {
					/*
					 * This page is no longer dirty
					 * but could have been modified,
					 * so it will need to be
					 */
					m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
					vm_cs_validated_resets++;
#endif
					pmap_disconnect(m->phys_page);
				}
				clear_refmod |= VM_MEM_MODIFIED;
			}
			if (flags & UPL_COMMIT_INACTIVATE) {
				dwp->dw_mask |= DW_vm_page_deactivate_internal;
				clear_refmod |= VM_MEM_REFERENCED;
			}
			if (upl->flags & UPL_ACCESS_BLOCKED) {
				/*
				 * We blocked access to the pages in this UPL.
				 * Clear the "busy" bit and wake up any waiter
				 */
				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);
			}
			if (flags & UPL_COMMIT_FREE_ABSENT)
				dwp->dw_mask |= DW_vm_page_free;

				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);

				if ( !(dwp->dw_mask & DW_vm_page_deactivate_internal))
					dwp->dw_mask |= DW_vm_page_activate;

				dwp->dw_mask |= DW_vm_page_unwire;

			goto commit_next_page;
		}
		/*
		 * make sure to clear the hardware
		 * modify or reference bits before
		 * releasing the BUSY bit on this page
		 * otherwise we risk losing a legitimate
		 */
		if (flags & UPL_COMMIT_CLEAR_DIRTY) {

			if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
			    m->cs_validated && !m->cs_tainted) {
				/*
				 * This page is no longer dirty
				 * but could have been modified,
				 * so it will need to be
				 */
				m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
				vm_cs_validated_resets++;
#endif
				pmap_disconnect(m->phys_page);
			}
			clear_refmod |= VM_MEM_MODIFIED;
		}

			p = &(page_list[entry]);

			if (p->phys_addr && p->pageout && !m->pageout) {

				dwp->dw_mask |= DW_vm_page_wire;

			} else if (p->phys_addr &&
				   !p->pageout && m->pageout &&
				   !m->dump_cleaning) {

				m->overwriting = FALSE;

				dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);
			}
			page_list[entry].phys_addr = 0;

			m->dump_cleaning = FALSE;

			dwp->dw_mask |= DW_vm_pageout_throttle_up;

			m->cleaning = FALSE;
			m->encrypted_cleaning = FALSE;

#if MACH_CLUSTER_STATS
			if (m->wanted) vm_pageout_target_collisions++;
#endif
			if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
			    m->cs_validated && !m->cs_tainted) {
				/*
				 * This page is no longer dirty
				 * but could have been modified,
				 * so it will need to be
				 */
				m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
				vm_cs_validated_resets++;
#endif
				pmap_disconnect(m->phys_page);
			}
			if ((flags & UPL_COMMIT_SET_DIRTY) ||
			    (m->pmapped && (pmap_disconnect(m->phys_page) & VM_MEM_MODIFIED)))

				/*
				 * page was re-dirtied after we started
				 * the pageout... reactivate it since
				 * we don't know whether the on-disk
				 * copy matches what is now in memory
				 */
				dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy | DW_PAGE_WAKEUP);

				if (upl->flags & UPL_PAGEOUT) {
					CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
					VM_STAT_INCR(reactivations);
					DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
				}
				/*
				 * page has been successfully cleaned
				 * go ahead and free it for other use
				 */
				if (m->object->internal) {
					DTRACE_VM2(anonpgout, int, 1, (uint64_t *), NULL);
				} else {
					DTRACE_VM2(fspgout, int, 1, (uint64_t *), NULL);
				}
				dwp->dw_mask |= DW_vm_page_free;

				if (upl->flags & UPL_PAGEOUT) {
					CLUSTER_STAT(vm_pageout_target_page_freed++;)

					if (page_list[entry].dirty) {
						VM_STAT_INCR(pageouts);
						DTRACE_VM2(pgout, int, 1, (uint64_t *), NULL);
					}
				}
			goto commit_next_page;
#if MACH_CLUSTER_STATS
			m->dirty = pmap_is_modified(m->phys_page);

			if (m->dirty)   vm_pageout_cluster_dirtied++;
			else            vm_pageout_cluster_cleaned++;
			if (m->wanted)  vm_pageout_cluster_collisions++;
#endif
		if (! (flags & UPL_COMMIT_CS_VALIDATED) &&
		    m->cs_validated && !m->cs_tainted) {
			/*
			 * This page is no longer dirty
			 * but could have been modified,
			 * so it will need to be
			 */
			m->cs_validated = FALSE;
#if DEVELOPMENT || DEBUG
			vm_cs_validated_resets++;
#endif
			pmap_disconnect(m->phys_page);
		}
		if (m->overwriting) {
			/*
			 * the (COPY_OUT_FROM == FALSE) request_page_list case
			 */
				dwp->dw_mask |= DW_clear_busy;

				/*
				 * alternate (COPY_OUT_FROM == FALSE) page_list case
				 * Occurs when the original page was wired
				 * at the time of the list request
				 */
				assert(VM_PAGE_WIRED(m));

				dwp->dw_mask |= DW_vm_page_unwire; /* reactivates */

			m->overwriting = FALSE;
		}
		if (m->encrypted_cleaning == TRUE) {
			m->encrypted_cleaning = FALSE;

			dwp->dw_mask |= DW_clear_busy;
		}
		m->cleaning = FALSE;

		/*
		 * It is a part of the semantic of COPYOUT_FROM
		 * UPLs that a commit implies cache sync
		 * between the vm page and the backing store
		 * this can be used to strip the precious bit
		 */
		if ((upl->flags & UPL_PAGE_SYNC_DONE) || (flags & UPL_COMMIT_CLEAR_PRECIOUS))
			m->precious = FALSE;

		if (flags & UPL_COMMIT_SET_DIRTY)

		if (should_be_throttled == TRUE && !m->active && !m->inactive && !m->speculative && !m->throttled) {
			/*
			 * page coming back in from being 'frozen'...
			 * it was dirty before it was frozen, so keep it so
			 * the vm_page_activate will notice that it really belongs
			 * on the throttle queue and put it there
			 */
			dwp->dw_mask |= DW_vm_page_activate;

		} else if ((flags & UPL_COMMIT_INACTIVATE) && !m->clustered && !m->speculative) {
			dwp->dw_mask |= DW_vm_page_deactivate_internal;
			clear_refmod |= VM_MEM_REFERENCED;
		} else if (!m->active && !m->inactive && !m->speculative) {

			if (m->clustered || (flags & UPL_COMMIT_SPECULATE))
				dwp->dw_mask |= DW_vm_page_speculate;
			else if (m->reference)
				dwp->dw_mask |= DW_vm_page_activate;
			else {
				dwp->dw_mask |= DW_vm_page_deactivate_internal;
				clear_refmod |= VM_MEM_REFERENCED;
			}
		}
		if (upl->flags & UPL_ACCESS_BLOCKED) {
			/*
			 * We blocked access to the pages in this UPL.
			 * Clear the "busy" bit on this page before we
			 * wake up any waiter.
			 */
			dwp->dw_mask |= DW_clear_busy;
		}
		/*
		 * Wakeup any thread waiting for the page to be un-cleaning.
		 */
		dwp->dw_mask |= DW_PAGE_WAKEUP;

			pmap_clear_refmod(m->phys_page, clear_refmod);

		target_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;

			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

				if (dw_count >= dw_limit) {
					vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);

				if (dwp->dw_mask & DW_clear_busy)

				if (dwp->dw_mask & DW_PAGE_WAKEUP)

		vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);

	if (upl->flags & UPL_DEVICE_MEMORY)  {

	} else if (upl->flags & UPL_LITE) {

		pg_num = upl->size/PAGE_SIZE;
		pg_num = (pg_num + 31) >> 5;

		for (i = 0; i < pg_num; i++) {
			if (lite_list[i] != 0) {

		if (queue_empty(&upl->map_object->memq))

	if (occupied == 0) {
		/*
		 * If this UPL element belongs to a Vector UPL and is
		 * empty, then this is the right function to deallocate
		 * it. So go ahead set the *empty variable. The flag
		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
		 * should be considered relevant for the Vector UPL and not
		 * the internal UPLs.
		 */
		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)

		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
			/*
			 * this is not a paging object
			 * so we need to drop the paging reference
			 * that was taken when we created the UPL
			 * against this object
			 */
			vm_object_activity_end(shadow_object);
		} else {
			/*
			 * we donated the paging reference to
			 * the map object... vm_pageout_object_terminate
			 * will drop this reference
			 */
		}
	}
	vm_object_unlock(shadow_object);
	if (object != shadow_object)
		vm_object_unlock(object);

		/*
		 * If we completed our operations on an UPL that is
		 * part of a Vectored UPL and if empty is TRUE, then
		 * we should go ahead and deallocate this UPL element.
		 * Then we check if this was the last of the UPL elements
		 * within that Vectored UPL. If so, set empty to TRUE
		 * so that in ubc_upl_commit_range or ubc_upl_commit, we
		 * can go ahead and deallocate the Vector UPL too.
		 */
			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
			upl_deallocate(upl);

		goto process_upl_to_commit;

	if (pgpgout_count) {
		DTRACE_VM2(pgpgout, int, pgpgout_count, (uint64_t *), NULL);
	}
	return KERN_SUCCESS;
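/*
 * upl_abort_range() follows: it undoes a range of a UPL instead of
 * committing it.  For I/O-wired UPLs that are not being dumped, the abort
 * is simply redirected to upl_commit_range() with UPL_COMMIT_FREE_ABSENT.
 */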
	upl_offset_t		offset,
	upl_size_t		xfer_size, subupl_size = size;
	vm_object_t		shadow_object;
	vm_object_offset_t	target_offset;
	upl_offset_t		subupl_offset = offset;
	wpl_array_t		lite_list;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;
	int			isVectorUPL = 0;
	upl_t			vector_upl = NULL;

	if (upl == UPL_NULL)
		return KERN_INVALID_ARGUMENT;

	if ( (upl->flags & UPL_IO_WIRE) && !(error & UPL_ABORT_DUMP_PAGES) )
		return upl_commit_range(upl, offset, size, UPL_COMMIT_FREE_ABSENT, NULL, 0, empty);

	if ((isVectorUPL = vector_upl_is_valid(upl))) {
		upl_lock(vector_upl);

process_upl_to_abort:

		offset = subupl_offset;

			upl_unlock(vector_upl);
			return KERN_SUCCESS;

		upl = vector_upl_subupl_byoffset(vector_upl, &offset, &size);

			upl_unlock(vector_upl);
			return KERN_FAILURE;

		subupl_size -= size;
		subupl_offset += size;

	if (upl->upl_commit_index < UPL_DEBUG_COMMIT_RECORDS) {
		(void) OSBacktrace(&upl->upl_commit_records[upl->upl_commit_index].c_retaddr[0], UPL_DEBUG_STACK_FRAMES);

		upl->upl_commit_records[upl->upl_commit_index].c_beg = offset;
		upl->upl_commit_records[upl->upl_commit_index].c_end = (offset + size);
		upl->upl_commit_records[upl->upl_commit_index].c_aborted = 1;

		upl->upl_commit_index++;
	}
	if (upl->flags & UPL_DEVICE_MEMORY)

	else if ((offset + size) <= upl->size)

			upl_unlock(vector_upl);

		return KERN_FAILURE;

	if (upl->flags & UPL_INTERNAL) {
		lite_list = (wpl_array_t)
			((((uintptr_t)upl) + sizeof(struct upl))
			 + ((upl->size/PAGE_SIZE) * sizeof(upl_page_info_t)));
	} else {
		lite_list = (wpl_array_t)
			(((uintptr_t)upl) + sizeof(struct upl));
	}
	object = upl->map_object;

	if (upl->flags & UPL_SHADOWED) {
		vm_object_lock(object);
		shadow_object = object->shadow;
	} else
		shadow_object = object;

	entry = offset/PAGE_SIZE;
	target_offset = (vm_object_offset_t)offset;

	if (upl->flags & UPL_KERNEL_OBJECT)
		vm_object_lock_shared(shadow_object);
	else
		vm_object_lock(shadow_object);

	if (upl->flags & UPL_ACCESS_BLOCKED) {
		assert(shadow_object->blocked_access);
		shadow_object->blocked_access = FALSE;
		vm_object_wakeup(object, VM_OBJECT_EVENT_UNBLOCKED);
	}

	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

	if ((error & UPL_ABORT_DUMP_PAGES) && (upl->flags & UPL_KERNEL_OBJECT))
		panic("upl_abort_range: kernel_object being DUMPED");
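	/*
	 * The abort loop below mirrors the commit loop: it clears the
	 * lite_list bit for each page, looks the page up in the shadow
	 * object, and uses the UPL_ABORT_* bits in "error"
	 * (UPL_ABORT_RESTART, UPL_ABORT_UNAVAILABLE, UPL_ABORT_ERROR,
	 * UPL_ABORT_DUMP_PAGES, UPL_ABORT_REFERENCE) to decide how each
	 * page should be handled.
	 */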
		if (upl->flags & UPL_LITE) {
			unsigned int	pg_num;

			pg_num = (unsigned int) (target_offset/PAGE_SIZE);
			assert(pg_num == target_offset/PAGE_SIZE);

			if (lite_list[pg_num>>5] & (1 << (pg_num & 31))) {
				lite_list[pg_num>>5] &= ~(1 << (pg_num & 31));

				if ( !(upl->flags & UPL_KERNEL_OBJECT))
					m = vm_page_lookup(shadow_object, target_offset +
							   (upl->offset - shadow_object->paging_offset));
			}
		}
		if (upl->flags & UPL_SHADOWED) {
			if ((t = vm_page_lookup(object, target_offset)) != VM_PAGE_NULL) {

			if (m == VM_PAGE_NULL)
				m = vm_page_lookup(shadow_object, target_offset + object->vo_shadow_offset);
		}
		if ((upl->flags & UPL_KERNEL_OBJECT))
			goto abort_next_page;

		if (m != VM_PAGE_NULL) {

				boolean_t	must_free = TRUE;

				/*
				 * COPYOUT = FALSE case
				 * check for error conditions which must
				 * be passed back to the pages customer
				 */
				if (error & UPL_ABORT_RESTART) {

				} else if (error & UPL_ABORT_UNAVAILABLE) {

				} else if (error & UPL_ABORT_ERROR) {

				/*
				 * This page was a part of a speculative
				 * read-ahead initiated by the kernel
				 * itself.  No one is expecting this
				 * page and no one will clean up its
				 * error state if it ever becomes valid
				 *
				 * We have to free it here.
				 */
				/*
				 * If the page was already encrypted,
				 * we don't really need to decrypt it
				 * now.  It will get decrypted later,
				 * on demand, as soon as someone needs
				 * to access its contents.
				 */
				m->cleaning = FALSE;
				m->encrypted_cleaning = FALSE;

				if (m->overwriting && !m->busy) {
					/*
					 * this shouldn't happen since
					 * this is an 'absent' page, but
					 * it doesn't hurt to check for
					 * the 'alternate' method of
					 * stabilizing the page...
					 * we will mark 'busy' to be cleared
					 * in the following code which will
					 * take care of the primary stabilization
					 * method (i.e. setting 'busy' to TRUE)
					 */
					dwp->dw_mask |= DW_vm_page_unwire;
				}
				m->overwriting = FALSE;

				dwp->dw_mask |= (DW_clear_busy | DW_PAGE_WAKEUP);

				if (must_free == TRUE)
					dwp->dw_mask |= DW_vm_page_free;
				else
					dwp->dw_mask |= DW_vm_page_activate;

				/*
				 * Handle the trusted pager throttle.
				 */
					dwp->dw_mask |= DW_vm_pageout_throttle_up;

				if (upl->flags & UPL_ACCESS_BLOCKED) {
					/*
					 * We blocked access to the pages in this UPL.
					 * Clear the "busy" bit and wake up any waiter
					 */
					dwp->dw_mask |= DW_clear_busy;
				}
					assert(m->wire_count == 1);

					dwp->dw_mask |= (DW_vm_page_unwire | DW_clear_busy);

				if (m->overwriting) {

						dwp->dw_mask |= DW_clear_busy;

						/*
						 * deal with the 'alternate' method
						 * of stabilizing the page...
						 * we will either free the page
						 * or mark 'busy' to be cleared
						 * in the following code which will
						 * take care of the primary stabilization
						 * method (i.e. setting 'busy' to TRUE)
						 */
						dwp->dw_mask |= DW_vm_page_unwire;

					m->overwriting = FALSE;
				}
				if (m->encrypted_cleaning == TRUE) {
					m->encrypted_cleaning = FALSE;

					dwp->dw_mask |= DW_clear_busy;
				}
				m->dump_cleaning = FALSE;
				m->cleaning = FALSE;

				vm_external_state_clr(m->object->existence_map, m->offset);
#endif	/* MACH_PAGEMAP */
				if (error & UPL_ABORT_DUMP_PAGES) {
					pmap_disconnect(m->phys_page);

					dwp->dw_mask |= DW_vm_page_free;
				} else {
					if (error & UPL_ABORT_REFERENCE) {
						/*
						 * we've been told to explicitly
						 * reference this page... for
						 * file I/O, this is done by
						 * implementing an LRU on the inactive q
						 */
						dwp->dw_mask |= DW_vm_page_lru;
					}
					dwp->dw_mask |= DW_PAGE_WAKEUP;
				}
		}
		target_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;

			if (dwp->dw_mask & ~(DW_clear_busy | DW_PAGE_WAKEUP)) {
				VM_PAGE_ADD_DELAYED_WORK(dwp, m, dw_count);

				if (dw_count >= dw_limit) {
					vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);

				if (dwp->dw_mask & DW_clear_busy)

				if (dwp->dw_mask & DW_PAGE_WAKEUP)

		vm_page_do_delayed_work(shadow_object, &dw_array[0], dw_count);

	if (upl->flags & UPL_DEVICE_MEMORY)  {

	} else if (upl->flags & UPL_LITE) {

		pg_num = upl->size/PAGE_SIZE;
		pg_num = (pg_num + 31) >> 5;

		for (i = 0; i < pg_num; i++) {
			if (lite_list[i] != 0) {

		if (queue_empty(&upl->map_object->memq))

	if (occupied == 0) {
		/*
		 * If this UPL element belongs to a Vector UPL and is
		 * empty, then this is the right function to deallocate
		 * it. So go ahead set the *empty variable. The flag
		 * UPL_COMMIT_NOTIFY_EMPTY, from the caller's point of view
		 * should be considered relevant for the Vector UPL and
		 * not the internal UPLs.
		 */
		if ((upl->flags & UPL_COMMIT_NOTIFY_EMPTY) || isVectorUPL)

		if (object == shadow_object && !(upl->flags & UPL_KERNEL_OBJECT)) {
			/*
			 * this is not a paging object
			 * so we need to drop the paging reference
			 * that was taken when we created the UPL
			 * against this object
			 */
			vm_object_activity_end(shadow_object);
		} else {
			/*
			 * we donated the paging reference to
			 * the map object... vm_pageout_object_terminate
			 * will drop this reference
			 */
		}
	}
	vm_object_unlock(shadow_object);
	if (object != shadow_object)
		vm_object_unlock(object);

		/*
		 * If we completed our operations on an UPL that is
		 * part of a Vectored UPL and if empty is TRUE, then
		 * we should go ahead and deallocate this UPL element.
		 * Then we check if this was the last of the UPL elements
		 * within that Vectored UPL. If so, set empty to TRUE
		 * so that in ubc_upl_abort_range or ubc_upl_abort, we
		 * can go ahead and deallocate the Vector UPL too.
		 */
		if (*empty == TRUE) {
			*empty = vector_upl_set_subupl(vector_upl, upl, 0);
			upl_deallocate(upl);
		}
		goto process_upl_to_abort;

	return KERN_SUCCESS;
	return upl_abort_range(upl, 0, upl->size, error, &empty);

/* an option on commit should be wire */
	upl_page_info_t		*page_list,
	mach_msg_type_number_t	count)

	return upl_commit_range(upl, 0, upl->size, 0, page_list, count, &empty);
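/*
 * vm_object_iopl_request() builds an I/O-wired UPL directly against a VM
 * object: resident pages are wired (missing ones are brought in through
 * vm_fault_page()), their physical page numbers are recorded in the optional
 * user_page_list, and the corresponding bits are set in the UPL's lite_list.
 */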
unsigned int vm_object_iopl_request_sleep_for_cleaning = 0;

vm_object_iopl_request(
	vm_object_offset_t	offset,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	vm_object_offset_t	dst_offset;
	upl_size_t		xfer_size;
	wpl_array_t		lite_list = NULL;
	int			no_zero_fill = FALSE;
	unsigned int		size_in_pages;
	struct vm_object_fault_info	fault_info;
	struct vm_page_delayed_work	dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work	*dwp;

	if (cntrl_flags & ~UPL_VALID_FLAGS) {
		/*
		 * For forward compatibility's sake,
		 * reject any unknown flag.
		 */
		return KERN_INVALID_VALUE;
	}
	if (vm_lopage_needed == FALSE)
		cntrl_flags &= ~UPL_NEED_32BIT_ADDR;

	if (cntrl_flags & UPL_NEED_32BIT_ADDR) {
		if ( (cntrl_flags & (UPL_SET_IO_WIRE | UPL_SET_LITE)) != (UPL_SET_IO_WIRE | UPL_SET_LITE))
			return KERN_INVALID_VALUE;

		if (object->phys_contiguous) {
			if ((offset + object->vo_shadow_offset) >= (vm_object_offset_t)max_valid_dma_address)
				return KERN_INVALID_ADDRESS;

			if (((offset + object->vo_shadow_offset) + size) >= (vm_object_offset_t)max_valid_dma_address)
				return KERN_INVALID_ADDRESS;
		}
	}
	if (cntrl_flags & UPL_ENCRYPT) {
		/*
		 * The paging path doesn't use this interface,
		 * so we don't support the UPL_ENCRYPT flag
		 * here.  We won't encrypt the pages.
		 */
		assert(! (cntrl_flags & UPL_ENCRYPT));
	}
	if (cntrl_flags & UPL_NOZEROFILL)
		no_zero_fill = TRUE;

	if (cntrl_flags & UPL_COPYOUT_FROM)
		prot = VM_PROT_READ;
	else
		prot = VM_PROT_READ | VM_PROT_WRITE;

	if (((size/PAGE_SIZE) > MAX_UPL_SIZE) && !object->phys_contiguous)
		size = MAX_UPL_SIZE * PAGE_SIZE;

	if (cntrl_flags & UPL_SET_INTERNAL) {
		if (page_list_count != NULL)
			*page_list_count = MAX_UPL_SIZE;
	}
	if (((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
	    ((page_list_count != NULL) && (*page_list_count != 0) && *page_list_count < (size/page_size)))
	        return KERN_INVALID_ARGUMENT;

	if ((!object->internal) && (object->paging_offset != 0))
		panic("vm_object_iopl_request: external object with non-zero paging offset\n");

	if (object->phys_contiguous)

	if (cntrl_flags & UPL_SET_INTERNAL) {
		upl = upl_create(UPL_CREATE_INTERNAL | UPL_CREATE_LITE, UPL_IO_WIRE, psize);

		user_page_list = (upl_page_info_t *) (((uintptr_t)upl) + sizeof(struct upl));
		lite_list = (wpl_array_t) (((uintptr_t)user_page_list) +
					   ((psize / PAGE_SIZE) * sizeof(upl_page_info_t)));

			user_page_list = NULL;

	} else {
		upl = upl_create(UPL_CREATE_LITE, UPL_IO_WIRE, psize);

		lite_list = (wpl_array_t) (((uintptr_t)upl) + sizeof(struct upl));
	}
		user_page_list[0].device = FALSE;

	upl->map_object = object;

	size_in_pages = size / PAGE_SIZE;

	if (object == kernel_object &&
	    !(cntrl_flags & (UPL_NEED_32BIT_ADDR | UPL_BLOCK_ACCESS))) {
		upl->flags |= UPL_KERNEL_OBJECT;

		vm_object_lock(object);

		vm_object_lock_shared(object);

		vm_object_lock(object);
		vm_object_activity_begin(object);

	/*
	 * paging in progress also protects the paging_offset
	 */
	upl->offset = offset + object->paging_offset;

	if (cntrl_flags & UPL_BLOCK_ACCESS) {
		/*
		 * The user requested that access to the pages in this UPL
		 * be blocked until the UPL is committed or aborted.
		 */
		upl->flags |= UPL_ACCESS_BLOCKED;
	}
	if (object->phys_contiguous) {

		queue_enter(&object->uplq, upl, upl_t, uplq);
#endif /* UPL_DEBUG */

		if (upl->flags & UPL_ACCESS_BLOCKED) {
			assert(!object->blocked_access);
			object->blocked_access = TRUE;
		}
		vm_object_unlock(object);

		/*
		 * don't need any shadow mappings for this one
		 * since it is already I/O memory
		 */
		upl->flags |= UPL_DEVICE_MEMORY;

		upl->highest_page = (ppnum_t) ((offset + object->vo_shadow_offset + size - 1)>>PAGE_SHIFT);

		if (user_page_list) {
			user_page_list[0].phys_addr = (ppnum_t) ((offset + object->vo_shadow_offset)>>PAGE_SHIFT);
			user_page_list[0].device = TRUE;
		}
		if (page_list_count != NULL) {
			if (upl->flags & UPL_INTERNAL)
				*page_list_count = 0;
			else
				*page_list_count = 1;
		}
		return KERN_SUCCESS;
	}
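	/*
	 * The early return above covers physically contiguous (device)
	 * memory: no shadow pages are needed, the UPL is flagged
	 * UPL_DEVICE_MEMORY, and only the base physical address is
	 * reported back to the caller.
	 */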
	if (object != kernel_object) {
		/*
		 * Protect user space from future COW operations
		 */
		object->true_share = TRUE;

		if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC)
			object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}
	queue_enter(&object->uplq, upl, upl_t, uplq);
#endif /* UPL_DEBUG */

	if (!(cntrl_flags & UPL_COPYOUT_FROM) &&
	    object->copy != VM_OBJECT_NULL) {
		/*
		 * Honor copy-on-write obligations
		 *
		 * The caller is gathering these pages and
		 * might modify their contents.  We need to
		 * make sure that the copy object has its own
		 * private copies of these pages before we let
		 * the caller modify them.
		 *
		 * NOTE: someone else could map the original object
		 * after we've done this copy-on-write here, and they
		 * could then see an inconsistent picture of the memory
		 * while it's being modified via the UPL.  To prevent this,
		 * we would have to block access to these pages until the
		 * UPL is released.  We could use the UPL_BLOCK_ACCESS
		 * code path for that...
		 */
		vm_object_update(object,
				 FALSE,	/* should_return */
				 MEMORY_OBJECT_COPY_SYNC,
#if DEVELOPMENT || DEBUG
		iopl_cow_pages += size >> PAGE_SHIFT;
#endif
	}

	dst_offset = offset;

	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
	fault_info.user_tag = 0;
	fault_info.lo_offset = offset;
	fault_info.hi_offset = offset + xfer_size;
	fault_info.no_cache  = FALSE;
	fault_info.stealth = FALSE;
	fault_info.io_sync = FALSE;
	fault_info.cs_bypass = FALSE;
	fault_info.mark_zf_absent = TRUE;

	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);

		vm_fault_return_t	result;
		unsigned int		pg_num;

		dst_page = vm_page_lookup(object, dst_offset);

		/*
		 * If the page is encrypted, we need to decrypt it,
		 * so force a soft page fault.
		 */
		if (dst_page == VM_PAGE_NULL ||
		    dst_page->encrypted ||
		    dst_page->restart ||
		    dst_page->fictitious) {

			if (object == kernel_object)
				panic("vm_object_iopl_request: missing/bad page in kernel object\n");

			kern_return_t	error_code;

			if (cntrl_flags & UPL_SET_INTERRUPTIBLE)
				interruptible = THREAD_ABORTSAFE;
			else
				interruptible = THREAD_UNINT;

			fault_info.interruptible = interruptible;
			fault_info.cluster_size = xfer_size;

			vm_object_paging_begin(object);

			result = vm_fault_page(object, dst_offset,
					       prot | VM_PROT_WRITE, FALSE,
					       &prot, &dst_page, &top_page,
					       &error_code, no_zero_fill,
					       FALSE, &fault_info);

			case VM_FAULT_SUCCESS:

				if ( !dst_page->absent) {
					PAGE_WAKEUP_DONE(dst_page);
				}
				/*
				 * we only get back an absent page if we
				 * requested that it not be zero-filled
				 * because we are about to fill it via I/O
				 *
				 * absent pages should be left BUSY
				 * to prevent them from being faulted
				 * into an address space before we've
				 * had a chance to complete the I/O on
				 * them since they may contain info that
				 * shouldn't be seen by the faulting task
				 */
				/*
				 * Release paging references and
				 * top-level placeholder page, if any.
				 */
				if (top_page != VM_PAGE_NULL) {
					vm_object_t local_object;

					local_object = top_page->object;

					if (top_page->object != dst_page->object) {
						vm_object_lock(local_object);
						VM_PAGE_FREE(top_page);
						vm_object_paging_end(local_object);
						vm_object_unlock(local_object);
					} else {
						VM_PAGE_FREE(top_page);
						vm_object_paging_end(local_object);
					}
				}
				vm_object_paging_end(object);

			case VM_FAULT_RETRY:
				vm_object_lock(object);

			case VM_FAULT_MEMORY_SHORTAGE:
				OSAddAtomic(size_in_pages, &vm_upl_wait_for_pages);

				VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_START, vm_upl_wait_for_pages, 0, 0, 0);

				if (vm_page_wait(interruptible)) {
					OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

					VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, 0);
					vm_object_lock(object);
				}
				OSAddAtomic(-size_in_pages, &vm_upl_wait_for_pages);

				VM_DEBUG_EVENT(vm_iopl_page_wait, VM_IOPL_PAGE_WAIT, DBG_FUNC_END, vm_upl_wait_for_pages, 0, 0, -1);

			case VM_FAULT_INTERRUPTED:
				error_code = MACH_SEND_INTERRUPTED;
			case VM_FAULT_MEMORY_ERROR:
				ret = (error_code ? error_code : KERN_MEMORY_ERROR);

				vm_object_lock(object);

			case VM_FAULT_SUCCESS_NO_VM_PAGE:
				/* success but no page: fail */
				vm_object_paging_end(object);
				vm_object_unlock(object);

				panic("vm_object_iopl_request: unexpected error"
				      " 0x%x from vm_fault_page()\n", result);

		} while (result != VM_FAULT_SUCCESS);
		if (upl->flags & UPL_KERNEL_OBJECT)
			goto record_phys_addr;

		if (dst_page->cleaning) {
			/*
			 * Someone else is cleaning this page in place.
			 * In theory, we should be able to proceed and use this
			 * page but they'll probably end up clearing the "busy"
			 * bit on it in upl_commit_range() but they didn't set
			 * it, so they would clear our "busy" bit and open
			 * us to race conditions.
			 * We'd better wait for the cleaning to complete and
			 */
			vm_object_iopl_request_sleep_for_cleaning++;
			PAGE_SLEEP(object, dst_page, THREAD_UNINT);
		}
		if ( (cntrl_flags & UPL_NEED_32BIT_ADDR) &&
		     dst_page->phys_page >= (max_valid_dma_address >> PAGE_SHIFT) ) {
			/*
			 * support devices that can't DMA above 32 bits
			 * by substituting pages from a pool of low address
			 * memory for any pages we find above the 4G mark
			 * can't substitute if the page is already wired because
			 * we don't know whether that physical address has been
			 * handed out to some other 64 bit capable DMA device to use
			 */
			if (VM_PAGE_WIRED(dst_page)) {
				ret = KERN_PROTECTION_FAILURE;
			}
			low_page = vm_page_grablo();

			if (low_page == VM_PAGE_NULL) {
				ret = KERN_RESOURCE_SHORTAGE;
			}
			/*
			 * from here until the vm_page_replace completes
			 * we mustn't drop the object lock... we don't
			 * want anyone refaulting this page in and using
			 * it after we disconnect it... we want the fault
			 * to find the new page being substituted.
			 */
			if (dst_page->pmapped)
				refmod = pmap_disconnect(dst_page->phys_page);

			if (!dst_page->absent)
				vm_page_copy(dst_page, low_page);

			low_page->reference = dst_page->reference;
			low_page->dirty     = dst_page->dirty;
			low_page->absent    = dst_page->absent;

			if (refmod & VM_MEM_REFERENCED)
				low_page->reference = TRUE;
			if (refmod & VM_MEM_MODIFIED)
				low_page->dirty = TRUE;

			vm_page_replace(low_page, object, dst_offset);

			dst_page = low_page;
			/*
			 * vm_page_grablo returned the page marked
			 * BUSY... we don't need a PAGE_WAKEUP_DONE
			 * here, because we've never dropped the object lock
			 */
			if ( !dst_page->absent)
				dst_page->busy = FALSE;
		}
		if ( !dst_page->busy)
			dwp->dw_mask |= DW_vm_page_wire;

		if (cntrl_flags & UPL_BLOCK_ACCESS) {
			/*
			 * Mark the page "busy" to block any future page fault
			 * on this page in addition to wiring it.
			 * We'll also remove the mapping
			 * of all these pages before leaving this routine.
			 */
			assert(!dst_page->fictitious);
			dst_page->busy = TRUE;
		}
		/*
		 * expect the page to be used
		 * page queues lock must be held to set 'reference'
		 */
		dwp->dw_mask |= DW_set_reference;

		if (!(cntrl_flags & UPL_COPYOUT_FROM))
			dst_page->dirty = TRUE;

			upl->flags |= UPL_HAS_BUSY;

		pg_num = (unsigned int) ((dst_offset-offset)/PAGE_SIZE);
		assert(pg_num == (dst_offset-offset)/PAGE_SIZE);
		lite_list[pg_num>>5] |= 1 << (pg_num & 31);

		if (dst_page->phys_page > upl->highest_page)
			upl->highest_page = dst_page->phys_page;

		if (user_page_list) {
			user_page_list[entry].phys_addr	= dst_page->phys_page;
			user_page_list[entry].pageout	= dst_page->pageout;
			user_page_list[entry].absent	= dst_page->absent;
			user_page_list[entry].dirty	= dst_page->dirty;
			user_page_list[entry].precious	= dst_page->precious;
			user_page_list[entry].device	= FALSE;
			if (dst_page->clustered == TRUE)
				user_page_list[entry].speculative = dst_page->speculative;
			else
				user_page_list[entry].speculative = FALSE;
			user_page_list[entry].cs_validated = dst_page->cs_validated;
			user_page_list[entry].cs_tainted = dst_page->cs_tainted;
		}
		if (object != kernel_object) {
			/*
			 * someone is explicitly grabbing this page...
			 * update clustered and speculative state
			 */
			VM_PAGE_CONSUME_CLUSTERED(dst_page);
		}
		dst_offset += PAGE_SIZE_64;
		xfer_size -= PAGE_SIZE;

			VM_PAGE_ADD_DELAYED_WORK(dwp, dst_page, dw_count);

			if (dw_count >= dw_limit) {
				vm_page_do_delayed_work(object, &dw_array[0], dw_count);

		vm_page_do_delayed_work(object, &dw_array[0], dw_count);

	if (page_list_count != NULL) {
	        if (upl->flags & UPL_INTERNAL)
			*page_list_count = 0;
		else if (*page_list_count > entry)
			*page_list_count = entry;
	}
	vm_object_unlock(object);

	if (cntrl_flags & UPL_BLOCK_ACCESS) {
		/*
		 * We've marked all the pages "busy" so that future
		 * page faults will block.
		 * Now remove the mapping for these pages, so that they
		 * can't be accessed without causing a page fault.
		 */
		vm_object_pmap_protect(object, offset, (vm_object_size_t)size,
				       PMAP_NULL, 0, VM_PROT_NONE);
		assert(!object->blocked_access);
		object->blocked_access = TRUE;
	}
	return KERN_SUCCESS;
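	/*
	 * Error unwind: walk back over the pages that were processed before
	 * the failure, undoing the wiring (unless the page is still queued
	 * in the delayed-work array or was left absent, in which case it is
	 * freed) and waking any waiters.
	 */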
	for (; offset < dst_offset; offset += PAGE_SIZE) {
	        boolean_t need_unwire;

	        dst_page = vm_page_lookup(object, offset);

		if (dst_page == VM_PAGE_NULL)
			panic("vm_object_iopl_request: Wired page missing. \n");

		/*
		 * if we've already processed this page in an earlier
		 * dw_do_work, we need to undo the wiring... we will
		 * leave the dirty and reference bits on if they
		 * were set, since we don't have a good way of knowing
		 * what the previous state was and we won't get here
		 * under any normal circumstances...  we will always
		 * clear BUSY and wakeup any waiters via vm_page_free
		 * or PAGE_WAKEUP_DONE
		 */
			if (dw_array[dw_index].dw_m == dst_page) {
				/*
				 * still in the deferred work list
				 * which means we haven't yet called
				 * vm_page_wire on this page
				 */
				need_unwire = FALSE;

		vm_page_lock_queues();

		if (dst_page->absent) {
			vm_page_free(dst_page);

			need_unwire = FALSE;
		} else {
			if (need_unwire == TRUE)
				vm_page_unwire(dst_page, TRUE);

			PAGE_WAKEUP_DONE(dst_page);
		}
		vm_page_unlock_queues();

		if (need_unwire == TRUE)
			VM_STAT_INCR(reactivations);
	}
	if (! (upl->flags & UPL_KERNEL_OBJECT)) {
		vm_object_activity_end(object);
	}
	vm_object_unlock(object);
	kern_return_t		retval;
	boolean_t		upls_locked;
	vm_object_t		object1, object2;

	if (upl1 == UPL_NULL || upl2 == UPL_NULL || upl1 == upl2  || ((upl1->flags & UPL_VECTOR)==UPL_VECTOR)  || ((upl2->flags & UPL_VECTOR)==UPL_VECTOR)) {
		return KERN_INVALID_ARGUMENT;
	}

	upls_locked = FALSE;

	/*
	 * Since we need to lock both UPLs at the same time,
	 * avoid deadlocks by always taking locks in the same order.
	 */
	upls_locked = TRUE;	/* the UPLs will need to be unlocked */

	object1 = upl1->map_object;
	object2 = upl2->map_object;

	if (upl1->offset != 0 || upl2->offset != 0 ||
	    upl1->size != upl2->size) {
		/*
		 * We deal only with full objects, not subsets.
		 * That's because we exchange the entire backing store info
		 * for the objects: pager, resident pages, etc...  We can't do
		 */
		retval = KERN_INVALID_VALUE;
	}

	/*
	 * Transpose the VM objects' backing store.
	 */
	retval = vm_object_transpose(object1, object2,
				     (vm_object_size_t) upl1->size);

	if (retval == KERN_SUCCESS) {
		/*
		 * Make each UPL point to the correct VM object, i.e. the
		 * object holding the pages that the UPL refers to...
		 */
		queue_remove(&object1->uplq, upl1, upl_t, uplq);
		queue_remove(&object2->uplq, upl2, upl_t, uplq);

		upl1->map_object = object2;
		upl2->map_object = object1;

		queue_enter(&object1->uplq, upl2, upl_t, uplq);
		queue_enter(&object2->uplq, upl1, upl_t, uplq);
	}

	upls_locked = FALSE;
/*
 * Rationale:  the user might have some encrypted data on disk (via
 * FileVault or any other mechanism).  That data is then decrypted in
 * memory, which is safe as long as the machine is secure.  But that
 * decrypted data in memory could be paged out to disk by the default
 * pager.  The data would then be stored on disk in clear (not encrypted)
 * and it could be accessed by anyone who gets physical access to the
 * disk (if the laptop or the disk gets stolen for example).  This weakens
 * the security offered by FileVault.
 *
 * Solution:  the default pager will optionally request that all the
 * pages it gathers for pageout be encrypted, via the UPL interfaces,
 * before it sends this UPL to disk via the vnode_pageout() path.
 *
 * To avoid disrupting the VM LRU algorithms, we want to keep the
 * clean-in-place mechanisms, which allow us to send some extra pages to
 * swap (clustering) without actually removing them from the user's
 * address space.  We don't want the user to unknowingly access encrypted
 * data, so we have to actually remove the encrypted pages from the page
 * table.  When the user accesses the data, the hardware will fail to
 * locate the virtual page in its page table and will trigger a page
 * fault.  We can then decrypt the page and enter it in the page table
 * again.  Whenever we allow the user to access the contents of a page,
 * we have to make sure it's not encrypted.
 */

/*
 * Reserve of virtual addresses in the kernel address space.
 * We need to map the physical pages in the kernel, so that we
 * can call the encryption/decryption routines with a kernel
 * virtual address.  We keep this pool of pre-allocated kernel
 * virtual addresses so that we don't have to scan the kernel's
 * virtual address space each time we need to encrypt or decrypt
 * It would be nice to be able to encrypt and decrypt in physical
 * mode but that might not always be more efficient...
 */
decl_simple_lock_data(,vm_paging_lock)
#define VM_PAGING_NUM_PAGES	64
vm_map_offset_t vm_paging_base_address = 0;
boolean_t	vm_paging_page_inuse[VM_PAGING_NUM_PAGES] = { FALSE, };
int		vm_paging_max_index = 0;
int		vm_paging_page_waiter = 0;
int		vm_paging_page_waiter_total = 0;
unsigned long	vm_paging_no_kernel_page = 0;
unsigned long	vm_paging_objects_mapped = 0;
unsigned long	vm_paging_pages_mapped = 0;
unsigned long	vm_paging_objects_mapped_slow = 0;
unsigned long	vm_paging_pages_mapped_slow = 0;

vm_paging_map_init(void)
	vm_map_offset_t	page_map_offset;
	vm_map_entry_t	map_entry;

	assert(vm_paging_base_address == 0);

	/*
	 * Initialize our pool of pre-allocated kernel
	 * virtual addresses.
	 */
	page_map_offset = 0;
	kr = vm_map_find_space(kernel_map,
			       VM_PAGING_NUM_PAGES * PAGE_SIZE,
	if (kr != KERN_SUCCESS) {
		panic("vm_paging_map_init: kernel_map full\n");
	}
	map_entry->object.vm_object = kernel_object;
	map_entry->offset = page_map_offset;
	map_entry->protection = VM_PROT_NONE;
	map_entry->max_protection = VM_PROT_NONE;
	map_entry->permanent = TRUE;
	vm_object_reference(kernel_object);
	vm_map_unlock(kernel_map);

	assert(vm_paging_base_address == 0);
	vm_paging_base_address = page_map_offset;
/*
 * vm_paging_map_object:
 *	Maps part of a VM object's pages in the kernel
 * 	virtual address space, using the pre-allocated
 *	kernel virtual addresses, if possible.
 * Context:
 * 	The VM object is locked.  This lock will get
 * 	dropped and re-acquired though, so the caller
 * 	must make sure the VM object is kept alive
 *	(by holding a VM map that has a reference
 * 	on it, for example, or taking an extra reference).
 * 	The page should also be kept busy to prevent
 *	it from being reclaimed.
 */
vm_paging_map_object(
	vm_map_offset_t		*address,
	vm_object_offset_t	offset,
	vm_map_size_t		*size,
	vm_prot_t		protection,
	boolean_t		can_unlock_object)
	vm_map_offset_t		page_map_offset;
	vm_map_size_t		map_size;
	vm_object_offset_t	object_offset;

	if (page != VM_PAGE_NULL && *size == PAGE_SIZE) {
		/*
		 * Use one of the pre-allocated kernel virtual addresses
		 * and just enter the VM page in the kernel address space
		 * at that virtual address.
		 */
		simple_lock(&vm_paging_lock);

		/*
		 * Try and find an available kernel virtual address
		 * from our pre-allocated pool.
		 */
		page_map_offset = 0;

			for (i = 0; i < VM_PAGING_NUM_PAGES; i++) {
				if (vm_paging_page_inuse[i] == FALSE) {
						vm_paging_base_address +

			if (page_map_offset != 0) {
				/* found a space to map our page ! */

			if (can_unlock_object) {
				/*
				 * If we can afford to unlock the VM object,
				 * let's take the slow path now...
				 */
			/*
			 * We can't afford to unlock the VM object, so
			 * let's wait for a space to become available...
			 */
			vm_paging_page_waiter_total++;
			vm_paging_page_waiter++;
			thread_sleep_fast_usimple_lock(&vm_paging_page_waiter,
			vm_paging_page_waiter--;
			/* ... and try again */

		if (page_map_offset != 0) {
			/*
			 * We found a kernel virtual address;
			 * map the physical page to that virtual address.
			 */
			if (i > vm_paging_max_index) {
				vm_paging_max_index = i;
			}
			vm_paging_page_inuse[i] = TRUE;
			simple_unlock(&vm_paging_lock);

			page->pmapped = TRUE;

			/*
			 * Keep the VM object locked over the PMAP_ENTER
			 * and the actual use of the page by the kernel,
			 * or this pmap mapping might get undone by a 
			 * vm_object_pmap_protect() call...
			 */
			PMAP_ENTER(kernel_pmap,

			vm_paging_objects_mapped++;
			vm_paging_pages_mapped++; 
			*address = page_map_offset;

			/* all done and mapped, ready to use ! */
			return KERN_SUCCESS;
		}

		/*
		 * We ran out of pre-allocated kernel virtual
		 * addresses.  Just map the page in the kernel
		 * the slow and regular way.
		 */
		vm_paging_no_kernel_page++;
		simple_unlock(&vm_paging_lock);
	}

	if (! can_unlock_object) {
		return KERN_NOT_SUPPORTED;
	}

	object_offset = vm_object_trunc_page(offset);
	map_size = vm_map_round_page(*size);

	/*
	 * Try and map the required range of the object
	 */
	vm_object_reference_locked(object);	/* for the map entry */
	vm_object_unlock(object);

	kr = vm_map_enter(kernel_map,
	if (kr != KERN_SUCCESS) {
		vm_object_deallocate(object);	/* for the map entry */
		vm_object_lock(object);
	}

	/*
	 * Enter the mapped pages in the page table now.
	 */
	vm_object_lock(object);
	/*
	 * VM object must be kept locked from before PMAP_ENTER()
	 * until after the kernel is done accessing the page(s).
	 * Otherwise, the pmap mappings in the kernel could be
	 * undone by a call to vm_object_pmap_protect().
	 */
	for (page_map_offset = 0;
	     map_size -= PAGE_SIZE_64, page_map_offset += PAGE_SIZE_64) {

		page = vm_page_lookup(object, offset + page_map_offset);
		if (page == VM_PAGE_NULL) {
			printf("vm_paging_map_object: no page !?");
			vm_object_unlock(object);
			kr = vm_map_remove(kernel_map, *address, *size,
			assert(kr == KERN_SUCCESS);
			vm_object_lock(object);
			return KERN_MEMORY_ERROR;
		}
		page->pmapped = TRUE;

		//assert(pmap_verify_free(page->phys_page));
		PMAP_ENTER(kernel_pmap,
			   *address + page_map_offset,
	}
	vm_paging_objects_mapped_slow++;
	vm_paging_pages_mapped_slow += (unsigned long) (map_size / PAGE_SIZE_64);

	return KERN_SUCCESS;
/*
 * vm_paging_unmap_object:
 *	Unmaps part of a VM object's pages from the kernel
 * 	virtual address space.
 * Context:
 * 	The VM object is locked.  This lock will get
 * 	dropped and re-acquired though.
 */
vm_paging_unmap_object(
	vm_map_offset_t	start,
	vm_map_offset_t	end)

	if ((vm_paging_base_address == 0) ||
	    (start < vm_paging_base_address) ||
	    (end > (vm_paging_base_address
		     + (VM_PAGING_NUM_PAGES * PAGE_SIZE)))) {
		/*
		 * We didn't use our pre-allocated pool of
		 * kernel virtual address.  Deallocate the
		 */
		if (object != VM_OBJECT_NULL) {
			vm_object_unlock(object);
		}
		kr = vm_map_remove(kernel_map, start, end, VM_MAP_NO_FLAGS);
		if (object != VM_OBJECT_NULL) {
			vm_object_lock(object);
		}
		assert(kr == KERN_SUCCESS);
	} else {
		/*
		 * We used a kernel virtual address from our
		 * pre-allocated pool.  Put it back in the pool
		 */
		assert(end - start == PAGE_SIZE);
		i = (int) ((start - vm_paging_base_address) >> PAGE_SHIFT);
		assert(i >= 0 && i < VM_PAGING_NUM_PAGES);

		/* undo the pmap mapping */
		pmap_remove(kernel_pmap, start, end);

		simple_lock(&vm_paging_lock);
		vm_paging_page_inuse[i] = FALSE;
		if (vm_paging_page_waiter) {
			thread_wakeup(&vm_paging_page_waiter);
		}
		simple_unlock(&vm_paging_lock);
	}
/*
 * "iv" is the "initial vector".  Ideally, we want to
 * have a different one for each page we encrypt, so that
 * crackers can't find encryption patterns too easily.
 */
#define SWAP_CRYPT_AES_KEY_SIZE	128	/* XXX 192 and 256 don't work ! */
boolean_t		swap_crypt_ctx_initialized = FALSE;
aes_32t 		swap_crypt_key[8]; /* big enough for a 256 key */
aes_ctx			swap_crypt_ctx;
const unsigned char	swap_crypt_null_iv[AES_BLOCK_SIZE] = {0xa, };

boolean_t		swap_crypt_ctx_tested = FALSE;
unsigned char swap_crypt_test_page_ref[4096] __attribute__((aligned(4096)));
unsigned char swap_crypt_test_page_encrypt[4096] __attribute__((aligned(4096)));
unsigned char swap_crypt_test_page_decrypt[4096] __attribute__((aligned(4096)));

/*
 * Initialize the encryption context: key and key size.
 */
void swap_crypt_ctx_initialize(void); /* forward */
swap_crypt_ctx_initialize(void)
	/*
	 * No need for locking to protect swap_crypt_ctx_initialized
	 * because the first use of encryption will come from the
	 * pageout thread (we won't pagein before there's been a pageout)
	 * and there's only one pageout thread.
	 */
	if (swap_crypt_ctx_initialized == FALSE) {
		     i < (sizeof (swap_crypt_key) /
			  sizeof (swap_crypt_key[0]));
			swap_crypt_key[i] = random();

		aes_encrypt_key((const unsigned char *) swap_crypt_key,
				SWAP_CRYPT_AES_KEY_SIZE,
				&swap_crypt_ctx.encrypt);
		aes_decrypt_key((const unsigned char *) swap_crypt_key,
				SWAP_CRYPT_AES_KEY_SIZE,
				&swap_crypt_ctx.decrypt);
		swap_crypt_ctx_initialized = TRUE;
	}

	/*
	 * Validate the encryption algorithms.
	 */
	if (swap_crypt_ctx_tested == FALSE) {
		for (i = 0; i < 4096; i++) {
			swap_crypt_test_page_ref[i] = (char) i;
		}
		aes_encrypt_cbc(swap_crypt_test_page_ref,
				PAGE_SIZE / AES_BLOCK_SIZE,
				swap_crypt_test_page_encrypt,
				&swap_crypt_ctx.encrypt);
		aes_decrypt_cbc(swap_crypt_test_page_encrypt,
				PAGE_SIZE / AES_BLOCK_SIZE,
				swap_crypt_test_page_decrypt,
				&swap_crypt_ctx.decrypt);
		/* compare result with original */
		for (i = 0; i < 4096; i ++) {
			if (swap_crypt_test_page_decrypt[i] !=
			    swap_crypt_test_page_ref[i]) {
				panic("encryption test failed");
			}
		}
		aes_encrypt_cbc(swap_crypt_test_page_decrypt,
				PAGE_SIZE / AES_BLOCK_SIZE,
				swap_crypt_test_page_decrypt,
				&swap_crypt_ctx.encrypt);
		/* decrypt in place */
		aes_decrypt_cbc(swap_crypt_test_page_decrypt,
				PAGE_SIZE / AES_BLOCK_SIZE,
				swap_crypt_test_page_decrypt,
				&swap_crypt_ctx.decrypt);
		for (i = 0; i < 4096; i ++) {
			if (swap_crypt_test_page_decrypt[i] !=
			    swap_crypt_test_page_ref[i]) {
				panic("in place encryption test failed");
			}
		}
		swap_crypt_ctx_tested = TRUE;
	}
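/*
 * The self-test above round-trips a page filled with a known pattern
 * through aes_encrypt_cbc()/aes_decrypt_cbc(), both out of place and in
 * place, and panics if the decrypted result does not match the original.
 */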
/*
 * vm_page_encrypt:
 * 	Encrypt the given page, for secure paging.
 * 	The page might already be mapped at kernel virtual
 * 	address "kernel_mapping_offset".  Otherwise, we need
 * 	to map it.
 *
 * Context:
 * 	The page's object is locked, but this lock will be released
 * 	and re-acquired.
 * 	The page is busy and not accessible by users (not entered in any pmap).
 */
void
vm_page_encrypt(
	vm_page_t	page,
	vm_map_offset_t	kernel_mapping_offset)
{
	kern_return_t		kr;
	vm_map_size_t		kernel_mapping_size;
	vm_offset_t		kernel_vaddr;
	union {
		unsigned char	aes_iv[AES_BLOCK_SIZE];
		struct {
			memory_object_t		pager_object;
			vm_object_offset_t	paging_offset;
		} vm;
	} encrypt_iv;

	if (! vm_pages_encrypted) {
		vm_pages_encrypted = TRUE;
	}

	assert(page->busy);
	assert(page->dirty || page->precious);

	if (page->encrypted) {
		/*
		 * Already encrypted: no need to do it again.
		 */
		vm_page_encrypt_already_encrypted_counter++;
		return;
	}
	ASSERT_PAGE_DECRYPTED(page);

	/*
	 * Take a paging-in-progress reference to keep the object
	 * alive even if we have to unlock it (in vm_paging_map_object()
	 * for example)...
	 */
	vm_object_paging_begin(page->object);

	if (kernel_mapping_offset == 0) {
		/*
		 * The page hasn't already been mapped in kernel space
		 * by the caller.  Map it now, so that we can access
		 * its contents and encrypt them.
		 */
		kernel_mapping_size = PAGE_SIZE;
		kr = vm_paging_map_object(&kernel_mapping_offset,
					  page,
					  page->object,
					  page->offset,
					  &kernel_mapping_size,
					  VM_PROT_READ | VM_PROT_WRITE,
					  FALSE);
		if (kr != KERN_SUCCESS) {
			panic("vm_page_encrypt: "
			      "could not map page in kernel: 0x%x\n",
			      kr);
		}
	} else {
		kernel_mapping_size = 0;
	}
	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);

	if (swap_crypt_ctx_initialized == FALSE) {
		swap_crypt_ctx_initialize();
	}
	assert(swap_crypt_ctx_initialized);

	/*
	 * Prepare an "initial vector" for the encryption.
	 * We use the "pager" and the "paging_offset" for that
	 * page to obfuscate the encrypted data a bit more and
	 * prevent crackers from finding patterns that they could
	 * use to break the key.
	 */
	bzero(&encrypt_iv.aes_iv[0], sizeof (encrypt_iv.aes_iv));
	encrypt_iv.vm.pager_object = page->object->pager;
	encrypt_iv.vm.paging_offset =
		page->object->paging_offset + page->offset;

	/* encrypt the "initial vector" */
	aes_encrypt_cbc((const unsigned char *) &encrypt_iv.aes_iv[0],
			swap_crypt_null_iv,
			1,
			&encrypt_iv.aes_iv[0],
			&swap_crypt_ctx.encrypt);

	/*
	 * Encrypt the page.
	 */
	aes_encrypt_cbc((const unsigned char *) kernel_vaddr,
			&encrypt_iv.aes_iv[0],
			PAGE_SIZE / AES_BLOCK_SIZE,
			(unsigned char *) kernel_vaddr,
			&swap_crypt_ctx.encrypt);

	vm_page_encrypt_counter++;

	/*
	 * Unmap the page from the kernel's address space,
	 * if we had to map it ourselves.  Otherwise, let
	 * the caller undo the mapping if needed.
	 */
	if (kernel_mapping_size != 0) {
		vm_paging_unmap_object(page->object,
				       kernel_mapping_offset,
				       kernel_mapping_offset + kernel_mapping_size);
	}

	/*
	 * Clear the "reference" and "modified" bits.
	 * This should clean up any impact the encryption had on them.
	 * The page was kept busy and disconnected from all pmaps,
	 * so it can't have been referenced or modified from user space.
	 * The software bits will be reset later after the I/O
	 * has completed (in upl_commit_range()).
	 */
	pmap_clear_refmod(page->phys_page, VM_MEM_REFERENCED | VM_MEM_MODIFIED);

	page->encrypted = TRUE;

	vm_object_paging_end(page->object);
}
/*
 * vm_page_decrypt:
 * 	Decrypt the given page.
 * 	The page might already be mapped at kernel virtual
 * 	address "kernel_mapping_offset".  Otherwise, we need
 * 	to map it.
 *
 * Context:
 * 	The page's VM object is locked but will be unlocked and relocked.
 * 	The page is busy and not accessible by users (not entered in any pmap).
 */
void
vm_page_decrypt(
	vm_page_t	page,
	vm_map_offset_t	kernel_mapping_offset)
{
	kern_return_t		kr;
	vm_map_size_t		kernel_mapping_size;
	vm_offset_t		kernel_vaddr;
	union {
		unsigned char	aes_iv[AES_BLOCK_SIZE];
		struct {
			memory_object_t		pager_object;
			vm_object_offset_t	paging_offset;
		} vm;
	} decrypt_iv;
	boolean_t		was_dirty;

	assert(page->busy);
	assert(page->encrypted);

	was_dirty = page->dirty;

	/*
	 * Take a paging-in-progress reference to keep the object
	 * alive even if we have to unlock it (in vm_paging_map_object()
	 * for example)...
	 */
	vm_object_paging_begin(page->object);

	if (kernel_mapping_offset == 0) {
		/*
		 * The page hasn't already been mapped in kernel space
		 * by the caller.  Map it now, so that we can access
		 * its contents and decrypt them.
		 */
		kernel_mapping_size = PAGE_SIZE;
		kr = vm_paging_map_object(&kernel_mapping_offset,
					  page,
					  page->object,
					  page->offset,
					  &kernel_mapping_size,
					  VM_PROT_READ | VM_PROT_WRITE,
					  FALSE);
		if (kr != KERN_SUCCESS) {
			panic("vm_page_decrypt: "
			      "could not map page in kernel: 0x%x\n",
			      kr);
		}
	} else {
		kernel_mapping_size = 0;
	}
	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);

	assert(swap_crypt_ctx_initialized);

	/*
	 * Prepare an "initial vector" for the decryption.
	 * It has to be the same as the "initial vector" we
	 * used to encrypt that page.
	 */
	bzero(&decrypt_iv.aes_iv[0], sizeof (decrypt_iv.aes_iv));
	decrypt_iv.vm.pager_object = page->object->pager;
	decrypt_iv.vm.paging_offset =
		page->object->paging_offset + page->offset;

	/* encrypt the "initial vector" */
	aes_encrypt_cbc((const unsigned char *) &decrypt_iv.aes_iv[0],
			swap_crypt_null_iv,
			1,
			&decrypt_iv.aes_iv[0],
			&swap_crypt_ctx.encrypt);

	/*
	 * Decrypt the page.
	 */
	aes_decrypt_cbc((const unsigned char *) kernel_vaddr,
			&decrypt_iv.aes_iv[0],
			PAGE_SIZE / AES_BLOCK_SIZE,
			(unsigned char *) kernel_vaddr,
			&swap_crypt_ctx.decrypt);
	vm_page_decrypt_counter++;

	/*
	 * Unmap the page from the kernel's address space,
	 * if we had to map it ourselves.  Otherwise, let
	 * the caller undo the mapping if needed.
	 */
	if (kernel_mapping_size != 0) {
		vm_paging_unmap_object(page->object,
				       kernel_vaddr,
				       kernel_vaddr + PAGE_SIZE);
	}

	if (was_dirty) {
		/*
		 * The pager did not specify that the page would be
		 * clean when it got paged in, so let's not clean it here
		 * either.
		 */
	} else {
		/*
		 * After decryption, the page is actually still clean.
		 * It was encrypted as part of paging, which "cleans"
		 * the "dirty" pages.
		 * No one could access it after it was encrypted
		 * and the decryption doesn't count.
		 */
		page->dirty = FALSE;
		assert (page->cs_validated == FALSE);
		pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
	}
	page->encrypted = FALSE;

	/*
	 * We've just modified the page's contents via the data cache and part
	 * of the new contents might still be in the cache and not yet in RAM.
	 * Since the page is now available and might get gathered in a UPL to
	 * be part of a DMA transfer from a driver that expects the memory to
	 * be coherent at this point, we have to flush the data cache.
	 */
	pmap_sync_page_attributes_phys(page->phys_page);
	/*
	 * Since the page is not mapped yet, some code might assume that it
	 * doesn't need to invalidate the instruction cache when writing to
	 * that page.  That code relies on "pmapped" being FALSE, so that the
	 * caches get synchronized when the page is first mapped.
	 */
	assert(pmap_verify_free(page->phys_page));
	page->pmapped = FALSE;
	page->wpmapped = FALSE;

	vm_object_paging_end(page->object);
}
#if DEVELOPMENT || DEBUG
unsigned long upl_encrypt_upls = 0;
unsigned long upl_encrypt_pages = 0;
#endif

/*
 * upl_encrypt:
 * 	Encrypts all the pages in the UPL, within the specified range.
 */
void
upl_encrypt(
	upl_t			upl,
	upl_offset_t		crypt_offset,
	upl_size_t		crypt_size)
{
	upl_size_t		upl_size, subupl_size = crypt_size;
	upl_offset_t		offset_in_upl, subupl_offset = crypt_offset;
	vm_object_t		upl_object;
	vm_object_offset_t	upl_offset;
	vm_page_t		page;
	vm_object_t		shadow_object;
	vm_object_offset_t	shadow_offset;
	vm_object_offset_t	paging_offset;
	vm_object_offset_t	base_offset;
	int			isVectorUPL = 0;
	upl_t			vector_upl = NULL;

	if((isVectorUPL = vector_upl_is_valid(upl)))
		vector_upl = upl;

process_upl_to_encrypt:
	if(isVectorUPL) {
		crypt_size = subupl_size;
		crypt_offset = subupl_offset;
		upl = vector_upl_subupl_byoffset(vector_upl, &crypt_offset, &crypt_size);
		if(upl == NULL)
			panic("upl_encrypt: Accessing a sub-upl that doesn't exist\n");
		subupl_size -= crypt_size;
		subupl_offset += crypt_size;
	}

#if DEVELOPMENT || DEBUG
	upl_encrypt_upls++;
	upl_encrypt_pages += crypt_size / PAGE_SIZE;
#endif
	upl_object = upl->map_object;
	upl_offset = upl->offset;
	upl_size = upl->size;

	vm_object_lock(upl_object);

	/*
	 * Find the VM object that contains the actual pages.
	 */
	if (upl_object->pageout) {
		shadow_object = upl_object->shadow;
		/*
		 * The offset in the shadow object is actually also
		 * accounted for in upl->offset.  It possibly shouldn't be
		 * this way, but for now don't account for it twice.
		 */
		shadow_offset = 0;
		assert(upl_object->paging_offset == 0);	/* XXX ? */
		vm_object_lock(shadow_object);
	} else {
		shadow_object = upl_object;
		shadow_offset = 0;
	}

	paging_offset = shadow_object->paging_offset;
	vm_object_paging_begin(shadow_object);

	if (shadow_object != upl_object)
		vm_object_unlock(upl_object);

	base_offset = shadow_offset;
	base_offset += upl_offset;
	base_offset += crypt_offset;
	base_offset -= paging_offset;

	assert(crypt_offset + crypt_size <= upl_size);

	for (offset_in_upl = 0;
	     offset_in_upl < crypt_size;
	     offset_in_upl += PAGE_SIZE) {
		page = vm_page_lookup(shadow_object,
				      base_offset + offset_in_upl);
		if (page == VM_PAGE_NULL) {
			panic("upl_encrypt: "
			      "no page for (obj=%p,off=0x%llx+0x%x)!\n",
			      shadow_object,
			      base_offset,
			      offset_in_upl);
		}
		/*
		 * Disconnect the page from all pmaps, so that nobody can
		 * access it while it's encrypted.  After that point, all
		 * accesses to this page will cause a page fault and block
		 * while the page is busy being encrypted.  After the
		 * encryption completes, any access will cause a
		 * page fault and the page gets decrypted at that time.
		 */
		pmap_disconnect(page->phys_page);
		vm_page_encrypt(page, 0);

		if (vm_object_lock_avoid(shadow_object)) {
			/*
			 * Give vm_pageout_scan() a chance to convert more
			 * pages from "clean-in-place" to "clean-and-free",
			 * if it's interested in the same pages we selected
			 * in this cluster.
			 */
			vm_object_unlock(shadow_object);
			mutex_pause(2);
			vm_object_lock(shadow_object);
		}
	}

	vm_object_paging_end(shadow_object);
	vm_object_unlock(shadow_object);

	if(isVectorUPL && subupl_size)
		goto process_upl_to_encrypt;
}
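/*
 * Illustrative sketch (not part of the original code): the vector-UPL
 * handling above follows a generic pattern - carve the caller's
 * (offset, size) range into per-sub-UPL chunks with
 * vector_upl_subupl_byoffset() and repeat until the range is consumed.
 * The helper and its callback below are hypothetical.
 */
#if 0
static void
vector_upl_for_each_range(
	upl_t		vector_upl,
	upl_offset_t	offset,
	upl_size_t	size,
	void		(*fn)(upl_t subupl, upl_offset_t off, upl_size_t len))
{
	upl_t		subupl;
	upl_offset_t	cur_offset;
	upl_size_t	cur_size;

	while (size) {
		cur_offset = offset;
		cur_size = size;
		/* clips cur_offset/cur_size to the matching sub-UPL */
		subupl = vector_upl_subupl_byoffset(vector_upl,
						    &cur_offset, &cur_size);
		if (subupl == NULL)
			break;	/* range already committed/aborted */
		fn(subupl, cur_offset, cur_size);
		offset += cur_size;
		size -= cur_size;
	}
}
#endif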
#else /* CRYPTO */

/*
 * No-crypto stubs: keep the same entry points when encrypted swap
 * support is compiled out.
 */
void
upl_encrypt(
	__unused upl_t			upl,
	__unused upl_offset_t		crypt_offset,
	__unused upl_size_t		crypt_size)
{
}

void
vm_page_encrypt(
	__unused vm_page_t		page,
	__unused vm_map_offset_t	kernel_mapping_offset)
{
}

void
vm_page_decrypt(
	__unused vm_page_t		page,
	__unused vm_map_offset_t	kernel_mapping_offset)
{
}

#endif /* CRYPTO */
void
vm_pageout_queue_steal(vm_page_t page, boolean_t queues_locked)
{
	boolean_t	pageout;

	pageout = page->pageout;

	page->list_req_pending = FALSE;
	page->cleaning = FALSE;
	page->pageout = FALSE;

	if (!queues_locked) {
		vm_page_lockspin_queues();
	}

	/*
	 * need to drop the laundry count...
	 * we may also need to remove it
	 * from the I/O paging queue...
	 * vm_pageout_throttle_up handles both cases
	 *
	 * the laundry and pageout_queue flags are cleared...
	 */
	vm_pageout_throttle_up(page);

	if (pageout == TRUE) {
		/*
		 * toss the wire count we picked up
		 * when we initially set this page up
		 * to be cleaned...
		 */
		vm_page_unwire(page, TRUE);
	}
	vm_page_steal_pageout_page++;

	if (!queues_locked) {
		vm_page_unlock_queues();
	}
}
upl_t
vector_upl_create(vm_offset_t upl_offset)
{
	int		vector_upl_size = sizeof(struct _vector_upl);
	int		i = 0;
	upl_t		upl;
	vector_upl_t	vector_upl = (vector_upl_t)kalloc(vector_upl_size);

	upl = upl_create(0, UPL_VECTOR, 0);
	upl->vector_upl = vector_upl;
	upl->offset = upl_offset;
	vector_upl->size = 0;
	vector_upl->offset = upl_offset;
	vector_upl->invalid_upls = 0;
	vector_upl->num_upls = 0;
	vector_upl->pagelist = NULL;

	for(i = 0; i < MAX_VECTOR_UPL_ELEMENTS; i++) {
		vector_upl->upl_iostates[i].size = 0;
		vector_upl->upl_iostates[i].offset = 0;
	}
	return upl;
}
void
vector_upl_deallocate(upl_t upl)
{
	if(upl) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			if(vector_upl->invalid_upls != vector_upl->num_upls)
				panic("Deallocating non-empty Vectored UPL\n");
			kfree(vector_upl->pagelist, (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)));
			vector_upl->invalid_upls = 0;
			vector_upl->num_upls = 0;
			vector_upl->pagelist = NULL;
			vector_upl->size = 0;
			vector_upl->offset = 0;
			kfree(vector_upl, sizeof(struct _vector_upl));
			vector_upl = (vector_upl_t)0xdeadbeef;
		}
		else
			panic("vector_upl_deallocate was passed a non-vectored upl\n");
	}
	else
		panic("vector_upl_deallocate was passed a NULL upl\n");
}
boolean_t
vector_upl_is_valid(upl_t upl)
{
	if(upl && ((upl->flags & UPL_VECTOR) == UPL_VECTOR)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl == NULL || vector_upl == (vector_upl_t)0xdeadbeef || vector_upl == (vector_upl_t)0xfeedbeef)
			return FALSE;
		else
			return TRUE;
	}
	return FALSE;
}
void
vector_upl_set_subupl(upl_t upl, upl_t subupl, uint32_t io_size)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			if(subupl) {
				if(io_size) {
					if(io_size < PAGE_SIZE)
						io_size = PAGE_SIZE;
					subupl->vector_upl = (void*)vector_upl;
					vector_upl->upl_elems[vector_upl->num_upls++] = subupl;
					vector_upl->size += io_size;
					upl->size += io_size;
				}
				else {
					uint32_t i = 0, invalid_upls = 0;

					for(i = 0; i < vector_upl->num_upls; i++) {
						if(vector_upl->upl_elems[i] == subupl)
							break;
					}
					if(i == vector_upl->num_upls)
						panic("Trying to remove sub-upl when none exists");

					vector_upl->upl_elems[i] = NULL;
					invalid_upls = hw_atomic_add(&(vector_upl)->invalid_upls, 1);
					if(invalid_upls == vector_upl->num_upls) {
						/*
						 * All sub-upls have been invalidated;
						 * the vector UPL itself is now empty.
						 */
					}
				}
			}
			else
				panic("vector_upl_set_subupl was passed a NULL upl element\n");
		}
		else
			panic("vector_upl_set_subupl was passed a non-vectored upl\n");
	}
	else
		panic("vector_upl_set_subupl was passed a NULL upl\n");
}
void
vector_upl_set_pagelist(upl_t upl)
{
	if(vector_upl_is_valid(upl)) {
		uint32_t i = 0;
		vector_upl_t vector_upl = upl->vector_upl;

		if(vector_upl) {
			vm_offset_t pagelist_size = 0, cur_upl_pagelist_size = 0;

			vector_upl->pagelist = (upl_page_info_array_t)kalloc(sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE));

			for(i = 0; i < vector_upl->num_upls; i++) {
				cur_upl_pagelist_size = sizeof(struct upl_page_info) * vector_upl->upl_elems[i]->size/PAGE_SIZE;
				bcopy(UPL_GET_INTERNAL_PAGE_LIST_SIMPLE(vector_upl->upl_elems[i]), (char*)vector_upl->pagelist + pagelist_size, cur_upl_pagelist_size);
				pagelist_size += cur_upl_pagelist_size;
				if(vector_upl->upl_elems[i]->highest_page > upl->highest_page)
					upl->highest_page = vector_upl->upl_elems[i]->highest_page;
			}
			assert( pagelist_size == (sizeof(struct upl_page_info)*(vector_upl->size/PAGE_SIZE)) );
		}
		else
			panic("vector_upl_set_pagelist was passed a non-vectored upl\n");
	}
	else
		panic("vector_upl_set_pagelist was passed a NULL upl\n");
}
upl_t
vector_upl_subupl_byindex(upl_t upl, uint32_t index)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			if(index < vector_upl->num_upls)
				return vector_upl->upl_elems[index];
		}
		else
			panic("vector_upl_subupl_byindex was passed a non-vectored upl\n");
	}
	return NULL;
}
upl_t
vector_upl_subupl_byoffset(upl_t upl, upl_offset_t *upl_offset, upl_size_t *upl_size)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			uint32_t i = 0;
			upl_t subupl = NULL;
			vector_upl_iostates_t subupl_state;

			for(i = 0; i < vector_upl->num_upls; i++) {
				subupl = vector_upl->upl_elems[i];
				subupl_state = vector_upl->upl_iostates[i];
				if( *upl_offset <= (subupl_state.offset + subupl_state.size - 1)) {
					/* We could have been passed an offset/size pair that belongs
					 * to an UPL element that has already been committed/aborted.
					 * If so, return NULL.
					 */
					if(subupl == NULL)
						return NULL;
					if((subupl_state.offset + subupl_state.size) < (*upl_offset + *upl_size)) {
						*upl_size = (subupl_state.offset + subupl_state.size) - *upl_offset;
						if(*upl_size > subupl_state.size)
							*upl_size = subupl_state.size;
					}
					if(*upl_offset >= subupl_state.offset)
						*upl_offset -= subupl_state.offset;
					else if(i)
						panic("Vector UPL offset miscalculation\n");
					return subupl;
				}
			}
		}
		else
			panic("vector_upl_subupl_byoffset was passed a non-vectored UPL\n");
	}
	return NULL;
}
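/*
 * Worked example (added for illustration, not in the original source):
 * suppose a vector UPL has two sub-UPLs with iostates
 *   sub-UPL 0: offset 0x0000, size 0x4000
 *   sub-UPL 1: offset 0x4000, size 0x4000
 * and a caller asks for (*upl_offset, *upl_size) = (0x3000, 0x3000).
 * The first lookup matches sub-UPL 0, clips *upl_size to
 * 0x4000 - 0x3000 = 0x1000 and leaves *upl_offset at 0x3000 within that
 * sub-UPL.  After the caller advances by 0x1000, the next lookup matches
 * sub-UPL 1 and returns *upl_offset = 0 with the remaining 0x2000.
 */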
void
vector_upl_get_submap(upl_t upl, vm_map_t *v_upl_submap, vm_offset_t *submap_dst_addr)
{
	*v_upl_submap = NULL;

	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			*v_upl_submap = vector_upl->submap;
			*submap_dst_addr = vector_upl->submap_dst_addr;
		}
		else
			panic("vector_upl_get_submap was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_get_submap was passed a null UPL\n");
}
void
vector_upl_set_submap(upl_t upl, vm_map_t submap, vm_offset_t submap_dst_addr)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			vector_upl->submap = submap;
			vector_upl->submap_dst_addr = submap_dst_addr;
		}
		else
			panic("vector_upl_set_submap was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_set_submap was passed a NULL UPL\n");
}
void
vector_upl_set_iostate(upl_t upl, upl_t subupl, upl_offset_t offset, upl_size_t size)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			uint32_t i = 0;

			for(i = 0; i < vector_upl->num_upls; i++) {
				if(vector_upl->upl_elems[i] == subupl)
					break;
			}

			if(i == vector_upl->num_upls)
				panic("setting sub-upl iostate when none exists");

			vector_upl->upl_iostates[i].offset = offset;
			if(size < PAGE_SIZE)
				size = PAGE_SIZE;
			vector_upl->upl_iostates[i].size = size;
		}
		else
			panic("vector_upl_set_iostate was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_set_iostate was passed a NULL UPL\n");
}
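/*
 * Illustrative sketch (not part of the original code): a typical producer
 * of a vector UPL creates it, attaches each sub-UPL with its I/O size,
 * records the per-sub-UPL iostate at its cumulative offset, and finally
 * builds the flattened pagelist.  The helper name and its sub-UPL array
 * parameters are placeholders.
 */
#if 0
static upl_t
vector_upl_build_example(
	vm_offset_t	base_offset,
	upl_t		*subupls,	/* already-created sub-UPLs */
	uint32_t	*io_sizes,	/* matching I/O sizes, in bytes */
	uint32_t	count)
{
	upl_t		vupl;
	upl_offset_t	offset = 0;
	uint32_t	i;

	vupl = vector_upl_create(base_offset);
	for (i = 0; i < count; i++) {
		vector_upl_set_subupl(vupl, subupls[i], io_sizes[i]);
		vector_upl_set_iostate(vupl, subupls[i], offset, io_sizes[i]);
		offset += io_sizes[i];
	}
	vector_upl_set_pagelist(vupl);
	return vupl;
}
#endif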
void
vector_upl_get_iostate(upl_t upl, upl_t subupl, upl_offset_t *offset, upl_size_t *size)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			uint32_t i = 0;

			for(i = 0; i < vector_upl->num_upls; i++) {
				if(vector_upl->upl_elems[i] == subupl)
					break;
			}

			if(i == vector_upl->num_upls)
				panic("getting sub-upl iostate when none exists");

			*offset = vector_upl->upl_iostates[i].offset;
			*size = vector_upl->upl_iostates[i].size;
		}
		else
			panic("vector_upl_get_iostate was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_get_iostate was passed a NULL UPL\n");
}
void
vector_upl_get_iostate_byindex(upl_t upl, uint32_t index, upl_offset_t *offset, upl_size_t *size)
{
	if(vector_upl_is_valid(upl)) {
		vector_upl_t vector_upl = upl->vector_upl;
		if(vector_upl) {
			if(index < vector_upl->num_upls) {
				*offset = vector_upl->upl_iostates[index].offset;
				*size = vector_upl->upl_iostates[index].size;
			}
			else
				*offset = *size = 0;
		}
		else
			panic("vector_upl_get_iostate_byindex was passed a non-vectored UPL\n");
	}
	else
		panic("vector_upl_get_iostate_byindex was passed a NULL UPL\n");
}
upl_page_info_t *
upl_get_internal_vectorupl_pagelist(upl_t upl)
{
	return ((vector_upl_t)(upl->vector_upl))->pagelist;
}

void *
upl_get_internal_vectorupl(upl_t upl)
{
	return upl->vector_upl;
}

vm_size_t
upl_get_internal_pagelist_offset(void)
{
	return sizeof(struct upl);
}
void
upl_clear_dirty(
	upl_t		upl,
	boolean_t	value)
{
	if (value) {
		upl->flags |= UPL_CLEAR_DIRTY;
	} else {
		upl->flags &= ~UPL_CLEAR_DIRTY;
	}
}

void
upl_set_referenced(
	upl_t		upl,
	boolean_t	value)
{
	if (value) {
		upl->ext_ref_count++;
	} else {
		if (!upl->ext_ref_count) {
			panic("upl_set_referenced not %p\n", upl);
		}
		upl->ext_ref_count--;
	}
}
boolean_t
vm_page_is_slideable(vm_page_t m)
{
	boolean_t result = FALSE;
	vm_object_t slide_object = slide_info.slide_object;
	mach_vm_offset_t start = slide_info.start;
	mach_vm_offset_t end = slide_info.end;

	/* make sure our page belongs to the one object allowed to do this */
	if (slide_object == VM_OBJECT_NULL) {
		return result;
	}

	/* Should we traverse down the chain? */
	if (m->object != slide_object) {
		return result;
	}

	if(!m->slid && (start <= m->offset && end > m->offset)) {
		result = TRUE;
	}
	return result;
}
int vm_page_slide_counter = 0;
int vm_page_slide_errors = 0;

kern_return_t
vm_page_slide(
	vm_page_t	page,
	vm_map_offset_t	kernel_mapping_offset)
{
	kern_return_t		kr;
	vm_map_size_t		kernel_mapping_size;
	vm_offset_t		kernel_vaddr;
	uint32_t		pageIndex = 0;

	assert(!page->slid);

	/*
	 * Take a paging-in-progress reference to keep the object
	 * alive even if we have to unlock it (in vm_paging_map_object()
	 * for example)...
	 */
	vm_object_paging_begin(page->object);

	if (kernel_mapping_offset == 0) {
		/*
		 * The page hasn't already been mapped in kernel space
		 * by the caller.  Map it now, so that we can access
		 * its contents and slide them.
		 */
		kernel_mapping_size = PAGE_SIZE;
		kr = vm_paging_map_object(&kernel_mapping_offset,
					  page,
					  page->object,
					  page->offset,
					  &kernel_mapping_size,
					  VM_PROT_READ | VM_PROT_WRITE,
					  FALSE);
		if (kr != KERN_SUCCESS) {
			panic("vm_page_slide: "
			      "could not map page in kernel: 0x%x\n",
			      kr);
		}
	} else {
		kernel_mapping_size = 0;
	}
	kernel_vaddr = CAST_DOWN(vm_offset_t, kernel_mapping_offset);

	/*
	 * Slide the pointers on the page.
	 */

	/* assert that slide_file_info.start/end are page-aligned? */

	pageIndex = (uint32_t)((page->offset - slide_info.start)/PAGE_SIZE);
	kr = vm_shared_region_slide(kernel_vaddr, pageIndex);
	vm_page_slide_counter++;

	/*
	 * Unmap the page from the kernel's address space,
	 * if we had to map it ourselves.
	 */
	if (kernel_mapping_size != 0) {
		vm_paging_unmap_object(page->object,
				       kernel_vaddr,
				       kernel_vaddr + PAGE_SIZE);
	}

	page->dirty = FALSE;
	pmap_clear_refmod(page->phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);

	if (kr == KERN_SUCCESS) {
		page->slid = TRUE;
	} else {
		page->error = TRUE;
		vm_page_slide_errors++;
	}

	vm_object_paging_end(page->object);

	return kr;
}
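/*
 * Illustrative sketch (not part of the original code): a caller holding a
 * busy, unmapped page would typically gate vm_page_slide() on
 * vm_page_is_slideable(), roughly as below.  The helper name is
 * hypothetical; passing 0 lets vm_page_slide() map the page into the
 * kernel itself.
 */
#if 0
static kern_return_t
vm_page_slide_if_needed(vm_page_t m)
{
	if (!vm_page_is_slideable(m))
		return KERN_SUCCESS;	/* nothing to slide */

	return vm_page_slide(m, 0);
}
#endif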
boolean_t
upl_device_page(upl_page_info_t *upl)
{
	return(UPL_DEVICE_PAGE(upl));
}

boolean_t
upl_page_present(upl_page_info_t *upl, int index)
{
	return(UPL_PAGE_PRESENT(upl, index));
}

boolean_t
upl_speculative_page(upl_page_info_t *upl, int index)
{
	return(UPL_SPECULATIVE_PAGE(upl, index));
}

boolean_t
upl_dirty_page(upl_page_info_t *upl, int index)
{
	return(UPL_DIRTY_PAGE(upl, index));
}

boolean_t
upl_valid_page(upl_page_info_t *upl, int index)
{
	return(UPL_VALID_PAGE(upl, index));
}

ppnum_t
upl_phys_page(upl_page_info_t *upl, int index)
{
	return(UPL_PHYS_PAGE(upl, index));
}
#ifdef MACH_BSD
void
vm_countdirtypages(void)
{
	vm_page_t m;
	int dpages;
	int pgopages;
	int precpages;

	dpages = 0;
	pgopages = 0;
	precpages = 0;

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	do {
		if (m == (vm_page_t)0) break;

		if (m->dirty) dpages++;
		if (m->pageout) pgopages++;
		if (m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;

	} while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_throttled);
	do {
		if (m == (vm_page_t)0) break;

		dpages++;
		assert(m->dirty);
		assert(!m->pageout);
		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;

	} while (!queue_end(&vm_page_queue_throttled, (queue_entry_t) m));
	vm_page_unlock_queues();

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_zf);
	do {
		if (m == (vm_page_t)0) break;

		if (m->dirty) dpages++;
		if (m->pageout) pgopages++;
		if (m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;

	} while (!queue_end(&vm_page_queue_zf, (queue_entry_t) m));
	vm_page_unlock_queues();

	printf("IN Q: %d : %d : %d\n", dpages, pgopages, precpages);

	dpages = 0;
	pgopages = 0;
	precpages = 0;

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_active);

	do {
		if (m == (vm_page_t)0) break;
		if (m->dirty) dpages++;
		if (m->pageout) pgopages++;
		if (m->precious) precpages++;

		assert(m->object != kernel_object);
		m = (vm_page_t) queue_next(&m->pageq);
		if (m == (vm_page_t)0) break;

	} while (!queue_end(&vm_page_queue_active, (queue_entry_t) m));
	vm_page_unlock_queues();

	printf("AC Q: %d : %d : %d\n", dpages, pgopages, precpages);
}
#endif /* MACH_BSD */
ppnum_t
upl_get_highest_page(
	upl_t	upl)
{
	return upl->highest_page;
}

upl_size_t
upl_get_size(
	upl_t	upl)
{
	return upl->size;
}
#if UPL_DEBUG
kern_return_t upl_ubc_alias_set(upl_t upl, uintptr_t alias1, uintptr_t alias2)
{
	upl->ubc_alias1 = alias1;
	upl->ubc_alias2 = alias2;
	return KERN_SUCCESS;
}
int upl_ubc_alias_get(upl_t upl, uintptr_t * al, uintptr_t * al2)
{
	if(al)
		*al = upl->ubc_alias1;
	if(al2)
		*al2 = upl->ubc_alias2;
	return KERN_SUCCESS;
}
#endif /* UPL_DEBUG */
#if	MACH_KDB
#include <ddb/db_output.h>
#include <ddb/db_print.h>
#include <vm/vm_print.h>

#define	printf	kdbprintf
void	db_pageout(void);

void
db_vm(void)
{
	iprintf("VM Statistics:\n");
	iprintf("pages:\n");
	iprintf("activ %5d inact %5d free  %5d",
		vm_page_active_count, vm_page_inactive_count,
		vm_page_free_count);
	printf(" wire  %5d gobbl %5d\n",
	       vm_page_wire_count, vm_page_gobble_count);

	iprintf("target:\n");
	iprintf("min   %5d inact %5d free  %5d",
		vm_page_free_min, vm_page_inactive_target,
		vm_page_free_target);
	printf(" resrv %5d\n", vm_page_free_reserved);

	iprintf("pause:\n");
	db_pageout();
}

#if	MACH_COUNTERS
extern int c_laundry_pages_freed;
#endif	/* MACH_COUNTERS */

void
db_pageout(void)
{
	iprintf("Pageout Statistics:\n");
	iprintf("active %5d  inactv %5d\n",
		vm_pageout_active, vm_pageout_inactive);
	iprintf("nolock %5d  avoid  %5d  busy   %5d  absent %5d\n",
		vm_pageout_inactive_nolock, vm_pageout_inactive_avoid,
		vm_pageout_inactive_busy, vm_pageout_inactive_absent);
	iprintf("used   %5d  clean  %5d  dirty(internal)  %5d  dirty(external)  %5d\n",
		vm_pageout_inactive_used, vm_pageout_inactive_clean,
		vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external);
#if	MACH_COUNTERS
	iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed);
#endif	/* MACH_COUNTERS */
#if	MACH_CLUSTER_STATS
	iprintf("Cluster Statistics:\n");
	iprintf("dirtied   %5d   cleaned  %5d   collisions  %5d\n",
		vm_pageout_cluster_dirtied, vm_pageout_cluster_cleaned,
		vm_pageout_cluster_collisions);
	iprintf("clusters  %5d   conversions  %5d\n",
		vm_pageout_cluster_clusters, vm_pageout_cluster_conversions);

	iprintf("Target Statistics:\n");
	iprintf("collisions   %5d   page_dirtied  %5d   page_freed  %5d\n",
		vm_pageout_target_collisions, vm_pageout_target_page_dirtied,
		vm_pageout_target_page_freed);
#endif	/* MACH_CLUSTER_STATS */
}
#endif	/* MACH_KDB */