2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 * Carnegie Mellon requests users of this software to return to
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
53 * File: vm/vm_pageout.c
54 * Author: Avadis Tevanian, Jr., Michael Wayne Young
57 * The proverbial page-out daemon.
60 #include <mach_pagemap.h>
61 #include <mach_cluster_stats.h>
63 #include <advisory_pageout.h>
65 #include <mach/mach_types.h>
66 #include <mach/memory_object.h>
67 #include <mach/memory_object_default.h>
68 #include <mach/memory_object_control_server.h>
69 #include <mach/mach_host_server.h>
70 #include <mach/vm_param.h>
71 #include <mach/vm_statistics.h>
72 #include <kern/host_statistics.h>
73 #include <kern/counters.h>
74 #include <kern/thread.h>
77 #include <vm/vm_map.h>
78 #include <vm/vm_object.h>
79 #include <vm/vm_page.h>
80 #include <vm/vm_pageout.h>
81 #include <machine/vm_tuning.h>
82 #include <kern/misc_protos.h>
84 extern ipc_port_t memory_manager_default
;
86 #ifndef VM_PAGE_LAUNDRY_MAX
87 #define VM_PAGE_LAUNDRY_MAX 6 /* outstanding DMM page cleans */
88 #endif /* VM_PAGEOUT_LAUNDRY_MAX */
90 #ifndef VM_PAGEOUT_BURST_MAX
91 #define VM_PAGEOUT_BURST_MAX 32 /* simultaneous EMM page cleans */
92 #endif /* VM_PAGEOUT_BURST_MAX */
94 #ifndef VM_PAGEOUT_DISCARD_MAX
95 #define VM_PAGEOUT_DISCARD_MAX 68 /* simultaneous EMM page cleans */
96 #endif /* VM_PAGEOUT_DISCARD_MAX */
98 #ifndef VM_PAGEOUT_BURST_WAIT
99 #define VM_PAGEOUT_BURST_WAIT 30 /* milliseconds per page */
100 #endif /* VM_PAGEOUT_BURST_WAIT */
102 #ifndef VM_PAGEOUT_EMPTY_WAIT
103 #define VM_PAGEOUT_EMPTY_WAIT 200 /* milliseconds */
104 #endif /* VM_PAGEOUT_EMPTY_WAIT */
107 * To obtain a reasonable LRU approximation, the inactive queue
108 * needs to be large enough to give pages on it a chance to be
109 * referenced a second time. This macro defines the fraction
110 * of active+inactive pages that should be inactive.
111 * The pageout daemon uses it to update vm_page_inactive_target.
113 * If vm_page_free_count falls below vm_page_free_target and
114 * vm_page_inactive_count is below vm_page_inactive_target,
115 * then the pageout daemon starts running.
118 #ifndef VM_PAGE_INACTIVE_TARGET
119 #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 1 / 3)
120 #endif /* VM_PAGE_INACTIVE_TARGET */
123 * Once the pageout daemon starts running, it keeps going
124 * until vm_page_free_count meets or exceeds vm_page_free_target.
127 #ifndef VM_PAGE_FREE_TARGET
128 #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
129 #endif /* VM_PAGE_FREE_TARGET */
132 * The pageout daemon always starts running once vm_page_free_count
133 * falls below vm_page_free_min.
136 #ifndef VM_PAGE_FREE_MIN
137 #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
138 #endif /* VM_PAGE_FREE_MIN */
141 * When vm_page_free_count falls below vm_page_free_reserved,
142 * only vm-privileged threads can allocate pages. vm-privilege
143 * allows the pageout daemon and default pager (and any other
144 * associated threads needed for default pageout) to continue
145 * operation by dipping into the reserved pool of pages.
148 #ifndef VM_PAGE_FREE_RESERVED
149 #define VM_PAGE_FREE_RESERVED \
150 ((16 * VM_PAGE_LAUNDRY_MAX) + NCPUS)
151 #endif /* VM_PAGE_FREE_RESERVED */
154 * Exported variable used to broadcast the activation of the pageout scan
155 * Working Set uses this to throttle its use of pmap removes. In this
156 * way, code which runs within memory in an uncontested context does
157 * not keep encountering soft faults.
160 unsigned int vm_pageout_scan_event_counter
= 0;
163 * Forward declarations for internal routines.
165 extern void vm_pageout_continue(void);
166 extern void vm_pageout_scan(void);
167 extern void vm_pageout_throttle(vm_page_t m
);
168 extern vm_page_t
vm_pageout_cluster_page(
170 vm_object_offset_t offset
,
171 boolean_t precious_clean
);
173 unsigned int vm_pageout_reserved_internal
= 0;
174 unsigned int vm_pageout_reserved_really
= 0;
176 unsigned int vm_page_laundry_max
= 0; /* # of clusters outstanding */
177 unsigned int vm_page_laundry_min
= 0;
178 unsigned int vm_pageout_burst_max
= 0;
179 unsigned int vm_pageout_burst_wait
= 0; /* milliseconds per page */
180 unsigned int vm_pageout_empty_wait
= 0; /* milliseconds */
181 unsigned int vm_pageout_burst_min
= 0;
182 unsigned int vm_pageout_pause_count
= 0;
183 unsigned int vm_pageout_pause_max
= 0;
184 unsigned int vm_free_page_pause
= 100; /* milliseconds */
187 * Protection against zero fill flushing live working sets derived
188 * from existing backing store and files
190 unsigned int vm_accellerate_zf_pageout_trigger
= 400;
191 unsigned int vm_zf_iterator
;
192 unsigned int vm_zf_iterator_count
= 40;
193 unsigned int last_page_zf
;
194 unsigned int vm_zf_count
= 0;
197 * These variables record the pageout daemon's actions:
198 * how many pages it looks at and what happens to those pages.
199 * No locking needed because only one thread modifies the variables.
202 unsigned int vm_pageout_active
= 0; /* debugging */
203 unsigned int vm_pageout_inactive
= 0; /* debugging */
204 unsigned int vm_pageout_inactive_throttled
= 0; /* debugging */
205 unsigned int vm_pageout_inactive_forced
= 0; /* debugging */
206 unsigned int vm_pageout_inactive_nolock
= 0; /* debugging */
207 unsigned int vm_pageout_inactive_avoid
= 0; /* debugging */
208 unsigned int vm_pageout_inactive_busy
= 0; /* debugging */
209 unsigned int vm_pageout_inactive_absent
= 0; /* debugging */
210 unsigned int vm_pageout_inactive_used
= 0; /* debugging */
211 unsigned int vm_pageout_inactive_clean
= 0; /* debugging */
212 unsigned int vm_pageout_inactive_dirty
= 0; /* debugging */
213 unsigned int vm_pageout_dirty_no_pager
= 0; /* debugging */
214 unsigned int vm_stat_discard
= 0; /* debugging */
215 unsigned int vm_stat_discard_sent
= 0; /* debugging */
216 unsigned int vm_stat_discard_failure
= 0; /* debugging */
217 unsigned int vm_stat_discard_throttle
= 0; /* debugging */
218 unsigned int vm_pageout_scan_active_emm_throttle
= 0; /* debugging */
219 unsigned int vm_pageout_scan_active_emm_throttle_success
= 0; /* debugging */
220 unsigned int vm_pageout_scan_active_emm_throttle_failure
= 0; /* debugging */
221 unsigned int vm_pageout_scan_inactive_emm_throttle
= 0; /* debugging */
222 unsigned int vm_pageout_scan_inactive_emm_throttle_success
= 0; /* debugging */
223 unsigned int vm_pageout_scan_inactive_emm_throttle_failure
= 0; /* debugging */
226 unsigned int vm_pageout_out_of_line
= 0;
227 unsigned int vm_pageout_in_place
= 0;
229 * Routine: vm_pageout_object_allocate
231 * Allocate an object for use as out-of-line memory in a
232 * data_return/data_initialize message.
233 * The page must be in an unlocked object.
235 * If the page belongs to a trusted pager, cleaning in place
236 * will be used, which utilizes a special "pageout object"
237 * containing private alias pages for the real page frames.
238 * Untrusted pagers use normal out-of-line memory.
241 vm_pageout_object_allocate(
244 vm_object_offset_t offset
)
246 vm_object_t object
= m
->object
;
247 vm_object_t new_object
;
249 assert(object
->pager_ready
);
251 if (object
->pager_trusted
|| object
->internal
)
252 vm_pageout_throttle(m
);
254 new_object
= vm_object_allocate(size
);
256 if (object
->pager_trusted
) {
257 assert (offset
< object
->size
);
259 vm_object_lock(new_object
);
260 new_object
->pageout
= TRUE
;
261 new_object
->shadow
= object
;
262 new_object
->can_persist
= FALSE
;
263 new_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
264 new_object
->shadow_offset
= offset
;
265 vm_object_unlock(new_object
);
268 * Take a paging reference on the object. This will be dropped
269 * in vm_pageout_object_terminate()
271 vm_object_lock(object
);
272 vm_object_paging_begin(object
);
273 vm_object_unlock(object
);
275 vm_pageout_in_place
++;
277 vm_pageout_out_of_line
++;
281 #if MACH_CLUSTER_STATS
282 unsigned long vm_pageout_cluster_dirtied
= 0;
283 unsigned long vm_pageout_cluster_cleaned
= 0;
284 unsigned long vm_pageout_cluster_collisions
= 0;
285 unsigned long vm_pageout_cluster_clusters
= 0;
286 unsigned long vm_pageout_cluster_conversions
= 0;
287 unsigned long vm_pageout_target_collisions
= 0;
288 unsigned long vm_pageout_target_page_dirtied
= 0;
289 unsigned long vm_pageout_target_page_freed
= 0;
290 #define CLUSTER_STAT(clause) clause
291 #else /* MACH_CLUSTER_STATS */
292 #define CLUSTER_STAT(clause)
293 #endif /* MACH_CLUSTER_STATS */
296 * Routine: vm_pageout_object_terminate
298 * Destroy the pageout_object allocated by
299 * vm_pageout_object_allocate(), and perform all of the
300 * required cleanup actions.
303 * The object must be locked, and will be returned locked.
306 vm_pageout_object_terminate(
309 vm_object_t shadow_object
;
312 * Deal with the deallocation (last reference) of a pageout object
313 * (used for cleaning-in-place) by dropping the paging references/
314 * freeing pages in the original object.
317 assert(object
->pageout
);
318 shadow_object
= object
->shadow
;
319 vm_object_lock(shadow_object
);
321 while (!queue_empty(&object
->memq
)) {
323 vm_object_offset_t offset
;
325 p
= (vm_page_t
) queue_first(&object
->memq
);
330 assert(!p
->cleaning
);
336 m
= vm_page_lookup(shadow_object
,
337 offset
+ object
->shadow_offset
);
339 if(m
== VM_PAGE_NULL
)
342 /* used as a trigger on upl_commit etc to recognize the */
343 /* pageout daemon's subseqent desire to pageout a cleaning */
344 /* page. When the bit is on the upl commit code will */
345 /* respect the pageout bit in the target page over the */
346 /* caller's page list indication */
347 m
->dump_cleaning
= FALSE
;
350 * Account for the paging reference taken when
351 * m->cleaning was set on this page.
353 vm_object_paging_end(shadow_object
);
354 assert((m
->dirty
) || (m
->precious
) ||
355 (m
->busy
&& m
->cleaning
));
358 * Handle the trusted pager throttle.
360 vm_page_lock_queues();
362 vm_page_laundry_count
--;
364 if (vm_page_laundry_count
< vm_page_laundry_min
) {
365 vm_page_laundry_min
= 0;
366 thread_wakeup((event_t
) &vm_page_laundry_count
);
371 * Handle the "target" page(s). These pages are to be freed if
372 * successfully cleaned. Target pages are always busy, and are
373 * wired exactly once. The initial target pages are not mapped,
374 * (so cannot be referenced or modified) but converted target
375 * pages may have been modified between the selection as an
376 * adjacent page and conversion to a target.
380 assert(m
->wire_count
== 1);
383 #if MACH_CLUSTER_STATS
384 if (m
->wanted
) vm_pageout_target_collisions
++;
387 * Revoke all access to the page. Since the object is
388 * locked, and the page is busy, this prevents the page
389 * from being dirtied after the pmap_is_modified() call
392 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
395 * Since the page is left "dirty" but "not modifed", we
396 * can detect whether the page was redirtied during
397 * pageout by checking the modify state.
399 m
->dirty
= pmap_is_modified(m
->phys_addr
);
402 CLUSTER_STAT(vm_pageout_target_page_dirtied
++;)
403 vm_page_unwire(m
);/* reactivates */
404 VM_STAT(reactivations
++);
407 CLUSTER_STAT(vm_pageout_target_page_freed
++;)
408 vm_page_free(m
);/* clears busy, etc. */
410 vm_page_unlock_queues();
414 * Handle the "adjacent" pages. These pages were cleaned in
415 * place, and should be left alone.
416 * If prep_pin_count is nonzero, then someone is using the
417 * page, so make it active.
419 if (!m
->active
&& !m
->inactive
&& !m
->private) {
423 vm_page_deactivate(m
);
425 if((m
->busy
) && (m
->cleaning
)) {
427 /* the request_page_list case, (COPY_OUT_FROM FALSE) */
430 /* We do not re-set m->dirty ! */
431 /* The page was busy so no extraneous activity */
432 /* could have occured. COPY_INTO is a read into the */
433 /* new pages. CLEAN_IN_PLACE does actually write */
434 /* out the pages but handling outside of this code */
435 /* will take care of resetting dirty. We clear the */
436 /* modify however for the Programmed I/O case. */
437 pmap_clear_modify(m
->phys_addr
);
440 if(shadow_object
->absent_count
== 1)
441 vm_object_absent_release(shadow_object
);
443 shadow_object
->absent_count
--;
445 m
->overwriting
= FALSE
;
446 } else if (m
->overwriting
) {
447 /* alternate request page list, write to page_list */
448 /* case. Occurs when the original page was wired */
449 /* at the time of the list request */
450 assert(m
->wire_count
!= 0);
451 vm_page_unwire(m
);/* reactivates */
452 m
->overwriting
= FALSE
;
455 * Set the dirty state according to whether or not the page was
456 * modified during the pageout. Note that we purposefully do
457 * NOT call pmap_clear_modify since the page is still mapped.
458 * If the page were to be dirtied between the 2 calls, this
459 * this fact would be lost. This code is only necessary to
460 * maintain statistics, since the pmap module is always
461 * consulted if m->dirty is false.
463 #if MACH_CLUSTER_STATS
464 m
->dirty
= pmap_is_modified(m
->phys_addr
);
466 if (m
->dirty
) vm_pageout_cluster_dirtied
++;
467 else vm_pageout_cluster_cleaned
++;
468 if (m
->wanted
) vm_pageout_cluster_collisions
++;
477 * Wakeup any thread waiting for the page to be un-cleaning.
480 vm_page_unlock_queues();
483 * Account for the paging reference taken in vm_paging_object_allocate.
485 vm_object_paging_end(shadow_object
);
486 vm_object_unlock(shadow_object
);
488 assert(object
->ref_count
== 0);
489 assert(object
->paging_in_progress
== 0);
490 assert(object
->resident_page_count
== 0);
495 * Routine: vm_pageout_setup
497 * Set up a page for pageout (clean & flush).
499 * Move the page to a new object, as part of which it will be
500 * sent to its memory manager in a memory_object_data_write or
501 * memory_object_initialize message.
503 * The "new_object" and "new_offset" arguments
504 * indicate where the page should be moved.
507 * The page in question must not be on any pageout queues,
508 * and must be busy. The object to which it belongs
509 * must be unlocked, and the caller must hold a paging
510 * reference to it. The new_object must not be locked.
512 * This routine returns a pointer to a place-holder page,
513 * inserted at the same offset, to block out-of-order
514 * requests for the page. The place-holder page must
515 * be freed after the data_write or initialize message
518 * The original page is put on a paging queue and marked
523 register vm_page_t m
,
524 register vm_object_t new_object
,
525 vm_object_offset_t new_offset
)
527 register vm_object_t old_object
= m
->object
;
528 vm_object_offset_t paging_offset
;
529 vm_object_offset_t offset
;
530 register vm_page_t holding_page
;
531 register vm_page_t new_m
;
532 register vm_page_t new_page
;
533 boolean_t need_to_wire
= FALSE
;
537 "vm_pageout_setup, obj 0x%X off 0x%X page 0x%X new obj 0x%X offset 0x%X\n",
538 (integer_t
)m
->object
, (integer_t
)m
->offset
,
539 (integer_t
)m
, (integer_t
)new_object
,
540 (integer_t
)new_offset
);
541 assert(m
&& m
->busy
&& !m
->absent
&& !m
->fictitious
&& !m
->error
&&
544 assert(m
->dirty
|| m
->precious
);
547 * Create a place-holder page where the old one was, to prevent
548 * attempted pageins of this page while we're unlocked.
550 VM_PAGE_GRAB_FICTITIOUS(holding_page
);
552 vm_object_lock(old_object
);
555 paging_offset
= offset
+ old_object
->paging_offset
;
557 if (old_object
->pager_trusted
) {
559 * This pager is trusted, so we can clean this page
560 * in place. Leave it in the old object, and mark it
561 * cleaning & pageout.
563 new_m
= holding_page
;
564 holding_page
= VM_PAGE_NULL
;
567 * Set up new page to be private shadow of real page.
569 new_m
->phys_addr
= m
->phys_addr
;
570 new_m
->fictitious
= FALSE
;
571 new_m
->pageout
= TRUE
;
574 * Mark real page as cleaning (indicating that we hold a
575 * paging reference to be released via m_o_d_r_c) and
576 * pageout (indicating that the page should be freed
577 * when the pageout completes).
579 pmap_clear_modify(m
->phys_addr
);
580 vm_page_lock_queues();
581 new_m
->private = TRUE
;
587 assert(m
->wire_count
== 1);
588 vm_page_unlock_queues();
592 m
->page_lock
= VM_PROT_NONE
;
594 m
->unlock_request
= VM_PROT_NONE
;
597 * Cannot clean in place, so rip the old page out of the
598 * object, and stick the holding page in. Set new_m to the
599 * page in the new object.
601 vm_page_lock_queues();
602 VM_PAGE_QUEUES_REMOVE(m
);
605 vm_page_insert(holding_page
, old_object
, offset
);
606 vm_page_unlock_queues();
611 new_m
->page_lock
= VM_PROT_NONE
;
612 new_m
->unlock_request
= VM_PROT_NONE
;
614 if (old_object
->internal
)
618 * Record that this page has been written out
621 vm_external_state_set(old_object
->existence_map
, offset
);
622 #endif /* MACH_PAGEMAP */
624 vm_object_unlock(old_object
);
626 vm_object_lock(new_object
);
629 * Put the page into the new object. If it is a not wired
630 * (if it's the real page) it will be activated.
633 vm_page_lock_queues();
634 vm_page_insert(new_m
, new_object
, new_offset
);
638 vm_page_activate(new_m
);
639 PAGE_WAKEUP_DONE(new_m
);
640 vm_page_unlock_queues();
642 vm_object_unlock(new_object
);
645 * Return the placeholder page to simplify cleanup.
647 return (holding_page
);
651 * Routine: vm_pageclean_setup
653 * Purpose: setup a page to be cleaned (made non-dirty), but not
654 * necessarily flushed from the VM page cache.
655 * This is accomplished by cleaning in place.
657 * The page must not be busy, and the object and page
658 * queues must be locked.
665 vm_object_t new_object
,
666 vm_object_offset_t new_offset
)
668 vm_object_t old_object
= m
->object
;
670 assert(!m
->cleaning
);
673 "vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
674 (integer_t
)old_object
, m
->offset
, (integer_t
)m
,
675 (integer_t
)new_m
, new_offset
);
677 pmap_clear_modify(m
->phys_addr
);
678 vm_object_paging_begin(old_object
);
681 * Record that this page has been written out
684 vm_external_state_set(old_object
->existence_map
, m
->offset
);
685 #endif /*MACH_PAGEMAP*/
688 * Mark original page as cleaning in place.
695 * Convert the fictitious page to a private shadow of
698 assert(new_m
->fictitious
);
699 new_m
->fictitious
= FALSE
;
700 new_m
->private = TRUE
;
701 new_m
->pageout
= TRUE
;
702 new_m
->phys_addr
= m
->phys_addr
;
705 vm_page_insert(new_m
, new_object
, new_offset
);
706 assert(!new_m
->wanted
);
714 vm_object_t new_object
,
715 vm_object_offset_t new_offset
)
718 "vm_pageclean_copy, page 0x%X new_m 0x%X new_obj 0x%X offset 0x%X\n",
719 m
, new_m
, new_object
, new_offset
, 0);
721 assert((!m
->busy
) && (!m
->cleaning
));
723 assert(!new_m
->private && !new_m
->fictitious
);
725 pmap_clear_modify(m
->phys_addr
);
728 vm_object_paging_begin(m
->object
);
729 vm_page_unlock_queues();
730 vm_object_unlock(m
->object
);
733 * Copy the original page to the new page.
735 vm_page_copy(m
, new_m
);
738 * Mark the old page as clean. A request to pmap_is_modified
739 * will get the right answer.
741 vm_object_lock(m
->object
);
744 vm_object_paging_end(m
->object
);
746 vm_page_lock_queues();
747 if (!m
->active
&& !m
->inactive
)
751 vm_page_insert(new_m
, new_object
, new_offset
);
752 vm_page_activate(new_m
);
753 new_m
->busy
= FALSE
; /* No other thread can be waiting */
758 * Routine: vm_pageout_initialize_page
760 * Causes the specified page to be initialized in
761 * the appropriate memory object. This routine is used to push
762 * pages into a copy-object when they are modified in the
765 * The page is moved to a temporary object and paged out.
768 * The page in question must not be on any pageout queues.
769 * The object to which it belongs must be locked.
770 * The page must be busy, but not hold a paging reference.
773 * Move this page to a completely new object.
776 vm_pageout_initialize_page(
780 vm_object_t new_object
;
782 vm_object_offset_t paging_offset
;
783 vm_page_t holding_page
;
787 "vm_pageout_initialize_page, page 0x%X\n",
788 (integer_t
)m
, 0, 0, 0, 0);
792 * Verify that we really want to clean this page
799 * Create a paging reference to let us play with the object.
802 paging_offset
= m
->offset
+ object
->paging_offset
;
803 vm_object_paging_begin(object
);
804 vm_object_unlock(object
);
805 if (m
->absent
|| m
->error
|| m
->restart
||
806 (!m
->dirty
&& !m
->precious
)) {
808 panic("reservation without pageout?"); /* alan */
812 /* set the page for future call to vm_fault_list_request */
814 vm_object_lock(m
->object
);
815 vm_page_lock_queues();
816 pmap_clear_modify(m
->phys_addr
);
819 m
->list_req_pending
= TRUE
;
823 vm_page_unlock_queues();
824 vm_object_unlock(m
->object
);
825 vm_pageout_throttle(m
);
828 * Write the data to its pager.
829 * Note that the data is passed by naming the new object,
830 * not a virtual address; the pager interface has been
831 * manipulated to use the "internal memory" data type.
832 * [The object reference from its allocation is donated
833 * to the eventual recipient.]
835 memory_object_data_initialize(object
->pager
,
839 vm_object_lock(object
);
842 #if MACH_CLUSTER_STATS
843 #define MAXCLUSTERPAGES 16
845 unsigned long pages_in_cluster
;
846 unsigned long pages_at_higher_offsets
;
847 unsigned long pages_at_lower_offsets
;
848 } cluster_stats
[MAXCLUSTERPAGES
];
849 #endif /* MACH_CLUSTER_STATS */
851 boolean_t allow_clustered_pageouts
= FALSE
;
854 * vm_pageout_cluster:
856 * Given a page, page it out, and attempt to clean adjacent pages
857 * in the same operation.
859 * The page must be busy, and the object unlocked w/ paging reference
860 * to prevent deallocation or collapse. The page must not be on any
867 vm_object_t object
= m
->object
;
868 vm_object_offset_t offset
= m
->offset
; /* from vm_object start */
869 vm_object_offset_t paging_offset
= m
->offset
+ object
->paging_offset
;
870 vm_object_t new_object
;
871 vm_object_offset_t new_offset
;
872 vm_size_t cluster_size
;
873 vm_object_offset_t cluster_offset
; /* from memory_object start */
874 vm_object_offset_t cluster_lower_bound
; /* from vm_object_start */
875 vm_object_offset_t cluster_upper_bound
; /* from vm_object_start */
876 vm_object_offset_t cluster_start
, cluster_end
;/* from vm_object start */
877 vm_object_offset_t offset_within_cluster
;
878 vm_size_t length_of_data
;
879 vm_page_t
friend, holding_page
;
881 boolean_t precious_clean
= TRUE
;
882 int pages_in_cluster
;
884 CLUSTER_STAT(int pages_at_higher_offsets
= 0;)
885 CLUSTER_STAT(int pages_at_lower_offsets
= 0;)
888 "vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
889 (integer_t
)object
, offset
, (integer_t
)m
, 0, 0);
891 CLUSTER_STAT(vm_pageout_cluster_clusters
++;)
893 * Only a certain kind of page is appreciated here.
895 assert(m
->busy
&& (m
->dirty
|| m
->precious
) && (m
->wire_count
== 0));
896 assert(!m
->cleaning
&& !m
->pageout
&& !m
->inactive
&& !m
->active
);
898 vm_object_lock(object
);
899 cluster_size
= object
->cluster_size
;
901 assert(cluster_size
>= PAGE_SIZE
);
902 if (cluster_size
< PAGE_SIZE
) cluster_size
= PAGE_SIZE
;
903 assert(object
->pager_created
&& object
->pager_initialized
);
904 assert(object
->internal
|| object
->pager_ready
);
906 if (m
->precious
&& !m
->dirty
)
907 precious_clean
= TRUE
;
909 if (!object
->pager_trusted
|| !allow_clustered_pageouts
)
910 cluster_size
= PAGE_SIZE
;
911 vm_object_unlock(object
);
913 cluster_offset
= paging_offset
& (vm_object_offset_t
)(cluster_size
- 1);
914 /* bytes from beginning of cluster */
916 * Due to unaligned mappings, we have to be careful
917 * of negative offsets into the VM object. Clip the cluster
918 * boundary to the VM object, not the memory object.
920 if (offset
> cluster_offset
) {
921 cluster_lower_bound
= offset
- cluster_offset
;
924 cluster_lower_bound
= 0;
926 cluster_upper_bound
= (offset
- cluster_offset
) +
927 (vm_object_offset_t
)cluster_size
;
929 /* set the page for future call to vm_fault_list_request */
931 vm_object_lock(m
->object
);
932 vm_page_lock_queues();
934 m
->list_req_pending
= TRUE
;
938 vm_page_unlock_queues();
939 vm_object_unlock(m
->object
);
940 vm_pageout_throttle(m
);
943 * Search backward for adjacent eligible pages to clean in
947 cluster_start
= offset
;
948 if (offset
) { /* avoid wrap-around at zero */
949 for (cluster_start
= offset
- PAGE_SIZE_64
;
950 cluster_start
>= cluster_lower_bound
;
951 cluster_start
-= PAGE_SIZE_64
) {
952 assert(cluster_size
> PAGE_SIZE
);
954 vm_object_lock(object
);
955 vm_page_lock_queues();
957 if ((friend = vm_pageout_cluster_page(object
, cluster_start
,
958 precious_clean
)) == VM_PAGE_NULL
) {
959 vm_page_unlock_queues();
960 vm_object_unlock(object
);
963 new_offset
= (cluster_start
+ object
->paging_offset
)
964 & (cluster_size
- 1);
966 assert(new_offset
< cluster_offset
);
967 m
->list_req_pending
= TRUE
;
969 /* do nothing except advance the write request, all we really need to */
970 /* do is push the target page and let the code at the other end decide */
971 /* what is really the right size */
972 if (vm_page_free_count
<= vm_page_free_reserved
) {
978 vm_page_unlock_queues();
979 vm_object_unlock(object
);
980 if(m
->dirty
|| m
->object
->internal
) {
981 CLUSTER_STAT(pages_at_lower_offsets
++;)
985 cluster_start
+= PAGE_SIZE_64
;
987 assert(cluster_start
>= cluster_lower_bound
);
988 assert(cluster_start
<= offset
);
990 * Search forward for adjacent eligible pages to clean in
993 for (cluster_end
= offset
+ PAGE_SIZE_64
;
994 cluster_end
< cluster_upper_bound
;
995 cluster_end
+= PAGE_SIZE_64
) {
996 assert(cluster_size
> PAGE_SIZE
);
998 vm_object_lock(object
);
999 vm_page_lock_queues();
1001 if ((friend = vm_pageout_cluster_page(object
, cluster_end
,
1002 precious_clean
)) == VM_PAGE_NULL
) {
1003 vm_page_unlock_queues();
1004 vm_object_unlock(object
);
1007 new_offset
= (cluster_end
+ object
->paging_offset
)
1008 & (cluster_size
- 1);
1010 assert(new_offset
< cluster_size
);
1011 m
->list_req_pending
= TRUE
;
1013 /* do nothing except advance the write request, all we really need to */
1014 /* do is push the target page and let the code at the other end decide */
1015 /* what is really the right size */
1016 if (vm_page_free_count
<= vm_page_free_reserved
) {
1022 vm_page_unlock_queues();
1023 vm_object_unlock(object
);
1025 if(m
->dirty
|| m
->object
->internal
) {
1026 CLUSTER_STAT(pages_at_higher_offsets
++;)
1029 assert(cluster_end
<= cluster_upper_bound
);
1030 assert(cluster_end
>= offset
+ PAGE_SIZE
);
1033 * (offset - cluster_offset) is beginning of cluster_object
1034 * relative to vm_object start.
1036 offset_within_cluster
= cluster_start
- (offset
- cluster_offset
);
1037 length_of_data
= cluster_end
- cluster_start
;
1039 assert(offset_within_cluster
< cluster_size
);
1040 assert((offset_within_cluster
+ length_of_data
) <= cluster_size
);
1043 assert(rc
== KERN_SUCCESS
);
1045 pages_in_cluster
= length_of_data
/PAGE_SIZE
;
1047 #if MACH_CLUSTER_STATS
1048 (cluster_stats
[pages_at_lower_offsets
].pages_at_lower_offsets
)++;
1049 (cluster_stats
[pages_at_higher_offsets
].pages_at_higher_offsets
)++;
1050 (cluster_stats
[pages_in_cluster
].pages_in_cluster
)++;
1051 #endif /* MACH_CLUSTER_STATS */
1054 * Send the data to the pager.
1056 paging_offset
= cluster_start
+ object
->paging_offset
;
1058 rc
= memory_object_data_return(object
->pager
,
1064 vm_object_lock(object
);
1065 vm_object_paging_end(object
);
1068 assert(!object
->pager_trusted
);
1069 VM_PAGE_FREE(holding_page
);
1070 vm_object_paging_end(object
);
1073 vm_object_unlock(object
);
1077 * Trusted pager throttle.
1078 * Object must be unlocked, page queues must be unlocked.
1081 vm_pageout_throttle(
1082 register vm_page_t m
)
1084 vm_page_lock_queues();
1085 assert(!m
->laundry
);
1087 while (vm_page_laundry_count
>= vm_page_laundry_max
) {
1089 * Set the threshold for when vm_page_free()
1090 * should wake us up.
1092 vm_page_laundry_min
= vm_page_laundry_max
/2;
1094 assert_wait((event_t
) &vm_page_laundry_count
, THREAD_UNINT
);
1095 vm_page_unlock_queues();
1098 * Pause to let the default pager catch up.
1100 thread_block((void (*)(void)) 0);
1101 vm_page_lock_queues();
1103 vm_page_laundry_count
++;
1104 vm_page_unlock_queues();
1108 * The global variable vm_pageout_clean_active_pages controls whether
1109 * active pages are considered valid to be cleaned in place during a
1110 * clustered pageout. Performance measurements are necessary to determine
1113 int vm_pageout_clean_active_pages
= 1;
1115 * vm_pageout_cluster_page: [Internal]
1117 * return a vm_page_t to the page at (object,offset) if it is appropriate
1118 * to clean in place. Pages that are non-existent, busy, absent, already
1119 * cleaning, or not dirty are not eligible to be cleaned as an adjacent
1120 * page in a cluster.
1122 * The object must be locked on entry, and remains locked throughout
1127 vm_pageout_cluster_page(
1129 vm_object_offset_t offset
,
1130 boolean_t precious_clean
)
1135 "vm_pageout_cluster_page, object 0x%X offset 0x%X\n",
1136 (integer_t
)object
, offset
, 0, 0, 0);
1138 if ((m
= vm_page_lookup(object
, offset
)) == VM_PAGE_NULL
)
1139 return(VM_PAGE_NULL
);
1141 if (m
->busy
|| m
->absent
|| m
->cleaning
||
1142 (m
->wire_count
!= 0) || m
->error
)
1143 return(VM_PAGE_NULL
);
1145 if (vm_pageout_clean_active_pages
) {
1146 if (!m
->active
&& !m
->inactive
) return(VM_PAGE_NULL
);
1148 if (!m
->inactive
) return(VM_PAGE_NULL
);
1151 assert(!m
->private);
1152 assert(!m
->fictitious
);
1154 if (!m
->dirty
) m
->dirty
= pmap_is_modified(m
->phys_addr
);
1156 if (precious_clean
) {
1157 if (!m
->precious
|| !m
->dirty
)
1158 return(VM_PAGE_NULL
);
1161 return(VM_PAGE_NULL
);
1167 * vm_pageout_scan does the dirty work for the pageout daemon.
1168 * It returns with vm_page_queue_free_lock held and
1169 * vm_page_free_wanted == 0.
1171 extern void vm_pageout_scan_continue(void); /* forward; */
1174 vm_pageout_scan(void)
1176 unsigned int burst_count
;
1177 boolean_t now
= FALSE
;
1178 unsigned int laundry_pages
;
1179 boolean_t need_more_inactive_pages
;
1180 unsigned int loop_detect
;
1182 XPR(XPR_VM_PAGEOUT
, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
1185 * We want to gradually dribble pages from the active queue
1186 * to the inactive queue. If we let the inactive queue get
1187 * very small, and then suddenly dump many pages into it,
1188 * those pages won't get a sufficient chance to be referenced
1189 * before we start taking them from the inactive queue.
1191 * We must limit the rate at which we send pages to the pagers.
1192 * data_write messages consume memory, for message buffers and
1193 * for map-copy objects. If we get too far ahead of the pagers,
1194 * we can potentially run out of memory.
1196 * We can use the laundry count to limit directly the number
1197 * of pages outstanding to the default pager. A similar
1198 * strategy for external pagers doesn't work, because
1199 * external pagers don't have to deallocate the pages sent them,
1200 * and because we might have to send pages to external pagers
1201 * even if they aren't processing writes. So we also
1202 * use a burst count to limit writes to external pagers.
1204 * When memory is very tight, we can't rely on external pagers to
1205 * clean pages. They probably aren't running, because they
1206 * aren't vm-privileged. If we kept sending dirty pages to them,
1207 * we could exhaust the free list. However, we can't just ignore
1208 * pages belonging to external objects, because there might be no
1209 * pages belonging to internal objects. Hence, we get the page
1210 * into an internal object and then immediately double-page it,
1211 * sending it to the default pager.
1213 * consider_zone_gc should be last, because the other operations
1214 * might return memory to zones.
1221 mutex_lock(&vm_page_queue_free_lock
);
1222 now
= (vm_page_free_count
< vm_page_free_min
);
1223 mutex_unlock(&vm_page_queue_free_lock
);
1225 swapout_threads(now
);
1226 #endif /* THREAD_SWAPPER */
1229 consider_task_collect();
1230 consider_thread_collect();
1232 consider_machine_collect();
1234 loop_detect
= vm_page_active_count
+ vm_page_inactive_count
;
1236 if (vm_page_free_count
<= vm_page_free_reserved
) {
1237 need_more_inactive_pages
= TRUE
;
1239 need_more_inactive_pages
= FALSE
;
1242 need_more_inactive_pages
= FALSE
;
1245 for (burst_count
= 0;;) {
1246 register vm_page_t m
;
1247 register vm_object_t object
;
1250 * Recalculate vm_page_inactivate_target.
1253 vm_page_lock_queues();
1254 vm_page_inactive_target
=
1255 VM_PAGE_INACTIVE_TARGET(vm_page_active_count
+
1256 vm_page_inactive_count
);
1259 * Move pages from active to inactive.
1262 while ((vm_page_inactive_count
< vm_page_inactive_target
||
1263 need_more_inactive_pages
) &&
1264 !queue_empty(&vm_page_queue_active
)) {
1265 register vm_object_t object
;
1267 vm_pageout_active
++;
1268 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
1271 * If we're getting really low on memory,
1272 * try selecting a page that will go
1273 * directly to the default_pager.
1274 * If there are no such pages, we have to
1275 * page out a page backed by an EMM,
1276 * so that the default_pager can recover
1279 if (need_more_inactive_pages
&&
1280 (IP_VALID(memory_manager_default
))) {
1281 vm_pageout_scan_active_emm_throttle
++;
1283 assert(m
->active
&& !m
->inactive
);
1286 if (vm_object_lock_try(object
)) {
1288 if (object
->pager_trusted
||
1291 vm_pageout_scan_active_emm_throttle_success
++;
1292 goto object_locked_active
;
1295 vm_pageout_scan_active_emm_throttle_success
++;
1296 goto object_locked_active
;
1298 vm_object_unlock(object
);
1300 m
= (vm_page_t
) queue_next(&m
->pageq
);
1301 } while (!queue_end(&vm_page_queue_active
,
1302 (queue_entry_t
) m
));
1303 if (queue_end(&vm_page_queue_active
,
1304 (queue_entry_t
) m
)) {
1305 vm_pageout_scan_active_emm_throttle_failure
++;
1307 queue_first(&vm_page_queue_active
);
1311 assert(m
->active
&& !m
->inactive
);
1314 if (!vm_object_lock_try(object
)) {
1316 * Move page to end and continue.
1319 queue_remove(&vm_page_queue_active
, m
,
1321 queue_enter(&vm_page_queue_active
, m
,
1323 vm_page_unlock_queues();
1326 vm_page_lock_queues();
1330 object_locked_active
:
1332 * If the page is busy, then we pull it
1333 * off the active queue and leave it alone.
1337 vm_object_unlock(object
);
1338 queue_remove(&vm_page_queue_active
, m
,
1342 vm_page_active_count
--;
1347 * Deactivate the page while holding the object
1348 * locked, so we know the page is still not busy.
1349 * This should prevent races between pmap_enter
1350 * and pmap_clear_reference. The page might be
1351 * absent or fictitious, but vm_page_deactivate
1355 vm_page_deactivate(m
);
1356 vm_object_unlock(object
);
1360 * We are done if we have met our target *and*
1361 * nobody is still waiting for a page.
1363 if (vm_page_free_count
>= vm_page_free_target
) {
1364 mutex_lock(&vm_page_queue_free_lock
);
1365 if ((vm_page_free_count
>= vm_page_free_target
) &&
1366 (vm_page_free_wanted
== 0)) {
1367 vm_page_unlock_queues();
1370 mutex_unlock(&vm_page_queue_free_lock
);
1373 * Sometimes we have to pause:
1374 * 1) No inactive pages - nothing to do.
1375 * 2) Flow control - wait for untrusted pagers to catch up.
1378 if ((queue_empty(&vm_page_queue_inactive
) &&
1379 (queue_empty(&vm_page_queue_zf
))) ||
1380 ((--loop_detect
) == 0) ||
1381 (burst_count
>= vm_pageout_burst_max
)) {
1382 unsigned int pages
, msecs
;
1385 consider_machine_adjust();
1387 * vm_pageout_burst_wait is msecs/page.
1388 * If there is nothing for us to do, we wait
1389 * at least vm_pageout_empty_wait msecs.
1391 pages
= burst_count
;
1393 if (loop_detect
== 0) {
1394 printf("Warning: No physical memory suitable for pageout or reclaim, pageout thread temporarily going to sleep\n");
1395 msecs
= vm_free_page_pause
;
1398 msecs
= burst_count
* vm_pageout_burst_wait
;
1401 if (queue_empty(&vm_page_queue_inactive
) &&
1402 queue_empty(&vm_page_queue_zf
) &&
1403 (msecs
< vm_pageout_empty_wait
))
1404 msecs
= vm_pageout_empty_wait
;
1405 vm_page_unlock_queues();
1407 assert_wait_timeout(msecs
, THREAD_INTERRUPTIBLE
);
1408 counter(c_vm_pageout_scan_block
++);
1411 * Unfortunately, we don't have call_continuation
1412 * so we can't rely on tail-recursion.
1414 wait_result
= thread_block((void (*)(void)) 0);
1415 if (wait_result
!= THREAD_TIMED_OUT
)
1416 thread_cancel_timer();
1417 vm_pageout_scan_continue();
1423 vm_pageout_inactive
++;
1425 if (vm_zf_count
< vm_accellerate_zf_pageout_trigger
) {
1429 if((vm_zf_iterator
+=1) >= vm_zf_iterator_count
) {
1433 if(queue_empty(&vm_page_queue_zf
) ||
1434 (((last_page_zf
) || (vm_zf_iterator
== 0)) &&
1435 !queue_empty(&vm_page_queue_inactive
))) {
1436 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
1439 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
1443 if ((vm_page_free_count
<= vm_page_free_reserved
) &&
1444 (IP_VALID(memory_manager_default
))) {
1446 * We're really low on memory. Try to select a page that
1447 * would go directly to the default_pager.
1448 * If there are no such pages, we have to page out a
1449 * page backed by an EMM, so that the default_pager
1450 * can recover it eventually.
1452 vm_pageout_scan_inactive_emm_throttle
++;
1454 assert(!m
->active
&& m
->inactive
);
1457 if (vm_object_lock_try(object
)) {
1459 if (object
->pager_trusted
||
1462 vm_pageout_scan_inactive_emm_throttle_success
++;
1463 goto object_locked_inactive
;
1466 vm_pageout_scan_inactive_emm_throttle_success
++;
1467 goto object_locked_inactive
;
1469 vm_object_unlock(object
);
1471 m
= (vm_page_t
) queue_next(&m
->pageq
);
1472 } while ((!queue_end(&vm_page_queue_zf
,
1474 && (!queue_end(&vm_page_queue_inactive
,
1475 (queue_entry_t
) m
)));
1477 if ((queue_end(&vm_page_queue_zf
,
1479 || (queue_end(&vm_page_queue_inactive
,
1480 (queue_entry_t
) m
))) {
1481 vm_pageout_scan_inactive_emm_throttle_failure
++;
1483 * We should check the "active" queue
1484 * for good candidates to page out.
1486 need_more_inactive_pages
= TRUE
;
1488 if(last_page_zf
== 0) {
1490 vm_zf_iterator
= vm_zf_iterator_count
- 1;
1493 vm_zf_iterator
= vm_zf_iterator_count
- 2;
1495 vm_page_unlock_queues();
1500 assert(!m
->active
&& m
->inactive
);
1504 * Try to lock object; since we've got the
1505 * page queues lock, we can only try for this one.
1508 if (!vm_object_lock_try(object
)) {
1510 * Move page to end and continue.
1511 * Don't re-issue ticket
1514 queue_remove(&vm_page_queue_zf
, m
,
1516 queue_enter(&vm_page_queue_zf
, m
,
1519 queue_remove(&vm_page_queue_inactive
, m
,
1521 queue_enter(&vm_page_queue_inactive
, m
,
1524 vm_page_unlock_queues();
1527 vm_pageout_inactive_nolock
++;
1531 object_locked_inactive
:
1533 * Paging out pages of objects which pager is being
1534 * created by another thread must be avoided, because
1535 * this thread may claim for memory, thus leading to a
1536 * possible dead lock between it and the pageout thread
1537 * which will wait for pager creation, if such pages are
1538 * finally chosen. The remaining assumption is that there
1539 * will finally be enough available pages in the inactive
1540 * pool to page out in order to satisfy all memory claimed
1541 * by the thread which concurrently creates the pager.
1544 if (!object
->pager_initialized
&& object
->pager_created
) {
1546 * Move page to end and continue, hoping that
1547 * there will be enough other inactive pages to
1548 * page out so that the thread which currently
1549 * initializes the pager will succeed.
1550 * Don't re-grant the ticket, the page should
1551 * pulled from the queue and paged out whenever
1552 * one of its logically adjacent fellows is
1556 queue_remove(&vm_page_queue_zf
, m
,
1558 queue_enter(&vm_page_queue_zf
, m
,
1561 vm_zf_iterator
= vm_zf_iterator_count
- 1;
1563 queue_remove(&vm_page_queue_inactive
, m
,
1565 queue_enter(&vm_page_queue_inactive
, m
,
1570 vm_page_unlock_queues();
1571 vm_object_unlock(object
);
1572 vm_pageout_inactive_avoid
++;
1577 * Remove the page from the inactive list.
1581 queue_remove(&vm_page_queue_zf
, m
, vm_page_t
, pageq
);
1583 queue_remove(&vm_page_queue_inactive
, m
, vm_page_t
, pageq
);
1585 m
->inactive
= FALSE
;
1587 vm_page_inactive_count
--;
1589 if (m
->busy
|| !object
->alive
) {
1591 * Somebody is already playing with this page.
1592 * Leave it off the pageout queues.
1595 vm_page_unlock_queues();
1596 vm_object_unlock(object
);
1597 vm_pageout_inactive_busy
++;
1602 * If it's absent or in error, we can reclaim the page.
1605 if (m
->absent
|| m
->error
) {
1606 vm_pageout_inactive_absent
++;
1609 vm_page_unlock_queues();
1610 vm_object_unlock(object
);
1614 assert(!m
->private);
1615 assert(!m
->fictitious
);
1618 * If already cleaning this page in place, convert from
1619 * "adjacent" to "target". We can leave the page mapped,
1620 * and vm_pageout_object_terminate will determine whether
1621 * to free or reactivate.
1625 #if MACH_CLUSTER_STATS
1626 vm_pageout_cluster_conversions
++;
1630 m
->dump_cleaning
= TRUE
;
1632 vm_object_unlock(object
);
1633 vm_page_unlock_queues();
1638 * If it's being used, reactivate.
1639 * (Fictitious pages are either busy or absent.)
1642 if (m
->reference
|| pmap_is_referenced(m
->phys_addr
)) {
1643 vm_pageout_inactive_used
++;
1645 #if ADVISORY_PAGEOUT
1646 if (m
->discard_request
) {
1647 m
->discard_request
= FALSE
;
1649 #endif /* ADVISORY_PAGEOUT */
1651 vm_object_unlock(object
);
1652 vm_page_activate(m
);
1653 VM_STAT(reactivations
++);
1654 vm_page_unlock_queues();
1658 #if ADVISORY_PAGEOUT
1659 if (object
->advisory_pageout
) {
1660 boolean_t do_throttle
;
1661 memory_object_t pager
;
1662 vm_object_offset_t discard_offset
;
1664 if (m
->discard_request
) {
1665 vm_stat_discard_failure
++;
1666 goto mandatory_pageout
;
1669 assert(object
->pager_initialized
);
1670 m
->discard_request
= TRUE
;
1671 pager
= object
->pager
;
1673 /* system-wide throttle */
1674 do_throttle
= (vm_page_free_count
<=
1675 vm_page_free_reserved
);
1679 * JMM - Do we need a replacement throttle
1680 * mechanism for pagers?
1683 /* throttle on this pager */
1684 /* XXX lock ordering ? */
1686 do_throttle
= imq_full(&port
->ip_messages
);
1692 vm_stat_discard_throttle
++;
1694 /* ignore this page and skip to next */
1695 vm_page_unlock_queues();
1696 vm_object_unlock(object
);
1699 /* force mandatory pageout */
1700 goto mandatory_pageout
;
1704 /* proceed with discard_request */
1705 vm_page_activate(m
);
1707 VM_STAT(reactivations
++);
1708 discard_offset
= m
->offset
+ object
->paging_offset
;
1709 vm_stat_discard_sent
++;
1710 vm_page_unlock_queues();
1711 vm_object_unlock(object
);
1714 memory_object_discard_request(object->pager,
1721 #endif /* ADVISORY_PAGEOUT */
1724 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
1725 (integer_t
)object
, (integer_t
)m
->offset
, (integer_t
)m
, 0,0);
1728 * Eliminate all mappings.
1732 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
1735 m
->dirty
= pmap_is_modified(m
->phys_addr
);
1737 * If it's clean and not precious, we can free the page.
1740 if (!m
->dirty
&& !m
->precious
) {
1741 vm_pageout_inactive_clean
++;
1744 vm_page_unlock_queues();
1747 * If there is no memory object for the page, create
1748 * one and hand it to the default pager.
1751 if (!object
->pager_initialized
)
1752 vm_object_collapse(object
);
1753 if (!object
->pager_initialized
)
1754 vm_object_pager_create(object
);
1755 if (!object
->pager_initialized
) {
1757 * Still no pager for the object.
1758 * Reactivate the page.
1760 * Should only happen if there is no
1763 vm_page_lock_queues();
1764 vm_page_activate(m
);
1765 vm_page_unlock_queues();
1768 * And we are done with it.
1770 PAGE_WAKEUP_DONE(m
);
1771 vm_object_unlock(object
);
1774 * break here to get back to the preemption
1775 * point in the outer loop so that we don't
1776 * spin forever if there is no default pager.
1778 vm_pageout_dirty_no_pager
++;
1780 * Well there's no pager, but we can still reclaim
1781 * free pages out of the inactive list. Go back
1782 * to top of loop and look for suitable pages.
1787 if ((object
->pager_initialized
) &&
1788 (object
->pager
== MEMORY_OBJECT_NULL
)) {
1790 * This pager has been destroyed by either
1791 * memory_object_destroy or vm_object_destroy, and
1792 * so there is nowhere for the page to go.
1793 * Just free the page.
1796 vm_object_unlock(object
);
1800 vm_pageout_inactive_dirty
++;
1802 if (!object->internal)
1805 vm_object_paging_begin(object
);
1806 vm_object_unlock(object
);
1807 vm_pageout_cluster(m
); /* flush it */
1809 consider_machine_adjust();
1812 counter(unsigned int c_vm_pageout_scan_continue
= 0;)
1815 vm_pageout_scan_continue(void)
1818 * We just paused to let the pagers catch up.
1819 * If vm_page_laundry_count is still high,
1820 * then we aren't waiting long enough.
1821 * If we have paused some vm_pageout_pause_max times without
1822 * adjusting vm_pageout_burst_wait, it might be too big,
1823 * so we decrease it.
1826 vm_page_lock_queues();
1827 counter(++c_vm_pageout_scan_continue
);
1828 if (vm_page_laundry_count
> vm_pageout_burst_min
) {
1829 vm_pageout_burst_wait
++;
1830 vm_pageout_pause_count
= 0;
1831 } else if (++vm_pageout_pause_count
> vm_pageout_pause_max
) {
1832 vm_pageout_burst_wait
= (vm_pageout_burst_wait
* 3) / 4;
1833 if (vm_pageout_burst_wait
< 1)
1834 vm_pageout_burst_wait
= 1;
1835 vm_pageout_pause_count
= 0;
1837 vm_page_unlock_queues();
1840 void vm_page_free_reserve(int pages
);
1841 int vm_page_free_count_init
;
1844 vm_page_free_reserve(
1847 int free_after_reserve
;
1849 vm_page_free_reserved
+= pages
;
1851 free_after_reserve
= vm_page_free_count_init
- vm_page_free_reserved
;
1853 vm_page_free_min
= vm_page_free_reserved
+
1854 VM_PAGE_FREE_MIN(free_after_reserve
);
1856 vm_page_free_target
= vm_page_free_reserved
+
1857 VM_PAGE_FREE_TARGET(free_after_reserve
);
1859 if (vm_page_free_target
< vm_page_free_min
+ 5)
1860 vm_page_free_target
= vm_page_free_min
+ 5;
1864 * vm_pageout is the high level pageout daemon.
1871 thread_t self
= current_thread();
1875 * Set thread privileges.
1877 self
->vm_privilege
= TRUE
;
1878 stack_privilege(self
);
1882 self
->priority
= BASEPRI_PREEMPT
- 1;
1883 set_sched_pri(self
, self
->priority
);
1884 thread_unlock(self
);
1888 * Initialize some paging parameters.
1891 if (vm_page_laundry_max
== 0)
1892 vm_page_laundry_max
= VM_PAGE_LAUNDRY_MAX
;
1894 if (vm_pageout_burst_max
== 0)
1895 vm_pageout_burst_max
= VM_PAGEOUT_BURST_MAX
;
1897 if (vm_pageout_burst_wait
== 0)
1898 vm_pageout_burst_wait
= VM_PAGEOUT_BURST_WAIT
;
1900 if (vm_pageout_empty_wait
== 0)
1901 vm_pageout_empty_wait
= VM_PAGEOUT_EMPTY_WAIT
;
1903 vm_page_free_count_init
= vm_page_free_count
;
1906 * even if we've already called vm_page_free_reserve
1907 * call it again here to insure that the targets are
1908 * accurately calculated (it uses vm_page_free_count_init)
1909 * calling it with an arg of 0 will not change the reserve
1910 * but will re-calculate free_min and free_target
1912 if (vm_page_free_reserved
< VM_PAGE_FREE_RESERVED
)
1913 vm_page_free_reserve(VM_PAGE_FREE_RESERVED
- vm_page_free_reserved
);
1915 vm_page_free_reserve(0);
1918 * vm_pageout_scan will set vm_page_inactive_target.
1920 * The pageout daemon is never done, so loop forever.
1921 * We should call vm_pageout_scan at least once each
1922 * time we are woken, even if vm_page_free_wanted is
1923 * zero, to check vm_page_free_target and
1924 * vm_page_inactive_target.
1927 vm_pageout_scan_event_counter
++;
1929 /* we hold vm_page_queue_free_lock now */
1930 assert(vm_page_free_wanted
== 0);
1931 assert_wait((event_t
) &vm_page_free_wanted
, THREAD_UNINT
);
1932 mutex_unlock(&vm_page_queue_free_lock
);
1933 counter(c_vm_pageout_block
++);
1934 thread_block((void (*)(void)) 0);
1940 vm_pageout_emergency_availability_request()
1945 vm_page_lock_queues();
1946 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
1948 while (!queue_end(&vm_page_queue_inactive
, (queue_entry_t
) m
)) {
1950 m
= (vm_page_t
) queue_next(&m
->pageq
);
1954 m
->dirty
= pmap_is_modified(m
->phys_addr
);
1955 if(m
->dirty
|| m
->busy
|| m
->wire_count
|| m
->absent
1956 || m
->precious
|| m
->cleaning
1957 || m
->dump_cleaning
|| m
->error
1958 || m
->pageout
|| m
->laundry
1959 || m
->list_req_pending
1960 || m
->overwriting
) {
1961 m
= (vm_page_t
) queue_next(&m
->pageq
);
1966 if (vm_object_lock_try(object
)) {
1967 if((!object
->alive
) ||
1968 (object
->pageout
)) {
1969 vm_object_unlock(object
);
1970 m
= (vm_page_t
) queue_next(&m
->pageq
);
1974 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
1976 vm_object_unlock(object
);
1977 vm_page_unlock_queues();
1978 return KERN_SUCCESS
;
1980 m
= (vm_page_t
) queue_next(&m
->pageq
);
1983 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
1985 while (!queue_end(&vm_page_queue_active
, (queue_entry_t
) m
)) {
1987 m
= (vm_page_t
) queue_next(&m
->pageq
);
1991 m
->dirty
= pmap_is_modified(m
->phys_addr
);
1992 if(m
->dirty
|| m
->busy
|| m
->wire_count
|| m
->absent
1993 || m
->precious
|| m
->cleaning
1994 || m
->dump_cleaning
|| m
->error
1995 || m
->pageout
|| m
->laundry
1996 || m
->list_req_pending
1997 || m
->overwriting
) {
1998 m
= (vm_page_t
) queue_next(&m
->pageq
);
2003 if (vm_object_lock_try(object
)) {
2004 if((!object
->alive
) ||
2005 (object
->pageout
)) {
2006 vm_object_unlock(object
);
2007 m
= (vm_page_t
) queue_next(&m
->pageq
);
2011 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
2013 vm_object_unlock(object
);
2014 vm_page_unlock_queues();
2015 return KERN_SUCCESS
;
2017 m
= (vm_page_t
) queue_next(&m
->pageq
);
2019 vm_page_unlock_queues();
2020 return KERN_FAILURE
;
2032 upl
= (upl_t
)kalloc(sizeof(struct upl
)
2033 + (sizeof(struct upl_page_info
)*(size
/page_size
)));
2035 upl
= (upl_t
)kalloc(sizeof(struct upl
));
2038 upl
->src_object
= NULL
;
2039 upl
->kaddr
= (vm_offset_t
)0;
2041 upl
->map_object
= NULL
;
2045 upl
->ubc_alias1
= 0;
2046 upl
->ubc_alias2
= 0;
2047 #endif /* UBC_DEBUG */
2059 vm_object_lock(upl
->map_object
->shadow
);
2060 queue_iterate(&upl
->map_object
->shadow
->uplq
,
2061 upl_ele
, upl_t
, uplq
) {
2062 if(upl_ele
== upl
) {
2063 queue_remove(&upl
->map_object
->shadow
->uplq
,
2064 upl_ele
, upl_t
, uplq
);
2068 vm_object_unlock(upl
->map_object
->shadow
);
2070 #endif /* UBC_DEBUG */
2072 if(!(upl
->flags
& UPL_DEVICE_MEMORY
))
2074 vm_object_deallocate(upl
->map_object
);
2075 if(upl
->flags
& UPL_INTERNAL
) {
2076 kfree((vm_offset_t
)upl
,
2077 sizeof(struct upl
) +
2078 (sizeof(struct upl_page_info
) * (upl
->size
/page_size
)));
2080 kfree((vm_offset_t
)upl
, sizeof(struct upl
));
2084 __private_extern__
void
2088 upl
->ref_count
-= 1;
2089 if(upl
->ref_count
== 0) {
2099 upl
->ref_count
-= 1;
2100 if(upl
->ref_count
== 0) {
2106 * Routine: vm_object_upl_request
2108 * Cause the population of a portion of a vm_object.
2109 * Depending on the nature of the request, the pages
2110 * returned may be contain valid data or be uninitialized.
2111 * A page list structure, listing the physical pages
2112 * will be returned upon request.
2113 * This function is called by the file system or any other
2114 * supplier of backing store to a pager.
2115 * IMPORTANT NOTE: The caller must still respect the relationship
2116 * between the vm_object and its backing memory object. The
2117 * caller MUST NOT substitute changes in the backing file
2118 * without first doing a memory_object_lock_request on the
2119 * target range unless it is know that the pages are not
2120 * shared with another entity at the pager level.
2122 * if a page list structure is present
2123 * return the mapped physical pages, where a
2124 * page is not present, return a non-initialized
2125 * one. If the no_sync bit is turned on, don't
2126 * call the pager unlock to synchronize with other
2127 * possible copies of the page. Leave pages busy
2128 * in the original object, if a page list structure
2129 * was specified. When a commit of the page list
2130 * pages is done, the dirty bit will be set for each one.
2132 * If a page list structure is present, return
2133 * all mapped pages. Where a page does not exist
2134 * map a zero filled one. Leave pages busy in
2135 * the original object. If a page list structure
2136 * is not specified, this call is a no-op.
2138 * Note: access of default pager objects has a rather interesting
2139 * twist. The caller of this routine, presumably the file system
2140 * page cache handling code, will never actually make a request
2141 * against a default pager backed object. Only the default
2142 * pager will make requests on backing store related vm_objects
2143 * In this way the default pager can maintain the relationship
2144 * between backing store files (abstract memory objects) and
2145 * the vm_objects (cache objects), they support.
2148 __private_extern__ kern_return_t
2149 vm_object_upl_request(
2151 vm_object_offset_t offset
,
2154 upl_page_info_array_t user_page_list
,
2155 unsigned int *page_list_count
,
2159 vm_object_offset_t dst_offset
= offset
;
2160 vm_size_t xfer_size
= size
;
2161 boolean_t do_m_lock
= FALSE
;
2165 boolean_t encountered_lrp
= FALSE
;
2167 vm_page_t alias_page
= NULL
;
2171 page_ticket
= (cntrl_flags
& UPL_PAGE_TICKET_MASK
)
2172 >> UPL_PAGE_TICKET_SHIFT
;
2174 if(((size
/page_size
) > MAX_UPL_TRANSFER
) && !object
->phys_contiguous
) {
2175 size
= MAX_UPL_TRANSFER
* page_size
;
2178 if(cntrl_flags
& UPL_SET_INTERNAL
)
2179 if(page_list_count
!= NULL
)
2180 *page_list_count
= MAX_UPL_TRANSFER
;
2181 if(((cntrl_flags
& UPL_SET_INTERNAL
) && !(object
->phys_contiguous
)) &&
2182 ((page_list_count
!= NULL
) && (*page_list_count
!= 0)
2183 && *page_list_count
< (size
/page_size
)))
2184 return KERN_INVALID_ARGUMENT
;
2186 if((!object
->internal
) && (object
->paging_offset
!= 0))
2187 panic("vm_object_upl_request: vnode object with non-zero paging offset\n");
2189 if((cntrl_flags
& UPL_COPYOUT_FROM
) && (upl_ptr
== NULL
)) {
2190 return KERN_SUCCESS
;
2193 if(cntrl_flags
& UPL_SET_INTERNAL
) {
2194 upl
= upl_create(TRUE
, size
);
2195 user_page_list
= (upl_page_info_t
*)
2196 (((vm_offset_t
)upl
) + sizeof(struct upl
));
2197 upl
->flags
|= UPL_INTERNAL
;
2199 upl
= upl_create(FALSE
, size
);
2201 if(object
->phys_contiguous
) {
2203 upl
->offset
= offset
+ object
->paging_offset
;
2205 if(user_page_list
) {
2206 user_page_list
[0].phys_addr
=
2207 offset
+ object
->shadow_offset
;
2208 user_page_list
[0].device
= TRUE
;
2210 upl
->map_object
= vm_object_allocate(size
);
2211 vm_object_lock(upl
->map_object
);
2212 upl
->map_object
->shadow
= object
;
2213 upl
->flags
= UPL_DEVICE_MEMORY
| UPL_INTERNAL
;
2214 upl
->map_object
->pageout
= TRUE
;
2215 upl
->map_object
->can_persist
= FALSE
;
2216 upl
->map_object
->copy_strategy
2217 = MEMORY_OBJECT_COPY_NONE
;
2218 upl
->map_object
->shadow_offset
= offset
;
2219 vm_object_unlock(upl
->map_object
);
2220 return KERN_SUCCESS
;
2224 upl
->map_object
= vm_object_allocate(size
);
2225 vm_object_lock(upl
->map_object
);
2226 upl
->map_object
->shadow
= object
;
2228 upl
->offset
= offset
+ object
->paging_offset
;
2229 upl
->map_object
->pageout
= TRUE
;
2230 upl
->map_object
->can_persist
= FALSE
;
2231 upl
->map_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
2232 upl
->map_object
->shadow_offset
= offset
;
2233 upl
->map_object
->wimg_bits
= object
->wimg_bits
;
2234 vm_object_unlock(upl
->map_object
);
2237 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
2238 vm_object_lock(object
);
2241 queue_enter(&object
->uplq
, upl
, upl_t
, uplq
);
2242 #endif /* UBC_DEBUG */
2243 vm_object_paging_begin(object
);
2245 if(cntrl_flags
& UPL_COPYOUT_FROM
) {
2246 upl
->flags
|= UPL_PAGE_SYNC_DONE
;
2248 if(alias_page
== NULL
) {
2249 vm_object_unlock(object
);
2250 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
2251 vm_object_lock(object
);
2253 if(((dst_page
= vm_page_lookup(object
,
2254 dst_offset
)) == VM_PAGE_NULL
) ||
2255 dst_page
->fictitious
||
2258 (dst_page
->wire_count
!= 0 &&
2259 !dst_page
->pageout
) ||
2260 ((!(dst_page
->dirty
|| dst_page
->precious
||
2261 pmap_is_modified(dst_page
->phys_addr
)))
2262 && (cntrl_flags
& UPL_RET_ONLY_DIRTY
)) ||
2263 ((!(dst_page
->inactive
))
2264 && (dst_page
->page_ticket
!= page_ticket
)
2265 && ((dst_page
->page_ticket
+1) != page_ticket
)
2266 && (cntrl_flags
& UPL_PAGEOUT
)) ||
2267 ((!dst_page
->list_req_pending
) &&
2268 (cntrl_flags
& UPL_RET_ONLY_DIRTY
) &&
2269 pmap_is_referenced(dst_page
->phys_addr
))) {
2271 user_page_list
[entry
].phys_addr
= 0;
2274 if(dst_page
->busy
&&
2275 (!(dst_page
->list_req_pending
&&
2276 dst_page
->pageout
))) {
2277 if(cntrl_flags
& UPL_NOBLOCK
) {
2279 user_page_list
[entry
]
2282 dst_offset
+= PAGE_SIZE_64
;
2283 xfer_size
-= PAGE_SIZE
;
2286 /*someone else is playing with the */
2287 /* page. We will have to wait. */
2288 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
2291 /* Someone else already cleaning the page? */
2292 if((dst_page
->cleaning
|| dst_page
->absent
||
2293 dst_page
->wire_count
!= 0) &&
2294 !dst_page
->list_req_pending
) {
2296 user_page_list
[entry
].phys_addr
= 0;
2298 dst_offset
+= PAGE_SIZE_64
;
2299 xfer_size
-= PAGE_SIZE
;
2302 /* eliminate all mappings from the */
2303 /* original object and its prodigy */
2305 vm_page_lock_queues();
2306 if( !(cntrl_flags
& UPL_FILE_IO
)) {
2307 pmap_page_protect(dst_page
->phys_addr
, VM_PROT_NONE
);
2309 /* pageout statistics gathering. count */
2310 /* all the pages we will page out that */
2311 /* were not counted in the initial */
2312 /* vm_pageout_scan work */
2313 if(dst_page
->list_req_pending
)
2314 encountered_lrp
= TRUE
;
2315 if((dst_page
->dirty
||
2316 (dst_page
->object
->internal
&&
2317 dst_page
->precious
)) &&
2318 (dst_page
->list_req_pending
2320 if(encountered_lrp
) {
2322 (pages_at_higher_offsets
++;)
2325 (pages_at_lower_offsets
++;)
2329 /* Turn off busy indication on pending */
2330 /* pageout. Note: we can only get here */
2331 /* in the request pending case. */
2332 dst_page
->list_req_pending
= FALSE
;
2333 dst_page
->busy
= FALSE
;
2334 dst_page
->cleaning
= FALSE
;
2336 dirty
= pmap_is_modified(dst_page
->phys_addr
);
2337 dirty
= dirty
? TRUE
: dst_page
->dirty
;
2339 /* use pageclean setup, it is more convenient */
2340 /* even for the pageout cases here */
2341 vm_pageclean_setup(dst_page
, alias_page
,
2342 upl
->map_object
, size
- xfer_size
);
2345 dst_page
->dirty
= FALSE
;
2346 dst_page
->precious
= TRUE
;
2349 if(dst_page
->pageout
)
2350 dst_page
->busy
= TRUE
;
2352 alias_page
->absent
= FALSE
;
2354 if((!(cntrl_flags
& UPL_CLEAN_IN_PLACE
))
2355 || (cntrl_flags
& UPL_PAGEOUT
)) {
2356 /* deny access to the target page */
2357 /* while it is being worked on */
2358 if((!dst_page
->pageout
) &&
2359 (dst_page
->wire_count
== 0)) {
2360 dst_page
->busy
= TRUE
;
2361 dst_page
->pageout
= TRUE
;
2362 vm_page_wire(dst_page
);
2365 if(user_page_list
) {
2366 user_page_list
[entry
].phys_addr
2367 = dst_page
->phys_addr
;
2368 user_page_list
[entry
].dirty
=
2370 user_page_list
[entry
].pageout
=
2372 user_page_list
[entry
].absent
=
2374 user_page_list
[entry
].precious
=
2378 vm_page_unlock_queues();
2381 dst_offset
+= PAGE_SIZE_64
;
2382 xfer_size
-= PAGE_SIZE
;
2386 if(alias_page
== NULL
) {
2387 vm_object_unlock(object
);
2388 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
2389 vm_object_lock(object
);
2391 dst_page
= vm_page_lookup(object
, dst_offset
);
2392 if(dst_page
!= VM_PAGE_NULL
) {
2393 if((cntrl_flags
& UPL_RET_ONLY_ABSENT
) &&
2394 !((dst_page
->list_req_pending
)
2395 && (dst_page
->absent
))) {
2396 /* we are doing extended range */
2397 /* requests. we want to grab */
2398 /* pages around some which are */
2399 /* already present. */
2401 user_page_list
[entry
].phys_addr
= 0;
2403 dst_offset
+= PAGE_SIZE_64
;
2404 xfer_size
-= PAGE_SIZE
;
2407 if((dst_page
->cleaning
) &&
2408 !(dst_page
->list_req_pending
)) {
2409 /*someone else is writing to the */
2410 /* page. We will have to wait. */
2411 PAGE_SLEEP(object
,dst_page
,THREAD_UNINT
);
2414 if ((dst_page
->fictitious
&&
2415 dst_page
->list_req_pending
)) {
2416 /* dump the fictitious page */
2417 dst_page
->list_req_pending
= FALSE
;
2418 dst_page
->clustered
= FALSE
;
2419 vm_page_lock_queues();
2420 vm_page_free(dst_page
);
2421 vm_page_unlock_queues();
2422 } else if ((dst_page
->absent
&&
2423 dst_page
->list_req_pending
)) {
2424 /* the default_pager case */
2425 dst_page
->list_req_pending
= FALSE
;
2426 dst_page
->busy
= FALSE
;
2427 dst_page
->clustered
= FALSE
;
2430 if((dst_page
= vm_page_lookup(object
, dst_offset
)) ==
2432 if(object
->private) {
2434 * This is a nasty wrinkle for users
2435 * of upl who encounter device or
2436 * private memory however, it is
2437 * unavoidable, only a fault can
2438 * reslove the actual backing
2439 * physical page by asking the
2443 user_page_list
[entry
]
2446 dst_offset
+= PAGE_SIZE_64
;
2447 xfer_size
-= PAGE_SIZE
;
2450 /* need to allocate a page */
2451 dst_page
= vm_page_alloc(object
, dst_offset
);
2452 if (dst_page
== VM_PAGE_NULL
) {
2453 vm_object_unlock(object
);
2455 vm_object_lock(object
);
2458 dst_page
->busy
= FALSE
;
2460 if(cntrl_flags
& UPL_NO_SYNC
) {
2461 dst_page
->page_lock
= 0;
2462 dst_page
->unlock_request
= 0;
2465 dst_page
->absent
= TRUE
;
2466 object
->absent_count
++;
2469 if(cntrl_flags
& UPL_NO_SYNC
) {
2470 dst_page
->page_lock
= 0;
2471 dst_page
->unlock_request
= 0;
2474 dst_page
->overwriting
= TRUE
;
2475 if(dst_page
->fictitious
) {
2476 panic("need corner case for fictitious page");
2478 if(dst_page
->page_lock
) {
2483 /* eliminate all mappings from the */
2484 /* original object and its prodigy */
2486 if(dst_page
->busy
) {
2487 /*someone else is playing with the */
2488 /* page. We will have to wait. */
2489 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
2493 vm_page_lock_queues();
2494 if( !(cntrl_flags
& UPL_FILE_IO
)) {
2495 pmap_page_protect(dst_page
->phys_addr
, VM_PROT_NONE
);
2497 dirty
= pmap_is_modified(dst_page
->phys_addr
);
2498 dirty
= dirty
? TRUE
: dst_page
->dirty
;
2500 vm_pageclean_setup(dst_page
, alias_page
,
2501 upl
->map_object
, size
- xfer_size
);
2503 if(cntrl_flags
& UPL_CLEAN_IN_PLACE
) {
2504 /* clean in place for read implies */
2505 /* that a write will be done on all */
2506 /* the pages that are dirty before */
2507 /* a upl commit is done. The caller */
2508 /* is obligated to preserve the */
2509 /* contents of all pages marked */
2511 upl
->flags
|= UPL_CLEAR_DIRTY
;
2515 dst_page
->dirty
= FALSE
;
2516 dst_page
->precious
= TRUE
;
2519 if (dst_page
->wire_count
== 0) {
2520 /* deny access to the target page while */
2521 /* it is being worked on */
2522 dst_page
->busy
= TRUE
;
2524 vm_page_wire(dst_page
);
2526 /* expect the page to be used */
2527 dst_page
->reference
= TRUE
;
2528 dst_page
->precious
=
2529 (cntrl_flags
& UPL_PRECIOUS
)
2531 alias_page
->absent
= FALSE
;
2533 if(user_page_list
) {
2534 user_page_list
[entry
].phys_addr
2535 = dst_page
->phys_addr
;
2536 user_page_list
[entry
].dirty
=
2538 user_page_list
[entry
].pageout
=
2540 user_page_list
[entry
].absent
=
2542 user_page_list
[entry
].precious
=
2545 vm_page_unlock_queues();
2548 dst_offset
+= PAGE_SIZE_64
;
2549 xfer_size
-= PAGE_SIZE
;
2553 if (upl
->flags
& UPL_INTERNAL
) {
2554 if(page_list_count
!= NULL
)
2555 *page_list_count
= 0;
2556 } else if (*page_list_count
> entry
) {
2557 if(page_list_count
!= NULL
)
2558 *page_list_count
= entry
;
2561 if(alias_page
!= NULL
) {
2562 vm_page_lock_queues();
2563 vm_page_free(alias_page
);
2564 vm_page_unlock_queues();
2568 vm_prot_t access_required
;
2569 /* call back all associated pages from other users of the pager */
2570 /* all future updates will be on data which is based on the */
2571 /* changes we are going to make here. Note: it is assumed that */
2572 /* we already hold copies of the data so we will not be seeing */
2573 /* an avalanche of incoming data from the pager */
2574 access_required
= (cntrl_flags
& UPL_COPYOUT_FROM
)
2575 ? VM_PROT_READ
: VM_PROT_WRITE
;
2579 if(!object
->pager_ready
) {
2580 wait_result_t wait_result
;
2582 wait_result
= vm_object_sleep(object
,
2583 VM_OBJECT_EVENT_PAGER_READY
,
2585 if (wait_result
!= THREAD_AWAKENED
) {
2586 vm_object_unlock(object
);
2587 return(KERN_FAILURE
);
2592 vm_object_unlock(object
);
2594 if (rc
= memory_object_data_unlock(
2596 dst_offset
+ object
->paging_offset
,
2599 if (rc
== MACH_SEND_INTERRUPTED
)
2602 return KERN_FAILURE
;
2607 /* lets wait on the last page requested */
2608 /* NOTE: we will have to update lock completed routine to signal */
2609 if(dst_page
!= VM_PAGE_NULL
&&
2610 (access_required
& dst_page
->page_lock
) != access_required
) {
2611 PAGE_ASSERT_WAIT(dst_page
, THREAD_UNINT
);
2612 thread_block((void (*)(void))0);
2613 vm_object_lock(object
);
2616 vm_object_unlock(object
);
2617 return KERN_SUCCESS
;
2620 /* JMM - Backward compatability for now */
2622 vm_fault_list_request(
2623 memory_object_control_t control
,
2624 vm_object_offset_t offset
,
2627 upl_page_info_t
**user_page_list_ptr
,
2628 int page_list_count
,
2631 int local_list_count
;
2632 upl_page_info_t
*user_page_list
;
2635 if (user_page_list_ptr
!= NULL
) {
2636 local_list_count
= page_list_count
;
2637 user_page_list
= *user_page_list_ptr
;
2639 local_list_count
= 0;
2640 user_page_list
= NULL
;
2642 kr
= memory_object_upl_request(control
,
2650 if(kr
!= KERN_SUCCESS
)
2653 if ((user_page_list_ptr
!= NULL
) && (cntrl_flags
& UPL_INTERNAL
)) {
2654 *user_page_list_ptr
= UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr
);
2657 return KERN_SUCCESS
;
2663 * Routine: vm_object_super_upl_request
2665 * Cause the population of a portion of a vm_object
2666 * in much the same way as memory_object_upl_request.
2667 * Depending on the nature of the request, the pages
2668 * returned may be contain valid data or be uninitialized.
2669 * However, the region may be expanded up to the super
2670 * cluster size provided.
2673 __private_extern__ kern_return_t
2674 vm_object_super_upl_request(
2676 vm_object_offset_t offset
,
2678 vm_size_t super_cluster
,
2680 upl_page_info_t
*user_page_list
,
2681 unsigned int *page_list_count
,
2684 vm_page_t target_page
;
2687 if(object
->paging_offset
> offset
)
2688 return KERN_FAILURE
;
2690 offset
= offset
- object
->paging_offset
;
2691 if(cntrl_flags
& UPL_PAGEOUT
) {
2692 if((target_page
= vm_page_lookup(object
, offset
))
2694 ticket
= target_page
->page_ticket
;
2695 cntrl_flags
= cntrl_flags
& ~(int)UPL_PAGE_TICKET_MASK
;
2696 cntrl_flags
= cntrl_flags
|
2697 ((ticket
<< UPL_PAGE_TICKET_SHIFT
)
2698 & UPL_PAGE_TICKET_MASK
);
2703 /* turns off super cluster exercised by the default_pager */
2705 super_cluster = size;
2707 if ((super_cluster
> size
) &&
2708 (vm_page_free_count
> vm_page_free_reserved
)) {
2710 vm_object_offset_t base_offset
;
2711 vm_size_t super_size
;
2713 base_offset
= (offset
&
2714 ~((vm_object_offset_t
) super_cluster
- 1));
2715 super_size
= (offset
+size
) > (base_offset
+ super_cluster
) ?
2716 super_cluster
<<1 : super_cluster
;
2717 super_size
= ((base_offset
+ super_size
) > object
->size
) ?
2718 (object
->size
- base_offset
) : super_size
;
2719 if(offset
> (base_offset
+ super_size
))
2720 panic("vm_object_super_upl_request: Missed target pageout 0x%x,0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", offset
, base_offset
, super_size
, super_cluster
, size
, object
->paging_offset
);
2721 /* apparently there is a case where the vm requests a */
2722 /* page to be written out who's offset is beyond the */
2724 if((offset
+ size
) > (base_offset
+ super_size
))
2725 super_size
= (offset
+ size
) - base_offset
;
2727 offset
= base_offset
;
2730 vm_object_upl_request(object
, offset
, size
,
2731 upl
, user_page_list
, page_list_count
,
2740 vm_offset_t
*dst_addr
)
2743 vm_object_offset_t offset
;
2748 if (upl
== UPL_NULL
)
2749 return KERN_INVALID_ARGUMENT
;
2753 /* check to see if already mapped */
2754 if(UPL_PAGE_LIST_MAPPED
& upl
->flags
) {
2756 return KERN_FAILURE
;
2759 offset
= 0; /* Always map the entire object */
2762 vm_object_lock(upl
->map_object
);
2763 upl
->map_object
->ref_count
++;
2764 vm_object_res_reference(upl
->map_object
);
2765 vm_object_unlock(upl
->map_object
);
2770 /* NEED A UPL_MAP ALIAS */
2771 kr
= vm_map_enter(map
, dst_addr
, size
, (vm_offset_t
) 0, TRUE
,
2772 upl
->map_object
, offset
, FALSE
,
2773 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
2775 if (kr
!= KERN_SUCCESS
) {
2780 for(addr
=*dst_addr
; size
> 0; size
-=PAGE_SIZE
,addr
+=PAGE_SIZE
) {
2781 m
= vm_page_lookup(upl
->map_object
, offset
);
2783 unsigned int cache_attr
;
2784 cache_attr
= ((unsigned int)m
->object
->wimg_bits
) & VM_WIMG_MASK
;
2786 PMAP_ENTER(map
->pmap
, addr
,
2790 offset
+=PAGE_SIZE_64
;
2792 upl
->ref_count
++; /* hold a reference for the mapping */
2793 upl
->flags
|= UPL_PAGE_LIST_MAPPED
;
2794 upl
->kaddr
= *dst_addr
;
2796 return KERN_SUCCESS
;
2808 if (upl
== UPL_NULL
)
2809 return KERN_INVALID_ARGUMENT
;
2812 if(upl
->flags
& UPL_PAGE_LIST_MAPPED
) {
2815 assert(upl
->ref_count
> 1);
2816 upl
->ref_count
--; /* removing mapping ref */
2817 upl
->flags
&= ~UPL_PAGE_LIST_MAPPED
;
2818 upl
->kaddr
= (vm_offset_t
) 0;
2821 vm_deallocate(map
, addr
, size
);
2822 return KERN_SUCCESS
;
2825 return KERN_FAILURE
;
2834 upl_page_info_t
*page_list
,
2835 mach_msg_type_number_t count
,
2838 vm_size_t xfer_size
= size
;
2839 vm_object_t shadow_object
= upl
->map_object
->shadow
;
2840 vm_object_t object
= upl
->map_object
;
2841 vm_object_offset_t target_offset
;
2842 vm_object_offset_t page_offset
;
2847 if (upl
== UPL_NULL
)
2848 return KERN_INVALID_ARGUMENT
;
2854 if(upl
->flags
& UPL_DEVICE_MEMORY
) {
2856 } else if ((offset
+ size
) > upl
->size
) {
2858 return KERN_FAILURE
;
2861 vm_object_lock(shadow_object
);
2863 entry
= offset
/PAGE_SIZE
;
2864 target_offset
= (vm_object_offset_t
)offset
;
2869 if((t
= vm_page_lookup(object
, target_offset
)) != NULL
) {
2872 page_offset
= t
->offset
;
2875 m
= vm_page_lookup(shadow_object
,
2876 page_offset
+ object
->shadow_offset
);
2877 if(m
!= VM_PAGE_NULL
) {
2878 vm_object_paging_end(shadow_object
);
2879 vm_page_lock_queues();
2880 if ((upl
->flags
& UPL_CLEAR_DIRTY
) ||
2881 (flags
& UPL_COMMIT_CLEAR_DIRTY
)) {
2882 pmap_clear_modify(m
->phys_addr
);
2886 p
= &(page_list
[entry
]);
2887 if(p
->phys_addr
&& p
->pageout
&& !m
->pageout
) {
2891 } else if (page_list
[entry
].phys_addr
&&
2892 !p
->pageout
&& m
->pageout
&&
2893 !m
->dump_cleaning
) {
2896 m
->overwriting
= FALSE
;
2898 PAGE_WAKEUP_DONE(m
);
2900 page_list
[entry
].phys_addr
= 0;
2902 m
->dump_cleaning
= FALSE
;
2904 vm_page_laundry_count
--;
2906 if (vm_page_laundry_count
< vm_page_laundry_min
) {
2907 vm_page_laundry_min
= 0;
2908 thread_wakeup((event_t
)
2909 &vm_page_laundry_count
);
2913 m
->cleaning
= FALSE
;
2915 #if MACH_CLUSTER_STATS
2916 if (m
->wanted
) vm_pageout_target_collisions
++;
2918 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
2919 m
->dirty
= pmap_is_modified(m
->phys_addr
);
2922 vm_pageout_target_page_dirtied
++;)
2923 vm_page_unwire(m
);/* reactivates */
2924 VM_STAT(reactivations
++);
2925 PAGE_WAKEUP_DONE(m
);
2928 vm_pageout_target_page_freed
++;)
2929 vm_page_free(m
);/* clears busy, etc. */
2930 VM_STAT(pageouts
++);
2932 vm_page_unlock_queues();
2933 target_offset
+= PAGE_SIZE_64
;
2934 xfer_size
-= PAGE_SIZE
;
2938 if (flags
& UPL_COMMIT_INACTIVATE
) {
2939 vm_page_deactivate(m
);
2940 m
->reference
= FALSE
;
2941 pmap_clear_reference(m
->phys_addr
);
2942 } else if (!m
->active
&& !m
->inactive
) {
2944 vm_page_activate(m
);
2946 vm_page_deactivate(m
);
2948 #if MACH_CLUSTER_STATS
2949 m
->dirty
= pmap_is_modified(m
->phys_addr
);
2951 if (m
->dirty
) vm_pageout_cluster_dirtied
++;
2952 else vm_pageout_cluster_cleaned
++;
2953 if (m
->wanted
) vm_pageout_cluster_collisions
++;
2958 if((m
->busy
) && (m
->cleaning
)) {
2959 /* the request_page_list case */
2962 if(shadow_object
->absent_count
== 1)
2963 vm_object_absent_release(shadow_object
);
2965 shadow_object
->absent_count
--;
2967 m
->overwriting
= FALSE
;
2971 else if (m
->overwriting
) {
2972 /* alternate request page list, write to
2973 /* page_list case. Occurs when the original
2974 /* page was wired at the time of the list
2976 assert(m
->wire_count
!= 0);
2977 vm_page_unwire(m
);/* reactivates */
2978 m
->overwriting
= FALSE
;
2980 m
->cleaning
= FALSE
;
2981 /* It is a part of the semantic of COPYOUT_FROM */
2982 /* UPLs that a commit implies cache sync */
2983 /* between the vm page and the backing store */
2984 /* this can be used to strip the precious bit */
2985 /* as well as clean */
2986 if (upl
->flags
& UPL_PAGE_SYNC_DONE
)
2987 m
->precious
= FALSE
;
2989 if (flags
& UPL_COMMIT_SET_DIRTY
) {
2993 * Wakeup any thread waiting for the page to be un-cleaning.
2996 vm_page_unlock_queues();
3000 target_offset
+= PAGE_SIZE_64
;
3001 xfer_size
-= PAGE_SIZE
;
3005 vm_object_unlock(shadow_object
);
3006 if(flags
& UPL_COMMIT_NOTIFY_EMPTY
) {
3007 if((upl
->flags
& UPL_DEVICE_MEMORY
)
3008 || (queue_empty(&upl
->map_object
->memq
)))
3013 return KERN_SUCCESS
;
3024 vm_size_t xfer_size
= size
;
3025 vm_object_t shadow_object
= upl
->map_object
->shadow
;
3026 vm_object_t object
= upl
->map_object
;
3027 vm_object_offset_t target_offset
;
3028 vm_object_offset_t page_offset
;
3033 if (upl
== UPL_NULL
)
3034 return KERN_INVALID_ARGUMENT
;
3037 if(upl
->flags
& UPL_DEVICE_MEMORY
) {
3039 } else if ((offset
+ size
) > upl
->size
) {
3041 return KERN_FAILURE
;
3044 vm_object_lock(shadow_object
);
3046 entry
= offset
/PAGE_SIZE
;
3047 target_offset
= (vm_object_offset_t
)offset
;
3052 if((t
= vm_page_lookup(object
, target_offset
)) != NULL
) {
3055 page_offset
= t
->offset
;
3058 m
= vm_page_lookup(shadow_object
,
3059 page_offset
+ object
->shadow_offset
);
3060 if(m
!= VM_PAGE_NULL
) {
3061 vm_object_paging_end(m
->object
);
3062 vm_page_lock_queues();
3064 /* COPYOUT = FALSE case */
3065 /* check for error conditions which must */
3066 /* be passed back to the pages customer */
3067 if(error
& UPL_ABORT_RESTART
) {
3070 vm_object_absent_release(m
->object
);
3071 m
->page_error
= KERN_MEMORY_ERROR
;
3073 } else if(error
& UPL_ABORT_UNAVAILABLE
) {
3076 m
->clustered
= FALSE
;
3077 } else if(error
& UPL_ABORT_ERROR
) {
3080 vm_object_absent_release(m
->object
);
3081 m
->page_error
= KERN_MEMORY_ERROR
;
3083 } else if(error
& UPL_ABORT_DUMP_PAGES
) {
3084 m
->clustered
= TRUE
;
3086 m
->clustered
= TRUE
;
3090 m
->cleaning
= FALSE
;
3091 m
->overwriting
= FALSE
;
3092 PAGE_WAKEUP_DONE(m
);
3096 vm_page_activate(m
);
3099 vm_page_unlock_queues();
3100 target_offset
+= PAGE_SIZE_64
;
3101 xfer_size
-= PAGE_SIZE
;
3106 * Handle the trusted pager throttle.
3109 vm_page_laundry_count
--;
3111 if (vm_page_laundry_count
3112 < vm_page_laundry_min
) {
3113 vm_page_laundry_min
= 0;
3114 thread_wakeup((event_t
)
3115 &vm_page_laundry_count
);
3120 assert(m
->wire_count
== 1);
3124 m
->dump_cleaning
= FALSE
;
3125 m
->cleaning
= FALSE
;
3127 m
->overwriting
= FALSE
;
3129 vm_external_state_clr(
3130 m
->object
->existence_map
, m
->offset
);
3131 #endif /* MACH_PAGEMAP */
3132 if(error
& UPL_ABORT_DUMP_PAGES
) {
3134 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
3138 vm_page_unlock_queues();
3141 target_offset
+= PAGE_SIZE_64
;
3142 xfer_size
-= PAGE_SIZE
;
3145 vm_object_unlock(shadow_object
);
3146 if(error
& UPL_ABORT_NOTIFY_EMPTY
) {
3147 if((upl
->flags
& UPL_DEVICE_MEMORY
)
3148 || (queue_empty(&upl
->map_object
->memq
)))
3152 return KERN_SUCCESS
;
3160 vm_object_t object
= NULL
;
3161 vm_object_t shadow_object
= NULL
;
3162 vm_object_offset_t offset
;
3163 vm_object_offset_t shadow_offset
;
3164 vm_object_offset_t target_offset
;
3168 if (upl
== UPL_NULL
)
3169 return KERN_INVALID_ARGUMENT
;
3172 if(upl
->flags
& UPL_DEVICE_MEMORY
) {
3174 return KERN_SUCCESS
;
3177 object
= upl
->map_object
;
3179 if (object
== NULL
) {
3180 panic("upl_abort: upl object is not backed by an object");
3182 return KERN_INVALID_ARGUMENT
;
3185 shadow_object
= upl
->map_object
->shadow
;
3186 shadow_offset
= upl
->map_object
->shadow_offset
;
3188 vm_object_lock(shadow_object
);
3189 for(i
= 0; i
<(upl
->size
); i
+=PAGE_SIZE
, offset
+= PAGE_SIZE_64
) {
3190 if((t
= vm_page_lookup(object
,offset
)) != NULL
) {
3191 target_offset
= t
->offset
+ shadow_offset
;
3192 if((m
= vm_page_lookup(shadow_object
, target_offset
)) != NULL
) {
3193 vm_object_paging_end(m
->object
);
3194 vm_page_lock_queues();
3196 /* COPYOUT = FALSE case */
3197 /* check for error conditions which must */
3198 /* be passed back to the pages customer */
3199 if(error
& UPL_ABORT_RESTART
) {
3202 vm_object_absent_release(m
->object
);
3203 m
->page_error
= KERN_MEMORY_ERROR
;
3205 } else if(error
& UPL_ABORT_UNAVAILABLE
) {
3208 m
->clustered
= FALSE
;
3209 } else if(error
& UPL_ABORT_ERROR
) {
3212 vm_object_absent_release(m
->object
);
3213 m
->page_error
= KERN_MEMORY_ERROR
;
3215 } else if(error
& UPL_ABORT_DUMP_PAGES
) {
3216 m
->clustered
= TRUE
;
3218 m
->clustered
= TRUE
;
3221 m
->cleaning
= FALSE
;
3222 m
->overwriting
= FALSE
;
3223 PAGE_WAKEUP_DONE(m
);
3227 vm_page_activate(m
);
3229 vm_page_unlock_queues();
3233 * Handle the trusted pager throttle.
3236 vm_page_laundry_count
--;
3238 if (vm_page_laundry_count
3239 < vm_page_laundry_min
) {
3240 vm_page_laundry_min
= 0;
3241 thread_wakeup((event_t
)
3242 &vm_page_laundry_count
);
3247 assert(m
->wire_count
== 1);
3251 m
->dump_cleaning
= FALSE
;
3252 m
->cleaning
= FALSE
;
3254 m
->overwriting
= FALSE
;
3256 vm_external_state_clr(
3257 m
->object
->existence_map
, m
->offset
);
3258 #endif /* MACH_PAGEMAP */
3259 if(error
& UPL_ABORT_DUMP_PAGES
) {
3261 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
3265 vm_page_unlock_queues();
3269 vm_object_unlock(shadow_object
);
3270 /* Remove all the pages from the map object so */
3271 /* vm_pageout_object_terminate will work properly. */
3272 while (!queue_empty(&upl
->map_object
->memq
)) {
3275 p
= (vm_page_t
) queue_first(&upl
->map_object
->memq
);
3280 assert(!p
->cleaning
);
3285 return KERN_SUCCESS
;
3288 /* an option on commit should be wire */
3292 upl_page_info_t
*page_list
,
3293 mach_msg_type_number_t count
)
3295 if (upl
== UPL_NULL
)
3296 return KERN_INVALID_ARGUMENT
;
3302 if (upl
->flags
& UPL_DEVICE_MEMORY
)
3304 if ((upl
->flags
& UPL_CLEAR_DIRTY
) ||
3305 (upl
->flags
& UPL_PAGE_SYNC_DONE
)) {
3306 vm_object_t shadow_object
= upl
->map_object
->shadow
;
3307 vm_object_t object
= upl
->map_object
;
3308 vm_object_offset_t target_offset
;
3313 vm_object_lock(shadow_object
);
3315 target_offset
= object
->shadow_offset
;
3316 xfer_end
= upl
->size
+ object
->shadow_offset
;
3318 while(target_offset
< xfer_end
) {
3319 if ((t
= vm_page_lookup(object
,
3320 target_offset
- object
->shadow_offset
))
3323 shadow_object
, target_offset
);
3324 if(m
!= VM_PAGE_NULL
) {
3325 if (upl
->flags
& UPL_CLEAR_DIRTY
) {
3326 pmap_clear_modify(m
->phys_addr
);
3329 /* It is a part of the semantic of */
3330 /* COPYOUT_FROM UPLs that a commit */
3331 /* implies cache sync between the */
3332 /* vm page and the backing store */
3333 /* this can be used to strip the */
3334 /* precious bit as well as clean */
3335 if (upl
->flags
& UPL_PAGE_SYNC_DONE
)
3336 m
->precious
= FALSE
;
3339 target_offset
+= PAGE_SIZE_64
;
3341 vm_object_unlock(shadow_object
);
3344 vm_object_t shadow_object
= upl
->map_object
->shadow
;
3345 vm_object_t object
= upl
->map_object
;
3346 vm_object_offset_t target_offset
;
3353 vm_object_lock(shadow_object
);
3356 target_offset
= object
->shadow_offset
;
3357 xfer_end
= upl
->size
+ object
->shadow_offset
;
3359 while(target_offset
< xfer_end
) {
3361 if ((t
= vm_page_lookup(object
,
3362 target_offset
- object
->shadow_offset
))
3364 target_offset
+= PAGE_SIZE_64
;
3369 m
= vm_page_lookup(shadow_object
, target_offset
);
3370 if(m
!= VM_PAGE_NULL
) {
3371 p
= &(page_list
[entry
]);
3372 if(page_list
[entry
].phys_addr
&&
3373 p
->pageout
&& !m
->pageout
) {
3374 vm_page_lock_queues();
3378 vm_page_unlock_queues();
3379 } else if (page_list
[entry
].phys_addr
&&
3380 !p
->pageout
&& m
->pageout
&&
3381 !m
->dump_cleaning
) {
3382 vm_page_lock_queues();
3385 m
->overwriting
= FALSE
;
3387 PAGE_WAKEUP_DONE(m
);
3388 vm_page_unlock_queues();
3390 page_list
[entry
].phys_addr
= 0;
3392 target_offset
+= PAGE_SIZE_64
;
3396 vm_object_unlock(shadow_object
);
3399 return KERN_SUCCESS
;
3403 upl_get_internal_pagelist_offset()
3405 return sizeof(struct upl
);
3412 upl
->flags
|= UPL_CLEAR_DIRTY
;
3419 upl
->flags
&= ~UPL_CLEAR_DIRTY
;
3425 boolean_t
upl_page_present(upl_page_info_t
*upl
, int index
)
3427 return(UPL_PAGE_PRESENT(upl
, index
));
3429 boolean_t
upl_dirty_page(upl_page_info_t
*upl
, int index
)
3431 return(UPL_DIRTY_PAGE(upl
, index
));
3433 boolean_t
upl_valid_page(upl_page_info_t
*upl
, int index
)
3435 return(UPL_VALID_PAGE(upl
, index
));
3437 vm_offset_t
upl_phys_page(upl_page_info_t
*upl
, int index
)
3439 return((vm_offset_t
)UPL_PHYS_PAGE(upl
, index
));
3443 vm_countdirtypages(void)
3455 vm_page_lock_queues();
3456 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
3458 if (m
==(vm_page_t
)0) break;
3460 if(m
->dirty
) dpages
++;
3461 if(m
->pageout
) pgopages
++;
3462 if(m
->precious
) precpages
++;
3464 m
= (vm_page_t
) queue_next(&m
->pageq
);
3465 if (m
==(vm_page_t
)0) break;
3467 } while (!queue_end(&vm_page_queue_inactive
,(queue_entry_t
) m
));
3468 vm_page_unlock_queues();
3470 vm_page_lock_queues();
3471 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
3473 if (m
==(vm_page_t
)0) break;
3475 if(m
->dirty
) dpages
++;
3476 if(m
->pageout
) pgopages
++;
3477 if(m
->precious
) precpages
++;
3479 m
= (vm_page_t
) queue_next(&m
->pageq
);
3480 if (m
==(vm_page_t
)0) break;
3482 } while (!queue_end(&vm_page_queue_zf
,(queue_entry_t
) m
));
3483 vm_page_unlock_queues();
3485 printf("IN Q: %d : %d : %d\n", dpages
, pgopages
, precpages
);
3491 vm_page_lock_queues();
3492 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
3495 if(m
== (vm_page_t
)0) break;
3496 if(m
->dirty
) dpages
++;
3497 if(m
->pageout
) pgopages
++;
3498 if(m
->precious
) precpages
++;
3500 m
= (vm_page_t
) queue_next(&m
->pageq
);
3501 if(m
== (vm_page_t
)0) break;
3503 } while (!queue_end(&vm_page_queue_active
,(queue_entry_t
) m
));
3504 vm_page_unlock_queues();
3506 printf("AC Q: %d : %d : %d\n", dpages
, pgopages
, precpages
);
3509 #endif /* MACH_BSD */
3512 kern_return_t
upl_ubc_alias_set(upl_t upl
, unsigned int alias1
, unsigned int alias2
)
3514 upl
->ubc_alias1
= alias1
;
3515 upl
->ubc_alias2
= alias2
;
3516 return KERN_SUCCESS
;
3518 int upl_ubc_alias_get(upl_t upl
, unsigned int * al
, unsigned int * al2
)
3521 *al
= upl
->ubc_alias1
;
3523 *al2
= upl
->ubc_alias2
;
3524 return KERN_SUCCESS
;
3526 #endif /* UBC_DEBUG */
3531 #include <ddb/db_output.h>
3532 #include <ddb/db_print.h>
3533 #include <vm/vm_print.h>
3535 #define printf kdbprintf
3536 extern int db_indent
;
3537 void db_pageout(void);
3542 extern int vm_page_gobble_count
;
3544 iprintf("VM Statistics:\n");
3546 iprintf("pages:\n");
3548 iprintf("activ %5d inact %5d free %5d",
3549 vm_page_active_count
, vm_page_inactive_count
,
3550 vm_page_free_count
);
3551 printf(" wire %5d gobbl %5d\n",
3552 vm_page_wire_count
, vm_page_gobble_count
);
3553 iprintf("laund %5d\n",
3554 vm_page_laundry_count
);
3556 iprintf("target:\n");
3558 iprintf("min %5d inact %5d free %5d",
3559 vm_page_free_min
, vm_page_inactive_target
,
3560 vm_page_free_target
);
3561 printf(" resrv %5d\n", vm_page_free_reserved
);
3564 iprintf("burst:\n");
3566 iprintf("max %5d min %5d wait %5d empty %5d\n",
3567 vm_pageout_burst_max
, vm_pageout_burst_min
,
3568 vm_pageout_burst_wait
, vm_pageout_empty_wait
);
3570 iprintf("pause:\n");
3572 iprintf("count %5d max %5d\n",
3573 vm_pageout_pause_count
, vm_pageout_pause_max
);
3575 iprintf("scan_continue called %8d\n", c_vm_pageout_scan_continue
);
3576 #endif /* MACH_COUNTERS */
3586 extern int c_laundry_pages_freed
;
3587 #endif /* MACH_COUNTERS */
3589 iprintf("Pageout Statistics:\n");
3591 iprintf("active %5d inactv %5d\n",
3592 vm_pageout_active
, vm_pageout_inactive
);
3593 iprintf("nolock %5d avoid %5d busy %5d absent %5d\n",
3594 vm_pageout_inactive_nolock
, vm_pageout_inactive_avoid
,
3595 vm_pageout_inactive_busy
, vm_pageout_inactive_absent
);
3596 iprintf("used %5d clean %5d dirty %5d\n",
3597 vm_pageout_inactive_used
, vm_pageout_inactive_clean
,
3598 vm_pageout_inactive_dirty
);
3600 iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed
);
3601 #endif /* MACH_COUNTERS */
3602 #if MACH_CLUSTER_STATS
3603 iprintf("Cluster Statistics:\n");
3605 iprintf("dirtied %5d cleaned %5d collisions %5d\n",
3606 vm_pageout_cluster_dirtied
, vm_pageout_cluster_cleaned
,
3607 vm_pageout_cluster_collisions
);
3608 iprintf("clusters %5d conversions %5d\n",
3609 vm_pageout_cluster_clusters
, vm_pageout_cluster_conversions
);
3611 iprintf("Target Statistics:\n");
3613 iprintf("collisions %5d page_dirtied %5d page_freed %5d\n",
3614 vm_pageout_target_collisions
, vm_pageout_target_page_dirtied
,
3615 vm_pageout_target_page_freed
);
3617 #endif /* MACH_CLUSTER_STATS */
3621 #if MACH_CLUSTER_STATS
3622 unsigned long vm_pageout_cluster_dirtied
= 0;
3623 unsigned long vm_pageout_cluster_cleaned
= 0;
3624 unsigned long vm_pageout_cluster_collisions
= 0;
3625 unsigned long vm_pageout_cluster_clusters
= 0;
3626 unsigned long vm_pageout_cluster_conversions
= 0;
3627 unsigned long vm_pageout_target_collisions
= 0;
3628 unsigned long vm_pageout_target_page_dirtied
= 0;
3629 unsigned long vm_pageout_target_page_freed
= 0;
3630 #define CLUSTER_STAT(clause) clause
3631 #else /* MACH_CLUSTER_STATS */
3632 #define CLUSTER_STAT(clause)
3633 #endif /* MACH_CLUSTER_STATS */
3635 #endif /* MACH_KDB */