/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_pageout.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	The proverbial page-out daemon.
 */
#include <mach_pagemap.h>
#include <mach_cluster_stats.h>
#include <advisory_pageout.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/memory_object_default.h>
#include <mach/memory_object_control_server.h>
#include <mach/mach_host_server.h>
#include <mach/vm_param.h>
#include <mach/vm_statistics.h>
#include <kern/host_statistics.h>
#include <kern/counters.h>
#include <kern/thread.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <machine/vm_tuning.h>
#include <kern/misc_protos.h>

extern ipc_port_t	memory_manager_default;
#ifndef	VM_PAGE_LAUNDRY_MAX
#define	VM_PAGE_LAUNDRY_MAX	6	/* outstanding DMM page cleans */
#endif	/* VM_PAGE_LAUNDRY_MAX */

#ifndef	VM_PAGEOUT_BURST_MAX
#define	VM_PAGEOUT_BURST_MAX	32	/* simultaneous EMM page cleans */
#endif	/* VM_PAGEOUT_BURST_MAX */

#ifndef	VM_PAGEOUT_DISCARD_MAX
#define	VM_PAGEOUT_DISCARD_MAX	68	/* simultaneous EMM page discards */
#endif	/* VM_PAGEOUT_DISCARD_MAX */

#ifndef	VM_PAGEOUT_BURST_WAIT
#define	VM_PAGEOUT_BURST_WAIT	30	/* milliseconds per page */
#endif	/* VM_PAGEOUT_BURST_WAIT */

#ifndef	VM_PAGEOUT_EMPTY_WAIT
#define	VM_PAGEOUT_EMPTY_WAIT	200	/* milliseconds */
#endif	/* VM_PAGEOUT_EMPTY_WAIT */
/*
 * To obtain a reasonable LRU approximation, the inactive queue
 * needs to be large enough to give pages on it a chance to be
 * referenced a second time.  This macro defines the fraction
 * of active+inactive pages that should be inactive.
 * The pageout daemon uses it to update vm_page_inactive_target.
 *
 * If vm_page_free_count falls below vm_page_free_target and
 * vm_page_inactive_count is below vm_page_inactive_target,
 * then the pageout daemon starts running.
 */
#ifndef	VM_PAGE_INACTIVE_TARGET
#define	VM_PAGE_INACTIVE_TARGET(avail)	((avail) * 1 / 3)
#endif	/* VM_PAGE_INACTIVE_TARGET */
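/*
 * Worked example (illustrative only, not from the original source): with
 * avail = active + inactive = 90000 pages, the macro above yields
 * VM_PAGE_INACTIVE_TARGET(90000) = 90000 * 1 / 3 = 30000, i.e. the daemon
 * tries to keep roughly one third of those pages on the inactive queue so
 * that LRU is approximated.
 */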
/*
 * Once the pageout daemon starts running, it keeps going
 * until vm_page_free_count meets or exceeds vm_page_free_target.
 */
#ifndef	VM_PAGE_FREE_TARGET
#define	VM_PAGE_FREE_TARGET(free)	(15 + (free) / 80)
#endif	/* VM_PAGE_FREE_TARGET */
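/*
 * Illustrative numbers (an assumed machine size, not from the source):
 * with free = 80000 pages after the reserve is subtracted,
 * VM_PAGE_FREE_TARGET(80000) = 15 + 80000 / 80 = 1015 pages, the point at
 * which a running pageout scan stops.
 */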
/*
 * The pageout daemon always starts running once vm_page_free_count
 * falls below vm_page_free_min.
 */
#ifndef	VM_PAGE_FREE_MIN
#define	VM_PAGE_FREE_MIN(free)	(10 + (free) / 100)
#endif	/* VM_PAGE_FREE_MIN */
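/*
 * Using the same assumed 80000-page figure, VM_PAGE_FREE_MIN(80000)
 * = 10 + 80000 / 100 = 810 pages; this is always below the corresponding
 * free target, so the daemon wakes up before the target is badly breached.
 */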
/*
 * When vm_page_free_count falls below vm_page_free_reserved,
 * only vm-privileged threads can allocate pages.  vm-privilege
 * allows the pageout daemon and default pager (and any other
 * associated threads needed for default pageout) to continue
 * operation by dipping into the reserved pool of pages.
 */
#ifndef	VM_PAGE_FREE_RESERVED
#define	VM_PAGE_FREE_RESERVED	\
	((16 * VM_PAGE_LAUNDRY_MAX) + NCPUS)
#endif	/* VM_PAGE_FREE_RESERVED */
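/*
 * Example (assumed configuration, for illustration): with the default
 * VM_PAGE_LAUNDRY_MAX of 6 and a 2-processor machine (NCPUS == 2), the
 * reserve works out to (16 * 6) + 2 = 98 pages held back for
 * vm-privileged threads.
 */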
/*
 * Exported variable used to broadcast the activation of the pageout scan.
 * Working Set uses this to throttle its use of pmap removes.  In this
 * way, code which runs within memory in an uncontested context does
 * not keep encountering soft faults.
 */
unsigned int	vm_pageout_scan_event_counter = 0;
/*
 * Forward declarations for internal routines.
 */
extern void vm_pageout_continue(void);
extern void vm_pageout_scan(void);
extern void vm_pageout_throttle(vm_page_t m);
extern vm_page_t vm_pageout_cluster_page(
			vm_object_t		object,
			vm_object_offset_t	offset,
			boolean_t		precious_clean);
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;

unsigned int vm_page_laundry_max = 0;		/* # of clusters outstanding */
unsigned int vm_page_laundry_min = 0;
unsigned int vm_pageout_burst_max = 0;
unsigned int vm_pageout_burst_wait = 0;		/* milliseconds per page */
unsigned int vm_pageout_empty_wait = 0;		/* milliseconds */
unsigned int vm_pageout_burst_min = 0;
unsigned int vm_pageout_pause_count = 0;
unsigned int vm_pageout_pause_max = 0;
unsigned int vm_free_page_pause = 100;		/* milliseconds */

/*
 * Protection against zero fill flushing live working sets derived
 * from existing backing store and files
 */
unsigned int vm_accellerate_zf_pageout_trigger = 400;
unsigned int vm_zf_iterator;
unsigned int vm_zf_iterator_count = 40;
unsigned int last_page_zf;
unsigned int vm_zf_count = 0;
/*
 * These variables record the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the variables.
 */
unsigned int vm_pageout_active = 0;		/* debugging */
unsigned int vm_pageout_inactive = 0;		/* debugging */
unsigned int vm_pageout_inactive_throttled = 0;	/* debugging */
unsigned int vm_pageout_inactive_forced = 0;	/* debugging */
unsigned int vm_pageout_inactive_nolock = 0;	/* debugging */
unsigned int vm_pageout_inactive_avoid = 0;	/* debugging */
unsigned int vm_pageout_inactive_busy = 0;	/* debugging */
unsigned int vm_pageout_inactive_absent = 0;	/* debugging */
unsigned int vm_pageout_inactive_used = 0;	/* debugging */
unsigned int vm_pageout_inactive_clean = 0;	/* debugging */
unsigned int vm_pageout_inactive_dirty = 0;	/* debugging */
unsigned int vm_pageout_dirty_no_pager = 0;	/* debugging */
unsigned int vm_stat_discard = 0;		/* debugging */
unsigned int vm_stat_discard_sent = 0;		/* debugging */
unsigned int vm_stat_discard_failure = 0;	/* debugging */
unsigned int vm_stat_discard_throttle = 0;	/* debugging */
unsigned int vm_pageout_scan_active_emm_throttle = 0;			/* debugging */
unsigned int vm_pageout_scan_active_emm_throttle_success = 0;		/* debugging */
unsigned int vm_pageout_scan_active_emm_throttle_failure = 0;		/* debugging */
unsigned int vm_pageout_scan_inactive_emm_throttle = 0;			/* debugging */
unsigned int vm_pageout_scan_inactive_emm_throttle_success = 0;		/* debugging */
unsigned int vm_pageout_scan_inactive_emm_throttle_failure = 0;		/* debugging */

unsigned int vm_pageout_out_of_line = 0;
unsigned int vm_pageout_in_place = 0;
/*
 *	Routine:	vm_pageout_object_allocate
 *		Allocate an object for use as out-of-line memory in a
 *		data_return/data_initialize message.
 *		The page must be in an unlocked object.
 *
 *		If the page belongs to a trusted pager, cleaning in place
 *		will be used, which utilizes a special "pageout object"
 *		containing private alias pages for the real page frames.
 *		Untrusted pagers use normal out-of-line memory.
 */
vm_object_t
vm_pageout_object_allocate(
	vm_page_t		m,
	vm_size_t		size,
	vm_object_offset_t	offset)
{
	vm_object_t	object = m->object;
	vm_object_t	new_object;

	assert(object->pager_ready);

	if (object->pager_trusted || object->internal)
		vm_pageout_throttle(m);

	new_object = vm_object_allocate(size);

	if (object->pager_trusted) {
		assert(offset < object->size);

		vm_object_lock(new_object);
		new_object->pageout = TRUE;
		new_object->shadow = object;
		new_object->can_persist = FALSE;
		new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
		new_object->shadow_offset = offset;
		vm_object_unlock(new_object);

		/*
		 * Take a paging reference on the object.  This will be dropped
		 * in vm_pageout_object_terminate().
		 */
		vm_object_lock(object);
		vm_object_paging_begin(object);
		vm_object_unlock(object);

		vm_pageout_in_place++;
	} else
		vm_pageout_out_of_line++;

	return (new_object);
}
#if	MACH_CLUSTER_STATS
unsigned long vm_pageout_cluster_dirtied = 0;
unsigned long vm_pageout_cluster_cleaned = 0;
unsigned long vm_pageout_cluster_collisions = 0;
unsigned long vm_pageout_cluster_clusters = 0;
unsigned long vm_pageout_cluster_conversions = 0;
unsigned long vm_pageout_target_collisions = 0;
unsigned long vm_pageout_target_page_dirtied = 0;
unsigned long vm_pageout_target_page_freed = 0;
#define CLUSTER_STAT(clause)	clause
#else	/* MACH_CLUSTER_STATS */
#define CLUSTER_STAT(clause)
#endif	/* MACH_CLUSTER_STATS */
/*
 *	Routine:	vm_pageout_object_terminate
 *		Destroy the pageout_object allocated by
 *		vm_pageout_object_allocate(), and perform all of the
 *		required cleanup actions.
 *
 *		The object must be locked, and will be returned locked.
 */
void
vm_pageout_object_terminate(
	vm_object_t	object)
{
	vm_object_t	shadow_object;

	/*
	 * Deal with the deallocation (last reference) of a pageout object
	 * (used for cleaning-in-place) by dropping the paging references/
	 * freeing pages in the original object.
	 */

	assert(object->pageout);
	shadow_object = object->shadow;
	vm_object_lock(shadow_object);

	while (!queue_empty(&object->memq)) {
		vm_page_t		p;
		vm_page_t		m;
		vm_object_offset_t	offset;

		p = (vm_page_t) queue_first(&object->memq);
		offset = p->offset;
		assert(!p->cleaning);

		VM_PAGE_FREE(p);

		m = vm_page_lookup(shadow_object,
				offset + object->shadow_offset);
		if(m == VM_PAGE_NULL)
			continue;

		/* used as a trigger on upl_commit etc to recognize the */
		/* pageout daemon's subsequent desire to pageout a cleaning */
		/* page.  When the bit is on the upl commit code will */
		/* respect the pageout bit in the target page over the */
		/* caller's page list indication */
		m->dump_cleaning = FALSE;

		/*
		 * Account for the paging reference taken when
		 * m->cleaning was set on this page.
		 */
		vm_object_paging_end(shadow_object);
		assert((m->dirty) || (m->precious) ||
				(m->busy && m->cleaning));

		/*
		 * Handle the trusted pager throttle.
		 */
		vm_page_lock_queues();
		vm_page_laundry_count--;
		if (vm_page_laundry_count < vm_page_laundry_min) {
			vm_page_laundry_min = 0;
			thread_wakeup((event_t) &vm_page_laundry_count);
		}

		/*
		 * Handle the "target" page(s).  These pages are to be freed if
		 * successfully cleaned.  Target pages are always busy, and are
		 * wired exactly once.  The initial target pages are not mapped,
		 * (so cannot be referenced or modified) but converted target
		 * pages may have been modified between the selection as an
		 * adjacent page and conversion to a target.
		 */
		if (m->pageout) {
			assert(m->wire_count == 1);
#if MACH_CLUSTER_STATS
			if (m->wanted) vm_pageout_target_collisions++;
#endif	/* MACH_CLUSTER_STATS */
			/*
			 * Revoke all access to the page.  Since the object is
			 * locked, and the page is busy, this prevents the page
			 * from being dirtied after the pmap_is_modified() call
			 * returns.
			 */
			pmap_page_protect(m->phys_addr, VM_PROT_NONE);

			/*
			 * Since the page is left "dirty" but "not modified", we
			 * can detect whether the page was redirtied during
			 * pageout by checking the modify state.
			 */
			m->dirty = pmap_is_modified(m->phys_addr);

			if (m->dirty) {
				CLUSTER_STAT(vm_pageout_target_page_dirtied++;)
				vm_page_unwire(m);	/* reactivates */
				VM_STAT(reactivations++);
			} else {
				CLUSTER_STAT(vm_pageout_target_page_freed++;)
				vm_page_free(m);	/* clears busy, etc. */
			}
			vm_page_unlock_queues();
			continue;
		}

		/*
		 * Handle the "adjacent" pages.  These pages were cleaned in
		 * place, and should be left alone.
		 * If prep_pin_count is nonzero, then someone is using the
		 * page, so make it active.
		 */
		if (!m->active && !m->inactive && !m->private) {
			vm_page_deactivate(m);
		}
		if((m->busy) && (m->cleaning)) {

			/* the request_page_list case, (COPY_OUT_FROM FALSE) */

			/* We do not re-set m->dirty !                        */
			/* The page was busy so no extraneous activity        */
			/* could have occurred.  COPY_INTO is a read into the */
			/* new pages.  CLEAN_IN_PLACE does actually write     */
			/* out the pages but handling outside of this code    */
			/* will take care of resetting dirty.  We clear the   */
			/* modify however for the Programmed I/O case.        */
			pmap_clear_modify(m->phys_addr);

			if(shadow_object->absent_count == 1)
				vm_object_absent_release(shadow_object);
			else
				shadow_object->absent_count--;

			m->overwriting = FALSE;
		} else if (m->overwriting) {
			/* alternate request page list, write to page_list */
			/* case.  Occurs when the original page was wired  */
			/* at the time of the list request */
			assert(m->wire_count != 0);
			vm_page_unwire(m);	/* reactivates */
			m->overwriting = FALSE;
		} else {
			/*
			 * Set the dirty state according to whether or not the page was
			 * modified during the pageout.  Note that we purposefully do
			 * NOT call pmap_clear_modify since the page is still mapped.
			 * If the page were to be dirtied between the 2 calls, this
			 * fact would be lost.  This code is only necessary to
			 * maintain statistics, since the pmap module is always
			 * consulted if m->dirty is false.
			 */
#if MACH_CLUSTER_STATS
			m->dirty = pmap_is_modified(m->phys_addr);

			if (m->dirty)	vm_pageout_cluster_dirtied++;
			else		vm_pageout_cluster_cleaned++;
			if (m->wanted)	vm_pageout_cluster_collisions++;
#endif	/* MACH_CLUSTER_STATS */
		}

		/*
		 * Wakeup any thread waiting for the page to be un-cleaning.
		 */
		PAGE_WAKEUP(m);
		vm_page_unlock_queues();
	}

	/*
	 * Account for the paging reference taken in vm_paging_object_allocate.
	 */
	vm_object_paging_end(shadow_object);
	vm_object_unlock(shadow_object);

	assert(object->ref_count == 0);
	assert(object->paging_in_progress == 0);
	assert(object->resident_page_count == 0);
}
/*
 *	Routine:	vm_pageout_setup
 *		Set up a page for pageout (clean & flush).
 *
 *		Move the page to a new object, as part of which it will be
 *		sent to its memory manager in a memory_object_data_write or
 *		memory_object_initialize message.
 *
 *		The "new_object" and "new_offset" arguments
 *		indicate where the page should be moved.
 *
 *		The page in question must not be on any pageout queues,
 *		and must be busy.  The object to which it belongs
 *		must be unlocked, and the caller must hold a paging
 *		reference to it.  The new_object must not be locked.
 *
 *		This routine returns a pointer to a place-holder page,
 *		inserted at the same offset, to block out-of-order
 *		requests for the page.  The place-holder page must
 *		be freed after the data_write or initialize message
 *		has been sent.
 *
 *		The original page is put on a paging queue and marked
 *		not busy on exit.
 */
vm_page_t
vm_pageout_setup(
	register vm_page_t	m,
	register vm_object_t	new_object,
	vm_object_offset_t	new_offset)
{
	register vm_object_t	old_object = m->object;
	vm_object_offset_t	paging_offset;
	vm_object_offset_t	offset;
	register vm_page_t	holding_page;
	register vm_page_t	new_m;
	register vm_page_t	new_page;
	boolean_t		need_to_wire = FALSE;
	XPR(XPR_VM_PAGEOUT,
		"vm_pageout_setup, obj 0x%X off 0x%X page 0x%X new obj 0x%X offset 0x%X\n",
		(integer_t)m->object, (integer_t)m->offset,
		(integer_t)m, (integer_t)new_object,
		(integer_t)new_offset);
	assert(m && m->busy && !m->absent && !m->fictitious && !m->error);
	assert(m->dirty || m->precious);

	/*
	 * Create a place-holder page where the old one was, to prevent
	 * attempted pageins of this page while we're unlocked.
	 */
	VM_PAGE_GRAB_FICTITIOUS(holding_page);

	vm_object_lock(old_object);

	offset = m->offset;
	paging_offset = offset + old_object->paging_offset;

	if (old_object->pager_trusted) {
		/*
		 * This pager is trusted, so we can clean this page
		 * in place.  Leave it in the old object, and mark it
		 * cleaning & pageout.
		 */
		new_m = holding_page;
		holding_page = VM_PAGE_NULL;

		/*
		 * Set up new page to be private shadow of real page.
		 */
		new_m->phys_addr = m->phys_addr;
		new_m->fictitious = FALSE;
		new_m->pageout = TRUE;

		/*
		 * Mark real page as cleaning (indicating that we hold a
		 * paging reference to be released via m_o_d_r_c) and
		 * pageout (indicating that the page should be freed
		 * when the pageout completes).
		 */
		pmap_clear_modify(m->phys_addr);
		vm_page_lock_queues();
		new_m->private = TRUE;
		assert(m->wire_count == 1);
		vm_page_unlock_queues();

		m->page_lock = VM_PROT_NONE;
		m->unlock_request = VM_PROT_NONE;
	} else {
		/*
		 * Cannot clean in place, so rip the old page out of the
		 * object, and stick the holding page in.  Set new_m to the
		 * page in the new object.
		 */
		vm_page_lock_queues();
		VM_PAGE_QUEUES_REMOVE(m);
		vm_page_insert(holding_page, old_object, offset);
		vm_page_unlock_queues();

		new_m->page_lock = VM_PROT_NONE;
		new_m->unlock_request = VM_PROT_NONE;

		if (old_object->internal)
			need_to_wire = TRUE;
	}
	/*
	 * Record that this page has been written out.
	 */
#if	MACH_PAGEMAP
	vm_external_state_set(old_object->existence_map, offset);
#endif	/* MACH_PAGEMAP */

	vm_object_unlock(old_object);

	vm_object_lock(new_object);

	/*
	 * Put the page into the new object.  If it is not wired
	 * (if it's the real page) it will be activated.
	 */
	vm_page_lock_queues();
	vm_page_insert(new_m, new_object, new_offset);
	if (need_to_wire)
		vm_page_wire(new_m);
	else
		vm_page_activate(new_m);
	PAGE_WAKEUP_DONE(new_m);
	vm_page_unlock_queues();

	vm_object_unlock(new_object);

	/*
	 * Return the placeholder page to simplify cleanup.
	 */
	return (holding_page);
}
/*
 *	Routine:	vm_pageclean_setup
 *
 *	Purpose:	setup a page to be cleaned (made non-dirty), but not
 *			necessarily flushed from the VM page cache.
 *			This is accomplished by cleaning in place.
 *
 *			The page must not be busy, and the object and page
 *			queues must be locked.
 */
void
vm_pageclean_setup(
	vm_page_t		m,
	vm_page_t		new_m,
	vm_object_t		new_object,
	vm_object_offset_t	new_offset)
{
	vm_object_t old_object = m->object;

	assert(!m->cleaning);

	XPR(XPR_VM_PAGEOUT,
		"vm_pageclean_setup, obj 0x%X off 0x%X page 0x%X new 0x%X new_off 0x%X\n",
		(integer_t)old_object, m->offset, (integer_t)m,
		(integer_t)new_m, new_offset);

	pmap_clear_modify(m->phys_addr);
	vm_object_paging_begin(old_object);

	/*
	 * Record that this page has been written out.
	 */
#if	MACH_PAGEMAP
	vm_external_state_set(old_object->existence_map, m->offset);
#endif	/* MACH_PAGEMAP */

	/*
	 * Mark original page as cleaning in place.
	 */

	/*
	 * Convert the fictitious page to a private shadow of
	 * the real page.
	 */
	assert(new_m->fictitious);
	new_m->fictitious = FALSE;
	new_m->private = TRUE;
	new_m->pageout = TRUE;
	new_m->phys_addr = m->phys_addr;

	vm_page_insert(new_m, new_object, new_offset);
	assert(!new_m->wanted);
}
void
vm_pageclean_copy(
	vm_page_t		m,
	vm_page_t		new_m,
	vm_object_t		new_object,
	vm_object_offset_t	new_offset)
{
	XPR(XPR_VM_PAGEOUT,
		"vm_pageclean_copy, page 0x%X new_m 0x%X new_obj 0x%X offset 0x%X\n",
		m, new_m, new_object, new_offset, 0);

	assert((!m->busy) && (!m->cleaning));

	assert(!new_m->private && !new_m->fictitious);

	pmap_clear_modify(m->phys_addr);

	vm_object_paging_begin(m->object);
	vm_page_unlock_queues();
	vm_object_unlock(m->object);

	/*
	 * Copy the original page to the new page.
	 */
	vm_page_copy(m, new_m);

	/*
	 * Mark the old page as clean.  A request to pmap_is_modified
	 * will get the right answer.
	 */
	vm_object_lock(m->object);

	vm_object_paging_end(m->object);

	vm_page_lock_queues();
	if (!m->active && !m->inactive)
		vm_page_activate(m);

	vm_page_insert(new_m, new_object, new_offset);
	vm_page_activate(new_m);
	new_m->busy = FALSE;	/* No other thread can be waiting */
}
/*
 *	Routine:	vm_pageout_initialize_page
 *		Causes the specified page to be initialized in
 *		the appropriate memory object.  This routine is used to push
 *		pages into a copy-object when they are modified in the
 *		permanent object.
 *
 *		The page is moved to a temporary object and paged out.
 *
 *		The page in question must not be on any pageout queues.
 *		The object to which it belongs must be locked.
 *		The page must be busy, but not hold a paging reference.
 *
 *		Move this page to a completely new object.
 */
void
vm_pageout_initialize_page(
	vm_page_t	m)
{
	vm_object_t		object = m->object;
	vm_object_t		new_object;
	vm_object_offset_t	paging_offset;
	vm_page_t		holding_page;

	XPR(XPR_VM_PAGEOUT,
		"vm_pageout_initialize_page, page 0x%X\n",
		(integer_t)m, 0, 0, 0, 0);

	/*
	 * Verify that we really want to clean this page.
	 */

	/*
	 * Create a paging reference to let us play with the object.
	 */
	paging_offset = m->offset + object->paging_offset;
	vm_object_paging_begin(object);
	vm_object_unlock(object);
	if (m->absent || m->error || m->restart ||
	    (!m->dirty && !m->precious)) {
		panic("reservation without pageout?"); /* alan */
	}

	/* set the page for future call to vm_fault_list_request */
	vm_object_lock(m->object);
	vm_page_lock_queues();
	pmap_clear_modify(m->phys_addr);
	m->list_req_pending = TRUE;
	vm_page_unlock_queues();
	vm_object_unlock(m->object);
	vm_pageout_throttle(m);

	/*
	 * Write the data to its pager.
	 * Note that the data is passed by naming the new object,
	 * not a virtual address; the pager interface has been
	 * manipulated to use the "internal memory" data type.
	 * [The object reference from its allocation is donated
	 * to the eventual recipient.]
	 */
	memory_object_data_initialize(object->pager,
		paging_offset,
		PAGE_SIZE);

	vm_object_lock(object);
}
#if	MACH_CLUSTER_STATS
#define MAXCLUSTERPAGES	16
struct {
	unsigned long pages_in_cluster;
	unsigned long pages_at_higher_offsets;
	unsigned long pages_at_lower_offsets;
} cluster_stats[MAXCLUSTERPAGES];
#endif	/* MACH_CLUSTER_STATS */

boolean_t allow_clustered_pageouts = FALSE;
/*
 * vm_pageout_cluster:
 *
 * Given a page, page it out, and attempt to clean adjacent pages
 * in the same operation.
 *
 * The page must be busy, and the object unlocked w/ paging reference
 * to prevent deallocation or collapse.  The page must not be on any
 * pageout queue.
 */
void
vm_pageout_cluster(
	vm_page_t	m)
{
	vm_object_t		object = m->object;
	vm_object_offset_t	offset = m->offset;	/* from vm_object start */
	vm_object_offset_t	paging_offset = m->offset + object->paging_offset;
	vm_object_t		new_object;
	vm_object_offset_t	new_offset;
	vm_size_t		cluster_size;
	vm_object_offset_t	cluster_offset;		/* from memory_object start */
	vm_object_offset_t	cluster_lower_bound;	/* from vm_object_start */
	vm_object_offset_t	cluster_upper_bound;	/* from vm_object_start */
	vm_object_offset_t	cluster_start, cluster_end;	/* from vm_object start */
	vm_object_offset_t	offset_within_cluster;
	vm_size_t		length_of_data;
	vm_page_t		friend, holding_page;
	kern_return_t		rc;
	boolean_t		precious_clean = TRUE;
	int			pages_in_cluster;

	CLUSTER_STAT(int	pages_at_higher_offsets = 0;)
	CLUSTER_STAT(int	pages_at_lower_offsets = 0;)

	XPR(XPR_VM_PAGEOUT,
		"vm_pageout_cluster, object 0x%X offset 0x%X page 0x%X\n",
		(integer_t)object, offset, (integer_t)m, 0, 0);

	CLUSTER_STAT(vm_pageout_cluster_clusters++;)

	/*
	 * Only a certain kind of page is appreciated here.
	 */
	assert(m->busy && (m->dirty || m->precious) && (m->wire_count == 0));
	assert(!m->cleaning && !m->pageout && !m->inactive && !m->active);

	vm_object_lock(object);
	cluster_size = object->cluster_size;

	assert(cluster_size >= PAGE_SIZE);
	if (cluster_size < PAGE_SIZE) cluster_size = PAGE_SIZE;
	assert(object->pager_created && object->pager_initialized);
	assert(object->internal || object->pager_ready);

	if (m->precious && !m->dirty)
		precious_clean = TRUE;

	if (!object->pager_trusted || !allow_clustered_pageouts)
		cluster_size = PAGE_SIZE;
	vm_object_unlock(object);

	cluster_offset = paging_offset & (vm_object_offset_t)(cluster_size - 1);
			/* bytes from beginning of cluster */
	/*
	 * Due to unaligned mappings, we have to be careful
	 * of negative offsets into the VM object.  Clip the cluster
	 * boundary to the VM object, not the memory object.
	 */
	if (offset > cluster_offset) {
		cluster_lower_bound = offset - cluster_offset;
	} else {
		cluster_lower_bound = 0;
	}
	cluster_upper_bound = (offset - cluster_offset) +
				(vm_object_offset_t)cluster_size;
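
	/*
	 * Worked example of the clipping above (illustrative values only):
	 * with a 32K cluster_size, a page at vm_object offset 0x3000 and a
	 * paging_offset of 0x1000, cluster_offset = 0x4000 & 0x7fff = 0x4000,
	 * which exceeds the page's offset, so cluster_lower_bound is clipped
	 * to 0 rather than going negative, and
	 * cluster_upper_bound = (0x3000 - 0x4000) + 0x8000 = 0x7000.
	 */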
	/* set the page for future call to vm_fault_list_request */
	vm_object_lock(m->object);
	vm_page_lock_queues();
	m->list_req_pending = TRUE;
	vm_page_unlock_queues();
	vm_object_unlock(m->object);
	vm_pageout_throttle(m);

	/*
	 * Search backward for adjacent eligible pages to clean in
	 * place.
	 */
	cluster_start = offset;
	if (offset) {	/* avoid wrap-around at zero */
		for (cluster_start = offset - PAGE_SIZE_64;
		     cluster_start >= cluster_lower_bound;
		     cluster_start -= PAGE_SIZE_64) {
			assert(cluster_size > PAGE_SIZE);

			vm_object_lock(object);
			vm_page_lock_queues();

			if ((friend = vm_pageout_cluster_page(object, cluster_start,
					precious_clean)) == VM_PAGE_NULL) {
				vm_page_unlock_queues();
				vm_object_unlock(object);
				break;
			}
			new_offset = (cluster_start + object->paging_offset)
					& (cluster_size - 1);

			assert(new_offset < cluster_offset);
			m->list_req_pending = TRUE;

			/* do nothing except advance the write request, all we really need to */
			/* do is push the target page and let the code at the other end decide */
			/* what is really the right size */
			if (vm_page_free_count <= vm_page_free_reserved) {
			}

			vm_page_unlock_queues();
			vm_object_unlock(object);
			if(m->dirty || m->object->internal) {
				CLUSTER_STAT(pages_at_lower_offsets++;)
			}
		}
		cluster_start += PAGE_SIZE_64;
	}
	assert(cluster_start >= cluster_lower_bound);
	assert(cluster_start <= offset);
	/*
	 * Search forward for adjacent eligible pages to clean in
	 * place.
	 */
	for (cluster_end = offset + PAGE_SIZE_64;
	     cluster_end < cluster_upper_bound;
	     cluster_end += PAGE_SIZE_64) {
		assert(cluster_size > PAGE_SIZE);

		vm_object_lock(object);
		vm_page_lock_queues();

		if ((friend = vm_pageout_cluster_page(object, cluster_end,
				precious_clean)) == VM_PAGE_NULL) {
			vm_page_unlock_queues();
			vm_object_unlock(object);
			break;
		}
		new_offset = (cluster_end + object->paging_offset)
				& (cluster_size - 1);

		assert(new_offset < cluster_size);
		m->list_req_pending = TRUE;

		/* do nothing except advance the write request, all we really need to */
		/* do is push the target page and let the code at the other end decide */
		/* what is really the right size */
		if (vm_page_free_count <= vm_page_free_reserved) {
		}

		vm_page_unlock_queues();
		vm_object_unlock(object);
		if(m->dirty || m->object->internal) {
			CLUSTER_STAT(pages_at_higher_offsets++;)
		}
	}
	assert(cluster_end <= cluster_upper_bound);
	assert(cluster_end >= offset + PAGE_SIZE);
	/*
	 * (offset - cluster_offset) is beginning of cluster_object
	 * relative to vm_object start.
	 */
	offset_within_cluster = cluster_start - (offset - cluster_offset);
	length_of_data = cluster_end - cluster_start;

	assert(offset_within_cluster < cluster_size);
	assert((offset_within_cluster + length_of_data) <= cluster_size);

	assert(rc == KERN_SUCCESS);

	pages_in_cluster = length_of_data/PAGE_SIZE;

#if MACH_CLUSTER_STATS
	(cluster_stats[pages_at_lower_offsets].pages_at_lower_offsets)++;
	(cluster_stats[pages_at_higher_offsets].pages_at_higher_offsets)++;
	(cluster_stats[pages_in_cluster].pages_in_cluster)++;
#endif	/* MACH_CLUSTER_STATS */

	/*
	 * Send the data to the pager.
	 */
	paging_offset = cluster_start + object->paging_offset;

	rc = memory_object_data_return(object->pager,

	vm_object_lock(object);
	vm_object_paging_end(object);

	if (holding_page) {
		assert(!object->pager_trusted);
		VM_PAGE_FREE(holding_page);
		vm_object_paging_end(object);
	}

	vm_object_unlock(object);
}
/*
 * Trusted pager throttle.
 * Object must be unlocked, page queues must be unlocked.
 */
void
vm_pageout_throttle(
	register vm_page_t m)
{
	vm_page_lock_queues();
	assert(!m->laundry);
	while (vm_page_laundry_count >= vm_page_laundry_max) {
		/*
		 * Set the threshold for when vm_page_free()
		 * should wake us up.
		 */
		vm_page_laundry_min = vm_page_laundry_max/2;

		assert_wait((event_t) &vm_page_laundry_count, THREAD_UNINT);
		vm_page_unlock_queues();

		/*
		 * Pause to let the default pager catch up.
		 */
		thread_block((void (*)(void)) 0);
		vm_page_lock_queues();
	}
	vm_page_laundry_count++;
	vm_page_unlock_queues();
}
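
/*
 * Example of the throttle with the default tuning (assumed values, for
 * illustration only): vm_page_laundry_max defaults to VM_PAGE_LAUNDRY_MAX
 * (6), so a seventh concurrent default-pager clean blocks above and sets
 * vm_page_laundry_min to 6/2 = 3; vm_page_free() wakes the waiter once the
 * laundry count drops below that threshold.
 */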
/*
 * The global variable vm_pageout_clean_active_pages controls whether
 * active pages are considered valid to be cleaned in place during a
 * clustered pageout.  Performance measurements are necessary to determine
 * the best policy.
 */
int vm_pageout_clean_active_pages = 1;
/*
 * vm_pageout_cluster_page: [Internal]
 *
 * return a vm_page_t to the page at (object,offset) if it is appropriate
 * to clean in place.  Pages that are non-existent, busy, absent, already
 * cleaning, or not dirty are not eligible to be cleaned as an adjacent
 * page in a cluster.
 *
 * The object must be locked on entry, and remains locked throughout
 * this call.
 */
vm_page_t
vm_pageout_cluster_page(
	vm_object_t		object,
	vm_object_offset_t	offset,
	boolean_t		precious_clean)
{
	vm_page_t	m;

	XPR(XPR_VM_PAGEOUT,
		"vm_pageout_cluster_page, object 0x%X offset 0x%X\n",
		(integer_t)object, offset, 0, 0, 0);

	if ((m = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
		return(VM_PAGE_NULL);

	if (m->busy || m->absent || m->cleaning ||
	    (m->wire_count != 0) || m->error)
		return(VM_PAGE_NULL);

	if (vm_pageout_clean_active_pages) {
		if (!m->active && !m->inactive) return(VM_PAGE_NULL);
	} else {
		if (!m->inactive) return(VM_PAGE_NULL);
	}

	assert(!m->private);
	assert(!m->fictitious);

	if (!m->dirty) m->dirty = pmap_is_modified(m->phys_addr);

	if (precious_clean) {
		if (!m->precious || !m->dirty)
			return(VM_PAGE_NULL);
	} else {
		if (!m->dirty)
			return(VM_PAGE_NULL);
	}
	return(m);
}
/*
 * vm_pageout_scan does the dirty work for the pageout daemon.
 * It returns with vm_page_queue_free_lock held and
 * vm_page_free_wanted == 0.
 */
extern void vm_pageout_scan_continue(void);	/* forward; */

void
vm_pageout_scan(void)
{
	unsigned int	burst_count;
	boolean_t	now = FALSE;
	unsigned int	laundry_pages;
	boolean_t	need_more_inactive_pages;
	unsigned int	loop_detect;

	XPR(XPR_VM_PAGEOUT, "vm_pageout_scan\n", 0, 0, 0, 0, 0);
	/*
	 * We want to gradually dribble pages from the active queue
	 * to the inactive queue.  If we let the inactive queue get
	 * very small, and then suddenly dump many pages into it,
	 * those pages won't get a sufficient chance to be referenced
	 * before we start taking them from the inactive queue.
	 *
	 * We must limit the rate at which we send pages to the pagers.
	 * data_write messages consume memory, for message buffers and
	 * for map-copy objects.  If we get too far ahead of the pagers,
	 * we can potentially run out of memory.
	 *
	 * We can use the laundry count to limit directly the number
	 * of pages outstanding to the default pager.  A similar
	 * strategy for external pagers doesn't work, because
	 * external pagers don't have to deallocate the pages sent them,
	 * and because we might have to send pages to external pagers
	 * even if they aren't processing writes.  So we also
	 * use a burst count to limit writes to external pagers.
	 *
	 * When memory is very tight, we can't rely on external pagers to
	 * clean pages.  They probably aren't running, because they
	 * aren't vm-privileged.  If we kept sending dirty pages to them,
	 * we could exhaust the free list.  However, we can't just ignore
	 * pages belonging to external objects, because there might be no
	 * pages belonging to internal objects.  Hence, we get the page
	 * into an internal object and then immediately double-page it,
	 * sending it to the default pager.
	 *
	 * consider_zone_gc should be last, because the other operations
	 * might return memory to zones.
	 */
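
	/*
	 * Concretely (using the default constants defined above, purely as
	 * an illustration): a full burst of VM_PAGEOUT_BURST_MAX (32) pages
	 * makes the scan pause for 32 * VM_PAGEOUT_BURST_WAIT (30 ms) =
	 * 960 ms before continuing, and when there is simply nothing to do
	 * (both inactive queues empty) the pause is stretched to at least
	 * VM_PAGEOUT_EMPTY_WAIT, 200 ms.
	 */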
1224 mutex_lock(&vm_page_queue_free_lock
);
1225 now
= (vm_page_free_count
< vm_page_free_min
);
1226 mutex_unlock(&vm_page_queue_free_lock
);
1228 swapout_threads(now
);
1229 #endif /* THREAD_SWAPPER */
1232 consider_task_collect();
1233 consider_thread_collect();
1235 consider_machine_collect();
1237 loop_detect
= vm_page_active_count
+ vm_page_inactive_count
;
1239 if (vm_page_free_count
<= vm_page_free_reserved
) {
1240 need_more_inactive_pages
= TRUE
;
1242 need_more_inactive_pages
= FALSE
;
1245 need_more_inactive_pages
= FALSE
;
1248 for (burst_count
= 0;;) {
1249 register vm_page_t m
;
1250 register vm_object_t object
;
1253 * Recalculate vm_page_inactivate_target.
1256 vm_page_lock_queues();
1257 vm_page_inactive_target
=
1258 VM_PAGE_INACTIVE_TARGET(vm_page_active_count
+
1259 vm_page_inactive_count
);
1262 * Move pages from active to inactive.
1265 while ((vm_page_inactive_count
< vm_page_inactive_target
||
1266 need_more_inactive_pages
) &&
1267 !queue_empty(&vm_page_queue_active
)) {
1268 register vm_object_t object
;
1270 vm_pageout_active
++;
1271 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
1274 * If we're getting really low on memory,
1275 * try selecting a page that will go
1276 * directly to the default_pager.
1277 * If there are no such pages, we have to
1278 * page out a page backed by an EMM,
1279 * so that the default_pager can recover
1282 if (need_more_inactive_pages
&&
1283 (IP_VALID(memory_manager_default
))) {
1284 vm_pageout_scan_active_emm_throttle
++;
1286 assert(m
->active
&& !m
->inactive
);
1289 if (vm_object_lock_try(object
)) {
1291 if (object
->pager_trusted
||
1294 vm_pageout_scan_active_emm_throttle_success
++;
1295 goto object_locked_active
;
1298 vm_pageout_scan_active_emm_throttle_success
++;
1299 goto object_locked_active
;
1301 vm_object_unlock(object
);
1303 m
= (vm_page_t
) queue_next(&m
->pageq
);
1304 } while (!queue_end(&vm_page_queue_active
,
1305 (queue_entry_t
) m
));
1306 if (queue_end(&vm_page_queue_active
,
1307 (queue_entry_t
) m
)) {
1308 vm_pageout_scan_active_emm_throttle_failure
++;
1310 queue_first(&vm_page_queue_active
);
1314 assert(m
->active
&& !m
->inactive
);
1317 if (!vm_object_lock_try(object
)) {
1319 * Move page to end and continue.
1322 queue_remove(&vm_page_queue_active
, m
,
1324 queue_enter(&vm_page_queue_active
, m
,
1326 vm_page_unlock_queues();
1329 vm_page_lock_queues();
1333 object_locked_active
:
1335 * If the page is busy, then we pull it
1336 * off the active queue and leave it alone.
1340 vm_object_unlock(object
);
1341 queue_remove(&vm_page_queue_active
, m
,
1345 vm_page_active_count
--;
1350 * Deactivate the page while holding the object
1351 * locked, so we know the page is still not busy.
1352 * This should prevent races between pmap_enter
1353 * and pmap_clear_reference. The page might be
1354 * absent or fictitious, but vm_page_deactivate
1358 vm_page_deactivate(m
);
1359 vm_object_unlock(object
);
1363 * We are done if we have met our target *and*
1364 * nobody is still waiting for a page.
1366 if (vm_page_free_count
>= vm_page_free_target
) {
1367 mutex_lock(&vm_page_queue_free_lock
);
1368 if ((vm_page_free_count
>= vm_page_free_target
) &&
1369 (vm_page_free_wanted
== 0)) {
1370 vm_page_unlock_queues();
1373 mutex_unlock(&vm_page_queue_free_lock
);
1376 * Sometimes we have to pause:
1377 * 1) No inactive pages - nothing to do.
1378 * 2) Flow control - wait for untrusted pagers to catch up.
1381 if ((queue_empty(&vm_page_queue_inactive
) &&
1382 (queue_empty(&vm_page_queue_zf
))) ||
1383 ((--loop_detect
) == 0) ||
1384 (burst_count
>= vm_pageout_burst_max
)) {
1385 unsigned int pages
, msecs
;
1388 consider_machine_adjust();
1390 * vm_pageout_burst_wait is msecs/page.
1391 * If there is nothing for us to do, we wait
1392 * at least vm_pageout_empty_wait msecs.
1394 pages
= burst_count
;
1396 if (loop_detect
== 0) {
1397 printf("Warning: No physical memory suitable for pageout or reclaim, pageout thread temporarily going to sleep\n");
1398 msecs
= vm_free_page_pause
;
1401 msecs
= burst_count
* vm_pageout_burst_wait
;
1404 if (queue_empty(&vm_page_queue_inactive
) &&
1405 queue_empty(&vm_page_queue_zf
) &&
1406 (msecs
< vm_pageout_empty_wait
))
1407 msecs
= vm_pageout_empty_wait
;
1408 vm_page_unlock_queues();
1410 assert_wait_timeout(msecs
, THREAD_INTERRUPTIBLE
);
1411 counter(c_vm_pageout_scan_block
++);
1414 * Unfortunately, we don't have call_continuation
1415 * so we can't rely on tail-recursion.
1417 wait_result
= thread_block((void (*)(void)) 0);
1418 if (wait_result
!= THREAD_TIMED_OUT
)
1419 thread_cancel_timer();
1420 vm_pageout_scan_continue();
1426 vm_pageout_inactive
++;
1428 if (vm_zf_count
< vm_accellerate_zf_pageout_trigger
) {
1432 if((vm_zf_iterator
+=1) >= vm_zf_iterator_count
) {
1436 if(queue_empty(&vm_page_queue_zf
) ||
1437 (((last_page_zf
) || (vm_zf_iterator
== 0)) &&
1438 !queue_empty(&vm_page_queue_inactive
))) {
1439 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
1442 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
1446 if ((vm_page_free_count
<= vm_page_free_reserved
) &&
1447 (IP_VALID(memory_manager_default
))) {
1449 * We're really low on memory. Try to select a page that
1450 * would go directly to the default_pager.
1451 * If there are no such pages, we have to page out a
1452 * page backed by an EMM, so that the default_pager
1453 * can recover it eventually.
1455 vm_pageout_scan_inactive_emm_throttle
++;
1457 assert(!m
->active
&& m
->inactive
);
1460 if (vm_object_lock_try(object
)) {
1462 if (object
->pager_trusted
||
1465 vm_pageout_scan_inactive_emm_throttle_success
++;
1466 goto object_locked_inactive
;
1469 vm_pageout_scan_inactive_emm_throttle_success
++;
1470 goto object_locked_inactive
;
1472 vm_object_unlock(object
);
1474 m
= (vm_page_t
) queue_next(&m
->pageq
);
1475 } while ((!queue_end(&vm_page_queue_zf
,
1477 && (!queue_end(&vm_page_queue_inactive
,
1478 (queue_entry_t
) m
)));
1480 if ((queue_end(&vm_page_queue_zf
,
1482 || (queue_end(&vm_page_queue_inactive
,
1483 (queue_entry_t
) m
))) {
1484 vm_pageout_scan_inactive_emm_throttle_failure
++;
1486 * We should check the "active" queue
1487 * for good candidates to page out.
1489 need_more_inactive_pages
= TRUE
;
1491 if(last_page_zf
== 0) {
1493 vm_zf_iterator
= vm_zf_iterator_count
- 1;
1496 vm_zf_iterator
= vm_zf_iterator_count
- 2;
1498 vm_page_unlock_queues();
1503 assert(!m
->active
&& m
->inactive
);
1507 * Try to lock object; since we've got the
1508 * page queues lock, we can only try for this one.
1511 if (!vm_object_lock_try(object
)) {
1513 * Move page to end and continue.
1514 * Don't re-issue ticket
1517 queue_remove(&vm_page_queue_zf
, m
,
1519 queue_enter(&vm_page_queue_zf
, m
,
1522 queue_remove(&vm_page_queue_inactive
, m
,
1524 queue_enter(&vm_page_queue_inactive
, m
,
1527 vm_page_unlock_queues();
1530 vm_pageout_inactive_nolock
++;
1534 object_locked_inactive
:
1536 * Paging out pages of objects which pager is being
1537 * created by another thread must be avoided, because
1538 * this thread may claim for memory, thus leading to a
1539 * possible dead lock between it and the pageout thread
1540 * which will wait for pager creation, if such pages are
1541 * finally chosen. The remaining assumption is that there
1542 * will finally be enough available pages in the inactive
1543 * pool to page out in order to satisfy all memory claimed
1544 * by the thread which concurrently creates the pager.
1547 if (!object
->pager_initialized
&& object
->pager_created
) {
1549 * Move page to end and continue, hoping that
1550 * there will be enough other inactive pages to
1551 * page out so that the thread which currently
1552 * initializes the pager will succeed.
1553 * Don't re-grant the ticket, the page should
1554 * pulled from the queue and paged out whenever
1555 * one of its logically adjacent fellows is
1559 queue_remove(&vm_page_queue_zf
, m
,
1561 queue_enter(&vm_page_queue_zf
, m
,
1564 vm_zf_iterator
= vm_zf_iterator_count
- 1;
1566 queue_remove(&vm_page_queue_inactive
, m
,
1568 queue_enter(&vm_page_queue_inactive
, m
,
1573 vm_page_unlock_queues();
1574 vm_object_unlock(object
);
1575 vm_pageout_inactive_avoid
++;
1580 * Remove the page from the inactive list.
1584 queue_remove(&vm_page_queue_zf
, m
, vm_page_t
, pageq
);
1586 queue_remove(&vm_page_queue_inactive
, m
, vm_page_t
, pageq
);
1588 m
->inactive
= FALSE
;
1590 vm_page_inactive_count
--;
1592 if (m
->busy
|| !object
->alive
) {
1594 * Somebody is already playing with this page.
1595 * Leave it off the pageout queues.
1598 vm_page_unlock_queues();
1599 vm_object_unlock(object
);
1600 vm_pageout_inactive_busy
++;
1605 * If it's absent or in error, we can reclaim the page.
1608 if (m
->absent
|| m
->error
) {
1609 vm_pageout_inactive_absent
++;
1612 vm_page_unlock_queues();
1613 vm_object_unlock(object
);
1617 assert(!m
->private);
1618 assert(!m
->fictitious
);
1621 * If already cleaning this page in place, convert from
1622 * "adjacent" to "target". We can leave the page mapped,
1623 * and vm_pageout_object_terminate will determine whether
1624 * to free or reactivate.
1628 #if MACH_CLUSTER_STATS
1629 vm_pageout_cluster_conversions
++;
1633 m
->dump_cleaning
= TRUE
;
1635 vm_object_unlock(object
);
1636 vm_page_unlock_queues();
1641 * If it's being used, reactivate.
1642 * (Fictitious pages are either busy or absent.)
1645 if (m
->reference
|| pmap_is_referenced(m
->phys_addr
)) {
1646 vm_pageout_inactive_used
++;
1648 #if ADVISORY_PAGEOUT
1649 if (m
->discard_request
) {
1650 m
->discard_request
= FALSE
;
1652 #endif /* ADVISORY_PAGEOUT */
1654 vm_object_unlock(object
);
1655 vm_page_activate(m
);
1656 VM_STAT(reactivations
++);
1657 vm_page_unlock_queues();
1661 #if ADVISORY_PAGEOUT
1662 if (object
->advisory_pageout
) {
1663 boolean_t do_throttle
;
1664 memory_object_t pager
;
1665 vm_object_offset_t discard_offset
;
1667 if (m
->discard_request
) {
1668 vm_stat_discard_failure
++;
1669 goto mandatory_pageout
;
1672 assert(object
->pager_initialized
);
1673 m
->discard_request
= TRUE
;
1674 pager
= object
->pager
;
1676 /* system-wide throttle */
1677 do_throttle
= (vm_page_free_count
<=
1678 vm_page_free_reserved
);
1682 * JMM - Do we need a replacement throttle
1683 * mechanism for pagers?
1686 /* throttle on this pager */
1687 /* XXX lock ordering ? */
1689 do_throttle
= imq_full(&port
->ip_messages
);
1695 vm_stat_discard_throttle
++;
1697 /* ignore this page and skip to next */
1698 vm_page_unlock_queues();
1699 vm_object_unlock(object
);
1702 /* force mandatory pageout */
1703 goto mandatory_pageout
;
1707 /* proceed with discard_request */
1708 vm_page_activate(m
);
1710 VM_STAT(reactivations
++);
1711 discard_offset
= m
->offset
+ object
->paging_offset
;
1712 vm_stat_discard_sent
++;
1713 vm_page_unlock_queues();
1714 vm_object_unlock(object
);
1717 memory_object_discard_request(object->pager,
1724 #endif /* ADVISORY_PAGEOUT */
1727 "vm_pageout_scan, replace object 0x%X offset 0x%X page 0x%X\n",
1728 (integer_t
)object
, (integer_t
)m
->offset
, (integer_t
)m
, 0,0);
1731 * Eliminate all mappings.
1735 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
1738 m
->dirty
= pmap_is_modified(m
->phys_addr
);
1740 * If it's clean and not precious, we can free the page.
1743 if (!m
->dirty
&& !m
->precious
) {
1744 vm_pageout_inactive_clean
++;
1747 vm_page_unlock_queues();
1750 * If there is no memory object for the page, create
1751 * one and hand it to the default pager.
1754 if (!object
->pager_initialized
)
1755 vm_object_collapse(object
);
1756 if (!object
->pager_initialized
)
1757 vm_object_pager_create(object
);
1758 if (!object
->pager_initialized
) {
1760 * Still no pager for the object.
1761 * Reactivate the page.
1763 * Should only happen if there is no
1766 vm_page_lock_queues();
1767 vm_page_activate(m
);
1768 vm_page_unlock_queues();
1771 * And we are done with it.
1773 PAGE_WAKEUP_DONE(m
);
1774 vm_object_unlock(object
);
1777 * break here to get back to the preemption
1778 * point in the outer loop so that we don't
1779 * spin forever if there is no default pager.
1781 vm_pageout_dirty_no_pager
++;
1783 * Well there's no pager, but we can still reclaim
1784 * free pages out of the inactive list. Go back
1785 * to top of loop and look for suitable pages.
1790 if ((object
->pager_initialized
) &&
1791 (object
->pager
== MEMORY_OBJECT_NULL
)) {
1793 * This pager has been destroyed by either
1794 * memory_object_destroy or vm_object_destroy, and
1795 * so there is nowhere for the page to go.
1796 * Just free the page.
1799 vm_object_unlock(object
);
1803 vm_pageout_inactive_dirty
++;
1805 if (!object->internal)
1808 vm_object_paging_begin(object
);
1809 vm_object_unlock(object
);
1810 vm_pageout_cluster(m
); /* flush it */
1812 consider_machine_adjust();
counter(unsigned int c_vm_pageout_scan_continue = 0;)

void
vm_pageout_scan_continue(void)
{
	/*
	 * We just paused to let the pagers catch up.
	 * If vm_page_laundry_count is still high,
	 * then we aren't waiting long enough.
	 * If we have paused some vm_pageout_pause_max times without
	 * adjusting vm_pageout_burst_wait, it might be too big,
	 * so we decrease it.
	 */
	vm_page_lock_queues();
	counter(++c_vm_pageout_scan_continue);
	if (vm_page_laundry_count > vm_pageout_burst_min) {
		vm_pageout_burst_wait++;
		vm_pageout_pause_count = 0;
	} else if (++vm_pageout_pause_count > vm_pageout_pause_max) {
		vm_pageout_burst_wait = (vm_pageout_burst_wait * 3) / 4;
		if (vm_pageout_burst_wait < 1)
			vm_pageout_burst_wait = 1;
		vm_pageout_pause_count = 0;
	}
	vm_page_unlock_queues();
}
void vm_page_free_reserve(int pages);
int vm_page_free_count_init;

void
vm_page_free_reserve(
	int pages)
{
	int	free_after_reserve;

	vm_page_free_reserved += pages;

	free_after_reserve = vm_page_free_count_init - vm_page_free_reserved;

	vm_page_free_min = vm_page_free_reserved +
		VM_PAGE_FREE_MIN(free_after_reserve);

	vm_page_free_target = vm_page_free_reserved +
		VM_PAGE_FREE_TARGET(free_after_reserve);

	if (vm_page_free_target < vm_page_free_min + 5)
		vm_page_free_target = vm_page_free_min + 5;
}
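
/*
 * Illustrative numbers (assumed machine size, not from the source): if
 * vm_page_free_count_init is 100000 pages and the default reserve of
 * (16 * 6) + NCPUS pages is taken (98 with NCPUS == 2), then
 * free_after_reserve = 99902, giving
 * vm_page_free_min = 98 + 10 + 99902/100 = 1107 and
 * vm_page_free_target = 98 + 15 + 99902/80 = 1361.
 */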
/*
 *	vm_pageout is the high level pageout daemon.
 */
void
vm_pageout(void)
{
	thread_t	self = current_thread();

	/*
	 * Set thread privileges.
	 */
	self->vm_privilege = TRUE;
	stack_privilege(self);

	thread_lock(self);
	self->priority = BASEPRI_PREEMPT - 1;
	set_sched_pri(self, self->priority);
	thread_unlock(self);

	/*
	 * Initialize some paging parameters.
	 */
	if (vm_page_laundry_max == 0)
		vm_page_laundry_max = VM_PAGE_LAUNDRY_MAX;

	if (vm_pageout_burst_max == 0)
		vm_pageout_burst_max = VM_PAGEOUT_BURST_MAX;

	if (vm_pageout_burst_wait == 0)
		vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT;

	if (vm_pageout_empty_wait == 0)
		vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT;

	vm_page_free_count_init = vm_page_free_count;

	/*
	 * even if we've already called vm_page_free_reserve
	 * call it again here to ensure that the targets are
	 * accurately calculated (it uses vm_page_free_count_init)
	 * calling it with an arg of 0 will not change the reserve
	 * but will re-calculate free_min and free_target
	 */
	if (vm_page_free_reserved < VM_PAGE_FREE_RESERVED)
		vm_page_free_reserve(VM_PAGE_FREE_RESERVED - vm_page_free_reserved);
	else
		vm_page_free_reserve(0);

	/*
	 * vm_pageout_scan will set vm_page_inactive_target.
	 *
	 * The pageout daemon is never done, so loop forever.
	 * We should call vm_pageout_scan at least once each
	 * time we are woken, even if vm_page_free_wanted is
	 * zero, to check vm_page_free_target and
	 * vm_page_inactive_target.
	 */
	for (;;) {
		vm_pageout_scan_event_counter++;
		vm_pageout_scan();
		/* we hold vm_page_queue_free_lock now */
		assert(vm_page_free_wanted == 0);
		assert_wait((event_t) &vm_page_free_wanted, THREAD_UNINT);
		mutex_unlock(&vm_page_queue_free_lock);
		counter(c_vm_pageout_block++);
		thread_block((void (*)(void)) 0);
	}
}
kern_return_t
vm_pageout_emergency_availability_request()
{
	vm_page_t	m;
	vm_object_t	object;

	vm_page_lock_queues();
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);

	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {

		m = (vm_page_t) queue_next(&m->pageq);

		m->dirty = pmap_is_modified(m->phys_addr);
		if(m->dirty || m->busy || m->wire_count || m->absent
				|| m->precious || m->cleaning
				|| m->dump_cleaning || m->error
				|| m->pageout || m->laundry
				|| m->list_req_pending
				|| m->overwriting) {
			m = (vm_page_t) queue_next(&m->pageq);
			continue;
		}
		object = m->object;

		if (vm_object_lock_try(object)) {
			if((!object->alive) ||
			   (object->pageout)) {
				vm_object_unlock(object);
				m = (vm_page_t) queue_next(&m->pageq);
				continue;
			}
			pmap_page_protect(m->phys_addr, VM_PROT_NONE);
			vm_object_unlock(object);
			vm_page_unlock_queues();
			return KERN_SUCCESS;
		}
		m = (vm_page_t) queue_next(&m->pageq);
	}

	m = (vm_page_t) queue_first(&vm_page_queue_active);

	while (!queue_end(&vm_page_queue_active, (queue_entry_t) m)) {

		m = (vm_page_t) queue_next(&m->pageq);

		m->dirty = pmap_is_modified(m->phys_addr);
		if(m->dirty || m->busy || m->wire_count || m->absent
				|| m->precious || m->cleaning
				|| m->dump_cleaning || m->error
				|| m->pageout || m->laundry
				|| m->list_req_pending
				|| m->overwriting) {
			m = (vm_page_t) queue_next(&m->pageq);
			continue;
		}
		object = m->object;

		if (vm_object_lock_try(object)) {
			if((!object->alive) ||
			   (object->pageout)) {
				vm_object_unlock(object);
				m = (vm_page_t) queue_next(&m->pageq);
				continue;
			}
			pmap_page_protect(m->phys_addr, VM_PROT_NONE);
			vm_object_unlock(object);
			vm_page_unlock_queues();
			return KERN_SUCCESS;
		}
		m = (vm_page_t) queue_next(&m->pageq);
	}
	vm_page_unlock_queues();
	return KERN_FAILURE;
}
2035 upl
= (upl_t
)kalloc(sizeof(struct upl
)
2036 + (sizeof(struct upl_page_info
)*(size
/page_size
)));
2038 upl
= (upl_t
)kalloc(sizeof(struct upl
));
2041 upl
->src_object
= NULL
;
2042 upl
->kaddr
= (vm_offset_t
)0;
2044 upl
->map_object
= NULL
;
2048 upl
->ubc_alias1
= 0;
2049 upl
->ubc_alias2
= 0;
2050 #endif /* UBC_DEBUG */
2062 vm_object_lock(upl
->map_object
->shadow
);
2063 queue_iterate(&upl
->map_object
->shadow
->uplq
,
2064 upl_ele
, upl_t
, uplq
) {
2065 if(upl_ele
== upl
) {
2066 queue_remove(&upl
->map_object
->shadow
->uplq
,
2067 upl_ele
, upl_t
, uplq
);
2071 vm_object_unlock(upl
->map_object
->shadow
);
2073 #endif /* UBC_DEBUG */
2075 if(!(upl
->flags
& UPL_DEVICE_MEMORY
))
2077 vm_object_deallocate(upl
->map_object
);
2078 if(upl
->flags
& UPL_INTERNAL
) {
2079 kfree((vm_offset_t
)upl
,
2080 sizeof(struct upl
) +
2081 (sizeof(struct upl_page_info
) * (upl
->size
/page_size
)));
2083 kfree((vm_offset_t
)upl
, sizeof(struct upl
));
2087 __private_extern__
void
2091 upl
->ref_count
-= 1;
2092 if(upl
->ref_count
== 0) {
2102 upl
->ref_count
-= 1;
2103 if(upl
->ref_count
== 0) {
/*
 *	Routine:	vm_object_upl_request
 *		Cause the population of a portion of a vm_object.
 *		Depending on the nature of the request, the pages
 *		returned may contain valid data or be uninitialized.
 *		A page list structure, listing the physical pages
 *		will be returned upon request.
 *		This function is called by the file system or any other
 *		supplier of backing store to a pager.
 *		IMPORTANT NOTE: The caller must still respect the relationship
 *		between the vm_object and its backing memory object.  The
 *		caller MUST NOT substitute changes in the backing file
 *		without first doing a memory_object_lock_request on the
 *		target range unless it is known that the pages are not
 *		shared with another entity at the pager level.
 *
 *		if a page list structure is present
 *		return the mapped physical pages, where a
 *		page is not present, return a non-initialized
 *		one.  If the no_sync bit is turned on, don't
 *		call the pager unlock to synchronize with other
 *		possible copies of the page.  Leave pages busy
 *		in the original object, if a page list structure
 *		was specified.  When a commit of the page list
 *		pages is done, the dirty bit will be set for each one.
 *
 *		If a page list structure is present, return
 *		all mapped pages.  Where a page does not exist
 *		map a zero filled one.  Leave pages busy in
 *		the original object.  If a page list structure
 *		is not specified, this call is a no-op.
 *
 *	Note:	access of default pager objects has a rather interesting
 *		twist.  The caller of this routine, presumably the file system
 *		page cache handling code, will never actually make a request
 *		against a default pager backed object.  Only the default
 *		pager will make requests on backing store related vm_objects.
 *		In this way the default pager can maintain the relationship
 *		between backing store files (abstract memory objects) and
 *		the vm_objects (cache objects) they support.
 */
__private_extern__ kern_return_t
vm_object_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			cntrl_flags)
{
	vm_object_offset_t	dst_offset = offset;
	vm_size_t		xfer_size = size;
	boolean_t		do_m_lock = FALSE;
	boolean_t		encountered_lrp = FALSE;
	vm_page_t		alias_page = NULL;
	upl_t			upl = NULL;
	int			page_ticket;

	page_ticket = (cntrl_flags & UPL_PAGE_TICKET_MASK)
				>> UPL_PAGE_TICKET_SHIFT;

	if(((size/page_size) > MAX_UPL_TRANSFER) && !object->phys_contiguous) {
		size = MAX_UPL_TRANSFER * page_size;
	}

	if(cntrl_flags & UPL_SET_INTERNAL)
		if(page_list_count != NULL)
			*page_list_count = MAX_UPL_TRANSFER;
	if(((cntrl_flags & UPL_SET_INTERNAL) && !(object->phys_contiguous)) &&
	   ((page_list_count != NULL) && (*page_list_count != 0)
			&& *page_list_count < (size/page_size)))
		return KERN_INVALID_ARGUMENT;

	if((!object->internal) && (object->paging_offset != 0))
		panic("vm_object_upl_request: vnode object with non-zero paging offset\n");
2192 if((cntrl_flags
& UPL_COPYOUT_FROM
) && (upl_ptr
== NULL
)) {
2193 return KERN_SUCCESS
;
2196 if(cntrl_flags
& UPL_SET_INTERNAL
) {
2197 upl
= upl_create(TRUE
, size
);
2198 user_page_list
= (upl_page_info_t
*)
2199 (((vm_offset_t
)upl
) + sizeof(struct upl
));
2200 upl
->flags
|= UPL_INTERNAL
;
2202 upl
= upl_create(FALSE
, size
);
2204 if(object
->phys_contiguous
) {
2206 upl
->offset
= offset
+ object
->paging_offset
;
2208 if(user_page_list
) {
2209 user_page_list
[0].phys_addr
=
2210 offset
+ object
->shadow_offset
;
2211 user_page_list
[0].device
= TRUE
;
2213 upl
->map_object
= vm_object_allocate(size
);
2214 vm_object_lock(upl
->map_object
);
2215 upl
->map_object
->shadow
= object
;
2216 upl
->flags
= UPL_DEVICE_MEMORY
| UPL_INTERNAL
;
2217 upl
->map_object
->pageout
= TRUE
;
2218 upl
->map_object
->can_persist
= FALSE
;
2219 upl
->map_object
->copy_strategy
2220 = MEMORY_OBJECT_COPY_NONE
;
2221 upl
->map_object
->shadow_offset
= offset
;
2222 vm_object_unlock(upl
->map_object
);
2223 return KERN_SUCCESS
;
2227 upl
->map_object
= vm_object_allocate(size
);
2228 vm_object_lock(upl
->map_object
);
2229 upl
->map_object
->shadow
= object
;
2231 upl
->offset
= offset
+ object
->paging_offset
;
2232 upl
->map_object
->pageout
= TRUE
;
2233 upl
->map_object
->can_persist
= FALSE
;
2234 upl
->map_object
->copy_strategy
= MEMORY_OBJECT_COPY_NONE
;
2235 upl
->map_object
->shadow_offset
= offset
;
2236 upl
->map_object
->wimg_bits
= object
->wimg_bits
;
2237 vm_object_unlock(upl
->map_object
);
2240 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
2241 vm_object_lock(object
);
2244 queue_enter(&object
->uplq
, upl
, upl_t
, uplq
);
2245 #endif /* UBC_DEBUG */
2246 vm_object_paging_begin(object
);
2248 if(cntrl_flags
& UPL_COPYOUT_FROM
) {
2249 upl
->flags
|= UPL_PAGE_SYNC_DONE
;
2251 if(alias_page
== NULL
) {
2252 vm_object_unlock(object
);
2253 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
2254 vm_object_lock(object
);
2256 if(((dst_page
= vm_page_lookup(object
,
2257 dst_offset
)) == VM_PAGE_NULL
) ||
2258 dst_page
->fictitious
||
2261 (dst_page
->wire_count
!= 0 &&
2262 !dst_page
->pageout
) ||
2263 ((!(dst_page
->dirty
|| dst_page
->precious
||
2264 pmap_is_modified(dst_page
->phys_addr
)))
2265 && (cntrl_flags
& UPL_RET_ONLY_DIRTY
)) ||
2266 ((!(dst_page
->inactive
))
2267 && (dst_page
->page_ticket
!= page_ticket
)
2268 && ((dst_page
->page_ticket
+1) != page_ticket
)
2269 && (cntrl_flags
& UPL_PAGEOUT
)) ||
2270 ((!dst_page
->list_req_pending
) &&
2271 (cntrl_flags
& UPL_RET_ONLY_DIRTY
) &&
2272 pmap_is_referenced(dst_page
->phys_addr
))) {
2274 user_page_list
[entry
].phys_addr
= 0;
2277 if(dst_page
->busy
&&
2278 (!(dst_page
->list_req_pending
&&
2279 dst_page
->pageout
))) {
2280 if(cntrl_flags
& UPL_NOBLOCK
) {
2282 user_page_list
[entry
]
2285 dst_offset
+= PAGE_SIZE_64
;
2286 xfer_size
-= PAGE_SIZE
;
2289 /*someone else is playing with the */
2290 /* page. We will have to wait. */
2291 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
2294 /* Someone else already cleaning the page? */
2295 if((dst_page
->cleaning
|| dst_page
->absent
||
2296 dst_page
->wire_count
!= 0) &&
2297 !dst_page
->list_req_pending
) {
2299 user_page_list
[entry
].phys_addr
= 0;
2301 dst_offset
+= PAGE_SIZE_64
;
2302 xfer_size
-= PAGE_SIZE
;
2305 /* eliminate all mappings from the */
2306 /* original object and its prodigy */
2308 vm_page_lock_queues();
2309 if( !(cntrl_flags
& UPL_FILE_IO
)) {
2310 pmap_page_protect(dst_page
->phys_addr
, VM_PROT_NONE
);
2312 /* pageout statistics gathering. count */
2313 /* all the pages we will page out that */
2314 /* were not counted in the initial */
2315 /* vm_pageout_scan work */
2316 if(dst_page
->list_req_pending
)
2317 encountered_lrp
= TRUE
;
2318 if((dst_page
->dirty
||
2319 (dst_page
->object
->internal
&&
2320 dst_page
->precious
)) &&
2321 (dst_page
->list_req_pending
2323 if(encountered_lrp
) {
2325 (pages_at_higher_offsets
++;)
2328 (pages_at_lower_offsets
++;)
2332 /* Turn off busy indication on pending */
2333 /* pageout. Note: we can only get here */
2334 /* in the request pending case. */
2335 dst_page
->list_req_pending
= FALSE
;
2336 dst_page
->busy
= FALSE
;
2337 dst_page
->cleaning
= FALSE
;
2339 dirty
= pmap_is_modified(dst_page
->phys_addr
);
2340 dirty
= dirty
? TRUE
: dst_page
->dirty
;
2342 /* use pageclean setup, it is more convenient */
2343 /* even for the pageout cases here */
2344 vm_pageclean_setup(dst_page
, alias_page
,
2345 upl
->map_object
, size
- xfer_size
);
2348 dst_page
->dirty
= FALSE
;
2349 dst_page
->precious
= TRUE
;
2352 if(dst_page
->pageout
)
2353 dst_page
->busy
= TRUE
;
2355 alias_page
->absent
= FALSE
;
2357 if((!(cntrl_flags
& UPL_CLEAN_IN_PLACE
))
2358 || (cntrl_flags
& UPL_PAGEOUT
)) {
2359 /* deny access to the target page */
2360 /* while it is being worked on */
2361 if((!dst_page
->pageout
) &&
2362 (dst_page
->wire_count
== 0)) {
2363 dst_page
->busy
= TRUE
;
2364 dst_page
->pageout
= TRUE
;
2365 vm_page_wire(dst_page
);
2368 if(user_page_list
) {
2369 user_page_list
[entry
].phys_addr
2370 = dst_page
->phys_addr
;
2371 user_page_list
[entry
].dirty
=
2373 user_page_list
[entry
].pageout
=
2375 user_page_list
[entry
].absent
=
2377 user_page_list
[entry
].precious
=
2381 vm_page_unlock_queues();
2384 dst_offset
+= PAGE_SIZE_64
;
2385 xfer_size
-= PAGE_SIZE
;
2389 if(alias_page
== NULL
) {
2390 vm_object_unlock(object
);
2391 VM_PAGE_GRAB_FICTITIOUS(alias_page
);
2392 vm_object_lock(object
);
2394 dst_page
= vm_page_lookup(object
, dst_offset
);
2395 if(dst_page
!= VM_PAGE_NULL
) {
2396 if((cntrl_flags
& UPL_RET_ONLY_ABSENT
) &&
2397 !((dst_page
->list_req_pending
)
2398 && (dst_page
->absent
))) {
2399 /* we are doing extended range */
2400 /* requests. we want to grab */
2401 /* pages around some which are */
2402 /* already present. */
2404 user_page_list
[entry
].phys_addr
= 0;
2406 dst_offset
+= PAGE_SIZE_64
;
2407 xfer_size
-= PAGE_SIZE
;
2410 if((dst_page
->cleaning
) &&
2411 !(dst_page
->list_req_pending
)) {
2412 /*someone else is writing to the */
2413 /* page. We will have to wait. */
2414 PAGE_SLEEP(object
,dst_page
,THREAD_UNINT
);
2417 if ((dst_page
->fictitious
&&
2418 dst_page
->list_req_pending
)) {
2419 /* dump the fictitious page */
2420 dst_page
->list_req_pending
= FALSE
;
2421 dst_page
->clustered
= FALSE
;
2422 vm_page_lock_queues();
2423 vm_page_free(dst_page
);
2424 vm_page_unlock_queues();
2425 } else if ((dst_page
->absent
&&
2426 dst_page
->list_req_pending
)) {
2427 /* the default_pager case */
2428 dst_page
->list_req_pending
= FALSE
;
2429 dst_page
->busy
= FALSE
;
2430 dst_page
->clustered
= FALSE
;
2433 if((dst_page
= vm_page_lookup(object
, dst_offset
)) ==
2435 if(object
->private) {
2437 * This is a nasty wrinkle for users
2438 * of upl who encounter device or
2439 * private memory however, it is
2440 * unavoidable, only a fault can
2441 * reslove the actual backing
2442 * physical page by asking the
2446 user_page_list
[entry
]
2449 dst_offset
+= PAGE_SIZE_64
;
2450 xfer_size
-= PAGE_SIZE
;
2453 /* need to allocate a page */
2454 dst_page
= vm_page_alloc(object
, dst_offset
);
2455 if (dst_page
== VM_PAGE_NULL
) {
2456 vm_object_unlock(object
);
2458 vm_object_lock(object
);
2461 dst_page
->busy
= FALSE
;
2463 if(cntrl_flags
& UPL_NO_SYNC
) {
2464 dst_page
->page_lock
= 0;
2465 dst_page
->unlock_request
= 0;
2468 dst_page
->absent
= TRUE
;
2469 object
->absent_count
++;
2472 if(cntrl_flags
& UPL_NO_SYNC
) {
2473 dst_page
->page_lock
= 0;
2474 dst_page
->unlock_request
= 0;
2477 dst_page
->overwriting
= TRUE
;
2478 if(dst_page
->fictitious
) {
2479 panic("need corner case for fictitious page");
2481 if(dst_page
->page_lock
) {
2486 /* eliminate all mappings from the */
2487 /* original object and its prodigy */
2489 if(dst_page
->busy
) {
2490 /*someone else is playing with the */
2491 /* page. We will have to wait. */
2492 PAGE_SLEEP(object
, dst_page
, THREAD_UNINT
);
2496 vm_page_lock_queues();
2497 if( !(cntrl_flags
& UPL_FILE_IO
)) {
2498 pmap_page_protect(dst_page
->phys_addr
, VM_PROT_NONE
);
2500 dirty
= pmap_is_modified(dst_page
->phys_addr
);
2501 dirty
= dirty
? TRUE
: dst_page
->dirty
;
2503 vm_pageclean_setup(dst_page
, alias_page
,
2504 upl
->map_object
, size
- xfer_size
);
2506 if(cntrl_flags
& UPL_CLEAN_IN_PLACE
) {
2507 /* clean in place for read implies */
2508 /* that a write will be done on all */
2509 /* the pages that are dirty before */
2510 /* a upl commit is done. The caller */
2511 /* is obligated to preserve the */
2512 /* contents of all pages marked */
2514 upl
->flags
|= UPL_CLEAR_DIRTY
;
2518 dst_page
->dirty
= FALSE
;
2519 dst_page
->precious
= TRUE
;
2522 if (dst_page
->wire_count
== 0) {
2523 /* deny access to the target page while */
2524 /* it is being worked on */
2525 dst_page
->busy
= TRUE
;
2527 vm_page_wire(dst_page
);
2529 /* expect the page to be used */
2530 dst_page
->reference
= TRUE
;
2531 dst_page
->precious
=
2532 (cntrl_flags
& UPL_PRECIOUS
)
2534 alias_page
->absent
= FALSE
;
2536 if(user_page_list
) {
2537 user_page_list
[entry
].phys_addr
2538 = dst_page
->phys_addr
;
2539 user_page_list
[entry
].dirty
=
2541 user_page_list
[entry
].pageout
=
2543 user_page_list
[entry
].absent
=
2545 user_page_list
[entry
].precious
=
2548 vm_page_unlock_queues();
2551 dst_offset
+= PAGE_SIZE_64
;
2552 xfer_size
-= PAGE_SIZE
;
2556 if (upl
->flags
& UPL_INTERNAL
) {
2557 if(page_list_count
!= NULL
)
2558 *page_list_count
= 0;
2559 } else if (*page_list_count
> entry
) {
2560 if(page_list_count
!= NULL
)
2561 *page_list_count
= entry
;
2564 if(alias_page
!= NULL
) {
2565 vm_page_lock_queues();
2566 vm_page_free(alias_page
);
2567 vm_page_unlock_queues();
2571 vm_prot_t access_required
;
2572 /* call back all associated pages from other users of the pager */
2573 /* all future updates will be on data which is based on the */
2574 /* changes we are going to make here. Note: it is assumed that */
2575 /* we already hold copies of the data so we will not be seeing */
2576 /* an avalanche of incoming data from the pager */
2577 access_required
= (cntrl_flags
& UPL_COPYOUT_FROM
)
2578 ? VM_PROT_READ
: VM_PROT_WRITE
;
2582 if(!object
->pager_ready
) {
2583 wait_result_t wait_result
;
2585 wait_result
= vm_object_sleep(object
,
2586 VM_OBJECT_EVENT_PAGER_READY
,
2588 if (wait_result
!= THREAD_AWAKENED
) {
2589 vm_object_unlock(object
);
2590 return(KERN_FAILURE
);
2595 vm_object_unlock(object
);
2597 if (rc
= memory_object_data_unlock(
2599 dst_offset
+ object
->paging_offset
,
2602 if (rc
== MACH_SEND_INTERRUPTED
)
2605 return KERN_FAILURE
;
2610 /* lets wait on the last page requested */
2611 /* NOTE: we will have to update lock completed routine to signal */
2612 if(dst_page
!= VM_PAGE_NULL
&&
2613 (access_required
& dst_page
->page_lock
) != access_required
) {
2614 PAGE_ASSERT_WAIT(dst_page
, THREAD_UNINT
);
2615 thread_block((void (*)(void))0);
2616 vm_object_lock(object
);
2619 vm_object_unlock(object
);
2620 return KERN_SUCCESS
;
2623 /* JMM - Backward compatability for now */
2625 vm_fault_list_request(
2626 memory_object_control_t control
,
2627 vm_object_offset_t offset
,
2630 upl_page_info_t
**user_page_list_ptr
,
2631 int page_list_count
,
2634 int local_list_count
;
2635 upl_page_info_t
*user_page_list
;
2638 if (user_page_list_ptr
!= NULL
) {
2639 local_list_count
= page_list_count
;
2640 user_page_list
= *user_page_list_ptr
;
2642 local_list_count
= 0;
2643 user_page_list
= NULL
;
2645 kr
= memory_object_upl_request(control
,
2653 if(kr
!= KERN_SUCCESS
)
2656 if ((user_page_list_ptr
!= NULL
) && (cntrl_flags
& UPL_INTERNAL
)) {
2657 *user_page_list_ptr
= UPL_GET_INTERNAL_PAGE_LIST(*upl_ptr
);
2660 return KERN_SUCCESS
;
2666 * Routine: vm_object_super_upl_request
2668 * Cause the population of a portion of a vm_object
2669 * in much the same way as memory_object_upl_request.
2670 * Depending on the nature of the request, the pages
2671 * returned may be contain valid data or be uninitialized.
2672 * However, the region may be expanded up to the super
2673 * cluster size provided.
2676 __private_extern__ kern_return_t
2677 vm_object_super_upl_request(
2679 vm_object_offset_t offset
,
2681 vm_size_t super_cluster
,
2683 upl_page_info_t
*user_page_list
,
2684 unsigned int *page_list_count
,
2687 vm_page_t target_page
;
2690 if(object
->paging_offset
> offset
)
2691 return KERN_FAILURE
;
2693 offset
= offset
- object
->paging_offset
;
2694 if(cntrl_flags
& UPL_PAGEOUT
) {
2695 if((target_page
= vm_page_lookup(object
, offset
))
2697 ticket
= target_page
->page_ticket
;
2698 cntrl_flags
= cntrl_flags
& ~(int)UPL_PAGE_TICKET_MASK
;
2699 cntrl_flags
= cntrl_flags
|
2700 ((ticket
<< UPL_PAGE_TICKET_SHIFT
)
2701 & UPL_PAGE_TICKET_MASK
);
2706 /* turns off super cluster exercised by the default_pager */
2708 super_cluster = size;
2710 if ((super_cluster
> size
) &&
2711 (vm_page_free_count
> vm_page_free_reserved
)) {
2713 vm_object_offset_t base_offset
;
2714 vm_size_t super_size
;
2716 base_offset
= (offset
&
2717 ~((vm_object_offset_t
) super_cluster
- 1));
2718 super_size
= (offset
+size
) > (base_offset
+ super_cluster
) ?
2719 super_cluster
<<1 : super_cluster
;
2720 super_size
= ((base_offset
+ super_size
) > object
->size
) ?
2721 (object
->size
- base_offset
) : super_size
;
2722 if(offset
> (base_offset
+ super_size
))
2723 panic("vm_object_super_upl_request: Missed target pageout 0x%x,0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", offset
, base_offset
, super_size
, super_cluster
, size
, object
->paging_offset
);
2724 /* apparently there is a case where the vm requests a */
2725 /* page to be written out who's offset is beyond the */
2727 if((offset
+ size
) > (base_offset
+ super_size
))
2728 super_size
= (offset
+ size
) - base_offset
;
2730 offset
= base_offset
;
2733 vm_object_upl_request(object
, offset
, size
,
2734 upl
, user_page_list
, page_list_count
,
2743 vm_offset_t
*dst_addr
)
2746 vm_object_offset_t offset
;
2751 if (upl
== UPL_NULL
)
2752 return KERN_INVALID_ARGUMENT
;
2756 /* check to see if already mapped */
2757 if(UPL_PAGE_LIST_MAPPED
& upl
->flags
) {
2759 return KERN_FAILURE
;
2762 offset
= 0; /* Always map the entire object */
2765 vm_object_lock(upl
->map_object
);
2766 upl
->map_object
->ref_count
++;
2767 vm_object_res_reference(upl
->map_object
);
2768 vm_object_unlock(upl
->map_object
);
2773 /* NEED A UPL_MAP ALIAS */
2774 kr
= vm_map_enter(map
, dst_addr
, size
, (vm_offset_t
) 0, TRUE
,
2775 upl
->map_object
, offset
, FALSE
,
2776 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
2778 if (kr
!= KERN_SUCCESS
) {
2783 for(addr
=*dst_addr
; size
> 0; size
-=PAGE_SIZE
,addr
+=PAGE_SIZE
) {
2784 m
= vm_page_lookup(upl
->map_object
, offset
);
2786 unsigned int cache_attr
;
2787 cache_attr
= ((unsigned int)m
->object
->wimg_bits
) & VM_WIMG_MASK
;
2789 PMAP_ENTER(map
->pmap
, addr
,
2793 offset
+=PAGE_SIZE_64
;
2795 upl
->ref_count
++; /* hold a reference for the mapping */
2796 upl
->flags
|= UPL_PAGE_LIST_MAPPED
;
2797 upl
->kaddr
= *dst_addr
;
2799 return KERN_SUCCESS
;
2811 if (upl
== UPL_NULL
)
2812 return KERN_INVALID_ARGUMENT
;
2815 if(upl
->flags
& UPL_PAGE_LIST_MAPPED
) {
2818 assert(upl
->ref_count
> 1);
2819 upl
->ref_count
--; /* removing mapping ref */
2820 upl
->flags
&= ~UPL_PAGE_LIST_MAPPED
;
2821 upl
->kaddr
= (vm_offset_t
) 0;
2824 vm_deallocate(map
, addr
, size
);
2825 return KERN_SUCCESS
;
2828 return KERN_FAILURE
;
2837 upl_page_info_t
*page_list
,
2838 mach_msg_type_number_t count
,
2841 vm_size_t xfer_size
= size
;
2842 vm_object_t shadow_object
= upl
->map_object
->shadow
;
2843 vm_object_t object
= upl
->map_object
;
2844 vm_object_offset_t target_offset
;
2845 vm_object_offset_t page_offset
;
2850 if (upl
== UPL_NULL
)
2851 return KERN_INVALID_ARGUMENT
;
2857 if(upl
->flags
& UPL_DEVICE_MEMORY
) {
2859 } else if ((offset
+ size
) > upl
->size
) {
2861 return KERN_FAILURE
;
2864 vm_object_lock(shadow_object
);
2866 entry
= offset
/PAGE_SIZE
;
2867 target_offset
= (vm_object_offset_t
)offset
;
2872 if((t
= vm_page_lookup(object
, target_offset
)) != NULL
) {
2875 page_offset
= t
->offset
;
2878 m
= vm_page_lookup(shadow_object
,
2879 page_offset
+ object
->shadow_offset
);
2880 if(m
!= VM_PAGE_NULL
) {
2881 vm_object_paging_end(shadow_object
);
2882 vm_page_lock_queues();
2883 if ((upl
->flags
& UPL_CLEAR_DIRTY
) ||
2884 (flags
& UPL_COMMIT_CLEAR_DIRTY
)) {
2885 pmap_clear_modify(m
->phys_addr
);
2889 p
= &(page_list
[entry
]);
2890 if(p
->phys_addr
&& p
->pageout
&& !m
->pageout
) {
2894 } else if (page_list
[entry
].phys_addr
&&
2895 !p
->pageout
&& m
->pageout
&&
2896 !m
->dump_cleaning
) {
2899 m
->overwriting
= FALSE
;
2901 PAGE_WAKEUP_DONE(m
);
2903 page_list
[entry
].phys_addr
= 0;
2905 m
->dump_cleaning
= FALSE
;
2907 vm_page_laundry_count
--;
2909 if (vm_page_laundry_count
< vm_page_laundry_min
) {
2910 vm_page_laundry_min
= 0;
2911 thread_wakeup((event_t
)
2912 &vm_page_laundry_count
);
2916 m
->cleaning
= FALSE
;
2918 #if MACH_CLUSTER_STATS
2919 if (m
->wanted
) vm_pageout_target_collisions
++;
2921 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
2922 m
->dirty
= pmap_is_modified(m
->phys_addr
);
2925 vm_pageout_target_page_dirtied
++;)
2926 vm_page_unwire(m
);/* reactivates */
2927 VM_STAT(reactivations
++);
2928 PAGE_WAKEUP_DONE(m
);
2931 vm_pageout_target_page_freed
++;)
2932 vm_page_free(m
);/* clears busy, etc. */
2933 VM_STAT(pageouts
++);
2935 vm_page_unlock_queues();
2936 target_offset
+= PAGE_SIZE_64
;
2937 xfer_size
-= PAGE_SIZE
;
2941 if (flags
& UPL_COMMIT_INACTIVATE
) {
2942 vm_page_deactivate(m
);
2943 m
->reference
= FALSE
;
2944 pmap_clear_reference(m
->phys_addr
);
2945 } else if (!m
->active
&& !m
->inactive
) {
2947 vm_page_activate(m
);
2949 vm_page_deactivate(m
);
2951 #if MACH_CLUSTER_STATS
2952 m
->dirty
= pmap_is_modified(m
->phys_addr
);
2954 if (m
->dirty
) vm_pageout_cluster_dirtied
++;
2955 else vm_pageout_cluster_cleaned
++;
2956 if (m
->wanted
) vm_pageout_cluster_collisions
++;
2961 if((m
->busy
) && (m
->cleaning
)) {
2962 /* the request_page_list case */
2965 if(shadow_object
->absent_count
== 1)
2966 vm_object_absent_release(shadow_object
);
2968 shadow_object
->absent_count
--;
2970 m
->overwriting
= FALSE
;
2974 else if (m
->overwriting
) {
2975 /* alternate request page list, write to
2976 /* page_list case. Occurs when the original
2977 /* page was wired at the time of the list
2979 assert(m
->wire_count
!= 0);
2980 vm_page_unwire(m
);/* reactivates */
2981 m
->overwriting
= FALSE
;
2983 m
->cleaning
= FALSE
;
2984 /* It is a part of the semantic of COPYOUT_FROM */
2985 /* UPLs that a commit implies cache sync */
2986 /* between the vm page and the backing store */
2987 /* this can be used to strip the precious bit */
2988 /* as well as clean */
2989 if (upl
->flags
& UPL_PAGE_SYNC_DONE
)
2990 m
->precious
= FALSE
;
2992 if (flags
& UPL_COMMIT_SET_DIRTY
) {
2996 * Wakeup any thread waiting for the page to be un-cleaning.
2999 vm_page_unlock_queues();
3003 target_offset
+= PAGE_SIZE_64
;
3004 xfer_size
-= PAGE_SIZE
;
3008 vm_object_unlock(shadow_object
);
3009 if(flags
& UPL_COMMIT_NOTIFY_EMPTY
) {
3010 if((upl
->flags
& UPL_DEVICE_MEMORY
)
3011 || (queue_empty(&upl
->map_object
->memq
)))
3016 return KERN_SUCCESS
;
3027 vm_size_t xfer_size
= size
;
3028 vm_object_t shadow_object
= upl
->map_object
->shadow
;
3029 vm_object_t object
= upl
->map_object
;
3030 vm_object_offset_t target_offset
;
3031 vm_object_offset_t page_offset
;
3036 if (upl
== UPL_NULL
)
3037 return KERN_INVALID_ARGUMENT
;
3040 if(upl
->flags
& UPL_DEVICE_MEMORY
) {
3042 } else if ((offset
+ size
) > upl
->size
) {
3044 return KERN_FAILURE
;
3047 vm_object_lock(shadow_object
);
3049 entry
= offset
/PAGE_SIZE
;
3050 target_offset
= (vm_object_offset_t
)offset
;
3055 if((t
= vm_page_lookup(object
, target_offset
)) != NULL
) {
3058 page_offset
= t
->offset
;
3061 m
= vm_page_lookup(shadow_object
,
3062 page_offset
+ object
->shadow_offset
);
3063 if(m
!= VM_PAGE_NULL
) {
3064 vm_object_paging_end(m
->object
);
3065 vm_page_lock_queues();
3067 /* COPYOUT = FALSE case */
3068 /* check for error conditions which must */
3069 /* be passed back to the pages customer */
3070 if(error
& UPL_ABORT_RESTART
) {
3073 vm_object_absent_release(m
->object
);
3074 m
->page_error
= KERN_MEMORY_ERROR
;
3076 } else if(error
& UPL_ABORT_UNAVAILABLE
) {
3079 m
->clustered
= FALSE
;
3080 } else if(error
& UPL_ABORT_ERROR
) {
3083 vm_object_absent_release(m
->object
);
3084 m
->page_error
= KERN_MEMORY_ERROR
;
3086 } else if(error
& UPL_ABORT_DUMP_PAGES
) {
3087 m
->clustered
= TRUE
;
3089 m
->clustered
= TRUE
;
3093 m
->cleaning
= FALSE
;
3094 m
->overwriting
= FALSE
;
3095 PAGE_WAKEUP_DONE(m
);
3099 vm_page_activate(m
);
3102 vm_page_unlock_queues();
3103 target_offset
+= PAGE_SIZE_64
;
3104 xfer_size
-= PAGE_SIZE
;
3109 * Handle the trusted pager throttle.
3112 vm_page_laundry_count
--;
3114 if (vm_page_laundry_count
3115 < vm_page_laundry_min
) {
3116 vm_page_laundry_min
= 0;
3117 thread_wakeup((event_t
)
3118 &vm_page_laundry_count
);
3123 assert(m
->wire_count
== 1);
3127 m
->dump_cleaning
= FALSE
;
3128 m
->cleaning
= FALSE
;
3130 m
->overwriting
= FALSE
;
3132 vm_external_state_clr(
3133 m
->object
->existence_map
, m
->offset
);
3134 #endif /* MACH_PAGEMAP */
3135 if(error
& UPL_ABORT_DUMP_PAGES
) {
3137 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
3141 vm_page_unlock_queues();
3144 target_offset
+= PAGE_SIZE_64
;
3145 xfer_size
-= PAGE_SIZE
;
3148 vm_object_unlock(shadow_object
);
3149 if(error
& UPL_ABORT_NOTIFY_EMPTY
) {
3150 if((upl
->flags
& UPL_DEVICE_MEMORY
)
3151 || (queue_empty(&upl
->map_object
->memq
)))
3155 return KERN_SUCCESS
;
3163 vm_object_t object
= NULL
;
3164 vm_object_t shadow_object
= NULL
;
3165 vm_object_offset_t offset
;
3166 vm_object_offset_t shadow_offset
;
3167 vm_object_offset_t target_offset
;
3171 if (upl
== UPL_NULL
)
3172 return KERN_INVALID_ARGUMENT
;
3175 if(upl
->flags
& UPL_DEVICE_MEMORY
) {
3177 return KERN_SUCCESS
;
3180 object
= upl
->map_object
;
3182 if (object
== NULL
) {
3183 panic("upl_abort: upl object is not backed by an object");
3185 return KERN_INVALID_ARGUMENT
;
3188 shadow_object
= upl
->map_object
->shadow
;
3189 shadow_offset
= upl
->map_object
->shadow_offset
;
3191 vm_object_lock(shadow_object
);
3192 for(i
= 0; i
<(upl
->size
); i
+=PAGE_SIZE
, offset
+= PAGE_SIZE_64
) {
3193 if((t
= vm_page_lookup(object
,offset
)) != NULL
) {
3194 target_offset
= t
->offset
+ shadow_offset
;
3195 if((m
= vm_page_lookup(shadow_object
, target_offset
)) != NULL
) {
3196 vm_object_paging_end(m
->object
);
3197 vm_page_lock_queues();
3199 /* COPYOUT = FALSE case */
3200 /* check for error conditions which must */
3201 /* be passed back to the pages customer */
3202 if(error
& UPL_ABORT_RESTART
) {
3205 vm_object_absent_release(m
->object
);
3206 m
->page_error
= KERN_MEMORY_ERROR
;
3208 } else if(error
& UPL_ABORT_UNAVAILABLE
) {
3211 m
->clustered
= FALSE
;
3212 } else if(error
& UPL_ABORT_ERROR
) {
3215 vm_object_absent_release(m
->object
);
3216 m
->page_error
= KERN_MEMORY_ERROR
;
3218 } else if(error
& UPL_ABORT_DUMP_PAGES
) {
3219 m
->clustered
= TRUE
;
3221 m
->clustered
= TRUE
;
3224 m
->cleaning
= FALSE
;
3225 m
->overwriting
= FALSE
;
3226 PAGE_WAKEUP_DONE(m
);
3230 vm_page_activate(m
);
3232 vm_page_unlock_queues();
3236 * Handle the trusted pager throttle.
3239 vm_page_laundry_count
--;
3241 if (vm_page_laundry_count
3242 < vm_page_laundry_min
) {
3243 vm_page_laundry_min
= 0;
3244 thread_wakeup((event_t
)
3245 &vm_page_laundry_count
);
3250 assert(m
->wire_count
== 1);
3254 m
->dump_cleaning
= FALSE
;
3255 m
->cleaning
= FALSE
;
3257 m
->overwriting
= FALSE
;
3259 vm_external_state_clr(
3260 m
->object
->existence_map
, m
->offset
);
3261 #endif /* MACH_PAGEMAP */
3262 if(error
& UPL_ABORT_DUMP_PAGES
) {
3264 pmap_page_protect(m
->phys_addr
, VM_PROT_NONE
);
3268 vm_page_unlock_queues();
3272 vm_object_unlock(shadow_object
);
3273 /* Remove all the pages from the map object so */
3274 /* vm_pageout_object_terminate will work properly. */
3275 while (!queue_empty(&upl
->map_object
->memq
)) {
3278 p
= (vm_page_t
) queue_first(&upl
->map_object
->memq
);
3283 assert(!p
->cleaning
);
3288 return KERN_SUCCESS
;
3291 /* an option on commit should be wire */
3295 upl_page_info_t
*page_list
,
3296 mach_msg_type_number_t count
)
3298 if (upl
== UPL_NULL
)
3299 return KERN_INVALID_ARGUMENT
;
3305 if (upl
->flags
& UPL_DEVICE_MEMORY
)
3307 if ((upl
->flags
& UPL_CLEAR_DIRTY
) ||
3308 (upl
->flags
& UPL_PAGE_SYNC_DONE
)) {
3309 vm_object_t shadow_object
= upl
->map_object
->shadow
;
3310 vm_object_t object
= upl
->map_object
;
3311 vm_object_offset_t target_offset
;
3316 vm_object_lock(shadow_object
);
3318 target_offset
= object
->shadow_offset
;
3319 xfer_end
= upl
->size
+ object
->shadow_offset
;
3321 while(target_offset
< xfer_end
) {
3322 if ((t
= vm_page_lookup(object
,
3323 target_offset
- object
->shadow_offset
))
3326 shadow_object
, target_offset
);
3327 if(m
!= VM_PAGE_NULL
) {
3328 if (upl
->flags
& UPL_CLEAR_DIRTY
) {
3329 pmap_clear_modify(m
->phys_addr
);
3332 /* It is a part of the semantic of */
3333 /* COPYOUT_FROM UPLs that a commit */
3334 /* implies cache sync between the */
3335 /* vm page and the backing store */
3336 /* this can be used to strip the */
3337 /* precious bit as well as clean */
3338 if (upl
->flags
& UPL_PAGE_SYNC_DONE
)
3339 m
->precious
= FALSE
;
3342 target_offset
+= PAGE_SIZE_64
;
3344 vm_object_unlock(shadow_object
);
3347 vm_object_t shadow_object
= upl
->map_object
->shadow
;
3348 vm_object_t object
= upl
->map_object
;
3349 vm_object_offset_t target_offset
;
3356 vm_object_lock(shadow_object
);
3359 target_offset
= object
->shadow_offset
;
3360 xfer_end
= upl
->size
+ object
->shadow_offset
;
3362 while(target_offset
< xfer_end
) {
3364 if ((t
= vm_page_lookup(object
,
3365 target_offset
- object
->shadow_offset
))
3367 target_offset
+= PAGE_SIZE_64
;
3372 m
= vm_page_lookup(shadow_object
, target_offset
);
3373 if(m
!= VM_PAGE_NULL
) {
3374 p
= &(page_list
[entry
]);
3375 if(page_list
[entry
].phys_addr
&&
3376 p
->pageout
&& !m
->pageout
) {
3377 vm_page_lock_queues();
3381 vm_page_unlock_queues();
3382 } else if (page_list
[entry
].phys_addr
&&
3383 !p
->pageout
&& m
->pageout
&&
3384 !m
->dump_cleaning
) {
3385 vm_page_lock_queues();
3388 m
->overwriting
= FALSE
;
3390 PAGE_WAKEUP_DONE(m
);
3391 vm_page_unlock_queues();
3393 page_list
[entry
].phys_addr
= 0;
3395 target_offset
+= PAGE_SIZE_64
;
3399 vm_object_unlock(shadow_object
);
3402 return KERN_SUCCESS
;
3406 upl_get_internal_pagelist_offset()
3408 return sizeof(struct upl
);
3415 upl
->flags
|= UPL_CLEAR_DIRTY
;
3422 upl
->flags
&= ~UPL_CLEAR_DIRTY
;
3428 boolean_t
upl_page_present(upl_page_info_t
*upl
, int index
)
3430 return(UPL_PAGE_PRESENT(upl
, index
));
3432 boolean_t
upl_dirty_page(upl_page_info_t
*upl
, int index
)
3434 return(UPL_DIRTY_PAGE(upl
, index
));
3436 boolean_t
upl_valid_page(upl_page_info_t
*upl
, int index
)
3438 return(UPL_VALID_PAGE(upl
, index
));
3440 vm_offset_t
upl_phys_page(upl_page_info_t
*upl
, int index
)
3442 return((vm_offset_t
)UPL_PHYS_PAGE(upl
, index
));
3446 vm_countdirtypages(void)
3458 vm_page_lock_queues();
3459 m
= (vm_page_t
) queue_first(&vm_page_queue_inactive
);
3461 if (m
==(vm_page_t
)0) break;
3463 if(m
->dirty
) dpages
++;
3464 if(m
->pageout
) pgopages
++;
3465 if(m
->precious
) precpages
++;
3467 m
= (vm_page_t
) queue_next(&m
->pageq
);
3468 if (m
==(vm_page_t
)0) break;
3470 } while (!queue_end(&vm_page_queue_inactive
,(queue_entry_t
) m
));
3471 vm_page_unlock_queues();
3473 vm_page_lock_queues();
3474 m
= (vm_page_t
) queue_first(&vm_page_queue_zf
);
3476 if (m
==(vm_page_t
)0) break;
3478 if(m
->dirty
) dpages
++;
3479 if(m
->pageout
) pgopages
++;
3480 if(m
->precious
) precpages
++;
3482 m
= (vm_page_t
) queue_next(&m
->pageq
);
3483 if (m
==(vm_page_t
)0) break;
3485 } while (!queue_end(&vm_page_queue_zf
,(queue_entry_t
) m
));
3486 vm_page_unlock_queues();
3488 printf("IN Q: %d : %d : %d\n", dpages
, pgopages
, precpages
);
3494 vm_page_lock_queues();
3495 m
= (vm_page_t
) queue_first(&vm_page_queue_active
);
3498 if(m
== (vm_page_t
)0) break;
3499 if(m
->dirty
) dpages
++;
3500 if(m
->pageout
) pgopages
++;
3501 if(m
->precious
) precpages
++;
3503 m
= (vm_page_t
) queue_next(&m
->pageq
);
3504 if(m
== (vm_page_t
)0) break;
3506 } while (!queue_end(&vm_page_queue_active
,(queue_entry_t
) m
));
3507 vm_page_unlock_queues();
3509 printf("AC Q: %d : %d : %d\n", dpages
, pgopages
, precpages
);
3512 #endif /* MACH_BSD */
3515 kern_return_t
upl_ubc_alias_set(upl_t upl
, unsigned int alias1
, unsigned int alias2
)
3517 upl
->ubc_alias1
= alias1
;
3518 upl
->ubc_alias2
= alias2
;
3519 return KERN_SUCCESS
;
3521 int upl_ubc_alias_get(upl_t upl
, unsigned int * al
, unsigned int * al2
)
3524 *al
= upl
->ubc_alias1
;
3526 *al2
= upl
->ubc_alias2
;
3527 return KERN_SUCCESS
;
3529 #endif /* UBC_DEBUG */
3534 #include <ddb/db_output.h>
3535 #include <ddb/db_print.h>
3536 #include <vm/vm_print.h>
3538 #define printf kdbprintf
3539 extern int db_indent
;
3540 void db_pageout(void);
3545 extern int vm_page_gobble_count
;
3547 iprintf("VM Statistics:\n");
3549 iprintf("pages:\n");
3551 iprintf("activ %5d inact %5d free %5d",
3552 vm_page_active_count
, vm_page_inactive_count
,
3553 vm_page_free_count
);
3554 printf(" wire %5d gobbl %5d\n",
3555 vm_page_wire_count
, vm_page_gobble_count
);
3556 iprintf("laund %5d\n",
3557 vm_page_laundry_count
);
3559 iprintf("target:\n");
3561 iprintf("min %5d inact %5d free %5d",
3562 vm_page_free_min
, vm_page_inactive_target
,
3563 vm_page_free_target
);
3564 printf(" resrv %5d\n", vm_page_free_reserved
);
3567 iprintf("burst:\n");
3569 iprintf("max %5d min %5d wait %5d empty %5d\n",
3570 vm_pageout_burst_max
, vm_pageout_burst_min
,
3571 vm_pageout_burst_wait
, vm_pageout_empty_wait
);
3573 iprintf("pause:\n");
3575 iprintf("count %5d max %5d\n",
3576 vm_pageout_pause_count
, vm_pageout_pause_max
);
3578 iprintf("scan_continue called %8d\n", c_vm_pageout_scan_continue
);
3579 #endif /* MACH_COUNTERS */
3589 extern int c_laundry_pages_freed
;
3590 #endif /* MACH_COUNTERS */
3592 iprintf("Pageout Statistics:\n");
3594 iprintf("active %5d inactv %5d\n",
3595 vm_pageout_active
, vm_pageout_inactive
);
3596 iprintf("nolock %5d avoid %5d busy %5d absent %5d\n",
3597 vm_pageout_inactive_nolock
, vm_pageout_inactive_avoid
,
3598 vm_pageout_inactive_busy
, vm_pageout_inactive_absent
);
3599 iprintf("used %5d clean %5d dirty %5d\n",
3600 vm_pageout_inactive_used
, vm_pageout_inactive_clean
,
3601 vm_pageout_inactive_dirty
);
3603 iprintf("laundry_pages_freed %d\n", c_laundry_pages_freed
);
3604 #endif /* MACH_COUNTERS */
3605 #if MACH_CLUSTER_STATS
3606 iprintf("Cluster Statistics:\n");
3608 iprintf("dirtied %5d cleaned %5d collisions %5d\n",
3609 vm_pageout_cluster_dirtied
, vm_pageout_cluster_cleaned
,
3610 vm_pageout_cluster_collisions
);
3611 iprintf("clusters %5d conversions %5d\n",
3612 vm_pageout_cluster_clusters
, vm_pageout_cluster_conversions
);
3614 iprintf("Target Statistics:\n");
3616 iprintf("collisions %5d page_dirtied %5d page_freed %5d\n",
3617 vm_pageout_target_collisions
, vm_pageout_target_page_dirtied
,
3618 vm_pageout_target_page_freed
);
3620 #endif /* MACH_CLUSTER_STATS */
3624 #if MACH_CLUSTER_STATS
3625 unsigned long vm_pageout_cluster_dirtied
= 0;
3626 unsigned long vm_pageout_cluster_cleaned
= 0;
3627 unsigned long vm_pageout_cluster_collisions
= 0;
3628 unsigned long vm_pageout_cluster_clusters
= 0;
3629 unsigned long vm_pageout_cluster_conversions
= 0;
3630 unsigned long vm_pageout_target_collisions
= 0;
3631 unsigned long vm_pageout_target_page_dirtied
= 0;
3632 unsigned long vm_pageout_target_page_freed
= 0;
3633 #define CLUSTER_STAT(clause) clause
3634 #else /* MACH_CLUSTER_STATS */
3635 #define CLUSTER_STAT(clause)
3636 #endif /* MACH_CLUSTER_STATS */
3638 #endif /* MACH_KDB */